index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
24,545
|
mit-han-lab/apq
|
refs/heads/master
|
/elastic_nn/networks/dynamic_quantized_proxyless.py
|
import copy
import torch.nn.functional as F
import random
import numpy as np
import torch
from elastic_nn.modules.dynamic_layers import DynamicMBConvLayer, DynamicConvLayer, DynamicLinearLayer, \
DynamicMBQConvLayer, DynamicQConvLayer, DynamicQLinearLayer
from imagenet_codebase.modules.layers import ConvLayer, IdentityLayer, LinearLayer, MBInvertedConvLayer, \
MBInvertedQConvLayer, \
QConvLayer, QLinearLayer
from imagenet_codebase.networks.proxyless_nets import ProxylessNASNets, MobileInvertedResidualBlock, AvgMBConvStage
from imagenet_codebase.utils import make_divisible, int2list, list_weighted_sum, AverageMeter
class DynamicQuantizedProxylessNASNets(ProxylessNASNets):
    """Quantization-aware elastic ProxylessNAS supernet.

    A single over-parameterized network whose blocks can vary at runtime in
    kernel size, expand ratio, depth, width multiplier, and per-block
    quantization bit-widths.  Sub-networks are activated in place via
    ``set_active_subnet`` / ``set_quantization_policy`` and can be extracted
    as standalone models with ``get_active_subnet``.
    """

    def __init__(self, n_classes=1000, bn_param=(0.1, 1e-3), dropout_rate=0.1, base_stage_width=None,
                 width_mult_list=1.0, ks_list=3, expand_ratio_list=6, depth_list=4,
                 depth_ensemble_list=None, depth_ensemble_mode='avg'):
        """Build the supernet.

        n_classes          -- classifier output size.
        bn_param           -- (momentum, eps) applied to every BatchNorm.
        dropout_rate       -- dropout before the final linear layer.
        base_stage_width   -- 'v2' / 'old' select preset width tables;
                              anything else falls back to ProxylessNAS widths.
        width_mult_list    -- candidate channel multipliers (scalar or list).
        ks_list            -- candidate kernel sizes for the dynamic blocks.
        expand_ratio_list  -- candidate expand ratios for the dynamic blocks.
        depth_list         -- candidate per-stage depths; None freezes a
                              fixed ProxylessNAS-like depth schedule.
        depth_ensemble_list -- depths whose outputs are averaged in forward()
                               (depth-ensemble training); None disables it.
        depth_ensemble_mode -- only 'avg' is implemented.
        """
        # int2list normalizes scalar-or-list arguments to sorted candidate lists.
        self.width_mult_list = int2list(width_mult_list, 1)
        self.ks_list = int2list(ks_list, 1)
        self.expand_ratio_list = int2list(expand_ratio_list, 1)
        self.depth_list = int2list(depth_list, 1)
        self.depth_ensemble_list = depth_ensemble_list
        self.depth_ensemble_mode = depth_ensemble_mode
        self.width_mult_list.sort()
        self.ks_list.sort()
        self.expand_ratio_list.sort()
        self.depth_list.sort()
        # Stage width tables: [stem, first block, 5 inner stages..., head].
        if base_stage_width == 'v2':
            base_stage_width = [32, 16, 24, 32, 64, 96, 160, 320, 1280]
        elif base_stage_width == 'old':
            base_stage_width = [32, 16, 32, 40, 80, 96, 192, 320, 1280]
        else:
            # ProxylessNAS Stage Width
            base_stage_width = [32, 16, 24, 40, 80, 96, 192, 320, 1280]
        # Per-width-multiplier channel counts, rounded to multiples of 8.
        input_channel = [make_divisible(base_stage_width[0] * width_mult, 8) for width_mult in self.width_mult_list]
        first_block_width = [make_divisible(base_stage_width[1] * width_mult, 8) for width_mult in self.width_mult_list]
        last_channel = [
            make_divisible(base_stage_width[-1] * width_mult, 8) if width_mult > 1.0 else base_stage_width[-1]
            for width_mult in self.width_mult_list
        ]
        # first conv layer
        # Static (single width) vs dynamic (elastic width) stem.  The static
        # stem keeps activations unquantized (a_bit=-1); the dynamic one uses
        # 8-bit activations — NOTE(review): confirm this asymmetry is intended.
        if len(input_channel) == 1:
            first_conv = QConvLayer(
                3, max(input_channel), kernel_size=3, stride=2, use_bn=True, act_func='relu6',
                ops_order='weight_bn_act',
                w_bit=8, a_bit=-1, half_wave=False
            )
        else:
            first_conv = DynamicQConvLayer(
                in_channel_list=int2list(3, len(input_channel)), out_channel_list=input_channel, kernel_size=3,
                stride=2, act_func='relu6', w_bit=8, a_bit=8, half_wave=False
            )
        # first block
        # The first MB block is fixed (k=3, expand=1, stride=1); only its
        # channel counts follow the width multiplier.  8-bit everywhere.
        if len(first_block_width) == 1:
            first_block_conv = MBInvertedQConvLayer(
                in_channels=max(input_channel), out_channels=max(first_block_width), kernel_size=3, stride=1,
                expand_ratio=1, act_func='relu6', pw_w_bit=8, pw_a_bit=8, dw_w_bit=8, dw_a_bit=8
            )
        else:
            first_block_conv = DynamicMBQConvLayer(
                in_channel_list=input_channel, out_channel_list=first_block_width, kernel_size_list=3,
                expand_ratio_list=1, stride=1, act_func='relu6',
                # pw_w_bit=4, pw_a_bit=4, dw_w_bit=4, dw_a_bit=4
                pw_w_bit=8, pw_a_bit=8, dw_w_bit=8, dw_a_bit=8
            )
        first_block = MobileInvertedResidualBlock(first_block_conv, None)
        input_channel = first_block_width
        # inverted residual blocks
        # block_group_info maps each stage to its (block indices, width list);
        # index 0 is reserved for the fixed first block above.
        self.block_group_info = []
        blocks = [first_block]
        _block_index = 1
        stride_stages = [2, 2, 2, 1, 2, 1]
        if depth_list is None:
            # Fixed ProxylessNAS-like depth schedule; no depth elasticity.
            n_block_list = [2, 3, 4, 3, 3, 1]
            self.depth_list = [4]
        else:
            # Build every stage at max depth; runtime_depth trims at forward.
            n_block_list = [max(self.depth_list)] * 5 + [1]
        width_list = []
        for base_width in base_stage_width[2:-1]:
            width = [make_divisible(base_width * width_mult, 8) for width_mult in self.width_mult_list]
            width_list.append(width)
        for width, n_block, s in zip(width_list, n_block_list, stride_stages):
            self.block_group_info.append(
                ([_block_index + i for i in range(n_block)], width)
            )
            _block_index += n_block
            output_channel = width
            for i in range(n_block):
                # Only the first block of a stage may downsample.
                if i == 0:
                    stride = s
                else:
                    stride = 1
                mobile_inverted_conv = DynamicMBQConvLayer(
                    in_channel_list=int2list(input_channel, 1), out_channel_list=int2list(output_channel, 1),
                    kernel_size_list=ks_list, expand_ratio_list=expand_ratio_list, stride=stride, act_func='relu6',
                    # pw_w_bit=4, pw_a_bit=4, dw_w_bit=4, dw_a_bit=4
                    pw_w_bit=8, pw_a_bit=8, dw_w_bit=8, dw_a_bit=8
                )
                # Residual shortcut only when shapes are guaranteed to match.
                if stride == 1 and input_channel == output_channel:
                    shortcut = IdentityLayer(input_channel, input_channel)
                else:
                    shortcut = None
                mb_inverted_block = MobileInvertedResidualBlock(mobile_inverted_conv, shortcut)
                blocks.append(mb_inverted_block)
                input_channel = output_channel
        # 1x1_conv before global average pooling
        if len(last_channel) == 1:
            feature_mix_layer = QConvLayer(
                max(input_channel), max(last_channel), kernel_size=1, use_bn=True, act_func='relu6',
                w_bit=8, a_bit=8, half_wave=False
            )
            classifier = QLinearLayer(max(last_channel), n_classes, dropout_rate=dropout_rate, w_bit=8, a_bit=8)
        else:
            # NOTE(review): other DynamicMBQConvLayer constructions in this
            # file pass kernel_size_list=/expand_ratio_list= and never
            # half_wave=; confirm DynamicMBQConvLayer accepts kernel_size=
            # and half_wave=, and that an MB layer (not a plain dynamic
            # conv) is intended as the feature-mix layer here.
            feature_mix_layer = DynamicMBQConvLayer(
                in_channel_list=input_channel, out_channel_list=last_channel, kernel_size=1, stride=1, act_func='relu6',
                # pw_w_bit=4, pw_a_bit=4, dw_w_bit=4, dw_a_bit=4
                pw_w_bit=8, pw_a_bit=8, dw_w_bit=8, dw_a_bit=8, half_wave=False
            )
            classifier = DynamicQLinearLayer(
                in_features_list=last_channel, out_features=n_classes, bias=True, dropout_rate=dropout_rate,
                w_bit=8, a_bit=8
            )
        super(DynamicQuantizedProxylessNASNets, self).__init__(first_conv, blocks, feature_mix_layer, classifier)
        # set bn param
        self.set_bn_param(momentum=bn_param[0], eps=bn_param[1])
        # runtime_depth
        # Active number of blocks per stage; initialized to the full depth.
        self.runtime_depth = [
            len(block_idx) for block_idx, _ in self.block_group_info
        ]
        if self.depth_ensemble_list is not None:
            self.depth_ensemble_list.sort()
    """ MyNetwork required methods """
    def forward(self, x):
        """Run the currently active sub-network.

        Per stage, only the first ``runtime_depth[stage]`` blocks execute.
        With depth-ensemble enabled, outputs at each depth listed in
        depth_ensemble_list are collected and merged by a weighted sum.
        """
        # first conv
        x = self.first_conv(x)
        # first block
        x = self.blocks[0](x)
        # blocks
        for stage_id, (block_idx, _) in enumerate(self.block_group_info):
            depth = self.runtime_depth[stage_id]
            active_idx = block_idx[:depth]
            # Ensemble applies only to full-depth (elastic) stages; the last
            # single-block stage never matches max(depth_list).
            if self.depth_ensemble_list is not None and len(block_idx) == max(self.depth_list):
                experts = []
                for d, idx in enumerate(active_idx):
                    x = self.blocks[idx](x)
                    if (d + 1) in self.depth_ensemble_list:
                        experts.append(x)
                if len(experts) > 0:
                    ensemble_weights = self.get_depth_ensemble_weights(stage_id, len(experts))
                    x = list_weighted_sum(experts, ensemble_weights)
            else:
                for idx in active_idx:
                    x = self.blocks[idx](x)
        # feature_mix_layer
        x = self.feature_mix_layer(x)
        # Global average pooling over the spatial dimensions (H, W).
        x = x.mean(3).mean(2)
        x = self.classifier(x)
        return x
    @property
    def module_str(self):
        """Human-readable description of the active sub-network, one layer per line."""
        _str = self.first_conv.module_str + '\n'
        _str += self.blocks[0].module_str + '\n'
        for stage_id, (block_idx, _) in enumerate(self.block_group_info):
            depth = self.runtime_depth[stage_id]
            active_idx = block_idx[:depth]
            for idx in active_idx:
                _str += self.blocks[idx].module_str + '\n'
        _str += self.feature_mix_layer.module_str + '\n'
        _str += self.classifier.module_str + '\n'
        return _str
    @property
    def config(self):
        # The supernet is not meant to be serialized as a config; this just
        # returns str(self) to satisfy the ProxylessNASNets interface.
        return '%s' % self
    @staticmethod
    def build_from_config(config):
        # Rebuilding the supernet from a config dict is intentionally
        # unsupported (see `config` above).
        raise ValueError('do not support this function')
    def load_weights_from_proxylessnas(self, proxyless_model_dict):
        """Load a plain (non-quantized) ProxylessNAS checkpoint.

        Translates between the quantized and unquantized module naming
        schemes (e.g. '.bn.bn.' <-> '.bn.', 'conv.weight' <-> 'conv.conv.weight')
        in whichever direction makes the key match this model's state dict.
        Raises ValueError on an unrecognized key.
        """
        model_dict = self.state_dict()
        for key in proxyless_model_dict:
            if key in model_dict:
                new_key = key
            elif '.bn.bn.' in key:
                new_key = key.replace('.bn.bn.', '.bn.')
            elif '.conv.conv.weight' in key:
                new_key = key.replace('.conv.conv.weight', '.conv.weight')
            elif '.linear.linear.' in key:
                new_key = key.replace('.linear.linear.', '.linear.')
            ##############################################################################
            # Below: the reverse direction (checkpoint flat names -> this
            # model's nested names).
            elif '.linear.' in key:
                new_key = key.replace('.linear.', '.linear.linear.')
            elif 'bn.' in key:
                new_key = key.replace('bn.', 'bn.bn.')
            elif 'conv.weight' in key:
                new_key = key.replace('conv.weight', 'conv.conv.weight')
            else:
                raise ValueError(key)
            assert new_key in model_dict, '%s' % new_key
            model_dict[new_key] = proxyless_model_dict[key]
        self.load_state_dict(model_dict)
    def partial_load(self, a, b):
        """Zero-pad tensor *b* up to the shape of *a* (padding at the high end
        of every dimension) so a smaller checkpoint tensor fits a larger
        supernet parameter.  F.pad expects per-dim (before, after) pairs in
        reverse dimension order, hence the flip.
        """
        x = np.flip(np.array(a.size()) - np.array(b.size()), 0)
        # print(x)
        y = np.zeros(2 * len(x))
        for i in range(len(x)):
            y[i * 2 + 1] = x[i]
        pp = tuple(y.astype(int))
        return F.pad(b, pp)
    def partial_load_dw(self, a, b):
        """Like partial_load, but for depthwise kernels: the two spatial
        dimensions (last two of the tensor, first two after the flip) are
        padded symmetrically so the smaller kernel stays centered; channel
        dimensions are padded at the high end as in partial_load.
        """
        x = np.flip(np.array(a.size()) - np.array(b.size()), 0)
        # print(x)
        y = np.zeros(2 * len(x))
        for i in range(len(x)):
            if i <= 1:
                # Spatial size difference must be even to center the kernel.
                assert x[i] % 2 == 0
                y[i * 2 + 1] = x[i] // 2
                y[i * 2] = x[i] // 2
            else:
                y[i * 2 + 1] = x[i]
        # print(y)
        pp = tuple(y.astype(int))
        return F.pad(b, pp)
    def load_partial_weights_from_proxylessnas(self, proxyless_model_dict):
        """Load a ProxylessNAS checkpoint whose tensors may be *smaller* than
        this supernet's (fewer channels / smaller kernels): keys are remapped
        as in load_weights_from_proxylessnas, then each tensor is zero-padded
        into place via partial_load / partial_load_dw.
        """
        model_dict = self.state_dict()
        for key in proxyless_model_dict:
            if key in model_dict:
                new_key = key
            elif '.bn.bn.' in key:
                new_key = key.replace('.bn.bn.', '.bn.')
            elif '.conv.conv.weight' in key:
                new_key = key.replace('.conv.conv.weight', '.conv.weight')
            elif '.linear.linear.' in key:
                new_key = key.replace('.linear.linear.', '.linear.')
            ##############################################################################
            elif '.linear.' in key:
                new_key = key.replace('.linear.', '.linear.linear.')
            elif 'bn.' in key:
                new_key = key.replace('bn.', 'bn.bn.')
            elif 'conv.weight' in key:
                new_key = key.replace('conv.weight', 'conv.conv.weight')
            else:
                raise ValueError(key)
            assert new_key in model_dict, '%s' % new_key
            # model_dict[new_key] = proxyless_model_dict[key]
            if 'depth_conv.conv.weight' in key:
                # Depthwise kernels are padded symmetrically (kept centered).
                # print(new_key, key)
                tmp = model_dict[new_key]
                tmp2 = proxyless_model_dict[key]
                # print(tmp.size())
                # print(tmp2.size())
                model_dict[new_key] = self.partial_load_dw(tmp, tmp2)
            else:
                # print(new_key, key)
                tmp = model_dict[new_key]
                tmp2 = proxyless_model_dict[key]
                # print(tmp.size())
                # print(tmp2.size())
                model_dict[new_key] = self.partial_load(tmp, tmp2)
            # print()
        self.load_state_dict(model_dict)
    """ set, sample and get active sub-networks """
    def set_active_subnet(self, wid=None, ks=None, e=None, d=None):
        """Activate a sub-network in place.

        wid -- width-multiplier indices: [first_conv, first_block,
               feature_mix, then one per dynamic block]; None keeps widths.
        ks  -- kernel size per dynamic block (None entries keep current).
        e   -- expand ratio per dynamic block (None entries keep current).
        d   -- depth per stage; clamped to each stage's max depth.
        """
        width_mult_id = int2list(wid, 3 + len(self.blocks) - 1)
        ks = int2list(ks, len(self.blocks) - 1)
        expand_ratio = int2list(e, len(self.blocks) - 1)
        depth = int2list(d, len(self.block_group_info))
        # NOTE(review): only width_mult_id[0] is checked for None; a partially
        # None wid list would index out_channel_list with None — confirm
        # callers always pass an all-or-nothing wid.
        if len(self.width_mult_list) > 1 and width_mult_id[0] is not None:
            # active_out_channel
            self.first_conv.active_out_channel = self.first_conv.out_channel_list[width_mult_id[0]]
            self.blocks[0].mobile_inverted_conv.active_out_channel = \
                self.blocks[0].mobile_inverted_conv.out_channel_list[width_mult_id[1]]
            self.feature_mix_layer.active_out_channel = self.feature_mix_layer.out_channel_list[width_mult_id[2]]
        for block, w, k, e in zip(self.blocks[1:], width_mult_id[3:], ks, expand_ratio):
            if w is not None:
                block.mobile_inverted_conv.active_out_channel = block.mobile_inverted_conv.out_channel_list[w]
            if k is not None:
                block.mobile_inverted_conv.active_kernel_size = k
            if e is not None:
                block.mobile_inverted_conv.active_expand_ratio = e
        for i, d in enumerate(depth):
            if d is not None:
                self.runtime_depth[i] = min(len(self.block_group_info[i][0]), d)
    def sample_active_subnet(self):
        """Sample and activate a random sub-network; returns its setting dict."""
        width_mult_candidates = [i for i in range(len(self.width_mult_list))]
        ks_candidates = self.ks_list
        expand_candidates = self.expand_ratio_list
        depth_candidates = self.depth_list
        # sample width_mult
        if len(self.width_mult_list) == 1:
            width_mult_setting = None
        else:
            width_mult_setting = random.choices(width_mult_candidates, k=3 + len(self.blocks) - 1)
        # sample kernel size
        ks_setting = random.choices(ks_candidates, k=len(self.blocks) - 1)
        # sample expand ratio
        # expand_setting = random.choices(expand_candidates, k=len(self.blocks) - 1)
        # expand_setting = random.choices(expand_candidates, k=len(self.blocks) - 1)
        # NOTE(review): expand ratios are sampled *continuously* in [4, 6)
        # instead of from expand_candidates, the block count 21 is hardcoded,
        # the search space is pinned by the assert below, and the debug print
        # is left in — confirm this experimental sampling is intended.
        expand_setting = (np.random.rand(21) * 2 + 4).tolist()
        print(expand_setting)
        assert expand_candidates == [4, 6]
        # print(np.random.rand(len(self.blocks) - 1).tolist())
        # print(expand_setting, expand_candidates)
        # sample depth
        depth_setting = random.choices(depth_candidates, k=len(self.block_group_info))
        self.set_active_subnet(width_mult_setting, ks_setting, expand_setting, depth_setting)
        return {
            'wid': width_mult_setting,
            'ks': ks_setting,
            'e': expand_setting,
            'd': depth_setting,
        }
    def set_quantization_policy(self, pw_w_bits_setting=None, pw_a_bits_setting=None, dw_w_bits_setting=None,
                                dw_a_bits_setting=None):
        """Set per-block bit-widths (pointwise/depthwise x weight/activation)
        for every dynamic block; scalars broadcast to all blocks.
        """
        pw_w_bits_setting = int2list(pw_w_bits_setting, len(self.blocks) - 1)
        pw_a_bits_setting = int2list(pw_a_bits_setting, len(self.blocks) - 1)
        dw_w_bits_setting = int2list(dw_w_bits_setting, len(self.blocks) - 1)
        dw_a_bits_setting = int2list(dw_a_bits_setting, len(self.blocks) - 1)
        for block, pw_w, pw_a, dw_w, dw_a in zip(self.blocks[1:], pw_w_bits_setting, pw_a_bits_setting,
                                                 dw_w_bits_setting,
                                                 dw_a_bits_setting):
            block.mobile_inverted_conv.set_quantization_policy(pw_w_bit=pw_w, pw_a_bit=pw_a, dw_w_bit=dw_w,
                                                               dw_a_bit=dw_a)
    def sample_quantization_policy(self):
        """Sample and apply a random per-block bit-width assignment; returns it."""
        bits_candidates = [4, 6, 8]
        # bits_candidates = [2, 4, 6, 8]
        # bits_candidates = [2, 4]
        # sample bits
        pw_w_bits_setting = random.choices(bits_candidates, k=len(self.blocks) - 1)
        pw_a_bits_setting = random.choices(bits_candidates, k=len(self.blocks) - 1)
        dw_w_bits_setting = random.choices(bits_candidates, k=len(self.blocks) - 1)
        dw_a_bits_setting = random.choices(bits_candidates, k=len(self.blocks) - 1)
        self.set_quantization_policy(pw_w_bits_setting, pw_a_bits_setting, dw_w_bits_setting, dw_a_bits_setting)
        return {
            'pw_w_bits_setting': pw_w_bits_setting,
            'pw_a_bits_setting': pw_a_bits_setting,
            'dw_w_bits_setting': dw_w_bits_setting,
            'dw_a_bits_setting': dw_a_bits_setting,
        }
    def get_active_subnet(self, preserve_weight=True):
        """Extract the active sub-network as a standalone ProxylessNASNets
        (optionally copying the supernet weights)."""
        if len(self.width_mult_list) == 1:
            # Fixed-width supernet: stem/head layers are static, copy directly.
            # print('here')
            first_conv = copy.deepcopy(self.first_conv)
            blocks = [copy.deepcopy(self.blocks[0])]
            feature_mix_layer = copy.deepcopy(self.feature_mix_layer)
            classifier = copy.deepcopy(self.classifier)
        else:
            first_conv = self.first_conv.get_active_subnet(3, preserve_weight)
            blocks = [MobileInvertedResidualBlock(
                self.blocks[0].mobile_inverted_conv.get_active_subnet(first_conv.out_channels, preserve_weight),
                copy.deepcopy(self.blocks[0].shortcut)
            )]
            feature_mix_layer = self.feature_mix_layer.get_active_subnet(
                self.blocks[-1].mobile_inverted_conv.active_out_channel, preserve_weight)
            classifier = self.classifier.get_active_subnet(self.feature_mix_layer.active_out_channel, preserve_weight)
        input_channel = blocks[0].mobile_inverted_conv.out_channels
        # blocks
        for stage_id, (block_idx, _) in enumerate(self.block_group_info):
            depth = self.runtime_depth[stage_id]
            active_idx = block_idx[:depth]
            stage_blocks = []
            for idx in active_idx:
                stage_blocks.append(MobileInvertedResidualBlock(
                    self.blocks[idx].mobile_inverted_conv.get_active_subnet(input_channel, preserve_weight),
                    copy.deepcopy(self.blocks[idx].shortcut)
                ))
                input_channel = stage_blocks[-1].mobile_inverted_conv.out_channels
            if self.depth_ensemble_list is None:
                blocks += stage_blocks
            else:
                assert self.depth_ensemble_mode == 'avg'
                # Fold the ensemble average into the extracted sub-net.
                # NOTE(review): the 0.5 / (1/3) BN rescaling presumably
                # compensates for averaging 2 resp. 3 experts — confirm
                # against the training-time ensemble weighting.
                if len(stage_blocks) == 3:
                    stage_blocks[-1].mobile_inverted_conv.point_linear.bn.weight.data.mul_(0.5)
                    stage_blocks[-1].mobile_inverted_conv.point_linear.bn.bias.data.mul_(0.5)
                    blocks += stage_blocks
                elif len(stage_blocks) == 4:
                    stage_blocks[-1].mobile_inverted_conv.point_linear.bn.weight.data.mul_(1 / 3)
                    stage_blocks[-1].mobile_inverted_conv.point_linear.bn.bias.data.mul_(1 / 3)
                    mbconv_stage = AvgMBConvStage(stage_blocks)
                    blocks.append(mbconv_stage)
                else:
                    blocks += stage_blocks
        active_subnet = ProxylessNASNets(first_conv, blocks, feature_mix_layer, classifier)
        active_subnet.set_bn_param(**self.get_bn_param())
        return active_subnet
    """ Depth Related Methods """
    def get_depth_ensemble_weights(self, stage_id, n_experts=None):
        """Return the mixing weights for the depth-ensemble experts of a stage
        (uniform 1/n for 'avg' mode; other modes are not implemented)."""
        if n_experts is None:
            n_experts = len(self.depth_ensemble_list)
        if self.depth_ensemble_mode == 'avg':
            return [1.0 / n_experts for _ in range(n_experts)]
        else:
            raise NotImplementedError
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,546
|
mit-han-lab/apq
|
refs/heads/master
|
/utils/converter.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
import random
import numpy as np
class Converter():
    """Encode/decode APQ sub-network specs ("info" dicts) to and from the flat
    one-hot feature vectors consumed by the accuracy/latency predictors.

    The search space has 21 mobile inverted blocks (5 stages of 4 blocks plus
    1 final block).  Each block contributes a one-hot group for kernel size
    {3, 5, 7}, expand ratio (per-block candidates, see build()), and four
    quantization settings (pointwise/depthwise x weight/activation bits in
    {4, 6, 8}).
    """
    def __init__(self):
        self.cnt = 0  # running feature-vector offset while laying out groups
        # Per-feature lookup tables.  For each of the 21 blocks:
        #   val2id[i]: candidate value -> feature index
        #   id2val[i]: feature index   -> candidate value
        #   L[i], R[i]: half-open index range [L, R) of block i's one-hot group
        self.k_info = dict(id2val=[], val2id=[], L=[], R=[])
        self.e_info = dict(id2val=[], val2id=[], L=[], R=[])
        self.q_info = [dict(id2val=[], val2id=[], L=[], R=[]) for it in range(4)]  # w_pw a_pw w_dw a_dw
        self.num2qname = {0: 'pw_w_bits_setting', 1: 'pw_a_bits_setting', 2: 'dw_w_bits_setting',
                          3: 'dw_a_bits_setting'}
        self.build(self.k_info, [3, 5, 7])
        self.build(self.e_info)
        self.half_dim = self.cnt  # length of the architecture-only prefix
        for it in range(4):
            self.build(self.q_info[it], [4, 6, 8])
        self.full_dim = self.cnt  # architecture + quantization feature length
    def build(self, info_dic, ls=None):
        """Append one-hot groups for all 21 blocks to *info_dic*.

        ls -- candidate values shared by every block.  When None (expand
        ratios), candidates are derived per block from its input channel
        count t: every multiple of 8 in [4*t, 6*t + 8), expressed as a
        ratio k / t (roughly 4x to 6x in channel-aligned steps).
        """
        if ls is None:
            # Input channel width per block: 1 block of 16, then 5 stages x 4.
            lst = []
            lst.extend([16])
            lst.extend([24] * 4)
            lst.extend([40] * 4)
            lst.extend([80] * 4)
            lst.extend([96] * 4)
            lst.extend([192] * 4)
            for i in range(21):
                t = lst[i]
                dic = {}
                dic2 = {}
                info_dic['L'].append(self.cnt)
                for k in range(t * 4, t * 6 + 8, 8):
                    dic[k / t] = self.cnt
                    dic2[self.cnt] = k / t
                    self.cnt += 1
                info_dic['R'].append(self.cnt)
                info_dic['val2id'].append(dic)
                info_dic['id2val'].append(dic2)
        else:
            for i in range(21):
                dic = {}
                dic2 = {}
                info_dic['L'].append(self.cnt)
                for k in ls:
                    dic[k] = self.cnt
                    dic2[self.cnt] = k
                    self.cnt += 1
                info_dic['R'].append(self.cnt)
                info_dic['val2id'].append(dic)
                info_dic['id2val'].append(dic2)
    def spec2feature(self, info, quantize=False):
        """Encode an info dict into a one-hot feature vector of length full_dim.

        Only blocks active under the per-stage depths info['d'] get their bits
        set; skipped blocks stay all-zero.  When quantize is False, all bit
        settings are treated as 8.  Expand ratios not exactly in a block's
        candidate set are snapped to the channel-aligned value via
        make_divisible before lookup.
        """
        def make_divisible(v, divisor, min_value=None):
            # Local copy of the channel-rounding helper (shadows the global
            # convention on purpose: it is only used for ratio snapping here).
            if min_value is None:
                min_value = divisor
            x = max(min_value, int(v + divisor / 2) // divisor * divisor)
            # make sure that round down does not go down by more than 10%
            if x < 0.9 * v:
                x += divisor
            return x
        ks = info['ks']
        e = info['e']
        d = info['d']
        if not quantize:
            q = [[8] * 21 for _ in range(4)]
        else:
            q = []
            for it in range(4):
                q.append(info[self.num2qname[it]])
        # qa = [32] * 21
        spec = np.zeros(self.cnt)
        for i in range(21):
            nowd = i % 4   # position within the stage
            stg = i // 4   # stage index
            if nowd < d[stg]:
                spec[self.k_info['val2id'][i][ks[i]]] = 1
        channel_in_list = [16, 24, 40, 80, 96, 192]
        channel_out_list = [24, 40, 80, 96, 192, 320]
        for i in range(21):
            # First block of a stage reads the previous stage's width;
            # later blocks read their own stage's width.
            inc = channel_in_list[i // 4] if i % 4 == 0 else channel_in_list[i // 4 + 1]
            ouc = channel_out_list[i // 4]
            nowd = i % 4
            stg = i // 4
            if nowd < d[stg]:
                if e[i] in self.e_info['val2id'][i]:
                    spec[self.e_info['val2id'][i][e[i]]] = 1
                else:
                    # Snap a continuous ratio to the nearest channel-aligned one.
                    real_e = make_divisible(e[i] * inc, 8) / inc
                    spec[self.e_info['val2id'][i][real_e]] = 1
                    assert min(self.e_info['val2id'][i].keys(), key=lambda key: abs(key - e[i])) == real_e
        for it in range(4):
            for i in range(21):
                nowd = i % 4
                stg = i // 4
                if nowd < d[stg]:
                    spec[self.q_info[it]['val2id'][i][q[it][i]]] = 1
        return spec
    def feature2spec(self, spec):
        """Decode a one-hot feature vector back into an info dict.

        A block with no bit set in its kernel-size/expand groups is treated
        as skipped (identity): it is filled with defaults (ks=3, e=4, 8-bit)
        and does not count toward its stage's depth.
        """
        info = {'wid': None, 'ks': [],
                'e': [], 'd': [], 'pw_w_bits_setting': [], 'pw_a_bits_setting': [], 'dw_w_bits_setting': [],
                'dw_a_bits_setting': []}
        d = 0
        for i in range(21):
            identity = True
            for j in range(self.k_info['L'][i], self.k_info['R'][i]):
                if spec[j] == 1:
                    info['ks'].append(self.k_info['id2val'][i][j])
                    identity = False
                    break
            for j in range(self.e_info['L'][i], self.e_info['R'][i]):
                if spec[j] == 1:
                    info['e'].append(self.e_info['id2val'][i][j])
                    identity = False
                    break
            for it in range(4):
                for j in range(self.q_info[it]['L'][i], self.q_info[it]['R'][i]):
                    if spec[j] == 1:
                        info[self.num2qname[it]].append(self.q_info[it]['id2val'][i][j])
            if identity:
                # Skipped block: fill with defaults so all lists stay length 21.
                info['e'].append(4)
                info['ks'].append(3)
                for it in range(4):
                    info[self.num2qname[it]].append(8)
            else:
                d += 1
            if i % 4 == 3:
                # Stage boundary for the five 4-block stages.
                info['d'].append(d)
                d = 0
        info['d'].append(d)  # depth of the final single-block stage
        return info
    def is_valid(self, spec):
        """Reject specs combining a 7x7 depthwise kernel with 4-bit depthwise
        activations.  NOTE(review): presumably a hardware/latency-table
        constraint — confirm against the predictor's supported space.
        """
        for i in range(21):
            if spec['ks'][i] == 7 and spec['dw_a_bits_setting'][i] == 4:
                return False
        return True
    def random_spec(self):
        """Sample a uniformly random spec dict (per-stage depths in [2, 4])."""
        info = {'wid': None, 'ks': [],
                'e': [], 'd': [], 'pw_w_bits_setting': [], 'pw_a_bits_setting': [], 'dw_w_bits_setting': [],
                'dw_a_bits_setting': []}
        for i in range(6):
            info['d'].append(np.random.randint(3) + 2)
        for i in range(21):
            # random.choice(list(...)) instead of random.sample(dict.keys(), 1):
            # random.sample() on dict views was deprecated in Python 3.9 and
            # removed in 3.11 (TypeError); the distribution is identical.
            info['ks'].append(random.choice(list(self.k_info['val2id'][i])))
            info['e'].append(random.choice(list(self.e_info['val2id'][i])))
            for it in range(4):
                info[self.num2qname[it]].append(random.choice(list(self.q_info[it]['val2id'][i])))
        return info
    def change_spec(self, spec, i):
        """Mutate block *i* of *spec* in place: re-sample its kernel size,
        expand ratio, and all four bit settings.  Returns the same dict."""
        # See random_spec: random.sample(dict.keys(), ...) is a TypeError on 3.11+.
        spec['ks'][i] = random.choice(list(self.k_info['val2id'][i]))
        spec['e'][i] = random.choice(list(self.e_info['val2id'][i]))
        for it in range(4):
            spec[self.num2qname[it]][i] = random.choice(list(self.q_info[it]['val2id'][i]))
        return spec
if __name__ == '__main__':
    # NOTE(review): `test()` is not defined anywhere in this module, so
    # running the file directly raises NameError — confirm the intended
    # smoke test (e.g. a Converter() round-trip) or remove this guard.
    test()
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,547
|
mit-han-lab/apq
|
refs/heads/master
|
/imagenet_codebase/utils/quantize_utils.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
import math
import numpy as np
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from torch.nn.parameter import Parameter
from torch.nn.modules.utils import _single, _pair, _triple
from progress.bar import Bar
from sklearn.cluster import KMeans
def k_means_cpu(weight, n_clusters, init='k-means++', max_iter=50):
    """Cluster the entries of *weight* with scikit-learn KMeans on the CPU.

    Returns a (1, n_clusters) float tensor of centroids and an int tensor of
    per-entry cluster labels shaped like *weight*, both moved to the GPU.
    If *weight* has fewer entries than requested clusters, the cluster count
    is reduced to the number of entries.
    """
    original_shape = weight.shape
    # KMeans expects a 2-D (n_samples, n_features) array; treat every weight
    # entry as one single-feature sample.
    flat = weight.reshape(-1, 1)
    effective_clusters = min(n_clusters, flat.size)
    estimator = KMeans(n_clusters=effective_clusters, init=init, n_init=1, max_iter=max_iter)
    estimator.fit(flat)
    label_grid = estimator.labels_.reshape(original_shape)
    centroid_row = torch.from_numpy(estimator.cluster_centers_).cuda().view(1, -1)
    return centroid_row, torch.from_numpy(label_grid).int().cuda()
def reconstruct_weight_from_k_means_result(centroids, labels):
    """Rebuild a weight tensor from k-means results: every position takes the
    value of its assigned centroid.

    centroids -- (1, n_clusters) tensor of cluster centers (see k_means_cpu).
    labels    -- int tensor of cluster indices, shaped like the target weight.
    Returns a float CUDA tensor shaped like *labels*.
    """
    weight = torch.zeros_like(labels).float().cuda()
    # reshape(-1) rather than squeeze(): squeeze() on a single-centroid
    # (1, 1) array yields a 0-d numpy array, which enumerate() cannot
    # iterate (k_means_cpu can legitimately return one cluster when the
    # weight has a single entry).
    for i, c in enumerate(centroids.cpu().numpy().reshape(-1)):
        weight[labels == i] = c.item()
    return weight
def quantize_model(model, quantize_index, quantize_bits, max_iter=50, mode='cpu', quantize_bias=False,
                   centroids_init='k-means++', is_pruned=False, free_high_bit=False):
    """K-means-quantize selected layers of *model* in place.

    model          -- network whose modules are enumerated by model.modules().
    quantize_index -- module indices (into that enumeration) to quantize.
    quantize_bits  -- per-index bit widths; an entry may be a single int
                      (shared by weight and bias), a [w_bit, b_bit] pair,
                      or -1 to skip the layer.
    max_iter       -- k-means iteration cap.
    mode           -- only 'cpu' (scikit-learn) is implemented.
    quantize_bias  -- also quantize layer biases when True.
    centroids_init -- k-means initialization scheme.
    is_pruned      -- cluster only non-zero weights; pruned positions get
                      label -1 so they stay zero.
    free_high_bit  -- treat bits > 6 as "do not quantize" (accuracy-safe).

    Returns {module_index: [[centroids, labels], ...]} with one entry for the
    weight and, if quantized, one for the bias.
    """
    assert len(quantize_index) == len(quantize_bits), \
        'You should provide the same number of bit setting as layer list!'
    if free_high_bit:
        # quantize weight with high bit will not lead accuracy loss, so we can omit them to save time
        quantize_bits = [-1 if i > 6 else i for i in quantize_bits]
    quantize_layer_bit_dict = {n: b for n, b in zip(quantize_index, quantize_bits)}
    centroid_label_dict = {}
    bar = Bar('KMeans:', max=len(quantize_index))
    for i, layer in enumerate(model.modules()):
        if i not in quantize_index:
            continue
        this_cl_list = []
        n_bit = quantize_layer_bit_dict[i]
        if n_bit < 0:  # if -1, do not quantize
            # NOTE(review): skipped layers get no centroid_label_dict entry
            # and no bar.next(), so the progress bar may not reach 100% —
            # confirm downstream code tolerates missing keys.
            continue
        if type(n_bit) == list:  # given both the bit of weight and bias
            assert len(n_bit) == 2
            assert hasattr(layer, 'weight')
            assert hasattr(layer, 'bias')
        else:
            n_bit = [n_bit, n_bit]  # using same setting for W and b
        # quantize weight
        if hasattr(layer, 'weight'):
            w = layer.weight.data
            if is_pruned:
                # Cluster only the surviving (non-zero) weights.
                nz_mask = w.ne(0)
                print('*** pruned density: {:.4f}'.format(torch.sum(nz_mask) / w.numel()))
                ori_shape = w.size()
                w = w[nz_mask]
            if mode == 'cpu':
                centroids, labels = k_means_cpu(w.cpu().numpy(), 2 ** n_bit[0], init=centroids_init, max_iter=max_iter)
            else:
                raise NotImplementedError
            if is_pruned:
                # Scatter labels back to the full shape; pruned positions get
                # label -1 so reconstruction leaves them at zero.
                full_labels = labels.new(ori_shape).zero_() - 1  # use -1 for pruned elements
                full_labels[nz_mask] = labels
                labels = full_labels
            this_cl_list.append([centroids, labels])
            w_q = reconstruct_weight_from_k_means_result(centroids, labels)
            layer.weight.data = w_q.float()
        # quantize bias
        if hasattr(layer, 'bias') and quantize_bias:
            w = layer.bias.data
            if mode == 'cpu':
                centroids, labels = k_means_cpu(w.cpu().numpy(), 2 ** n_bit[1], init=centroids_init, max_iter=max_iter)
            else:
                raise NotImplementedError
            this_cl_list.append([centroids, labels])
            w_q = reconstruct_weight_from_k_means_result(centroids, labels)
            layer.bias.data = w_q.float()
        centroid_label_dict[i] = this_cl_list
        bar.suffix = ' id: {id:} | bit: {bit:}'.format(id=i, bit=n_bit[0])
        bar.next()
    bar.finish()
    return centroid_label_dict
def kmeans_update_model(model, quantizable_idx, centroid_label_dict, free_high_bit=False):
    """Re-center each quantized layer's weights on its cluster means.

    For every module index in *quantizable_idx*, each weight entry is replaced
    by the mean of the current weights sharing its k-means cluster label (the
    labels come from centroid_label_dict, as produced by quantize_model).
    With free_high_bit=True, layers with more than 2**6 centroids are left
    untouched, since high-bit quantization is considered accuracy-safe.
    """
    for idx, module in enumerate(model.modules()):
        if idx not in quantizable_idx:
            continue
        centroids, labels = centroid_label_dict[idx][0]
        n_clusters = centroids.numel()
        if free_high_bit and n_clusters > 2 ** 6:
            # quantize weight with high bit will not lead accuracy loss, so we can omit them to save time
            continue
        updated = module.weight.data.clone()
        updated.zero_()
        for cluster in range(n_clusters):
            member_mask = (labels == cluster).float()
            cluster_mean = (module.weight.data * member_mask).sum() / member_mask.sum()
            updated += cluster_mean * member_mask
        module.weight.data = updated
class QModule(nn.Module):
def __init__(self, w_bit=-1, a_bit=-1, half_wave=True):
super(QModule, self).__init__()
if half_wave:
self._a_bit = a_bit
else:
self._a_bit = a_bit - 1
self._w_bit = w_bit
self._b_bit = 32
self._half_wave = half_wave
self.init_range = 6.
self.activation_range = nn.Parameter(torch.Tensor([self.init_range]))
self.weight_range = nn.Parameter(torch.Tensor([-1.0]), requires_grad=False)
self._quantized = True
self._tanh_weight = False
self._fix_weight = False
self._trainable_activation_range = True
self._calibrate = False
@property
def w_bit(self):
return self._w_bit
@w_bit.setter
def w_bit(self, w_bit):
self._w_bit = w_bit
@property
def a_bit(self):
if self._half_wave:
return self._a_bit
else:
return self._a_bit + 1
@a_bit.setter
def a_bit(self, a_bit):
if self._half_wave:
self._a_bit = a_bit
else:
self._a_bit = a_bit - 1
@property
def b_bit(self):
return self._b_bit
@property
def half_wave(self):
return self._half_wave
@property
def quantized(self):
return self._quantized
@property
def tanh_weight(self):
return self._tanh_weight
def set_quantize(self, quantized):
self._quantized = quantized
def set_tanh_weight(self, tanh_weight):
self._tanh_weight = tanh_weight
if self._tanh_weight:
self.weight_range.data[0] = 1.0
def set_fix_weight(self, fix_weight):
self._fix_weight = fix_weight
def set_activation_range(self, activation_range):
self.activation_range.data[0] = activation_range
def set_weight_range(self, weight_range):
self.weight_range.data[0] = weight_range
def set_trainable_activation_range(self, trainable_activation_range=True):
self._trainable_activation_range = trainable_activation_range
self.activation_range.requires_grad_(trainable_activation_range)
def set_calibrate(self, calibrate=True):
self._calibrate = calibrate
def set_tanh(self, tanh=True):
self._tanh_weight = tanh
def _compute_threshold(self, data, bitwidth):
mn = 0
mx = np.abs(data).max()
if np.isclose(mx, 0.0):
return 0.0
hist, bin_edges = np.histogram(np.abs(data), bins='sqrt', range=(mn, mx), density=True)
hist = hist / np.sum(hist)
cumsum = np.cumsum(hist)
n = pow(2, int(bitwidth) - 1)
threshold = []
scaling_factor = []
d = []
if n + 1 > len(bin_edges) - 1:
th_layer_out = bin_edges[-1]
# sf_layer_out = th_layer_out / (pow(2, bitwidth - 1) - 1)
return float(th_layer_out)
for i in range(n + 1, len(bin_edges), 1):
threshold_tmp = (i + 0.5) * (bin_edges[1] - bin_edges[0])
threshold = np.concatenate((threshold, [threshold_tmp]))
scaling_factor_tmp = threshold_tmp / (pow(2, bitwidth - 1) - 1)
scaling_factor = np.concatenate((scaling_factor, [scaling_factor_tmp]))
p = np.copy(cumsum)
p[(i - 1):] = 1
x = np.linspace(0.0, 1.0, n)
xp = np.linspace(0.0, 1.0, i)
fp = p[:i]
p_interp = np.interp(x, xp, fp)
x = np.linspace(0.0, 1.0, i)
xp = np.linspace(0.0, 1.0, n)
fp = p_interp
q_interp = np.interp(x, xp, fp)
q = np.copy(p)
q[:i] = q_interp
d_tmp = np.sum((cumsum - q) * np.log2(cumsum / q)) # Kullback-Leibler-J
d = np.concatenate((d, [d_tmp]))
th_layer_out = threshold[np.argmin(d)]
# sf_layer_out = scaling_factor[np.argmin(d)]
threshold = float(th_layer_out)
return threshold
def _quantize_activation(self, inputs):
if self._quantized and self._a_bit > 0:
if self._calibrate:
if self._a_bit < 5:
threshold = self._compute_threshold(inputs.data.cpu().numpy(), self._a_bit)
estimate_activation_range = min(min(self.init_range, inputs.abs().max().item()), threshold)
else:
estimate_activation_range = min(self.init_range, inputs.abs().max().item())
# print('range:', estimate_activation_range, ' shape:', inputs.shape, ' inp_abs_max:', inputs.abs().max())
self.activation_range.data = torch.tensor([estimate_activation_range], device=inputs.device)
return inputs
if self._trainable_activation_range:
if self._half_wave:
ori_x = 0.5 * (inputs.abs() - (inputs - self.activation_range).abs() + self.activation_range)
else:
ori_x = 0.5 * ((-inputs - self.activation_range).abs() - (inputs - self.activation_range).abs())
else:
if self._half_wave:
ori_x = inputs.clamp(0.0, self.activation_range.item())
else:
ori_x = inputs.clamp(-self.activation_range.item(), self.activation_range.item())
scaling_factor = self.activation_range.item() / (2. ** self._a_bit - 1.)
x = ori_x.detach().clone()
x.div_(scaling_factor).round_().mul_(scaling_factor)
# STE
# x = ori_x + x.detach() - ori_x.detach()
return STE.apply(ori_x, x)
else:
return inputs
    def _quantize_weight(self, weight):
        """Fake-quantize weights to ``self._w_bit`` bits (symmetric, signed).

        In calibration mode this only updates ``self.weight_range`` and
        returns the weights unchanged; otherwise it clips, quantizes, and
        returns the result through the STE (or fully detached when
        ``self._fix_weight`` is set).
        """
        if self._tanh_weight:
            # DoReFa-style: squash weights into [-1, 1] before quantization.
            weight = weight.tanh()
            weight = weight / weight.abs().max()
        if self._quantized and self._w_bit > 0:
            threshold = self.weight_range.item()
            if threshold <= 0:
                # Uninitialized range: fall back to the current max magnitude.
                threshold = weight.abs().max().item()
                self.weight_range.data[0] = threshold
            if self._calibrate:
                # KL-divergence threshold search only at low bit-widths.
                if self._w_bit < 5:
                    threshold = self._compute_threshold(weight.data.cpu().numpy(), self._w_bit)
                else:
                    threshold = weight.abs().max().item()
                self.weight_range.data[0] = threshold
                # Calibration only updates the range; weights pass through.
                return weight
            ori_w = weight
            # Symmetric step size over 2**(w_bit-1) - 1 positive levels.
            scaling_factor = threshold / (pow(2., self._w_bit - 1) - 1.)
            w = ori_w.clamp(-threshold, threshold)
            # w[w.abs() > threshold - threshold / 64.] = 0.
            w.div_(scaling_factor).round_().mul_(scaling_factor)
            # STE
            if self._fix_weight:
                # w = w.detach()
                return w.detach()
            else:
                # w = ori_w + w.detach() - ori_w.detach()
                return STE.apply(ori_w, w)
        else:
            return weight
    def _quantize_bias(self, bias):
        """Fake-quantize the bias to ``self._b_bit`` bits.

        Currently unused: the call site in ``_quantize`` is commented out.
        """
        if bias is not None and self._quantized and self._b_bit > 0:
            if self._calibrate:
                return bias
            ori_b = bias
            # NOTE(review): the threshold uses max() rather than abs().max();
            # for a bias whose largest magnitude is negative this clips
            # asymmetrically — confirm whether abs().max() was intended.
            threshold = ori_b.data.max().item() + 0.00001
            scaling_factor = threshold / (pow(2., self._b_bit - 1) - 1.)
            b = torch.clamp(ori_b.data, -threshold, threshold)
            b.div_(scaling_factor).round_().mul_(scaling_factor)
            # STE
            if self._fix_weight:
                return b.detach()
            else:
                # b = ori_b + b.detach() - ori_b.detach()
                return STE.apply(ori_b, b)
        else:
            return bias
def _quantize(self, inputs, weight, bias):
inputs = self._quantize_activation(inputs=inputs)
weight = self._quantize_weight(weight=weight)
# bias = self._quantize_bias(bias=bias)
return inputs, weight, bias
    def forward(self, *inputs):
        """Abstract; subclasses (e.g. QConv2d, QLinear) implement the pass."""
        raise NotImplementedError
def extra_repr(self):
return 'w_bit={}, a_bit={}, half_wave={}, tanh_weight={}'.format(
self.w_bit if self.w_bit > 0 else -1, self.a_bit if self.a_bit > 0 else -1,
self.half_wave, self._tanh_weight
)
class STE(torch.autograd.Function):
    """Straight-through estimator.

    Forward substitutes the quantized tensor; backward passes the incoming
    gradient unchanged to the first (original) input.
    """
    @staticmethod
    def forward(ctx, origin_inputs, wanted_inputs):
        # The output is exactly the quantized values, cut from the graph.
        return wanted_inputs.detach()
    @staticmethod
    def backward(ctx, grad_outputs):
        # Identity gradient for origin_inputs; wanted_inputs gets none.
        return grad_outputs, None
class QConv2d(QModule):
    """2-D convolution with optional weight/activation fake-quantization.

    Same interface as torch.nn.Conv2d plus the QModule quantization knobs
    (w_bit, a_bit, half_wave).
    """
    def __init__(self, in_channels, out_channels, kernel_size, stride=1,
                 padding=0, dilation=1, groups=1, bias=False,
                 w_bit=-1, a_bit=-1, half_wave=True):
        super(QConv2d, self).__init__(w_bit=w_bit, a_bit=a_bit, half_wave=half_wave)
        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Normalize all spatial arguments to 2-tuples.
        self.kernel_size = _pair(kernel_size)
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups
        weight_shape = (out_channels, in_channels // groups) + self.kernel_size
        self.weight = nn.Parameter(torch.zeros(*weight_shape))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_channels))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    def reset_parameters(self):
        # Same initialization scheme as torch.nn.Conv2d.
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is None:
            return
        fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
        bound = 1 / math.sqrt(fan_in)
        nn.init.uniform_(self.bias, -bound, bound)
    def forward(self, inputs):
        # Quantize activations/weights, then run the standard convolution.
        q_in, q_w, q_b = self._quantize(inputs=inputs, weight=self.weight, bias=self.bias)
        return F.conv2d(q_in, q_w, q_b, self.stride, self.padding, self.dilation, self.groups)
    def extra_repr(self):
        parts = ['{in_channels}, {out_channels}, kernel_size={kernel_size}, stride={stride}']
        # Only show non-default arguments, mirroring nn.Conv2d's repr.
        if self.padding != (0,) * len(self.padding):
            parts.append(', padding={padding}')
        if self.dilation != (1,) * len(self.dilation):
            parts.append(', dilation={dilation}')
        if self.groups != 1:
            parts.append(', groups={groups}')
        if self.bias is None:
            parts.append(', bias=False')
        if self.w_bit > 0 or self.a_bit > 0:
            parts.append(', w_bit={}, a_bit={}'.format(self.w_bit, self.a_bit))
            parts.append(', half wave' if self.half_wave else ', full wave')
        return ''.join(parts).format(**self.__dict__)
class QLinear(QModule):
    """Fully-connected layer with optional weight/activation fake-quantization.

    Same interface as torch.nn.Linear plus the QModule quantization knobs.
    """
    def __init__(self, in_features, out_features, bias=True, w_bit=-1, a_bit=-1, half_wave=True):
        super(QLinear, self).__init__(w_bit=w_bit, a_bit=a_bit, half_wave=half_wave)
        self.in_features = in_features
        self.out_features = out_features
        self.weight = nn.Parameter(torch.zeros(out_features, in_features))
        if bias:
            self.bias = nn.Parameter(torch.zeros(out_features))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
    def forward(self, inputs):
        # Quantize activations/weights, then run the standard linear map.
        q_in, q_w, q_b = self._quantize(inputs=inputs, weight=self.weight, bias=self.bias)
        return F.linear(q_in, weight=q_w, bias=q_b)
    def reset_parameters(self):
        # Same initialization scheme as torch.nn.Linear.
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is None:
            return
        fan_in, _ = nn.init._calculate_fan_in_and_fan_out(self.weight)
        bound = 1 / math.sqrt(fan_in)
        nn.init.uniform_(self.bias, -bound, bound)
    def extra_repr(self):
        desc = 'in_features={}, out_features={}, bias={}'.format(
            self.in_features, self.out_features, self.bias is not None)
        if self.w_bit > 0 or self.a_bit > 0:
            desc += ', w_bit={}, a_bit={}'.format(self.w_bit, self.a_bit)
            desc += ', half wave' if self.half_wave else ', full wave'
        return desc
def calibrate(model, loader):
    """Run one batch through `model` with every QModule in calibration mode
    so each layer estimates its activation/weight quantization ranges."""
    print('\n==> start calibrate')
    q_modules = [m for m in model.modules() if isinstance(m, QModule)]
    for m in q_modules:
        m.set_calibrate(calibrate=True)
    inputs, _ = next(iter(loader))
    # Calibration runs on a single GPU.
    inputs = inputs.to('cuda:0', non_blocking=True)
    # print('MODEL IS:', model)
    with torch.no_grad():
        model(inputs)
    for m in q_modules:
        m.set_calibrate(calibrate=False)
    print('==> end calibrate')
def dorefa(model):
    """Enable DoReFa-style tanh weight transformation on every QModule."""
    print('\n==> set weight tanh')
    for module in model.modules():
        if isinstance(module, QModule):
            module.set_tanh(tanh=True)
def set_fix_weight(model, fix_weight=True):
    """Freeze (or unfreeze) quantized weights in every QModule of `model`."""
    if fix_weight:
        print('\n==> set weight fixed')
    for module in model.modules():
        if isinstance(module, QModule):
            module.set_fix_weight(fix_weight=fix_weight)
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,548
|
mit-han-lab/apq
|
refs/heads/master
|
/imagenet_codebase/utils/pytorch_utils.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
import time
import torch
import torch.nn as nn
import copy
from imagenet_codebase.utils.flops_counter import profile
def cross_entropy_loss_with_soft_target(pred, soft_target):
    """Cross-entropy between logits and a soft target distribution.

    Args:
        pred: (batch, n_classes) unnormalized logits.
        soft_target: (batch, n_classes) target probabilities.

    Returns:
        Scalar tensor: batch mean of -sum(soft_target * log_softmax(pred)).
    """
    # Explicit dim=1: the original relied on LogSoftmax's deprecated
    # implicit-dim selection, which emits a warning and is fragile across
    # torch versions. For (batch, n_classes) inputs the implicit choice
    # was dim=1, so behavior is unchanged for existing callers.
    logsoftmax = nn.LogSoftmax(dim=1)
    return torch.mean(torch.sum(- soft_target * logsoftmax(pred), 1))
def cross_entropy_with_label_smoothing(pred, target, label_smoothing=0.1):
    """Cross-entropy with uniform label smoothing.

    Args:
        pred: (batch, n_classes) logits.
        target: (batch,) integer class labels.
        label_smoothing: probability mass spread uniformly over all classes.
    """
    n_classes = pred.size(1)
    # Smoothed targets: (1 - eps) on the true class + eps / n_classes everywhere.
    one_hot = torch.zeros_like(pred)
    one_hot.scatter_(1, target.unsqueeze(1), 1)
    smoothed = one_hot * (1 - label_smoothing) + label_smoothing / n_classes
    return cross_entropy_loss_with_soft_target(pred, smoothed)
def clean_num_batch_tracked(net):
    """Reset the num_batches_tracked counter of every BatchNorm layer in `net`."""
    for layer in net.modules():
        if isinstance(layer, (nn.BatchNorm2d, nn.BatchNorm1d)):
            if layer.num_batches_tracked is not None:
                layer.num_batches_tracked.zero_()
def rm_bn_from_net(net):
    """Turn every BatchNorm layer in `net` into an identity by replacing
    its forward (e.g. so FLOPs profiling skips normalization)."""
    for layer in net.modules():
        if isinstance(layer, (nn.BatchNorm2d, nn.BatchNorm1d)):
            layer.forward = lambda x: x
""" Net Info """
def get_net_device(net):
    """Device of the network's first parameter."""
    return next(net.parameters()).device
def count_parameters(net):
    """Number of trainable (requires_grad) parameters in `net`."""
    return sum(p.numel() for p in net.parameters() if p.requires_grad)
def count_net_flops(net, data_shape=(1, 3, 224, 224)):
    """Total FLOPs of `net` for an input of `data_shape`.

    Profiles a deep copy so the profiling hooks never touch the caller's model.
    """
    if isinstance(net, nn.DataParallel):
        net = net.module
    flops, _ = profile(copy.deepcopy(net), data_shape)
    return flops
def get_net_info(net, input_shape=(3, 224, 224), print_info=True):
    """Collect parameter count and FLOPs of `net`; optionally print a summary.

    Returns a dict with keys 'params' and 'flops'.
    """
    if isinstance(net, nn.DataParallel):
        net = net.module
    net_info = {
        'params': count_parameters(net),
        'flops': count_net_flops(net, [1] + list(input_shape)),
    }
    if print_info:
        print(net)
        print('Total training params: %.2fM' % (net_info['params'] / 1e6))
        print('Total FLOPs: %.2fM' % (net_info['flops'] / 1e6))
    return net_info
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,549
|
mit-han-lab/apq
|
refs/heads/master
|
/imagenet_codebase/utils/latency_estimator.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
import yaml
from imagenet_codebase.utils import download_url
class LatencyEstimator(object):
    """Base class for lookup-table (LUT) based latency estimation."""
    def __init__(self, local_dir='~/.torch/latency_tools/',
                 url='https://hanlab.mit.edu/files/proxylessNAS/LatencyTools/mobile_trim.yaml'):
        # Download the LUT when given an http(s) URL; otherwise treat `url`
        # as a local file path.
        if url.startswith('http'):
            fname = download_url(url, local_dir, overwrite=True)
        else:
            fname = url
        with open(fname, 'r') as fp:
            # safe_load: yaml.load without an explicit Loader is deprecated
            # (and a TypeError on PyYAML >= 6); the LUT is plain YAML.
            self.lut = yaml.safe_load(fp)
    @staticmethod
    def repr_shape(shape):
        """Canonical string form of a shape, e.g. [224, 224, 3] -> '224x224x3'."""
        if isinstance(shape, (list, tuple)):
            return 'x'.join(str(_) for _ in shape)
        elif isinstance(shape, str):
            return shape
        else:
            # Bug fix: the original `return TypeError` returned the exception
            # class object instead of raising it.
            raise TypeError('unsupported shape type: {}'.format(type(shape)))
    def query(self, **kwargs):
        """Look up the latency of one layer; implemented by subclasses."""
        raise NotImplementedError
    def predict_network_latency(self, net, image_size):
        """Whole-network latency prediction; implemented by subclasses."""
        return None
class ProxylessNASLatencyEstimator(LatencyEstimator):
    """LUT-based latency estimator for ProxylessNAS-style mobile networks."""
    def __init__(self, path=None):
        super(ProxylessNASLatencyEstimator, self).__init__()
        if path is not None:
            # Bug fix: the original leaked the file handle (open() with no
            # close) and used yaml.load without a Loader (deprecated, a
            # TypeError on PyYAML >= 6). The LUT is plain YAML, so safe_load
            # is sufficient.
            with open(path, 'r') as fp:
                self.lut = yaml.safe_load(fp)
    def query(self, l_type: str, input_shape, output_shape, expand=None, ks=None, stride=None, id_skip=None):
        """
        :param l_type:
            Layer type must be one of the followings
                1. `Conv`: The initial 3x3 conv with stride 2.
                2. `Conv_1`: feature_mix_layer
                3. `Logits`: All operations after `Conv_1`.
                4. `expanded_conv`: MobileInvertedResidual
        :param input_shape: input shape (h, w, #channels)
        :param output_shape: output shape (h, w, #channels)
        :param expand: expansion ratio
        :param ks: kernel size
        :param stride:
        :param id_skip: indicate whether has the residual connection
        :return: mean measured latency for the matching LUT entry
        """
        infos = [l_type, 'input:%s' % self.repr_shape(input_shape), 'output:%s' % self.repr_shape(output_shape), ]
        if l_type in ('expanded_conv',):
            assert None not in (expand, ks, stride, id_skip)
            infos += ['expand:%d' % expand, 'kernel:%d' % ks, 'stride:%d' % stride, 'idskip:%d' % id_skip]
        key = '-'.join(infos)
        return self.lut[key]['mean']
    def predict_network_latency(self, net, image_size=224):
        """Sum per-layer LUT latencies over first conv, all blocks,
        feature-mix layer, and classifier."""
        predicted_latency = 0
        # first conv (stride 2)
        predicted_latency += self.query(
            'Conv', [image_size, image_size, 3],
            [(image_size + 1) // 2, (image_size + 1) // 2, net.first_conv.out_channels]
        )
        # blocks
        fsize = (image_size + 1) // 2
        for block in net.blocks:
            mb_conv = block.mobile_inverted_conv
            shortcut = block.shortcut
            if mb_conv is None:
                continue
            idskip = 0 if shortcut is None else 1
            out_fz = int((fsize - 1) / mb_conv.stride + 1)  # ceil-style fsize / stride
            block_latency = self.query(
                'expanded_conv', [fsize, fsize, mb_conv.in_channels], [out_fz, out_fz, mb_conv.out_channels],
                expand=mb_conv.expand_ratio, ks=mb_conv.kernel_size, stride=mb_conv.stride, id_skip=idskip
            )
            predicted_latency += block_latency
            fsize = out_fz
        # feature mix layer
        predicted_latency += self.query(
            'Conv_1', [fsize, fsize, net.feature_mix_layer.in_channels],
            [fsize, fsize, net.feature_mix_layer.out_channels]
        )
        # classifier
        predicted_latency += self.query(
            'Logits', [fsize, fsize, net.classifier.in_features], [net.classifier.out_features]  # 1000
        )
        return predicted_latency
if __name__ == '__main__':
    # Smoke test: build the default estimator (downloads the LUT) and
    # report the predicted latency of a stock MnasNet.
    latency_model = ProxylessNASLatencyEstimator()
    from imagenet_codebase.networks.proxyless_nets import MnasNet
    net_ = MnasNet()
    predicted_latency_ = latency_model.predict_network_latency(net_)
    print(predicted_latency_, 'ms')
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,550
|
mit-han-lab/apq
|
refs/heads/master
|
/search.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
import argparse
import json
from methods.evolution.evo_main_gather import evolution_gather
# Command-line interface for the architecture searcher.
parser = argparse.ArgumentParser(description='Best Arch Searcher')
# Which predictor dataset to prepare, if any.
parser.add_argument('--prepare', type=str, default=None, choices=['acc', 'acc_quant'])
parser.add_argument('--acc_train_sample', type=int, default=None)
parser.add_argument('--mode', type=str, default='evolution', choices=['evolution'])
# Latency constraint (target budget) for the evolutionary search.
parser.add_argument('--constraint', type=float, default=120)
parser.add_argument('--exp_name', type=str, default='test')
# parse_known_args: tolerate extra flags consumed by downstream modules.
args, _ = parser.parse_known_args()
print(args)
def main():
    """Run the evolutionary search under args.constraint and dump the best
    architecture and latency results to exps/<exp_name>/."""
    import copy
    import os
    if args.mode == 'evolution':
        def add_arch(info, lst):
            # Split one spec into (architecture part, quantization part):
            # info1 keeps arch fields, info2 keeps per-layer bit settings.
            info1 = copy.deepcopy(info)
            info2 = copy.deepcopy(info)
            del info1['dw_w_bits_setting']
            del info1['dw_a_bits_setting']
            del info1['pw_w_bits_setting']
            del info1['pw_a_bits_setting']
            del info2['wid']
            del info2['ks']
            del info2['e']
            del info2['d']
            lst.append((info1, info2))
        dic = {}  # constraint -> best accuracy found
        whole = {}  # constraint -> full (time, result, lat, arch, acc) record
        candidate_archs = []
        out_dir = 'exps/{}'.format(args.exp_name)
        lats = []
        # Single-element loop kept so multiple constraints can be swept later.
        for i in [args.constraint]:
            res, info, t = evolution_gather(parser, force_latency=i)
            acc, arch, lat = info
            print((i, res, lat, arch, acc))
            if i not in dic or dic[i] < acc:
                dic[i] = acc
                whole[i] = (t, res, lat, arch, acc)
            lats.append(lat)
            add_arch(arch, candidate_archs)
        print('Found Best Architecture: {}'.format(dic))
        os.makedirs(out_dir, exist_ok=True)
        json.dump(candidate_archs[0], open('{}/arch'.format(out_dir), 'w'))
        json.dump(lats, open('{}/lat'.format(out_dir), 'w'))
if __name__ == '__main__':
    main()
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,551
|
mit-han-lab/apq
|
refs/heads/master
|
/utils/accuracy_predictor.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
import torch
import os
import torch.nn as nn
import numpy as np
from utils.converter import Converter
cvt = Converter()
def preparation(quantize=False, file_name=None, all=False, data_size=None):
    """Load and split the accuracy-predictor dataset.

    NOTE(review): the parameter `all` shadows the builtin; it cannot be
    renamed without breaking keyword callers.

    Returns (X_train, y_train, X_test, y_test); X_test/y_test are None when
    `data_size` is given without `all`.
    """
    from utils.converter import Converter
    def data_loader(file_name='dataset/NetInfo/acc/info.dict', quantize=False):
        # Each dict entry maps a JSON-encoded spec to its accuracy (percent).
        import json
        lst = json.load(open(file_name, 'r'))
        X_all = []
        y_all = []
        converter = Converter()
        for k, v in lst.items():
            dic = json.loads(k)
            tmp = converter.spec2feature(dic, quantize)
            X_all.append(tmp)
            y_all.append(v / 100.)
        return X_all, y_all
    if file_name is None:
        file_name = 'dataset/NetInfo/acc_quant/info.dict' if quantize else 'dataset/NetInfo/acc/info.dict'
    X_all, y_all = data_loader(file_name, quantize)
    X_all = torch.tensor(X_all, dtype=torch.float)
    y_all = torch.tensor(y_all)
    if not quantize:
        # Without quantization, only the architecture half of the feature is used.
        X_all = X_all[:, :cvt.half_dim]
        y_all = y_all
    shuffle_idx = torch.randperm(len(X_all))
    X_all = X_all[shuffle_idx]
    y_all = y_all[shuffle_idx]
    if all:
        # Train and test on the full dataset.
        X_train = X_all
        y_train = y_all
        X_test = X_all
        y_test = y_all
    elif data_size is not None:
        # Fixed-size training subset, no held-out test split.
        X_train = X_all[:data_size, :]
        y_train = y_all[:data_size]
        X_test = None
        y_test = None
    else:
        # Default 80/20 train/test split.
        idx = X_all.size(0) // 5 * 4
        X_train = X_all[:idx]
        y_train = y_all[:idx]
        X_test = X_all[idx:]
        y_test = y_all[idx:]
    return X_train, y_train, X_test, y_test
class MLP(nn.Module):
    """Accuracy-predictor MLP over architecture (and optional quantization)
    feature vectors produced by utils.converter.Converter."""
    def __init__(self, args, pretrained=True, mlp_hidden_size=400, mlp_layers=3, quantize=False, scratch=False):
        super(MLP, self).__init__()
        self.mlp_hidden_size = mlp_hidden_size
        self.mlp_layers = mlp_layers
        self.quantize = quantize
        self.layers = nn.ModuleList()
        for i in range(self.mlp_layers):
            if i == 0:
                # First layer maps the architecture half of the feature.
                self.layers.append(
                    nn.Sequential(
                        nn.Linear(cvt.half_dim, self.mlp_hidden_size),
                        nn.ReLU(inplace=False),
                    )
                )
            else:
                self.layers.append(
                    nn.Sequential(
                        nn.Linear(self.mlp_hidden_size, self.mlp_hidden_size),
                        nn.ReLU(inplace=False),
                    )
                )
        if self.quantize:
            # Separate embedding for the quantization half of the feature.
            self.quantize_fc = nn.Linear(cvt.full_dim - cvt.half_dim, self.mlp_hidden_size)
        self.regressor = nn.Linear(self.mlp_hidden_size, 1)
        if pretrained:
            # Load the matching pretrained predictor checkpoint.
            self.save_path = '{}/acc.pt'.format(args.acc_predictor_dir) if not self.quantize \
                else '{}/acc_quant.pt'.format(args.acc_predictor_dir)
            print('Load from {}'.format(self.save_path))
            self.update_acc_state_dict()
        elif quantize and not pretrained:
            # Warm-start the quantized predictor from the fp predictor weights.
            self.save_path = '{}/acc.pt'.format(args.acc_predictor_dir)
            if not scratch:
                self.update_acc_state_dict(strict=False)
    def update_acc_state_dict(self, strict=True):
        # Load weights from self.save_path; must exist.
        if not os.path.exists(self.save_path):
            assert False
        self.load_state_dict(torch.load(self.save_path), strict=strict)
    def forward(self, x):
        if self.quantize:
            # Fuse the architecture and quantization embeddings by addition.
            x1, x2 = x[:, :cvt.half_dim], x[:, cvt.half_dim:]
            x = self.layers[0](x1) + self.quantize_fc(x2)
        else:
            x = self.layers[0](x)
        for i in range(1, self.mlp_layers):
            x = self.layers[i](x)
        x = self.regressor(x)
        return x
class AccuracyPredictor():
    """Thin wrapper that runs the pretrained quantized MLP on spec dicts."""
    def __init__(self, args, quantize=True):
        # Only the quantization-aware predictor is supported.
        assert quantize
        self.quantize = quantize
        if self.quantize:
            self.mlp_with_q = MLP(args, quantize=True).cuda()
            self.mlp_with_q.eval()
    def predict_accuracy(self, specs):
        """Predict accuracies for a list of spec dicts; returns a tensor."""
        assert self.quantize
        X = []
        for spec in specs:
            X.append(cvt.spec2feature(spec))
        X = np.array(X)
        X = torch.tensor(X).float().cuda()
        if self.quantize:
            y = self.mlp_with_q(X)
        else:
            # NOTE(review): dead branch — self.mlp is never defined and
            # __init__ asserts quantize, so this path cannot be reached.
            y = self.mlp(X[:, :cvt.half_dim])
        return y
if __name__ == "__main__":
mlp = MLP(pretrained=False)
eval(mlp)
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,552
|
mit-han-lab/apq
|
refs/heads/master
|
/utils/latency_predictor.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
import json
class LatencyPredictor():
    """Predict per-network latency/energy by summing per-layer LUT entries
    measured on the target hardware platform (BitFusion)."""
    def __init__(self, platform='BitFusion', type='latency', batch=1):
        # Precompute per-stage input/output spatial sizes for a 224 input.
        # NOTE(review): the range covers a single value (224), so the loop
        # runs once — presumably a leftover from a multi-resolution sweep.
        for input_size in range(224, 224 + 4, 4):
            self.input_size = input_size
            self.sz_in = [self.input_size]
            self.sz_out = [(self.input_size + 1) // 2]
            for i in range(6):
                self.sz_in.append(self.sz_out[-1])
                # Stages 4 and 6 keep resolution; the others downsample by 2.
                if i == 3 or i == 5:
                    self.sz_out.append(self.sz_in[-1])
                else:
                    self.sz_out.append((self.sz_in[-1] + 1) // 2)
        assert platform in ['BitFusion']
        assert type in ['latency', 'energy']
        self.platform = platform
        self.type = type
        file_path = 'lut/{}_new.b{}.dict'.format(self.platform, batch)  # bitfusion batch=16
        self.dic = json.load(open(file_path, 'r'))
        # Fixed cost of the network head and tail.
        self.other = self.dic['head'][self.type] + self.dic['tail'][self.type]
    def build_table(self):
        """Enumerate every (conv-config) tuple that must be measured to
        populate the LUT; returns the deduplicated list."""
        def add(tmp, dic, measure_table):
            # Deduplicate while preserving insertion order.
            if tmp not in dic:
                dic[tmp] = 1
                measure_table.append(tmp)
        channel_in_list = [16, 24, 40, 80, 96, 192]
        channel_out_list = [24, 40, 80, 96, 192, 320]
        input_list = []
        output_list = []
        for i in range(1, 7):
            input_list.append('{}x{}x{}'.format(self.sz_in[i], self.sz_in[i], channel_in_list[i - 1]))
            output_list.append('{}x{}x{}'.format(self.sz_out[i], self.sz_out[i], channel_out_list[i - 1]))
        kernel_list = [3, 5, 7]
        dic = {}
        measure_table = []
        # 21 layers = 6 stages of up to 4 blocks (first block of each stage
        # may change resolution/channels).
        for layer in range(21):
            inp = input_list[layer // 4] if layer % 4 == 0 else input_list[layer // 4 + 1]
            out = output_list[layer // 4]
            input_sz = int(inp.split('x')[0])
            output_sz = int(out.split('x')[0])
            in_channels = int(inp.split('x')[-1])
            out_channels = int(out.split('x')[-1])
            stride = 1 if input_sz == output_sz else 2
            idskip = 1 if stride == 1 and in_channels == out_channels else 0
            for ks in kernel_list:
                # Candidate expanded (middle) channel counts, in steps of 8.
                lst = list(range(in_channels * 4, in_channels * 6 + 8, 8))
                for mid_c in lst:
                    out_c = out_channels
                    # Tuple layout: in_ch, out_ch, input_size, kernel, stride, padding, groups
                    tmp = in_channels, mid_c, input_sz, 1, 1, 0, 1  # pw
                    add(tmp, dic, measure_table)
                    tmp = mid_c, mid_c, input_sz, ks, stride, ks // 2, mid_c  # dw
                    add(tmp, dic, measure_table)
                    tmp = mid_c, out_c, input_sz // stride, 1, 1, 0, 1  # pw
                    add(tmp, dic, measure_table)
        return measure_table
    def get_lat(self, tmp, q):
        """Look up one conv config `tmp` at weight/activation bits q=(w, a)."""
        info = '{}-W{}A{}'.format(tmp, q[0], q[1])
        # print(info)
        if info in self.dic:
            return self.dic[info][self.type]
        else:
            assert False, info
    def predict_lat(self, spec):
        """Sum LUT latencies for the arch + quantization described by `spec`
        (keys: d, ks, e, pw/dw weight/activation bit settings)."""
        def make_divisible(v, divisor, min_value=None):
            if min_value is None:
                min_value = divisor
            x = max(min_value, int(v + divisor / 2) // divisor * divisor)
            # make sure that round down does not go down by more than 10%
            if x < 0.9 * v:
                x += divisor
            return x
        ans = self.other
        d = spec['d']
        ks = spec['ks']
        e = spec['e']
        w_pw, a_pw, w_dw, a_dw = spec['pw_w_bits_setting'], spec['pw_a_bits_setting'], \
            spec['dw_w_bits_setting'], spec['dw_a_bits_setting']
        channel_in_list = [16, 24, 40, 80, 96, 192]
        channel_out_list = [24, 40, 80, 96, 192, 320]
        input_list = []
        output_list = []
        for i in range(1, 7):
            input_list.append('{}x{}x{}'.format(self.sz_in[i], self.sz_in[i], channel_in_list[i - 1]))
            output_list.append('{}x{}x{}'.format(self.sz_out[i], self.sz_out[i], channel_out_list[i - 1]))
        # in_channels, out_channels, input_size, kernel_size, stride, padding, groups
        for layer in range(21):
            inp = input_list[layer // 4] if layer % 4 == 0 else input_list[layer // 4 + 1]
            out = output_list[layer // 4]
            input_sz = int(inp.split('x')[0])
            output_sz = int(out.split('x')[0])
            in_channels = int(inp.split('x')[-1])
            out_channels = int(out.split('x')[-1])
            stride = 1 if input_sz == output_sz else 2
            # print(inp, out)
            idskip = 1 if stride == 1 and in_channels == out_channels else 0
            mid_c = make_divisible(e[layer] * in_channels, 8)
            out_c = out_channels
            nowd = layer % 4
            stg = layer // 4
            # Only the first d[stg] blocks of each stage are active.
            if nowd < d[stg]:
                # in_channels, out_channels, input_size, kernel_size, stride, padding, groups
                tmp = in_channels, mid_c, input_sz, 1, 1, 0, 1  # pw
                ans += self.get_lat(tmp, (w_pw[layer], a_pw[layer]))
                tmp = mid_c, mid_c, input_sz, ks[layer], stride, ks[layer] // 2, mid_c  # dw
                # 7x7 depthwise at 4-bit activations is not in the LUT.
                assert not (ks[layer] == 7 and a_dw[layer] == 4)
                ans += self.get_lat(tmp, (w_dw[layer], a_dw[layer]))
                tmp = mid_c, out_c, input_sz // stride, 1, 1, 0, 1  # pw
                ans += self.get_lat(tmp, (w_pw[layer], a_pw[layer]))
        return ans
if __name__ == '__main__':
    # Smoke test: print the full measurement table and its size.
    lut = LatencyPredictor()
    tab = lut.build_table()
    print(tab)
    print(len(tab))
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,553
|
mit-han-lab/apq
|
refs/heads/master
|
/imagenet_codebase/data_providers/base_provider.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
import numpy as np
import torch
class DataProvider:
    """Abstract interface a dataset provider must implement."""
    SUB_SEED = 937162211  # random seed for sampling a training subset
    VALID_SEED = 2147483647  # random seed for carving out the validation set
    @staticmethod
    def name():
        """ Return name of the dataset """
        raise NotImplementedError
    @property
    def data_shape(self):
        """ Return shape as python list of one data entry """
        raise NotImplementedError
    @property
    def n_classes(self):
        """ Return `int` of num classes """
        raise NotImplementedError
    @property
    def save_path(self):
        """ local path to save the data """
        raise NotImplementedError
    @property
    def data_url(self):
        """ link to download the data """
        raise NotImplementedError
    @staticmethod
    def random_sample_valid_set(train_size, valid_size):
        """Deterministically partition range(train_size) into
        (train_indexes, valid_indexes) using the fixed VALID_SEED."""
        assert train_size > valid_size
        gen = torch.Generator()
        gen.manual_seed(DataProvider.VALID_SEED)  # fixed seed -> reproducible split
        perm = torch.randperm(train_size, generator=gen).tolist()
        return perm[valid_size:], perm[:valid_size]
    @staticmethod
    def labels_to_one_hot(n_classes, labels):
        """Convert an integer label vector into a float32 one-hot matrix."""
        one_hot = np.zeros((labels.shape[0], n_classes), dtype=np.float32)
        one_hot[np.arange(labels.shape[0]), labels] = 1.0
        return one_hot
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,554
|
mit-han-lab/apq
|
refs/heads/master
|
/quant_aware.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
from imagenet_codebase.run_manager import ImagenetRunConfig, RunManager
import os
import copy
import torch
from elastic_nn.modules.dynamic_op import DynamicSeparableConv2d, DynamicSeparableQConv2d
from elastic_nn.networks.dynamic_quantized_proxyless import DynamicQuantizedProxylessNASNets
import json
import argparse
parser = argparse.ArgumentParser(description='Quantization-aware Finetuning')
parser.add_argument('--exp_name', type=str, default='test')  # experiment folder name under exps/
parser.add_argument('--id', type=int, default=-1)  # NOTE(review): parsed but never used in this script
args, _ = parser.parse_known_args()
print(args)

if __name__ == '__main__':
    # Load the (architecture, quantization policy) pair written by the search stage.
    exp_dir = 'exps/{}'.format(args.exp_name)
    arch_path = '{}/arch'.format(exp_dir)
    tmp_lst = json.load(open(arch_path, 'r'))  # NOTE(review): file handle is never closed
    info, q_info = tmp_lst
    print(info)
    print(q_info)

    # Presumably enables OFA-style elastic-kernel transformation in the dynamic
    # conv ops — confirm against DynamicSeparableConv2d's implementation.
    DynamicSeparableConv2d.KERNEL_TRANSFORM_MODE = 1
    DynamicSeparableQConv2d.KERNEL_TRANSFORM_MODE = 1

    # Build the elastic supernet and initialize it from the pretrained OFA weights.
    dynamic_proxyless = DynamicQuantizedProxylessNASNets(
        ks_list=[3, 5, 7], expand_ratio_list=[4, 6], depth_list=[2, 3, 4], base_stage_width='proxyless',
        width_mult_list=1.0, dropout_rate=0, n_classes=1000
    )
    proxylessnas_init = torch.load(
        './models/imagenet-OFA',
        map_location='cpu'
    )['state_dict']
    dynamic_proxyless.load_weights_from_proxylessnas(proxylessnas_init)

    init_lr = 1e-3
    run_config = ImagenetRunConfig(
        test_batch_size=1000, image_size=224, n_worker=16, valid_size=5000, dataset='imagenet', train_batch_size=256,
        init_lr=init_lr, n_epochs=30,
    )

    # Activate the searched sub-network and quantization policy on a copy of the
    # supernet, recalibrate running statistics, then quantization-aware finetune.
    tmp_dynamic_proxyless = copy.deepcopy(dynamic_proxyless)
    run_manager = RunManager(exp_dir, tmp_dynamic_proxyless, run_config, init=False)
    tmp_dynamic_proxyless.set_active_subnet(**info)
    tmp_dynamic_proxyless.set_quantization_policy(**q_info)
    run_manager.reset_running_statistics()
    acc = run_manager.finetune()

    # Record (arch, q-policy, accuracy) for later aggregation.
    acc_list = []
    acc_list.append((json.dumps(info), json.dumps(q_info), acc))
    output_dir = '{}/acc'.format(exp_dir)
    json.dump(acc_list, open(output_dir, 'w'))  # NOTE(review): handle not closed; data may not flush on crash
    print('[Finished] Acc: {}'.format(acc))
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,555
|
mit-han-lab/apq
|
refs/heads/master
|
/test.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from utils.latency_predictor import LatencyPredictor
import sys
import copy
import argparse
import os
import json
import torch
from elastic_nn.modules.dynamic_op import DynamicSeparableConv2d, DynamicSeparableQConv2d
from elastic_nn.networks.dynamic_quantized_proxyless import DynamicQuantizedProxylessNASNets
from imagenet_codebase.run_manager import ImagenetRunConfig, RunManager
parser = argparse.ArgumentParser(description='Test')
parser.add_argument('--exp_dir', type=str, default=None)
args, _ = parser.parse_known_args()

if __name__ == '__main__':
    # One predictor per hardware-cost metric; each maps a joint
    # (architecture, quantization policy) dict to a scalar estimate.
    latency_predictor = LatencyPredictor(type='latency')
    energy_predictor = LatencyPredictor(type='energy')

    # Load the searched (architecture, quantization policy) pair.
    arch_dir = '{}/arch'.format(args.exp_dir)
    assert os.path.exists(arch_dir)
    with open(arch_dir, 'r') as f:  # close the handle (original leaked it)
        tmp_lst = json.load(f)
    info, q_info = tmp_lst
    print(info)
    print(q_info)

    # Reuse the predictors built above; the original constructed duplicate
    # LatencyPredictor instances (X / Y) and left the first pair unused.
    print('Latency: {:.2f}ms'.format(latency_predictor.predict_lat(dict(info, **q_info))))
    print('Energy: {:.2f}mJ'.format(energy_predictor.predict_lat(dict(info, **q_info))))

    ckpt_path = '{}/checkpoint/model_best.pth.tar'.format(args.exp_dir)
    if os.path.exists(ckpt_path):
        # Presumably enables OFA-style elastic-kernel transformation — see
        # DynamicSeparableConv2d for the exact semantics.
        DynamicSeparableConv2d.KERNEL_TRANSFORM_MODE = 1
        DynamicSeparableQConv2d.KERNEL_TRANSFORM_MODE = 1
        dynamic_proxyless = DynamicQuantizedProxylessNASNets(
            ks_list=[3, 5, 7], expand_ratio_list=[4, 6], depth_list=[2, 3, 4], base_stage_width='proxyless',
            width_mult_list=1.0, dropout_rate=0, n_classes=1000
        )
        # Initialize from the once-for-all pretrained supernet first ...
        proxylessnas_init = torch.load(
            './models/imagenet-OFA',
            map_location='cpu'
        )['state_dict']
        dynamic_proxyless.load_weights_from_proxylessnas(proxylessnas_init)
        init_lr = 1e-3
        run_config = ImagenetRunConfig(
            test_batch_size=1000, image_size=224, n_worker=16, valid_size=5000, dataset='imagenet', train_batch_size=256,
            init_lr=init_lr, n_epochs=30,
        )
        run_manager = RunManager('~/tmp', dynamic_proxyless, run_config, init=False)
        # ... then overwrite with the finetuned checkpoint for this experiment.
        proxylessnas_init = torch.load(
            ckpt_path,
            map_location='cpu'
        )['state_dict']
        dynamic_proxyless.load_weights_from_proxylessnas(proxylessnas_init)
        # Activate the searched sub-network + quantization policy and evaluate.
        dynamic_proxyless.set_active_subnet(**info)
        dynamic_proxyless.set_quantization_policy(**q_info)
        acc = run_manager.validate(is_test=True)
        print('Accuracy: {:.1f}'.format(acc[1]))
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,556
|
mit-han-lab/apq
|
refs/heads/master
|
/methods/evolution/evo_main_gather.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from utils.accuracy_predictor import AccuracyPredictor
from utils.latency_predictor import LatencyPredictor
from methods.evolution.evolution_finder import EvolutionFinder
import argparse
def evolution_gather(parser: argparse.ArgumentParser, force_latency, max_time_budget=500):
    """Run the evolutionary joint search under a hardware-cost constraint.

    Args:
        parser: the caller's argument parser; this function registers the
            predictor-related flags on it before parsing.
        force_latency: the latency/energy constraint passed to the finder.
        max_time_budget: number of search iterations (was hard-coded to 500;
            now a parameter with the same default for backward compatibility).

    Returns:
        (best_accuracy, info_dict, time_budget) — the last (best) value of the
        search trajectory, the winning architecture description, and the budget
        that was used.
    """
    parser.add_argument('--acc_predictor_dir', type=str, default='./models')
    parser.add_argument('--type', type=str, default='latency')
    args = parser.parse_args()

    # Quantization-aware accuracy predictor + hardware-cost predictor drive
    # the evolutionary finder.
    accuracy_predictor = AccuracyPredictor(args, quantize=True)
    latency_predictor = LatencyPredictor(type=args.type)

    evolution_finder = EvolutionFinder(latency_predictor, accuracy_predictor)
    times, best_valid, info = evolution_finder.run_evolution_search(
        constraint=force_latency, max_time_budget=max_time_budget)
    print(info)
    return best_valid[-1], info, max_time_budget


if __name__ == '__main__':
    pass
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,557
|
mit-han-lab/apq
|
refs/heads/master
|
/imagenet_codebase/data_providers/hand.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
import warnings
import os
import math
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from imagenet_codebase.data_providers.base_provider import DataProvider
import pickle
import PIL.Image
class Hand(torch.utils.data.Dataset):
    """Hand dataset backed by pickled train/test splits.

    Args:
        root, str: Root directory of the dataset.
        train, bool: Load train/test data.
        transform, callable: A function/transform that takes in a PIL.Image
            and transforms it.
        target_transform, callable: A function/transform that takes in the
            target and transforms it.
    """

    # Expected split sizes; used as sanity checks after unpickling
    # (were magic numbers 74 / 38 in the original).
    NUM_TRAIN = 74
    NUM_TEST = 38
    # The tiny train split is repeated to make an epoch long enough.
    TRAIN_REPEAT = 3

    def __init__(self, root='~/dataset/hand', train=True, transform=None, target_transform=None,
                 download=False):
        """Load the dataset.

        Args:
            root, str: Root directory of the dataset.
            train, bool [True]: Load train/test data.
            transform, callable [None]: transform applied to each PIL.Image.
            target_transform, callable [None]: transform applied to each target.
            download, bool [False]: accepted for interface compatibility;
                downloading is not supported for this dataset.
        """
        self._root = os.path.expanduser(root)  # replace ~ by the complete dir
        os.makedirs(self._root, exist_ok=True)
        self._train = train
        self._transform = transform
        self._target_transform = target_transform

        # Load the pickled split; the context manager closes the file handle
        # deterministically (the original leaked it via pickle.load(open(...))).
        # An unused `import numpy as np` was also removed here.
        if self._train:
            with open(os.path.join(self._root, 'train.pkl'), 'rb') as f:
                self._tmp_data, self._tmp_labels = pickle.load(f)
            rep = self.TRAIN_REPEAT
            self._train_data = self._tmp_data * rep
            self._train_labels = self._tmp_labels * rep
            assert (len(self._train_data) == self.NUM_TRAIN * rep
                    and len(self._train_labels) == self.NUM_TRAIN * rep)
        else:
            with open(os.path.join(self._root, 'test.pkl'), 'rb') as f:
                self._test_data, self._test_labels = pickle.load(f)
            assert (len(self._test_data) == self.NUM_TEST
                    and len(self._test_labels) == self.NUM_TEST)

    def __getitem__(self, index):
        """Return (image, target) for the given index.

        The raw array is wrapped in a PIL.Image so this dataset is consistent
        with the other torchvision-style datasets.
        """
        if self._train:
            image, target = self._train_data[index], self._train_labels[index]
        else:
            image, target = self._test_data[index], self._test_labels[index]
        image = PIL.Image.fromarray(image)
        if self._transform is not None:
            image = self._transform(image)
        if self._target_transform is not None:
            target = self._target_transform(target)
        return image, target

    @property
    def samples(self):
        # Raw (repeated) training data; read by the data provider.
        return self._train_data

    def __len__(self):
        """Number of samples in the active split."""
        if self._train:
            return len(self._train_data)
        return len(self._test_data)
class HandDataProvider(DataProvider):
    """Train/valid/test DataLoader provider for the tiny Hand dataset.

    Follows the common DataProvider interface used by the other providers in
    this package (name / data_shape / n_classes / save_path / sub-loaders).
    """

    def __init__(self, save_path=None, train_batch_size=128, test_batch_size=128, valid_size=None, n_worker=10,
                 resize_scale=0.08, distort_color=None, image_size=320):
        warnings.filterwarnings('ignore')
        self._save_path = save_path
        self.image_size = image_size  # int or list of int
        self.distort_color = distort_color
        self.resize_scale = resize_scale
        self.active_img_size = self.image_size
        # Validation transform: resize only (center crop intentionally disabled).
        valid_transforms = transforms.Compose([
            transforms.Resize(int(math.ceil(self.active_img_size))),
            # transforms.CenterCrop(self.active_img_size),
            transforms.ToTensor(),
            self.normalize,
        ])
        train_transforms = self.build_train_transform()
        self._valid_transform_dict = {self.active_img_size: valid_transforms}
        self.transform_ = valid_transforms
        train_dataset = Hand(self.save_path, train=True, transform=train_transforms)
        if valid_size is not None:
            if isinstance(valid_size, float):
                # A float valid_size is a fraction of the train set.
                valid_size = int(valid_size * len(train_dataset))
            else:
                assert isinstance(valid_size, int), 'invalid valid_size: %s' % valid_size
            valid_dataset = Hand(self.save_path, train=True, transform=valid_transforms)
            # NOTE(review): DataProvider.random_sample_valid_set takes
            # (train_size, valid_size) but is called with three args here, and
            # Hand.samples yields raw images rather than (path, cls) pairs —
            # this branch looks unusable as written; confirm before passing
            # a non-None valid_size.
            train_indexes, valid_indexes = self.random_sample_valid_set(
                [cls for _, cls in train_dataset.samples], valid_size, self.n_classes,
            )
            train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indexes)
            valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(valid_indexes)
            self.train = torch.utils.data.DataLoader(
                train_dataset, batch_size=train_batch_size, sampler=train_sampler,
                num_workers=n_worker, pin_memory=True,
            )
            self.valid = torch.utils.data.DataLoader(
                valid_dataset, batch_size=test_batch_size, sampler=valid_sampler,
                num_workers=n_worker, pin_memory=True,
            )
        else:
            self.train = torch.utils.data.DataLoader(
                train_dataset, batch_size=train_batch_size, shuffle=True,
                num_workers=n_worker, pin_memory=True,
            )
            self.valid = None
        test_dataset = Hand(self.save_path, train=False, transform=valid_transforms)
        self.test = torch.utils.data.DataLoader(
            test_dataset, batch_size=test_batch_size, shuffle=False, num_workers=n_worker, pin_memory=True,
        )
        if self.valid is None:
            # Without a held-out split, validation falls back to the test set.
            self.valid = self.test

    @staticmethod
    def name():
        """Dataset identifier used by the run config."""
        return 'hand'

    @property
    def data_shape(self):
        return 3, self.active_img_size, self.active_img_size  # C, H, W

    @property
    def n_classes(self):
        # Single output class for this dataset.
        return 1

    @property
    def save_path(self):
        # Prefer /dataset/hand; fall back to ~/dataset/hand if missing.
        if self._save_path is None:
            self._save_path = '/dataset/hand'
            if not os.path.exists(self._save_path):
                self._save_path = os.path.expanduser('~/dataset/hand')
        return self._save_path

    @property
    def data_url(self):
        raise ValueError('unable to download Hand')

    def train_dataset(self, _transforms):
        return Hand(self.save_path, train=True, transform=_transforms)

    def test_dataset(self, _transforms):
        # NOTE(review): unlike train_dataset, this builds an ImageFolder over
        # save_path/val instead of the pickled Hand test split — verify which
        # on-disk layout is actually expected.
        return datasets.ImageFolder(self.valid_path, _transforms)

    @property
    def train_path(self):
        return os.path.join(self.save_path, 'train')

    @property
    def valid_path(self):
        return os.path.join(self.save_path, 'val')

    @property
    def normalize(self):
        # ImageNet channel statistics.
        return transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])

    def assign_active_img_size(self, new_img_size):
        """Switch the provider (and its valid/test loaders) to a new image size."""
        self.active_img_size = new_img_size
        if self.active_img_size not in self._valid_transform_dict:
            self._valid_transform_dict[self.active_img_size] = transforms.Compose([
                transforms.Resize(int(math.ceil(self.active_img_size))),
                # transforms.CenterCrop(self.active_img_size),
                transforms.ToTensor(),
                self.normalize,
            ])
        # Swap the eval-time transform in place on the existing datasets.
        self.valid.dataset.transform = self._valid_transform_dict[self.active_img_size]
        self.test.dataset.transform = self._valid_transform_dict[self.active_img_size]

    def build_train_transform(self, image_size=None, print_log=True):
        """Compose the training-time augmentation pipeline."""
        if image_size is None:
            image_size = self.image_size
        if print_log:
            print('Color jitter: %s, resize_scale: %s, img_size: %s' %
                  (self.distort_color, self.resize_scale, image_size))
        if self.distort_color == 'torch':
            color_transform = transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
        elif self.distort_color == 'tf':
            color_transform = transforms.ColorJitter(brightness=32. / 255., saturation=0.5)
        else:
            color_transform = None
        if color_transform is None:
            train_transforms = transforms.Compose([
                transforms.RandomResizedCrop(image_size, scale=(self.resize_scale, 1.0)),
                transforms.RandomHorizontalFlip(),
                transforms.ToTensor(),
                self.normalize,
            ])
        else:
            train_transforms = transforms.Compose([
                transforms.RandomResizedCrop(image_size, scale=(self.resize_scale, 1.0)),
                transforms.RandomHorizontalFlip(),
                color_transform,
                transforms.ToTensor(),
                self.normalize,
            ])
        return train_transforms

    def build_sub_train_loader(self, n_images, batch_size, num_worker=None):
        """Materialize and cache a small fixed subset of the train set.

        Batches are pre-fetched into memory once and cached on the instance,
        keyed by the active image size (used e.g. for BN recalibration).
        """
        if self.__dict__.get('sub_train_%d' % self.active_img_size, None) is None:
            if num_worker is None:
                num_worker = self.train.num_workers
            n_samples = len(self.train.dataset.samples)
            g = torch.Generator()
            g.manual_seed(DataProvider.VALID_SEED)  # set random seed before sampling validation set
            rand_indexes = torch.randperm(n_samples, generator=g).tolist()
            new_train_dataset = Hand(self.save_path, train=True, transform=self.build_train_transform(print_log=False))
            chosen_indexes = rand_indexes[:n_images]
            sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(chosen_indexes)
            sub_data_loader = torch.utils.data.DataLoader(
                new_train_dataset, batch_size=batch_size, sampler=sub_sampler,
                num_workers=num_worker, pin_memory=True,
            )
            self.__dict__['sub_train_%d' % self.active_img_size] = []
            for images, labels in sub_data_loader:
                self.__dict__['sub_train_%d' % self.active_img_size].append((images, labels))
        return self.__dict__['sub_train_%d' % self.active_img_size]
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,558
|
mit-han-lab/apq
|
refs/heads/master
|
/imagenet_codebase/utils/pytorch_modules.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
import torch
import torch.nn as nn
def build_activation(act_func, inplace=True):
    """Instantiate an activation module by name.

    Args:
        act_func: one of 'relu', 'relu6', 'tanh', 'sigmoid', or None.
        inplace: forwarded to the ReLU variants only.

    Returns:
        An nn.Module, or None when act_func is None.

    Raises:
        ValueError: for any unsupported activation name.
    """
    if act_func is None:
        return None
    # Dispatch table instead of an if/elif chain; lambdas defer construction
    # so `inplace` is only applied where it is meaningful.
    factories = {
        'relu': lambda: nn.ReLU(inplace=inplace),
        'relu6': lambda: nn.ReLU6(inplace=inplace),
        'tanh': nn.Tanh,
        'sigmoid': nn.Sigmoid,
    }
    if act_func not in factories:
        raise ValueError('do not support: %s' % act_func)
    return factories[act_func]()
class ShuffleLayer(nn.Module):
    """Channel shuffle: interleave channels across `groups` groups.

    Splits the channel dimension into (groups, channels_per_group), swaps the
    two axes, and flattens back — so channels from different groups end up
    adjacent (as in ShuffleNet).
    """

    def __init__(self, groups):
        super(ShuffleLayer, self).__init__()
        self.groups = groups

    def forward(self, x):
        n, c, h, w = x.size()
        per_group = c // self.groups
        # (n, c, h, w) -> (n, groups, per_group, h, w) -> swap group axes
        out = x.view(n, self.groups, per_group, h, w)
        out = torch.transpose(out, 1, 2).contiguous()
        # flatten channel axes back to (n, c, h, w)
        return out.view(n, -1, h, w)
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,559
|
mit-han-lab/apq
|
refs/heads/master
|
/imagenet_codebase/data_providers/svhn.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
import warnings
import os
import math
import torch.utils.data
import torchvision.transforms as transforms
import torchvision.datasets as datasets
from imagenet_codebase.data_providers.base_provider import DataProvider
class SvhnDataProvider(DataProvider):
def __init__(self, save_path=None, train_batch_size=2560*8, test_batch_size=5120, valid_size=None, n_worker=16,
resize_scale=0.08, distort_color=None, image_size=224):
warnings.filterwarnings('ignore')
self._save_path = save_path
self.image_size = image_size # int or list of int
self.distort_color = distort_color
self.resize_scale = resize_scale
self.active_img_size = self.image_size
train_transforms = self.build_train_transform()
valid_transforms = transforms.Compose([
transforms.Resize(int(math.ceil(self.active_img_size / 0.875))),
transforms.CenterCrop(self.active_img_size),
transforms.ToTensor(),
self.normalize,
])
self._valid_transform_dict = {self.active_img_size: valid_transforms}
train_dataset = self.train_dataset(train_transforms)
if valid_size is not None:
if isinstance(valid_size, float):
valid_size = int(valid_size * len(train_dataset))
else:
assert isinstance(valid_size, int), 'invalid valid_size: %s' % valid_size
valid_dataset = self.train_dataset(valid_transforms)
train_indexes, valid_indexes = self.random_sample_valid_set(
[cls for _, cls in train_dataset.samples], valid_size, self.n_classes,
)
train_sampler = torch.utils.data.sampler.SubsetRandomSampler(train_indexes)
valid_sampler = torch.utils.data.sampler.SubsetRandomSampler(valid_indexes)
self.train = torch.utils.data.DataLoader(
train_dataset, batch_size=train_batch_size, sampler=train_sampler,
num_workers=n_worker, pin_memory=True,
)
self.valid = torch.utils.data.DataLoader(
valid_dataset, batch_size=test_batch_size, sampler=valid_sampler,
num_workers=n_worker, pin_memory=True,
)
else:
self.train = torch.utils.data.DataLoader(
train_dataset, batch_size=train_batch_size, shuffle=True,
num_workers=n_worker, pin_memory=True,
)
self.valid = None
test_dataset = self.test_dataset(valid_transforms)
self.test = torch.utils.data.DataLoader(
test_dataset, batch_size=test_batch_size, shuffle=False, num_workers=n_worker, pin_memory=True,
)
if self.valid is None:
self.valid = self.test
@staticmethod
def name():
return 'Svhn'
@property
def data_shape(self):
return 3, self.active_img_size, self.active_img_size # C, H, W
@property
def n_classes(self):
return 10
@property
def save_path(self):
if self._save_path is None:
self._save_path = '/dataset/svhn'
if not os.path.exists(self._save_path):
self._save_path = os.path.expanduser('~/dataset/svhn')
return self._save_path
@property
def data_url(self):
raise ValueError('unable to download ImageNet')
def train_dataset(self, _transforms):
return datasets.ImageFolder(self.train_path, _transforms)
def test_dataset(self, _transforms):
return datasets.ImageFolder(self.valid_path, _transforms)
@property
def train_path(self):
return os.path.join(self.save_path, 'train')
@property
def valid_path(self):
return os.path.join(self.save_path, 'val')
@property
def normalize(self):
return transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])
def build_train_transform(self, image_size=None, print_log=True):
if image_size is None:
image_size = self.image_size
if print_log:
print('Color jitter: %s, resize_scale: %s, img_size: %s' %
(self.distort_color, self.resize_scale, image_size))
if self.distort_color == 'torch':
color_transform = transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1)
elif self.distort_color == 'tf':
color_transform = transforms.ColorJitter(brightness=32. / 255., saturation=0.5)
else:
color_transform = None
if color_transform is None:
train_transforms = transforms.Compose([
transforms.RandomResizedCrop(image_size, scale=(self.resize_scale, 1.0)),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
self.normalize,
])
else:
train_transforms = transforms.Compose([
transforms.RandomResizedCrop(image_size, scale=(self.resize_scale, 1.0)),
transforms.RandomHorizontalFlip(),
color_transform,
transforms.ToTensor(),
self.normalize,
])
return train_transforms
def assign_active_img_size(self, new_img_size):
self.active_img_size = new_img_size
if self.active_img_size not in self._valid_transform_dict:
self._valid_transform_dict[self.active_img_size] = transforms.Compose([
transforms.Resize(int(math.ceil(self.active_img_size / 0.875))),
transforms.CenterCrop(self.active_img_size),
transforms.ToTensor(),
self.normalize,
])
self.valid.dataset.transform = self._valid_transform_dict[self.active_img_size]
self.test.dataset.transform = self._valid_transform_dict[self.active_img_size]
def build_sub_train_loader(self, n_images, batch_size, num_worker=None):
if self.__dict__.get('sub_train_%d' % self.active_img_size, None) is None:
if num_worker is None:
num_worker = self.train.num_workers
n_samples = len(self.train.dataset.samples)
g = torch.Generator()
g.manual_seed(DataProvider.VALID_SEED) # set random seed before sampling validation set
rand_indexes = torch.randperm(n_samples, generator=g).tolist()
new_train_dataset = self.train_dataset(
self.build_train_transform(image_size=self.active_img_size, print_log=False))
chosen_indexes = rand_indexes[:n_images]
sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(chosen_indexes)
sub_data_loader = torch.utils.data.DataLoader(
new_train_dataset, batch_size=batch_size, sampler=sub_sampler,
num_workers=num_worker, pin_memory=True,
)
self.__dict__['sub_train_%d' % self.active_img_size] = []
for images, labels in sub_data_loader:
self.__dict__['sub_train_%d' % self.active_img_size].append((images, labels))
return self.__dict__['sub_train_%d' % self.active_img_size]
def build_sub_val_loader(self, n_images, batch_size, num_worker=None):
    """Return (and cache) a small in-memory list of validation batches.

    Samples `n_images` validation images with a fixed seed so the subset is
    reproducible, loads them once, and caches the batches keyed by the
    active image size.
    """
    cache_key = 'sub_val_%d' % self.active_img_size
    if self.__dict__.get(cache_key, None) is None:
        if num_worker is None:
            num_worker = self.valid.num_workers

        n_samples = len(self.valid.dataset.samples)
        g = torch.Generator()
        g.manual_seed(DataProvider.VALID_SEED)  # set random seed before sampling validation set
        rand_indexes = torch.randperm(n_samples, generator=g).tolist()

        new_valid_dataset = self.valid_dataset(
            self.build_valid_transform(image_size=self.active_img_size, print_log=False))
        sub_sampler = torch.utils.data.sampler.SubsetRandomSampler(rand_indexes[:n_images])
        sub_data_loader = torch.utils.data.DataLoader(
            new_valid_dataset, batch_size=batch_size, sampler=sub_sampler,
            num_workers=num_worker, pin_memory=True,
        )
        # materialize once so later calls skip the data pipeline entirely
        self.__dict__[cache_key] = [(images, labels) for images, labels in sub_data_loader]
    return self.__dict__[cache_key]
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,560
|
mit-han-lab/apq
|
refs/heads/master
|
/imagenet_codebase/run_manager/__init__.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
from imagenet_codebase.run_manager.run_manager import *
from imagenet_codebase.data_providers.imagenet import *
from imagenet_codebase.data_providers.svhn import *
from imagenet_codebase.data_providers.coil import *
from imagenet_codebase.data_providers.hand import *
class ImagenetRunConfig(RunConfig):
    """RunConfig specialised for ImageNet-style classification runs.

    Adds data-pipeline knobs (worker count, crop scale, color distortion,
    input resolution) on top of the generic RunConfig training settings.
    """

    def __init__(self, n_epochs=150, init_lr=0.05, lr_schedule_type='cosine', lr_schedule_param=None,
                 dataset='imagenet', train_batch_size=256, test_batch_size=512, valid_size=None,
                 opt_type='sgd', opt_param=None, weight_decay=4e-5, label_smoothing=0.1, no_decay_keys=None,
                 model_init='he_fout', validation_frequency=1, print_frequency=10,
                 n_worker=32, resize_scale=0.08, distort_color='tf', image_size=224, **kwargs):
        super(ImagenetRunConfig, self).__init__(
            n_epochs, init_lr, lr_schedule_type, lr_schedule_param,
            dataset, train_batch_size, test_batch_size, valid_size,
            opt_type, opt_param, weight_decay, label_smoothing, no_decay_keys,
            model_init, validation_frequency, print_frequency
        )
        self.n_worker = n_worker
        self.resize_scale = resize_scale
        self.distort_color = distort_color
        self.image_size = image_size

    @property
    def data_provider(self):
        """Lazily construct and cache the dataset-specific data provider."""
        if self.__dict__.get('_data_provider', None) is None:
            providers = {
                ImagenetDataProvider.name(): ImagenetDataProvider,
                ImageNet100DataProvider.name(): ImageNet100DataProvider,
            }
            if self.dataset not in providers:
                raise NotImplementedError
            self.__dict__['_data_provider'] = providers[self.dataset](
                train_batch_size=self.train_batch_size, test_batch_size=self.test_batch_size,
                valid_size=self.valid_size, n_worker=self.n_worker, resize_scale=self.resize_scale,
                distort_color=self.distort_color, image_size=self.image_size,
            )
        return self.__dict__['_data_provider']
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,561
|
mit-han-lab/apq
|
refs/heads/master
|
/imagenet_codebase/utils/__init__.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
import numpy as np
import os
import sys
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
from imagenet_codebase.utils.my_modules import *
from imagenet_codebase.utils.pytorch_utils import *
from imagenet_codebase.utils.pytorch_modules import *
from imagenet_codebase.utils.quantize_utils import *
def make_divisible(v, divisor, min_val=None):
    """
    This function is taken from the original tf repo.
    It ensures that all layers have a channel number that is divisible by
    `divisor` (the old docstring hard-coded "8" even though the divisor is a
    parameter).  It can be seen here:
    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
    :param v: target value (e.g. a raw channel count)
    :param divisor: required divisor of the result
    :param min_val: lower bound of the result; defaults to `divisor`
    :return: the adjusted value, a multiple of `divisor`
    """
    if min_val is None:
        min_val = divisor
    # round to the nearest multiple of divisor, but never below min_val
    new_v = max(min_val, int(v + divisor / 2) // divisor * divisor)
    # Make sure that round down does not go down by more than 10%.
    if new_v < 0.9 * v:
        new_v += divisor
    return new_v
def get_same_padding(kernel_size):
    """'Same' padding for an odd kernel size; tuples are handled per-axis."""
    if isinstance(kernel_size, tuple):
        assert len(kernel_size) == 2, 'invalid kernel size: %s' % kernel_size
        return get_same_padding(kernel_size[0]), get_same_padding(kernel_size[1])
    assert isinstance(kernel_size, int), 'kernel size should be either `int` or `tuple`'
    assert kernel_size % 2 > 0, 'kernel size should be odd number'
    return kernel_size // 2
def get_split_list(in_dim, child_num):
    """Split `in_dim` into `child_num` near-equal parts (larger parts first)."""
    base, remainder = divmod(in_dim, child_num)
    return [base + 1 if idx < remainder else base for idx in range(child_num)]
def list_sum(x):
    """Sum the elements of non-empty `x` without assuming a zero element."""
    total = x[0]
    for item in x[1:]:
        total = total + item
    return total
def list_weighted_sum(x, weights):
    """Weighted sum of non-empty `x` with matching `weights`."""
    total = x[0] * weights[0]
    for item, w in zip(x[1:], weights[1:]):
        total = total + item * w
    return total
def list_mean(x):
    """Arithmetic mean of the elements of non-empty `x`."""
    n = len(x)
    return list_sum(x) / n
def list_mul(x):
    """Product of the elements of non-empty `x`."""
    product = x[0]
    for item in x[1:]:
        product = product * item
    return product
def list_join(val_list, sep='\t'):
    """Join the str() of every element with `sep`."""
    return sep.join(map(str, val_list))
def delta_ij(i, j):
    """Kronecker delta: 1 when i == j, otherwise 0."""
    return 1 if i == j else 0
def sub_filter_start_end(kernel_size, sub_kernel_size):
    """Start/end slice indices of a centred sub-kernel inside a larger kernel.

    Both sizes are expected to be odd; the assertion fires otherwise.
    """
    center = kernel_size // 2
    half = sub_kernel_size // 2
    start, end = center - half, center + half + 1
    assert end - start == sub_kernel_size
    return start, end
def int2list(val, repeat_time=1):
    """Normalise `val` to a list: sequences pass through (tuples are copied),
    scalars are repeated `repeat_time` times."""
    if isinstance(val, (list, np.ndarray)):
        return val
    if isinstance(val, tuple):
        return list(val)
    return [val] * repeat_time
def download_url(url, model_dir='~/.torch/', overwrite=False):
    """Download `url` into `model_dir` (created if missing) and return the
    local file path; an already-cached file is reused unless `overwrite`."""
    filename = url.split('/')[-1]
    base_dir = os.path.expanduser(model_dir)
    if not os.path.exists(base_dir):
        os.makedirs(base_dir)
    cached_file = os.path.join(base_dir, filename)
    if overwrite or not os.path.exists(cached_file):
        sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
        urlretrieve(url, cached_file)
    return cached_file
def accuracy(output, target, topk=(1,)):
    """ Computes the precision@k for the specified values of k

    :param output: (batch, num_classes) score tensor
    :param target: (batch,) integer class labels
    :param topk: tuple of k values to report
    :return: list of 1-element tensors, precision@k in percent, one per k
    """
    maxk = max(topk)
    batch_size = target.size(0)

    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()
    correct = pred.eq(target.view(1, -1).expand_as(pred))

    res = []
    for k in topk:
        # reshape (not view): `correct` stems from the transposed `pred`,
        # so slices may be non-contiguous and .view(-1) raises in modern torch
        correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
class AverageMeter(object):
    """
    Computes and stores the average and current value
    Copied from: https://github.com/pytorch/examples/blob/master/imagenet/main.py
    """

    def __init__(self):
        # Delegate to reset() so the initial state is defined in one place
        # (the original duplicated the four zero-assignments).
        self.reset()

    def reset(self):
        """Clear all running statistics."""
        self.val = 0    # most recent value
        self.avg = 0    # running average (sum / count)
        self.sum = 0    # weighted sum of values seen
        self.count = 0  # total weight seen so far

    def update(self, val, n=1):
        """Record `val` with weight `n` (e.g. a batch size)."""
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,562
|
mit-han-lab/apq
|
refs/heads/master
|
/imagenet_codebase/networks/__init__.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
from imagenet_codebase.networks.proxyless_nets import ProxylessNASNets, proxyless_base
def get_net_by_name(name):
if name == ProxylessNASNets.__name__:
return ProxylessNASNets
else:
raise ValueError('unrecognized type of network: %s' % name)
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,563
|
mit-han-lab/apq
|
refs/heads/master
|
/methods/evolution/evolution_finder.py
|
# Code for "APQ: Joint Search for Network Architecture, Pruning and Quantization Policy"
# CVPR 2020
# Tianzhe Wang, Kuan Wang, Han Cai, Ji Lin, Zhijian Liu, Song Han
# {usedtobe, kuanwang, hancai, jilin, zhijian, songhan}@mit.edu
import copy
import random
import numpy as np
from utils.converter import Converter
from utils.latency_predictor import LatencyPredictor
from utils.accuracy_predictor import AccuracyPredictor
class EvolutionFinder():
    """Evolutionary search over joint architecture / quantization specs.

    Candidates come from a Converter; a LatencyPredictor enforces a hard
    latency constraint and an AccuracyPredictor ranks survivors.
    """

    def __init__(self, latency_predictor: LatencyPredictor, accuracy_predictor: AccuracyPredictor):
        self.latency_predictor = latency_predictor
        # NOTE: the misspelled attribute name ('preditcor') is kept on purpose
        # so existing external readers of this attribute keep working.
        self.accuracy_preditcor = accuracy_predictor
        self.converter = Converter()

    def random_spec(self, constraint):
        """Sample random specs until a valid one meets the latency constraint.

        :return: (spec, predicted_latency)
        """
        while True:
            spec = self.converter.random_spec()
            if not self.converter.is_valid(spec):
                continue
            lat = self.latency_predictor.predict_lat(spec)
            if lat <= constraint:
                return spec, lat

    def mutate_spec(self, spec, constraint):
        """Randomly mutate blocks of `spec` until a valid child meets the
        latency constraint.

        Blocks are indexed 0..20 (stage = i // 4, depth position = i % 4 + 1).
        `identity[i]` is 1 when block i lies beyond its stage's depth in the
        parent; a child is rejected ("bad") when a stage's depth-3 block is
        identity while its depth-4 block is not, which would leave a hole.
        :return: (mutated_spec, predicted_latency)
        """
        while True:
            identity = []
            new_spec = copy.deepcopy(spec)
            block_mutation_prob = 0.1
            father = spec
            for i in range(21):
                depth = i % 4 + 1
                stg = i // 4
                if random.random() < block_mutation_prob:
                    self.converter.change_spec(new_spec, i)
                if depth > father['d'][stg]:
                    identity.append(1)
                else:
                    identity.append(0)
            bad = False
            for i in range(21):
                depth = i % 4 + 1
                stg = i // 4
                if depth == 3 and identity[i]:
                    if not identity[i + 1]:
                        bad = True
                if not identity[i]:
                    new_spec['d'][stg] = max(new_spec['d'][stg], depth)
            if not self.converter.is_valid(new_spec):
                continue
            lat = self.latency_predictor.predict_lat(new_spec)
            if not bad and lat <= constraint:
                return new_spec, lat

    def crossover_spec(self, spec1, spec2, constraint):
        """Block-wise uniform crossover of two parents, retried until the
        child is valid and meets the latency constraint.

        :return: (child_spec, predicted_latency)
        """
        while True:
            new_spec = copy.deepcopy(spec1)
            identity = []
            for i in range(21):
                depth = i % 4 + 1
                stg = i // 4
                father = copy.deepcopy(spec1) if random.random() < 0.5 else copy.deepcopy(spec2)
                new_spec['ks'][i] = father['ks'][i]
                new_spec['e'][i] = father['e'][i]
                for it in range(4):  # quantization policy
                    qname = self.converter.num2qname[it]
                    new_spec[qname][i] = father[qname][i]
                if depth > father['d'][stg]:
                    identity.append(1)
                else:
                    identity.append(0)
            bad = False
            for i in range(21):
                depth = i % 4 + 1
                stg = i // 4
                if depth == 3 and identity[i]:
                    if not identity[i + 1]:
                        bad = True
                if not identity[i]:
                    new_spec['d'][stg] = max(new_spec['d'][stg], depth)
            if not self.converter.is_valid(new_spec):
                continue
            lat = self.latency_predictor.predict_lat(new_spec)
            if not bad and lat <= constraint:
                return new_spec, lat

    def run_evolution_search(self, max_time_budget=1000,
                             population_size=100, mutation_numbers=50, constraint=120):
        """Run a single roll-out of regularized evolution to a fixed time budget."""
        times, best_valids, best_tests = [0.0], [-100], [-100]
        population = []  # (predicted_accuracy, spec, latency) tuples
        child_pool = []
        lat_pool = []
        best_info = None
        print('Generate random population...')
        for _ in range(population_size):
            spec, lat = self.random_spec(constraint)
            child_pool.append(spec)
            lat_pool.append(lat)
        accs = self.accuracy_preditcor.predict_accuracy(child_pool)
        # Bug fix: seed with the WHOLE random population.  The original looped
        # over range(mutation_numbers) and silently discarded the remaining
        # population_size - mutation_numbers candidates.
        for i in range(len(child_pool)):
            population.append((accs[i].item(), child_pool[i], lat_pool[i]))
        print('Start Evolution...')
        generation = 0  # renamed from `iter`, which shadowed the builtin
        # After the population is seeded, proceed with evolving the population.
        while True:
            parents_size = population_size // 4
            parents = sorted(population, key=lambda x: x[0])[::-1][:parents_size]
            acc = parents[0][0]
            if generation > 0 and generation % 100 == 1:
                print('Iter: {} Acc: {}'.format(generation - 1, parents[0][0]))
            times.append(generation)
            if acc > best_valids[-1]:
                best_valids.append(acc)
                best_info = parents[0]
            else:
                best_valids.append(best_valids[-1])
            if generation > max_time_budget:
                break
            population = parents
            child_pool = []
            lat_pool = []
            for i in range(mutation_numbers):
                par_spec = population[np.random.randint(parents_size)][1]
                # Mutate
                new_spec, lat = self.mutate_spec(par_spec, constraint)
                child_pool.append(new_spec)
                lat_pool.append(lat)
            for i in range(mutation_numbers):
                par_spec1 = population[np.random.randint(parents_size)][1]
                par_spec2 = population[np.random.randint(parents_size)][1]
                # Crossover
                new_spec, lat = self.crossover_spec(par_spec1, par_spec2, constraint)
                child_pool.append(new_spec)
                lat_pool.append(lat)
            accs = self.accuracy_preditcor.predict_accuracy(child_pool)
            # Bug fix: child_pool now holds mutation + crossover children
            # (2 * mutation_numbers); the original appended only the first
            # mutation_numbers, discarding every crossover child.
            for i in range(len(child_pool)):
                population.append((accs[i].item(), child_pool[i], lat_pool[i]))
            generation += 1
        return times, best_valids, best_info
|
{"/imagenet_codebase/run_manager/run_manager.py": ["/imagenet_codebase/utils/__init__.py"], "/elastic_nn/networks/dynamic_quantized_proxyless.py": ["/imagenet_codebase/utils/__init__.py"], "/imagenet_codebase/utils/latency_estimator.py": ["/imagenet_codebase/utils/__init__.py"], "/search.py": ["/methods/evolution/evo_main_gather.py"], "/utils/accuracy_predictor.py": ["/utils/converter.py"], "/quant_aware.py": ["/imagenet_codebase/run_manager/__init__.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py"], "/test.py": ["/utils/latency_predictor.py", "/elastic_nn/networks/dynamic_quantized_proxyless.py", "/imagenet_codebase/run_manager/__init__.py"], "/methods/evolution/evo_main_gather.py": ["/utils/accuracy_predictor.py", "/utils/latency_predictor.py", "/methods/evolution/evolution_finder.py"], "/imagenet_codebase/data_providers/hand.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/data_providers/svhn.py": ["/imagenet_codebase/data_providers/base_provider.py"], "/imagenet_codebase/run_manager/__init__.py": ["/imagenet_codebase/run_manager/run_manager.py", "/imagenet_codebase/data_providers/svhn.py", "/imagenet_codebase/data_providers/hand.py"], "/imagenet_codebase/utils/__init__.py": ["/imagenet_codebase/utils/pytorch_utils.py", "/imagenet_codebase/utils/pytorch_modules.py", "/imagenet_codebase/utils/quantize_utils.py"], "/methods/evolution/evolution_finder.py": ["/utils/converter.py", "/utils/latency_predictor.py", "/utils/accuracy_predictor.py"]}
|
24,564
|
knitemblazor/Document-denoizer
|
refs/heads/main
|
/skew_correction.py
|
import numpy as np
import cv2
import math
class SkewCorrection:
    """Estimate a document image's skew angle via Hough lines and rotate the
    page back to horizontal."""

    def __init__(self, img):
        # `img` is converted with np.array() in main(); presumably a PIL image
        # or ndarray — TODO confirm against caller.
        self.img = img

    def skew_correction(self, im_arr, angle):
        """Rotate `im_arr` by `angle` degrees around its center.

        Border pixels are replicated so the rotated page keeps a clean edge.
        """
        (h, w) = im_arr.shape[:2]
        center = (w // 2, h // 2)
        M = cv2.getRotationMatrix2D(center, angle, 1.0)
        rotated = cv2.warpAffine(im_arr, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)
        return rotated

    def finding_angle(self, img_array):
        """Estimate the skew angle (in degrees) from Hough lines.

        NOTE(review): only the third detected line (`lines[2]`) is used; this
        raises when fewer than three lines are found and assumes that line is
        representative of the page orientation — confirm.
        """
        gray = img_array
        edges = cv2.Canny(gray, 50, 150, apertureSize=3)
        lines = cv2.HoughLines(edges, 1, np.pi / 180, 100)
        for r, theta in lines[2]:
            a = np.cos(theta)
            b = np.sin(theta)
            x0 = a * r
            y0 = b * r
            # two far-apart points on the detected line
            x1 = int(x0 + 1000 * (-b))
            y1 = int(y0 + 1000 * a)
            x2 = int(x0 - 1000 * (-b))
            y2 = int(y0 - 1000 * a)
            # cv2.line(img, (x1, y1), (x2, y2), (0, 0, 255), 2)
            angle = math.atan2(x2 - x1, y2 - y1)
            angle = angle * 180 / np.pi
        return 90 - angle

    def main(self):
        """Run the full pipeline: estimate the angle, return the rotated array."""
        im_arr = np.array(self.img)
        angle = self.finding_angle(im_arr)
        rotated_img_arr = self.skew_correction(im_arr, angle)
        return rotated_img_arr
|
{"/app.py": ["/parallel.py", "/skew_correction.py"], "/parallel.py": ["/denoiser.py"]}
|
24,565
|
knitemblazor/Document-denoizer
|
refs/heads/main
|
/denoiser.py
|
import cv2
from models import *
from PIL import Image
from torchvision.utils import *
import torchvision.transforms as transforms
class DeNoiser:
    """Tile denoiser backed by a pretrained GeneratorUNet (from `models`).

    Weights are loaded from saved_models/generator_alter.pth — on GPU when
    CUDA is available, otherwise on CPU.
    """

    def __init__(self):
        self.cuda = True if torch.cuda.is_available() else False
        if self.cuda:
            self.generator = GeneratorUNet().cuda()
            self.generator.load_state_dict(torch.load("saved_models/generator_alter.pth"))
        else:
            self.generator = GeneratorUNet()
            self.generator.load_state_dict(torch.load("saved_models/generator_alter.pth", map_location=torch.device('cpu')))

    def den(self, img):
        """Denoise one 256x256 single-channel tile.

        The tile is expanded to RGB, run through the generator, and returned
        as an HxWx3 uint8 array (via make_grid).
        """
        gray = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)
        img = Image.fromarray(gray)
        transform = transforms.Compose([ transforms.ToTensor(), ])
        img_A = transform(img)
        if self.cuda:
            img_A = img_A.reshape(1, 3, 256, 256).cuda()
        else:
            img_A = img_A.reshape(1, 3, 256, 256)
        hr = self.generator(img_A)
        grid = make_grid(hr)
        # scale the float output back to uint8 pixel values
        ndarr = grid.mul(255).add_(0.5).clamp_(0, 255).permute(1, 2, 0).to('cpu', torch.uint8).numpy()
        return ndarr
|
{"/app.py": ["/parallel.py", "/skew_correction.py"], "/parallel.py": ["/denoiser.py"]}
|
24,566
|
knitemblazor/Document-denoizer
|
refs/heads/main
|
/app.py
|
from flask import Flask, jsonify, request
from parallel import ReSolver
from skew_correction import SkewCorrection
from PIL import Image
import numpy as np
app = Flask(__name__)


@app.route("/", methods=['POST'])
def index():
    """Accept an uploaded image, denoise (ReSolver) and deskew
    (SkewCorrection) it, and save the result to processed.jpg.

    Returns a small JSON status object in every case.
    """
    if request.method == 'POST':
        # request_data = request.form['text']
        # print(request_data )
        img = request.files.get('image', '')
        img = Image.open(img)
        img = np.array(img)
        obj = ReSolver(img)
        img = obj.orifice()
        obj = SkewCorrection(img)
        img = obj.main()
        im = Image.fromarray(img)
        im.save("processed.jpg")
        return jsonify({"status": "processed_image"})
    else:
        return jsonify({"status": "processed_image"})


# dev server entry point
app.run()
|
{"/app.py": ["/parallel.py", "/skew_correction.py"], "/parallel.py": ["/denoiser.py"]}
|
24,567
|
knitemblazor/Document-denoizer
|
refs/heads/main
|
/parallel.py
|
import numpy as np
import cv2
from denoiser import DeNoiser
from joblib import parallel_backend
from joblib import Parallel, delayed
from PIL import Image
class ReSolver:
    """Denoise an arbitrary-size grayscale page by tiling it into 256x256
    patches, denoising each tile, and stitching the result back together.

    Small pages are padded onto a 2560x3584 canvas; larger ones are resized
    down to that working resolution.
    """

    def __init__(self, img):
        try:
            self.gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        except:  # presumably already single-channel — keep as-is
            self.gray = img
        self.y, self.x = self.gray.shape
        self.denoiser = DeNoiser()

    def nona4(self, img):
        """Paste `img` top-left onto a 2560x3584 canvas."""
        canvas = np.array(Image.new('L', (2560, 3584), color=(256)))
        y, x = img.shape
        canvas[:y, :x] = img
        return canvas

    def executioner(self):
        """Bring the page to 2560x3584 and split it into 140 tiles of 256x256.

        Sets self.flag: True when the page was resized down, False when it
        was padded (and so must be cropped back afterwards).
        """
        if self.x < 2560 and self.y < 3584:
            self.flag = False
            resized = self.nona4(self.gray)
        else:
            self.flag = True
            resized = cv2.resize(self.gray, (2560, 3584), interpolation=cv2.INTER_CUBIC)
        tiles = []
        for i in range(14):
            m = i * 256
            for j in range(10):
                l = 256 * j
                tile = resized[m:m + 256, l:l + 256]
                tiles.append(tile)
        self.tiles = tiles

    def de_noiser(self, tile):
        """Denoise one tile and convert it back to single-channel."""
        tile = self.denoiser.den(tile)
        tile = cv2.cvtColor(tile, cv2.COLOR_BGR2GRAY)
        return tile

    def para(self):
        """Denoise all tiles with 4 worker threads."""
        with parallel_backend('threading', n_jobs=4):
            self.tiles = Parallel()(delayed(self.de_noiser)(tile) for tile in self.tiles)

    def canvas(self):
        """Reassemble the denoised tiles into the full 2560x3584 page."""
        self.executioner()
        self.para()
        img = Image.new('L', (2560, 3584), color=(256))
        img = np.array(img)
        counter = 0
        for i in range(14):
            m = i * 256
            for j in range(10):
                l = 256 * j
                img[m:m + 256, l:l + 256] = self.tiles[counter]
                counter = counter + 1
        return img

    def orifice(self):
        """Return the denoised page as a PIL image, cropped back to the
        original size when the input had been padded."""
        img = self.canvas()
        if self.flag:
            pass
        else:
            img = img[:self.y, :self.x]
        return Image.fromarray(img)
|
{"/app.py": ["/parallel.py", "/skew_correction.py"], "/parallel.py": ["/denoiser.py"]}
|
24,573
|
obrazowaniebiomedyczne/laboratorium-1-wt-tn-17-05-comm0
|
refs/heads/master
|
/check.py
|
import numpy as np
from solution import *
from obpng import write_png
from obpng import read_png
# Passing grade ("dostateczna") exercises
print("- Ocena dostateczna")

# Square
print(" kwadrat")
image = square(512, 128, (32, 64))
write_png(image, 'results/1_square.png')

# Circles
print(" kółka")
image = midcircle((512, 256))
write_png(image, 'results/2_circle_1.png')
image = midcircle((256, 512))
write_png(image, 'results/2_circle_2.png')
image = midcircle((512, 512))
write_png(image, 'results/2_circle_3.png')

# Checkerboard
print(" szachownica")
image = checkerboard(256)
write_png(image, 'results/3_checkerboard.png')

# Good grade ("dobra") exercises
lenna = np.squeeze(read_png('data/mono/lenna.png'))

# Nearest-neighbour interpolation (down- then up-scale)
print("- Ocena dobra")
print(" interpolacja najbliższych sąsiadów")
image = nn_interpolation(lenna, (100, 100))
image = nn_interpolation(image, (512, 512))
write_png(image, 'results/4_nn.png')

# Bilinear interpolation (disabled)
"""
print("- Ocena bardzo dobra")
print(" interpolacja dwuliniowa")
image = bilinear_interpolation(lenna, (100, 100))
image = nn_interpolation(image, (512, 512))
write_png(image, 'results/5_bilinear.png')
"""
|
{"/check.py": ["/solution.py", "/obpng.py"]}
|
24,574
|
obrazowaniebiomedyczne/laboratorium-1-wt-tn-17-05-comm0
|
refs/heads/master
|
/obpng.py
|
import zlib, struct
import numpy as np
MAGIC_NUMBER = b'\x89PNG\x0d\x0a\x1a\x0a'
# IHDR layout: width, height, bit depth, color type, compression, filter, interlace
PACK_FORMAT = "!2I5B"

# PNG color-type codes keyed by channel count, and the inverse mapping.
DEPTH_TO_COLOR_TYPES = {
    1: 0,  # Grayscale
    2: 4,  # Grayscale with alpha
    3: 2,  # RGB
    4: 6   # RGBA
}
COLOR_TYPES_TO_DEPTH = {
    0: 1,  # Grayscale
    4: 2,  # Grayscale with alpha
    2: 3,  # RGB
    6: 4   # RGBA
}

# http://www.libpng.org/pub/png/spec/1.2/PNG-Chunks.html


def read_png(filename):
    """Read an 8-bit PNG (as produced by write_png) into a numpy array of
    shape (height, width, depth), dtype uint8.

    Only filter-type-0 scanlines and a single IDAT chunk are supported.
    Fixes: the file handle is now closed (the original leaked it) and the
    local no longer shadows the builtin `bytes`.

    :raises RuntimeError: for non-PNG files, non-8-bit depth, or unsupported
        color types.
    """
    with open(filename, 'rb') as f:
        contents = f.read()
    # Check type
    if contents[:len(MAGIC_NUMBER)] != MAGIC_NUMBER:
        raise RuntimeError("%s is not a PNG file." % filename)
    # Payload offsets: 4 bytes past each chunk tag
    hdr_loc = contents.find(b'IHDR') + 4
    dat_loc = contents.find(b'IDAT') + 4
    end_loc = contents.find(b'IEND') + 4
    # Read and unpack header
    header = contents[hdr_loc:hdr_loc + 13]
    width, height, bit_depth, color_type, a, b, c = struct.unpack(PACK_FORMAT,
                                                                  header)
    if bit_depth != 8:
        raise RuntimeError("Only 8-bit images in lab.")
    # Get structure depth from color type
    if color_type in COLOR_TYPES_TO_DEPTH:
        depth = COLOR_TYPES_TO_DEPTH[color_type]
    else:
        raise RuntimeError("Only grayscale, alpha grayscale, RGB and RGBA images allowed in labs. Chcek out")
    # Read data; zlib ignores the trailing chunk bytes after the stream end
    compressed_data = contents[dat_loc:end_loc]
    raw_data = zlib.decompress(compressed_data)
    # Convert to numpy and drop the leading filter byte of every scanline
    width_byte = width * depth
    image = np.frombuffer(raw_data, dtype=np.uint8)
    image = np.delete(image, np.arange(0, len(image), width_byte + 1))
    image = image.reshape(height, width, depth)
    return image
def write_png(image, filename):
    """Write a uint8 numpy image to `filename` as a PNG with zero filter
    bytes on every scanline.

    :param image: 2-D (grayscale) or 3-D (H, W, channels) uint8 array; the
        channel count selects the PNG color type.
    :param filename: output path
    :raises RuntimeError: for non-uint8 data, unsupported channel counts, or
        arrays that are not 2-D/3-D.

    Fixes: the output file is now closed via a context manager and the unused
    `struct.unpack` round-trip of the header (`h_pack`) was removed.
    """
    # Check correct type
    if image.dtype != np.uint8:
        raise RuntimeError("We use only 8-bit images in labs.")
    # First convert image to byte array and get its size
    buf = bytearray(np.flipud(image))
    dimensions = image.shape
    # Determine color type
    if len(dimensions) == 2:
        # Grayscale image
        color_type = 0
        height, width = dimensions
        depth = 1
    elif len(dimensions) == 3:
        height, width, depth = dimensions
        if depth in DEPTH_TO_COLOR_TYPES:
            color_type = DEPTH_TO_COLOR_TYPES[depth]
        else:
            raise RuntimeError("Only grayscale, alpha grayscale, RGB and RGBA images allowed in labs. Chcek out.")
    else:
        raise RuntimeError("Image should be 2D or 3D array.")
    # Prepend a zero filter byte to every scanline, top row first
    width_byte = width * depth
    raw_data = b''.join(
        b'\x00' + buf[span:span + width_byte]
        for span in range((height - 1) * width_byte, -1, - width_byte)
    )

    def png_pack(png_tag, data):
        # chunk = length | tag | payload | CRC(tag + payload)
        chunk_head = png_tag + data
        return (struct.pack("!I", len(data)) +
                chunk_head +
                struct.pack("!I", 0xFFFFFFFF & zlib.crc32(chunk_head)))

    # Header
    bit_depth = 8
    header = struct.pack(PACK_FORMAT, width, height,
                         bit_depth, color_type,
                         0, 0, 0)
    # Compress data
    compressed_data = zlib.compress(raw_data, 9)
    # Prepare data
    data = b''.join([
        MAGIC_NUMBER,
        png_pack(b'IHDR', header),
        png_pack(b'IDAT', compressed_data),
        png_pack(b'IEND', b'')])
    # Write data
    with open(filename, 'wb') as f:
        f.write(data)
|
{"/check.py": ["/solution.py", "/obpng.py"]}
|
24,575
|
obrazowaniebiomedyczne/laboratorium-1-wt-tn-17-05-comm0
|
refs/heads/master
|
/solution.py
|
"""
Rozwiązania do laboratorium 1 z Obrazowania Biomedycznego.
"""
import numpy as np
"""
3 - Kwadrat
"""
def square(size, side, start):
image = np.zeros((size, size)).astype(np.uint8)
for i in range(start[0],side):
for j in range(start[1],side):
image[i,j] = 255
return image
"""
3 - Koło
"""
def midcircle(size):
image = np.zeros((size[0], size[1])).astype(np.uint8)
if (size[0] > size[1]):
r = size[1]//4
else:
r = size[0]//4
y0=size[0]/2
x0=size[1]/2
y,x = np.ogrid[0:size[0], 0:size[1]]
mask = ((x-x0) **2 + (y-y0) ** 2) < r**2
image[mask] = 255
return image
"""
3 - Szachownica.
"""
def drawSquare(image, pos, leng, color):
image[pos] = color
for i in range(pos[0], pos[0]+leng):
for j in range(pos[1], pos[1]+leng):
image[i,j] = color
def checkerboard(size):
image = np.zeros((size, size)).astype(np.uint8)
cnt = 0
for i in range(8):
for j in range(8):
pos = (size//8 * i, size//8 * j)
# print (pos)
if ((i%2 == 0 and j%2 == 1) or ((i%2 == 1 and j%2 == 0))):
drawSquare(image, pos, size//8, 255)
return image
"""
4 - Interpolacja najbliższych sąsiadów.
"""
def nn_interpolation(source, new_size):
w_destination, h_destination = new_size
h_source, w_source = source.shape
ratio_x = w_source / w_destination
ratio_y = h_source / h_destination
image = np.zeros(new_size).astype(np.uint8)
for (i, j) in np.ndindex(image.shape):
image[i, j] = source[int(i*ratio_y), int(j*ratio_x)]
return image
"""
5 - Interpolacja dwuliniowa
"""
def bilinear_interpolation(source, new_size):
pass
|
{"/check.py": ["/solution.py", "/obpng.py"]}
|
24,578
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/Player.py
|
import timer, sound_pool as sp, random
class Player:
    """Player avatar with a 3-D grid position and a movement cooldown."""

    def __init__(self):
        self.x = 0
        self.y = 0
        self.z = 0
        self.movetimer = timer.timer()
        self.movetime = 200  # cooldown between steps (timer units)

    def can_move(self):
        """True once the movement cooldown has elapsed."""
        return self.movetimer.elapsed > self.movetime

    def set_x(self, x):
        self.x = x

    def get_x(self):
        return self.x

    def set_y(self, y):
        self.y = y

    def get_y(self):
        return self.y

    def set_z(self, z):
        self.z = z

    def get_z(self):
        return self.z

    def get_string_coordinates(self):
        """Current position formatted as 'x, y, z'."""
        return str(self.x) + ", " + str(self.y) + ", " + str(self.z)

    def move(self, direction):
        """Step one unit: 1=+y, 2=+x, 3=-y, 4=-x, 5=+z, anything else=-z.

        Restarts the cooldown timer and plays a random footstep sound.
        """
        self.movetimer.restart()
        dx, dy, dz = {
            1: (0, 1, 0),
            2: (1, 0, 0),
            3: (0, -1, 0),
            4: (-1, 0, 0),
            5: (0, 0, 1),
        }.get(direction, (0, 0, -1))
        self.x += dx
        self.y += dy
        self.z += dz
        sp.p.play_stationary("footsteps/wood/fs_wood_" + str(random.randint(1, 5)), False)
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,579
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/sound_lib/external/pybassenc.py
|
"BASSENC wrapper by Christopher Toth"""
import ctypes
import os
import platform
import pybass
from paths import x86_path, x64_path
import libloader
bassenc_module = libloader.load_library('bassenc', x86_path=x86_path, x64_path=x64_path)
func_type = libloader.get_functype()
HENCODE = ctypes.c_ulong #encoder handle
#Additional error codes returned by BASS_ErrorGetCode
BASS_ERROR_ACM_CANCEL = 2000 #ACM codec selection cancelled
pybass.error_descriptions[BASS_ERROR_ACM_CANCEL] = "ACM codec selection cancelled"
BASS_ERROR_CAST_DENIED = 2100 #access denied (invalid password)
pybass.error_descriptions[BASS_ERROR_CAST_DENIED] = "access denied (invalid password)"
#Additional BASS_SetConfig options
BASS_CONFIG_ENCODE_PRIORITY = 0x10300
BASS_CONFIG_ENCODE_QUEUE = 0x10301
BASS_CONFIG_ENCODE_CAST_TIMEOUT = 0x10310
#Additional BASS_SetConfigPtr options
BASS_CONFIG_ENCODE_CAST_PROXY = 0x10311
#BASS_Encode_Start flags
BASS_ENCODE_NOHEAD = 1 #don't send a WAV header to the encoder
BASS_ENCODE_FP_8BIT = 2 #convert floating-point sample data to 8-bit integer
BASS_ENCODE_FP_16BIT = 4 #convert floating-point sample data to 16-bit integer
BASS_ENCODE_FP_24BIT = 6#convert floating-point sample data to 24-bit integer
BASS_ENCODE_FP_32BIT = 8 #convert floating-point sample data to 32-bit integer
BASS_ENCODE_BIGEND = 16 #big-endian sample data
BASS_ENCODE_PAUSE = 32 #start encoding paused
BASS_ENCODE_PCM = 64 #write PCM sample data (no encoder)
BASS_ENCODE_RF64 = 128 #send an RF64 header
BASS_ENCODE_MONO = 256 #convert to mono (if not already)
BASS_ENCODE_QUEUE = 512 #queue data to feed encoder asynchronously
BASS_ENCODE_CAST_NOLIMIT = 0x1000 #don't limit casting data rate
BASS_ENCODE_LIMIT = 0x2000 #limit data rate to real-time
BASS_ENCODE_AUTOFREE = 0x40000 #free the encoder when the channel is freed
#BASS_Encode_GetACMFormat flags
BASS_ACM_DEFAULT = 1 #use the format as default selection
BASS_ACM_RATE = 2 #only list formats with same sample rate as the source channel
BASS_ACM_CHANS = 4 #only list formats with same number of channels (eg. mono/stereo)
BASS_ACM_SUGGEST = 8 #suggest a format (HIWORD=format tag)
#BASS_Encode_GetCount counts
(
BASS_ENCODE_COUNT_IN, #sent to encoder
BASS_ENCODE_COUNT_OUT, #received from encoder
BASS_ENCODE_COUNT_CAST, #sent to cast server
BASS_ENCODE_COUNT_QUEUE, #queued
BASS_ENCODE_COUNT_QUEUE_LIMIT, #queue limit
BASS_ENCODE_COUNT_QUEUE_FAIL, #failed to queue
) = range(6)
#BASS_Encode_CastInit content MIME types
BASS_ENCODE_TYPE_MP3 = "audio/mpeg"
BASS_ENCODE_TYPE_OGG = "application/ogg"
BASS_ENCODE_TYPE_AAC = "audio/aacp"
#BASS_Encode_CastGetStats types
BASS_ENCODE_STATS_SHOUT = 0 #Shoutcast stats
BASS_ENCODE_STATS_ICE = 1 #Icecast mount-point stats
BASS_ENCODE_STATS_ICESERV = 2 #Icecast server stats
#typedef void (CALLBACK ENCODEPROC)(HENCODE handle, DWORD channel, const void *buffer, DWORD length, void *user);
ENCODEPROC = func_type(None, HENCODE, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_ulong, ctypes.c_void_p)
#typedef void (CALLBACK ENCODEPROCEX)(HENCODE handle, DWORD channel, const void *buffer, DWORD length, QWORD offset, void *user);
ENCODEPROCEX = func_type(None, HENCODE, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_ulong, pybass.QWORD, ctypes.c_void_p)
#typedef BOOL (CALLBACK ENCODECLIENTPROC)(HENCODE handle, BOOL connect, const char *client, char *headers, void *user);
ENCODECLIENTPROC = func_type(ctypes.c_byte, HENCODE, ctypes.c_byte, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_void_p)
#typedef void (CALLBACK ENCODENOTIFYPROC)(HENCODE handle, DWORD status, void *user);
ENCODENOTIFYPROC = func_type(None, HENCODE, ctypes.c_ulong, ctypes.c_void_p)
#Encoder notifications
BASS_ENCODE_NOTIFY_ENCODER = 1 #encoder died
BASS_ENCODE_NOTIFY_CAST = 2 #cast server connection died
BASS_ENCODE_NOTIFY_CAST_TIMEOUT = 0x10000 #cast timeout
BASS_ENCODE_NOTIFY_QUEUE_FULL = 0x10001 #queue is out of space
#BASS_Encode_ServerInit flags
BASS_ENCODE_SERVER_NOHTTP = 1 #no HTTP headers
#DWORD BASSENCDEF(BASS_Encode_GetVersion)();
BASS_Encode_GetVersion = func_type(ctypes.c_ulong)(('BASS_Encode_GetVersion', bassenc_module))
#HENCODE BASSENCDEF(BASS_Encode_Start)(DWORD handle, const char *cmdline, DWORD flags, ENCODEPROC *proc, void *user);
BASS_Encode_Start = func_type(HENCODE, ctypes.c_ulong, ctypes.c_char_p, ctypes.c_ulong, ENCODEPROC, ctypes.c_void_p)(('BASS_Encode_Start', bassenc_module))
#HENCODE BASSENCDEF(BASS_Encode_StartLimit)(DWORD handle, const char *cmdline, DWORD flags, ENCODEPROC *proc, void *user, DWORD limit);
BASS_Encode_StartLimit = func_type(HENCODE, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_ulong, ENCODEPROC, ctypes.c_void_p, ctypes.c_ulong)(('BASS_Encode_StartLimit', bassenc_module))
#BOOL BASSENCDEF(BASS_Encode_AddChunk)(HENCODE handle, const char *id, const void *buffer, DWORD length);
BASS_Encode_AddChunk = func_type(ctypes.c_byte, HENCODE, ctypes.c_char_p, ctypes.c_void_p, ctypes.c_ulong)(('BASS_Encode_AddChunk', bassenc_module))
#DWORD BASSENCDEF(BASS_Encode_IsActive)(DWORD handle);
BASS_Encode_IsActive = func_type(ctypes.c_ulong, ctypes.c_ulong)(('BASS_Encode_IsActive', bassenc_module))
#BOOL BASSENCDEF(BASS_Encode_Stop)(DWORD handle);
BASS_Encode_Stop = func_type(ctypes.c_byte, ctypes.c_ulong)(('BASS_Encode_Stop', bassenc_module))
#BOOL BASSENCDEF(BASS_Encode_StopEx)(DWORD handle, BOOL queue);
BASS_Encode_StopEx = func_type(ctypes.c_byte, ctypes.c_ulong, ctypes.c_byte)
#BOOL BASSENCDEF(BASS_Encode_SetPaused)(DWORD handle, BOOL paused);
BASS_Encode_SetPaused = func_type(ctypes.c_byte, ctypes.c_ulong, ctypes.c_byte)(('BASS_Encode_SetPaused', bassenc_module))
#BOOL BASSENCDEF(BASS_Encode_Write)(DWORD handle, const void *buffer, DWORD length);
BASS_Encode_Write = func_type(ctypes.c_byte, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_ulong)(('BASS_Encode_Write', bassenc_module))
#BOOL BASSENCDEF(BASS_Encode_SetNotify)(DWORD handle, ENCODENOTIFYPROC *proc, void *user);
BASS_Encode_SetNotify = func_type(ctypes.c_byte, ctypes.c_ulong, ENCODENOTIFYPROC, ctypes.c_void_p)(('BASS_Encode_SetNotify', bassenc_module))
#QWORD BASSENCDEF(BASS_Encode_GetCount)(DWORD handle, DWORD count);
BASS_Encode_GetCount = func_type(pybass.QWORD, ctypes.c_ulong, ctypes.c_ulong)(('BASS_Encode_GetCount', bassenc_module))
#BOOL BASSENCDEF(BASS_Encode_SetChannel)(DWORD handle, DWORD channel);
BASS_Encode_SetChannel = func_type(ctypes.c_byte, ctypes.c_ulong, ctypes.c_ulong)
#DWORD BASSENCDEF(BASS_Encode_GetChannel)(HENCODE handle);
BASS_Encode_GetChannel = func_type(ctypes.c_ulong, HENCODE)(('BASS_Encode_GetChannel', bassenc_module))
if platform.system() == 'Windows':
#DWORD BASSENCDEF(BASS_Encode_GetACMFormat)(DWORD handle, void *form, DWORD formlen, const char *title, DWORD flags);
BASS_Encode_GetACMFormat = func_type(ctypes.c_ulong, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_ulong, ctypes.c_char_p, ctypes.c_ulong)(('BASS_Encode_GetACMFormat', bassenc_module))
#HENCODE BASSENCDEF(BASS_Encode_StartACM)(DWORD handle, const void *form, DWORD flags, ENCODEPROC *proc, void *user);
BASS_Encode_StartACM = func_type(HENCODE, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_ulong, ENCODEPROC, ctypes.c_void_p)(('BASS_Encode_StartACM', bassenc_module))
#HENCODE BASSENCDEF(BASS_Encode_StartACMFile)(DWORD handle, const void *form, DWORD flags, const char *file);
BASS_Encode_StartACMFile = func_type(HENCODE, ctypes.c_ulong, ctypes.c_void_p, ctypes.c_ulong, ctypes.c_char_p)(('BASS_Encode_StartACMFile', bassenc_module))
if platform.system() == 'Darwin':
#HENCODE BASSENCDEF(BASS_Encode_StartCA)(DWORD handle, DWORD ftype, DWORD atype, DWORD flags, DWORD bitrate, ENCODEPROCEX *proc, void *user);
BASS_Encode_StartCA = func_type(HENCODE, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ENCODEPROCEX, ctypes.c_void_p)(('ENCODEPROCEX ', bassenc_module))
#HENCODE BASSENCDEF(BASS_Encode_StartCAFile)(DWORD handle, DWORD ftype, DWORD atype, DWORD flags, DWORD bitrate, const char *file);
BASS_Encode_StartCAFile = func_type(HENCODE, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_char_p)(('BASS_Encode_StartCAFile', bassenc_module))
#Broadcasting
#BOOL BASSENCDEF(BASS_Encode_CastInit)(HENCODE handle, const char *server, const char *pass, const char *content, const char *name, const char *url, const char *genre, const char *desc, const char *headers, DWORD bitrate, BOOL pub);
BASS_Encode_CastInit = func_type(ctypes.c_byte, HENCODE, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_char_p, ctypes.c_ulong, ctypes.c_byte)(('BASS_Encode_CastInit', bassenc_module))
#BOOL BASSENCDEF(BASS_Encode_CastSetTitle)(HENCODE handle, const char *title, const char *url);
BASS_Encode_CastSetTitle = func_type(ctypes.c_byte, HENCODE, ctypes.c_char_p, ctypes.c_char_p)(('BASS_Encode_CastSetTitle', bassenc_module))
#const char *BASSENCDEF(BASS_Encode_CastGetStats)(HENCODE handle, DWORD type, const char *pass);
BASS_Encode_CastGetStats = func_type(ctypes.c_char_p, HENCODE, ctypes.c_ulong, ctypes.c_char_p)(('BASS_Encode_CastGetStats', bassenc_module))
#Local audio server
#DWORD BASSENCDEF(BASS_Encode_ServerInit)(HENCODE handle, const char *port, DWORD buffer, DWORD burst, DWORD flags, ENCODECLIENTPROC *proc, void *user);
BASS_Encode_ServerInit = func_type(ctypes.c_ulong, HENCODE, ctypes.c_char_p, ctypes.c_ulong, ctypes.c_ulong, ctypes.c_ulong, ENCODECLIENTPROC, ctypes.c_void_p)(('BASS_Encode_ServerInit', bassenc_module))
#BOOL BASSENCDEF(BASS_Encode_ServerKick)(HENCODE handle, const char *client);
BASS_Encode_ServerKick = func_type(ctypes.c_byte, HENCODE, ctypes.c_char_p)(('BASS_Encode_ServerKick', bassenc_module))
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,580
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/sound_lib/effects/__init__.py
|
from __future__ import absolute_import
from .tempo import Tempo
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,581
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/timer.py
|
#Written by Stevo.
import time
def ms():
    """Return the current wall-clock time in milliseconds."""
    return 1000 * time.time()
class timer:
    """A pausable millisecond stopwatch (written by Stevo)."""

    def __init__(self):
        self.starttime = ms()
        self.paused_at_time = 0
        self.paused = False

    @property
    def elapsed(self):
        """Milliseconds elapsed; frozen at the pause point while paused."""
        if self.paused:
            return self.paused_at_time
        return ms() - self.starttime

    def restart(self):
        self.starttime = ms()

    def set_time(self, x):
        """Shift the clock by *x* ms.  Note: pass a negative value to ADD
        time (handy for punishing a player); positive values decrease it."""
        self.starttime += x

    def pause(self):
        """Freeze the elapsed value; no-op if already paused."""
        if self.paused:
            return
        self.paused = True
        self.paused_at_time = ms() - self.starttime

    def resume(self):
        """Continue counting from the frozen value; no-op if not paused."""
        if not self.paused:
            return
        self.paused = False
        self.starttime = ms() - self.paused_at_time
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,582
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/Main.py
|
from Player import *
from Map import *
from Menu import *
from Window import *
from Menus import *
# Configure the shared sound pool: all sound files live under sounds/ with
# the .ogg extension.  (`sp` presumably arrives via Player's star-import of
# sound_pool — verify if imports change.)
sp.p.set_pack_name("sounds/")
sp.p.set_ext(".ogg")
class main_app:
    """Top-level application: creates the window, menu, player and map,
    then immediately enters the main menu."""

    def __init__(self):
        self.window_handler = Window("Testing app")
        self.menu_handler = Menu()
        self.player = Player()
        self.map_handler = Map(0, 0, 0, 100, 100, 100)
        self.main_menu_logic()

    def main_menu_logic(self):
        """Run the main menu and act on the selected item."""
        choice = mainmenu(self.menu_handler)
        if choice == 0:
            self.mainloop()
        elif choice == 1:
            pygame.quit()
            print("Exitting")
            exit()

    def mainloop(self):
        """Poll input and refresh the display forever."""
        while True:
            self.contkeyloop()
            pygame.display.update()
            # Sleep for 2 milliseconds to relieve our system.
            pygame.time.wait(2)

    def key_pressed(self, key):
        return pygame.key.get_pressed()[key]

    def contkeyloop(self):
        """Handle one batch of pygame events, then continuous movement keys."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:  # user pressed the close button
                pygame.quit()
                exit()
            if event.type == pygame.KEYDOWN and event.key == pl.K_c:
                speak(self.player.get_string_coordinates())
        if not self.player.can_move():
            return
        if self.key_pressed(pl.K_UP) and self.player.y < self.map_handler.get_max_y():
            self.player.move(1)
        elif self.key_pressed(pl.K_RIGHT) and self.player.x < self.map_handler.get_max_x():
            self.player.move(2)
        elif self.key_pressed(pl.K_DOWN) and self.player.y > self.map_handler.get_min_y():
            self.player.move(3)
        elif self.key_pressed(pl.K_LEFT) and self.player.x > self.map_handler.get_min_x():
            self.player.move(4)
m_a = main_app()
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,583
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/Window.py
|
import pygame
class Window:
    """Thin wrapper that initializes pygame and opens the 800x600 game window."""
    def __init__(self, caption):
        pygame.init()
        self.screen = pygame.display.set_mode((800, 600)) #Screen size
        pygame.display.set_caption(str(caption))
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,584
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/Sound_positioning.py
|
#99.9% of this was not done by me, Amerikranian.
#The functions and formulas for the sounds were written by Carter Tem to the best of my knowledge
#The only thing I, Amerikranian added is keeping the sound's pitch, because I thought it would be useful.
import math
def position_sound_1d(handle, listener_x, source_x, pan_step, volume_step):
    """Pan/attenuate *handle* along the x axis with neutral starting values."""
    position_sound_custom_1d(handle, listener_x, source_x, pan_step, volume_step,
                             start_pan=0.0, start_volume=0.0)
def position_sound_custom_1d(handle, listener_x, source_x, pan_step, volume_step, start_pan, start_volume):
    """Pan and attenuate *handle* based on its x offset from the listener.

    Pan shifts left/right by `offset * pan_step`; volume drops by
    `offset * volume_step`.  Pan is clamped to [-100, 100] and volume to a
    floor of -100 (no upper clamp).  Properties are only written when they
    actually change.
    """
    offset = abs(listener_x - source_x)
    pan = start_pan
    volume = start_volume - offset * volume_step
    if source_x < listener_x:
        pan -= offset * pan_step
    elif source_x > listener_x:
        pan += offset * pan_step
    # Clamp out-of-range results.
    pan = max(-100, min(100, pan))
    if volume < -100:
        volume = -100
    # Only touch the handle when the value actually changed.
    if handle.pan != pan:
        handle.pan = pan
    if handle.volume != volume:
        handle.volume = volume
def position_sound_2d(handle, listener_x, listener_y, source_x, source_y, theta, pan_step, volume_step, behind_pitch_decrease, keep_pitch=False):
    """Position *handle* in 2-D with neutral starting pan/volume and pitch 100."""
    position_sound_custom_2d(handle, listener_x, listener_y, source_x, source_y,
                             theta, pan_step, volume_step, behind_pitch_decrease,
                             0.0, 0.0, 100.0, keep_pitch)
def position_sound_custom_2d(handle, listener_x, listener_y, source_x, source_y, theta, pan_step, volume_step, behind_pitch_decrease, start_pan, start_volume, start_pitch, keep_pitch):
    """Position *handle* in the x/y plane relative to the listener.

    Pan follows the x axis, volume falls off with distance on both axes,
    and pitch drops by abs(behind_pitch_decrease) when the source is behind
    the listener (smaller y).  *theta* > 0 first rotates the source around
    the listener.  Pan is clamped to [-100, 100], volume to a floor of -100,
    pitch to a floor of 0 (the highest possible pitch is hard to determine,
    so no upper clamp).  When *keep_pitch* is true the pitch is left alone.
    """
    if theta > 0.0:
        dx = source_x - listener_x
        dy = source_y - listener_y
        source_x = math.cos(theta) * dx - math.sin(theta) * dy + listener_x
        source_y = math.sin(theta) * dx + math.cos(theta) * dy + listener_y
    pan = start_pan
    volume = start_volume
    pitch = start_pitch
    if source_x < listener_x:
        dist_x = listener_x - source_x
        pan -= dist_x * pan_step
        volume -= dist_x * volume_step
    elif source_x > listener_x:
        dist_x = source_x - listener_x
        pan += dist_x * pan_step
        volume -= dist_x * volume_step
    if source_y < listener_y:
        # Source is behind the listener: muffle the pitch too.
        pitch -= abs(behind_pitch_decrease)
        volume -= (listener_y - source_y) * volume_step
    elif source_y > listener_y:
        volume -= (source_y - listener_y) * volume_step
    pan = max(-100, min(100, pan))
    if volume < -100:
        volume = -100
    if pitch < 0:
        pitch = 0
    # Only write properties that actually changed.
    if handle.pan != pan:
        handle.pan = pan
    if handle.volume != volume:
        handle.volume = volume
    if not keep_pitch and handle.pitch != pitch:
        handle.pitch = pitch
def position_sound_3d(handle, listener_x, listener_y, listener_z, source_x, source_y, source_z, theta, pan_step, volume_step, behind_pitch_decrease, keep_pitch=False):
    """Position *handle* in 3-D with neutral starting pan/volume and pitch 100."""
    position_sound_custom_3d(handle, listener_x, listener_y, listener_z,
                             source_x, source_y, source_z, theta,
                             pan_step, volume_step, behind_pitch_decrease,
                             0.0, 0.0, 100.0, keep_pitch)
def position_sound_custom_3d(handle, listener_x, listener_y, listener_z, source_x, source_y, source_z, theta, pan_step, volume_step, behind_pitch_decrease, start_pan, start_volume, start_pitch, keep_pitch):
    """Position *handle* in 3-D space relative to the listener.

    Pan follows the x axis; volume falls off with distance on every axis;
    pitch drops by abs(behind_pitch_decrease) when the source is behind the
    listener (smaller y) and again when it is below (smaller z).  *theta* > 0
    first rotates the source around the listener in the x/y plane.  Pan is
    clamped to [-100, 100], volume to a floor of -100, pitch to a floor of 0
    (the highest possible pitch is hard to determine, so no upper clamp).
    When *keep_pitch* is true the pitch is left alone.
    """
    if theta > 0.0:
        dx = source_x - listener_x
        dy = source_y - listener_y
        source_x = math.cos(theta) * dx - math.sin(theta) * dy + listener_x
        source_y = math.sin(theta) * dx + math.cos(theta) * dy + listener_y
    pan = start_pan
    volume = start_volume
    pitch = start_pitch
    if source_x < listener_x:
        dist_x = listener_x - source_x
        pan -= dist_x * pan_step
        volume -= dist_x * volume_step
    elif source_x > listener_x:
        dist_x = source_x - listener_x
        pan += dist_x * pan_step
        volume -= dist_x * volume_step
    if source_y < listener_y:
        pitch -= abs(behind_pitch_decrease)
        volume -= (listener_y - source_y) * volume_step
    elif source_y > listener_y:
        volume -= (source_y - listener_y) * volume_step
    if source_z < listener_z:
        pitch -= abs(behind_pitch_decrease)
        volume -= (listener_z - source_z) * volume_step
    elif source_z > listener_z:
        volume -= (source_z - listener_z) * volume_step
    pan = max(-100, min(100, pan))
    if volume < -100:
        volume = -100
    if pitch < 0:
        pitch = 0
    # Only write properties that actually changed.
    if handle.pan != pan:
        handle.pan = pan
    if handle.volume != volume:
        handle.volume = volume
    if not keep_pitch and handle.pitch != pitch:
        handle.pitch = pitch
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,585
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/Menus.py
|
#This should be used only for placing additional menus
#m must be the object of the menu class
def mainmenu(m):
    """Build and run the game's main menu on menu object *m*.

    Returns whatever m.run() returns (the selected item index).
    """
    m.reset()
    for label in ("Start game", "Exit"):
        m.add_item(label)
    m.add_music("music/mainMenuMusic")
    m.set_intro("Welcome!")
    m.set_move_sound("clicks/click")
    return m.run()
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,586
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/speech.py
|
from accessible_output2.outputs.auto import Auto
from accessible_output2.outputs import auto
def find_tts():
    """Return an active known screen reader output, or None.

    Scans every output; when several are active, the last one in the
    outputs list wins.
    """
    screen_readers = ["Dolphin", "jaws", "NVDA", "Unnamed Output", "VoiceOver", "Window-Eyes"]
    sr = None
    for possible_output in Auto().outputs:
        if possible_output.name not in screen_readers:  # TODO: revise for SAPI
            continue
        if possible_output.is_active():
            sr = possible_output
    return sr
def find_reader():
    """Return an active screen reader output, falling back to any other
    active output when no known screen reader is running.

    Returns None when nothing at all is active.
    """
    screen_readers = ["Dolphin", "jaws", "NVDA", "Unnamed Output", "VoiceOver", "Window-Eyes"]
    outputs = Auto().outputs
    sr = None
    # First pass: prefer a known screen reader that is currently active.
    for possible_output in outputs:
        if possible_output.name not in screen_readers:
            continue
        if possible_output.is_active():
            sr = possible_output
    # Second pass: fall back to any other active output.
    if sr is None:
        for possible_output in outputs:
            if possible_output.name in screen_readers:
                continue
            if possible_output.is_active():
                sr = possible_output
    return sr
def speak(text, interrupt=True):
    """Speak *text* through the cached output, re-detecting the output if it
    went away.

    Robustness fix: when no output can be found at all, the original crashed
    with AttributeError on None; now the call is silently skipped (best
    effort) instead.
    """
    global tts
    if not tts or not tts.is_active():
        tts = find_reader()
    if tts is None:
        # No usable speech output available - fail soft rather than crash.
        return
    tts.speak(text, interrupt)
# Cached speech output; lazily (re)initialized by speak().
tts = None
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,587
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/sound_lib/external/__init__.py
|
from __future__ import absolute_import
import platform
from . import pybassopus
if platform.system() == 'Windows':
from . import pybasswma
if platform.system() != 'Darwin':
from . import pybass_aac
from . import pybass_alac
from . import pybassflac
from . import pybassmidi
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,588
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/sound_lib/effects/bass_fx.py
|
from sound_lib.external import pybass_fx
from effect import SoundEffect
class Volume(SoundEffect):
    """Volume effect: wraps the BASS_FX BFX_VOLUME effect/struct pair."""
    effect_type = pybass_fx.BASS_FX_BFX_VOLUME
    struct = pybass_fx.BASS_BFX_VOLUME
class PeakEq(SoundEffect):
    """Peaking EQ effect: wraps the BASS_FX BFX_PEAKEQ effect/struct pair."""
    effect_type = pybass_fx.BASS_FX_BFX_PEAKEQ
    struct = pybass_fx.BASS_BFX_PEAKEQ
class DAmp(SoundEffect):
    """Dynamic amplification effect: wraps the BASS_FX BFX_DAMP effect/struct pair."""
    effect_type = pybass_fx.BASS_FX_BFX_DAMP
    struct = pybass_fx.BASS_BFX_DAMP
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,589
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/dialog.py
|
import wx,pygame, pygame.locals as pl
from speech import speak
def dialog(text):
    """Speak *text* and block until the player presses enter; any arrow key
    repeats the text."""
    speak(text)
    repeat_keys = (pl.K_UP, pl.K_RIGHT, pl.K_DOWN, pl.K_LEFT)
    while True:
        for event in pygame.event.get():
            if event.type != pygame.KEYDOWN:
                continue
            if event.key == pl.K_RETURN:
                return
            if event.key in repeat_keys:
                speak(text)
def text_dialog(title, text):
    """Show a blocking wx message box containing *text*, titled *title*."""
    wx.MessageBox(text, title, wx.OK)
def question(title, text, parent=None):
    """Ask a yes/no question via a wx dialog; return 0 for yes, -1 otherwise."""
    dlg = wx.MessageDialog(parent, text, title, wx.YES_NO | wx.ICON_QUESTION)
    answer = dlg.ShowModal()
    dlg.Destroy()
    return 0 if answer == wx.ID_YES else -1
def input_box(parent=None, message='', caption='', default_value=''):
    """Prompt for a line of text and return whatever the user typed.

    Bug fix: wx.TextEntryDialog takes (parent, message, caption, value);
    the original call passed *caption* and *message* swapped, so the prompt
    text appeared as the window title and vice versa.
    """
    dlg = wx.TextEntryDialog(parent, message, caption, value=default_value)
    dlg.ShowModal()
    result = dlg.GetValue()
    dlg.Destroy()
    return result
app = wx.App()
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,590
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/sound_pool.py
|
#Credit to Carter Tem who wrote the fixed Sound positioning functions and the actual Sound class.
#Without him, we would have no Sound pool at all.
#Please note: The conversion was done with the 3D Sound_pool by Sam Tupy.
#The package can be found at the following URL: http://www.samtupy.com/dev/simple_3d_Sound_pool.zip
#I did not create any part of this project, I simply acted as a translator of sorts.
#If you wish to change any of the code below for optimizations and such feel free to do so, just let me, Amerikranian know.
import Sound, Sound_positioning
class Sound_pool_item:
def __init__(self, **kwargbs):
self.handle=Sound.sound()
self.filename= kwargbs.get("filename")
self.x=kwargbs.get("x", 0)
self.y=kwargbs.get("y", 0)
self.z=kwargbs.get("z", 0)
self.looping=kwargbs.get("looping", 0)
self.pan_step=kwargbs.get("pan_step", 0)
self.volume_step=kwargbs.get("volume_step", 0)
self.behind_pitch_decrease=kwargbs.get("behind_pitch_decrease", 0)
self.start_pan=kwargbs.get("start_pan", 0)
self.start_volume=kwargbs.get("start_volume", 0)
self.start_pitch=kwargbs.get("start_pitch", 0)
self.start_offset=kwargbs.get("start_offset",0)
self.upper_range=kwargbs.get("upper_range", 0)
self.lower_range=kwargbs.get("lower_range", 0)
self.left_range=kwargbs.get("left_range", 0)
self.right_range=kwargbs.get("right_range", 0)
self.backward_range=kwargbs.get("backward_range", 0)
self.forward_range=kwargbs.get("forward_range", 0)
self.looping=kwargbs.get("looping", False)
self.is_3d=kwargbs.get("is_3d", False)
self.stationary=kwargbs.get("stationary", False)
self.persistent=kwargbs.get("persistent", False)
self.paused=kwargbs.get("paused", False)
def reset(self,pack=""):
self.__init__()
def update(self,listener_x,listener_y,listener_z,max_distance):
if max_distance>0 and self.looping:
total_distance=self.get_total_distance(listener_x,listener_y,listener_z)
if total_distance>max_distance and self.handle.handle!=None:
self.handle.close()
return
if total_distance<=max_distance and self.handle.handle==None:
try:
self.handle.load(self.filename)
except:
pass
return
if self.handle.handle.position>0: self.handle.handle.position=self.start_offset
self.update_listener_position(listener_x,listener_y,listener_z)
if not self.paused: self.handle.play_looped()
return
self.update_listener_position(listener_x,listener_y,listener_z)
def update_listener_position(self,listener_x,listener_y,listener_z):
if self.handle.handle==None: return
if self.stationary: return
delta_left=self.x-self.left_range
delta_right=self.x+self.right_range
delta_backward=self.y-self.backward_range
delta_forward=self.y+self.forward_range
delta_upper=self.z+self.upper_range
delta_lower=self.z-self.lower_range
True_x=listener_x
True_y=listener_y
True_z=listener_z
if not self.is_3d:
if listener_x>=delta_left and listener_x<=delta_right:
Sound_positioning.position_sound_custom_1d(self.handle, listener_x, listener_x, self.pan_step, self.volume_step, self.start_pan, self.start_volume)
return
if listener_x<delta_left: Sound_positioning.position_sound_custom_1d(self.handle, listener_x, delta_left, self.pan_step, self.volume_step, self.start_pan, self.start_volume)
if listener_x>delta_right: Sound_positioning.position_sound_custom_1d(self.handle, listener_x, delta_right, self.pan_step, self.volume_step, self.start_pan, self.start_volume)
return
if listener_x<delta_left: True_x=delta_left
elif listener_x>delta_right: True_x=delta_right
if listener_y<delta_backward: True_y=delta_backward
elif listener_y>delta_forward: True_y=delta_forward
if listener_z<delta_lower: True_z=delta_lower
elif listener_z>delta_upper: True_z=delta_upper
Sound_positioning.position_sound_custom_3d(self.handle, listener_x, listener_y, listener_z, True_x, True_y, True_z,0,self.pan_step, self.volume_step, self.behind_pitch_decrease, self.start_pan, self.start_volume, self.start_pitch,False)
def get_total_distance(self,listener_x,listener_y,listener_z):
if self.stationary: return 0
delta_left=self.x-self.left_range
delta_right=self.x+self.right_range
delta_backward=self.y-self.backward_range
delta_forward=self.y+self.forward_range
delta_lower=self.z-self.lower_range
delta_upper=self.z+self.upper_range
True_x=listener_x
True_y=listener_y
True_z=listener_z
distance=0
if not self.is_3d:
if listener_x>=delta_left and listener_x<=delta_right: return distance
if listener_x<delta_left: distance=delta_left-listener_x
if listener_x>delta_right: distance=listener_x-delta_right
return distance
if listener_x<delta_left: True_x=delta_left
elif listener_x>delta_right: True_x=delta_right
if listener_y<delta_backward: True_y=delta_backward
elif listener_y>delta_forward: True_y=delta_forward
if listener_z<delta_lower: True_z=delta_lower
elif listener_z>delta_upper: True_z=delta_upper
if listener_x<True_x: distance=(True_x-listener_x)
if listener_x>True_x: distance=(listener_x-True_x)
if listener_y<True_y: distance+=(True_y-listener_y)
if listener_y>True_y: distance+=(listener_y-True_y)
if listener_z<True_z: distance+=(True_z-listener_z)
if listener_z>True_z: distance+=(listener_z-True_z)
return distance
class Sound_pool:
def __init__(self):
self.items=[] #The placeholder for the items
self.max_distance=40 #The max distance the Sounds should play at
self.pan_step=20.0
self.volume_step=2.0
self.behind_pitch_decrease=4.0
self.last_listener_x=0
self.last_listener_y=0
self.last_listener_z=0
self.clean_frequency=3 #How often the pool should be cleaned
self.packname = ""
self.ext = ""
def play_stationary(self,filename,looping,persistent=False):
return self.play_stationary_extended(filename, looping, 0, 0, 0, 100, persistent)
def play_stationary_extended(self,filename, looping, offset, start_pan,start_volume,start_pitch,persistent=False):
self.clean_frequency-=1
if self.clean_frequency<=0: self.clean_unused()
s=Sound_pool_item(filename=filename,looping=looping,start_offset=offset,start_pan=start_pan,start_volume=start_volume,start_pitch=start_pitch,persistent=persistent,stationary=True)
try:
s.handle.load(self.packname+filename + self.ext)
except:
s.reset()
return -1
if s.start_offset>0: s.handle.position=s.start_offset
if start_pan!=0.0: s.handle.pan=start_pan
if start_volume<0.0: s.handle.volume=start_volume
s.handle.pitch=start_pitch
if looping: s.handle.play_looped()
else: s.handle.play()
self.items.append(s)
return s
def play_1d(self,filename,listener_x,Sound_x,looping,persistent=False):
return self.play_extended_1d(filename, listener_x, Sound_x, 0, 0, looping, 0, 0, 0, 100, persistent)
def play_extended_1d(self, filename, listener_x, Sound_x, left_range, right_range, looping, offset = 0.0, start_pan = 0.0, start_volume = 0.0, start_pitch = 100.0, persistent=False):
self.clean_frequency-=1
if self.clean_frequency<=0: self.clean_unused()
s=Sound_pool_item(filename=filename,x=Sound_x,looping=looping,stationary=True,start_pan=start_pan,start_volume=start_volume,start_pitch=start_pitch,persistent=persistent,pan_step=self.pan_step,volume_step=self.volume_step,behind_pitch_decrease=0.0,left_range=left_range,right_range=right_range,backward_range=0,forward_range=0,is_3d=False,start_offset=offset)
if self.max_distance>0 and s.get_total_distance(listener_x, 0, 0)>self.max_distance:
if not looping:
s.reset()
return -2
else:
self.last_listener_x=listener_x
s.handle.pitch=start_pitch
s.update(self.listener_x, 0, 0, self.max_distance)
self.items.append(s)
return s
try:
s.handle.load(self.packname+filename + self.ext)
except:
s.reset()
return -1
if s.start_offset>0: s.handle.position=s.start_offset
s.handle.pitch=start_pitch
self.last_listener_x=listener_x
s.update(listener_x, 0, 0, self.max_distance)
if looping: s.handle.play_looped()
else: s.handle.play()
self.items.append(s)
return s
def play_2d(self,filename, listener_x, listener_y, Sound_x, Sound_y, looping, persistent=False):
return self.play_extended_2d(filename, listener_x, listener_y, Sound_x, Sound_y, 0, 0, 0, 0, looping, 0, 0, 0, 100, persistent)
def play_extended_2d(self,filename, listener_x, listener_y, Sound_x, Sound_y, left_range, right_range, backward_range, forward_range, looping, offset = 0.0, start_pan = 0.0, start_volume = 0.0, start_pitch = 100, persistent=False):
self.clean_frequency-=1
if self.clean_frequency<=0: self.clean_unused()
s=Sound_pool_item(filename=filename,x=Sound_x,y=Sound_y,looping=looping,start_pan=start_pan,start_volume=start_volume,start_pitch=start_pitch,persistent=persistent,pan_step=self.pan_step,volume_step=self.volume_step,behind_pitch_decrease=self.behind_pitch_decrease,left_range=left_range,right_range=right_range,backward_range=backward_range,forward_range=forward_range,is_3d=True,start_offset=offset)
if self.max_distance>0 and s.get_total_distance(listener_x, listener_y, 0)>self.max_distance:
if looping==False:
s.reset()
return -2
else:
self.last_listener_x=listener_x
self.last_listener_y=listener_y
s.update(listener_x, listener_y, 0, self.max_distance)
self.items.append(s)
return s
try:
s.handle.load(self.packname+filename + self.ext)
except:
s.reset()
return -1
if s.start_offset>0: s.handle.position=s.start_offset
self.last_listener_x=listener_x
self.last_listener_y=listener_y
s.update(listener_x, listener_y, 0, self.max_distance)
if looping: s.handle.play_looped()
else: s.handle.play()
self.items.append(s)
return s
def play_3d(self,filename, listener_x, listener_y, listener_z, Sound_x, Sound_y, Sound_z, looping, persistent=False):
return self.play_extended_3d(filename, listener_x, listener_y, listener_z, Sound_x, Sound_y, Sound_z, 0, 0, 0, 0, 0, 0, looping, 0, 0, 0, 100, persistent)
def play_extended_3d(self,filename, listener_x, listener_y, listener_z, Sound_x, Sound_y, Sound_z, left_range, right_range, backward_range, forward_range, upper_range, lower_range, looping, offset = 0.0, start_pan = 0.0, start_volume = 0.0, start_pitch = 100, persistent = False):
    """Create, position and start a pooled 3D sound.

    Returns the new Sound_pool_item on success, -2 when a non-looping sound
    is out of audible range (it is dropped), -1 when the file fails to load.
    Out-of-range looping sounds are kept in the pool so they can become
    audible when the listener approaches.
    """
    # Periodically garbage-collect finished one-shot sounds.
    self.clean_frequency-=1
    if self.clean_frequency<=0: self.clean_unused()
    s=Sound_pool_item(filename=filename,x=Sound_x,y=Sound_y,z=Sound_z,looping=looping,pan_step=self.pan_step,volume_step=self.volume_step,behind_pitch_decrease=self.behind_pitch_decrease,start_pan=start_pan,start_volume=start_volume,start_pitch=start_pitch,left_range=left_range,right_range=right_range,backward_range=backward_range,forward_range=forward_range,lower_range=lower_range,upper_range=upper_range,is_3d=True,persistent=persistent,start_offset=offset)
    if self.max_distance>0 and s.get_total_distance(listener_x, listener_y, listener_z)>self.max_distance:
        if looping==False:
            # A one-shot sound out of range would never be heard; drop it.
            s.reset()
            return -2
        else:
            # Keep the looping sound pooled; note the file is NOT loaded
            # here (the try block below is skipped on this path).
            self.last_listener_x=listener_x
            self.last_listener_y=listener_y
            self.last_listener_z=listener_z
            s.update(listener_x, listener_y, listener_z, self.max_distance)
            self.items.append(s)
            return s
    try:
        s.handle.load(self.packname+filename + self.ext)
    except:
        # NOTE(review): bare except hides the load error; caller only sees -1.
        s.reset()
        return -1
    if s.start_offset>0: s.handle.position=s.start_offset
    self.last_listener_x=listener_x
    self.last_listener_y=listener_y
    self.last_listener_z=listener_z
    # Apply initial pan/volume/pitch relative to the listener before starting.
    s.update(listener_x, listener_y, listener_z, self.max_distance)
    if looping: s.handle.play_looped()
    else: s.handle.play()
    self.items.append(s)
    return s
def Sound_is_active(self,s):
    """Return whether *s* still counts as alive in the pool.

    Looping sounds are always active (even while unloaded / out of range);
    a one-shot is inactive once its handle is gone or playback has stopped.
    """
    if s.looping:
        return True
    if s.handle==None:
        return False
    if not s.handle.is_playing:
        return False
    return True
def Sound_is_playing(self,s):
    """True when *s* is active and its underlying stream reports playback."""
    return False if not self.Sound_is_active(s) else s.handle.handle.is_playing
def pause_Sound(self,s):
    """Pause an active, not-yet-paused sound; returns True on success."""
    if not self.Sound_is_active(s) or s.paused:
        return False
    s.paused=True
    # Only stop the stream if it is actually running.
    if s.handle.handle.is_playing:
        s.handle.stop()
    return True
def resume_Sound(self,s):
    """Un-pause *s* and restart playback if it is within audible range.

    Out-of-range sounds merely release their stream handle and stay in the
    pool so they can come back once the listener approaches. Returns False
    when *s* was not paused, True otherwise.
    """
    if not s.paused: return False
    s.paused=False
    if self.max_distance>0 and s.get_total_distance(self.last_listener_x, self.last_listener_y, self.last_listener_z)>self.max_distance:
        if s.handle!=None: s.handle.close()
        return True
    s.update(self.last_listener_x, self.last_listener_y, self.last_listener_z, self.max_distance)
    # BUGFIX: was `self.items[s].handle.handle.is_playing` -- indexing the
    # item list with the item object itself raises TypeError; inspect the
    # item directly instead.
    if s.handle.handle!=None and not s.handle.handle.is_playing:
        if s.looping: s.handle.play_looped()
        else: s.handle.play()
    return True
def pause_all(self):
    """Pause every currently playing sound in the pool."""
    for itm in self.items:
        if self.Sound_is_playing(itm):
            self.pause_Sound(itm)
def resume_all(self):
    """Resume every pool entry that still owns a live stream handle."""
    for itm in self.items:
        if itm.handle.handle!=None:
            self.resume_Sound(itm)
def destroy_all(self):
    """Reset (and thereby silence) every sound in the pool."""
    for itm in self.items:
        itm.reset()
def update_listener_1d(self,listener_x):
    """1D listener move: delegate with y and z pinned to 0."""
    self.update_listener_3d(listener_x, 0, 0)
def update_listener_2d(self,listener_x, listener_y):
    """2D listener move: delegate with z pinned to 0."""
    self.update_listener_3d(listener_x, listener_y, 0)
def update_listener_3d(self,listener_x, listener_y, listener_z):
    """Record the listener position and re-pan/re-attenuate every pooled sound."""
    if not self.items:
        return
    self.last_listener_x=listener_x
    self.last_listener_y=listener_y
    self.last_listener_z=listener_z
    for itm in self.items:
        itm.update(listener_x, listener_y, listener_z, self.max_distance)
def update_Sound_1d(self,s, x):
    """Move *s* along the x axis only; y and z stay at 0."""
    return self.update_Sound_3d(s, x, 0, 0)
def update_Sound_2d(self,s, x, y):
    """Move *s* in the x/y plane; z stays at 0."""
    return self.update_Sound_3d(s, x, y, 0)
def update_Sound_3d(self,s, x, y, z):
    """Move *s* to (x, y, z) and refresh its attenuation against the last
    known listener position. Always returns True."""
    s.x, s.y, s.z = x, y, z
    s.update(self.last_listener_x, self.last_listener_y, self.last_listener_z, self.max_distance)
    return True
def update_Sound_start_values(self,s,start_pan, start_volume, start_pitch):
    """Change the base pan/volume/pitch of *s* and re-apply positioning.

    BUGFIX: the update() call used bare ``last_listener_*`` names, which
    raised NameError -- they are attributes of the pool (``self.``).
    Always returns True.
    """
    s.start_pan=start_pan
    s.start_volume=start_volume
    s.start_pitch=start_pitch
    s.update(self.last_listener_x, self.last_listener_y, self.last_listener_z, self.max_distance)
    if s.stationary and s.handle!=None:
        # Stationary sounds bypass positional attenuation: push raw values.
        s.handle.pan=start_pan
        s.handle.volume=start_volume
        s.handle.pitch=start_pitch
        return True
    # Non-3D sounds keep their pitch in sync manually.
    if s.is_3d==False and s.handle.pitch!=start_pitch: s.handle.pitch=start_pitch
    return True
def update_Sound_range_1d(self,s, left_range, right_range):
    """1D range update: delegate with all other ranges pinned to 0."""
    return self.update_Sound_range_3d(s, left_range, right_range, 0, 0, 0, 0)
def update_Sound_range_2d(self,s, left_range, right_range, backward_range, forward_range):
    """2D range update: delegate with vertical ranges pinned to 0."""
    return self.update_Sound_range_3d(s, left_range, right_range, backward_range, forward_range, 0, 0)
def update_Sound_range_3d(self,s, left_range, right_range, backward_range, forward_range, lower_range, upper_range):
    """Resize the box-shaped source extent of *s* and refresh its
    attenuation. Always returns True."""
    s.left_range, s.right_range = left_range, right_range
    s.backward_range, s.forward_range = backward_range, forward_range
    s.lower_range, s.upper_range = lower_range, upper_range
    s.update(self.last_listener_x, self.last_listener_y, self.last_listener_z, self.max_distance)
    return True
def destroy_sound(self,s):
    """Stop and release *s*. Always returns True."""
    s.reset()
    return True
def clean_unused(self):
    """Drop finished, non-persistent, non-looping sounds from the pool.

    BUGFIX: the original removed entries from self.items while iterating
    over it, which silently skipped the element following each removal.
    Iterating over a snapshot fixes that.
    """
    if len(self.items)==0: return  # clean_frequency deliberately untouched
    for itm in list(self.items):
        if itm.looping or itm.persistent:
            continue
        # Remove when the stream is gone, or it is stopped and not just paused
        # (precedence preserved from the original: A or (not B and not C)).
        if itm.handle.handle==None or not itm.handle.handle.is_playing and not itm.paused:
            self.items.remove(itm)
    self.clean_frequency=3
def set_pack_name(self, name):
    """Set the path prefix prepended to every loaded filename."""
    self.packname = name
def set_ext(self, extension):
    """Set the file extension appended to every loaded filename."""
    self.ext = extension
p=Sound_pool()
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,591
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/sound_lib/__init__.py
|
__author__ = 'Christopher Toth'
__version__ = 0.8
def find_datafiles():
    """Locate the bundled BASS shared libraries for the current platform.

    Returns a single-element list ``[(dest_dir, sources)]`` suitable for a
    distutils/py2exe-style ``data_files`` option: dest_dir is the relative
    install directory, sources the matching library files.
    """
    from glob import glob
    import os
    import platform
    import sound_lib
    lib_root = os.path.join(sound_lib.__path__[0], 'lib')
    # Pick the shared-library suffix for this OS (default: ELF .so).
    ext_by_system = {'Windows': '*.dll', 'Darwin': '*.dylib'}
    pattern = ext_by_system.get(platform.system(), '*.so')
    # macOS ships only the x86 tree; elsewhere follow the interpreter width.
    use_x86 = platform.architecture()[0] == '32bit' or platform.system() == 'Darwin'
    arch = 'x86' if use_x86 else 'x64'
    destination = os.path.join('sound_lib', 'lib', arch)
    sources = glob(os.path.join(lib_root, arch, pattern))
    return [(destination, sources)]
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,592
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/Sound.py
|
#Written By Carter Tem
#No part of this class was done by me, Amerikranian. This is Carter's work alone.
import math
import sound_lib
from sound_lib import output
from sound_lib import stream
o=output.Output()
class sound():
    """Thin wrapper around a sound_lib FileStream.

    Exposes volume on a log10-derived scale, pan in -100..100 units and
    pitch as a percentage of the file's native sample rate. All accessors
    return False when no file is loaded.
    """
    def __init__(self):
        # Underlying sound_lib stream; None until load() succeeds.
        self.handle=None
        # Native sample rate of the loaded file; 44100 placeholder until known.
        self.freq=44100
    def load(self,filename=""):
        """Load *filename*, releasing any previously loaded stream first."""
        if self.handle:
            self.close()
        self.handle =stream.FileStream(file=filename)
        self.freq=self.handle.get_frequency()
    def play(self):
        """Play once from the current position."""
        self.handle.looping=False
        self.handle.play()
    def play_wait(self):
        """Play once, blocking until playback finishes."""
        self.handle.looping=False
        self.handle.play_blocking()
    def play_looped(self):
        """Start playback with automatic restart at end of stream."""
        self.handle.looping=True
        # NOTE(review): self.looping is not initialized in __init__ and is
        # never read inside this class -- possibly vestigial; confirm.
        self.looping=True
        self.handle.play()
    def stop(self):
        """Stop playback and rewind to the start."""
        if self.handle and self.handle.is_playing:
            self.handle.stop()
            # NOTE(review): indentation reconstructed -- rewind assumed to
            # apply only when the stream was playing; confirm original intent.
            self.handle.set_position(0)
    @property
    def volume(self):
        """Current volume on a log scale; False when nothing is loaded."""
        if not self.handle:
            return False
        return round(math.log10(self.handle.volume)*20)
    @volume.setter
    def volume(self,value):
        # Inverse of the getter: value/20 decades back to a linear gain.
        if not self.handle:
            return False
        self.handle.set_volume(10**(float(value)/20))
    @property
    def pitch(self):
        """Playback rate as a percentage of the file's native frequency."""
        if not self.handle:
            return False
        return (self.handle.get_frequency()/self.freq)*100
    @pitch.setter
    def pitch(self, value):
        if not self.handle:
            return False
        self.handle.set_frequency((float(value)/100)*self.freq)
    @property
    def pan(self):
        """Stereo position scaled to -100..100 (BASS uses -1.0..1.0)."""
        if not self.handle:
            return False
        return self.handle.get_pan()*100
    @pan.setter
    def pan(self, value):
        if not self.handle:
            return False
        self.handle.set_pan(float(value)/100)
    def close(self):
        """Free the stream (if any) and reset this wrapper to pristine state."""
        if self.handle:
            self.handle.free()
        self.__init__()
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,593
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/Menu.py
|
import pygame, pygame.locals as pl, sound_pool as sp, dialog, timer
from speech import speak
class menu_exception(Exception):
    """Raised for invalid menu configuration (e.g. bad slider bounds)."""
    pass
class menu_item:
    """A single Menu entry: plain clickable text or a bounded slider."""
    def __init__(self,text,can_click=True,slider=False,sliderval=0,minval=0,maxval=0):
        # Unavailable items carry a spoken suffix so the player hears why
        # pressing enter does nothing.
        self.text = text if can_click else text + ", unavailable"
        self.can_click = can_click
        self.slider = slider
        self.sliderval = sliderval
        # Reject sliders whose initial value falls outside [minval, maxval].
        if not (minval <= self.sliderval <= maxval):
            raise menu_exception("Invalid argument for slider values.")
        self.minval = minval
        self.maxval = maxval
class Menu:
    """Self-voicing, keyboard-driven menu with optional music and sliders.

    Items are added with add_item(); run() enters a pygame event loop and
    returns the index of the chosen entry, -1 on escape (when allow_reset
    is set) or -2 when the menu is empty.
    """
    def __init__(self,allow_reset=False,allow_top=True,intro="",musicsound="",movesound="",music_volume=0,menuenter="",speak_intro_int=False):
        self.menu_choices = []            # list of menu_item
        self.allow_reset=allow_reset      # escape exits the menu with -1
        self.allow_top=allow_top          # wrap first<->last item
        self.musicsound=musicsound        # background music file ("" = none)
        self.movesound=movesound          # cursor movement sound
        self.intro=intro                  # spoken when the menu opens
        self.music_volume=music_volume
        self.menuenter=menuenter          # sound played on enter
        self.speak_menu_int=speak_intro_int  # interrupt speech for the intro
    def add_item(self,itemname,can_click=True,slider=False,sliderval=0,minval=0,maxval=0):
        """Append a new entry; see menu_item for the parameter meanings."""
        self.menu_choices.append(menu_item(itemname,can_click,slider,sliderval,minval,maxval))
    def reset(self,everything=False):
        """Clear items/intro/music, or re-run __init__ entirely."""
        if not everything:
            del self.menu_choices[:]
            self.intro=""
            self.musicsound=""
        else: self.__init__()
    def add_music(self, x):
        self.musicsound=x
    def set_music_volume(self,volume):
        self.music_volume=volume
    def return_text(self,key):
        """Return the text of item *key*, or -1 when out of range.

        BUGFIX: was ``self.len(menu_choices)`` and indexed with an undefined
        ``choice`` name -- both raised at runtime; now uses
        ``len(self.menu_choices)`` and the *key* parameter.
        """
        if key<0 or key>len(self.menu_choices)-1: return -1
        return self.menu_choices[key].text
    def set_move_sound(self,sound):
        self.movesound = sound
    def set_enter_sound(self,sound):
        # BUGFIX: previously assigned self.enter_sound, which nothing reads;
        # run() plays self.menuenter, so store the sound there.
        self.menuenter=sound
    def set_intro(self,intro):
        self.intro=intro
    def choose_text(self, choice):
        """Spoken representation of *choice*, including a slider's value."""
        if not choice.slider: return choice.text
        return choice.text+". Currently set to " + str(choice.sliderval)
    def run(self,updater = None):
        """Run the menu's event loop until a choice is made.

        :param updater: optional callable invoked every frame so the rest of
            the game keeps running while the menu is open.
        :returns: chosen item index, -1 on escape, -2 if the menu is empty.
        """
        if len(self.menu_choices)==0: return -2
        choice = 0
        if self.intro!="": speak(self.intro,self.speak_menu_int)
        if self.musicsound!="": msnd=sp.p.play_stationary_extended(self.musicsound,True,0,0,self.music_volume,100)
        speak(self.menu_choices[choice].text,False)
        while 1:
            if updater: updater() #Menus that still want the game to go on
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pl.K_UP and choice>0:
                        choice-=1
                        if self.movesound!="": sp.p.play_stationary(self.movesound,False)
                        speak(self.choose_text(self.menu_choices[choice]))
                    elif event.key == pl.K_DOWN and choice<len(self.menu_choices)-1:
                        choice+=1
                        if self.movesound!="": sp.p.play_stationary(self.movesound,False)
                        speak(self.choose_text(self.menu_choices[choice]))
                    # Page up/down adjust music volume while the menu is open.
                    # NOTE(review): the `!=0` guard stops raising volume once
                    # it hits exactly 0 -- presumably 0 is full volume here.
                    elif event.key==pl.K_PAGEUP and self.musicsound!="" and msnd.handle.volume!=0:
                        msnd.handle.volume+=5
                    elif event.key==pl.K_PAGEDOWN and self.musicsound!="" and msnd.handle.volume-5>=-100:
                        msnd.handle.volume-=5
                    # Wrap-around navigation (only when allow_top is set).
                    elif self.allow_top and event.key==pl.K_UP and choice==0:
                        if self.movesound!="": sp.p.play_stationary(self.movesound,False)
                        choice=len(self.menu_choices)-1
                        speak(self.choose_text(self.menu_choices[choice]))
                    elif self.allow_top and event.key==pl.K_DOWN and choice==len(self.menu_choices)-1:
                        if self.movesound!="": sp.p.play_stationary(self.movesound,False)
                        choice=0
                        speak(self.choose_text(self.menu_choices[choice]))
                    elif event.key==pl.K_RETURN:
                        if self.menuenter!="": sp.p.play_stationary(self.menuenter,False)
                        if self.menu_choices[choice].can_click and not self.menu_choices[choice].slider:
                            if self.musicsound!="": self.fade(msnd, 2)
                            return choice
                        elif self.menu_choices[choice].slider: speak("This is a slider. Use left and right arrows to change it's value.")
                        else: dialog.dlg("Unavailable.")
                    # Left/right adjust the focused slider within its bounds.
                    elif event.key==pl.K_RIGHT and self.menu_choices[choice].slider:
                        if self.menu_choices[choice].sliderval==self.menu_choices[choice].maxval: speak("Unable to go higher.")
                        else:
                            self.menu_choices[choice].sliderval+=1
                            speak(str(self.menu_choices[choice].sliderval))
                    elif event.key==pl.K_LEFT and self.menu_choices[choice].slider:
                        if self.menu_choices[choice].sliderval==self.menu_choices[choice].minval: speak("Unable to go lower.")
                        else:
                            self.menu_choices[choice].sliderval-=1
                            speak(str(self.menu_choices[choice].sliderval))
                    elif self.allow_reset and event.key==pl.K_ESCAPE:
                        if self.musicsound!="": self.fade(msnd,3)
                        return -1
            pygame.display.update()
            pygame.time.wait(2)
    def fade(self,handle,ftime):
        """Fade *handle* down to silence (one step per *ftime* seconds of the
        timer) and destroy it. Int sentinels from the sound pool are ignored."""
        if type(handle) is int: return
        tmr=timer.timer()
        while handle.handle.volume!=-100:
            if tmr.elapsed>=ftime:
                tmr.restart()
                handle.handle.volume-=1
        sp.p.destroy_sound(handle)
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,594
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/sound_lib/external/pybassflac.py
|
from __future__ import absolute_import
# Copyright(c) Max Kolosov 2009 maxkolosov@inbox.ru
# http://vosolok2008.narod.ru
# BSD license
__version__ = '0.1'
__versionTime__ = '2009-11-15'
__author__ = 'Max Kolosov <maxkolosov@inbox.ru>'
__doc__ = '''
pybassflac.py - is ctypes python module for
BASSFLAC - extension to the BASS audio library,
enabling the playing of FLAC (Free Lossless Audio Codec) encoded files.
'''
import os, sys, ctypes
from . import pybass
from . paths import x86_path, x64_path
import libloader
# Load the bassflac shared library matching the current architecture.
bassflac_module = libloader.load_library('bassflac', x86_path=x86_path, x64_path=x64_path)
func_type = libloader.get_functype()
#Register the plugin with the Bass plugin system.
pybass.BASS_PluginLoad(libloader.find_library_path('bassflac', x86_path=x86_path, x64_path=x64_path), 0)
# Local aliases for the pybass ctypes types used in the prototypes below.
QWORD = pybass.QWORD
HSTREAM = pybass.HSTREAM
DOWNLOADPROC = pybass.DOWNLOADPROC
BASS_FILEPROCS = pybass.BASS_FILEPROCS
# BASS_CHANNELINFO type
BASS_CTYPE_STREAM_FLAC = 0x10900
BASS_CTYPE_STREAM_FLAC_OGG = 0x10901
# Each binding mirrors the C prototype quoted in the comment above it.
#HSTREAM BASSFLACDEF(BASS_FLAC_StreamCreateFile)(BOOL mem, const void *file, QWORD offset, QWORD length, DWORD flags);
BASS_FLAC_StreamCreateFile = func_type(HSTREAM, ctypes.c_byte, ctypes.c_void_p, QWORD, QWORD, ctypes.c_ulong)(('BASS_FLAC_StreamCreateFile', bassflac_module))
#HSTREAM BASSFLACDEF(BASS_FLAC_StreamCreateURL)(const char *url, DWORD offset, DWORD flags, DOWNLOADPROC *proc, void *user);
BASS_FLAC_StreamCreateURL = func_type(HSTREAM, ctypes.c_char_p, ctypes.c_ulong, ctypes.c_ulong, DOWNLOADPROC, ctypes.c_void_p)(('BASS_FLAC_StreamCreateURL', bassflac_module))
#HSTREAM BASSFLACDEF(BASS_FLAC_StreamCreateFileUser)(DWORD system, DWORD flags, const BASS_FILEPROCS *procs, void *user);
BASS_FLAC_StreamCreateFileUser = func_type(HSTREAM, ctypes.c_ulong, ctypes.c_ulong, ctypes.POINTER(BASS_FILEPROCS), ctypes.c_void_p)(('BASS_FLAC_StreamCreateFileUser', bassflac_module))
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,595
|
Amerikranian/AudioGame-Sample-with-Pyglet
|
refs/heads/master
|
/src/sound_lib/encoder.py
|
from external import pybass, pybassenc
from main import bass_call, bass_call_0, FlagObject
class Encoder(FlagObject):
    """Wraps a BASSenc encoder attached to a source channel.

    Boolean keyword arguments are translated into BASS_ENCODE_* bit flags
    via FlagObject.flags_for; the encoder is started immediately in
    __init__ with BASS_Encode_Start.
    """
    def setup_flag_mapping(self):
        # Maps keyword names to BASS_ENCODE_* constants.
        #super(Encoder, self).setup_flag_mapping()
        self.flag_mapping = {
            'pcm': pybassenc.BASS_ENCODE_PCM,
            'no_header': pybassenc.BASS_ENCODE_NOHEAD,
            'rf64': pybassenc.BASS_ENCODE_RF64,
            'big_endian': pybassenc.BASS_ENCODE_BIGEND,
            'fp_8bit': pybassenc.BASS_ENCODE_FP_8BIT,
            'fp_16bit': pybassenc.BASS_ENCODE_FP_16BIT,
            'fp_24bit': pybassenc.BASS_ENCODE_FP_24BIT,
            'fp_32bit': pybassenc.BASS_ENCODE_FP_32BIT,
            'queue': pybassenc.BASS_ENCODE_QUEUE,
            'limit': pybassenc.BASS_ENCODE_LIMIT,
            'no_limit': pybassenc.BASS_ENCODE_CAST_NOLIMIT,
            'pause': pybassenc.BASS_ENCODE_PAUSE,
            'autofree': pybassenc.BASS_ENCODE_AUTOFREE,
            'unicode': pybass.BASS_UNICODE,
        }
    def __init__(self, source, command_line, pcm=False, no_header=False, rf64=False, big_endian=False, fp_8bit=False, fp_16bit=False, fp_24bit=False, fp_32bit=False, queue=False, limit=False, no_limit=False, pause=True, autofree=False, callback=None, user=None):
        """Start encoding *source* through *command_line*; flags as keywords."""
        self.setup_flag_mapping()
        flags = self.flags_for(pcm=pcm, no_header=no_header, rf64=rf64, big_endian=big_endian, fp_8bit=fp_8bit, fp_16bit=fp_16bit, fp_24bit=fp_24bit, fp_32bit=fp_32bit, queue=queue, limit=limit, no_limit=no_limit, pause=pause, autofree=autofree) #fwiw!
        self.source = source
        source_handle = source.handle
        if callback is None:
            callback = lambda *a: None
        callback = pybassenc.ENCODEPROC(callback)
        # Keep a reference to the ctypes callback wrapper so it is not
        # garbage-collected while BASS may still invoke it.
        self.callback = callback
        self.handle = bass_call(pybassenc.BASS_Encode_Start, source_handle, command_line, flags, callback, user)
    @property
    def paused(self):
        # True while the encoder exists but is in the paused state.
        return bass_call_0(pybassenc.BASS_Encode_IsActive, self.handle) == pybass.BASS_ACTIVE_PAUSED
    @paused.setter
    def paused(self, paused):
        return bass_call(pybassenc.BASS_Encode_SetPaused, self.handle, paused)
    def is_stopped(self):
        """True once the encoder is no longer active."""
        return bass_call_0(pybassenc.BASS_Encode_IsActive, self.handle) == pybass.BASS_ACTIVE_STOPPED
    def stop(self):
        """Stop encoding and free the encoder."""
        return bass_call(pybassenc.BASS_Encode_Stop, self.handle)
class BroadcastEncoder(Encoder):
    """Casts an existing Encoder's output to a Shoutcast/Icecast server.

    NOTE(review): __init__ does not call super().__init__ and never assigns
    self.handle, yet get_stats() reads self.handle -- looks like a latent
    bug inherited from the parent's attribute contract; confirm intended
    attribute before relying on get_stats().
    """
    def __init__(self, source_encoder, server, password, content, name=None, url=None, genre=None, description=None, headers=None, bitrate=0, public=False):
        """Initialize the cast on *source_encoder* toward *server*.

        *content* may be 'mp3', 'ogg' or 'aac' (mapped to the BASS constant)
        or a raw BASS_ENCODE_TYPE_* value.
        """
        contents = {
            'mp3': pybassenc.BASS_ENCODE_TYPE_MP3,
            'ogg': pybassenc.BASS_ENCODE_TYPE_OGG,
            'aac': pybassenc.BASS_ENCODE_TYPE_AAC
        }
        if content in contents:
            content = contents[content]
        self.source_encoder = source_encoder
        handle = source_encoder.handle
        self.server = server
        self.password = password
        self.status = bass_call(pybassenc.BASS_Encode_CastInit, handle, server, password, content, name, url, genre, description, headers, bitrate, public)
    def set_title(self, title=None, url=None):
        """Update the stream title (and optional URL) on the cast server."""
        return bass_call(pybassenc.BASS_Encode_CastSetTitle, self.source_encoder.handle, title, url)
    def get_stats(self, type, password=None):
        """Fetch server statistics; *type* is 'shoutcast', 'icecast' or
        'icecast_server' (or a raw BASS_ENCODE_STATS_* value)."""
        types = {
            'shoutcast': pybassenc.BASS_ENCODE_STATS_SHOUT,
            'icecast': pybassenc.BASS_ENCODE_STATS_ICE,
            'icecast_server': pybassenc.BASS_ENCODE_STATS_ICESERV,
        }
        if type in types:
            type = types[type]
        if password is None:
            password = self.password
        return bass_call(pybassenc.BASS_Encode_CastGetStats, self.handle, type, password)
|
{"/src/dialog.py": ["/speech.py"], "/src/Menu.py": ["/speech.py"], "/src/sound_lib/external/pybassflac.py": ["/src/sound_lib/external/__init__.py"]}
|
24,598
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/barbican/secrets.py
|
# Copyright 2018 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import datetime as dt
import os
from rally.task import validation
from rally.utils import encodeutils
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.barbican import utils
"""Scenarios for Barbican secrets."""
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(name="BarbicanSecrets.list")
class BarbicanSecretsList(utils.BarbicanBase):
    # Pure read scenario; no cleanup context needed.
    def run(self):
        """List secrets via the admin Barbican client."""
        self.admin_barbican.list_secrets()
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["barbican"]},
                    name="BarbicanSecrets.create")
class BarbicanSecretsCreate(utils.BarbicanBase):
    # Created secrets are removed afterwards by the admin_cleanup context.
    def run(self):
        """Create a secret with default generated content."""
        self.admin_barbican.create_secret()
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["barbican"]},
                    name="BarbicanSecrets.create_and_delete")
class BarbicanSecretsCreateAndDelete(utils.BarbicanBase):
    def run(self):
        """Create a secret, then delete it by its reference."""
        secret = self.admin_barbican.create_secret()
        self.admin_barbican.delete_secret(secret.secret_ref)
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["barbican"]},
                    name="BarbicanSecrets.create_and_get")
class BarbicanSecretsCreateAndGet(utils.BarbicanBase):
    def run(self):
        """Create a secret, fetch it back and verify the reference matches."""
        secret = self.admin_barbican.create_secret()
        self.assertTrue(secret)
        secret_info = self.admin_barbican.get_secret(secret.secret_ref)
        self.assertEqual(secret.secret_ref, secret_info.secret_ref)
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["barbican"]},
                    name="BarbicanSecrets.get")
class BarbicanSecretsGet(utils.BarbicanBase):
    def run(self, secret_ref=None):
        """Get a secret, creating one first when no reference is supplied.

        :param secret_ref: Reference of an existing secret to get; when
            None, a fresh secret is created and fetched instead.
        """
        if secret_ref is None:
            secret = self.admin_barbican.create_secret()
            self.admin_barbican.get_secret(secret.secret_ref)
        else:
            self.admin_barbican.get_secret(secret_ref)
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["barbican"]},
                    name="BarbicanSecrets.create_and_list")
class BarbicanSecretsCreateAndList(utils.BarbicanBase):
    def run(self):
        """Create a secret, then list all secrets."""
        self.admin_barbican.create_secret()
        self.admin_barbican.list_secrets()
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(context={"admin_cleanup@openstack": ["barbican"]},
                    name="BarbicanSecrets.create_symmetric_and_delete")
class BarbicanSecretsCreateSymmetricAndDelete(utils.BarbicanBase):
    def run(self, payload, algorithm, bit_length, mode):
        """Create and delete symmetric secret

        :param payload: the unencrypted data
        :param algorithm: the algorithm associated with the secret key
        :param bit_length: the bit length of the secret key
        :param mode: the algorithm mode used with the secret key
        """
        from cryptography.hazmat.backends import default_backend
        from cryptography.hazmat.primitives import hashes
        from cryptography.hazmat.primitives.kdf.pbkdf2 import PBKDF2HMAC
        # Derive a key from the payload so the stored secret is not the raw
        # input; the random salt makes each iteration's secret unique.
        payload = encodeutils.safe_encode(payload)
        salt = os.urandom(16)
        kdf = PBKDF2HMAC(
            algorithm=hashes.SHA256(), length=32, salt=salt,
            iterations=1000, backend=default_backend())
        payload = base64.b64encode(kdf.derive(payload))
        payload = encodeutils.safe_decode(payload)
        # datetime.utcnow() is deprecated (Python 3.12); use an aware UTC
        # timestamp -- isoformat() then carries an explicit +00:00 offset,
        # which is valid ISO 8601 for Barbican's expiration field.
        expire_time = dt.datetime.now(dt.timezone.utc) + dt.timedelta(days=5)
        secret = self.admin_barbican.create_secret(
            expiration=expire_time.isoformat(), algorithm=algorithm,
            bit_length=bit_length, mode=mode, payload=payload,
            payload_content_type="application/octet-stream",
            payload_content_encoding="base64")
        self.admin_barbican.delete_secret(secret.secret_ref)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,599
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/mistral/test_executions.py
|
# Copyright 2016: Nokia Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.scenarios.mistral import executions
from tests.unit import test
BASE = "rally_openstack.task.scenarios.mistral.executions"
MISTRAL_WBS_BASE = "rally_openstack.task.scenarios.mistral.workbooks"
WB_DEFINITION = """---
version: 2.0
name: wb
workflows:
wf1:
type: direct
tasks:
noop_task:
action: std.noop
wf2:
type: direct
tasks:
noop_task:
action: std.noop
wf3:
type: direct
tasks:
noop_task:
action: std.noop
wf4:
type: direct
tasks:
noop_task:
action: std.noop
"""
WB_DEF_ONE_WF = """---
version: 2.0
name: wb
workflows:
wf1:
type: direct
tasks:
noop_task:
action: std.noop
"""
PARAMS_EXAMPLE = {"env": {"env_param": "env_param_value"}}
INPUT_EXAMPLE = """{"input1": "value1", "some_json_input": {"a": "b"}}"""
WB = type("obj", (object,), {"name": "wb", "definition": WB_DEFINITION})()
WB_ONE_WF = (
type("obj", (object,), {"name": "wb", "definition": WB_DEF_ONE_WF})()
)
class MistralExecutionsTestCase(test.ScenarioTestCase):
    """Unit tests for the mistral execution scenarios.

    Each test patches the scenario's _create/_delete helpers and asserts
    they were invoked; _create_workbook is mocked to return a fixed
    workbook object so the derived "wb.wfN" identifier is deterministic.

    NOTE(review): `assertEqual(1, mock.called)` compares the boolean
    .called against 1 (True == 1 in Python); assertTrue would state the
    intent more directly.
    """
    @mock.patch("%s.ListExecutions._list_executions" % BASE)
    def test_list_executions(self, mock__list_executions):
        executions.ListExecutions(self.context).run()
        self.assertEqual(1, mock__list_executions.called)
    @mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE)
    @mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE,
                return_value=WB)
    def test_create_execution(self, mock__create_workbook,
                              mock__create_execution):
        executions.CreateExecutionFromWorkbook(self.context).run(WB_DEFINITION)
        self.assertEqual(1, mock__create_workbook.called)
        self.assertEqual(1, mock__create_execution.called)
    @mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE)
    @mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE,
                return_value=WB)
    def test_create_execution_with_input(self, mock__create_workbook,
                                         mock__create_execution):
        executions.CreateExecutionFromWorkbook(self.context).run(
            WB_DEFINITION, wf_input=INPUT_EXAMPLE)
        self.assertEqual(1, mock__create_workbook.called)
        self.assertEqual(1, mock__create_execution.called)
    @mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE)
    @mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE,
                return_value=WB)
    @mock.patch("json.loads", return_value=PARAMS_EXAMPLE)
    def test_create_execution_with_params(self, mock_loads,
                                          mock__create_workbook,
                                          mock__create_execution):
        # params arrive as a string and must be json-decoded by the scenario.
        executions.CreateExecutionFromWorkbook(self.context).run(
            WB_DEFINITION, params=str(PARAMS_EXAMPLE))
        self.assertEqual(1, mock_loads.called)
        self.assertEqual(1, mock__create_workbook.called)
        self.assertEqual(1, mock__create_execution.called)
    @mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE)
    @mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE,
                return_value=WB)
    def test_create_execution_with_wf_name(self, mock__create_workbook,
                                           mock__create_execution):
        executions.CreateExecutionFromWorkbook(self.context).run(
            WB_DEFINITION, "wf4")
        self.assertEqual(1, mock__create_workbook.called)
        self.assertEqual(1, mock__create_execution.called)
        # we concatenate workbook name with the workflow name in the test
        # the workbook name is not random because we mock the method that
        # adds the random part
        mock__create_execution.assert_called_once_with("wb.wf4", None,)
    @mock.patch("%s.CreateExecutionFromWorkbook._delete_execution" % BASE)
    @mock.patch("%s.CreateExecutionFromWorkbook._delete_workbook" % BASE)
    @mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE)
    @mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE,
                return_value=WB)
    def test_create_delete_execution(
            self, mock__create_workbook, mock__create_execution,
            mock__delete_workbook, mock__delete_execution):
        executions.CreateExecutionFromWorkbook(self.context).run(
            WB_DEFINITION, do_delete=True)
        self.assertEqual(1, mock__create_workbook.called)
        self.assertEqual(1, mock__create_execution.called)
        self.assertEqual(1, mock__delete_workbook.called)
        self.assertEqual(1, mock__delete_execution.called)
    @mock.patch("%s.CreateExecutionFromWorkbook._delete_execution" % BASE)
    @mock.patch("%s.CreateExecutionFromWorkbook._delete_workbook" % BASE)
    @mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE)
    @mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE,
                return_value=WB)
    def test_create_delete_execution_with_wf_name(
            self, mock__create_workbook, mock__create_execution,
            mock__delete_workbook, mock__delete_execution):
        executions.CreateExecutionFromWorkbook(self.context).run(
            WB_DEFINITION, "wf4", do_delete=True)
        self.assertEqual(1, mock__create_workbook.called)
        self.assertEqual(1, mock__create_execution.called)
        self.assertEqual(1, mock__delete_workbook.called)
        self.assertEqual(1, mock__delete_execution.called)
        # we concatenate workbook name with the workflow name in the test
        # the workbook name is not random because we mock the method that
        # adds the random part
        mock__create_execution.assert_called_once_with("wb.wf4", None)
    @mock.patch("%s.CreateExecutionFromWorkbook._delete_execution" % BASE)
    @mock.patch("%s.CreateExecutionFromWorkbook._delete_workbook" % BASE)
    @mock.patch("%s.CreateExecutionFromWorkbook._create_execution" % BASE)
    @mock.patch("%s.CreateExecutionFromWorkbook._create_workbook" % BASE,
                return_value=WB_ONE_WF)
    def test_create_delete_execution_without_wf_name(
            self, mock__create_workbook, mock__create_execution,
            mock__delete_workbook, mock__delete_execution):
        # With a single-workflow workbook and no wf_name, wf1 is chosen.
        executions.CreateExecutionFromWorkbook(self.context).run(
            WB_DEF_ONE_WF, do_delete=True)
        self.assertEqual(1, mock__create_workbook.called)
        self.assertEqual(1, mock__create_execution.called)
        self.assertEqual(1, mock__delete_workbook.called)
        self.assertEqual(1, mock__delete_execution.called)
        # we concatenate workbook name with the workflow name in the test
        # the workbook name is not random because we mock the method that
        # adds the random part
        mock__create_execution.assert_called_once_with("wb.wf1", None)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,600
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/keystone/users.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import copy
import uuid
from rally.common import broker
from rally.common import cfg
from rally.common import logging
from rally.common import validation
from rally import exceptions
from rally_openstack.common import consts
from rally_openstack.common import credential
from rally_openstack.common import osclients
from rally_openstack.common.services.identity import identity
from rally_openstack.common.services.network import neutron
from rally_openstack.task import context
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
RESOURCE_MANAGEMENT_WORKERS_DESCR = (
"The number of concurrent threads to use for serving users context.")
PROJECT_DOMAIN_DESCR = "ID of domain in which projects will be created."
USER_DOMAIN_DESCR = "ID of domain in which users will be created."
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="users", platform="openstack", order=100)
class UserGenerator(context.OpenStackContext):
    """Creates specified amount of keystone users and tenants."""
    # The schema accepts one of two shapes: a "create new users" config
    # (tenants, users_per_tenant, ...) or a "use existing users" config
    # that only allows the user_choice_method knob.
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "anyOf": [
            {"description": "Create new temporary users and tenants.",
             "properties": {
                 "tenants": {
                     "type": "integer",
                     "minimum": 1,
                     "description": "The number of tenants to create."
                 },
                 "users_per_tenant": {
                     "type": "integer",
                     "minimum": 1,
                     "description": "The number of users to create per one "
                                    "tenant."},
                 "user_password": {
                     "type": "string",
                     "description": "Specify custom user password instead of "
                                    "randomly generated in case of password "
                                    "requirements."},
                 "resource_management_workers": {
                     "type": "integer",
                     "minimum": 1,
                     "description": RESOURCE_MANAGEMENT_WORKERS_DESCR},
                 "project_domain": {
                     "type": "string",
                     "description": PROJECT_DOMAIN_DESCR},
                 "user_domain": {
                     "type": "string",
                     "description": USER_DOMAIN_DESCR},
                 "user_choice_method": {
                     "$ref": "#/definitions/user_choice_method"}},
             "additionalProperties": False},
            # TODO(andreykurilin): add ability to specify users here.
            {"description": "Use existing users and tenants.",
             "properties": {
                 "user_choice_method": {
                     "$ref": "#/definitions/user_choice_method"}
             },
             "additionalProperties": False}
        ],
        "definitions": {
            "user_choice_method": {
                "enum": ["random", "round_robin"],
                "description": "The mode of balancing usage of users between "
                               "scenario iterations."}
        }
    }
    DEFAULT_CONFIG = {"user_choice_method": "random"}
    # Defaults merged into the config only when new users are going to be
    # created (i.e. no usable pre-existing users were supplied).
    DEFAULT_FOR_NEW_USERS = {
        "tenants": 1,
        "users_per_tenant": 1,
        "resource_management_workers":
            cfg.CONF.openstack.users_context_resource_management_workers,
    }
    def __init__(self, context):
        """Build credentials and decide between new vs. existing users.

        Reads the OpenStack platform data from the environment, constructs
        the admin credential (merging platform-level and admin-specific
        api_info) and fills config defaults for the "create new users"
        mode.
        """
        super(UserGenerator, self).__init__(context)
        creds = self.env["platforms"]["openstack"]
        if creds.get("admin"):
            admin_cred = copy.deepcopy(creds["admin"])
            # Platform-wide api_info is the base; the admin's own api_info
            # entries take precedence over it.
            api_info = copy.deepcopy(creds.get("api_info", {}))
            if "api_info" in admin_cred:
                api_info.update(creds["admin"]["api_info"])
            admin_cred["api_info"] = api_info
            context["admin"] = {
                "credential": credential.OpenStackCredential(**admin_cred)
            }
        # Reuse pre-existing users only when the context config requests
        # nothing beyond the user_choice_method knob; any other option
        # implies the user wants fresh users/tenants.
        if creds["users"] and not (set(self.config) - {"user_choice_method"}):
            self.existing_users = creds["users"]
        else:
            self.existing_users = []
        # NOTE(review): assumes admin credentials are always present here;
        # without an "admin" entry this raises KeyError -- confirm against
        # callers.
        self.credential = context["admin"]["credential"]
        project_domain = (self.credential["project_domain_name"]
                          or cfg.CONF.openstack.project_domain)
        user_domain = (self.credential["user_domain_name"]
                       or cfg.CONF.openstack.user_domain)
        self.DEFAULT_FOR_NEW_USERS["project_domain"] = project_domain
        self.DEFAULT_FOR_NEW_USERS["user_domain"] = user_domain
        # Fill defaults without overriding anything explicitly configured.
        with self.config.unlocked():
            for key, value in self.DEFAULT_FOR_NEW_USERS.items():
                self.config.setdefault(key, value)
    def _create_tenants(self, threads):
        """Create the configured number of tenants with *threads* workers.

        Uses the publish/consume broker pattern; each worker lazily builds
        and caches its own identity client.

        :returns: dict mapping tenant id -> tenant info dict
        """
        tenants = collections.deque()
        def publish(queue):
            for i in range(self.config["tenants"]):
                args = (self.config["project_domain"], self.task["uuid"], i)
                queue.append(args)
        def consume(cache, args):
            domain, task_id, i = args
            # One identity client per worker thread, created on first use.
            if "client" not in cache:
                clients = osclients.Clients(self.credential)
                cache["client"] = identity.Identity(
                    clients, name_generator=self.generate_random_name)
            tenant = cache["client"].create_project(domain_name=domain)
            tenant_dict = {"id": tenant.id, "name": tenant.name, "users": []}
            tenants.append(tenant_dict)
        # NOTE(msdubov): consume() will fill the tenants list in the closure.
        broker.run(publish, consume, threads)
        tenants_dict = {}
        for t in tenants:
            tenants_dict[t["id"]] = t
        return tenants_dict
    def _create_users(self, threads):
        # NOTE(msdubov): This should be called after _create_tenants().
        users_per_tenant = self.config["users_per_tenant"]
        default_role = cfg.CONF.openstack.keystone_default_role
        users = collections.deque()
        def publish(queue):
            for tenant_id in self.context["tenants"]:
                for user_id in range(users_per_tenant):
                    username = self.generate_random_name()
                    # Random password unless the task config pinned one
                    # (e.g. to satisfy deployment password policies).
                    password = (str(uuid.uuid4())
                                if self.config.get("user_password") is None
                                else self.config["user_password"])
                    args = (username, password, self.config["project_domain"],
                            self.config["user_domain"], tenant_id)
                    queue.append(args)
        def consume(cache, args):
            username, password, project_dom, user_dom, tenant_id = args
            # One identity client per worker thread, created on first use.
            if "client" not in cache:
                clients = osclients.Clients(self.credential)
                cache["client"] = identity.Identity(
                    clients, name_generator=self.generate_random_name)
            client = cache["client"]
            user = client.create_user(username, password=password,
                                      project_id=tenant_id,
                                      domain_name=user_dom,
                                      default_role=default_role)
            # Build a full non-admin credential so scenarios can
            # authenticate as this user against the same cloud endpoint.
            user_credential = credential.OpenStackCredential(
                auth_url=self.credential["auth_url"],
                username=user.name,
                password=password,
                tenant_name=self.context["tenants"][tenant_id]["name"],
                permission=consts.EndpointPermission.USER,
                project_domain_name=project_dom,
                user_domain_name=user_dom,
                endpoint_type=self.credential["endpoint_type"],
                https_insecure=self.credential["https_insecure"],
                https_cacert=self.credential["https_cacert"],
                region_name=self.credential["region_name"],
                profiler_hmac_key=self.credential["profiler_hmac_key"],
                profiler_conn_str=self.credential["profiler_conn_str"],
                api_info=self.credential["api_info"])
            users.append({"id": user.id,
                          "credential": user_credential,
                          "tenant_id": tenant_id})
        # NOTE(msdubov): consume() will fill the users list in the closure.
        broker.run(publish, consume, threads)
        return list(users)
    def create_users(self):
        """Create tenants and users, using the broker pattern."""
        threads = min(self.config["resource_management_workers"],
                      self.config["tenants"])
        LOG.debug("Creating %(tenants)d tenants using %(threads)s threads"
                  % {"tenants": self.config["tenants"], "threads": threads})
        self.context["tenants"] = self._create_tenants(threads)
        if len(self.context["tenants"]) < self.config["tenants"]:
            raise exceptions.ContextSetupFailure(
                ctx_name=self.get_name(),
                msg="Failed to create the requested number of tenants.")
        users_num = self.config["users_per_tenant"] * self.config["tenants"]
        threads = min(self.config["resource_management_workers"], users_num)
        LOG.debug("Creating %(users)d users using %(threads)s threads"
                  % {"users": users_num, "threads": threads})
        self.context["users"] = self._create_users(threads)
        # Cross-link users back into their tenants' "users" lists.
        for user in self.context["users"]:
            self.context["tenants"][user["tenant_id"]]["users"].append(user)
        if len(self.context["users"]) < users_num:
            raise exceptions.ContextSetupFailure(
                ctx_name=self.get_name(),
                msg="Failed to create the requested number of users.")
    def use_existing_users(self):
        """Populate the context from pre-existing platform users."""
        LOG.debug("Using existing users for OpenStack platform.")
        api_info = copy.deepcopy(self.env["platforms"]["openstack"].get(
            "api_info", {}))
        for user_credential in self.existing_users:
            user_credential = copy.deepcopy(user_credential)
            # NOTE(review): api_info is created once before the loop and
            # updated per user, so entries from earlier users carry over to
            # later ones -- confirm this accumulation is intentional.
            if "api_info" in user_credential:
                api_info.update(user_credential["api_info"])
            user_credential["api_info"] = api_info
            user_credential = credential.OpenStackCredential(**user_credential)
            # Authenticate once to resolve the user's id and project id.
            user_clients = osclients.Clients(user_credential)
            user_id = user_clients.keystone.auth_ref.user_id
            tenant_id = user_clients.keystone.auth_ref.project_id
            if tenant_id not in self.context["tenants"]:
                self.context["tenants"][tenant_id] = {
                    "id": tenant_id,
                    "name": user_credential.tenant_name
                }
            self.context["users"].append({
                "credential": user_credential,
                "id": user_id,
                "tenant_id": tenant_id
            })
    def setup(self):
        """Entry point: either reuse existing users or create new ones."""
        self.context["users"] = []
        self.context["tenants"] = {}
        self.context["user_choice_method"] = self.config["user_choice_method"]
        if self.existing_users:
            self.use_existing_users()
        else:
            self.create_users()
    def _remove_default_security_group(self):
        """Delete default security group for tenants."""
        admin_client = neutron.NeutronService(
            clients=osclients.Clients(self.credential),
            atomic_inst=self.atomic_actions()
        )
        # Clouds without the security-group extension have nothing to clean.
        if not admin_client.supports_extension("security-group", silent=True):
            LOG.debug("Security group context is disabled.")
            return
        security_groups = admin_client.list_security_groups(name="default")
        for security_group in security_groups:
            # Only touch groups belonging to tenants this context created.
            if security_group["tenant_id"] not in self.context["tenants"]:
                continue
            admin_client.delete_security_group(security_group["id"])
    def _get_consumer_for_deletion(self, func_name):
        """Return a broker consumer calling identity.<func_name>(id)."""
        def consume(cache, resource_id):
            if "client" not in cache:
                clients = osclients.Clients(self.credential)
                cache["client"] = identity.Identity(clients)
            getattr(cache["client"], func_name)(resource_id)
        return consume
    def _delete_tenants(self):
        """Delete all context-created tenants concurrently."""
        threads = self.config["resource_management_workers"]
        def publish(queue):
            for tenant_id in self.context["tenants"]:
                queue.append(tenant_id)
        broker.run(publish, self._get_consumer_for_deletion("delete_project"),
                   threads)
        self.context["tenants"] = {}
    def _delete_users(self):
        """Delete all context-created users concurrently."""
        threads = self.config["resource_management_workers"]
        def publish(queue):
            for user in self.context["users"]:
                queue.append(user["id"])
        broker.run(publish, self._get_consumer_for_deletion("delete_user"),
                   threads)
        self.context["users"] = []
    def cleanup(self):
        """Delete tenants and users, using the broker pattern."""
        if self.existing_users:
            # nothing to do here.
            return
        else:
            self._remove_default_security_group()
            self._delete_users()
            self._delete_tenants()
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,601
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/sahara/test_sahara_image.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally import exceptions
from rally_openstack.task.contexts.sahara import sahara_image
from tests.unit import fakes
from tests.unit import test
BASE_CTX = "rally.task.context"
CTX = "rally_openstack.task.contexts.sahara.sahara_image"
BASE_SCN = "rally.task.scenarios"
class SaharaImageTestCase(test.ScenarioTestCase):
    """Tests for the sahara_image task context."""

    def setUp(self):
        super(SaharaImageTestCase, self).setUp()
        self.tenants_num = 2
        self.users_per_tenant = 2
        self.users = self.tenants_num * self.users_per_tenant
        self.task = mock.MagicMock()
        self.tenants = {}
        self.users_key = []
        # Pre-populate tenants (each already carrying a sahara image id)
        # and two users per tenant.
        for i in range(self.tenants_num):
            self.tenants[str(i)] = {"id": str(i), "name": str(i),
                                    "sahara": {"image": "42"}}
            for j in range(self.users_per_tenant):
                self.users_key.append({"id": "%s_%s" % (str(i), str(j)),
                                       "tenant_id": str(i),
                                       "credential": fakes.FakeCredential()})

    @property
    def url_image_context(self):
        """Context config that makes the context upload a new image."""
        self.context.update({
            "config": {
                "users": {
                    "tenants": self.tenants_num,
                    "users_per_tenant": self.users_per_tenant,
                },
                "sahara_image": {
                    "image_url": "http://somewhere",
                    "plugin_name": "test_plugin",
                    "hadoop_version": "test_version",
                    "username": "test_user"
                }
            },
            "admin": {"credential": fakes.FakeCredential()},
            "users": self.users_key,
            "tenants": self.tenants
        })
        return self.context

    @property
    def existing_image_context(self):
        """Context config that makes the context reuse an existing image."""
        self.context.update({
            "config": {
                "users": {
                    "tenants": self.tenants_num,
                    "users_per_tenant": self.users_per_tenant,
                },
                "sahara_image": {
                    "image_uuid": "some_id"
                }
            },
            "admin": {"credential": fakes.FakeCredential()},
            "users": self.users_key,
            "tenants": self.tenants,
        })
        return self.context

    @mock.patch("rally_openstack.common.services."
                "image.image.Image")
    @mock.patch("%s.resource_manager.cleanup" % CTX)
    @mock.patch("rally_openstack.common.osclients.Clients")
    def test_setup_and_cleanup_url_image(self, mock_clients,
                                         mock_cleanup, mock_image):
        ctx = self.url_image_context
        sahara_ctx = sahara_image.SaharaImage(ctx)
        sahara_ctx.generate_random_name = mock.Mock()
        image_service = mock.Mock()
        mock_image.return_value = image_service
        image_service.create_image.return_value = mock.Mock(id=42)
        clients = mock.Mock()
        mock_clients.return_value = clients
        sahara_client = mock.Mock()
        clients.sahara.return_value = sahara_client
        # One image upload and one sahara registration per tenant.
        glance_calls = []
        for i in range(self.tenants_num):
            glance_calls.append(
                mock.call(container_format="bare",
                          image_location="http://somewhere",
                          disk_format="qcow2"))
        sahara_update_image_calls = []
        sahara_update_tags_calls = []
        for i in range(self.tenants_num):
            sahara_update_image_calls.append(mock.call(image_id=42,
                                                       user_name="test_user",
                                                       desc=""))
            sahara_update_tags_calls.append(mock.call(
                image_id=42,
                new_tags=["test_plugin", "test_version"]))
        sahara_ctx.setup()
        image_service.create_image.assert_has_calls(glance_calls)
        sahara_client.images.update_image.assert_has_calls(
            sahara_update_image_calls)
        sahara_client.images.update_tags.assert_has_calls(
            sahara_update_tags_calls)
        sahara_ctx.cleanup()
        mock_cleanup.assert_called_once_with(
            names=["glance.images"],
            users=ctx["users"],
            superclass=sahara_ctx.__class__,
            task_id=ctx["owner_id"])

    @mock.patch("%s.resource_manager.cleanup" % CTX)
    @mock.patch("%s.osclients.Clients" % CTX)
    def test_setup_and_cleanup_existing_image(
            self, mock_clients, mock_cleanup):
        # NOTE(review): this configures "mock_clients.glance..." rather
        # than "mock_clients.return_value.glance..."; the test passes
        # anyway because _create_image is mocked out below -- confirm the
        # intended mock chain.
        mock_clients.glance.images.get.return_value = mock.MagicMock(
            is_public=True)
        ctx = self.existing_image_context
        sahara_ctx = sahara_image.SaharaImage(ctx)
        sahara_ctx._create_image = mock.Mock()
        sahara_ctx.setup()
        # Every tenant must reference the pre-existing image; nothing new
        # may be created or cleaned up.
        for tenant_id in sahara_ctx.context["tenants"]:
            image_id = (
                sahara_ctx.context["tenants"][tenant_id]["sahara"]["image"])
            self.assertEqual("some_id", image_id)
        self.assertFalse(sahara_ctx._create_image.called)
        sahara_ctx.cleanup()
        self.assertFalse(mock_cleanup.called)

    @mock.patch("%s.osclients.Glance.create_client" % CTX)
    def test_check_existing_image(self, mock_glance_create_client):
        ctx = self.existing_image_context
        sahara_ctx = sahara_image.SaharaImage(ctx)
        sahara_ctx.setup()
        # Bug fix: the original called "asser_called_once_with" (typo),
        # which on a MagicMock silently creates a new attribute and
        # asserts nothing. The glance client the context talks to is the
        # *return value* of the patched create_client -- the private-image
        # test below configures the mock through the same chain.
        glance_client = mock_glance_create_client.return_value
        glance_client.images.get.assert_called_once_with("some_id")

    @mock.patch("%s.osclients.Glance.create_client" % CTX)
    def test_check_existing_private_image_fail(self,
                                               mock_glance_create_client):
        mock_glance_create_client.return_value.images.get.return_value = (
            mock.MagicMock(is_public=False))
        ctx = self.existing_image_context
        sahara_ctx = sahara_image.SaharaImage(ctx)
        self.assertRaises(exceptions.ContextSetupFailure,
                          sahara_ctx.setup)
        # Bug fix: same "asser_called_once_with" typo as above.
        glance_client = mock_glance_create_client.return_value
        glance_client.images.get.assert_called_once_with("some_id")
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,602
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/mistral/executions.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from rally.task import types
from rally.task import validation
import yaml
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.mistral import utils
"""Scenarios for Mistral execution."""
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_services",
                services=[consts.Service.MISTRAL])
@scenario.configure(name="MistralExecutions.list_executions",
                    platform="openstack")
class ListExecutions(utils.MistralScenario):

    def run(self, marker="", limit=None, sort_keys="", sort_dirs=""):
        """Exercise the Mistral execution-list command.

        Lists all workflow executions, forwarding the pagination and
        sorting options straight to the Mistral client.

        :param marker: The last execution uuid of the previous page, displays
                       list of executions after "marker".
        :param limit: number Maximum number of executions to return in a single
                      result.
        :param sort_keys: id,description
        :param sort_dirs: [SORT_DIRS] Comma-separated list of sort directions.
                          Default: asc.
        """
        list_filters = {
            "marker": marker,
            "limit": limit,
            "sort_keys": sort_keys,
            "sort_dirs": sort_dirs,
        }
        self._list_executions(**list_filters)
@types.convert(definition={"type": "file"})
@types.convert(params={"type": "file"})
@types.convert(wf_input={"type": "file"})
@validation.add("file_exists", param_name="definition")
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_services",
                services=[consts.Service.MISTRAL])
@validation.add("workbook_contains_workflow",
                workbook_param="definition",
                workflow_param="workflow_name")
@scenario.configure(name="MistralExecutions.create_execution_from_workbook",
                    context={"cleanup@openstack": ["mistral"]},
                    platform="openstack")
class CreateExecutionFromWorkbook(utils.MistralScenario):

    def run(self, definition, workflow_name=None, wf_input=None, params=None,
            do_delete=False):
        """Measure execution creation (and optionally deletion) timing.

        Creates a workbook from *definition*, starts an execution of one
        of its workflows, and — when requested — deletes both again so
        the "create" and "delete" code paths can be benchmarked.

        :param definition: string (yaml string) representation of given file
                           content (Mistral workbook definition)
        :param workflow_name: string the workflow name to execute. Should be
                              one of the to workflows in the definition. If no
                              workflow_name is passed, one of the workflows in
                              the definition will be taken.
        :param wf_input: file containing a json string of mistral workflow
                         input
        :param params: file containing a json string of mistral params
                       (the string is the place to pass the environment)
        :param do_delete: if False than it allows to check performance
                          in "create only" mode.
        """
        workbook = self._create_workbook(definition)
        parsed_definition = yaml.safe_load(workbook.definition)
        if not workflow_name:
            # No explicit choice: fall back to the first workflow declared
            # in the workbook.
            workflow_name = next(iter(parsed_definition["workflows"]))
        workflow_identifier = "%s.%s" % (workbook.name, workflow_name)
        params = json.loads(params) if params else {}
        execution = self._create_execution(workflow_identifier, wf_input,
                                           **params)
        if do_delete:
            self._delete_workbook(workbook.name)
            self._delete_execution(execution)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,603
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/hooks/fault_injection.py
|
# Copyright 2016: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally.common import objects
from rally.task import hook
from rally_openstack.common import consts
LOG = logging.getLogger(__name__)
@hook.configure(name="fault_injection", platform="openstack")
class FaultInjectionHook(hook.HookAction):
    """Performs fault injection using os-faults library.

    Configuration:

    * action - string that represents an action (more info in [1])
    * verify - whether to verify connection to cloud nodes or not

    This plugin discovers extra config of ExistingCloud
    and looks for "cloud_config" field. If cloud_config is present then
    it will be used to connect to the cloud by os-faults.

    Another option is to provide os-faults config file through
    OS_FAULTS_CONFIG env variable. Format of the config can
    be found in [1].

    [1] http://os-faults.readthedocs.io/en/latest/usage.html
    """

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "action": {"type": "string"},
            "verify": {"type": "boolean"},
        },
        "required": [
            "action",
        ],
        "additionalProperties": False,
    }

    def get_cloud_config(self):
        """Return the "cloud_config" section of the deployment's extra config.

        May return None when the deployment carries no such section; in
        that case os-faults falls back to its own discovery (env config).
        """
        deployment = objects.Deployment.get(self.task["deployment_uuid"])
        extra = deployment["config"].get("extra", {})
        return extra.get("cloud_config")

    def run(self):
        """Connect to the cloud via os-faults and apply the configured action."""
        # Imported lazily so the plugin can be discovered even when the
        # optional os-faults dependency is absent.
        import os_faults

        injector = os_faults.connect(self.get_cloud_config())
        if self.config.get("verify"):
            # Fail fast if any cloud node is unreachable.
            injector.verify()
        action = self.config["action"]
        LOG.debug("Injecting fault: %s" % action)
        os_faults.human_api(injector, action)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,604
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/loadbalancer/test_pools.py
|
# Copyright 2018: Red Hat Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.scenarios.octavia import pools
from tests.unit import test
class PoolsTestCase(test.ScenarioTestCase):
    """Tests for the Octavia pool scenarios.

    The four scenario tests differed only in the scenario class under test
    and in which extra Octavia calls they verify, so the shared machinery
    lives in _run_scenario(). (The original copies also built a
    ``loadbalancer`` fixture list that was iterated without ever being
    used — that dead code is gone.)
    """

    def setUp(self):
        super(PoolsTestCase, self).setUp()
        patch = mock.patch(
            "rally_openstack.common.services.loadbalancer.octavia.Octavia")
        self.addCleanup(patch.stop)
        self.mock_loadbalancers = patch.start()

    def _get_context(self):
        """Return a task context with one tenant network and subnet."""
        context = super(PoolsTestCase, self).get_test_context()
        context.update({
            "user": {
                "id": "fake_user",
                "tenant_id": "fake_tenant",
                "credential": mock.MagicMock()
            },
            "tenant": {"id": "fake_tenant",
                       "networks": [{"id": "fake_net",
                                     "subnets": ["fake_subnet"]}]}})
        return context

    def _run_scenario(self, scenario_cls, expected_calls):
        """Run *scenario_cls* and verify the shared Octavia interactions.

        :param scenario_cls: pools scenario class to execute
        :param expected_calls: names of Octavia service methods that must
            have been invoked exactly once by the scenario run
        :returns: the mocked Octavia service, for extra assertions
        """
        service = self.mock_loadbalancers.return_value
        scenario = scenario_cls(self._get_context())
        scenario.run(protocol="HTTP", lb_algorithm="ROUND_ROBIN")
        # One load balancer is created per subnet of the tenant networks.
        subnets = []
        for network in self._get_context()["tenant"]["networks"]:
            subnets.extend(network.get("subnets", []))
        service.load_balancer_create.assert_has_calls(
            [mock.call(subnet_id="fake_subnet", project_id="fake_tenant")
             for _ in subnets])
        self.assertEqual(
            1, service.wait_for_loadbalancer_prov_status.call_count)
        for method_name in expected_calls:
            self.assertEqual(1, getattr(service, method_name).call_count)
        return service

    def test_create_and_list_pools(self):
        service = self._run_scenario(pools.CreateAndListPools,
                                     ("pool_create",))
        service.pool_list.assert_called_once_with()

    def test_create_and_delete_pools(self):
        self._run_scenario(pools.CreateAndDeletePools,
                           ("pool_create", "pool_delete"))

    def test_create_and_update_pools(self):
        self._run_scenario(pools.CreateAndUpdatePools,
                           ("pool_create", "pool_set"))

    def test_create_and_show_pools(self):
        self._run_scenario(pools.CreateAndShowPools,
                           ("pool_create", "pool_show"))
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,605
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/network/routers.py
|
# Copyright 2017: Orange
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import validation
from rally_openstack.common import consts
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
from rally_openstack.task.scenarios.neutron import utils as neutron_utils
@validation.add("required_platform", platform="openstack", admin=True,
                users=True)
@context.configure(name="router", platform="openstack", order=351)
class Router(context.OpenStackContext):
    """Create networking resources.

    This creates router for all tenants.
    """

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "routers_per_tenant": {
                "type": "integer",
                "minimum": 1
            },
            # Bug fix: this key used to be "admin_state_up " (note the
            # trailing space) while setup() reads "admin_state_up". With
            # additionalProperties=False, users could never pass the
            # option through schema validation.
            "admin_state_up": {
                "description": "A human-readable description for the resource",
                "type": "boolean",
            },
            "external_gateway_info": {
                "description": "The external gateway information .",
                "type": "object",
                "properties": {
                    "network_id": {"type": "string"},
                    "enable_snat": {"type": "boolean"}
                },
                "additionalProperties": False
            },
            "network_id": {
                "description": "Network ID",
                "type": "string"
            },
            "external_fixed_ips": {
                "description": "Ip(s) of the external gateway interface.",
                "type": "array",
                "items": {
                    "type": "object",
                    "properties": {
                        "ip_address": {"type": "string"},
                        "subnet_id": {"type": "string"}
                    },
                    "additionalProperties": False,
                }
            },
            "distributed": {
                "description": "Distributed router. Require dvr extension.",
                "type": "boolean"
            },
            "ha": {
                "description": "Highly-available router. Require l3-ha.",
                "type": "boolean"
            },
            "availability_zone_hints": {
                "description": "Require router_availability_zone extension.",
                "type": "boolean"
            }
        },
        "additionalProperties": False
    }

    DEFAULT_CONFIG = {
        "routers_per_tenant": 1,
    }

    def setup(self):
        """Create the configured number of routers in every tenant.

        Only options the user actually set are forwarded to the router
        create call, so Neutron defaults apply for everything else.
        """
        optional = ("admin_state_up", "external_gateway_info", "network_id",
                    "external_fixed_ips", "distributed", "ha",
                    "availability_zone_hints")
        kwargs = {name: self.config[name]
                  for name in optional if name in self.config}
        for user, tenant_id in self._iterate_per_tenants():
            tenant = self.context["tenants"][tenant_id]
            tenant["routers"] = []
            # Routers are created on behalf of each tenant's user.
            scenario = neutron_utils.NeutronScenario(
                context={"user": user, "task": self.context["task"],
                         "owner_id": self.context["owner_id"]}
            )
            for _ in range(self.config["routers_per_tenant"]):
                tenant["routers"].append(scenario._create_router(kwargs))

    def cleanup(self):
        """Delete every router created by this context."""
        resource_manager.cleanup(
            names=["neutron.router"],
            users=self.context.get("users", []),
            superclass=neutron_utils.NeutronScenario,
            task_id=self.get_owner_id())
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,606
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/neutron/test_bgpvpn.py
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from rally_openstack.task.scenarios.neutron import bgpvpn
from tests.unit import test
@ddt.ddt
class NeutronBgpvpnTestCase(test.TestCase):
    """Unit tests for the Neutron BGP VPN scenarios.

    All Neutron calls are mocked out; the tests only verify that each
    scenario wires its helper calls together with the expected arguments.
    """

    def _get_context(self, resource=None):
        # Build a fake task context.  For "network"/"router" a fake user is
        # added; the tenant resource list shape differs between the two:
        # networks are stored as plain dicts, routers are wrapped one level
        # deeper ({"router": {...}}) — presumably mirroring the raw Neutron
        # API response shape; verify against the scenario code if changing.
        context = test.get_test_context()
        if resource in ("network", "router"):
            context.update({
                "user": {
                    "id": "fake_user",
                    "tenant_id": "fake_tenant",
                    "credential": mock.MagicMock()}
            })
        if resource == "network":
            context.update(
                {"tenant": {"id": "fake_tenant",
                            resource + "s": [{"id": "fake_net",
                                              "tenant_id": "fake_tenant",
                                              "router_id": "fake_router"}]}
                 })
        elif resource == "router":
            context.update(
                {"tenant": {"id": "fake_tenant",
                            resource + "s": [
                                {resource: {"id": "fake_net",
                                            "tenant_id": "fake_tenant"}}]}
                 })
        return context

    def _get_bgpvpn_create_data(self):
        # Default kwargs the scenarios pass to _create_bgpvpn.
        return {
            "route_targets": None,
            "import_targets": None,
            "export_targets": None,
            "route_distinguishers": None}

    def _get_bgpvpn_update_data(self):
        # Default kwargs the scenarios pass to _update_bgpvpn.
        return {
            "route_targets": None,
            "import_targets": None,
            "export_targets": None,
            "route_distinguishers": None}

    @ddt.data(
        {},
        {"bgpvpn_create_args": None},
        {"bgpvpn_create_args": {}},
    )
    @ddt.unpack
    def test_create_and_delete_bgpvpns(self, bgpvpn_create_args=None):
        # Create must be called with type="l3" plus the defaults; delete
        # must receive the created object.
        scenario = bgpvpn.CreateAndDeleteBgpvpns(self._get_context())
        bgpvpn_create_data = bgpvpn_create_args or {}
        create_data = self._get_bgpvpn_create_data()
        create_data.update(bgpvpn_create_data)
        scenario._create_bgpvpn = mock.Mock()
        scenario._delete_bgpvpn = mock.Mock()
        scenario.run(**create_data)
        scenario._create_bgpvpn.assert_called_once_with(
            type="l3", **create_data)
        scenario._delete_bgpvpn.assert_called_once_with(
            scenario._create_bgpvpn.return_value)

    @ddt.data(
        {},
        {"bgpvpn_create_args": None},
        {"bgpvpn_create_args": {}},
    )
    @ddt.unpack
    def test_create_and_list_bgpvpns(self, bgpvpn_create_args=None):
        scenario = bgpvpn.CreateAndListBgpvpns(self._get_context())
        bgpvpn_create_data = bgpvpn_create_args or {}
        create_data = self._get_bgpvpn_create_data()
        create_data.update(bgpvpn_create_data)
        bgpvpn_created = {"bgpvpn": {"id": 1, "name": "b1"}}
        bgpvpn_listed = [{"id": 1}]
        scenario._create_bgpvpn = mock.Mock(return_value=bgpvpn_created)
        scenario._list_bgpvpns = mock.Mock(return_value=bgpvpn_listed)
        scenario.run(**create_data)
        scenario._create_bgpvpn.assert_called_once_with(
            type="l3", **create_data)
        scenario._list_bgpvpns.assert_called_once_with()

    @ddt.data(
        {},
        {"bgpvpn_create_args": {}},
        {"bgpvpn_update_args": {}},
        {"bgpvpn_update_args": {"update_name": True}},
        {"bgpvpn_update_args": {"update_name": False}},
    )
    @ddt.unpack
    def test_create_and_update_bgpvpns(self, bgpvpn_create_args=None,
                                       bgpvpn_update_args=None):
        scenario = bgpvpn.CreateAndUpdateBgpvpns(self._get_context())
        bgpvpn_create_data = bgpvpn_create_args or {}
        bgpvpn_update_data = bgpvpn_update_args or {}
        create_data = self._get_bgpvpn_create_data()
        create_data.update(bgpvpn_create_data)
        update_data = self._get_bgpvpn_update_data()
        update_data.update(bgpvpn_update_data)
        # The scenario defaults update_name to False when not supplied, so
        # the expected _update_bgpvpn kwargs must include it explicitly.
        if "update_name" not in update_data:
            update_data["update_name"] = False
        bgpvpn_data = {}
        bgpvpn_data.update(bgpvpn_create_data)
        bgpvpn_data.update(bgpvpn_update_data)
        scenario._create_bgpvpn = mock.Mock()
        scenario._update_bgpvpn = mock.Mock()
        scenario.run(**bgpvpn_data)
        scenario._create_bgpvpn.assert_called_once_with(
            type="l3", **create_data)
        scenario._update_bgpvpn.assert_called_once_with(
            scenario._create_bgpvpn.return_value, **update_data)

    @mock.patch.object(bgpvpn, "random")
    def test_create_and_associate_disassociate_networks(self, mock_random):
        scenario = bgpvpn.CreateAndAssociateDissassociateNetworks(
            self._get_context("network"))
        create_data = self._get_bgpvpn_create_data()
        networks = self._get_context("network")["tenant"]["networks"]
        create_data["tenant_id"] = networks[0]["tenant_id"]
        # The scenario derives route_targets from two random ints; pin them.
        mock_random.randint.return_value = 12345
        create_data["route_targets"] = "12345:12345"
        scenario._create_bgpvpn = mock.Mock()
        scenario._create_bgpvpn_network_assoc = mock.Mock()
        scenario._delete_bgpvpn_network_assoc = mock.Mock()
        scenario.run()
        scenario._create_bgpvpn.assert_called_once_with(
            type="l3", **create_data)
        scenario._create_bgpvpn_network_assoc.assert_called_once_with(
            scenario._create_bgpvpn.return_value, networks[0])
        scenario._delete_bgpvpn_network_assoc.assert_called_once_with(
            scenario._create_bgpvpn.return_value,
            scenario._create_bgpvpn_network_assoc.return_value)

    @mock.patch.object(bgpvpn, "random")
    def test_create_and_associate_disassociate_routers(self, mock_random):
        # NOTE(review): uses the "network" context and derives the router
        # from the network's router_id — the scenario under test reads the
        # router from the tenant networks, not from a "routers" list.
        scenario = bgpvpn.CreateAndAssociateDissassociateRouters(
            self._get_context("network"))
        create_data = self._get_bgpvpn_create_data()
        router = {"id": self._get_context(
            "network")["tenant"]["networks"][0]["router_id"]}
        create_data["tenant_id"] = self._get_context("network")["tenant"]["id"]
        mock_random.randint.return_value = 12345
        create_data["route_targets"] = "12345:12345"
        scenario._create_bgpvpn = mock.Mock()
        scenario._create_bgpvpn_router_assoc = mock.Mock()
        scenario._delete_bgpvpn_router_assoc = mock.Mock()
        scenario.run()
        scenario._create_bgpvpn.assert_called_once_with(
            type="l3", **create_data)
        scenario._create_bgpvpn_router_assoc.assert_called_once_with(
            scenario._create_bgpvpn.return_value, router)
        scenario._delete_bgpvpn_router_assoc.assert_called_once_with(
            scenario._create_bgpvpn.return_value,
            scenario._create_bgpvpn_router_assoc.return_value)

    @mock.patch.object(bgpvpn, "random")
    def test_create_and_list_networks_assocs(self, mock_random):
        scenario = bgpvpn.CreateAndListNetworksAssocs(
            self._get_context("network"))
        create_data = self._get_bgpvpn_create_data()
        networks = self._get_context("network")["tenant"]["networks"]
        create_data["tenant_id"] = networks[0]["tenant_id"]
        network_assocs = {
            "network_associations": [{"network_id": networks[0]["id"]}]
        }
        mock_random.randint.return_value = 12345
        create_data["route_targets"] = "12345:12345"
        scenario._create_bgpvpn = mock.Mock()
        scenario._create_bgpvpn_network_assoc = mock.Mock()
        scenario._list_bgpvpn_network_assocs = mock.Mock(
            return_value=network_assocs)
        scenario.run()
        scenario._create_bgpvpn.assert_called_once_with(
            type="l3", **create_data)
        scenario._create_bgpvpn_network_assoc.assert_called_once_with(
            scenario._create_bgpvpn.return_value, networks[0])
        scenario._list_bgpvpn_network_assocs.assert_called_once_with(
            scenario._create_bgpvpn.return_value)

    @mock.patch.object(bgpvpn, "random")
    def test_create_and_list_routers_assocs(self, mock_random):
        scenario = bgpvpn.CreateAndListRoutersAssocs(
            self._get_context("network"))
        create_data = self._get_bgpvpn_create_data()
        router = {"id": self._get_context(
            "network")["tenant"]["networks"][0]["router_id"]}
        create_data["tenant_id"] = self._get_context("network")["tenant"]["id"]
        router_assocs = {
            "router_associations": [{"router_id": router["id"]}]
        }
        mock_random.randint.return_value = 12345
        create_data["route_targets"] = "12345:12345"
        scenario._create_bgpvpn = mock.Mock()
        scenario._create_bgpvpn_router_assoc = mock.Mock()
        scenario._list_bgpvpn_router_assocs = mock.Mock(
            return_value=router_assocs)
        scenario.run()
        scenario._create_bgpvpn.assert_called_once_with(
            type="l3", **create_data)
        scenario._create_bgpvpn_router_assoc.assert_called_once_with(
            scenario._create_bgpvpn.return_value, router)
        scenario._list_bgpvpn_router_assocs.assert_called_once_with(
            scenario._create_bgpvpn.return_value)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,607
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/verification/tempest/context.py
|
# Copyright 2017: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import configparser
import os
import re
import requests
from rally.common import logging
from rally import exceptions
from rally.task import utils as task_utils
from rally.verification import context
from rally.verification import utils
from rally_openstack.common import consts
from rally_openstack.common import credential
from rally_openstack.common.services.image import image
from rally_openstack.common.services.network import neutron
from rally_openstack.verification.tempest import config as conf
LOG = logging.getLogger(__name__)
@context.configure("tempest", order=900)
class TempestContext(context.VerifierContext):
    """Context class to create/delete resources needed for Tempest."""
    RESOURCE_NAME_FORMAT = "rally_verify_XXXXXXXX_XXXXXXXX"

    def __init__(self, ctx):
        """Read admin credentials from the env and prepare config access.

        :param ctx: verifier context dict passed by the framework
        """
        super(TempestContext, self).__init__(ctx)
        openstack_platform = self.verifier.env.data["platforms"]["openstack"]
        admin_creds = credential.OpenStackCredential(
            permission=consts.EndpointPermission.ADMIN,
            **openstack_platform["platform_data"]["admin"])
        self.clients = admin_creds.clients()
        self.available_services = self.clients.services().values()
        self.conf = configparser.ConfigParser(allow_no_value=True)
        # Preserve option case: Tempest option names are case-sensitive.
        self.conf.optionxform = str
        self.conf_path = self.verifier.manager.configfile
        self.data_dir = self.verifier.manager.home_dir
        self.image_name = "tempest-image"
        # Bookkeeping of everything created in setup() so cleanup() can
        # delete exactly what this context made.
        self._created_roles = []
        self._created_images = []
        self._created_flavors = []
        self._created_networks = []

    def _configure_img_options(self):
        """Set scenario image options depending on the Tempest version.

        Tempest < 27 expects img_dir + a bare file name in img_file;
        newer releases expect img_file to be a full path.
        """
        try:
            tempest_major_version = int(self.verifier.version.split(".", 1)[0])
        except ValueError:
            # use latest flow by default
            tempest_major_version = 27
        if tempest_major_version < 27:
            self._configure_option("scenario", "img_dir", self.data_dir)
            img_file = self.image_name
        else:
            img_file = self.data_dir + "/" + self.image_name
        self._configure_option("scenario", "img_file", img_file,
                               helper_method=self._download_image)

    def setup(self):
        """Create roles/images/flavors/networks and write the config file.

        Each option is only filled in when it is not already present in
        the existing Tempest config (see _configure_option).
        """
        self.conf.read(self.conf_path)
        utils.create_dir(self.data_dir)
        self._create_tempest_roles()
        self._configure_option("DEFAULT", "log_file",
                               os.path.join(self.data_dir, "tempest.log"))
        self._configure_option("oslo_concurrency", "lock_path",
                               os.path.join(self.data_dir, "lock_files"))
        self._configure_img_options()
        self._configure_option("compute", "image_ref",
                               helper_method=self._discover_or_create_image)
        self._configure_option("compute", "image_ref_alt",
                               helper_method=self._discover_or_create_image)
        self._configure_option("compute", "flavor_ref",
                               helper_method=self._discover_or_create_flavor,
                               flv_ram=conf.CONF.openstack.flavor_ref_ram,
                               flv_disk=conf.CONF.openstack.flavor_ref_disk)
        self._configure_option("compute", "flavor_ref_alt",
                               helper_method=self._discover_or_create_flavor,
                               flv_ram=conf.CONF.openstack.flavor_ref_alt_ram,
                               flv_disk=conf.CONF.openstack.flavor_ref_alt_disk
                               )
        if "neutron" in self.available_services:
            neutronclient = self.clients.neutron()
            if neutronclient.list_networks(shared=True)["networks"]:
                # If the OpenStack cloud has some shared networks, we will
                # create our own shared network and specify its name in the
                # Tempest config file. Such approach will allow us to avoid
                # failures of Tempest tests with error "Multiple possible
                # networks found". Otherwise the default behavior defined in
                # Tempest will be used and Tempest itself will manage network
                # resources.
                LOG.debug("Shared networks found. "
                          "'fixed_network_name' option should be configured.")
                self._configure_option(
                    "compute", "fixed_network_name",
                    helper_method=self._create_network_resources)
        if "heat" in self.available_services:
            self._configure_option(
                "orchestration", "instance_type",
                helper_method=self._discover_or_create_flavor,
                flv_ram=conf.CONF.openstack.heat_instance_type_ram,
                flv_disk=conf.CONF.openstack.heat_instance_type_disk)
        with open(self.conf_path, "w") as configfile:
            self.conf.write(configfile)

    def cleanup(self):
        """Delete everything created by setup() and rewrite the config."""
        # Tempest tests may take more than 1 hour and we should remove all
        # cached clients sessions to avoid tokens expiration when deleting
        # Tempest resources.
        self.clients.clear()
        self._cleanup_tempest_roles()
        self._cleanup_images()
        self._cleanup_flavors()
        if "neutron" in self.available_services:
            self._cleanup_network_resources()
        with open(self.conf_path, "w") as configfile:
            self.conf.write(configfile)

    def _create_tempest_roles(self):
        """Create the Keystone roles Tempest needs, if they don't exist."""
        keystoneclient = self.clients.verified_keystone()
        roles = [conf.CONF.openstack.swift_operator_role,
                 conf.CONF.openstack.swift_reseller_admin_role,
                 conf.CONF.openstack.heat_stack_owner_role,
                 conf.CONF.openstack.heat_stack_user_role]
        # Case-insensitive comparison against existing roles.
        existing_roles = set(role.name.lower()
                             for role in keystoneclient.roles.list())
        for role in roles:
            if role.lower() not in existing_roles:
                LOG.debug("Creating role '%s'." % role)
                self._created_roles.append(keystoneclient.roles.create(role))

    def _configure_option(self, section, option, value=None,
                          helper_method=None, *args, **kwargs):
        """Set a config option only when it is currently empty/unset.

        :param section: config section name
        :param option: option name inside the section
        :param value: literal value to set (ignored if helper_method
                      returns a resource)
        :param helper_method: optional callable creating/discovering the
                              resource whose id/name becomes the value
        """
        option_value = self.conf.get(section, option)
        if not option_value:
            LOG.debug("Option '%s' from '%s' section is not configured."
                      % (option, section))
            if helper_method:
                res = helper_method(*args, **kwargs)
                if res:
                    # Network-related options are configured by name; all
                    # other resources by their id attribute.
                    value = res["network"]["name"] if ("network" in
                                                       option) else res.id
            LOG.debug("Setting value '%s' to option '%s'." % (value, option))
            self.conf.set(section, option, value)
            LOG.debug("Option '{opt}' is configured. "
                      "{opt} = {value}".format(opt=option, value=value))
        else:
            LOG.debug("Option '{opt}' is already configured "
                      "in Tempest config file. {opt} = {opt_val}"
                      .format(opt=option, opt_val=option_value))

    def _discover_image(self):
        """Return the first public image matching img_name_regex, or None."""
        LOG.debug("Trying to discover a public image with name matching "
                  "regular expression '%s'. Note that case insensitive "
                  "matching is performed."
                  % conf.CONF.openstack.img_name_regex)
        image_service = image.Image(self.clients)
        images = image_service.list_images(status="active",
                                           visibility="public")
        for image_obj in images:
            if image_obj.name and re.match(conf.CONF.openstack.img_name_regex,
                                           image_obj.name, re.IGNORECASE):
                LOG.debug("The following public image discovered: '%s'."
                          % image_obj.name)
                return image_obj
        LOG.debug("There is no public image with name matching regular "
                  "expression '%s'." % conf.CONF.openstack.img_name_regex)

    def _download_image_from_source(self, target_path, image=None):
        """Download an image to target_path from Glance or from img_url.

        :param target_path: local file path to write the image to
        :param image: optional Glance image object; when None the image is
                      fetched over HTTP from conf img_url
        """
        if image:
            LOG.debug("Downloading image '%s' from Glance to %s."
                      % (image.name, target_path))
            with open(target_path, "wb") as image_file:
                for chunk in self.clients.glance().images.data(image.id):
                    image_file.write(chunk)
        else:
            LOG.debug("Downloading image from %s to %s."
                      % (conf.CONF.openstack.img_url, target_path))
            try:
                response = requests.get(conf.CONF.openstack.img_url,
                                        stream=True)
            except requests.ConnectionError as err:
                msg = ("Failed to download image. Possibly there is no "
                       "connection to Internet. Error: %s."
                       % (str(err) or "unknown"))
                raise exceptions.RallyException(msg)
            if response.status_code == 200:
                with open(target_path, "wb") as image_file:
                    for chunk in response.iter_content(chunk_size=1024):
                        if chunk:   # filter out keep-alive new chunks
                            image_file.write(chunk)
                            image_file.flush()
            else:
                if response.status_code == 404:
                    msg = "Failed to download image. Image was not found."
                else:
                    msg = ("Failed to download image. HTTP error code %d."
                           % response.status_code)
                raise exceptions.RallyException(msg)
        LOG.debug("The image has been successfully downloaded!")

    def _download_image(self):
        """Ensure the Tempest image file exists locally, downloading it."""
        image_path = os.path.join(self.data_dir, self.image_name)
        if os.path.isfile(image_path):
            LOG.debug("Image is already downloaded to %s." % image_path)
            return
        if conf.CONF.openstack.img_name_regex:
            image = self._discover_image()
            if image:
                return self._download_image_from_source(image_path, image)
        self._download_image_from_source(image_path)

    def _discover_or_create_image(self):
        """Return an existing public image or create a new one in Glance."""
        if conf.CONF.openstack.img_name_regex:
            image_obj = self._discover_image()
            if image_obj:
                LOG.debug("Using image '%s' (ID = %s) for the tests."
                          % (image_obj.name, image_obj.id))
                return image_obj
        params = {
            "image_name": self.generate_random_name(),
            "disk_format": conf.CONF.openstack.img_disk_format,
            "container_format": conf.CONF.openstack.img_container_format,
            "image_location": os.path.join(self.data_dir, self.image_name),
            "visibility": "public"
        }
        LOG.debug("Creating image '%s'." % params["image_name"])
        image_service = image.Image(self.clients)
        image_obj = image_service.create_image(**params)
        LOG.debug("Image '%s' (ID = %s) has been successfully created!"
                  % (image_obj.name, image_obj.id))
        self._created_images.append(image_obj)
        return image_obj

    def _discover_or_create_flavor(self, flv_ram, flv_disk):
        """Return a flavor with the given RAM/disk, creating one if needed.

        :param flv_ram: exact RAM in MB the flavor must have
        :param flv_disk: minimum disk size in GiB
        """
        novaclient = self.clients.nova()
        LOG.debug("Trying to discover a flavor with the following properties: "
                  "RAM = %(ram)dMB, VCPUs = 1, disk >= %(disk)dGiB." %
                  {"ram": flv_ram, "disk": flv_disk})
        for flavor in novaclient.flavors.list():
            if (flavor.ram == flv_ram
                    and flavor.vcpus == 1 and flavor.disk >= flv_disk):
                LOG.debug("The following flavor discovered: '{0}'. "
                          "Using flavor '{0}' (ID = {1}) for the tests."
                          .format(flavor.name, flavor.id))
                return flavor
        LOG.debug("There is no flavor with the mentioned properties.")
        params = {
            "name": self.generate_random_name(),
            "ram": flv_ram,
            "vcpus": 1,
            "disk": flv_disk
        }
        LOG.debug("Creating flavor '%s' with the following properties: RAM "
                  "= %dMB, VCPUs = 1, disk = %dGB." %
                  (params["name"], flv_ram, flv_disk))
        flavor = novaclient.flavors.create(**params)
        LOG.debug("Flavor '%s' (ID = %s) has been successfully created!"
                  % (flavor.name, flavor.id))
        self._created_flavors.append(flavor)
        return flavor

    def _create_network_resources(self):
        """Create a shared network/subnet/router topology for Tempest."""
        client = neutron.NeutronService(
            clients=self.clients,
            name_generator=self.generate_random_name,
            atomic_inst=self.atomic_actions()
        )
        tenant_id = self.clients.keystone.auth_ref.project_id
        router_create_args = {"project_id": tenant_id}
        public_net = None
        if self.conf.has_section("network"):
            # NOTE(review): conf.get raises NoOptionError if the section
            # exists but the option is missing — presumably the option is
            # always written by Tempest config generation; confirm.
            public_net = self.conf.get("network", "public_network_id")
        if public_net:
            external_gateway_info = {
                "network_id": public_net
            }
            if client.supports_extension("ext-gw-mode", silent=True):
                external_gateway_info["enable_snat"] = True
            router_create_args["external_gateway_info"] = external_gateway_info
        LOG.debug("Creating network resources: network, subnet, router.")
        net = client.create_network_topology(
            subnets_count=1,
            router_create_args=router_create_args,
            subnet_create_args={"project_id": tenant_id},
            network_create_args={"shared": True, "project_id": tenant_id})
        LOG.debug("Network resources have been successfully created!")
        self._created_networks.append(net)
        return net

    def _cleanup_tempest_roles(self):
        """Delete the Keystone roles created by this context."""
        keystoneclient = self.clients.keystone()
        for role in self._created_roles:
            LOG.debug("Deleting role '%s'." % role.name)
            keystoneclient.roles.delete(role.id)
            LOG.debug("Role '%s' has been deleted." % role.name)

    def _cleanup_images(self):
        """Delete created images and wait until Glance confirms deletion."""
        image_service = image.Image(self.clients)
        for image_obj in self._created_images:
            LOG.debug("Deleting image '%s'." % image_obj.name)
            self.clients.glance().images.delete(image_obj.id)
            task_utils.wait_for_status(
                image_obj, ["deleted", "pending_delete"],
                check_deletion=True,
                update_resource=image_service.get_image,
                timeout=conf.CONF.openstack.glance_image_delete_timeout,
                check_interval=conf.CONF.openstack.
                glance_image_delete_poll_interval)
            LOG.debug("Image '%s' has been deleted." % image_obj.name)
            # Blank out any config option that referenced the deleted id.
            self._remove_opt_value_from_config("compute", image_obj.id)

    def _cleanup_flavors(self):
        """Delete created flavors and scrub their ids from the config."""
        novaclient = self.clients.nova()
        for flavor in self._created_flavors:
            LOG.debug("Deleting flavor '%s'." % flavor.name)
            novaclient.flavors.delete(flavor.id)
            LOG.debug("Flavor '%s' has been deleted." % flavor.name)
            self._remove_opt_value_from_config("compute", flavor.id)
            self._remove_opt_value_from_config("orchestration", flavor.id)

    def _cleanup_network_resources(self):
        """Delete created network topologies and scrub the config."""
        client = neutron.NeutronService(
            clients=self.clients,
            name_generator=self.generate_random_name,
            atomic_inst=self.atomic_actions()
        )
        for topo in self._created_networks:
            LOG.debug("Deleting network resources: router, subnet, network.")
            client.delete_network_topology(topo)
            self._remove_opt_value_from_config("compute",
                                               topo["network"]["name"])
            LOG.debug("Network resources have been deleted.")

    def _remove_opt_value_from_config(self, section, opt_value):
        """Blank every option in *section* whose value equals opt_value."""
        for option, value in self.conf.items(section):
            if opt_value == value:
                LOG.debug("Removing value '%s' of option '%s' "
                          "from Tempest config file." % (opt_value, option))
                self.conf.set(section, option, "")
                LOG.debug("Value '%s' has been removed." % opt_value)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,608
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/cinder/volumes.py
|
# Copyright 2013 Huawei Technologies Co.,LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from rally.common import logging
from rally import exceptions
from rally.task import atomic
from rally.task import types
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.cinder import utils as cinder_utils
from rally_openstack.task.scenarios.glance import images
from rally_openstack.task.scenarios.nova import utils as nova_utils
LOG = logging.getLogger(__name__)
"""Scenarios for Cinder Volumes."""
@types.convert(image={"type": "glance_image"})
@validation.add("restricted_parameters", param_names=["name", "display_name"])
@validation.add("image_exists", param_name="image", nullable=True)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_and_list_volume",
                    platform="openstack")
class CreateAndListVolume(cinder_utils.CinderBasic):
    def run(self, size, detailed=True, image=None, **kwargs):
        """Create a volume and list all volumes.

        Measure the "cinder volume-list" command performance.
        If you have only 1 user in your context, you will
        add 1 volume on every iteration. So you will have more
        and more volumes and will be able to measure the
        performance of the "cinder volume-list" command depending on
        the number of images owned by users.

        :param size: volume size (integer, in GB) or
                     dictionary, must contain two values:
                     min - minimum size volumes will be created as;
                     max - maximum size volumes will be created as.
        :param detailed: determines whether the volume listing should contain
                         detailed information about all of them
        :param image: image to be used to create volume
        :param kwargs: optional args to create a volume
        """
        # When an image is given, boot the volume from it.
        if image is not None:
            kwargs["imageRef"] = image
        self.cinder.create_volume(size, **kwargs)
        self.cinder.list_volumes(detailed)
@types.convert(image={"type": "glance_image"})
@validation.add("restricted_parameters", param_names=["name", "display_name"])
@validation.add("image_exists", param_name="image", nullable=True)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_and_get_volume",
                    platform="openstack")
class CreateAndGetVolume(cinder_utils.CinderBasic):
    def run(self, size, image=None, **kwargs):
        """Create a volume and get the volume.

        Measure the "cinder show" command performance.

        :param size: volume size (integer, in GB) or
                     dictionary, must contain two values:
                     min - minimum size volumes will be created as;
                     max - maximum size volumes will be created as.
        :param image: image to be used to create volume
        :param kwargs: optional args to create a volume
        """
        if image is not None:
            kwargs["imageRef"] = image
        created = self.cinder.create_volume(size, **kwargs)
        # Fetch the freshly created volume by id to exercise "cinder show".
        self.cinder.get_volume(created.id)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="CinderVolumes.list_volumes",
                    platform="openstack")
class ListVolumes(cinder_utils.CinderBasic):
    def run(self, detailed=True, search_opts=None, marker=None,
            limit=None, sort=None):
        """List all volumes.

        This simple scenario tests the cinder list command by listing
        all the volumes.

        :param detailed: True if detailed information about volumes
                         should be listed
        :param search_opts: Search options to filter out volumes.
        :param marker: Begin returning volumes that appear later in the volume
                       list than that represented by this volume id.(For V2 or
                       higher)
        :param limit: Maximum number of volumes to return.
        :param sort: Sort information
        """
        # Single listing call; all filtering is delegated to Cinder.
        self.cinder.list_volumes(
            detailed,
            search_opts=search_opts,
            marker=marker,
            limit=limit,
            sort=sort,
        )
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="CinderVolumes.list_types", platform="openstack")
class ListTypes(cinder_utils.CinderBasic):
    def run(self, search_opts=None, is_public=None):
        """List all volume types.

        This simple scenario tests the cinder type-list command by listing
        all the volume types.

        :param search_opts: Options used when search for volume types
        :param is_public: If query public volume type
        """
        # Delegate directly to the cinder service wrapper.
        self.cinder.list_types(search_opts, is_public=is_public)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="CinderVolumes.list_transfers", platform="openstack")
class ListTransfers(cinder_utils.CinderBasic):
    def run(self, detailed=True, search_opts=None):
        """List all transfers.

        This simple scenario tests the "cinder transfer-list" command by
        listing all the volume transfers.

        :param detailed: If True, detailed information about volume transfer
                         should be listed
        :param search_opts: Search options to filter out volume transfers.
        """
        # One listing call; detail level and filters are passed straight in.
        self.cinder.list_transfers(detailed, search_opts=search_opts)
@types.convert(image={"type": "glance_image"})
@validation.add("restricted_parameters", param_names=["name", "display_name"],
                subdict="create_volume_kwargs")
@validation.add("restricted_parameters", param_names=["name", "display_name"],
                subdict="update_volume_kwargs")
@validation.add("image_exists", param_name="image", nullable=True)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_and_update_volume",
                    platform="openstack")
class CreateAndUpdateVolume(cinder_utils.CinderBasic):
    def run(self, size, image=None, create_volume_kwargs=None,
            update_volume_kwargs=None):
        """Create a volume and update its name and description.

        :param size: volume size (integer, in GB)
        :param image: image to be used to create volume
        :param create_volume_kwargs: dict, to be used to create volume
        :param update_volume_kwargs: dict, to be used to update volume
            update_volume_kwargs["update_name"]=True, if updating the
            name of volume.
            update_volume_kwargs["description"]="desp", if updating the
            description of volume.
        """
        create_kwargs = dict(create_volume_kwargs or {})
        update_kwargs = dict(update_volume_kwargs or {})
        if image is not None:
            create_kwargs["imageRef"] = image
        # "update_name" is a flag, not a Cinder field: pop it and replace it
        # with a generated name when requested.
        if update_kwargs.pop("update_name", False):
            update_kwargs["name"] = self.generate_random_name()
        volume = self.cinder.create_volume(size, **create_kwargs)
        self.cinder.update_volume(volume, **update_kwargs)
@types.convert(image={"type": "glance_image"})
@validation.add("restricted_parameters", param_names=["name", "display_name"])
@validation.add("image_exists", param_name="image", nullable=True)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_and_delete_volume",
                    platform="openstack")
class CreateAndDeleteVolume(cinder_utils.CinderBasic):
    def run(self, size, image=None, min_sleep=0, max_sleep=0, **kwargs):
        """Create and then delete a volume.

        Good for testing a maximal bandwidth of cloud. Optional 'min_sleep'
        and 'max_sleep' parameters allow the scenario to simulate a pause
        between volume creation and deletion (of random duration from
        [min_sleep, max_sleep]).

        :param size: volume size (integer, in GB) or
                     dictionary, must contain two values:
                     min - minimum size volumes will be created as;
                     max - maximum size volumes will be created as.
        :param image: image to be used to create volume
        :param min_sleep: minimum sleep time between volume creation and
                          deletion (in seconds)
        :param max_sleep: maximum sleep time between volume creation and
                          deletion (in seconds)
        :param kwargs: optional args to create a volume
        """
        if image is not None:
            kwargs["imageRef"] = image
        volume = self.cinder.create_volume(size, **kwargs)
        # Simulated workload pause between create and delete.
        self.sleep_between(min_sleep, max_sleep)
        self.cinder.delete_volume(volume)
@types.convert(image={"type": "glance_image"})
@validation.add("restricted_parameters", param_names=["name", "display_name"])
@validation.add("image_exists", param_name="image", nullable=True)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_volume",
                    platform="openstack")
class CreateVolume(cinder_utils.CinderBasic):
    def run(self, size, image=None, **kwargs):
        """Create a volume.

        Good test to check how influence amount of active volumes on
        performance of creating new.

        :param size: volume size (integer, in GB) or
                     dictionary, must contain two values:
                     min - minimum size volumes will be created as;
                     max - maximum size volumes will be created as.
        :param image: image to be used to create volume
        :param kwargs: optional args to create a volume
        """
        if image is not None:
            kwargs["imageRef"] = image
        self.cinder.create_volume(size, **kwargs)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
# NOTE: ("volumes") is just a parenthesized string, not a tuple; the
# trailing comma makes it the intended one-element sequence.
@validation.add("required_contexts", contexts=("volumes",))
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.modify_volume_metadata",
                    platform="openstack")
class ModifyVolumeMetadata(cinder_utils.CinderBasic):
    def run(self, sets=10, set_size=3, deletes=5, delete_size=3):
        """Modify a volume's metadata.
        This requires a volume to be created with the volumes
        context. Additionally, ``sets * set_size`` must be greater
        than or equal to ``deletes * delete_size``.
        :param sets: how many set_metadata operations to perform
        :param set_size: number of metadata keys to set in each
                         set_metadata operation
        :param deletes: how many delete_metadata operations to perform
        :param delete_size: number of metadata keys to delete in each
                            delete_metadata operation
        """
        # Only keys that were actually set can be deleted, so the number
        # of keys created must cover the number of keys removed.
        if sets * set_size < deletes * delete_size:
            raise exceptions.InvalidArgumentsException(
                "Not enough metadata keys will be created: "
                "Setting %(num_keys)s keys, but deleting %(num_deletes)s" %
                {"num_keys": sets * set_size,
                 "num_deletes": deletes * delete_size})
        # Pick a random pre-created volume from the "volumes" context.
        volume = random.choice(self.context["tenant"]["volumes"])
        keys = self.cinder.set_metadata(volume["id"], sets=sets,
                                        set_size=set_size)
        self.cinder.delete_metadata(volume["id"], keys=keys,
                                    deletes=deletes,
                                    delete_size=delete_size)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("restricted_parameters", param_names=["name", "display_name"])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_and_extend_volume",
                    platform="openstack")
class CreateAndExtendVolume(cinder_utils.CinderBasic):
    def run(self, size, new_size, min_sleep=0, max_sleep=0, **kwargs):
        """Create and extend a volume and then delete it.
        :param size: volume size (in GB) or
                     dictionary, must contain two values:
                     min - minimum size volumes will be created as;
                     max - maximum size volumes will be created as.
        :param new_size: volume new size (in GB) or
                         dictionary, must contain two values:
                         min - minimum size volumes will be created as;
                         max - maximum size volumes will be created as.
                         to extend.
                         Notice: should be bigger volume size
        :param min_sleep: minimum sleep time between volume extension and
                          deletion (in seconds)
        :param max_sleep: maximum sleep time between volume extension and
                          deletion (in seconds)
        :param kwargs: optional args to extend the volume
        """
        volume = self.cinder.create_volume(size, **kwargs)
        # Grow the volume in place; new_size must exceed the created size.
        self.cinder.extend_volume(volume, new_size=new_size)
        # Random pause emulates real-world delay between extend and delete.
        self.sleep_between(min_sleep, max_sleep)
        self.cinder.delete_volume(volume)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("restricted_parameters", param_names=["name", "display_name"])
# NOTE: trailing comma added — ("volumes") without it is a plain string,
# not the intended one-element tuple of required context names.
@validation.add("required_contexts", contexts=("volumes",))
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_from_volume_and_delete_volume",
                    platform="openstack")
class CreateFromVolumeAndDeleteVolume(cinder_utils.CinderBasic):
    def run(self, size, min_sleep=0, max_sleep=0, **kwargs):
        """Create volume from volume and then delete it.
        Scenario for testing volume clone.Optional 'min_sleep' and 'max_sleep'
        parameters allow the scenario to simulate a pause between volume
        creation and deletion (of random duration from [min_sleep, max_sleep]).
        :param size: volume size (in GB), or
                     dictionary, must contain two values:
                     min - minimum size volumes will be created as;
                     max - maximum size volumes will be created as.
                     Should be equal or bigger source volume size
        :param min_sleep: minimum sleep time between volume creation and
                          deletion (in seconds)
        :param max_sleep: maximum sleep time between volume creation and
                          deletion (in seconds)
        :param kwargs: optional args to create a volume
        """
        # Clone from a random pre-created volume of the "volumes" context.
        source_vol = random.choice(self.context["tenant"]["volumes"])
        volume = self.cinder.create_volume(size, source_volid=source_vol["id"],
                                           **kwargs)
        self.sleep_between(min_sleep, max_sleep)
        self.cinder.delete_volume(volume)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("restricted_parameters", param_names=["name", "display_name"])
# NOTE: trailing comma added — ("volumes") without it is a plain string,
# not the intended one-element tuple of required context names.
@validation.add("required_contexts", contexts=("volumes",))
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_and_delete_snapshot",
                    platform="openstack")
class CreateAndDeleteSnapshot(cinder_utils.CinderBasic):
    def run(self, force=False, min_sleep=0, max_sleep=0, **kwargs):
        """Create and then delete a volume-snapshot.
        Optional 'min_sleep' and 'max_sleep' parameters allow the scenario
        to simulate a pause between snapshot creation and deletion
        (of random duration from [min_sleep, max_sleep]).
        :param force: when set to True, allows snapshot of a volume when
                      the volume is attached to an instance
        :param min_sleep: minimum sleep time between snapshot creation and
                          deletion (in seconds)
        :param max_sleep: maximum sleep time between snapshot creation and
                          deletion (in seconds)
        :param kwargs: optional args to create a snapshot
        """
        # Snapshot a random pre-created volume from the "volumes" context.
        volume = random.choice(self.context["tenant"]["volumes"])
        snapshot = self.cinder.create_snapshot(volume["id"], force=force,
                                               **kwargs)
        self.sleep_between(min_sleep, max_sleep)
        self.cinder.delete_snapshot(snapshot)
@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("restricted_parameters", param_names=["name", "display_name"],
                subdict="create_volume_params")
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image")
@validation.add("required_services", services=[consts.Service.NOVA,
                                               consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder", "nova"]},
                    name="CinderVolumes.create_and_attach_volume",
                    platform="openstack")
class CreateAndAttachVolume(cinder_utils.CinderBasic,
                            nova_utils.NovaScenario):
    @logging.log_deprecated_args(
        "Use 'create_vm_params' for additional instance parameters.",
        "0.2.0", ["kwargs"], once=True)
    def run(self, size, image, flavor, create_volume_params=None,
            create_vm_params=None, **kwargs):
        """Create a VM and attach a volume to it.
        Simple test to create a VM and attach a volume, then
        detach the volume and delete volume/VM.
        :param size: volume size (integer, in GB) or
                     dictionary, must contain two values:
                     min - minimum size volumes will be created as;
                     max - maximum size volumes will be created as.
        :param image: Glance image name to use for the VM
        :param flavor: VM flavor name
        :param create_volume_params: optional arguments for volume creation
        :param create_vm_params: optional arguments for VM creation
        :param kwargs: (deprecated) optional arguments for VM creation
        """
        create_volume_params = create_volume_params or {}
        # 'kwargs' is the deprecated way to pass VM-creation arguments;
        # mixing it with the newer 'create_vm_params' is ambiguous.
        if kwargs and create_vm_params:
            raise ValueError("You can not set both 'kwargs' "
                             "and 'create_vm_params' attributes."
                             "Please use 'create_vm_params'.")
        # Prefer the new parameter, fall back to the deprecated one.
        create_vm_params = create_vm_params or kwargs or {}
        server = self._boot_server(image, flavor, **create_vm_params)
        volume = self.cinder.create_volume(size, **create_volume_params)
        self._attach_volume(server, volume)
        # Detach before deleting: a volume cannot be removed while in use.
        self._detach_volume(server, volume)
        self.cinder.delete_volume(volume)
        self._delete_server(server)
@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image")
@validation.add("restricted_parameters", param_names=["name", "display_name"],
                subdict="create_vm_params")
@validation.add("restricted_parameters", param_names=["name", "display_name"])
@validation.add("required_services", services=[consts.Service.NOVA,
                                               consts.Service.CINDER])
@validation.add("volume_type_exists", param_name="volume_type", nullable=True)
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder", "nova"]},
                    name="CinderVolumes.create_snapshot_and_attach_volume",
                    platform="openstack")
class CreateSnapshotAndAttachVolume(cinder_utils.CinderBasic,
                                    nova_utils.NovaScenario):
    def run(self, image, flavor, volume_type=None, size=None,
            create_vm_params=None, **kwargs):
        """Create vm, volume, snapshot and attach/detach volume.
        :param image: Glance image name to use for the VM
        :param flavor: VM flavor name
        :param volume_type: Name of volume type to use
        :param size: Volume size - dictionary, contains two values:
                     min - minimum size volumes will be created as;
                     max - maximum size volumes will be created as.
                     default values: {"min": 1, "max": 5}
        :param create_vm_params: optional arguments for VM creation
        :param kwargs: Optional parameters used during volume
                       snapshot creation.
        """
        # Fall back to the documented default size range.
        if size is None:
            size = {"min": 1, "max": 5}
        volume = self.cinder.create_volume(size, volume_type=volume_type)
        snapshot = self.cinder.create_snapshot(volume.id, force=False,
                                               **kwargs)
        create_vm_params = create_vm_params or {}
        server = self._boot_server(image, flavor, **create_vm_params)
        self._attach_volume(server, volume)
        self._detach_volume(server, volume)
        # Teardown in reverse dependency order: snapshot, volume, server.
        self.cinder.delete_snapshot(snapshot)
        self.cinder.delete_volume(volume)
        self._delete_server(server)
@types.convert(image={"type": "glance_image"},
               flavor={"type": "nova_flavor"})
@validation.add("image_valid_on_flavor", flavor_param="flavor",
                image_param="image")
@validation.add("required_services", services=[consts.Service.NOVA,
                                               consts.Service.CINDER])
@validation.add("restricted_parameters", param_names=["name", "display_name"],
                subdict="create_volume_kwargs")
@validation.add("restricted_parameters", param_names=["name", "display_name"],
                subdict="create_snapshot_kwargs")
@validation.add("restricted_parameters", param_names=["name", "display_name"],
                subdict="create_vm_params")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder", "nova"]},
                    name="CinderVolumes.create_nested_snapshots"
                         "_and_attach_volume",
                    platform="openstack")
class CreateNestedSnapshotsAndAttachVolume(cinder_utils.CinderBasic,
                                           nova_utils.NovaScenario):
    def run(self, image, flavor, size=None, nested_level=1,
            create_volume_kwargs=None, create_snapshot_kwargs=None,
            create_vm_params=None):
        """Create a volume from snapshot and attach/detach the volume
        This scenario create vm, volume, create it's snapshot, attach volume,
        then create new volume from existing snapshot and so on,
        with defined nested level, after all detach and delete them.
        volume->snapshot->volume->snapshot->volume ...
        :param image: Glance image name to use for the VM
        :param flavor: VM flavor name
        :param size: Volume size - dictionary, contains two values:
                     min - minimum size volumes will be created as;
                     max - maximum size volumes will be created as.
                     default values: {"min": 1, "max": 5}
        :param nested_level: amount of nested levels
        :param create_volume_kwargs: optional args to create a volume
        :param create_snapshot_kwargs: optional args to create a snapshot
        :param create_vm_params: optional arguments for VM creation
        """
        if size is None:
            size = {"min": 1, "max": 5}
        # NOTE: Volume size cannot be smaller than the snapshot size, so
        #       volume with specified size should be created to avoid
        #       size mismatching between volume and snapshot due random
        #       size in _create_volume method.
        size = random.randint(size["min"], size["max"])
        create_volume_kwargs = create_volume_kwargs or {}
        create_snapshot_kwargs = create_snapshot_kwargs or {}
        create_vm_params = create_vm_params or {}
        server = self._boot_server(image, flavor, **create_vm_params)
        # First link of the chain: a fresh volume plus its snapshot.
        source_vol = self.cinder.create_volume(size, **create_volume_kwargs)
        snapshot = self.cinder.create_snapshot(source_vol.id, force=False,
                                               **create_snapshot_kwargs)
        self._attach_volume(server, source_vol)
        nes_objs = [(server, source_vol, snapshot)]
        # Each further level creates a volume FROM the previous snapshot,
        # snapshots it, and attaches it to the same server.
        for i in range(nested_level - 1):
            volume = self.cinder.create_volume(size, snapshot_id=snapshot.id)
            snapshot = self.cinder.create_snapshot(volume.id, force=False,
                                                   **create_snapshot_kwargs)
            self._attach_volume(server, volume)
            nes_objs.append((server, volume, snapshot))
        # Tear down newest-first so no snapshot still depends on a volume
        # we are about to delete.
        nes_objs.reverse()
        for server, volume, snapshot in nes_objs:
            self._detach_volume(server, volume)
            self.cinder.delete_snapshot(snapshot)
            self.cinder.delete_volume(volume)
        self._delete_server(server)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("restricted_parameters", param_names=["name", "display_name"])
# NOTE: trailing comma added — ("volumes") without it is a plain string,
# not the intended one-element tuple of required context names.
@validation.add("required_contexts", contexts=("volumes",))
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_and_list_snapshots",
                    platform="openstack")
class CreateAndListSnapshots(cinder_utils.CinderBasic,
                             nova_utils.NovaScenario):
    def run(self, force=False, detailed=True, **kwargs):
        """Create and then list a volume-snapshot.
        :param force: when set to True, allows snapshot of a volume when
                      the volume is attached to an instance
        :param detailed: True if detailed information about snapshots
                         should be listed
        :param kwargs: optional args to create a snapshot
        """
        # Snapshot a random pre-created volume from the "volumes" context.
        volume = random.choice(self.context["tenant"]["volumes"])
        self.cinder.create_snapshot(volume["id"], force=force, **kwargs)
        self.cinder.list_snapshots(detailed)
@types.convert(image={"type": "glance_image"})
@validation.add("required_services", services=[consts.Service.CINDER,
                                               consts.Service.GLANCE])
@validation.add("restricted_parameters", param_names=["name", "display_name"])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder", "glance"]},
                    name="CinderVolumes.create_and_upload_volume_to_image",
                    platform="openstack")
class CreateAndUploadVolumeToImage(cinder_utils.CinderBasic,
                                   images.GlanceBasic):
    def run(self, size, image=None, force=False, container_format="bare",
            disk_format="raw", do_delete=True, **kwargs):
        """Create and upload a volume to image.
        :param size: volume size (integers, in GB), or
                     dictionary, must contain two values:
                     min - minimum size volumes will be created as;
                     max - maximum size volumes will be created as.
        :param image: image to be used to create volume.
        :param force: when set to True volume that is attached to an instance
                      could be uploaded to image
        :param container_format: image container format
        :param disk_format: disk format for image
        :param do_delete: deletes image and volume after uploading if True
        :param kwargs: optional args to create a volume
        """
        if image:
            kwargs["imageRef"] = image
        volume = self.cinder.create_volume(size, **kwargs)
        # Push the volume contents to Glance as a new image; 'image' is
        # deliberately rebound to the upload result here.
        image = self.cinder.upload_volume_to_image(
            volume, force=force, container_format=container_format,
            disk_format=disk_format
        )
        if do_delete:
            self.cinder.delete_volume(volume)
            self.glance.delete_image(image.id)
@validation.add("restricted_parameters", param_names=["name", "display_name"],
                subdict="create_volume_kwargs")
@validation.add("restricted_parameters", param_names="name",
                subdict="create_backup_kwargs")
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_cinder_services", services="cinder-backup")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_volume_backup",
                    platform="openstack")
class CreateVolumeBackup(cinder_utils.CinderBasic):
    def run(self, size, do_delete=True, create_volume_kwargs=None,
            create_backup_kwargs=None):
        """Create a volume backup.
        :param size: volume size in GB
        :param do_delete: if True, a volume and a volume backup will
                          be deleted after creation.
        :param create_volume_kwargs: optional args to create a volume
        :param create_backup_kwargs: optional args to create a volume backup
        """
        create_volume_kwargs = create_volume_kwargs or {}
        create_backup_kwargs = create_backup_kwargs or {}
        volume = self.cinder.create_volume(size, **create_volume_kwargs)
        backup = self.cinder.create_backup(volume.id, **create_backup_kwargs)
        # Optionally clean up both resources created by this iteration.
        if do_delete:
            self.cinder.delete_volume(volume)
            self.cinder.delete_backup(backup)
@validation.add("restricted_parameters", param_names=["name", "display_name"],
                subdict="create_volume_kwargs")
@validation.add("restricted_parameters", param_names="name",
                subdict="create_backup_kwargs")
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_cinder_services", services="cinder-backup")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_and_restore_volume_backup",
                    platform="openstack")
class CreateAndRestoreVolumeBackup(cinder_utils.CinderBasic):
    def run(self, size, do_delete=True, create_volume_kwargs=None,
            create_backup_kwargs=None):
        """Restore volume backup.
        :param size: volume size in GB
        :param do_delete: if True, the volume and the volume backup will
                          be deleted after creation.
        :param create_volume_kwargs: optional args to create a volume
        :param create_backup_kwargs: optional args to create a volume backup
        """
        create_volume_kwargs = create_volume_kwargs or {}
        create_backup_kwargs = create_backup_kwargs or {}
        volume = self.cinder.create_volume(size, **create_volume_kwargs)
        backup = self.cinder.create_backup(volume.id, **create_backup_kwargs)
        # The timed operation under test: restoring the freshly made backup.
        self.cinder.restore_backup(backup.id)
        if do_delete:
            self.cinder.delete_volume(volume)
            self.cinder.delete_backup(backup)
@validation.add("restricted_parameters", param_names=["name", "display_name"],
                subdict="create_volume_kwargs")
@validation.add("restricted_parameters", param_names="name",
                subdict="create_backup_kwargs")
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_cinder_services", services="cinder-backup")
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_and_list_volume_backups",
                    platform="openstack")
class CreateAndListVolumeBackups(cinder_utils.CinderBasic):
    def run(self, size, detailed=True, do_delete=True,
            create_volume_kwargs=None, create_backup_kwargs=None):
        """Create and then list a volume backup.
        :param size: volume size in GB
        :param detailed: True if detailed information about backup
                         should be listed
        :param do_delete: if True, a volume backup will be deleted
        :param create_volume_kwargs: optional args to create a volume
        :param create_backup_kwargs: optional args to create a volume backup
        """
        create_volume_kwargs = create_volume_kwargs or {}
        create_backup_kwargs = create_backup_kwargs or {}
        volume = self.cinder.create_volume(size, **create_volume_kwargs)
        backup = self.cinder.create_backup(volume.id, **create_backup_kwargs)
        # List backups while our freshly created one is present.
        self.cinder.list_backups(detailed)
        if do_delete:
            self.cinder.delete_volume(volume)
            self.cinder.delete_backup(backup)
@types.convert(image={"type": "glance_image"})
@validation.add("restricted_parameters", param_names=["name", "display_name"])
@validation.add("image_exists", param_name="image", nullable=True)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_volume_and_clone",
                    platform="openstack")
class CreateVolumeAndClone(cinder_utils.CinderBasic):
    def run(self, size, image=None, nested_level=1, **kwargs):
        """Create a volume, then clone it to another volume.
        This creates a volume, then clone it to another volume,
        and then clone the new volume to next volume...
        1. create source volume (from image)
        2. clone source volume to volume1
        3. clone volume1 to volume2
        4. clone volume2 to volume3
        5. ...
        :param size: volume size (integer, in GB) or
                     dictionary, must contain two values:
                     min - minimum size volumes will be created as;
                     max - maximum size volumes will be created as.
        :param image: image to be used to create initial volume
        :param nested_level: amount of nested levels
        :param kwargs: optional args to create volumes
        """
        if image:
            kwargs["imageRef"] = image
        source_vol = self.cinder.create_volume(size, **kwargs)
        # imageRef only applies to the initial volume; clones are created
        # from the previous volume, not from the image.
        kwargs.pop("imageRef", None)
        for i in range(nested_level):
            # Time each clone as its own atomic action so per-level clone
            # duration shows up separately in the report.
            with atomic.ActionTimer(self, "cinder.clone_volume"):
                source_vol = self.cinder.create_volume(
                    source_vol.size, source_volid=source_vol.id,
                    **kwargs)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("restricted_parameters", param_names=["name", "display_name"])
@validation.add("restricted_parameters", param_names=["name", "display_name"],
                subdict="create_snapshot_kwargs")
# NOTE: trailing comma added — ("volumes") without it is a plain string,
# not the intended one-element tuple of required context names.
@validation.add("required_contexts", contexts=("volumes",))
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_volume_from_snapshot",
                    platform="openstack")
class CreateVolumeFromSnapshot(cinder_utils.CinderBasic):
    def run(self, do_delete=True, create_snapshot_kwargs=None, **kwargs):
        """Create a volume-snapshot, then create a volume from this snapshot.
        :param do_delete: if True, a snapshot and a volume will
                          be deleted after creation.
        :param create_snapshot_kwargs: optional args to create a snapshot
        :param kwargs: optional args to create a volume
        """
        create_snapshot_kwargs = create_snapshot_kwargs or {}
        # Snapshot a random pre-created volume from the "volumes" context.
        src_volume = random.choice(self.context["tenant"]["volumes"])
        snapshot = self.cinder.create_snapshot(src_volume["id"],
                                               **create_snapshot_kwargs)
        # Match the source size: a volume cannot be smaller than its snapshot.
        volume = self.cinder.create_volume(src_volume["size"],
                                           snapshot_id=snapshot.id,
                                           **kwargs)
        if do_delete:
            # Delete the volume first; the snapshot backs the volume.
            self.cinder.delete_volume(volume)
            self.cinder.delete_snapshot(snapshot)
@types.convert(image={"type": "glance_image"})
@validation.add("restricted_parameters", param_names=["name", "display_name"])
@validation.add("image_exists", param_name="image", nullable=True)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_volume_"
                         "and_update_readonly_flag",
                    platform="openstack")
class CreateVolumeAndUpdateReadonlyFlag(cinder_utils.CinderBasic):
    def run(self, size, image=None, read_only=True, **kwargs):
        """Create a volume and then update its readonly flag.
        :param size: volume size (integer, in GB)
        :param image: image to be used to create volume
        :param read_only: The value to indicate whether to update volume to
                          read-only access mode
        :param kwargs: optional args to create a volume
        """
        volume_args = dict(kwargs)
        # An image reference makes the created volume bootable.
        if image:
            volume_args["imageRef"] = image
        volume = self.cinder.create_volume(size, **volume_args)
        self.cinder.update_readonly_flag(volume.id, read_only=read_only)
@types.convert(image={"type": "glance_image"})
@validation.add("restricted_parameters", param_names=["name", "display_name"])
@validation.add("image_exists", param_name="image", nullable=True)
@validation.add("required_services", services=[consts.Service.CINDER])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["cinder"]},
                    name="CinderVolumes.create_and_accept_transfer",
                    platform="openstack")
class CreateAndAcceptTransfer(cinder_utils.CinderBasic):
    def run(self, size, image=None, **kwargs):
        """Create a volume transfer, then accept it
        Measure the "cinder transfer-create" and "cinder transfer-accept"
        command performance.
        :param size: volume size (integer, in GB)
        :param image: image to be used to create initial volume
        :param kwargs: optional args to create a volume
        """
        if image:
            kwargs["imageRef"] = image
        volume = self.cinder.create_volume(size, **kwargs)
        # Create the transfer, then immediately accept it with the auth key
        # returned by transfer-create, exercising both API calls in sequence.
        transfer = self.cinder.transfer_create(volume.id)
        self.cinder.transfer_accept(transfer.id, auth_key=transfer.auth_key)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,609
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/sahara/sahara_image.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import validation
from rally import exceptions
from rally_openstack.common import consts
from rally_openstack.common import osclients
from rally_openstack.common.services.image import image as image_services
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
from rally_openstack.task.scenarios.sahara import utils
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="sahara_image", platform="openstack", order=440)
class SaharaImage(context.OpenStackContext):
    """Context class for adding and tagging Sahara images."""
    # Either register a brand new image (URL + username + plugin info) or
    # reuse an existing public image by UUID -- the "oneOf" forbids mixing.
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "image_uuid": {
                "type": "string"
            },
            "image_url": {
                "type": "string",
            },
            "username": {
                "type": "string"
            },
            "plugin_name": {
                "type": "string",
            },
            "hadoop_version": {
                "type": "string",
            }
        },
        "oneOf": [
            {"description": "Create an image.",
             "required": ["image_url", "username", "plugin_name",
                          "hadoop_version"]},
            {"description": "Use an existing image.",
             "required": ["image_uuid"]}
        ],
        "additionalProperties": False
    }
    def _create_image(self, hadoop_version, image_url, plugin_name, user,
                      user_name):
        """Register image_url in Glance and tag it for Sahara.

        Creates a qcow2 image from the given location with this user's
        credentials, then sets the Sahara username and plugin/version tags.
        Returns the new image id.
        """
        clients = osclients.Clients(user["credential"])
        image_service = image_services.Image(
            clients, name_generator=self.generate_random_name)
        image = image_service.create_image(container_format="bare",
                                           image_location=image_url,
                                           disk_format="qcow2")
        clients.sahara().images.update_image(
            image_id=image.id, user_name=user_name, desc="")
        clients.sahara().images.update_tags(
            image_id=image.id, new_tags=[plugin_name, hadoop_version])
        return image.id
    def setup(self):
        """Record a Sahara image id per tenant, creating images if needed."""
        utils.init_sahara_context(self)
        self.context["sahara"]["images"] = {}
        # The user may want to use the existing image. In this case he should
        # make sure that the image is public and has all required metadata.
        image_uuid = self.config.get("image_uuid")
        # Images we create ourselves must be cleaned up; a user-supplied
        # image is left alone.
        self.context["sahara"]["need_image_cleanup"] = not image_uuid
        if image_uuid:
            # Using the first user to check the existing image.
            user = self.context["users"][0]
            clients = osclients.Clients(user["credential"])
            image = clients.glance().images.get(image_uuid)
            visibility = None
            # Glance v1 exposes 'is_public'; v2 exposes 'visibility'.
            if hasattr(image, "is_public"):
                visibility = "public" if image.is_public else "private"
            else:
                visibility = image["visibility"]
            if visibility != "public":
                raise exceptions.ContextSetupFailure(
                    ctx_name=self.get_name(),
                    msg="Use only public image for sahara_image context"
                )
            image_id = image_uuid
            # A public image can be shared by every tenant.
            for user, tenant_id in self._iterate_per_tenants():
                self.context["tenants"][tenant_id]["sahara"]["image"] = (
                    image_id)
        else:
            # No existing image: create and tag one per tenant.
            for user, tenant_id in self._iterate_per_tenants():
                image_id = self._create_image(
                    hadoop_version=self.config["hadoop_version"],
                    image_url=self.config["image_url"],
                    plugin_name=self.config["plugin_name"],
                    user=user,
                    user_name=self.config["username"])
                self.context["tenants"][tenant_id]["sahara"]["image"] = (
                    image_id)
    def cleanup(self):
        """Delete created Glance images; skip user-supplied ones."""
        if self.context["sahara"]["need_image_cleanup"]:
            resource_manager.cleanup(names=["glance.images"],
                                     users=self.context.get("users", []),
                                     superclass=self.__class__,
                                     task_id=self.get_owner_id())
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,610
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/doc/test_docker_readme.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from docutils import frontend
from docutils import nodes
from docutils.parsers import rst
from docutils import utils
import os
import re
import sys
from unittest import mock
import rally_openstack
from tests.unit import test
# Repository root: two levels above the rally_openstack package's
# __init__.py (i.e. the checkout containing CHANGELOG.rst etc.).
ROOT_DIR = os.path.dirname(os.path.dirname(rally_openstack.__file__))
class DockerReadmeTestCase(test.TestCase):
    """Check that DOCKER_README.md mentions the latest release version."""
    # Matches a changelog section title like "[1.2.3]".
    RE_RELEASE = re.compile(r"\[(?P<version>[0-9]+\.[0-9]+.[0-9]+)\]")
    def get_releases(self):
        """Parse CHANGELOG.rst and return release versions, newest first."""
        full_path = os.path.join(ROOT_DIR, "CHANGELOG.rst")
        with open(full_path) as f:
            changelog = f.read()
        # Silence docutils warnings that would otherwise go to stderr.
        with mock.patch.object(sys, "stderr"):
            parser = rst.Parser()
            settings = frontend.OptionParser(
                components=(rst.Parser,)).get_default_values()
            document = utils.new_document(changelog, settings)
            parser.parse(changelog, document)
        changelog = document.children
        if len(changelog) != 1:
            self.fail("'%s' file should contain one global section "
                      "with subsections for each release." % full_path)
        releases = []
        # Each subsection whose title looks like "[x.y.z]" is a release.
        for node in changelog[0].children:
            if not isinstance(node, nodes.section):
                continue
            title = node.astext().split("\n", 1)[0]
            result = self.RE_RELEASE.match(title)
            if result:
                releases.append(result.groupdict()["version"])
        if not releases:
            self.fail("'%s' doesn't mention any releases..." % full_path)
        return releases
    def test_mentioned_latest_version(self):
        """Fail if the README still references the previous release."""
        full_path = os.path.join(ROOT_DIR, "DOCKER_README.md")
        with open(full_path) as f:
            readme = f.read()
        releases = self.get_releases()
        # assumes CHANGELOG lists releases newest-first -- TODO confirm
        latest_release = releases[0]
        previous_release = releases[1]
        print("All discovered releases: %s" % ", ".join(releases))
        found = False
        for i, line in enumerate(readme.split("\n"), 1):
            if latest_release in line:
                found = True
            elif previous_release in line:
                self.fail(
                    "You need to change %s to %s in all places where the "
                    "latest release is mentioned."
                    "\n Filename: %s"
                    "\n Line Number: %s"
                    "\n Line: %s" %
                    (previous_release, latest_release, full_path, i, line))
        if not found:
            self.fail("No latest nor previous release is found at README file "
                      "for our Docker image. It looks like the format of it "
                      "had changed. Please adopt the current test suite.")
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,611
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/magnum/utils.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import random
import string
import time
from kubernetes import client as k8s_config
from kubernetes.client.api import core_v1_api
from kubernetes.client import api_client
from kubernetes.client.rest import ApiException
from rally.common import cfg
from rally.common import utils as common_utils
from rally import exceptions
from rally.task import atomic
from rally.task import utils
from rally_openstack.task import scenario
CONF = cfg.CONF
class MagnumScenario(scenario.OpenStackScenario):
    """Base class for Magnum scenarios with basic atomic actions."""

    @atomic.action_timer("magnum.list_cluster_templates")
    def _list_cluster_templates(self, **kwargs):
        """Return list of cluster_templates.

        :param limit: (Optional) The maximum number of results to return
            per request, if:
            1) limit > 0, the maximum number of cluster_templates to return.
            2) limit param is NOT specified (None), the number of items
            returned respect the maximum imposed by the Magnum API
            (see Magnum's api.max_limit option).
        :param kwargs: Optional additional arguments for cluster_templates
            listing
        :returns: cluster_templates list
        """
        return self.clients("magnum").cluster_templates.list(**kwargs)

    @atomic.action_timer("magnum.create_cluster_template")
    def _create_cluster_template(self, **kwargs):
        """Create a cluster_template.

        Any caller-supplied "name" is overwritten with a Rally-generated
        random name so created resources can be recognized and cleaned up.

        :param kwargs: optional additional arguments for cluster_template
            creation
        :returns: magnum cluster_template
        """
        kwargs["name"] = self.generate_random_name()
        return self.clients("magnum").cluster_templates.create(**kwargs)

    @atomic.action_timer("magnum.get_cluster_template")
    def _get_cluster_template(self, cluster_template):
        """Return details of the specified cluster template.

        :param cluster_template: ID or name of the cluster template to show
        :returns: clustertemplate detail
        """
        return self.clients("magnum").cluster_templates.get(cluster_template)

    @atomic.action_timer("magnum.list_clusters")
    def _list_clusters(self, limit=None, **kwargs):
        """Return list of clusters.

        :param limit: Optional, the maximum number of results to return
            per request, if:
            1) limit > 0, the maximum number of clusters to return.
            2) limit param is NOT specified (None), the number of items
            returned respect the maximum imposed by the Magnum API
            (see Magnum's api.max_limit option).
        :param kwargs: Optional additional arguments for clusters listing
        :returns: clusters list
        """
        return self.clients("magnum").clusters.list(limit=limit, **kwargs)

    @atomic.action_timer("magnum.create_cluster")
    def _create_cluster(self, cluster_template, node_count, **kwargs):
        """Create a cluster and wait until it reaches CREATE_COMPLETE.

        :param cluster_template: cluster_template for the cluster
        :param node_count: the cluster node count
        :param kwargs: optional additional arguments for cluster creation
        :returns: magnum cluster
        :raises rally.exceptions: via wait_for_status on failure/timeout
        """
        name = self.generate_random_name()
        cluster = self.clients("magnum").clusters.create(
            name=name, cluster_template_id=cluster_template,
            node_count=node_count, **kwargs)
        # Give Magnum some time to actually start building before the
        # first status poll.
        common_utils.interruptable_sleep(
            CONF.openstack.magnum_cluster_create_prepoll_delay)
        # Magnum clusters are keyed by "uuid" rather than the default "id".
        cluster = utils.wait_for_status(
            cluster,
            ready_statuses=["CREATE_COMPLETE"],
            failure_statuses=["CREATE_FAILED", "ERROR"],
            update_resource=utils.get_from_manager(),
            timeout=CONF.openstack.magnum_cluster_create_timeout,
            check_interval=CONF.openstack.magnum_cluster_create_poll_interval,
            id_attr="uuid"
        )
        return cluster

    @atomic.action_timer("magnum.get_cluster")
    def _get_cluster(self, cluster):
        """Return details of the specified cluster.

        :param cluster: ID or name of the cluster to show
        :returns: cluster detail
        """
        return self.clients("magnum").clusters.get(cluster)

    @atomic.action_timer("magnum.get_ca_certificate")
    def _get_ca_certificate(self, cluster_uuid):
        """Get CA certificate for this cluster.

        :param cluster_uuid: uuid of the cluster
        """
        return self.clients("magnum").certificates.get(cluster_uuid)

    @atomic.action_timer("magnum.create_ca_certificate")
    def _create_ca_certificate(self, csr_req):
        """Send csr to Magnum to have it signed.

        :param csr_req: {"cluster_uuid": <uuid>, "csr": <csr file content>}
        """
        return self.clients("magnum").certificates.create(**csr_req)

    def _get_k8s_api_client(self):
        """Build a Kubernetes CoreV1Api client for the tenant's cluster.

        TLS material is looked up in the context's "ca_certs_directory"
        using the naming convention <cluster_uuid>.key / <cluster_uuid>.crt
        / <cluster_uuid>_ca.crt; when the cluster template has TLS disabled
        no certificates are configured.
        """
        cluster_uuid = self.context["tenant"]["cluster"]
        cluster = self._get_cluster(cluster_uuid)
        cluster_template = self._get_cluster_template(
            cluster.cluster_template_id)
        key_file = None
        cert_file = None
        ca_certs = None
        if not cluster_template.tls_disabled:
            # NOTE(review): "dir" shadows the builtin of the same name.
            dir = self.context["ca_certs_directory"]
            key_file = cluster_uuid + ".key"
            key_file = os.path.join(dir, key_file)
            cert_file = cluster_uuid + ".crt"
            cert_file = os.path.join(dir, cert_file)
            ca_certs = cluster_uuid + "_ca.crt"
            ca_certs = os.path.join(dir, ca_certs)
        # The configuration class was renamed in python-kubernetes-client
        # 4.0.0; support both old and new APIs.
        if hasattr(k8s_config, "ConfigurationObject"):
            # k8sclient < 4.0.0
            config = k8s_config.ConfigurationObject()
        else:
            config = k8s_config.Configuration()
        config.host = cluster.api_address
        config.ssl_ca_cert = ca_certs
        config.cert_file = cert_file
        config.key_file = key_file
        # ApiClient's signature changed in 4.0.0 as well (keyword vs
        # positional configuration argument).
        if hasattr(k8s_config, "ConfigurationObject"):
            # k8sclient < 4.0.0
            client = api_client.ApiClient(config=config)
        else:
            client = api_client.ApiClient(config)
        return core_v1_api.CoreV1Api(client)

    @atomic.action_timer("magnum.k8s_list_v1pods")
    def _list_v1pods(self):
        """List all pods.
        """
        k8s_api = self._get_k8s_api_client()
        # NOTE(review): docstring says "pods" but this calls list_node,
        # which lists cluster nodes -- confirm whether
        # list_namespaced_pod was intended here.
        return k8s_api.list_node(namespace="default")

    @atomic.action_timer("magnum.k8s_create_v1pod")
    def _create_v1pod(self, manifest):
        """Create a pod on the cluster and wait until it is Ready.

        :param manifest: manifest use to create the pod
        :returns: the pod object once its "Ready" condition is "True"
        :raises exceptions.TimeoutException: if the pod is not Ready within
            CONF.openstack.k8s_pod_create_timeout seconds
        """
        k8s_api = self._get_k8s_api_client()
        # Append a random 5-letter suffix so concurrent iterations do not
        # collide on the pod name.
        podname = manifest["metadata"]["name"] + "-"
        for i in range(5):
            podname = podname + random.choice(string.ascii_lowercase)
        manifest["metadata"]["name"] = podname
        # Retry creation on HTTP 403 (up to 150 attempts, 2s apart); any
        # other API error is re-raised immediately.
        for i in range(150):
            try:
                k8s_api.create_namespaced_pod(body=manifest,
                                              namespace="default")
                break
            except ApiException as e:
                if e.status != 403:
                    raise
                time.sleep(2)
        # Poll the pod until its "Ready" condition becomes "True".
        start = time.time()
        while True:
            resp = k8s_api.read_namespaced_pod(
                name=podname, namespace="default")
            if resp.status.conditions:
                for condition in resp.status.conditions:
                    if condition.type.lower() == "ready" and \
                            condition.status.lower() == "true":
                        return resp
            if (time.time() - start > CONF.openstack.k8s_pod_create_timeout):
                raise exceptions.TimeoutException(
                    desired_status="Ready",
                    resource_name=podname,
                    resource_type="Pod",
                    resource_id=resp.metadata.uid,
                    resource_status=resp.status,
                    timeout=CONF.openstack.k8s_pod_create_timeout)
            common_utils.interruptable_sleep(
                CONF.openstack.k8s_pod_create_poll_interval)

    @atomic.action_timer("magnum.k8s_list_v1rcs")
    def _list_v1rcs(self):
        """List all replication controllers in the "default" namespace.
        """
        k8s_api = self._get_k8s_api_client()
        return k8s_api.list_namespaced_replication_controller(
            namespace="default")

    @atomic.action_timer("magnum.k8s_create_v1rc")
    def _create_v1rc(self, manifest):
        """Create an rc on the cluster and wait for all replicas.

        :param manifest: manifest use to create the replication controller
        :returns: the rc object once status.replicas matches spec.replicas
        :raises exceptions.TimeoutException: if the replica count does not
            converge within CONF.openstack.k8s_rc_create_timeout seconds
        """
        k8s_api = self._get_k8s_api_client()
        # Append a random 5-letter suffix so concurrent iterations do not
        # collide on the rc name.
        suffix = "-"
        for i in range(5):
            suffix = suffix + random.choice(string.ascii_lowercase)
        rcname = manifest["metadata"]["name"] + suffix
        manifest["metadata"]["name"] = rcname
        resp = k8s_api.create_namespaced_replication_controller(
            body=manifest,
            namespace="default")
        # Desired replica count as accepted by the API server.
        expectd_status = resp.spec.replicas
        start = time.time()
        while True:
            resp = k8s_api.read_namespaced_replication_controller(
                name=rcname,
                namespace="default")
            status = resp.status.replicas
            if status == expectd_status:
                return resp
            else:
                if time.time() - start > CONF.openstack.k8s_rc_create_timeout:
                    raise exceptions.TimeoutException(
                        desired_status=expectd_status,
                        resource_name=rcname,
                        resource_type="ReplicationController",
                        resource_id=resp.metadata.uid,
                        resource_status=status,
                        timeout=CONF.openstack.k8s_rc_create_timeout)
                common_utils.interruptable_sleep(
                    CONF.openstack.k8s_rc_create_poll_interval)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,612
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/monasca/test_metrics.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from rally_openstack.task.scenarios.monasca import metrics
from tests.unit import test
@ddt.ddt
class MonascaMetricsTestCase(test.ScenarioTestCase):
    """Unit tests for the Monasca ListMetrics scenario."""

    # Run once without a region and once with an explicit one to verify
    # the argument is forwarded untouched.
    @ddt.data(
        {"region": None},
        {"region": "fake_region"},
    )
    @ddt.unpack
    def test_list_metrics(self, region=None):
        """run() must pass "region" straight through to _list_metrics."""
        scenario = metrics.ListMetrics(self.context)
        self.region = region
        scenario._list_metrics = mock.MagicMock()
        scenario.run(region=self.region)
        scenario._list_metrics.assert_called_once_with(region=self.region)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,613
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/gnocchi/resource.py
|
# Copyright 2017 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.gnocchi import utils as gnocchiutils
"""Scenarios for Gnocchi resource."""
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["gnocchi.resource"]},
                    name="GnocchiResource.create_resource")
class CreateResource(gnocchiutils.GnocchiBase):
    """Scenario that creates a single Gnocchi resource."""

    def run(self, resource_type="generic"):
        """Create resource.

        A random Rally name is used so cleanup can identify the resource.

        :param resource_type: Type of the resource
        """
        self.gnocchi.create_resource(self.generate_random_name(),
                                     resource_type=resource_type)
@validation.add("required_services", services=[consts.Service.GNOCCHI])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["gnocchi.resource"]},
                    name="GnocchiResource.create_delete_resource")
class CreateDeleteResource(gnocchiutils.GnocchiBase):
    """Scenario that creates a Gnocchi resource and deletes it again."""

    def run(self, resource_type="generic"):
        """Create resource and then delete it.

        :param resource_type: Type of the resource
        """
        created = self.gnocchi.create_resource(
            self.generate_random_name(), resource_type=resource_type)
        self.gnocchi.delete_resource(created["id"])
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,614
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/vm/test_image_command_customizer.py
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Tests for the image customizer using a command execution."""
from unittest import mock
from rally import exceptions
from rally_openstack.task.contexts.vm import image_command_customizer
from tests.unit import test
BASE = "rally_openstack.task.contexts.vm.image_command_customizer"
class ImageCommandCustomizerContextVMTestCase(test.TestCase):
    """Unit tests for ImageCommandCustomizerContext.customize_image."""

    def setUp(self):
        super(ImageCommandCustomizerContextVMTestCase, self).setUp()
        # Minimal context config with a non-default SSH port (1022) and a
        # script-file command so the tests can verify both are forwarded
        # to VMScenario._run_command.
        self.context = {
            "task": mock.MagicMock(),
            "config": {
                "image_command_customizer": {
                    "image": {"name": "image"},
                    "flavor": {"name": "flavor"},
                    "username": "fedora",
                    "password": "foo_password",
                    "floating_network": "floating",
                    "port": 1022,
                    "command": {
                        "interpreter": "foo_interpreter",
                        "script_file": "foo_script"
                    }
                }
            },
            "admin": {
                "credential": "credential",
            }
        }
        # Keypair/floating-ip fixtures passed to customize_image().
        self.user = {"keypair": {"private": "foo_private"}}
        self.fip = {"ip": "foo_ip"}

    @mock.patch("%s.vm_utils.VMScenario" % BASE)
    def test_customize_image(self, mock_vm_scenario):
        """Exit status 0: the command result is returned unchanged."""
        mock_vm_scenario.return_value._run_command.return_value = (
            0, "foo_stdout", "foo_stderr")
        customizer = image_command_customizer.ImageCommandCustomizerContext(
            self.context)
        retval = customizer.customize_image(server=None, ip=self.fip,
                                            user=self.user)
        mock_vm_scenario.assert_called_once_with(customizer.context)
        # The credentials, port, private key and command must come from
        # the context config / user fixtures verbatim.
        mock_vm_scenario.return_value._run_command.assert_called_once_with(
            "foo_ip", 1022, "fedora", "foo_password", pkey="foo_private",
            command={"interpreter": "foo_interpreter",
                     "script_file": "foo_script"})
        self.assertEqual((0, "foo_stdout", "foo_stderr"), retval)

    @mock.patch("%s.vm_utils.VMScenario" % BASE)
    def test_customize_image_fail(self, mock_vm_scenario):
        """Non-zero exit status raises ScriptError carrying the output."""
        mock_vm_scenario.return_value._run_command.return_value = (
            1, "foo_stdout", "foo_stderr")
        customizer = image_command_customizer.ImageCommandCustomizerContext(
            self.context)
        exc = self.assertRaises(
            exceptions.ScriptError, customizer.customize_image,
            server=None, ip=self.fip, user=self.user)
        # Both stdout and stderr must be reported in the error message.
        str_exc = str(exc)
        self.assertIn("foo_stdout", str_exc)
        self.assertIn("foo_stderr", str_exc)
        mock_vm_scenario.return_value._run_command.assert_called_once_with(
            "foo_ip", 1022, "fedora", "foo_password", pkey="foo_private",
            command={"interpreter": "foo_interpreter",
                     "script_file": "foo_script"})
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,615
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/quotas/neutron_quotas.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
class NeutronQuotas(object):
    """Management of Neutron quotas.

    Wraps the Neutron client's quota calls for Rally's quotas context.
    """

    # JSON schema for the configurable quota values. Every quota is an
    # integer >= -1, where -1 means "unlimited"; unknown keys are
    # rejected via additionalProperties.
    QUOTAS_SCHEMA = {
        "type": "object",
        "additionalProperties": False,
        "properties": {
            quota_name: {"type": "integer", "minimum": -1}
            for quota_name in (
                "network", "subnet", "port", "router", "floatingip",
                "security_group", "security_group_rule", "pool", "vip",
                "health_monitor", "trunk",
            )
        }
    }

    def __init__(self, clients):
        # ``clients`` exposes a ``neutron()`` factory for the API client.
        self.clients = clients

    def update(self, tenant_id, **kwargs):
        """Set the given quota values for a tenant.

        :param tenant_id: id of the tenant to update
        :param kwargs: quota name -> value pairs (see QUOTAS_SCHEMA)
        """
        self.clients.neutron().update_quota(tenant_id,
                                            body={"quota": kwargs})

    def delete(self, tenant_id):
        """Reset quotas to defaults and tag database objects as deleted.

        :param tenant_id: id of the tenant whose quotas are reset
        """
        self.clients.neutron().delete_quota(tenant_id)

    def get(self, tenant_id):
        """Return the current quota values for a tenant.

        :param tenant_id: id of the tenant to query
        :returns: dict of quota name -> value
        """
        return self.clients.neutron().show_quota(tenant_id)["quota"]
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,616
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/vm/test_vmtasks.py
|
# Copyright 2013: Rackspace UK
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from unittest import mock
import ddt
from rally.common import validation
from rally import exceptions
from rally_openstack.task.scenarios.vm import vmtasks
from tests.unit import test
BASE = "rally_openstack.task.scenarios.vm.vmtasks"
@ddt.ddt
class VMTasksTestCase(test.ScenarioTestCase):
def setUp(self):
super(VMTasksTestCase, self).setUp()
self.context.update({"user": {"keypair": {"name": "keypair_name"},
"credential": mock.MagicMock()}})
cinder_patcher = mock.patch(
"rally_openstack.common.services.storage.block.BlockStorage")
self.cinder = cinder_patcher.start().return_value
self.cinder.create_volume.return_value = mock.Mock(id="foo_volume")
self.addCleanup(cinder_patcher.stop)
def create_env(self, scenario):
self.ip = {"id": "foo_id", "ip": "foo_ip", "is_floating": True}
scenario._boot_server_with_fip = mock.Mock(
return_value=("foo_server", self.ip))
scenario._wait_for_ping = mock.Mock()
scenario._delete_server_with_fip = mock.Mock()
scenario._run_command = mock.MagicMock(
return_value=(0, "{\"foo\": 42}", "foo_err"))
scenario.add_output = mock.Mock()
return scenario
def test_boot_runcommand_delete(self):
scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context))
scenario._run_command = mock.MagicMock(
return_value=(0, "{\"foo\": 42}", "foo_err"))
scenario.run("foo_flavor", image="foo_image",
command={"script_file": "foo_script",
"interpreter": "foo_interpreter"},
username="foo_username",
password="foo_password",
use_floating_ip="use_fip",
floating_network="ext_network",
force_delete="foo_force",
volume_args={"size": 16},
foo_arg="foo_value")
self.cinder.create_volume.assert_called_once_with(16, imageRef=None)
scenario._boot_server_with_fip.assert_called_once_with(
"foo_image", "foo_flavor", key_name="keypair_name",
use_floating_ip="use_fip", floating_network="ext_network",
block_device_mapping={"vdrally": "foo_volume:::1"},
foo_arg="foo_value")
scenario._wait_for_ping.assert_called_once_with("foo_ip")
scenario._run_command.assert_called_once_with(
"foo_ip", 22, "foo_username", "foo_password",
command={"script_file": "foo_script",
"interpreter": "foo_interpreter"})
scenario._delete_server_with_fip.assert_called_once_with(
"foo_server", self.ip, force_delete="foo_force")
scenario.add_output.assert_called_once_with(
complete={"chart_plugin": "TextArea",
"data": [
"StdErr: foo_err",
"StdOut:",
"{\"foo\": 42}"],
"title": "Script Output"})
@ddt.data(
{"output": (0, "", ""),
"expected": [{"complete": {"chart_plugin": "TextArea",
"data": [
"StdErr: (none)",
"StdOut:",
""],
"title": "Script Output"}}]},
{"output": (1, "{\"foo\": 42}", ""), "raises": exceptions.ScriptError},
{"output": ("", 1, ""), "raises": TypeError},
{"output": (0, "{\"foo\": 42}", ""),
"expected": [{"complete": {"chart_plugin": "TextArea",
"data": [
"StdErr: (none)",
"StdOut:",
"{\"foo\": 42}"],
"title": "Script Output"}}]},
{"output": (0, "{\"additive\": [1, 2]}", ""),
"expected": [{"complete": {"chart_plugin": "TextArea",
"data": [
"StdErr: (none)",
"StdOut:", "{\"additive\": [1, 2]}"],
"title": "Script Output"}}]},
{"output": (0, "{\"complete\": [3, 4]}", ""),
"expected": [{"complete": {"chart_plugin": "TextArea",
"data": [
"StdErr: (none)",
"StdOut:",
"{\"complete\": [3, 4]}"],
"title": "Script Output"}}]},
{"output": (0, "{\"additive\": [1, 2], \"complete\": [3, 4]}", ""),
"expected": [{"additive": 1}, {"additive": 2},
{"complete": 3}, {"complete": 4}]}
)
@ddt.unpack
def test_boot_runcommand_delete_add_output(self, output,
expected=None, raises=None):
scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context))
scenario._run_command.return_value = output
kwargs = {"flavor": "foo_flavor",
"image": "foo_image",
"command": {"remote_path": "foo"},
"username": "foo_username",
"password": "foo_password",
"use_floating_ip": "use_fip",
"floating_network": "ext_network",
"force_delete": "foo_force",
"volume_args": {"size": 16},
"foo_arg": "foo_value"}
if raises:
self.assertRaises(raises, scenario.run, **kwargs)
self.assertFalse(scenario.add_output.called)
else:
scenario.run(**kwargs)
calls = [mock.call(**kw) for kw in expected]
scenario.add_output.assert_has_calls(calls, any_order=True)
self.cinder.create_volume.assert_called_once_with(
16, imageRef=None)
scenario._boot_server_with_fip.assert_called_once_with(
"foo_image", "foo_flavor", key_name="keypair_name",
use_floating_ip="use_fip", floating_network="ext_network",
block_device_mapping={"vdrally": "foo_volume:::1"},
foo_arg="foo_value")
scenario._run_command.assert_called_once_with(
"foo_ip", 22, "foo_username", "foo_password",
command={"remote_path": "foo"})
scenario._delete_server_with_fip.assert_called_once_with(
"foo_server", self.ip, force_delete="foo_force")
def test_boot_runcommand_delete_command_timeouts(self):
scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context))
scenario._run_command.side_effect = exceptions.SSHTimeout()
self.assertRaises(exceptions.SSHTimeout,
scenario.run,
"foo_flavor", "foo_image", "foo_interpreter",
"foo_script", "foo_username")
scenario._delete_server_with_fip.assert_called_once_with(
"foo_server", self.ip, force_delete=False)
self.assertFalse(scenario.add_output.called)
def test_boot_runcommand_delete_ping_wait_timeouts(self):
scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context))
scenario._wait_for_ping.side_effect = exceptions.TimeoutException(
resource_type="foo_resource",
resource_name="foo_name",
resource_id="foo_id",
desired_status="foo_desired_status",
resource_status="foo_resource_status",
timeout=2)
exc = self.assertRaises(exceptions.TimeoutException,
scenario.run,
"foo_image", "foo_flavor", "foo_interpreter",
"foo_script", "foo_username",
wait_for_ping=True)
self.assertEqual(exc.kwargs["resource_type"], "foo_resource")
self.assertEqual(exc.kwargs["resource_name"], "foo_name")
self.assertEqual(exc.kwargs["resource_id"], "foo_id")
self.assertEqual(exc.kwargs["desired_status"], "foo_desired_status")
self.assertEqual(exc.kwargs["resource_status"], "foo_resource_status")
scenario._delete_server_with_fip.assert_called_once_with(
"foo_server", self.ip, force_delete=False)
self.assertFalse(scenario.add_output.called)
@mock.patch("%s.json" % BASE)
def test_boot_runcommand_delete_json_fails(self, mock_json):
scenario = self.create_env(vmtasks.BootRuncommandDelete(self.context))
mock_json.loads.side_effect = ValueError()
scenario.run("foo_image", "foo_flavor", "foo_interpreter",
"foo_script", "foo_username")
scenario.add_output.assert_called_once_with(complete={
"chart_plugin": "TextArea", "data": ["StdErr: foo_err",
"StdOut:", "{\"foo\": 42}"],
"title": "Script Output"})
scenario._delete_server_with_fip.assert_called_once_with(
"foo_server", self.ip, force_delete=False)
def test_boot_runcommand_delete_custom_image(self):
context = {
"user": {
"tenant_id": "tenant_id",
"keypair": {"name": "foo_keypair_name"},
"credential": mock.Mock()
},
"tenant": {
"custom_image": {"id": "image_id"}
}
}
scenario = self.create_env(vmtasks.BootRuncommandDelete(context))
scenario._run_command = mock.MagicMock(
return_value=(0, "{\"foo\": 42}", "foo_err"))
scenario.run("foo_flavor",
command={"script_file": "foo_script",
"interpreter": "foo_interpreter"},
username="foo_username",
password="foo_password",
use_floating_ip="use_fip",
floating_network="ext_network",
force_delete="foo_force",
volume_args={"size": 16},
foo_arg="foo_value")
self.cinder.create_volume.assert_called_once_with(16, imageRef=None)
scenario._boot_server_with_fip.assert_called_once_with(
"image_id", "foo_flavor", key_name="foo_keypair_name",
use_floating_ip="use_fip", floating_network="ext_network",
block_device_mapping={"vdrally": "foo_volume:::1"},
foo_arg="foo_value")
scenario._wait_for_ping.assert_called_once_with("foo_ip")
scenario._run_command.assert_called_once_with(
"foo_ip", 22, "foo_username", "foo_password",
command={"script_file": "foo_script",
"interpreter": "foo_interpreter"})
scenario._delete_server_with_fip.assert_called_once_with(
"foo_server", self.ip, force_delete="foo_force")
scenario.add_output.assert_called_once_with(
complete={"chart_plugin": "TextArea",
"data": [
"StdErr: foo_err",
"StdOut:", "{\"foo\": 42}"],
"title": "Script Output"})
@mock.patch("%s.heat" % BASE)
@mock.patch("%s.sshutils" % BASE)
def test_runcommand_heat(self, mock_sshutils, mock_heat):
fake_ssh = mock.Mock()
fake_ssh.execute.return_value = [0, "key:val", ""]
mock_sshutils.SSH.return_value = fake_ssh
fake_stack = mock.Mock()
fake_stack.stack.outputs = [{"output_key": "gate_node",
"output_value": "ok"}]
mock_heat.main.Stack.return_value = fake_stack
context = {
"user": {"keypair": {"name": "name", "private": "pk"},
"credential": mock.MagicMock()},
"tenant": {"networks": [{"router_id": "1"}]}
}
scenario = vmtasks.RuncommandHeat(context)
scenario.generate_random_name = mock.Mock(return_value="name")
scenario.add_output = mock.Mock()
workload = {"username": "admin",
"resource": ["foo", "bar"]}
scenario.run(workload, "template",
{"file_key": "file_value"},
{"param_key": "param_value"})
expected = {"chart_plugin": "Table",
"data": {"rows": [["key", "val"]],
"cols": ["key", "value"]},
"description": "Data generated by workload",
"title": "Workload summary"}
scenario.add_output.assert_called_once_with(complete=expected)
def create_env_for_designate(self, zone_config=None):
scenario = vmtasks.CheckDesignateDNSResolving(self.context)
self.ip = {"id": "foo_id", "ip": "foo_ip", "is_floating": True}
scenario._boot_server_with_fip = mock.Mock(
return_value=("foo_server", self.ip))
scenario._delete_server_with_fip = mock.Mock()
scenario._run_command = mock.MagicMock(
return_value=(0, "ANSWER SECTION", "foo_err"))
scenario.add_output = mock.Mock()
if zone_config is None:
zone_config = {
"test_existing_designate_from_VM": {
"bind_ip": "192.168.1.123"
}
}
self.context.update(
{
"config": {
"zones@openstack": zone_config
},
"user": {
"keypair": {"name": "keypair_name"},
"credential": mock.MagicMock()
},
"tenant": {
"id": "0",
"name": "tenant1",
"zones": [
{"name": "zone1.com."}
],
"networks": [
{
"name": "net1",
"subnets": [
{
"name": "subnet1",
"dns_nameservers": "1.2.3.4"
}
]
}
]
}
}
)
args = {"image": "some_image", "flavor": "m1.small",
"username": "chuck norris"}
return scenario, args
@mock.patch("rally.task.utils.get_from_manager")
@mock.patch("rally.task.utils.wait_for_status")
def test_check_designate_dns_resolving_ok(
self,
mock_rally_task_utils_wait_for_status,
mock_rally_task_utils_get_from_manager):
scenario, args = self.create_env_for_designate()
scenario.run(**args)
scenario._boot_server_with_fip.assert_called_once_with(
"some_image", "m1.small", floating_network=None,
key_name="keypair_name", use_floating_ip=True)
mock_rally_task_utils_wait_for_status.assert_called_once_with(
"foo_server", ready_statuses=["ACTIVE"], update_resource=mock.ANY)
scenario._delete_server_with_fip.assert_called_once_with(
"foo_server", {"id": "foo_id", "ip": "foo_ip",
"is_floating": True},
force_delete=False)
scenario.add_output.assert_called_with(
complete={"chart_plugin": "TextArea",
"data": [
"foo_err"],
"title": "Script StdErr"})
@mock.patch("rally.task.utils.get_from_manager")
@mock.patch("rally.task.utils.wait_for_status")
def test_test_existing_designate_from_vm_command_timeout(
self,
mock_rally_task_utils_wait_for_status,
mock_rally_task_utils_get_from_manager):
scenario, _ = self.create_env_for_designate()
scenario._run_command.side_effect = exceptions.SSHTimeout()
self.assertRaises(exceptions.SSHTimeout,
scenario.run,
"foo_flavor", "foo_image", "foo_interpreter",
"foo_script", "foo_username")
scenario._delete_server_with_fip.assert_called_once_with(
"foo_server", self.ip, force_delete=False)
self.assertFalse(scenario.add_output.called)
@mock.patch("rally.task.utils.get_from_manager")
@mock.patch("rally.task.utils.wait_for_status")
def test_test_existing_designate_from_vm_wait_timeout(
self,
mock_rally_task_utils_wait_for_status,
mock_rally_task_utils_get_from_manager):
scenario, args = self.create_env_for_designate()
mock_rally_task_utils_wait_for_status.side_effect = \
exceptions.TimeoutException(
resource_type="foo_resource",
resource_name="foo_name",
resource_id="foo_id",
desired_status="foo_desired_status",
resource_status="foo_resource_status",
timeout=2)
self.assertRaises(exceptions.TimeoutException,
scenario.run,
"foo_flavor", "foo_image", "foo_interpreter",
"foo_script", "foo_username")
scenario._delete_server_with_fip.assert_called_once_with(
"foo_server", self.ip, force_delete=False)
self.assertFalse(scenario.add_output.called)
@ddt.data(
{"output": (1, "x y z", "error message"),
"raises": exceptions.ScriptError},
{"output": (0, "[1, 2, 3, 4]", ""),
"raises": exceptions.ScriptError}
)
@ddt.unpack
def test_test_existing_designate_from_vm_add_output(self, output,
expected=None,
raises=None):
scenario, _ = self.create_env_for_designate()
scenario._run_command.return_value = output
kwargs = {"flavor": "foo_flavor",
"image": "foo_image",
"username": "foo_username",
"password": "foo_password",
"use_floating_ip": "use_fip",
"floating_network": "ext_network",
"force_delete": "foo_force"}
self.assertRaises(raises, scenario.run, **kwargs)
self.assertFalse(scenario.add_output.called)
@ddt.ddt
class ValidCommandValidatorTestCase(test.TestCase):
    """Unit tests for vmtasks.ValidCommandValidator."""

    def setUp(self):
        super(ValidCommandValidatorTestCase, self).setUp()
        # Minimal deployment context: one admin plus one regular user.
        self.context = {"admin": {"credential": mock.MagicMock()},
                        "users": [{"credential": mock.MagicMock()}]}

    @ddt.data({"command": {"script_inline": "foobar",
                           "interpreter": ["ENV=bar", "/bin/foo"],
                           "local_path": "bar",
                           "remote_path": "/bin/foo"}},
              {"command": {"script_inline": "foobar", "interpreter": "foo"}})
    @ddt.unpack
    def test_check_command_dict(self, command=None):
        # Well-formed command dicts are accepted silently (None returned).
        validator = vmtasks.ValidCommandValidator(param_name="p",
                                                  required=True)
        self.assertIsNone(validator.check_command_dict(command))

    @ddt.data({"raises_message": "Command must be a dictionary"},
              {"command": "foo",
               "raises_message": "Command must be a dictionary"},
              {"command": {"interpreter": "foobar", "script_file": "foo",
                           "script_inline": "bar"},
               "raises_message": "Exactly one of "},
              {"command": {"script_file": "foobar"},
               "raises_message": "Supplied dict specifies no"},
              {"command": {"script_inline": "foobar",
                           "interpreter": "foo",
                           "local_path": "bar"},
               "raises_message": "When uploading an interpreter its path"},
              {"command": {"interpreter": "/bin/bash",
                           "script_path": "foo"},
               "raises_message": ("Unexpected command parameters: "
                                  "script_path")})
    @ddt.unpack
    def test_check_command_dict_failed(
            self, command=None, raises_message=None):
        # Each malformed command raises ValidationError with a message
        # containing the expected fragment.
        validator = vmtasks.ValidCommandValidator(param_name="p",
                                                  required=True)
        e = self.assertRaises(
            validation.ValidationError,
            validator.check_command_dict, command)
        self.assertIn(raises_message, e.message)

    @mock.patch("rally.plugins.common.validators.FileExistsValidator"
                "._file_access_ok")
    def test_validate(self, mock__file_access_ok):
        validator = vmtasks.ValidCommandValidator(param_name="p",
                                                  required=True)
        mock__file_access_ok.return_value = None
        command = {"script_file": "foobar", "interpreter": "foo"}
        result = validator.validate(self.context, {"args": {"p": command}},
                                    None, None)
        self.assertIsNone(result)
        # The referenced script file must be checked for readability.
        mock__file_access_ok.assert_called_once_with(
            filename="foobar", mode=os.R_OK, param_name="p",
            required=True)

    def test_valid_command_not_required(self):
        # required=False: a missing command is acceptable.
        validator = vmtasks.ValidCommandValidator(param_name="p",
                                                  required=False)
        result = validator.validate(self.context, {"args": {"p": None}},
                                    None, None)
        self.assertIsNone(result)

    def test_valid_command_required(self):
        validator = vmtasks.ValidCommandValidator(param_name="p",
                                                  required=True)
        # NOTE(review): the positional order here ({"args": ...} first,
        # self.context second) is swapped compared to test_validate above;
        # validation fails either way, which masks the swap — confirm
        # against ValidCommandValidator.validate's signature.
        e = self.assertRaises(
            validation.ValidationError,
            validator.validate, {"args": {"p": None}},
            self.context, None, None)
        self.assertEqual("Command must be a dictionary", e.message)

    @mock.patch("rally.plugins.common.validators.FileExistsValidator"
                "._file_access_ok")
    def test_valid_command_unreadable_script_file(self, mock__file_access_ok):
        # An unreadable script file propagates the underlying error message.
        mock__file_access_ok.side_effect = validation.ValidationError("O_o")
        validator = vmtasks.ValidCommandValidator(param_name="p",
                                                  required=True)
        command = {"script_file": "foobar", "interpreter": "foo"}
        e = self.assertRaises(
            validation.ValidationError,
            validator.validate, self.context, {"args": {"p": command}},
            None, None)
        self.assertEqual("O_o", e.message)

    @mock.patch("%s.ValidCommandValidator.check_command_dict" % BASE)
    def test_valid_command_fail_check_command_dict(self,
                                                   mock_check_command_dict):
        validator = vmtasks.ValidCommandValidator(param_name="p",
                                                  required=True)
        mock_check_command_dict.side_effect = validation.ValidationError(
            "foobar")
        # NOTE(review): argument order ({"args": ...} before self.context)
        # differs from test_validate; check_command_dict is mocked to raise,
        # so the swap is masked — verify the intended order.
        e = self.assertRaises(
            validation.ValidationError,
            validator.validate, {"args": {"p": {"foo": "bar"}}},
            self.context, None, None)
        self.assertEqual("foobar", e.message)

    def test_valid_command_script_inline(self):
        # Inline script + interpreter needs no filesystem checks.
        validator = vmtasks.ValidCommandValidator(param_name="p",
                                                  required=True)
        command = {"script_inline": "bar", "interpreter": "/bin/sh"}
        result = validator.validate(self.context, {"args": {"p": command}},
                                    None, None)
        self.assertIsNone(result)

    @mock.patch("rally.plugins.common.validators.FileExistsValidator"
                "._file_access_ok")
    def test_valid_command_local_path(self, mock__file_access_ok):
        # local_path must point at a readable file; failure bubbles up.
        mock__file_access_ok.side_effect = validation.ValidationError("")
        validator = vmtasks.ValidCommandValidator(param_name="p",
                                                  required=True)
        command = {"remote_path": "bar", "local_path": "foobar"}
        self.assertRaises(
            validation.ValidationError,
            validator.validate, self.context, {"args": {"p": command}},
            None, None)
        mock__file_access_ok.assert_called_once_with(
            filename="foobar", mode=os.R_OK, param_name="p",
            required=True)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,617
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/glance/images.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import cfg
from rally.common import logging
from rally.common import utils as rutils
from rally.common import validation
from rally_openstack.common import consts
from rally_openstack.common import osclients
from rally_openstack.common.services.image import image
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="images", platform="openstack", order=410)
class ImageGenerator(context.OpenStackContext):
    """Uploads specified Glance images to every tenant."""

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "image_url": {
                "type": "string",
                "description": "Location of the source to create image from."
            },
            "disk_format": {
                "description": "The format of the disk.",
                "enum": ["qcow2", "raw", "vhd", "vmdk", "vdi", "iso", "aki",
                         "ari", "ami"]
            },
            "container_format": {
                "description": "Format of the image container.",
                "enum": ["aki", "ami", "ari", "bare", "docker", "ova", "ovf"]
            },
            "image_name": {
                "type": "string",
                "description": "The name of image to create. NOTE: it will be "
                               "ignored in case when `images_per_tenant` is "
                               "bigger then 1."
            },
            "min_ram": {
                "description": "Amount of RAM in MB",
                "type": "integer",
                "minimum": 0
            },
            "min_disk": {
                "description": "Amount of disk space in GB",
                "type": "integer",
                "minimum": 0
            },
            "visibility": {
                "description": "Visibility for this image ('shared' and "
                               "'community' are available only in case of "
                               "Glance V2).",
                "enum": ["public", "private", "shared", "community"]
            },
            "images_per_tenant": {
                "description": "The number of images to create per one single "
                               "tenant.",
                "type": "integer",
                "minimum": 1
            },
            "image_args": {
                "description": "This param is deprecated since Rally-0.10.0, "
                               "specify exact arguments in a root section of "
                               "context instead.",
                "type": "object",
                "additionalProperties": True
            },
            "image_container": {
                "description": "This param is deprecated since Rally-0.10.0, "
                               "use `container_format` instead.",
                "type": "string",
            },
            "image_type": {
                "description": "This param is deprecated since Rally-0.10.0, "
                               "use `disk_format` instead.",
                "enum": ["qcow2", "raw", "vhd", "vmdk", "vdi", "iso", "aki",
                         "ari", "ami"],
            },
        },
        # Exactly one naming scheme must be supplied: the modern one, or one
        # of the three deprecated option combinations kept for backward
        # compatibility.
        "oneOf": [{"description": "It is been used since Rally 0.10.0",
                   "required": ["image_url", "disk_format",
                                "container_format"]},
                  {"description": "One of backward compatible way",
                   "required": ["image_url", "image_type",
                                "container_format"]},
                  {"description": "One of backward compatible way",
                   "required": ["image_url", "disk_format",
                                "image_container"]},
                  {"description": "One of backward compatible way",
                   "required": ["image_url", "image_type",
                                "image_container"]}],
        "additionalProperties": False
    }

    DEFAULT_CONFIG = {"images_per_tenant": 1}

    def setup(self):
        """Create the configured Glance images in every benchmark tenant.

        Stores the created image IDs under
        ``self.context["tenants"][<tenant_id>]["images"]``.
        """
        image_url = self.config.get("image_url")
        disk_format = self.config.get("disk_format")
        container_format = self.config.get("container_format")
        images_per_tenant = self.config.get("images_per_tenant")
        visibility = self.config.get("visibility", "private")
        min_disk = self.config.get("min_disk", 0)
        min_ram = self.config.get("min_ram", 0)
        image_args = self.config.get("image_args", {})

        # Backward compatibility: map the deprecated options onto the modern
        # ones, but never override an explicitly supplied modern value.
        if "image_type" in self.config:
            LOG.warning("The 'image_type' argument is deprecated since "
                        "Rally 0.10.0, use disk_format argument instead")
            if not disk_format:
                disk_format = self.config["image_type"]

        if "image_container" in self.config:
            LOG.warning("The 'image_container' argument is deprecated since "
                        "Rally 0.10.0; use container_format argument instead")
            if not container_format:
                container_format = self.config["image_container"]

        if image_args:
            LOG.warning(
                "The 'image_args' argument is deprecated since Rally 0.10.0; "
                "specify arguments in a root section of context instead")
            # Translate legacy sub-dict keys, again only when the
            # corresponding top-level option is absent.
            if "is_public" in image_args:
                if "visibility" not in self.config:
                    visibility = ("public" if image_args["is_public"]
                                  else "private")
            if "min_ram" in image_args:
                if "min_ram" not in self.config:
                    min_ram = image_args["min_ram"]
            if "min_disk" in image_args:
                if "min_disk" not in self.config:
                    min_disk = image_args["min_disk"]

        # None image_name means that image.Image will generate a random name
        image_name = None
        if "image_name" in self.config and images_per_tenant == 1:
            image_name = self.config["image_name"]

        for user, tenant_id in self._iterate_per_tenants():
            current_images = []
            clients = osclients.Clients(user["credential"])
            image_service = image.Image(
                clients, name_generator=self.generate_random_name)

            for i in range(images_per_tenant):
                image_obj = image_service.create_image(
                    image_name=image_name,
                    container_format=container_format,
                    image_location=image_url,
                    disk_format=disk_format,
                    visibility=visibility,
                    min_disk=min_disk,
                    min_ram=min_ram)
                current_images.append(image_obj.id)

            self.context["tenants"][tenant_id]["images"] = current_images

    def cleanup(self):
        """Delete the created images and any cached image volumes."""
        if self.context.get("admin", {}):
            # NOTE(andreykurilin): Glance does not require the admin for
            #   listing tenant images, but the admin is required for
            #   discovering Cinder volumes which might be created for the
            #   purpose of caching. Removing such volumes are optional step,
            #   since Cinder should have own mechanism like garbage collector,
            #   but if we can, let's remove everything and make the cloud as
            #   close as possible to the original state.
            admin = self.context["admin"]
            admin_required = None
        else:
            admin = None
            admin_required = False

        if "image_name" in self.config:
            # Images may have been created with an explicit (non-random)
            # name, so match by that name pattern instead of this class.
            matcher = rutils.make_name_matcher(self.config["image_name"])
        else:
            matcher = self.__class__

        resource_manager.cleanup(names=["glance.images",
                                        "cinder.image_volumes_cache"],
                                 admin=admin,
                                 admin_required=admin_required,
                                 users=self.context.get("users", []),
                                 superclass=matcher,
                                 task_id=self.get_owner_id())
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,618
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/quotas/test_quotas.py
|
# Copyright 2014: Kylin Cloud
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.scenarios.quotas import quotas
from tests.unit import test
class QuotasTestCase(test.ScenarioTestCase):
    """Tests for the Nova/Cinder/Neutron quota scenarios."""

    def setUp(self):
        super(QuotasTestCase, self).setUp()
        self.context.update({
            "user": {
                "tenant_id": "fake",
                "credential": mock.MagicMock()
            },
            "tenant": {"id": "fake"}
        })

    def test_nova_get(self):
        sc = quotas.NovaGet(self.context)
        sc._get_quotas = mock.MagicMock()
        sc.run()
        sc._get_quotas.assert_called_once_with("nova", "fake")

    def test_cinder_get(self):
        sc = quotas.CinderGet(self.context)
        sc._get_quotas = mock.MagicMock()
        sc.run()
        sc._get_quotas.assert_called_once_with("cinder", "fake")

    def test_nova_update(self):
        sc = quotas.NovaUpdate(self.context)
        sc._update_quotas = mock.MagicMock()
        sc.run(max_quota=1024)
        sc._update_quotas.assert_called_once_with("nova", "fake", 1024)

    def test_nova_update_and_delete(self):
        sc = quotas.NovaUpdateAndDelete(self.context)
        sc._update_quotas = mock.MagicMock()
        sc._delete_quotas = mock.MagicMock()
        sc.run(max_quota=1024)
        sc._update_quotas.assert_called_once_with("nova", "fake", 1024)
        sc._delete_quotas.assert_called_once_with("nova", "fake")

    def test_cinder_update(self):
        sc = quotas.CinderUpdate(self.context)
        sc._update_quotas = mock.MagicMock()
        sc.run(max_quota=1024)
        sc._update_quotas.assert_called_once_with("cinder", "fake", 1024)

    def test_cinder_update_and_delete(self):
        sc = quotas.CinderUpdateAndDelete(self.context)
        sc._update_quotas = mock.MagicMock()
        sc._delete_quotas = mock.MagicMock()
        sc.run(max_quota=1024)
        sc._update_quotas.assert_called_once_with("cinder", "fake", 1024)
        sc._delete_quotas.assert_called_once_with("cinder", "fake")

    def test_neutron_update(self):
        sc = quotas.NeutronUpdate(self.context)
        sc._update_quotas = mock.MagicMock()
        quota_update_fn = self.admin_clients("neutron").update_quota
        sc.run(max_quota=1024)
        sc._update_quotas.assert_called_once_with("neutron", "fake", 1024,
                                                  quota_update_fn)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,619
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/keystone/roles.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import broker
from rally.common import cfg
from rally.common import logging
from rally.common import validation
from rally import exceptions
from rally_openstack.common import consts
from rally_openstack.common import osclients
from rally_openstack.common.services.identity import identity
from rally_openstack.task import context
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="roles", platform="openstack", order=330)
class RoleGenerator(context.OpenStackContext):
    """Context class for assigning roles for users."""

    CONFIG_SCHEMA = {
        "type": "array",
        "$schema": consts.JSON_SCHEMA,
        "items": {
            "type": "string",
            "description": "The name of role to assign to user"
        }
    }

    def __init__(self, ctx):
        super(RoleGenerator, self).__init__(ctx)
        # Role assignment requires admin credentials.
        self.credential = self.context["admin"]["credential"]
        self.workers = (
            cfg.CONF.openstack.roles_context_resource_management_workers)

    def _get_role_object(self, context_role):
        """Check if role exists.

        :param context_role: name of existing role.
        :returns: the matching keystone role object.
        :raises NotFoundException: if no role has that name.
        """
        keystone = identity.Identity(osclients.Clients(self.credential))
        default_roles = keystone.list_roles()
        for def_role in default_roles:
            if str(def_role.name) == context_role:
                return def_role
        else:
            # for/else: the else branch runs when the loop finishes without
            # returning, i.e. no role name matched.
            raise exceptions.NotFoundException(
                "There is no role with name `%s`" % context_role)

    def _get_user_role_ids(self, user_id, project_id):
        """Return IDs of the roles the user already has on the project."""
        keystone = identity.Identity(osclients.Clients(self.credential))
        user_roles = keystone.list_roles(user_id=user_id,
                                         project_id=project_id)
        return [role.id for role in user_roles]

    def _get_consumer(self, func_name):
        """Build a broker consumer that calls the given Identity method.

        :param func_name: Identity method name, e.g. "add_role" or
            "revoke_role".
        """
        def consume(cache, args):
            role_id, user_id, project_id = args
            # Create one Identity client lazily per consumer (thread-local
            # cache dict supplied by the broker).
            if "client" not in cache:
                clients = osclients.Clients(self.credential)
                cache["client"] = identity.Identity(clients)
            getattr(cache["client"], func_name)(role_id=role_id,
                                                user_id=user_id,
                                                project_id=project_id)
        return consume

    def setup(self):
        """Add all roles to users."""
        threads = self.workers
        roles_dict = {}

        def publish(queue):
            for context_role in self.config:
                role = self._get_role_object(context_role)
                roles_dict[role.id] = role.name
                LOG.debug("Adding role %(role_name)s having ID %(role_id)s "
                          "to all users using %(threads)s threads"
                          % {"role_name": role.name,
                             "role_id": role.id,
                             "threads": threads})
                for user in self.context["users"]:
                    # Record the pre-existing assignments once per user so
                    # cleanup only revokes roles added by this context.
                    if "roles" not in user:
                        user["roles"] = self._get_user_role_ids(
                            user["id"],
                            user["tenant_id"])
                        user["assigned_roles"] = []
                    if role.id not in user["roles"]:
                        args = (role.id, user["id"], user["tenant_id"])
                        queue.append(args)
                        user["assigned_roles"].append(role.id)

        broker.run(publish, self._get_consumer("add_role"), threads)
        self.context["roles"] = roles_dict

    def cleanup(self):
        """Remove assigned roles from users."""
        threads = self.workers

        def publish(queue):
            for role_id in self.context["roles"]:
                LOG.debug("Removing assigned role %s from all users" % role_id)
                for user in self.context["users"]:
                    # Only revoke roles this context actually assigned.
                    if role_id in user["assigned_roles"]:
                        args = (role_id, user["id"], user["tenant_id"])
                        queue.append(args)

        broker.run(publish, self._get_consumer("revoke_role"), threads)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,620
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/murano/test_environments.py
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.scenarios.murano import environments
from tests.unit import test
MURANO_SCENARIO = ("rally_openstack.task.scenarios.murano."
"environments")
class MuranoEnvironmentsTestCase(test.ScenarioTestCase):
    """Tests for the murano environment scenarios."""

    def _get_context(self):
        # Enrich the base context with a package, a user and the
        # murano_packages context config that the scenarios read.
        self.context.update({
            "tenant": {
                "packages": [mock.MagicMock(fully_qualified_name="fake")]
            },
            "user": {
                "tenant_id": "fake_tenant_id"
            },
            "config": {
                "murano_packages": {
                    "app_package": (
                        "rally-jobs/extra/murano/"
                        "applications/HelloReporter/"
                        "io.murano.apps.HelloReporter.zip")
                }
            }
        })
        return self.context

    def test_list_environments(self):
        patch_target = "{}.{}.{}".format(
            MURANO_SCENARIO, "ListEnvironments", "_list_environments")
        scenario = environments.ListEnvironments(self.context)
        with mock.patch(patch_target) as mock_list_env:
            scenario.run()
            mock_list_env.assert_called_once_with()

    def test_create_and_delete_environment(self):
        def path(attr):
            # Full dotted path of an attribute on the scenario class.
            return "{}.{}.{}".format(
                MURANO_SCENARIO, "CreateAndDeleteEnvironment", attr)

        scenario = environments.CreateAndDeleteEnvironment(self.context)
        with mock.patch(path("generate_random_name")) as mock_random_name, \
                mock.patch(path("_create_environment")) as mock_create_env, \
                mock.patch(path("_create_session")) as mock_create_session, \
                mock.patch(path("_delete_environment")) as mock_delete_env:
            fake_env = mock.Mock(id="fake_id")
            mock_create_env.return_value = fake_env
            mock_random_name.return_value = "foo"

            scenario.run()

            mock_create_env.assert_called_once_with()
            mock_create_session.assert_called_once_with(fake_env.id)
            mock_delete_env.assert_called_once_with(fake_env)

    def test_create_and_deploy_environment(self):
        def path(attr):
            # Full dotted path of an attribute on the scenario class.
            return "{}.{}.{}".format(
                MURANO_SCENARIO, "CreateAndDeployEnvironment", attr)

        scenario = environments.CreateAndDeployEnvironment(self.context)
        with mock.patch(path("_create_environment")) as mock_create_env, \
                mock.patch(path("_create_session")) as mock_create_session, \
                mock.patch(path("_create_service")) as mock_create_service, \
                mock.patch(path("_deploy_environment")) as mock_deploy_env:
            fake_env = mock.MagicMock(id="fake_env_id")
            mock_create_env.return_value = fake_env
            fake_session = mock.Mock(id="fake_session_id")
            mock_create_session.return_value = fake_session

            scenario.context = self._get_context()
            scenario.context["tenants"] = {
                "fake_tenant_id": {
                    "packages": [mock.MagicMock()]
                }
            }

            scenario.run(1)

            mock_create_env.assert_called_once_with()
            mock_create_session.assert_called_once_with(fake_env.id)
            mock_create_service.assert_called_once_with(fake_env,
                                                        fake_session,
                                                        "fake")
            mock_deploy_env.assert_called_once_with(fake_env, fake_session)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,621
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/cleanup/test_resources.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
import ddt
from neutronclient.common import exceptions as neutron_exceptions
from novaclient import exceptions as nova_exc
from watcherclient.common.apiclient import exceptions as watcher_exceptions
from rally_openstack.task.cleanup import resources
from tests.unit import test
BASE = "rally_openstack.task.cleanup.resources"
GLANCE_V2_PATH = ("rally_openstack.common.services.image.glance_v2."
"GlanceV2Service")
class SynchronizedDeletionTestCase(test.TestCase):

    def test_is_deleted(self):
        # SynchronizedDeletion always reports the resource as deleted.
        mixin = resources.SynchronizedDeletion()
        self.assertTrue(mixin.is_deleted())
class QuotaMixinTestCase(test.TestCase):

    @mock.patch("%s.identity.Identity" % BASE)
    def test_list(self, mock_identity):
        """No tenant -> empty list; a tenant -> exactly its project."""
        mixin = resources.QuotaMixin()
        mixin.tenant_uuid = None
        mixin.user = mock.MagicMock()

        # Without a tenant there is nothing to list and identity is unused.
        self.assertEqual([], mixin.list())
        self.assertFalse(mock_identity.called)

        # With a tenant bound, the matching project is returned.
        mixin.tenant_uuid = mock.MagicMock()
        project = mock_identity.return_value.get_project.return_value
        self.assertEqual([project], mixin.list())
        mock_identity.assert_called_once_with(mixin.user)
class MagnumMixinTestCase(test.TestCase):

    def test_id(self):
        # id() proxies the raw resource's uuid attribute.
        mixin = resources.MagnumMixin()
        mixin._service = "magnum"
        mixin.raw_resource = mock.MagicMock()
        self.assertEqual(mixin.raw_resource.uuid, mixin.id())

    def test_list(self):
        # list() pages with marker= until an empty page comes back.
        mixin = resources.MagnumMixin()
        mixin._service = "magnum"
        pages = [mock.MagicMock() for _ in range(4)]
        mixin._manager = mock.MagicMock()
        mixin._manager.return_value.list.side_effect = (
            pages[:2], pages[2:4], [])
        self.assertEqual(pages, mixin.list())
        expected_calls = [mock.call(marker=None),
                          mock.call(marker=pages[1].uuid),
                          mock.call(marker=pages[3].uuid)]
        self.assertEqual(expected_calls,
                         mixin._manager.return_value.list.call_args_list)
class NovaServerTestCase(test.TestCase):

    def test_list(self):
        srv = resources.NovaServer()
        srv._manager = mock.MagicMock()
        srv.list()
        # limit=-1 asks nova for all servers in one shot.
        srv._manager.return_value.list.assert_called_once_with(limit=-1)

    def test_delete(self):
        srv = resources.NovaServer()
        srv.raw_resource = mock.Mock()
        srv._manager = mock.Mock()
        srv.delete()
        srv._manager.return_value.delete.assert_called_once_with(
            srv.raw_resource.id)

    def test_delete_locked(self):
        srv = resources.NovaServer()
        srv.raw_resource = mock.Mock()
        # Simulate the locked extended-status attribute being set.
        setattr(srv.raw_resource, "OS-EXT-STS:locked", True)
        srv._manager = mock.Mock()
        srv.delete()
        # A locked server must be unlocked before deletion.
        srv.raw_resource.unlock.assert_called_once_with()
        srv._manager.return_value.delete.assert_called_once_with(
            srv.raw_resource.id)
class NovaFlavorsTestCase(test.TestCase):

    @mock.patch("%s.base.ResourceManager._manager" % BASE)
    def test_is_deleted(self, mock_resource_manager__manager):
        # A 404 from nova means the flavor is gone.
        mock_resource_manager__manager().get.side_effect = (
            nova_exc.NotFound(404))
        flavor = resources.NovaFlavors()
        flavor.raw_resource = mock.MagicMock()
        self.assertTrue(flavor.is_deleted())

    @mock.patch("%s.base.ResourceManager._manager" % BASE)
    def test_is_deleted_fail(self, mock_resource_manager__manager):
        # Any error other than NotFound must propagate unchanged.
        mock_resource_manager__manager().get.side_effect = TypeError()
        flavor = resources.NovaFlavors()
        flavor.raw_resource = mock.MagicMock()
        self.assertRaises(TypeError, flavor.is_deleted)
class NovaServerGroupsTestCase(test.TestCase):

    @mock.patch("%s.base.ResourceManager._manager" % BASE)
    @mock.patch("rally.common.utils.name_matches_object")
    def test_list(self, mock_name_matches_object,
                  mock_resource_manager__manager):
        groups = [mock.MagicMock(name="rally_foo1"),
                  mock.MagicMock(name="rally_foo2"),
                  mock.MagicMock(name="foo3")]
        mock_name_matches_object.side_effect = [False, True, True]
        mock_resource_manager__manager().list.return_value = groups
        self.assertEqual(groups, resources.NovaServerGroups().list())
class NeutronMixinTestCase(test.TestCase):
    """Tests for NeutronMixin: manager resolution, id/name accessors,
    and the generic delete/list driven by the ``_resource`` name.
    """
    def get_neutron_mixin(self):
        # helper: a bare mixin bound to the "neutron" service
        neut = resources.NeutronMixin()
        neut._service = "neutron"
        return neut
    def test_manager(self):
        neut = self.get_neutron_mixin()
        neut.user = mock.MagicMock()
        self.assertEqual(neut.user.neutron.return_value, neut._manager())
    def test_id(self):
        neut = self.get_neutron_mixin()
        neut.raw_resource = {"id": "test"}
        self.assertEqual("test", neut.id())
    def test_name(self):
        neutron = self.get_neutron_mixin()
        neutron.raw_resource = {"id": "test_id", "name": "test_name"}
        self.assertEqual("test_name", neutron.name())
    def test_delete(self):
        neut = self.get_neutron_mixin()
        neut.user = mock.MagicMock()
        neut._resource = "some_resource"
        neut.raw_resource = {"id": "42"}
        neut.delete()
        # delete method name is derived from _resource: delete_<resource>
        neut.user.neutron().delete_some_resource.assert_called_once_with("42")
    def test_list(self):
        neut = self.get_neutron_mixin()
        neut.user = mock.MagicMock()
        neut._resource = "some_resource"
        neut.tenant_uuid = "user_tenant"
        # second entry belongs to another tenant and must be filtered out
        some_resources = [{"tenant_id": neut.tenant_uuid}, {"tenant_id": "a"}]
        neut.user.neutron().list_some_resources.return_value = {
            "some_resources": some_resources
        }
        self.assertEqual([some_resources[0]], list(neut.list()))
        neut.user.neutron().list_some_resources.assert_called_once_with(
            tenant_id=neut.tenant_uuid)
class NeutronLbaasV1MixinTestCase(test.TestCase):
    """Tests for NeutronLbaasV1Mixin: list() must be a no-op unless the
    "lbaas" extension is reported by neutron.
    """
    def get_neutron_lbaasv1_mixin(self, extensions=None):
        # helper: mixin whose neutron client reports the given extensions
        if extensions is None:
            extensions = []
        user = mock.MagicMock()
        neut = resources.NeutronLbaasV1Mixin(user=user)
        neut._service = "neutron"
        neut._resource = "some_resource"
        neut._manager = mock.Mock()
        user.neutron.return_value.list_extensions.return_value = {
            "extensions": [{"alias": ext} for ext in extensions]
        }
        return neut
    def test_list_lbaas_available(self):
        neut = self.get_neutron_lbaasv1_mixin(extensions=["lbaas"])
        neut.tenant_uuid = "user_tenant"
        # second entry belongs to another tenant and must be filtered out
        some_resources = [{"tenant_id": neut.tenant_uuid}, {"tenant_id": "a"}]
        neut._manager().list_some_resources.return_value = {
            "some_resources": some_resources
        }
        self.assertEqual([some_resources[0]], list(neut.list()))
        neut._manager().list_some_resources.assert_called_once_with(
            tenant_id=neut.tenant_uuid)
    def test_list_lbaas_unavailable(self):
        neut = self.get_neutron_lbaasv1_mixin()
        # no "lbaas" extension -> empty result, no API call at all
        self.assertEqual([], list(neut.list()))
        self.assertFalse(neut._manager().list_some_resources.called)
class NeutronLbaasV2MixinTestCase(test.TestCase):
    """Tests for NeutronLbaasV2Mixin: list() must be a no-op unless the
    "lbaasv2" extension is reported by neutron.
    """
    def get_neutron_lbaasv2_mixin(self, extensions=None):
        # helper: mixin whose neutron client reports the given extensions
        if extensions is None:
            extensions = []
        user = mock.MagicMock()
        neut = resources.NeutronLbaasV2Mixin(user=user)
        neut._service = "neutron"
        neut._resource = "some_resource"
        neut._manager = mock.Mock()
        user.neutron.return_value.list_extensions.return_value = {
            "extensions": [{"alias": ext} for ext in extensions]
        }
        return neut
    def test_list_lbaasv2_available(self):
        neut = self.get_neutron_lbaasv2_mixin(extensions=["lbaasv2"])
        neut.tenant_uuid = "user_tenant"
        # second entry belongs to another tenant and must be filtered out
        some_resources = [{"tenant_id": neut.tenant_uuid}, {"tenant_id": "a"}]
        neut._manager().list_some_resources.return_value = {
            "some_resources": some_resources
        }
        self.assertEqual([some_resources[0]], list(neut.list()))
        neut._manager().list_some_resources.assert_called_once_with(
            tenant_id=neut.tenant_uuid)
    def test_list_lbaasv2_unavailable(self):
        neut = self.get_neutron_lbaasv2_mixin()
        # no "lbaasv2" extension -> empty result, no API call at all
        self.assertEqual([], list(neut.list()))
        self.assertFalse(neut._manager().list_some_resources.called)
class NeutronV2LoadbalancerTestCase(test.TestCase):
    """Tests for NeutronV2Loadbalancer.is_deleted()."""
    def get_neutron_lbaasv2_lb(self):
        # helper: load balancer resource with a mocked manager
        neutron_lb = resources.NeutronV2Loadbalancer()
        neutron_lb.raw_resource = {"id": "1", "name": "s_rally"}
        neutron_lb._manager = mock.Mock()
        return neutron_lb
    def test_is_deleted_true(self):
        from neutronclient.common import exceptions as n_exceptions
        neutron_lb = self.get_neutron_lbaasv2_lb()
        # NotFound from show_loadbalancer means the LB is gone
        neutron_lb._manager().show_loadbalancer.side_effect = (
            n_exceptions.NotFound)
        self.assertTrue(neutron_lb.is_deleted())
        neutron_lb._manager().show_loadbalancer.assert_called_once_with(
            neutron_lb.id())
    def test_is_deleted_false(self):
        from neutronclient.common import exceptions as n_exceptions
        neutron_lb = self.get_neutron_lbaasv2_lb()
        # case 1: show succeeds -> not deleted
        neutron_lb._manager().show_loadbalancer.return_value = (
            neutron_lb.raw_resource)
        self.assertFalse(neutron_lb.is_deleted())
        neutron_lb._manager().show_loadbalancer.assert_called_once_with(
            neutron_lb.id())
        neutron_lb._manager().show_loadbalancer.reset_mock()
        # case 2: Forbidden is NOT treated as deleted either
        neutron_lb._manager().show_loadbalancer.side_effect = (
            n_exceptions.Forbidden)
        self.assertFalse(neutron_lb.is_deleted())
        neutron_lb._manager().show_loadbalancer.assert_called_once_with(
            neutron_lb.id())
class NeutronBgpvpnTestCase(test.TestCase):
    """Tests for the NeutronBgpvpn cleanup resource (admin-side client)."""
    def get_neutron_bgpvpn_mixin(self, extensions=None):
        # helper: resource whose admin neutron client reports the
        # given extension aliases
        if extensions is None:
            extensions = []
        admin = mock.Mock()
        neut = resources.NeutronBgpvpn(admin=admin)
        neut._manager = mock.Mock()
        nc = admin.neutron.return_value
        nc.list_extensions.return_value = {
            "extensions": [{"alias": ext} for ext in extensions]
        }
        return neut
    def test_list_user(self):
        neut = self.get_neutron_bgpvpn_mixin(extensions=["bgpvpn"])
        user_bgpvpns = {"bgpvpns": [{"tenant_id": "foo", "id": "bgpvpn_id"}]}
        neut._manager().list_bgpvpns.return_value = user_bgpvpns
        bgpvpns_list = neut.list()
        self.assertEqual("bgpvpn", neut._resource)
        # list_bgpvpns is called without a tenant filter
        neut._manager().list_bgpvpns.assert_called_once_with()
        self.assertEqual(bgpvpns_list, user_bgpvpns["bgpvpns"])
    def test_list_admin(self):
        neut = self.get_neutron_bgpvpn_mixin(extensions=["bgpvpn"])
        admin_bgpvpns = {"bgpvpns": [{"tenant_id": "foo", "id": "bgpvpn_id"}]}
        neut._manager().list_bgpvpns.return_value = admin_bgpvpns
        self.assertEqual("bgpvpn", neut._resource)
        self.assertEqual(neut.list(), admin_bgpvpns["bgpvpns"])
class NeutronFloatingIPTestCase(test.TestCase):
    """Tests for the NeutronFloatingIP cleanup resource."""
    def test_name(self):
        floating_ip = resources.NeutronFloatingIP({"name": "foo",
                                                   "description": "OoO"})
        # name() is backed by the "description" field, not "name"
        self.assertEqual("OoO", floating_ip.name())
    def test_list(self):
        fips = {"floatingips": [{"tenant_id": "foo", "id": "foo"}]}
        fake_user = mock.MagicMock()
        neutron_client = fake_user.neutron.return_value
        neutron_client.list_floatingips.return_value = fips
        manager = resources.NeutronFloatingIP(user=fake_user,
                                              tenant_uuid="foo")
        self.assertEqual(fips["floatingips"], list(manager.list()))
        neutron_client.list_floatingips.assert_called_once_with(
            tenant_id="foo")
class NeutronTrunkTestcase(test.TestCase):
    """Tests for the NeutronTrunk cleanup resource."""
    def test_list(self):
        user = mock.MagicMock()
        trunk = resources.NeutronTrunk(user=user)
        user.neutron().list_trunks.return_value = {
            "trunks": ["trunk"]}
        self.assertEqual(["trunk"], trunk.list())
        user.neutron().list_trunks.assert_called_once_with(
            tenant_id=None)
    def test_list_with_not_found(self):
        # stand-in for the neutron client's 404 error: only the
        # status_code attribute is inspected by the resource
        class NotFound(Exception):
            status_code = 404
        user = mock.MagicMock()
        trunk = resources.NeutronTrunk(user=user)
        user.neutron().list_trunks.side_effect = NotFound()
        # a 404 from list_trunks yields an empty listing, not an error
        self.assertEqual([], trunk.list())
        user.neutron().list_trunks.assert_called_once_with(
            tenant_id=None)
class NeutronPortTestCase(test.TestCase):
    """Tests for the NeutronPort cleanup resource.

    Covers delete dispatch on ``device_owner``, name resolution
    (name vs parent_name), and listing with router-name enrichment.
    """
    def test_delete(self):
        # ordinary port: deleted directly via delete_port
        raw_res = {"device_owner": "abbabaab", "id": "some_id"}
        user = mock.MagicMock()
        resources.NeutronPort(resource=raw_res, user=user).delete()
        user.neutron().delete_port.assert_called_once_with(raw_res["id"])
    def test_delete_port_raise_exception(self):
        # PortNotFoundClient from delete_port is swallowed silently
        raw_res = {"device_owner": "abbabaab", "id": "some_id"}
        user = mock.MagicMock()
        user.neutron().delete_port.side_effect = (
            neutron_exceptions.PortNotFoundClient)
        resources.NeutronPort(resource=raw_res, user=user).delete()
        user.neutron().delete_port.assert_called_once_with(raw_res["id"])
    def test_delete_port_device_owner(self):
        # router-interface port: detached from the router instead of
        # being deleted via delete_port
        raw_res = {
            "device_owner": "network:router_interface",
            "id": "some_id",
            "device_id": "dev_id"
        }
        user = mock.MagicMock()
        resources.NeutronPort(resource=raw_res, user=user).delete()
        user.neutron().remove_interface_router.assert_called_once_with(
            raw_res["device_id"], {"port_id": raw_res["id"]})
    def test_name(self):
        raw_res = {
            "id": "some_id",
            "device_id": "dev_id",
        }
        # automatically created or manually created port. No name field
        self.assertEqual(
            resources.NeutronPort(resource=raw_res,
                                  user=mock.MagicMock()).name(),
            "")
        raw_res["name"] = "foo"
        self.assertEqual("foo", resources.NeutronPort(
            resource=raw_res, user=mock.MagicMock()).name())
        # parent_name (owning router's name) takes priority over name
        raw_res["parent_name"] = "bar"
        self.assertEqual("bar", resources.NeutronPort(
            resource=raw_res, user=mock.MagicMock()).name())
        del raw_res["name"]
        self.assertEqual("bar", resources.NeutronPort(
            resource=raw_res, user=mock.MagicMock()).name())
    def test_list(self):
        tenant_uuid = "uuuu-uuuu-iiii-dddd"
        ports = [
            # the case when 'name' is present, so 'device_owner' field is not
            # required
            {"tenant_id": tenant_uuid, "id": "id1", "name": "foo"},
            # 3 different cases when router_interface is an owner
            {"tenant_id": tenant_uuid, "id": "id2",
             "device_owner": "network:router_interface",
             "device_id": "router-1"},
            {"tenant_id": tenant_uuid, "id": "id3",
             "device_owner": "network:router_interface_distributed",
             "device_id": "router-1"},
            {"tenant_id": tenant_uuid, "id": "id4",
             "device_owner": "network:ha_router_replicated_interface",
             "device_id": "router-2"},
            # the case when gateway router is an owner
            {"tenant_id": tenant_uuid, "id": "id5",
             "device_owner": "network:router_gateway",
             "device_id": "router-3"},
            # the case when gateway router is an owner, but device_id is
            # invalid
            {"tenant_id": tenant_uuid, "id": "id6",
             "device_owner": "network:router_gateway",
             "device_id": "aaaa"},
            # the case when port was auto-created with floating-ip
            {"tenant_id": tenant_uuid, "id": "id7",
             "device_owner": "network:dhcp",
             "device_id": "asdasdasd"},
            # the case when port is from another tenant
            {"tenant_id": "wrong tenant", "id": "id8", "name": "foo"},
            # WTF port without any parent and name
            {"tenant_id": tenant_uuid, "id": "id9", "device_owner": ""},
        ]
        routers = [
            {"id": "router-1", "name": "Router-1", "tenant_id": tenant_uuid},
            {"id": "router-2", "name": "Router-2", "tenant_id": tenant_uuid},
            {"id": "router-3", "name": "Router-3", "tenant_id": tenant_uuid},
            {"id": "router-4", "name": "Router-4", "tenant_id": tenant_uuid},
            {"id": "router-5", "name": "Router-5", "tenant_id": tenant_uuid},
        ]
        # build the expectation: same-tenant ports only, with parent_name
        # filled in from the owning router (when device_id matches one)
        expected_ports = []
        for port in ports:
            if port["tenant_id"] == tenant_uuid:
                expected_ports.append(copy.deepcopy(port))
                if ("device_id" in port
                        and port["device_id"].startswith("router")):
                    expected_ports[-1]["parent_name"] = [
                        r for r in routers
                        if r["id"] == port["device_id"]][0]["name"]
        # class-level Mocks so every FakeNeutronClient "instance" (the
        # resource calls user.neutron()) shares the same stubbed methods
        class FakeNeutronClient(object):
            list_ports = mock.Mock()
            list_routers = mock.Mock()
        neutron = FakeNeutronClient
        neutron.list_ports.return_value = {"ports": ports}
        neutron.list_routers.return_value = {"routers": routers}
        user = mock.Mock(neutron=neutron)
        self.assertEqual(expected_ports, resources.NeutronPort(
            user=user, tenant_uuid=tenant_uuid).list())
        # both listings must happen exactly once (routers are cached)
        neutron.list_ports.assert_called_once_with()
        neutron.list_routers.assert_called_once_with()
@ddt.ddt
class NeutronSecurityGroupTestCase(test.TestCase):
    """Tests for the NeutronSecurityGroup cleanup resource."""
    @ddt.data(
        {"admin": mock.Mock(), "admin_required": True},
        {"admin": None, "admin_required": False})
    @ddt.unpack
    def test_list(self, admin, admin_required):
        sg_list = [{"tenant_id": "user_tenant", "name": "default"},
                   {"tenant_id": "user_tenant", "name": "foo_sg"}]
        neut = resources.NeutronSecurityGroup()
        neut.user = mock.MagicMock()
        neut._resource = "security_group"
        neut.tenant_uuid = "user_tenant"
        neut.user.neutron().list_security_groups.return_value = {
            "security_groups": sg_list
        }
        # the "default" security group must be excluded from cleanup
        expected_result = [sg_list[1]]
        self.assertEqual(expected_result, list(neut.list()))
        neut.user.neutron().list_security_groups.assert_called_once_with(
            tenant_id=neut.tenant_uuid)
    def test_list_with_not_found(self):
        # stand-in for the neutron client's 404 error; only status_code
        # is inspected by the resource
        class NotFound(Exception):
            status_code = 404
        neut = resources.NeutronSecurityGroup()
        neut.user = mock.MagicMock()
        neut._resource = "security_group"
        neut.tenant_uuid = "user_tenant"
        neut.user.neutron().list_security_groups.side_effect = NotFound()
        # a 404 yields an empty listing, not an error
        expected_result = []
        self.assertEqual(expected_result, list(neut.list()))
        neut.user.neutron().list_security_groups.assert_called_once_with(
            tenant_id=neut.tenant_uuid)
class NeutronQuotaTestCase(test.TestCase):
    """Tests for the NeutronQuota cleanup resource."""
    def test_delete(self):
        fake_admin = mock.MagicMock()
        quota = resources.NeutronQuota(admin=fake_admin, tenant_uuid="fake")
        quota.delete()
        # the tenant's quota is reset via the admin client
        delete_quota = fake_admin.neutron.return_value.delete_quota
        delete_quota.assert_called_once_with("fake")
@ddt.ddt
class GlanceImageTestCase(test.TestCase):
    """Tests for the GlanceImage cleanup resource.

    Covers client selection (admin preferred over user), listing of both
    active and deactivated images, and deletion incl. reactivation of
    deactivated images.
    """
    @mock.patch("rally_openstack.common.services.image.image.Image")
    def test__client_admin(self, mock_image):
        admin = mock.Mock()
        glance = resources.GlanceImage(admin=admin)
        client = glance._client()
        mock_image.assert_called_once_with(admin)
        self.assertEqual(client, mock_image.return_value)
    @mock.patch("rally_openstack.common.services.image.image.Image")
    def test__client_user(self, mock_image):
        user = mock.Mock()
        glance = resources.GlanceImage(user=user)
        wrapper = glance._client()
        mock_image.assert_called_once_with(user)
        self.assertEqual(wrapper, mock_image.return_value)
    @mock.patch("rally_openstack.common.services.image.image.Image")
    def test__client_admin_preferred(self, mock_image):
        # when both credentials are present, the admin one wins
        admin = mock.Mock()
        user = mock.Mock()
        glance = resources.GlanceImage(admin=admin, user=user)
        client = glance._client()
        mock_image.assert_called_once_with(admin)
        self.assertEqual(client, mock_image.return_value)
    def test_list(self):
        glance = resources.GlanceImage()
        glance._client = mock.Mock()
        list_images = glance._client.return_value.list_images
        # two calls: first the default listing, then deactivated images
        list_images.side_effect = (
            ["active-image1", "active-image2"],
            ["deactivated-image1"])
        glance.tenant_uuid = mock.Mock()
        self.assertEqual(
            glance.list(),
            ["active-image1", "active-image2", "deactivated-image1"])
        list_images.assert_has_calls([
            mock.call(owner=glance.tenant_uuid),
            mock.call(status="deactivated", owner=glance.tenant_uuid)])
    def test_delete(self):
        glance = resources.GlanceImage()
        glance._client = mock.Mock()
        glance._wrapper = mock.Mock()
        glance.raw_resource = mock.Mock()
        client = glance._client.return_value
        deleted_image = mock.Mock(status="DELETED")
        client.get_image.side_effect = [glance.raw_resource, deleted_image]
        glance.delete()
        client.delete_image.assert_called_once_with(glance.raw_resource.id)
        # a normal (non-deactivated) image needs no reactivation
        self.assertFalse(client.reactivate_image.called)
    @mock.patch("%s.reactivate_image" % GLANCE_V2_PATH)
    def test_delete_deactivated_image(self, mock_reactivate_image):
        glance = resources.GlanceImage()
        glance._client = mock.Mock()
        glance._wrapper = mock.Mock()
        glance.raw_resource = mock.Mock(status="deactivated")
        client = glance._client.return_value
        deleted_image = mock.Mock(status="DELETED")
        client.get_image.side_effect = [glance.raw_resource, deleted_image]
        glance.delete()
        # deactivated images are reactivated before deletion
        mock_reactivate_image.assert_called_once_with(glance.raw_resource.id)
        client.delete_image.assert_called_once_with(glance.raw_resource.id)
class CeilometerTestCase(test.TestCase):
    """Tests for the CeilometerAlarms cleanup resource."""
    def test_id(self):
        # alarms are identified by alarm_id, not id
        ceil = resources.CeilometerAlarms()
        ceil.raw_resource = mock.MagicMock()
        self.assertEqual(ceil.raw_resource.alarm_id, ceil.id())
    @mock.patch("%s.CeilometerAlarms._manager" % BASE)
    def test_list(self, mock_ceilometer_alarms__manager):
        ceil = resources.CeilometerAlarms()
        ceil.tenant_uuid = mock.MagicMock()
        mock_ceilometer_alarms__manager().list.return_value = ["a", "b", "c"]
        # reset so the assert_called_once below counts only the call
        # made by ceil.list(), not the stubbing call above
        mock_ceilometer_alarms__manager.reset_mock()
        self.assertEqual(["a", "b", "c"], ceil.list())
        mock_ceilometer_alarms__manager().list.assert_called_once_with(
            q=[{"field": "project_id", "op": "eq", "value": ceil.tenant_uuid}])
class ZaqarQueuesTestCase(test.TestCase):
    """Tests for the ZaqarQueues cleanup resource."""
    def test_list(self):
        fake_user = mock.Mock()
        queues_resource = resources.ZaqarQueues(user=fake_user)
        queues_resource.list()
        # listing delegates straight to the zaqar client's queues()
        fake_user.zaqar().queues.assert_called_once_with()
class KeystoneMixinTestCase(test.TestCase):
    """Tests for KeystoneMixin: manager creation via the identity
    service and _resource-driven delete/list method dispatch.
    """
    def test_is_deleted(self):
        # keystone resources are always reported as deleted
        self.assertTrue(resources.KeystoneMixin().is_deleted())
    def get_keystone_mixin(self):
        # helper: bare mixin bound to the "keystone" service
        kmixin = resources.KeystoneMixin()
        kmixin._service = "keystone"
        return kmixin
    @mock.patch("%s.identity" % BASE)
    def test_manager(self, mock_identity):
        keystone_mixin = self.get_keystone_mixin()
        keystone_mixin.admin = mock.MagicMock()
        self.assertEqual(mock_identity.Identity.return_value,
                         keystone_mixin._manager())
        mock_identity.Identity.assert_called_once_with(
            keystone_mixin.admin)
    @mock.patch("%s.identity" % BASE)
    def test_delete(self, mock_identity):
        keystone_mixin = self.get_keystone_mixin()
        keystone_mixin._resource = "some_resource"
        keystone_mixin.id = lambda: "id_a"
        keystone_mixin.admin = mock.MagicMock()
        keystone_mixin.delete()
        mock_identity.Identity.assert_called_once_with(keystone_mixin.admin)
        identity_service = mock_identity.Identity.return_value
        # delete method name is derived from _resource: delete_<resource>
        identity_service.delete_some_resource.assert_called_once_with("id_a")
    @mock.patch("%s.identity" % BASE)
    def test_list(self, mock_identity):
        keystone_mixin = self.get_keystone_mixin()
        keystone_mixin._resource = "some_resource2"
        keystone_mixin.admin = mock.MagicMock()
        identity = mock_identity.Identity
        # list method name is derived from _resource: list_<resource>s
        self.assertSequenceEqual(
            identity.return_value.list_some_resource2s.return_value,
            keystone_mixin.list())
        identity.assert_called_once_with(keystone_mixin.admin)
        identity.return_value.list_some_resource2s.assert_called_once_with()
class KeystoneEc2TestCase(test.TestCase):
    """Tests for the KeystoneEc2 cleanup resource (EC2 credentials)."""
    def test_user_id_property(self):
        user_client = mock.Mock()
        admin_client = mock.Mock()
        manager = resources.KeystoneEc2(user=user_client, admin=admin_client)
        # user_id comes from the user credential's auth_ref
        self.assertEqual(user_client.keystone.auth_ref.user_id,
                         manager.user_id)
    def test_list(self):
        user_client = mock.Mock()
        admin_client = mock.Mock()
        with mock.patch("%s.identity.Identity" % BASE, autospec=True) as p:
            identity = p.return_value
            manager = resources.KeystoneEc2(user=user_client,
                                            admin=admin_client)
            self.assertEqual(identity.list_ec2credentials.return_value,
                             manager.list())
            # the identity service is built from the USER credential
            p.assert_called_once_with(user_client)
            identity.list_ec2credentials.assert_called_once_with(
                manager.user_id)
    def test_delete(self):
        user_client = mock.Mock()
        admin_client = mock.Mock()
        raw_resource = mock.Mock()
        with mock.patch("%s.identity.Identity" % BASE, autospec=True) as p:
            manager = resources.KeystoneEc2(user=user_client,
                                            admin=admin_client,
                                            resource=raw_resource)
            manager.delete()
            p.assert_called_once_with(user_client)
            p.return_value.delete_ec2credential.assert_called_once_with(
                manager.user_id, access=raw_resource.access)
class SwiftMixinTestCase(test.TestCase):
    """Tests for SwiftMixin: id/name accessors and delete dispatch.

    Swift raw resources are sequences (e.g. [container, object]),
    not dicts or client objects.
    """
    def get_swift_mixin(self):
        # helper: bare mixin bound to the "swift" service
        swift_mixin = resources.SwiftMixin()
        swift_mixin._service = "swift"
        return swift_mixin
    def test_manager(self):
        swift_mixin = self.get_swift_mixin()
        swift_mixin.user = mock.MagicMock()
        self.assertEqual(swift_mixin.user.swift.return_value,
                         swift_mixin._manager())
    def test_id(self):
        # id() is the raw resource itself
        swift_mixin = self.get_swift_mixin()
        swift_mixin.raw_resource = mock.MagicMock()
        self.assertEqual(swift_mixin.raw_resource, swift_mixin.id())
    def test_name(self):
        # name() is the LAST element of the raw resource sequence
        swift = self.get_swift_mixin()
        swift.raw_resource = ["name1", "name2"]
        self.assertEqual("name2", swift.name())
    def test_delete(self):
        swift_mixin = self.get_swift_mixin()
        swift_mixin.user = mock.MagicMock()
        swift_mixin._resource = "some_resource"
        swift_mixin.raw_resource = mock.MagicMock()
        swift_mixin.delete()
        # the raw resource sequence is unpacked as positional args
        swift_mixin.user.swift().delete_some_resource.assert_called_once_with(
            *swift_mixin.raw_resource)
class SwiftObjectTestCase(test.TestCase):
    """Tests for SwiftObject.list(): one entry per object in every
    container (containers x objects).
    """
    @mock.patch("%s.SwiftMixin._manager" % BASE)
    def test_list(self, mock_swift_mixin__manager):
        containers = [mock.MagicMock(), mock.MagicMock()]
        objects = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock()]
        # get_account/get_container return (headers, listing) tuples
        mock_swift_mixin__manager().get_account.return_value = (
            "header", containers)
        mock_swift_mixin__manager().get_container.return_value = (
            "header", objects)
        self.assertEqual(len(containers),
                         len(resources.SwiftContainer().list()))
        self.assertEqual(len(containers) * len(objects),
                         len(resources.SwiftObject().list()))
class SwiftContainerTestCase(test.TestCase):
    """Tests for SwiftContainer.list()."""
    @mock.patch("%s.SwiftMixin._manager" % BASE)
    def test_list(self, mock_swift_mixin__manager):
        fake_containers = [mock.MagicMock() for _ in range(3)]
        # get_account returns a (headers, listing) tuple
        mock_swift_mixin__manager().get_account.return_value = (
            "header", fake_containers)
        listed = resources.SwiftContainer().list()
        self.assertEqual(len(fake_containers), len(listed))
class ManilaShareTestCase(test.TestCase):
    """Tests for the ManilaShare cleanup resource."""
    def test_list(self):
        share = resources.ManilaShare()
        share._manager = mock.MagicMock()
        share.list()
        # listing must bind the resource to the "shares" endpoint
        self.assertEqual("shares", share._resource)
        share._manager.return_value.list.assert_called_once_with()
    def test_delete(self):
        share = resources.ManilaShare()
        share._manager = mock.MagicMock()
        share.id = lambda: "fake_id"
        share.delete()
        self.assertEqual("shares", share._resource)
        share._manager.return_value.delete.assert_called_once_with(
            "fake_id")
class ManilaShareNetworkTestCase(test.TestCase):
    """Tests for the ManilaShareNetwork cleanup resource."""
    def test_list(self):
        sn_resource = resources.ManilaShareNetwork()
        sn_resource._manager = mock.MagicMock()
        sn_resource.list()
        # listing must bind the resource to the "share_networks" endpoint
        self.assertEqual("share_networks", sn_resource._resource)
        sn_resource._manager.return_value.list.assert_called_once_with()
    def test_delete(self):
        sn_resource = resources.ManilaShareNetwork()
        sn_resource._manager = mock.MagicMock()
        sn_resource.id = lambda: "fake_id"
        sn_resource.delete()
        self.assertEqual("share_networks", sn_resource._resource)
        sn_resource._manager.return_value.delete.assert_called_once_with(
            "fake_id")
class ManilaSecurityServiceTestCase(test.TestCase):
    """Tests for the ManilaSecurityService cleanup resource."""
    def test_list(self):
        ss_resource = resources.ManilaSecurityService()
        ss_resource._manager = mock.MagicMock()
        ss_resource.list()
        # listing must bind the resource to the "security_services" endpoint
        self.assertEqual("security_services", ss_resource._resource)
        ss_resource._manager.return_value.list.assert_called_once_with()
    def test_delete(self):
        ss_resource = resources.ManilaSecurityService()
        ss_resource._manager = mock.MagicMock()
        ss_resource.id = lambda: "fake_id"
        ss_resource.delete()
        self.assertEqual("security_services", ss_resource._resource)
        ss_resource._manager.return_value.delete.assert_called_once_with(
            "fake_id")
class MistralWorkbookTestCase(test.TestCase):
    """Tests for the MistralWorkbooks cleanup resource."""
    def test_delete(self):
        fake_clients = mock.MagicMock()
        workbook = mock.Mock()
        # .name must be set explicitly; Mock(name=...) would consume it
        workbook.name = "TEST_NAME"
        manager = resources.MistralWorkbooks(user=fake_clients,
                                             resource=workbook)
        manager.delete()
        # workbooks are deleted by NAME, not by id
        fake_clients.mistral().workbooks.delete.assert_called_once_with(
            "TEST_NAME")
class MistralExecutionsTestCase(test.TestCase):
    """Tests for the MistralExecutions cleanup resource."""
    def test_name(self):
        fake_execution = mock.MagicMock(workflow_name="bar")
        fake_execution.name = "foo"
        # name() must report the workflow_name, not the execution's name
        result = resources.MistralExecutions(fake_execution).name()
        self.assertEqual("bar", result)
class SenlinMixinTestCase(test.TestCase):
    """Tests for SenlinMixin: id accessor, manager resolution, and
    _resource-driven list/delete method dispatch.
    """
    def test_id(self):
        senlin = resources.SenlinMixin()
        senlin.raw_resource = {"id": "TEST_ID"}
        self.assertEqual("TEST_ID", senlin.id())
    def test__manager(self):
        senlin = resources.SenlinMixin()
        senlin._service = "senlin"
        senlin.user = mock.MagicMock()
        self.assertEqual(senlin.user.senlin.return_value, senlin._manager())
    def test_list(self):
        senlin = resources.SenlinMixin()
        senlin._service = "senlin"
        senlin.user = mock.MagicMock()
        senlin._resource = "some_resources"
        some_resources = [{"name": "resource1"}, {"name": "resource2"}]
        # list method name equals the (plural) _resource name itself
        senlin.user.senlin().some_resources.return_value = some_resources
        self.assertEqual(some_resources, senlin.list())
        senlin.user.senlin().some_resources.assert_called_once_with()
    def test_delete(self):
        senlin = resources.SenlinMixin()
        senlin._service = "senlin"
        senlin.user = mock.MagicMock()
        senlin._resource = "some_resources"
        senlin.raw_resource = {"id": "TEST_ID"}
        # delete method name is delete_<singular resource name>
        senlin.user.senlin().delete_some_resource.return_value = None
        senlin.delete()
        senlin.user.senlin().delete_some_resource.assert_called_once_with(
            "TEST_ID")
class WatcherTemplateTestCase(test.TestCase):
    """Tests for the WatcherTemplate cleanup resource."""
    def test_id(self):
        # watcher resources are identified by uuid
        watcher = resources.WatcherTemplate()
        watcher.raw_resource = mock.MagicMock(uuid=100)
        self.assertEqual(100, watcher.id())
    @mock.patch("%s.WatcherTemplate._manager" % BASE)
    def test_is_deleted(self, mock__manager):
        mock__manager.return_value.get.return_value = None
        watcher = resources.WatcherTemplate()
        watcher.id = mock.Mock()
        self.assertFalse(watcher.is_deleted())
        # NotFound raised while fetching means the template is gone
        mock__manager.side_effect = [watcher_exceptions.NotFound()]
        self.assertTrue(watcher.is_deleted())
    def test_list(self):
        watcher = resources.WatcherTemplate()
        watcher._manager = mock.MagicMock()
        watcher.list()
        self.assertEqual("audit_template", watcher._resource)
        # presumably limit=0 means "no limit" — matches the implementation
        watcher._manager().list.assert_called_once_with(limit=0)
class WatcherAuditTestCase(test.TestCase):
    """Tests for the WatcherAudit cleanup resource."""
    def test_id(self):
        # watcher resources are identified by uuid
        watcher = resources.WatcherAudit()
        watcher.raw_resource = mock.MagicMock(uuid=100)
        self.assertEqual(100, watcher.id())
    def test_name(self):
        # audits use the uuid as their name too
        watcher = resources.WatcherAudit()
        watcher.raw_resource = mock.MagicMock(uuid="name")
        self.assertEqual("name", watcher.name())
    @mock.patch("%s.WatcherAudit._manager" % BASE)
    def test_is_deleted(self, mock__manager):
        mock__manager.return_value.get.return_value = None
        watcher = resources.WatcherAudit()
        watcher.id = mock.Mock()
        self.assertFalse(watcher.is_deleted())
        # NotFound raised while fetching means the audit is gone
        mock__manager.side_effect = [watcher_exceptions.NotFound()]
        self.assertTrue(watcher.is_deleted())
    def test_list(self):
        watcher = resources.WatcherAudit()
        watcher._manager = mock.MagicMock()
        watcher.list()
        self.assertEqual("audit", watcher._resource)
        watcher._manager().list.assert_called_once_with(limit=0)
class WatcherActionPlanTestCase(test.TestCase):
    """Tests for the WatcherActionPlan cleanup resource."""
    def test_id(self):
        # watcher resources are identified by uuid
        watcher = resources.WatcherActionPlan()
        watcher.raw_resource = mock.MagicMock(uuid=100)
        self.assertEqual(100, watcher.id())
    def test_name(self):
        # action plans have no usable name -> a NoName placeholder
        watcher = resources.WatcherActionPlan()
        self.assertIsInstance(watcher.name(), resources.base.NoName)
    @mock.patch("%s.WatcherActionPlan._manager" % BASE)
    def test_is_deleted(self, mock__manager):
        mock__manager.return_value.get.return_value = None
        watcher = resources.WatcherActionPlan()
        watcher.id = mock.Mock()
        self.assertFalse(watcher.is_deleted())
        # NotFound raised while fetching means the plan is gone
        mock__manager.side_effect = [watcher_exceptions.NotFound()]
        self.assertTrue(watcher.is_deleted())
    def test_list(self):
        watcher = resources.WatcherActionPlan()
        watcher._manager = mock.MagicMock()
        watcher.list()
        self.assertEqual("action_plan", watcher._resource)
        watcher._manager().list.assert_called_once_with(limit=0)
class CinderImageVolumeCacheTestCase(test.TestCase):
    """Tests for the CinderImageVolumeCache cleanup resource.

    Cache entries are volumes named ``image-<image id>``; list() pairs
    each such volume with its source image.
    """
    class Resource(object):
        # minimal stand-in for a cinder/glance object with id and name
        def __init__(self, id=None, name=None):
            self.id = id
            self.name = name
    @mock.patch("rally_openstack.common.services.image.image.Image")
    def test_list(self, mock_image):
        admin = mock.Mock()
        glance = mock_image.return_value
        cinder = admin.cinder.return_value
        image_1 = self.Resource("foo", name="foo-name")
        image_2 = self.Resource("bar", name="bar-name")
        glance.list_images.return_value = [image_1, image_2]
        # only volume_2 ("image-foo") matches the cache naming scheme
        volume_1 = self.Resource(name="v1")
        volume_2 = self.Resource(name="image-foo")
        volume_3 = self.Resource(name="foo")
        volume_4 = self.Resource(name="bar")
        cinder.volumes.list.return_value = [volume_1, volume_2, volume_3,
                                            volume_4]
        manager = resources.CinderImageVolumeCache(admin=admin)
        self.assertEqual([{"volume": volume_2, "image": image_1}],
                         manager.list())
        mock_image.assert_called_once_with(admin)
        glance.list_images.assert_called_once_with()
        cinder.volumes.list.assert_called_once_with(
            search_opts={"all_tenants": 1})
    def test_id_and_name(self):
        # id comes from the volume, name from the image
        res = resources.CinderImageVolumeCache(
            {"volume": self.Resource("volume-id", "volume-name"),
             "image": self.Resource("image-id", "image-name")})
        self.assertEqual("volume-id", res.id())
        self.assertEqual("image-name", res.name())
class GnocchiMixinTestCase(test.TestCase):
    """Tests for GnocchiMixin: id() and name() both read the raw
    resource's "name" key.
    """
    def get_gnocchi(self):
        instance = resources.GnocchiMixin()
        instance._service = "gnocchi"
        return instance
    def test_id(self):
        instance = self.get_gnocchi()
        instance.raw_resource = {"name": "test_name"}
        self.assertEqual("test_name", instance.id())
    def test_name(self):
        instance = self.get_gnocchi()
        instance.raw_resource = {"name": "test_name"}
        self.assertEqual("test_name", instance.name())
class GnocchiMetricTestCase(test.TestCase):
    """Tests for GnocchiMetric: id() reads "id"; list() follows marker
    pagination until an empty page is returned.
    """
    def get_gnocchi(self):
        gnocchi = resources.GnocchiMetric()
        gnocchi._service = "gnocchi"
        return gnocchi
    def test_id(self):
        gnocchi = self.get_gnocchi()
        gnocchi.raw_resource = {"id": "test_id"}
        self.assertEqual("test_id", gnocchi.id())
    def test_list(self):
        gnocchi = self.get_gnocchi()
        gnocchi._manager = mock.MagicMock()
        metrics = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock(),
                   mock.MagicMock()]
        # three pages: two entries, two entries, then empty (stop)
        gnocchi._manager.return_value.list.side_effect = (
            metrics[:2], metrics[2:4], [])
        self.assertEqual(metrics, gnocchi.list())
        # each follow-up call passes the last item's id as the marker
        self.assertEqual(
            [mock.call(marker=None), mock.call(marker=metrics[1]["id"]),
             mock.call(marker=metrics[3]["id"])],
            gnocchi._manager.return_value.list.call_args_list)
class GnocchiResourceTestCase(test.TestCase):
    """Tests for GnocchiResource: id() reads "id"; list() follows marker
    pagination until an empty page is returned.
    """
    def get_gnocchi(self):
        gnocchi = resources.GnocchiResource()
        gnocchi._service = "gnocchi"
        return gnocchi
    def test_id(self):
        gnocchi = self.get_gnocchi()
        gnocchi.raw_resource = {"id": "test_id"}
        self.assertEqual("test_id", gnocchi.id())
    def test_list(self):
        gnocchi = self.get_gnocchi()
        gnocchi._manager = mock.MagicMock()
        res = [mock.MagicMock(), mock.MagicMock(), mock.MagicMock(),
               mock.MagicMock()]
        # three pages: two entries, two entries, then empty (stop)
        gnocchi._manager.return_value.list.side_effect = (
            res[:2], res[2:4], [])
        self.assertEqual(res, gnocchi.list())
        # each follow-up call passes the last item's id as the marker
        self.assertEqual(
            [mock.call(marker=None), mock.call(marker=res[1]["id"]),
             mock.call(marker=res[3]["id"])],
            gnocchi._manager.return_value.list.call_args_list)
class BarbicanSecretsTestCase(test.TestCase):
    """Tests for the BarbicanSecrets cleanup resource."""
    def test_id(self):
        # secrets are identified by their secret_ref URL
        barbican = resources.BarbicanSecrets()
        barbican.raw_resource = mock.MagicMock(secret_ref="fake_uuid")
        self.assertEqual("fake_uuid", barbican.id())
    def test_list(self):
        barbican = resources.BarbicanSecrets()
        barbican._manager = mock.MagicMock()
        barbican.list()
        barbican._manager.assert_called_once_with()
    def test_delete(self):
        barbican = resources.BarbicanSecrets()
        barbican._manager = mock.MagicMock()
        barbican.raw_resource = mock.MagicMock(uuid="fake_uuid")
        barbican.delete()
        barbican._manager.assert_called_once_with()
    def test_is_deleted(self):
        barbican = resources.BarbicanSecrets()
        barbican._manager = mock.MagicMock()
        barbican.raw_resource = mock.MagicMock(uuid="fake_uuid")
        # the mocked manager still "finds" the secret -> not deleted
        self.assertFalse(barbican.is_deleted())
@resources.base.resource("octavia", "some", order=3)
class OctaviaSimpleResource(resources.OctaviaMixIn):
    """Minimal OctaviaMixIn subclass ("some" resource) used as a test
    fixture by OctaviaResourceTestCase below.
    """
    pass
class OctaviaResourceTestCase(test.TestCase):
    """Tests for OctaviaMixIn via the OctaviaSimpleResource fixture,
    plus the OctaviaLoadBalancers specialization (cascade delete).
    """
    def test_name(self):
        resource = OctaviaSimpleResource({"name": "test_name"})
        self.assertEqual("test_name", resource.name())
    def test_id(self):
        resource = OctaviaSimpleResource({"id": "test_id"})
        self.assertEqual("test_id", resource.id())
    def test_delete(self):
        clients = mock.MagicMock()
        octavia_client = clients.octavia.return_value
        resource = OctaviaSimpleResource(
            user=clients, resource={"id": "test_id"})
        resource.delete()
        # delete method name is derived from the resource: <name>_delete
        octavia_client.some_delete.assert_called_once_with("test_id")
    def test_delete_load_balancers(self):
        clients = mock.MagicMock()
        octavia_client = clients.octavia.return_value
        resource = resources.OctaviaLoadBalancers(
            user=clients, resource={"id": "test_id"})
        resource.delete()
        # load balancers are deleted with cascade=True to remove children
        octavia_client.load_balancer_delete.assert_called_once_with(
            "test_id", cascade=True)
    def test_delete_with_exception(self):
        clients = mock.MagicMock()
        octavia_client = clients.octavia.return_value
        resource = OctaviaSimpleResource(
            user=clients, resource={"id": "test_id"})
        # case #1: random exception is raised
        octavia_client.some_delete.side_effect = ValueError("asd")
        self.assertRaises(ValueError, resource.delete)
        # case #2: octaviaclient inner exception with random message
        from octaviaclient.api.v2 import octavia as octavia_exc
        e = octavia_exc.OctaviaClientException(409, "bla bla bla")
        octavia_client.some_delete.side_effect = e
        self.assertRaises(octavia_exc.OctaviaClientException, resource.delete)
        # case #3: octaviaclient inner exception with specific message
        # ("Invalid state PENDING_DELETE" means deletion is in progress,
        # so it is swallowed)
        e = octavia_exc.OctaviaClientException(
            409, "Invalid state PENDING_DELETE bla bla")
        octavia_client.some_delete.side_effect = e
        resource.delete()
    def test_delete_load_balancer_with_exception(self):
        clients = mock.MagicMock()
        octavia_client = clients.octavia.return_value
        resource = resources.OctaviaLoadBalancers(
            user=clients, resource={"id": "test_id"})
        # case #1: random exception is raised
        octavia_client.load_balancer_delete.side_effect = ValueError("asd")
        self.assertRaises(ValueError, resource.delete)
        # case #2: octaviaclient inner exception with random message
        from octaviaclient.api.v2 import octavia as octavia_exc
        e = octavia_exc.OctaviaClientException(409, "bla bla bla")
        octavia_client.load_balancer_delete.side_effect = e
        self.assertRaises(octavia_exc.OctaviaClientException, resource.delete)
        # case #3: octaviaclient inner exception with specific message
        e = octavia_exc.OctaviaClientException(
            409, "Invalid state PENDING_DELETE bla bla")
        octavia_client.load_balancer_delete.side_effect = e
        resource.delete()
    def test_is_deleted_false(self):
        clients = mock.MagicMock()
        octavia_client = clients.octavia.return_value
        resource = OctaviaSimpleResource(
            user=clients, resource={"id": "test_id"})
        self.assertFalse(resource.is_deleted())
        # existence check method name: <name>_show
        octavia_client.some_show.assert_called_once_with("test_id")
    def test_is_deleted_true(self):
        from osc_lib import exceptions as osc_exc
        clients = mock.MagicMock()
        octavia_client = clients.octavia.return_value
        # NotFound from the show call means the resource is gone
        octavia_client.some_show.side_effect = osc_exc.NotFound(404, "foo")
        resource = OctaviaSimpleResource(
            user=clients, resource={"id": "test_id"})
        self.assertTrue(resource.is_deleted())
        octavia_client.some_show.assert_called_once_with("test_id")
    def test_list(self):
        clients = mock.MagicMock()
        octavia_client = clients.octavia.return_value
        # generic mixin: list method <name>_list, payload key <name>s
        octavia_client.some_list.return_value = {"somes": [1, 2]}
        manager = OctaviaSimpleResource(user=clients)
        self.assertEqual([1, 2], manager.list())
        octavia_client.some_list.assert_called_once_with()
        # irregular plural: l7policy -> "l7policies" payload key
        octavia_client.l7policy_list.return_value = {"l7policies": [3, 4]}
        manager = resources.OctaviaL7Policies(user=clients)
        self.assertEqual([3, 4], manager.list())
        octavia_client.l7policy_list.assert_called_once_with()
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,622
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/common/services/heat/main.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import cfg
from rally.common import utils as common_utils
from rally.task import atomic
from rally.task import utils
CONF = cfg.CONF
class Stack(common_utils.RandomNameGeneratorMixin):
    """Represent heat stack.
    Usage:
    >>> stack = Stack(scenario, task, "template.yaml", parameters={"nodes": 3})
    >>> do_testing(stack)
    >>> stack.update(nodes=4)
    >>> do_testing(stack)
    """
    def __init__(self, scenario, task, template, files, parameters=None):
        """Init heat wrapper.
        :param Scenario scenario: scenario instance
        :param Task task: task instance
        :param str template: template file path
        :param dict files: dict with file name and path
        :param dict parameters: parameters for template
        """
        self.scenario = scenario
        self.task = task
        # Read template/files eagerly and close the handles explicitly.
        # The previous revision used bare ``open(path).read()`` which relies
        # on the GC to close the descriptors (leaks on non-CPython, emits
        # ResourceWarning on CPython).
        with open(template) as tpl_file:
            self.template = tpl_file.read()
        self.files = {}
        self.parameters = parameters
        for name, path in files.items():
            with open(path) as env_file:
                self.files[name] = env_file.read()
    def _wait(self, ready_statuses, failure_statuses):
        """Poll the stack until it reaches one of the given statuses.
        NOTE(review): the *create* poll interval/timeout config options are
        used for update waits as well -- confirm this is intended.
        :param ready_statuses: statuses that mean success
        :param failure_statuses: statuses that mean failure
        """
        self.stack = utils.wait_for_status(
            self.stack,
            check_interval=CONF.openstack.heat_stack_create_poll_interval,
            timeout=CONF.openstack.heat_stack_create_timeout,
            ready_statuses=ready_statuses,
            failure_statuses=failure_statuses,
            update_resource=utils.get_from_manager(),
        )
    def create(self):
        """Create the heat stack and wait for CREATE_COMPLETE."""
        with atomic.ActionTimer(self.scenario, "heat.create"):
            self.stack = self.scenario.clients("heat").stacks.create(
                stack_name=self.scenario.generate_random_name(),
                template=self.template,
                files=self.files,
                parameters=self.parameters)
            self.stack_id = self.stack["stack"]["id"]
            # Re-fetch the stack object so _wait gets a resource with a
            # manager attached (the create call only returns a dict).
            self.stack = self.scenario.clients(
                "heat").stacks.get(self.stack_id)
            self._wait(["CREATE_COMPLETE"], ["CREATE_FAILED"])
    def update(self, data):
        """Merge ``data`` into the stack parameters and update the stack.
        :param dict data: parameter values to merge into the current set
        """
        if self.parameters is None:
            # ``parameters`` may legitimately be omitted at construction
            # time; start from an empty mapping instead of failing with
            # AttributeError on ``None.update``.
            self.parameters = {}
        self.parameters.update(data)
        with atomic.ActionTimer(self.scenario, "heat.update"):
            self.scenario.clients("heat").stacks.update(
                self.stack_id, template=self.template,
                files=self.files, parameters=self.parameters)
            self._wait(["UPDATE_COMPLETE"], ["UPDATE_FAILED"])
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,623
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/common/cfg/glance.py
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import cfg
# Glance-related timeouts/poll intervals registered under the "openstack"
# config group. NOTE: the previous revision listed
# ``glance_image_create_prepoll_delay`` and
# ``glance_image_create_poll_interval`` twice each; the redundant duplicate
# entries have been removed (each option should be declared exactly once).
OPTS = {"openstack": [
    cfg.FloatOpt("glance_image_delete_timeout",
                 default=120.0,
                 deprecated_group="benchmark",
                 help="Time to wait for glance image to be deleted."),
    cfg.FloatOpt("glance_image_delete_poll_interval",
                 default=1.0,
                 deprecated_group="benchmark",
                 help="Interval between checks when waiting for image "
                      "deletion."),
    cfg.FloatOpt("glance_image_create_prepoll_delay",
                 default=2.0,
                 deprecated_group="benchmark",
                 help="Time to sleep after creating a resource before "
                      "polling for it status"),
    cfg.FloatOpt("glance_image_create_timeout",
                 default=120.0,
                 deprecated_group="benchmark",
                 help="Time to wait for glance image to be created."),
    cfg.FloatOpt("glance_image_create_poll_interval",
                 default=1.0,
                 deprecated_group="benchmark",
                 help="Interval between checks when waiting for image "
                      "creation.")
]}
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,624
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/common/wrappers/network.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from neutronclient.common import exceptions as neutron_exceptions
from rally.common import cfg
from rally.common import logging
from rally import exceptions
from rally_openstack.common import consts
from rally_openstack.common.services.network import net_utils
from rally_openstack.common.services.network import neutron
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
def generate_cidr(start_cidr="10.2.0.0/24"):
    """Generate next CIDR for network or subnet, without IP overlapping.
    This is process and thread safe, because `cidr_incr' points to
    value stored directly in RAM. This guarantees that CIDRs will be
    serial and unique even under hard multiprocessing/threading load.
    :param start_cidr: start CIDR str
    :returns: next available CIDR str
    """
    # The helper also reports the IP version; only the CIDR is needed here.
    _version, next_cidr = net_utils.generate_cidr(start_cidr=start_cidr)
    return next_cidr
class NetworkWrapperException(exceptions.RallyException):
    """Raised when a network wrapper operation cannot be completed."""
    msg_fmt = "%(message)s"
    error_code = 532
class NetworkWrapper(object, metaclass=abc.ABCMeta):
    """Base class for network service implementations.
    We actually have two network services implementations, with different API:
    NovaNetwork and Neutron. The idea is (at least to try) to use unified
    service, which hides most differences and routines behind the scenes.
    This allows to significantly re-use and simplify code.
    """
    START_CIDR = "10.2.0.0/24"
    START_IPV6_CIDR = "dead:beaf::/64"
    # Name of the backing service; must be overridden by subclasses.
    SERVICE_IMPL = None
    def __init__(self, clients, owner, config=None):
        """Returns available network wrapper instance.
        :param clients: rally.plugins.openstack.osclients.Clients instance
        :param owner: The object that owns resources created by this
                      wrapper instance. It will be used to generate
                      random names, so must implement
                      rally.common.utils.RandomNameGeneratorMixin
        :param config: The configuration of the network
                       wrapper. Currently only two config options are
                       recognized, 'start_cidr' and 'start_ipv6_cidr'.
        :returns: NetworkWrapper subclass instance
        """
        self.clients = clients
        # ``clients`` may expose services either as attributes or via
        # __call__ -- support both access protocols.
        if hasattr(clients, self.SERVICE_IMPL):
            self.client = getattr(clients, self.SERVICE_IMPL)()
        else:
            self.client = clients(self.SERVICE_IMPL)
        self.config = config or {}
        self.owner = owner
        self.start_cidr = self.config.get("start_cidr", self.START_CIDR)
        self.start_ipv6_cidr = self.config.get(
            "start_ipv6_cidr", self.START_IPV6_CIDR)
    @abc.abstractmethod
    def create_network(self):
        """Create network."""
    @abc.abstractmethod
    def delete_network(self):
        """Delete network."""
    @abc.abstractmethod
    def list_networks(self):
        """List networks."""
    @abc.abstractmethod
    def create_floating_ip(self):
        """Create floating IP."""
    @abc.abstractmethod
    def delete_floating_ip(self):
        """Delete floating IP."""
    @abc.abstractmethod
    def supports_extension(self):
        """Checks whether a network extension is supported."""
class NeutronWrapper(NetworkWrapper):
    """NetworkWrapper implemented on top of the Neutron service layer."""
    SERVICE_IMPL = consts.Service.NEUTRON
    SUBNET_IP_VERSION = 4
    SUBNET_IPV6_VERSION = 6
    # Defaults for v1 load-balancer pools (see create_v1_pool).
    LB_METHOD = "ROUND_ROBIN"
    LB_PROTOCOL = "HTTP"
    def __init__(self, *args, **kwargs):
        super(NeutronWrapper, self).__init__(*args, **kwargs)
        # NeutronService expects a "clients" object exposing a neutron()
        # factory and a ``credential`` property; adapt our single pre-built
        # client to that interface. Note ``_self`` -- the closure reads the
        # enclosing wrapper's ``self``, not the inner instance.
        class _SingleClientWrapper(object):
            def neutron(_self):
                return self.client
            @property
            def credential(_self):
                return self.clients.credential
        self.neutron = neutron.NeutronService(
            clients=_SingleClientWrapper(),
            name_generator=self.owner.generate_random_name,
            atomic_inst=getattr(self.owner, "_atomic_actions", [])
        )
    @property
    def external_networks(self):
        # All networks flagged router:external=True.
        return self.neutron.list_networks(router_external=True)
    @property
    def ext_gw_mode_enabled(self):
        """Determine if the ext-gw-mode extension is enabled.
        Without this extension, we can't pass the enable_snat parameter.
        """
        return self.neutron.supports_extension("ext-gw-mode", silent=True)
    def get_network(self, net_id=None, name=None):
        """Look up a network by id (preferred) or name, normalized to a dict.
        :raises NetworkWrapperException: if no matching network is found
        """
        net = None
        try:
            if net_id:
                net = self.neutron.get_network(net_id)
            else:
                networks = self.neutron.list_networks(name=name)
                if networks:
                    net = networks[0]
        except neutron_exceptions.NeutronClientException:
            # Treat client errors as "not found"; reported uniformly below.
            pass
        if net:
            return {"id": net["id"],
                    "name": net["name"],
                    "tenant_id": net.get("tenant_id",
                                         net.get("project_id", None)),
                    "status": net["status"],
                    "external": net.get("router:external", False),
                    "subnets": net.get("subnets", []),
                    "router_id": None}
        else:
            raise NetworkWrapperException(
                "Network not found: %s" % (name or net_id))
    def create_router(self, external=False, **kwargs):
        """Create neutron router.
        :param external: bool, whether to set up external_gateway_info
        :param **kwargs: POST /v2.0/routers request options
        :returns: neutron router dict
        """
        # The service layer generates names itself; drop any user-supplied
        # one so random-name-based cleanup keeps working.
        kwargs.pop("name", None)
        if "tenant_id" in kwargs and "project_id" not in kwargs:
            kwargs["project_id"] = kwargs.pop("tenant_id")
        return self.neutron.create_router(
            discover_external_gw=external, **kwargs)
    def create_v1_pool(self, tenant_id, subnet_id, **kwargs):
        """Create LB Pool (v1).
        :param tenant_id: str, pool tenant id
        :param subnet_id: str, neutron subnet-id
        :param **kwargs: extra options
        :returns: neutron lb-pool dict
        """
        pool_args = {
            "pool": {
                "tenant_id": tenant_id,
                "name": self.owner.generate_random_name(),
                "subnet_id": subnet_id,
                "lb_method": kwargs.get("lb_method", self.LB_METHOD),
                "protocol": kwargs.get("protocol", self.LB_PROTOCOL)
            }
        }
        return self.client.create_pool(pool_args)
    def _generate_cidr(self, ip_version=4):
        """Return the next free CIDR for the requested IP version."""
        # TODO(amaretskiy): Generate CIDRs unique for network, not cluster
        ip_version, cidr = net_utils.generate_cidr(
            start_cidr=self.start_cidr if ip_version == 4
            else self.start_ipv6_cidr)
        return cidr
    def _create_network_infrastructure(self, tenant_id, **kwargs):
        """Create network.
        The following keyword arguments are accepted:
        * add_router: Deprecated, please use router_create_args instead.
          Create an external router and add an interface to each
          subnet created. Default: False
        * subnets_num: Number of subnets to create per network. Default: 0
        * dualstack: Whether subnets should be of both IPv4 and IPv6
        * dns_nameservers: Nameservers for each subnet. Default:
          8.8.8.8, 8.8.4.4
        * network_create_args: Additional network creation arguments.
        * router_create_args: Additional router creation arguments.
        :param tenant_id: str, tenant ID
        :param kwargs: Additional options, left open-ended for compatibility.
                       See above for recognized keyword args.
        :returns: dict, network data
        """
        network_args = dict(kwargs.get("network_create_args", {}))
        network_args["project_id"] = tenant_id
        router_args = dict(kwargs.get("router_create_args", {}))
        add_router = kwargs.get("add_router", False)
        # router_args=None tells the service layer to skip router creation.
        if not (router_args or add_router):
            router_args = None
        else:
            router_args["project_id"] = tenant_id
            router_args["discover_external_gw"] = router_args.pop(
                "external", False) or add_router
        subnet_create_args = {"project_id": tenant_id}
        if "dns_nameservers" in kwargs:
            subnet_create_args["dns_nameservers"] = kwargs["dns_nameservers"]
        net_topo = self.neutron.create_network_topology(
            network_create_args=network_args,
            router_create_args=router_args,
            subnet_create_args=subnet_create_args,
            subnets_dualstack=kwargs.get("dualstack", False),
            subnets_count=kwargs.get("subnets_num", 0)
        )
        network = net_topo["network"]
        subnets = net_topo["subnets"]
        if net_topo["routers"]:
            router = net_topo["routers"][0]
        else:
            router = None
        return {
            "network": {
                "id": network["id"],
                "name": network["name"],
                "status": network["status"],
                "subnets": [s["id"] for s in subnets],
                "external": network.get("router:external", False),
                "router_id": router and router["id"] or None,
                "tenant_id": tenant_id
            },
            "subnets": subnets,
            "router": router
        }
    def create_network(self, tenant_id, **kwargs):
        """Create network.
        The following keyword arguments are accepted:
        * add_router: Deprecated, please use router_create_args instead.
          Create an external router and add an interface to each
          subnet created. Default: False
        * subnets_num: Number of subnets to create per network. Default: 0
        * dualstack: Whether subnets should be of both IPv4 and IPv6
        * dns_nameservers: Nameservers for each subnet. Default:
          8.8.8.8, 8.8.4.4
        * network_create_args: Additional network creation arguments.
        * router_create_args: Additional router creation arguments.
        :param tenant_id: str, tenant ID
        :param kwargs: Additional options, left open-ended for compatibility.
                       See above for recognized keyword args.
        :returns: dict, network data
        """
        return self._create_network_infrastructure(
            tenant_id, **kwargs)["network"]
    def delete_v1_pool(self, pool_id):
        """Delete LB Pool (v1)
        :param pool_id: str, Lb-Pool-id
        """
        self.client.delete_pool(pool_id)
    def delete_network(self, network):
        """Delete network
        :param network: network object returned by create_network method
        """
        router = {"id": network["router_id"]} if network["router_id"] else None
        # delete_network_topology uses only IDs, but let's transmit as much as
        # possible info
        topo = {
            "network": {
                "id": network["id"],
                "name": network["name"],
                "status": network["status"],
                "subnets": network["subnets"],
                "router:external": network["external"]
            },
            "subnets": [{"id": s} for s in network["subnets"]],
            "routers": [router] if router else []
        }
        self.neutron.delete_network_topology(topo)
    def _delete_subnet(self, subnet_id):
        """Delete a single subnet by id."""
        self.neutron.delete_subnet(subnet_id)
    def list_networks(self):
        """Return the list of all visible networks."""
        return self.neutron.list_networks()
    def create_port(self, network_id, **kwargs):
        """Create neutron port.
        :param network_id: neutron network id
        :param **kwargs: POST /v2.0/ports request options
        :returns: neutron port dict
        """
        return self.neutron.create_port(network_id=network_id, **kwargs)
    def create_floating_ip(self, ext_network=None,
                           tenant_id=None, port_id=None, **kwargs):
        """Create Neutron floating IP.
        :param ext_network: floating network name or dict
        :param tenant_id: str tenant id
        :param port_id: str port id
        :param **kwargs: for compatibility, not used here
        :returns: floating IP dict
        :raises ValueError: if tenant_id is missing
        :raises NetworkWrapperException: if the floating network cannot be
            found or resolved
        """
        if not tenant_id:
            raise ValueError("Missed tenant_id")
        try:
            fip = self.neutron.create_floatingip(
                floating_network=ext_network, project_id=tenant_id,
                port_id=port_id)
        except (exceptions.NotFoundException,
                exceptions.GetResourceFailure) as e:
            # Re-raise as a wrapper-level error; drop the chained context.
            raise NetworkWrapperException(str(e)) from None
        return {"id": fip["id"], "ip": fip["floating_ip_address"]}
    def delete_floating_ip(self, fip_id, **kwargs):
        """Delete floating IP.
        :param fip_id: int floating IP id
        :param **kwargs: for compatibility, not used here
        """
        self.neutron.delete_floatingip(fip_id)
    def supports_extension(self, extension):
        """Check whether a neutron extension is supported
        :param extension: str, neutron extension
        :returns: result tuple
        :rtype: (bool, string)
        """
        try:
            self.neutron.supports_extension(extension)
        except exceptions.NotFoundException as e:
            return False, str(e)
        return True, ""
def wrap(clients, owner, config=None):
    """Returns available network wrapper instance.
    :param clients: rally.plugins.openstack.osclients.Clients instance
    :param owner: The object that owns resources created by this
                  wrapper instance. It will be used to generate random
                  names, so must implement
                  rally.common.utils.RandomNameGeneratorMixin
    :param config: The configuration of the network wrapper. Currently
                   only one config option is recognized, 'start_cidr',
                   and only for Nova network.
    :returns: NetworkWrapper subclass instance
    """
    # ``clients`` may expose services() as a method or via __call__.
    services = (clients.services() if hasattr(clients, "services")
                else clients("services"))
    if consts.Service.NEUTRON in services.values():
        return NeutronWrapper(clients, owner, config=config)
    # No Neutron available: nova-network support was removed, so only a
    # deprecation warning is emitted (and None is returned implicitly).
    LOG.warning("NovaNetworkWrapper is deprecated since 0.9.0")
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,625
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/vm/test_utils.py
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import subprocess
from unittest import mock
import netaddr
from rally.common import cfg
from rally_openstack.task.scenarios.vm import utils
from tests.unit import test
VMTASKS_UTILS = "rally_openstack.task.scenarios.vm.utils"
CONF = cfg.CONF
class VMScenarioTestCase(test.ScenarioTestCase):
    """Mock-based unit tests for utils.VMScenario helpers."""
    @mock.patch("%s.open" % VMTASKS_UTILS,
                side_effect=mock.mock_open(), create=True)
    def test__run_command_over_ssh_script_file(self, mock_open):
        mock_ssh = mock.MagicMock()
        vm_scenario = utils.VMScenario(self.context)
        vm_scenario._run_command_over_ssh(
            mock_ssh,
            {
                "script_file": "foobar",
                "interpreter": ["interpreter", "interpreter_arg"],
                "command_args": ["arg1", "arg2"]
            }
        )
        # NOTE(review): side_effect is a mock_open factory, so calling it
        # again here yields the same file handle passed as stdin -- confirm.
        mock_ssh.execute.assert_called_once_with(
            ["interpreter", "interpreter_arg", "arg1", "arg2"],
            stdin=mock_open.side_effect())
        mock_open.assert_called_once_with("foobar", "rb")
    @mock.patch("%s.io.StringIO" % VMTASKS_UTILS)
    def test__run_command_over_ssh_script_inline(self, mock_string_io):
        mock_ssh = mock.MagicMock()
        vm_scenario = utils.VMScenario(self.context)
        vm_scenario._run_command_over_ssh(
            mock_ssh,
            {
                "script_inline": "foobar",
                "interpreter": ["interpreter", "interpreter_arg"],
                "command_args": ["arg1", "arg2"]
            }
        )
        # Inline scripts are fed to the interpreter via a StringIO stdin.
        mock_ssh.execute.assert_called_once_with(
            ["interpreter", "interpreter_arg", "arg1", "arg2"],
            stdin=mock_string_io.return_value)
        mock_string_io.assert_called_once_with("foobar")
    def test__run_command_over_ssh_remote_path(self):
        mock_ssh = mock.MagicMock()
        vm_scenario = utils.VMScenario(self.context)
        vm_scenario._run_command_over_ssh(
            mock_ssh,
            {
                "remote_path": ["foo", "bar"],
                "command_args": ["arg1", "arg2"]
            }
        )
        mock_ssh.execute.assert_called_once_with(
            ["foo", "bar", "arg1", "arg2"],
            stdin=None)
    def test__run_command_over_ssh_remote_path_copy(self):
        mock_ssh = mock.MagicMock()
        vm_scenario = utils.VMScenario(self.context)
        vm_scenario._run_command_over_ssh(
            mock_ssh,
            {
                "remote_path": ["foo", "bar"],
                "local_path": "/bin/false",
                "command_args": ["arg1", "arg2"]
            }
        )
        # When local_path is given, the file is first uploaded (executable)
        # to the last element of remote_path, then executed.
        mock_ssh.put_file.assert_called_once_with(
            "/bin/false", "bar", mode=0o755
        )
        mock_ssh.execute.assert_called_once_with(
            ["foo", "bar", "arg1", "arg2"],
            stdin=None)
    def test__wait_for_ssh(self):
        ssh = mock.MagicMock()
        vm_scenario = utils.VMScenario(self.context)
        vm_scenario._wait_for_ssh(ssh)
        ssh.wait.assert_called_once_with(120, 1)
    def test__wait_for_ping(self):
        vm_scenario = utils.VMScenario(self.context)
        vm_scenario._ping_ip_address = mock.Mock(return_value=True)
        vm_scenario._wait_for_ping(netaddr.IPAddress("1.2.3.4"))
        self.mock_wait_for_status.mock.assert_called_once_with(
            utils.Host("1.2.3.4"),
            ready_statuses=[utils.Host.ICMP_UP_STATUS],
            update_resource=utils.Host.update_status,
            timeout=CONF.openstack.vm_ping_timeout,
            check_interval=CONF.openstack.vm_ping_poll_interval)
    @mock.patch(VMTASKS_UTILS + ".VMScenario._run_command_over_ssh")
    @mock.patch("rally.utils.sshutils.SSH")
    def test__run_command(self, mock_sshutils_ssh,
                          mock_vm_scenario__run_command_over_ssh):
        vm_scenario = utils.VMScenario(self.context)
        vm_scenario.context = {"user": {"keypair": {"private": "ssh"}}}
        vm_scenario._run_command("1.2.3.4", 22, "username", "password",
                                 command={"script_file": "foo",
                                          "interpreter": "bar"})
        mock_sshutils_ssh.assert_called_once_with(
            "username", "1.2.3.4",
            port=22, pkey="ssh", password="password")
        mock_sshutils_ssh.return_value.wait.assert_called_once_with(120, 1)
        mock_vm_scenario__run_command_over_ssh.assert_called_once_with(
            mock_sshutils_ssh.return_value,
            {"script_file": "foo", "interpreter": "bar"})
    # Helper (not a test): build a scenario with a stubbed boot/delete/fip
    # pipeline plus the fake server it returns.
    def get_scenario(self):
        server = mock.Mock(
            networks={"foo_net": "foo_data"},
            addresses={"foo_net": [{"addr": "foo_ip"}]},
            tenant_id="foo_tenant"
        )
        scenario = utils.VMScenario(
            self.context,
            clients=mock.MagicMock(credential=mock.MagicMock(api_info={})))
        scenario._boot_server = mock.Mock(return_value=server)
        scenario._delete_server = mock.Mock()
        scenario._associate_floating_ip = mock.Mock()
        scenario._wait_for_ping = mock.Mock()
        return scenario, server
    def test__boot_server_with_fip_without_networks(self):
        scenario, server = self.get_scenario()
        server.networks = {}
        self.assertRaises(RuntimeError,
                          scenario._boot_server_with_fip,
                          "foo_image", "foo_flavor", foo_arg="foo_value")
        scenario._boot_server.assert_called_once_with(
            "foo_image", "foo_flavor",
            foo_arg="foo_value", auto_assign_nic=True)
    def test__boot_server_with_fixed_ip(self):
        scenario, server = self.get_scenario()
        scenario._attach_floating_ip = mock.Mock()
        server, ip = scenario._boot_server_with_fip(
            "foo_image", "foo_flavor", floating_network="ext_network",
            use_floating_ip=False, foo_arg="foo_value")
        self.assertEqual(ip, {"ip": "foo_ip", "id": None,
                              "is_floating": False})
        scenario._boot_server.assert_called_once_with(
            "foo_image", "foo_flavor",
            auto_assign_nic=True, foo_arg="foo_value")
        # use_floating_ip=False must never attach a floating IP.
        self.assertEqual(scenario._attach_floating_ip.mock_calls, [])
    def test__boot_server_with_fip(self):
        scenario, server = self.get_scenario()
        scenario._attach_floating_ip = mock.Mock(
            return_value={"id": "foo_id", "ip": "foo_ip"})
        server, ip = scenario._boot_server_with_fip(
            "foo_image", "foo_flavor", floating_network="ext_network",
            use_floating_ip=True, foo_arg="foo_value")
        self.assertEqual(ip, {"ip": "foo_ip", "id": "foo_id",
                              "is_floating": True})
        scenario._boot_server.assert_called_once_with(
            "foo_image", "foo_flavor",
            auto_assign_nic=True, foo_arg="foo_value")
        scenario._attach_floating_ip.assert_called_once_with(
            server, "ext_network")
    def test__delete_server_with_fixed_ip(self):
        ip = {"ip": "foo_ip", "id": None, "is_floating": False}
        scenario, server = self.get_scenario()
        scenario._delete_floating_ip = mock.Mock()
        scenario._delete_server_with_fip(server, ip, force_delete=True)
        # Fixed IPs must not trigger floating-IP cleanup.
        self.assertEqual(scenario._delete_floating_ip.mock_calls, [])
        scenario._delete_server.assert_called_once_with(server, force=True)
    def test__delete_server_with_fip(self):
        fip = {"ip": "foo_ip", "id": "foo_id", "is_floating": True}
        scenario, server = self.get_scenario()
        scenario._delete_floating_ip = mock.Mock()
        scenario._delete_server_with_fip(server, fip, force_delete=True)
        scenario._delete_floating_ip.assert_called_once_with(server, fip)
        scenario._delete_server.assert_called_once_with(server, force=True)
    def test__attach_floating_ip(self):
        scenario, server = self.get_scenario()
        nc = scenario._clients.neutron.return_value
        fip = {"id": "foo_id", "floating_ip_address": "foo_ip"}
        nc.create_floatingip.return_value = {"floatingip": fip}
        floating_network = {"id": "floating-network-id",
                            "name": "floating-network"}
        scenario._attach_floating_ip(
            server, floating_network=floating_network)
        nc.create_floatingip.assert_called_once_with({
            "floatingip": {"description": mock.ANY,
                           "floating_network_id": floating_network["id"]}
        })
        scenario._associate_floating_ip.assert_called_once_with(
            server, fip, fixed_address=fip["floating_ip_address"])
    def test__delete_floating_ip(self):
        scenario, server = self.get_scenario()
        nc = scenario._clients.neutron.return_value
        _check_addr = mock.Mock(return_value=True)
        scenario.check_ip_address = mock.Mock(return_value=_check_addr)
        scenario._dissociate_floating_ip = mock.Mock()
        fip = {"id": "foo_id", "ip": "foo_ip"}
        scenario._delete_floating_ip(server, fip=fip)
        scenario.check_ip_address.assert_called_once_with(
            "foo_ip")
        _check_addr.assert_called_once_with(server)
        scenario._dissociate_floating_ip.assert_called_once_with(
            server, fip)
        nc.delete_floatingip.assert_called_once_with("foo_id")
class HostTestCase(test.TestCase):
    """Tests for utils.Host ICMP status probing across platforms/IP versions."""
    @mock.patch(VMTASKS_UTILS + ".sys")
    @mock.patch("subprocess.Popen")
    def test__ping_ip_address_linux(self, mock_popen, mock_sys):
        mock_popen.return_value.returncode = 0
        mock_sys.platform = "linux2"
        host = utils.Host("1.2.3.4")
        self.assertEqual(utils.Host.ICMP_UP_STATUS,
                         utils.Host.update_status(host).status)
        # On Linux, ping gets an explicit 1-second deadline (-w1).
        mock_popen.assert_called_once_with(
            ["ping", "-c1", "-w1", str(host.ip)],
            stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        mock_popen.return_value.wait.assert_called_once_with()
    @mock.patch(VMTASKS_UTILS + ".sys")
    @mock.patch("subprocess.Popen")
    def test__ping_ip_address_linux_ipv6(self, mock_popen, mock_sys):
        mock_popen.return_value.returncode = 0
        mock_sys.platform = "linux2"
        host = utils.Host("1ce:c01d:bee2:15:a5:900d:a5:11fe")
        self.assertEqual(utils.Host.ICMP_UP_STATUS,
                         utils.Host.update_status(host).status)
        # IPv6 addresses must be probed with ping6.
        mock_popen.assert_called_once_with(
            ["ping6", "-c1", "-w1", str(host.ip)],
            stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        mock_popen.return_value.wait.assert_called_once_with()
    @mock.patch(VMTASKS_UTILS + ".sys")
    @mock.patch("subprocess.Popen")
    def test__ping_ip_address_other_os(self, mock_popen, mock_sys):
        mock_popen.return_value.returncode = 0
        mock_sys.platform = "freebsd10"
        host = utils.Host("1.2.3.4")
        self.assertEqual(utils.Host.ICMP_UP_STATUS,
                         utils.Host.update_status(host).status)
        # Non-Linux platforms omit the -w deadline flag.
        mock_popen.assert_called_once_with(
            ["ping", "-c1", str(host.ip)],
            stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        mock_popen.return_value.wait.assert_called_once_with()
    @mock.patch(VMTASKS_UTILS + ".sys")
    @mock.patch("subprocess.Popen")
    def test__ping_ip_address_other_os_ipv6(self, mock_popen, mock_sys):
        mock_popen.return_value.returncode = 0
        mock_sys.platform = "freebsd10"
        host = utils.Host("1ce:c01d:bee2:15:a5:900d:a5:11fe")
        self.assertEqual(utils.Host.ICMP_UP_STATUS,
                         utils.Host.update_status(host).status)
        mock_popen.assert_called_once_with(
            ["ping6", "-c1", str(host.ip)],
            stderr=subprocess.PIPE, stdout=subprocess.PIPE)
        mock_popen.return_value.wait.assert_called_once_with()
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,626
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/swift/objects.py
|
# Copyright 2015: Cisco Systems, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally.common import validation
from rally import exceptions
from rally_openstack.common import consts
from rally_openstack.task import context
from rally_openstack.task.contexts.swift import utils as swift_utils
LOG = logging.getLogger(__name__)
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="swift_objects", platform="openstack", order=360)
class SwiftObjectGenerator(swift_utils.SwiftObjectMixin,
                           context.OpenStackContext):
    """Create containers and objects in each tenant."""

    # JSON schema validating the user-supplied context configuration.
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            "containers_per_tenant": {
                "type": "integer",
                "minimum": 1
            },
            "objects_per_container": {
                "type": "integer",
                "minimum": 1
            },
            "object_size": {
                "type": "integer",
                "minimum": 1
            },
            "resource_management_workers": {
                "type": "integer",
                "minimum": 1
            }
        },
        "additionalProperties": False
    }

    # Values used for any option the user leaves unset.
    DEFAULT_CONFIG = {
        "containers_per_tenant": 1,
        "objects_per_container": 1,
        "object_size": 1024,
        "resource_management_workers": 30
    }

    def setup(self):
        """Create containers and objects, using the broker pattern.

        :raises exceptions.ContextSetupFailure: if fewer containers or
            objects were created than the configuration requested
        """
        threads = self.config["resource_management_workers"]
        containers_per_tenant = self.config["containers_per_tenant"]
        containers_num = len(self.context["tenants"]) * containers_per_tenant
        # NOTE: format arguments are passed lazily to the logger so the
        # message is only rendered when debug logging is enabled.
        LOG.debug("Creating %d containers using %d threads.",
                  containers_num, threads)
        containers_count = len(self._create_containers(containers_per_tenant,
                                                       threads))
        if containers_count != containers_num:
            raise exceptions.ContextSetupFailure(
                ctx_name=self.get_name(),
                msg="Failed to create the requested number of containers, "
                    "expected %(expected)s but got %(actual)s."
                    % {"expected": containers_num, "actual": containers_count})
        objects_per_container = self.config["objects_per_container"]
        objects_num = containers_num * objects_per_container
        LOG.debug("Creating %d objects using %d threads.",
                  objects_num, threads)
        objects_count = len(self._create_objects(objects_per_container,
                                                 self.config["object_size"],
                                                 threads))
        if objects_count != objects_num:
            raise exceptions.ContextSetupFailure(
                ctx_name=self.get_name(),
                msg="Failed to create the requested number of objects, "
                    "expected %(expected)s but got %(actual)s."
                    % {"expected": objects_num, "actual": objects_count})

    def cleanup(self):
        """Delete containers and objects, using the broker pattern.

        Objects are deleted first, then their containers.
        """
        threads = self.config["resource_management_workers"]
        self._delete_objects(threads)
        self._delete_containers(threads)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,627
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/quotas/quotas.py
|
# Copyright 2014: Dassault Systemes
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally.common import validation
from rally_openstack.common import consts
from rally_openstack.common import osclients
from rally_openstack.task import context
from rally_openstack.task.contexts.quotas import cinder_quotas
from rally_openstack.task.contexts.quotas import designate_quotas
from rally_openstack.task.contexts.quotas import manila_quotas
from rally_openstack.task.contexts.quotas import neutron_quotas
from rally_openstack.task.contexts.quotas import nova_quotas
LOG = logging.getLogger(__name__)
@validation.add("required_platform", platform="openstack", admin=True)
@context.configure(name="quotas", platform="openstack", order=300)
class Quotas(context.OpenStackContext):
    """Sets OpenStack Tenants quotas."""

    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "additionalProperties": False,
        "properties": {
            "nova": nova_quotas.NovaQuotas.QUOTAS_SCHEMA,
            "cinder": cinder_quotas.CinderQuotas.QUOTAS_SCHEMA,
            "manila": manila_quotas.ManilaQuotas.QUOTAS_SCHEMA,
            "designate": designate_quotas.DesignateQuotas.QUOTAS_SCHEMA,
            "neutron": neutron_quotas.NeutronQuotas.QUOTAS_SCHEMA
        }
    }

    def __init__(self, ctx):
        super(Quotas, self).__init__(ctx)
        self.clients = osclients.Clients(
            self.context["admin"]["credential"])
        # Per-service quota managers, keyed by the same service names as
        # CONFIG_SCHEMA["properties"].
        self.manager = {
            "nova": nova_quotas.NovaQuotas(self.clients),
            "cinder": cinder_quotas.CinderQuotas(self.clients),
            "manila": manila_quotas.ManilaQuotas(self.clients),
            "designate": designate_quotas.DesignateQuotas(self.clients),
            "neutron": neutron_quotas.NeutronQuotas(self.clients)
        }
        # (service, tenant_id, quota values) triples saved in setup() so
        # cleanup() can restore them when pre-existing users are in play.
        self.original_quotas = []

    def _service_has_quotas(self, service):
        """Return True when the config sets any quotas for ``service``."""
        return len(self.config.get(service, {})) > 0

    def setup(self):
        """Apply the configured quotas to every tenant in the context."""
        for tenant_id in self.context["tenants"]:
            for service in self.manager:
                if self._service_has_quotas(service):
                    # NOTE(andreykurilin): in case of existing users it is
                    # required to restore original quotas instead of reset
                    # to default ones.
                    if "existing_users" in self.context:
                        self.original_quotas.append(
                            (service, tenant_id,
                             self.manager[service].get(tenant_id)))
                    self.manager[service].update(tenant_id,
                                                 **self.config[service])

    def _restore_quotas(self):
        """Re-apply the quota values that were saved during setup()."""
        for service, tenant_id, quotas in self.original_quotas:
            try:
                self.manager[service].update(tenant_id, **quotas)
            except Exception as e:
                # Lazy %-style logger arguments: the message is rendered
                # only if the warning is actually emitted.
                LOG.warning("Failed to restore quotas for tenant %(tenant_id)s"
                            " in service %(service)s \n reason: %(exc)s",
                            {"tenant_id": tenant_id, "service": service,
                             "exc": e})

    def _delete_quotas(self):
        """Delete the tenant quotas for every configured service."""
        for service in self.manager:
            if self._service_has_quotas(service):
                for tenant_id in self.context["tenants"]:
                    try:
                        self.manager[service].delete(tenant_id)
                    except Exception as e:
                        LOG.warning(
                            "Failed to remove quotas for tenant %(tenant)s "
                            "in service %(service)s reason: %(e)s",
                            {"tenant": tenant_id, "service": service, "e": e})

    def cleanup(self):
        """Undo setup(): restore saved quotas or delete the applied ones."""
        if self.original_quotas:
            # existing users
            self._restore_quotas()
        else:
            self._delete_quotas()
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,628
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/ironic/test_nodes.py
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally import exceptions
from rally_openstack.task.scenarios.ironic import nodes
from tests.unit import test
class IronicNodesTestCase(test.ScenarioTestCase):
    """Unit tests for the Ironic node scenarios."""

    def test_create_and_list_node(self):
        # Lightweight stand-in for an Ironic node: only "name" matters,
        # since the scenario compares created vs. listed nodes.
        class Node(object):
            def __init__(self, name):
                self.name = name
        scenario = nodes.CreateAndListNode(self.context)
        scenario._create_node = mock.Mock(return_value=Node("node_obj1"))
        scenario._list_nodes = mock.Mock(
            return_value=[Node(name)
                          for name in ("node_obj1", "node_obj2", "node_obj3")])
        driver = "foo"
        properties = "fake_prop"
        # Mixed kwargs: the first four are list filters, the last one is
        # expected to be forwarded to node creation instead.
        fake_params = {
            "sort_dir": "foo1",
            "associated": "foo2",
            "detail": True,
            "maintenance": "foo5",
            "fake_parameter1": "foo7"
        }
        # Positive case:
        scenario.run(driver, properties, **fake_params)
        scenario._create_node.assert_called_once_with(driver, properties,
                                                      fake_parameter1="foo7")
        scenario._list_nodes.assert_called_once_with(
            sort_dir="foo1", associated="foo2", detail=True,
            maintenance="foo5")
        # Negative case: created node not in the list of available nodes
        # NOTE(review): mock.Mock(uuid="foooo") sets "uuid" on the mock
        # itself, not on its return value; calling it yields a fresh Mock
        # whose name is presumably absent from the listed names, which is
        # what should trigger the assertion error -- confirm intent.
        scenario._create_node = mock.Mock(uuid="foooo")
        self.assertRaises(exceptions.RallyAssertionError,
                          scenario.run, driver, properties, **fake_params)
        scenario._create_node.assert_called_with(driver, properties,
                                                 fake_parameter1="foo7")
        scenario._list_nodes.assert_called_with(
            sort_dir="foo1", associated="foo2", detail=True,
            maintenance="foo5")

    def test_create_and_delete_node(self):
        fake_node = mock.Mock(uuid="fake_uuid")
        scenario = nodes.CreateAndDeleteNode(self.context)
        scenario._create_node = mock.Mock(return_value=fake_node)
        scenario._delete_node = mock.Mock()
        driver = "fake"
        properties = "fake_prop"
        scenario.run(driver, properties, fake_parameter1="fake1",
                     fake_parameter2="fake2")
        # All extra kwargs must reach _create_node, and the very node it
        # returned must be the one passed to _delete_node.
        scenario._create_node.assert_called_once_with(
            driver, properties, fake_parameter1="fake1",
            fake_parameter2="fake2")
        scenario._delete_node.assert_called_once_with(
            scenario._create_node.return_value)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,629
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/glance/images.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import logging
from rally.task import types
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.common.services.image import glance_v2
from rally_openstack.common.services.image import image
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.nova import utils as nova_utils
LOG = logging.getLogger(__name__)
"""Scenarios for Glance images."""
class GlanceBasic(scenario.OpenStackScenario):
    """Base scenario wiring up Glance image-service helpers."""

    def __init__(self, context=None, admin_clients=None, clients=None):
        super(GlanceBasic, self).__init__(context, admin_clients, clients)

        def build_service(os_clients):
            # One Image service helper per credential set, sharing this
            # scenario's name generator and atomic-action registry.
            return image.Image(os_clients,
                               name_generator=self.generate_random_name,
                               atomic_inst=self.atomic_actions())

        if hasattr(self, "_admin_clients"):
            self.admin_glance = build_service(self._admin_clients)
        if hasattr(self, "_clients"):
            self.glance = build_service(self._clients)
@validation.add("enum", param_name="container_format",
                values=["ami", "ari", "aki", "bare", "ovf"])
@validation.add("enum", param_name="disk_format",
                values=["ami", "ari", "aki", "vhd", "vmdk", "raw",
                        "qcow2", "vdi", "iso"])
@types.convert(image_location={"type": "path_or_url"},
               kwargs={"type": "glance_image_args"})
@validation.add("required_services", services=[consts.Service.GLANCE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["glance"]},
                    name="GlanceImages.create_and_list_image",
                    platform="openstack")
class CreateAndListImage(GlanceBasic):

    def run(self, container_format, image_location, disk_format,
            visibility="private", min_disk=0, min_ram=0, properties=None):
        """Create one image, then list all images and check it shows up.

        Measures "glance image-list" performance; with a single user in
        the context, every iteration adds one more image, so the listing
        is exercised against a steadily growing image set.

        :param container_format: container format of image. Acceptable
                                 formats: ami, ari, aki, bare, and ovf
        :param image_location: image file location
        :param disk_format: disk format of image. Acceptable formats:
                            ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
        :param visibility: The access permission for the created image
        :param min_disk: The min disk of created images
        :param min_ram: The min ram of created images
        :param properties: A dict of image metadata properties to set
                           on the image
        """
        created = self.glance.create_image(
            container_format=container_format,
            image_location=image_location,
            disk_format=disk_format,
            visibility=visibility,
            min_disk=min_disk,
            min_ram=min_ram,
            properties=properties)
        self.assertTrue(created)
        listed_ids = [img.id for img in self.glance.list_images()]
        self.assertIn(created.id, listed_ids)
@validation.add("enum", param_name="container_format",
                values=["ami", "ari", "aki", "bare", "ovf"])
@validation.add("enum", param_name="disk_format",
                values=["ami", "ari", "aki", "vhd", "vmdk", "raw",
                        "qcow2", "vdi", "iso"])
@types.convert(image_location={"type": "path_or_url"},
               kwargs={"type": "glance_image_args"})
@validation.add("required_services", services=[consts.Service.GLANCE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["glance"]},
                    name="GlanceImages.create_and_get_image",
                    platform="openstack")
class CreateAndGetImage(GlanceBasic):

    def run(self, container_format, image_location, disk_format,
            visibility="private", min_disk=0, min_ram=0, properties=None):
        """Create an image and fetch its detailed information back.

        The fetched record must refer to the image that was just created.

        :param container_format: container format of image. Acceptable
                                 formats: ami, ari, aki, bare, and ovf
        :param image_location: image file location
        :param disk_format: disk format of image. Acceptable formats:
                            ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
        :param visibility: The access permission for the created image
        :param min_disk: The min disk of created images
        :param min_ram: The min ram of created images
        :param properties: A dict of image metadata properties to set
                           on the image
        """
        created = self.glance.create_image(
            container_format=container_format,
            image_location=image_location,
            disk_format=disk_format,
            visibility=visibility,
            min_disk=min_disk,
            min_ram=min_ram,
            properties=properties)
        self.assertTrue(created)
        fetched = self.glance.get_image(created)
        self.assertEqual(created.id, fetched.id)
@validation.add("required_services", services=[consts.Service.GLANCE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(name="GlanceImages.list_images",
                    platform="openstack")
class ListImages(GlanceBasic):

    def run(self):
        """List all images visible to the current user.

        A minimal scenario exercising the "glance image-list" call; with
        several users in the context, each holding pre-uploaded images,
        this measures pure listing performance.
        """
        self.glance.list_images()
@validation.add("enum", param_name="container_format",
                values=["ami", "ari", "aki", "bare", "ovf"])
@validation.add("enum", param_name="disk_format",
                values=["ami", "ari", "aki", "vhd", "vmdk", "raw",
                        "qcow2", "vdi", "iso"])
@types.convert(image_location={"type": "path_or_url"},
               kwargs={"type": "glance_image_args"})
@validation.add("required_services", services=[consts.Service.GLANCE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["glance"]},
                    name="GlanceImages.create_and_delete_image",
                    platform="openstack")
class CreateAndDeleteImage(GlanceBasic):

    def run(self, container_format, image_location, disk_format,
            visibility="private", min_disk=0, min_ram=0, properties=None):
        """Create an image and immediately delete it again.

        :param container_format: container format of image. Acceptable
                                 formats: ami, ari, aki, bare, and ovf
        :param image_location: image file location
        :param disk_format: disk format of image. Acceptable formats:
                            ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
        :param visibility: The access permission for the created image
        :param min_disk: The min disk of created images
        :param min_ram: The min ram of created images
        :param properties: A dict of image metadata properties to set
                           on the image
        """
        new_image = self.glance.create_image(
            container_format=container_format,
            image_location=image_location,
            disk_format=disk_format,
            visibility=visibility,
            min_disk=min_disk,
            min_ram=min_ram,
            properties=properties)
        self.glance.delete_image(new_image.id)
@types.convert(flavor={"type": "nova_flavor"},
               image_location={"type": "path_or_url"},
               kwargs={"type": "glance_image_args"})
@validation.add("enum", param_name="container_format",
                values=["ami", "ari", "aki", "bare", "ovf"])
@validation.add("enum", param_name="disk_format",
                values=["ami", "ari", "aki", "vhd", "vmdk", "raw",
                        "qcow2", "vdi", "iso"])
@validation.add("restricted_parameters", param_names=["image_name", "name"])
@validation.add("flavor_exists", param_name="flavor")
@validation.add("required_services", services=[consts.Service.GLANCE,
                                               consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["glance", "nova"]},
                    name="GlanceImages.create_image_and_boot_instances",
                    platform="openstack")
class CreateImageAndBootInstances(GlanceBasic, nova_utils.NovaScenario):

    def run(self, container_format, image_location, disk_format,
            flavor, number_instances, visibility="private", min_disk=0,
            min_ram=0, properties=None, boot_server_kwargs=None):
        """Create an image, then boot a batch of Nova servers from it.

        :param container_format: container format of image. Acceptable
                                 formats: ami, ari, aki, bare, and ovf
        :param image_location: image file location
        :param disk_format: disk format of image. Acceptable formats:
                            ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
        :param visibility: The access permission for the created image
        :param min_disk: The min disk of created images
        :param min_ram: The min ram of created images
        :param properties: A dict of image metadata properties to set
                           on the image
        :param flavor: Nova flavor to be used to launch an instance
        :param number_instances: number of Nova servers to boot
        :param boot_server_kwargs: optional parameters to boot server
        """
        server_kwargs = boot_server_kwargs or {}
        new_image = self.glance.create_image(
            container_format=container_format,
            image_location=image_location,
            disk_format=disk_format,
            visibility=visibility,
            min_disk=min_disk,
            min_ram=min_ram,
            properties=properties)
        self._boot_servers(new_image.id, flavor, number_instances,
                           **server_kwargs)
@validation.add("enum", param_name="container_format",
                values=["ami", "ari", "aki", "bare", "ovf"])
@validation.add("enum", param_name="disk_format",
                values=["ami", "ari", "aki", "vhd", "vmdk", "raw",
                        "qcow2", "vdi", "iso"])
@types.convert(image_location={"type": "path_or_url"},
               kwargs={"type": "glance_image_args"})
@validation.add("required_services", services=[consts.Service.GLANCE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["glance"]},
                    name="GlanceImages.create_and_update_image",
                    platform="openstack")
class CreateAndUpdateImage(GlanceBasic):

    def run(self, container_format, image_location, disk_format,
            remove_props=None, visibility="private", create_min_disk=0,
            create_min_ram=0, create_properties=None,
            update_min_disk=0, update_min_ram=0):
        """Create an image, then update it with new min-disk/min-ram.

        Measures "glance image-create" plus "glance image-update"
        performance in sequence.

        :param container_format: container format of image. Acceptable
                                 formats: ami, ari, aki, bare, and ovf
        :param image_location: image file location
        :param disk_format: disk format of image. Acceptable formats:
                            ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
        :param remove_props: List of property names to remove.
                             (It is only supported by Glance v2.)
        :param visibility: The access permission for the created image
        :param create_min_disk: The min disk of created images
        :param create_min_ram: The min ram of created images
        :param create_properties: A dict of image metadata properties to set
                                  on the created image
        :param update_min_disk: The min disk of updated images
        :param update_min_ram: The min ram of updated images
        """
        created = self.glance.create_image(
            container_format=container_format,
            image_location=image_location,
            disk_format=disk_format,
            visibility=visibility,
            min_disk=create_min_disk,
            min_ram=create_min_ram,
            properties=create_properties)
        self.glance.update_image(created.id,
                                 min_disk=update_min_disk,
                                 min_ram=update_min_ram,
                                 remove_props=remove_props)
@validation.add("required_services", services=(consts.Service.GLANCE, ))
@validation.add("required_platform", platform="openstack", users=True)
@validation.add("required_api_versions", component="glance", versions=["2"])
@scenario.configure(context={"cleanup@openstack": ["glance"]},
                    name="GlanceImages.create_and_deactivate_image",
                    platform="openstack")
class CreateAndDeactivateImage(GlanceBasic):

    def run(self, container_format, image_location, disk_format,
            visibility="private", min_disk=0, min_ram=0):
        """Create an image through the v2 API and deactivate it.

        Deactivation is a Glance v2-only call, hence the dedicated
        GlanceV2Service helper instead of the generic self.glance.

        :param container_format: container format of image. Acceptable
                                 formats: ami, ari, aki, bare, and ovf
        :param image_location: image file location
        :param disk_format: disk format of image. Acceptable formats:
                            ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
        :param visibility: The access permission for the created image
        :param min_disk: The min disk of created images
        :param min_ram: The min ram of created images
        """
        v2_service = glance_v2.GlanceV2Service(
            self._clients,
            self.generate_random_name,
            atomic_inst=self.atomic_actions())
        created = v2_service.create_image(
            container_format=container_format,
            image_location=image_location,
            disk_format=disk_format,
            visibility=visibility,
            min_disk=min_disk,
            min_ram=min_ram)
        v2_service.deactivate_image(created.id)
@validation.add("enum", param_name="container_format",
                values=["ami", "ari", "aki", "bare", "ovf"])
@validation.add("enum", param_name="disk_format",
                values=["ami", "ari", "aki", "vhd", "vmdk", "raw",
                        "qcow2", "vdi", "iso"])
@types.convert(image_location={"type": "path_or_url"},
               kwargs={"type": "glance_image_args"})
@validation.add("required_services", services=[consts.Service.GLANCE])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["glance"]},
                    name="GlanceImages.create_and_download_image",
                    platform="openstack")
class CreateAndDownloadImage(GlanceBasic):

    def run(self, container_format, image_location, disk_format,
            visibility="private", min_disk=0, min_ram=0, properties=None):
        """Create an image, then download that image's data.

        :param container_format: container format of image. Acceptable
                                 formats: ami, ari, aki, bare, and ovf
        :param image_location: image file location
        :param disk_format: disk format of image. Acceptable formats:
                            ami, ari, aki, vhd, vmdk, raw, qcow2, vdi, and iso
        :param visibility: The access permission for the created image
        :param min_disk: The min disk of created images
        :param min_ram: The min ram of created images
        :param properties: A dict of image metadata properties to set
                           on the image
        """
        created = self.glance.create_image(
            container_format=container_format,
            image_location=image_location,
            disk_format=disk_format,
            visibility=visibility,
            min_disk=min_disk,
            min_ram=min_ram,
            properties=properties)
        self.glance.download_image(created.id)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,630
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/nova/test_flavors.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from novaclient import exceptions as nova_exceptions
from rally_openstack.task.contexts.nova import flavors
from tests.unit import test
CTX = "rally_openstack.task.contexts.nova"
class FlavorsGeneratorTestCase(test.TestCase):
    """Unit tests for the nova "flavors" context generator."""

    def setUp(self):
        super(FlavorsGeneratorTestCase, self).setUp()
        # Minimal context: one flavor definition plus the admin credential
        # and task object that the context machinery reads.
        self.context = {
            "config": {
                "flavors": [{
                    "name": "flavor_name",
                    "ram": 2048,
                    "disk": 10,
                    "vcpus": 3,
                    "ephemeral": 3,
                    "swap": 5,
                    "extra_specs": {
                        "key": "value"
                    }
                }]
            },
            "admin": {
                "credential": mock.MagicMock()
            },
            "task": mock.MagicMock(),
        }

    @mock.patch("%s.flavors.osclients.Clients" % CTX)
    def test_setup(self, mock_clients):
        # Setup and mock
        # NOTE(review): calling a MagicMock repeatedly returns the same
        # child mock, so mock_clients().nova().flavors.create is the very
        # object the context code will invoke.
        mock_create = mock_clients().nova().flavors.create
        mock_create().to_dict.return_value = {"flavor_key": "flavor_value"}
        # Run
        flavors_ctx = flavors.FlavorsGenerator(self.context)
        flavors_ctx.setup()
        # Assertions
        # The created flavor must be recorded in the context keyed by its
        # name, with the dict form produced by to_dict().
        self.assertEqual({"flavor_name": {"flavor_key": "flavor_value"}},
                         flavors_ctx.context["flavors"])
        mock_clients.assert_called_with(self.context["admin"]["credential"])
        mock_create.assert_called_with(
            name="flavor_name", ram=2048, vcpus=3,
            disk=10, ephemeral=3, swap=5)
        mock_create().set_keys.assert_called_with({"key": "value"})
        mock_create().to_dict.assert_called_with()

    @mock.patch("%s.flavors.osclients.Clients" % CTX)
    def test_setup_failexists(self, mock_clients):
        # Setup and mock
        new_context = copy.deepcopy(self.context)
        new_context["flavors"] = {}
        mock_flavor_create = mock_clients().nova().flavors.create
        # Conflict simulates the flavor already existing; setup() is
        # expected to swallow it and record an empty flavors mapping.
        exception = nova_exceptions.Conflict("conflict")
        mock_flavor_create.side_effect = exception
        # Run
        flavors_ctx = flavors.FlavorsGenerator(self.context)
        flavors_ctx.setup()
        # Assertions
        self.assertEqual(new_context, flavors_ctx.context)
        mock_clients.assert_called_with(self.context["admin"]["credential"])
        mock_flavor_create.assert_called_once_with(
            name="flavor_name", ram=2048, vcpus=3,
            disk=10, ephemeral=3, swap=5)

    @mock.patch("%s.flavors.rutils.make_name_matcher" % CTX)
    @mock.patch("%s.flavors.resource_manager.cleanup" % CTX)
    def test_cleanup(self, mock_cleanup, mock_make_name_matcher):
        # Setup and mock
        real_context = {
            "config": {
                "flavors": [
                    {"name": "flavor_name"},
                ]
            },
            "admin": {
                "credential": mock.MagicMock()
            },
            "task": mock.MagicMock(),
        }
        # Run
        flavors_ctx = flavors.FlavorsGenerator(real_context)
        flavors_ctx.cleanup()
        # cleanup() must delegate to the generic resource cleanup with a
        # name matcher built from the configured flavor name.
        mock_cleanup.assert_called_once_with(
            names=["nova.flavors"],
            admin=real_context["admin"],
            superclass=mock_make_name_matcher.return_value,
            task_id=flavors_ctx.get_owner_id())
        mock_make_name_matcher.assert_called_once_with("flavor_name")
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,631
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/test.py
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import os
from unittest import mock
import uuid
import testtools
from rally.common import cfg
from rally.common import db
from rally import plugins
from tests.unit import fakes
plugins.load()
class DatabaseFixture(cfg.fixture.Config):
    """Create clean DB before starting test."""

    def setUp(self):
        super(DatabaseFixture, self).setUp()
        # The target database can be overridden via the environment;
        # otherwise an in-memory SQLite database is used.
        connection_url = os.environ.get("RALLY_UNITTEST_DB_URL", "sqlite://")
        db.engine_reset()
        self.conf.set_default("connection", connection_url, group="database")
        # Drop any leftover schema, then recreate it from scratch.
        db.schema_cleanup()
        db.schema_create()
class TestCase(testtools.TestCase):
    """Test case base class for all unit tests."""

    def setUp(self):
        super(TestCase, self).setUp()
        # Guarantee that any mock.patch started manually is stopped even
        # if the test itself forgets to stop it.
        self.addCleanup(mock.patch.stopall)

    def _test_atomic_action_timer(self, atomic_actions, name, count=1,
                                  parent=None):
        """Check that an atomic action was recorded as expected.

        :param atomic_actions: list of recorded atomic-action dicts (each
            with "name", "started_at", "finished_at" and, for nested
            actions, "children")
        :param name: name of the atomic action to look for
        :param count: expected number of occurrences of the action
        :param parent: optional chain of parent action names; when given,
            the check recurses into the matching parent's "children"
        """
        # NOTE: "parent=None" instead of a mutable default list; mutable
        # default arguments are shared between calls.
        parent = parent or []
        if parent:
            is_found = False
            for action in atomic_actions:
                if action["name"] == parent[0]:
                    is_found = True
                    self._test_atomic_action_timer(action["children"],
                                                   name, count=count,
                                                   parent=parent[1:])
            if not is_found:
                self.fail("The parent action %s can not be found."
                          % parent[0])
        else:
            actual_count = 0
            for atomic_action in atomic_actions:
                if atomic_action["name"] == name:
                    self.assertIsInstance(atomic_action["started_at"], float)
                    self.assertIsInstance(atomic_action["finished_at"], float)
                    actual_count += 1
            if count != actual_count:
                self.fail("%(count)d count is expected for atomic action"
                          " %(name)s, the actual count"
                          " is %(actual_count)d."
                          % {"name": name, "count": count,
                             "actual_count": actual_count})

    def assertSequenceEqual(self, iterable_1, iterable_2, msg=None):
        """Compare two arbitrary iterables by materializing them as tuples."""
        self.assertEqual(tuple(iterable_1), tuple(iterable_2), msg)
class DBTestCase(TestCase):
    """Base class for tests which use DB."""

    def setUp(self):
        super(DBTestCase, self).setUp()
        # A fresh, clean database schema for every test.
        db_fixture = DatabaseFixture()
        self.useFixture(db_fixture)
# TODO(boris-42): This should be moved to test.plugins.test module
# or similar
class ScenarioTestCase(TestCase):
    """Base class for Scenario tests using mocked self.clients."""
    # Dotted module path under which the wait/resource helpers live.
    task_utils = "rally.task.utils"
    # Subclasses may set this to False to keep the real task utils.
    patch_task_utils = True
    def client_factory(self, client_type, version=None, admin=False):
        """Create a new client object."""
        return mock.MagicMock(client_type=client_type,
                              version=version,
                              admin=admin)
    def clients(self, client_type, version=None, admin=False):
        """Get a mocked client."""
        # Cache per (type, version, admin) so repeated lookups return
        # the same mock and its call history accumulates in one place.
        key = (client_type, version, admin)
        if key not in self._clients:
            self._clients[key] = self.client_factory(client_type,
                                                     version=version,
                                                     admin=admin)
        return self._clients[key]
    def admin_clients(self, client_type, version=None):
        """Get a mocked admin client."""
        return self.clients(client_type, version=version, admin=True)
    def client_created(self, client_type, version=None, admin=False):
        """Determine if a client has been created.
        This can be used to see if a scenario calls
        'self.clients("foo")', without checking to see what was done
        with the client object returned by that call.
        """
        key = (client_type, version, admin)
        return key in self._clients
    def get_client_mocks(self):
        # Route OpenStackScenario's client accessors through this test
        # case's mocked client cache.
        base_path = "rally_openstack.task"
        return [
            mock.patch(
                f"{base_path}.scenario.OpenStackScenario.clients",
                mock.Mock(side_effect=self.clients)),
            mock.patch(
                f"{base_path}.scenario.OpenStackScenario.admin_clients",
                mock.Mock(side_effect=self.admin_clients))
        ]
    def get_test_context(self):
        # Fresh context with random task/owner ids for each test.
        return get_test_context()
    def setUp(self):
        super(ScenarioTestCase, self).setUp()
        # Stub out the polling/wait helpers so scenarios don't block.
        if self.patch_task_utils:
            self.mock_resource_is = fixtures.MockPatch(
                self.task_utils + ".resource_is")
            self.mock_get_from_manager = fixtures.MockPatch(
                self.task_utils + ".get_from_manager")
            self.mock_wait_for = fixtures.MockPatch(
                self.task_utils + ".wait_for")
            self.mock_wait_for_delete = fixtures.MockPatch(
                self.task_utils + ".wait_for_delete")
            self.mock_wait_for_status = fixtures.MockPatch(
                self.task_utils + ".wait_for_status")
            self.useFixture(self.mock_resource_is)
            self.useFixture(self.mock_get_from_manager)
            self.useFixture(self.mock_wait_for)
            self.useFixture(self.mock_wait_for_delete)
            self.useFixture(self.mock_wait_for_status)
        self.mock_sleep = fixtures.MockPatch("time.sleep")
        self.useFixture(self.mock_sleep)
        self._clients = {}
        # Client-accessor patchers are started manually here and stopped
        # in tearDown (not via fixtures), mirroring the original design.
        self._client_mocks = self.get_client_mocks()
        for patcher in self._client_mocks:
            patcher.start()
        self.context = self.get_test_context()
    def tearDown(self):
        for patcher in self._client_mocks:
            patcher.stop()
        super(ScenarioTestCase, self).tearDown()
class ContextClientAdapter(object):
    """Expose mocked per-service clients through attribute access.

    Mimics the osclients.Clients interface: ``adapter.nova()`` yields
    the mocked nova client tracked by the owning test case for this
    endpoint.
    """

    def __init__(self, endpoint, test_case):
        self.endpoint = endpoint
        self.test_case = test_case

    def mock_client(self, name, version=None):
        """Fetch the mocked client for *name* from the test case."""
        is_admin = self.endpoint.startswith("admin")
        mocked = self.test_case.clients(name, version=version,
                                        admin=is_admin)
        # A non-Mock return_value means the test pre-seeded a concrete
        # object that should be handed out instead of the raw mock.
        if not isinstance(mocked.return_value, mock.Mock):
            return mocked.return_value
        # NOTE(pboldin): if a client has side_effects that means the
        # user wants some of the returned values overrided (look at
        # the test_existing_users for instance)
        return mocked() if mocked.side_effect is not None else mocked

    def __getattr__(self, name):
        # __getattr__ runs only for attributes absent from __dict__;
        # treat any unknown attribute as a client factory.
        def factory(version=None):
            return self.mock_client(name, version)
        return factory
class ContextTestCase(ScenarioTestCase):
    """Scenario test base that routes osclients.Clients to adapters."""

    def setUp(self):
        super(ContextTestCase, self).setUp()
        # Adapter cache, keyed by endpoint.
        self._adapters = {}

    def context_client(self, endpoint, api_info=None):
        """Return (and cache) the client adapter for *endpoint*."""
        try:
            return self._adapters[endpoint]
        except KeyError:
            adapter = ContextClientAdapter(endpoint, self)
            self._adapters[endpoint] = adapter
            return adapter

    def get_client_mocks(self):
        # Replace the real osclients.Clients factory so context code
        # transparently receives mocked clients.
        return [
            mock.patch(
                "rally_openstack.common.osclients.Clients",
                mock.Mock(side_effect=self.context_client))
        ]
class FakeClientsScenarioTestCase(ScenarioTestCase):
    """Base class for Scenario tests using fake (not mocked) self.clients."""
    def client_factory(self, client_type, version=None, admin=False):
        # Delegate to fakes.FakeClients: hands back a functional fake
        # client rather than a bare MagicMock.
        return getattr(self._fake_clients, client_type)()
    def setUp(self):
        super(FakeClientsScenarioTestCase, self).setUp()
        self._fake_clients = fakes.FakeClients()
def get_test_context(**kwargs):
    """Build a minimal task context dict with fresh random identifiers.

    Any keyword arguments are carried over into the returned context.
    """
    context = dict(kwargs)
    context["task"] = {"uuid": str(uuid.uuid4())}
    context["owner_id"] = str(uuid.uuid4())
    return context
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,632
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/cleanup/test_base.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.cleanup import base
from tests.unit import test
BASE = "rally_openstack.task.cleanup.base"
class ResourceDecoratorTestCase(test.TestCase):
    """Checks that @base.resource stamps service/resource attributes."""

    def test_resource(self):
        @base.resource("service", "res")
        class Dummy(object):
            pass

        # The decorator must record both identifiers on the class.
        self.assertEqual("service", Dummy._service)
        self.assertEqual("res", Dummy._resource)
class ResourceManagerTestCase(test.TestCase):
    """Unit tests for cleanup.base.ResourceManager."""
    def test__manager(self):
        # _manager() resolves <user>.<_service>().<_resource>.
        user = mock.MagicMock()
        user.service1().resource1 = "user_res"
        manager = base.ResourceManager(user=user)
        manager._service = "service1"
        manager._resource = "resource1"
        self.assertEqual("user_res", manager._manager())
    def test__manager_admin(self):
        # With _admin_required the admin clients are used instead.
        admin = mock.MagicMock()
        admin.service1().resource1 = "admin_res"
        manager = base.ResourceManager(admin=admin)
        manager._service = "service1"
        manager._resource = "resource1"
        manager._admin_required = True
        self.assertEqual("admin_res", manager._manager())
    def test_id(self):
        resource = mock.MagicMock(id="test_id")
        manager = base.ResourceManager(resource=resource)
        self.assertEqual(resource.id, manager.id())
    def test_name(self):
        resource = mock.MagicMock(name="test_name")
        manager = base.ResourceManager(resource=resource)
        self.assertEqual(resource.name, manager.name())
    @mock.patch("%s.ResourceManager._manager" % BASE)
    def test_is_deleted(self, mock_resource_manager__manager):
        # "deleted" and "DELETE_COMPLETE" statuses count as deleted;
        # anything else (e.g. ACTIVE) does not.
        raw_res = mock.MagicMock(status="deleted")
        mock_resource_manager__manager().get.return_value = raw_res
        mock_resource_manager__manager.reset_mock()
        resource = mock.MagicMock(id="test_id")
        manager = base.ResourceManager(resource=resource)
        self.assertTrue(manager.is_deleted())
        raw_res.status = "DELETE_COMPLETE"
        self.assertTrue(manager.is_deleted())
        raw_res.status = "ACTIVE"
        self.assertFalse(manager.is_deleted())
        # One _manager() + get(id) round-trip per is_deleted() call.
        mock_resource_manager__manager.assert_has_calls(
            [mock.call(), mock.call().get(resource.id)] * 3)
        self.assertEqual(3, mock_resource_manager__manager.call_count)
    @mock.patch("%s.ResourceManager._manager" % BASE)
    def test_is_deleted_exceptions(self, mock_resource_manager__manager):
        # Only a 404 from the backend means the resource is gone;
        # other errors are treated as "still present".
        class Fake500Exc(Exception):
            code = 500
        class Fake404Exc(Exception):
            code = 404
        mock_resource_manager__manager.side_effect = [
            Exception, Fake500Exc, Fake404Exc]
        manager = base.ResourceManager(resource=mock.MagicMock())
        self.assertFalse(manager.is_deleted())
        self.assertFalse(manager.is_deleted())
        self.assertTrue(manager.is_deleted())
    @mock.patch("%s.ResourceManager._manager" % BASE)
    def test_delete(self, mock_resource_manager__manager):
        res = mock.MagicMock(id="test_id")
        manager = base.ResourceManager(resource=res)
        manager.delete()
        mock_resource_manager__manager.assert_has_calls(
            [mock.call(), mock.call().delete(res.id)])
    @mock.patch("%s.ResourceManager._manager" % BASE)
    def test_list(self, mock_resource_manager__manager):
        base.ResourceManager().list()
        mock_resource_manager__manager.assert_has_calls(
            [mock.call(), mock.call().list()])
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,633
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/barbican/orders.py
|
# Copyright 2018 Red Hat, Inc. <http://www.redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import validation
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.barbican import utils
"""Scenarios for Barbican orders."""
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(name="BarbicanOrders.list")
class BarbicanOrdersList(utils.BarbicanBase):
    """Scenario: list all Barbican orders via the admin client."""
    def run(self):
        """List Orders."""
        self.admin_barbican.orders_list()
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(name="BarbicanOrders.create_key_and_delete")
class BarbicanOrdersCreateKeyAndDelete(utils.BarbicanBase):
    """Scenario: create a key order, then delete it by order_ref."""
    def run(self):
        """Create and delete key orders"""
        keys = self.admin_barbican.create_key()
        self.admin_barbican.orders_delete(keys.order_ref)
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(name="BarbicanOrders.create_certificate_and_delete")
class BarbicanOrdersCreateCertificateAndDelete(utils.BarbicanBase):
    """Scenario: create a certificate order, then delete it."""
    def run(self):
        """Create and delete certificate orders"""
        certificate = self.admin_barbican.create_certificate()
        self.admin_barbican.orders_delete(certificate.order_ref)
@validation.add("required_services", services=[consts.Service.BARBICAN])
@validation.add("required_platform", platform="openstack", admin=True)
@scenario.configure(name="BarbicanOrders.create_asymmetric_and_delete")
class BarbicanOrdersCreateAsymmetricAndDelete(utils.BarbicanBase):
    """Scenario: create an asymmetric key order, then delete it."""

    def run(self):
        """Create and delete asymmetric order."""
        # Local renamed from "certificate": create_asymmetric returns
        # an asymmetric order, not a certificate.
        order = self.admin_barbican.create_asymmetric()
        self.admin_barbican.orders_delete(order.order_ref)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,634
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/sahara/test_node_group_templates.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.scenarios.sahara import node_group_templates as ngts
from tests.unit import test
BASE = "rally_openstack.task.scenarios.sahara.node_group_templates"
class SaharaNodeGroupTemplatesTestCase(test.TestCase):
    """Unit tests for the Sahara node-group-template scenarios."""
    def setUp(self):
        super(SaharaNodeGroupTemplatesTestCase, self).setUp()
        self.context = test.get_test_context()
    @mock.patch("%s.CreateAndListNodeGroupTemplates"
                "._list_node_group_templates" % BASE)
    @mock.patch("%s.CreateAndListNodeGroupTemplates"
                "._create_master_node_group_template" % BASE)
    @mock.patch("%s.CreateAndListNodeGroupTemplates"
                "._create_worker_node_group_template" % BASE)
    def test_create_and_list_node_group_templates(self,
                                                  mock_create_worker,
                                                  mock_create_master,
                                                  mock_list_group):
        # run() must create a master and a worker template, then list.
        ngts.CreateAndListNodeGroupTemplates(self.context).run(
            "test_flavor", "test_plugin", "test_version")
        mock_create_master.assert_called_once_with(
            flavor_id="test_flavor",
            plugin_name="test_plugin",
            hadoop_version="test_version",
            use_autoconfig=True)
        mock_create_worker.assert_called_once_with(
            flavor_id="test_flavor",
            plugin_name="test_plugin",
            hadoop_version="test_version",
            use_autoconfig=True)
        mock_list_group.assert_called_once_with()
    @mock.patch("%s.CreateDeleteNodeGroupTemplates"
                "._delete_node_group_template" % BASE)
    @mock.patch("%s.CreateDeleteNodeGroupTemplates"
                "._create_master_node_group_template" % BASE)
    @mock.patch("%s.CreateDeleteNodeGroupTemplates"
                "._create_worker_node_group_template" % BASE)
    def test_create_delete_node_group_templates(self,
                                                mock_create_worker,
                                                mock_create_master,
                                                mock_delete_group):
        # run() must create both templates, then delete each of them.
        ngts.CreateDeleteNodeGroupTemplates(self.context).run(
            "test_flavor", "test_plugin", "test_version")
        mock_create_master.assert_called_once_with(
            flavor_id="test_flavor",
            plugin_name="test_plugin",
            hadoop_version="test_version",
            use_autoconfig=True)
        mock_create_worker.assert_called_once_with(
            flavor_id="test_flavor",
            plugin_name="test_plugin",
            hadoop_version="test_version",
            use_autoconfig=True)
        mock_delete_group.assert_has_calls(calls=[
            mock.call(mock_create_master.return_value),
            mock.call(mock_create_worker.return_value)])
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,635
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/verification/tempest/test_config.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from rally.common import cfg
from rally_openstack.common import osclients
from rally_openstack.verification.tempest import config
from tests.unit import fakes
from tests.unit import test
CONF = cfg.CONF
CRED = {
"username": "admin",
"tenant_name": "admin",
"password": "admin-12345",
"auth_url": "http://test:5000/v2.0/",
"region_name": "test",
"https_insecure": False,
"https_cacert": "/path/to/cacert/file",
"user_domain_name": "admin",
"project_domain_name": "admin"
}
PATH = "rally_openstack.verification.tempest.config"
@ddt.ddt
class TempestConfigfileManagerTestCase(test.TestCase):
    """Unit tests for TempestConfigfileManager section builders."""
    def setUp(self):
        super(TempestConfigfileManagerTestCase, self).setUp()
        env = fakes.FakeEnvironment(
            env_uuid="fake_env",
            data={
                "platforms": {
                    "openstack": {
                        "platform_data": {
                            "admin": CRED
                        }
                    }
                }
            }
        )
        # Substitute a fake credential object so no real auth happens
        # while the manager is constructed from the environment.
        with mock.patch("%s.credential.OpenStackCredential" % PATH,
                        return_value=fakes.FakeCredential(**CRED)):
            self.tempest = config.TempestConfigfileManager(env)
    def test__configure_auth(self):
        self.tempest.conf.add_section("auth")
        self.tempest._configure_auth()
        expected = (
            ("admin_username", CRED["username"]),
            ("admin_password", CRED["password"]),
            ("admin_project_name", CRED["tenant_name"]),
            ("admin_domain_name", CRED["user_domain_name"]))
        result = self.tempest.conf.items("auth")
        for item in expected:
            self.assertIn(item, result)
    # The catalog may expose sahara under either spelling.
    @ddt.data("data_processing", "data-processing")
    def test__configure_data_processing(self, service_type):
        self.tempest.available_services = ["sahara"]
        self.tempest.clients.services.return_value = {
            service_type: "sahara"}
        self.tempest.conf.add_section("data-processing")
        self.tempest._configure_data_processing()
        self.assertEqual(service_type,
                         self.tempest.conf.get("data-processing",
                                               "catalog_type"))
    @ddt.data(
        # The prefix "ex_" is abbreviation of "expected"
        # case #1: both versions are discoverable; version is in the auth_url
        {"auth_url": "http://example.com/v2.0",
         "data": [{"version": (3, 0), "url": "foo3.com"},
                  {"version": (2, 0), "url": "foo2.com"}],
         "ex_uri": "http://example.com/v2.0", "ex_auth_version": "v2",
         "ex_uri_v3": "http://example.com/v3"},
        # case #2: the same case, but v3 is in the url
        {"auth_url": "http://example.com/v3",
         "data": [{"version": (3, 0), "url": "foo3.com"},
                  {"version": (2, 0), "url": "foo2.com"}],
         "ex_uri": "http://example.com/v2.0", "ex_auth_version": "v3",
         "ex_uri_v3": "http://example.com/v3"},
        # case #3: both versions are discoverable; version is not in auth_url
        {"auth_url": "http://example.com",
         "data": [{"version": (3, 0), "url": "foo3.com"},
                  {"version": (2, 0), "url": "foo2.com"}],
         "ex_uri": "foo2.com", "ex_uri_v3": "foo3.com",
         "ex_auth_version": "v3"},
        # case #4: the same case, but data in the another sort.
        {"auth_url": "http://example.com",
         "data": [{"version": (2, 0), "url": "foo2.com"},
                  {"version": (3, 0), "url": "foo3.com"}],
         "ex_uri": "foo2.com", "ex_uri_v3": "foo3.com",
         "ex_auth_version": "v3"},
        # case #5: only one version is discoverable;
        {"auth_url": "http://example.com",
         "data": [{"version": (2, 0), "url": "foo2.com"}],
         "ex_uri": "foo2.com", "ex_auth_version": "v2",
         "ex_uri_v3": "http://example.com/v3"},
        # case #6: the same case, but keystone v3 is discoverable
        {"auth_url": "http://example.com",
         "data": [{"version": (3, 0), "url": "foo3.com"}],
         "ex_uri": "http://example.com/v2.0", "ex_auth_version": "v3",
         "ex_uri_v3": "foo3.com",
         "ex_v2_off": True}
    )
    @ddt.unpack
    def test__configure_identity(self, auth_url, data, ex_uri,
                                 ex_uri_v3, ex_auth_version, ex_v2_off=False):
        self.tempest.conf.add_section("identity")
        self.tempest.conf.add_section("identity-feature-enabled")
        self.tempest.credential.auth_url = auth_url
        # Borrow the real URL-version stripper from the Keystone client
        # wrapper so discovery parsing matches production behavior.
        process_url = osclients.Keystone(
            self.tempest.credential, 0)._remove_url_version
        self.tempest.clients.keystone._remove_url_version = process_url
        from keystoneauth1 import discover
        from keystoneauth1 import session
        with mock.patch.object(discover, "Discover") as mock_discover:
            with mock.patch.object(session, "Session") as mock_session:
                mock_discover.return_value.version_data.return_value = data
                self.tempest._configure_identity()
                mock_discover.assert_called_once_with(
                    mock_session.return_value, auth_url)
        expected = {"region": CRED["region_name"],
                    "auth_version": ex_auth_version,
                    "uri": ex_uri, "uri_v3": ex_uri_v3,
                    "disable_ssl_certificate_validation": str(
                        CRED["https_insecure"]),
                    "ca_certificates_file": CRED["https_cacert"]}
        self.assertEqual(expected, dict(self.tempest.conf.items("identity")))
        if ex_v2_off:
            self.assertEqual(
                "False",
                self.tempest.conf.get("identity-feature-enabled", "api_v2"))
        # Test a conf setting with a None value
        try:
            self.tempest.conf.set("identity", "region", None)
        except TypeError as e:
            self.fail("self.tempest.conf.set('identity', 'region', None) "
                      "raised a TypeError: " + str(e))
    def test__configure_network_if_neutron(self):
        # With neutron available, the first external network found is
        # used for public_network_id / floating_network_name.
        self.tempest.available_services = ["neutron"]
        client = self.tempest.clients.neutron()
        client.list_networks.return_value = {
            "networks": [
                {
                    "status": "ACTIVE",
                    "id": "test_id",
                    "name": "test_name",
                    "router:external": True
                }
            ]
        }
        self.tempest.conf.add_section("network")
        self.tempest._configure_network()
        self.assertEqual("test_id",
                         self.tempest.conf.get("network", "public_network_id"))
        self.assertEqual("test_name",
                         self.tempest.conf.get("network",
                                               "floating_network_name"))
    def test__configure_network_if_nova(self):
        # Without neutron, nova-network names feed the compute and
        # validation sections instead.
        self.tempest.available_services = ["nova"]
        client = self.tempest.clients.nova()
        client.networks.list.return_value = [
            mock.MagicMock(human_id="fake-network")]
        self.tempest.conf.add_section("compute")
        self.tempest.conf.add_section("validation")
        self.tempest._configure_network()
        expected = {"compute": ("fixed_network_name", "fake-network"),
                    "validation": ("network_for_ssh", "fake-network")}
        for section, option in expected.items():
            result = self.tempest.conf.items(section)
            self.assertIn(option, result)
    def test__configure_network_feature_enabled(self):
        self.tempest.available_services = ["neutron"]
        client = self.tempest.clients.neutron()
        client.list_ext.return_value = {
            "extensions": [
                {"alias": "dvr"},
                {"alias": "extra_dhcp_opt"},
                {"alias": "extraroute"}
            ]
        }
        self.tempest.conf.add_section("network-feature-enabled")
        self.tempest._configure_network_feature_enabled()
        client.list_ext.assert_called_once_with("extensions", "/extensions",
                                                retrieve_all=True)
        # Aliases are joined into a single comma-separated option.
        self.assertEqual("dvr,extra_dhcp_opt,extraroute",
                         self.tempest.conf.get("network-feature-enabled",
                                               "api_extensions"))
    def test__configure_object_storage(self):
        self.tempest.conf.add_section("object-storage")
        self.tempest._configure_object_storage()
        expected = (
            ("operator_role", CONF.openstack.swift_operator_role),
            ("reseller_admin_role", CONF.openstack.swift_reseller_admin_role))
        result = self.tempest.conf.items("object-storage")
        for item in expected:
            self.assertIn(item, result)
    def test__configure_orchestration(self):
        self.tempest.conf.add_section("orchestration")
        self.tempest._configure_orchestration()
        expected = (
            ("stack_owner_role", CONF.openstack.heat_stack_owner_role),
            ("stack_user_role", CONF.openstack.heat_stack_user_role))
        result = self.tempest.conf.items("orchestration")
        for item in expected:
            self.assertIn(item, result)
    def test__configure_service_available(self):
        # Services present in the catalog map to "True", others "False".
        available_services = ("nova", "cinder", "glance", "sahara")
        self.tempest.available_services = available_services
        self.tempest.conf.add_section("service_available")
        self.tempest._configure_service_available()
        expected = (
            ("neutron", "False"), ("heat", "False"), ("nova", "True"),
            ("swift", "False"), ("cinder", "True"), ("sahara", "True"),
            ("glance", "True"))
        result = self.tempest.conf.items("service_available")
        for item in expected:
            self.assertIn(item, result)
    # Default case (nova/fixed) plus the neutron/floating variant.
    @ddt.data({}, {"service": "neutron", "connect_method": "floating"})
    @ddt.unpack
    def test__configure_validation(self, service="nova",
                                   connect_method="fixed"):
        self.tempest.available_services = [service]
        self.tempest.conf.add_section("validation")
        self.tempest._configure_validation()
        expected = (("connect_method", connect_method), )
        result = self.tempest.conf.items("validation")
        for item in expected:
            self.assertIn(item, result)
    @mock.patch("%s.io.StringIO" % PATH)
    @mock.patch("%s.open" % PATH, side_effect=mock.mock_open())
    @mock.patch("inspect.getmembers")
    def test_create(self, mock_inspect_getmembers, mock_open, mock_string_io):
        # create() must invoke every _configure_* member found via
        # inspection, merge extra options, and write the result out.
        configure_something_method = mock.MagicMock()
        mock_inspect_getmembers.return_value = [("_configure_something",
                                                 configure_something_method)]
        self.tempest.conf.read = mock.Mock()
        self.tempest.conf.write = mock.Mock()
        self.tempest.conf.read.return_value = "[section]\noption = value"
        fake_extra_conf = {"section2": {"option2": "value2"}}
        self.tempest.create("/path/to/fake/conf", fake_extra_conf)
        self.assertEqual(1, configure_something_method.call_count)
        self.assertIn(("option2", "value2"),
                      self.tempest.conf.items("section2"))
        mock_open.assert_called_once_with("/path/to/fake/conf", "w")
        self.tempest.conf.write.assert_has_calls(
            [mock.call(mock_open.side_effect()),
             mock.call(mock_string_io.return_value)])
        mock_string_io.return_value.getvalue.assert_called_once_with()
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,636
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/sahara/sahara_output_data_sources.py
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import validation
from rally_openstack.common import consts
from rally_openstack.common import osclients
from rally_openstack.task.cleanup import manager as resource_manager
from rally_openstack.task import context
from rally_openstack.task.scenarios.sahara import utils
from rally_openstack.task.scenarios.swift import utils as swift_utils
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="sahara_output_data_sources", platform="openstack",
                   order=444)
class SaharaOutputDataSources(context.OpenStackContext):
    """Context class for setting up Output Data Sources for an EDP job."""
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "properties": {
            # Where job output goes: a Swift container or an HDFS path.
            "output_type": {
                "enum": ["swift", "hdfs"],
            },
            "output_url_prefix": {
                "type": "string",
            }
        },
        "additionalProperties": False,
        "required": ["output_type", "output_url_prefix"]
    }
    def setup(self):
        # Create one output data source per tenant, backed either by a
        # freshly created Swift container or by the configured HDFS URL.
        utils.init_sahara_context(self)
        for user, tenant_id in self._iterate_per_tenants():
            clients = osclients.Clients(user["credential"])
            sahara = clients.sahara()
            if self.config["output_type"] == "swift":
                swift = swift_utils.SwiftScenario(clients=clients,
                                                  context=self.context)
                container_name = self.generate_random_name()
                self.context["tenants"][tenant_id]["sahara"]["container"] = {
                    "name": swift._create_container(
                        container_name=container_name),
                    "output_swift_objects": []
                }
                self.setup_outputs_swift(swift, sahara, tenant_id,
                                         container_name,
                                         user["credential"].username,
                                         user["credential"].password)
            else:
                self.setup_outputs_hdfs(sahara, tenant_id,
                                        self.config["output_url_prefix"])
    def setup_outputs_hdfs(self, sahara, tenant_id, output_url):
        """Register an HDFS data source and store its id in the context."""
        output_ds = sahara.data_sources.create(
            name=self.generate_random_name(),
            description="",
            data_source_type="hdfs",
            url=output_url)
        self.context["tenants"][tenant_id]["sahara"]["output"] = output_ds.id
    def setup_outputs_swift(self, swift, sahara, tenant_id, container_name,
                            username, password):
        """Register a Swift-backed data source for the tenant's container."""
        output_ds_swift = sahara.data_sources.create(
            name=self.generate_random_name(),
            description="",
            data_source_type="swift",
            url="swift://" + container_name + ".sahara/",
            credential_user=username,
            credential_pass=password)
        self.context["tenants"][tenant_id]["sahara"]["output"] = (
            output_ds_swift.id
        )
    def cleanup(self):
        # Delete Swift objects/containers first, then the sahara data
        # sources registered by this context.
        resource_manager.cleanup(
            names=["swift.object", "swift.container"],
            users=self.context.get("users", []),
            superclass=self.__class__,
            task_id=self.get_owner_id())
        resource_manager.cleanup(
            names=["sahara.data_sources"],
            users=self.context.get("users", []),
            superclass=self.__class__,
            task_id=self.get_owner_id())
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,637
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/mistral/test_workbooks.py
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally_openstack.task.scenarios.mistral import workbooks
from tests.unit import test
BASE = "rally_openstack.task.scenarios.mistral.workbooks"
class MistralWorkbooksTestCase(test.ScenarioTestCase):
    """Unit tests for the Mistral workbook scenarios."""

    @mock.patch("%s.ListWorkbooks._list_workbooks" % BASE)
    def test_list_workbooks(self, mock_list_workbooks__list_workbooks):
        workbooks.ListWorkbooks(self.context).run()
        mock_list_workbooks__list_workbooks.assert_called_once_with()

    @mock.patch("%s.CreateWorkbook._create_workbook" % BASE)
    def test_create_workbook(self, mock_create_workbook__create_workbook):
        definition = "---\nversion: \"2.0\"\nname: wb"
        fake_wb = mock.MagicMock()
        fake_wb.name = "wb"
        mock_create_workbook__create_workbook.return_value = fake_wb
        workbooks.CreateWorkbook(self.context).run(definition)
        # FIX: the original asserted ``assertEqual(1, mock.called)``,
        # comparing the expected count to the boolean ``called``
        # attribute; it only passed because ``1 == True``. Compare
        # against ``call_count``, which expresses the real intent.
        self.assertEqual(1, mock_create_workbook__create_workbook.call_count)

    @mock.patch("%s.CreateWorkbook._delete_workbook" % BASE)
    @mock.patch("%s.CreateWorkbook._create_workbook" % BASE)
    def test_create_delete_workbook(self,
                                    mock_create_workbook__create_workbook,
                                    mock_create_workbook__delete_workbook):
        definition = "---\nversion: \"2.0\"\nname: wb"
        fake_wb = mock.MagicMock()
        fake_wb.name = "wb"
        mock_create_workbook__create_workbook.return_value = fake_wb
        workbooks.CreateWorkbook(self.context).run(definition, do_delete=True)
        self.assertTrue(mock_create_workbook__create_workbook.called)
        # Deletion must be keyed by the workbook's name.
        mock_create_workbook__delete_workbook.assert_called_once_with(
            fake_wb.name)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,638
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/manila/test_manila_shares.py
|
# Copyright 2016 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
import ddt
from rally_openstack.common import consts as rally_consts
from rally_openstack.task.contexts.manila import consts
from rally_openstack.task.contexts.manila import manila_shares
from rally_openstack.task.scenarios.manila import utils as manila_utils
from tests.unit import test
MANILA_UTILS_PATH = (
"rally_openstack.task.scenarios.manila.utils.ManilaScenario.")
class Fake(object):
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
def __getitem__(self, item):
return getattr(self, item)
def to_dict(self):
return self.__dict__
@ddt.ddt
class SharesTestCase(test.TestCase):
TENANTS_AMOUNT = 3
USERS_PER_TENANT = 4
SHARES_PER_TENANT = 7
SHARE_NETWORKS = [{"id": "sn_%s_id" % d} for d in range(3)]
def _get_context(self, use_share_networks=False, shares_per_tenant=None,
share_size=1, share_proto="fake_proto", share_type=None):
tenants = {}
for t_id in range(self.TENANTS_AMOUNT):
tenants[str(t_id)] = {"name": str(t_id)}
users = []
for t_id in sorted(list(tenants.keys())):
for i in range(self.USERS_PER_TENANT):
users.append({
"id": i, "tenant_id": t_id,
"credential": mock.MagicMock()})
context = {
"config": {
"users": {
"tenants": self.TENANTS_AMOUNT,
"users_per_tenant": self.USERS_PER_TENANT,
"user_choice_method": "round_robin",
},
consts.SHARE_NETWORKS_CONTEXT_NAME: {
"use_share_networks": use_share_networks,
"share_networks": self.SHARE_NETWORKS,
},
consts.SHARES_CONTEXT_NAME: {
"shares_per_tenant": (
shares_per_tenant or self.SHARES_PER_TENANT),
"size": share_size,
"share_proto": share_proto,
"share_type": share_type,
},
},
"admin": {
"credential": mock.MagicMock(),
},
"task": mock.MagicMock(),
"owner_id": "foo_uuid",
"users": users,
"tenants": tenants,
}
if use_share_networks:
for t in context["tenants"].keys():
context["tenants"][t][consts.SHARE_NETWORKS_CONTEXT_NAME] = {
"share_networks": self.SHARE_NETWORKS,
}
return context
def test_init(self):
ctxt = {
"task": mock.MagicMock(),
"config": {
consts.SHARES_CONTEXT_NAME: {"foo": "bar"},
"fake": {"fake_key": "fake_value"},
},
}
inst = manila_shares.Shares(ctxt)
self.assertEqual(
{"foo": "bar", "shares_per_tenant": 1, "size": 1,
"share_proto": "NFS", "share_type": None},
inst.config)
self.assertIn(
rally_consts.JSON_SCHEMA, inst.CONFIG_SCHEMA.get("$schema"))
self.assertFalse(inst.CONFIG_SCHEMA.get("additionalProperties"))
self.assertEqual("object", inst.CONFIG_SCHEMA.get("type"))
props = inst.CONFIG_SCHEMA.get("properties", {})
self.assertEqual(
{"minimum": 1, "type": "integer"}, props.get("shares_per_tenant"))
self.assertEqual({"minimum": 1, "type": "integer"}, props.get("size"))
self.assertEqual({"type": "string"}, props.get("share_proto"))
self.assertEqual({"type": "string"}, props.get("share_type"))
self.assertEqual(455, inst.get_order())
self.assertEqual(consts.SHARES_CONTEXT_NAME, inst.get_name())
@mock.patch(MANILA_UTILS_PATH + "_create_share")
@ddt.data(True, False)
def test_setup(
self,
use_share_networks,
mock_manila_scenario__create_share):
share_type = "fake_share_type"
ctxt = self._get_context(
use_share_networks=use_share_networks, share_type=share_type)
inst = manila_shares.Shares(ctxt)
shares = [
Fake(id="fake_share_id_%d" % s_id)
for s_id in range(self.TENANTS_AMOUNT * self.SHARES_PER_TENANT)
]
mock_manila_scenario__create_share.side_effect = shares
expected_ctxt = copy.deepcopy(ctxt)
inst.setup()
self.assertEqual(
self.TENANTS_AMOUNT * self.SHARES_PER_TENANT,
mock_manila_scenario__create_share.call_count)
for d in range(self.TENANTS_AMOUNT):
self.assertEqual(
[
s.to_dict() for s in shares[
(d * self.SHARES_PER_TENANT):(
d * self.SHARES_PER_TENANT + self.SHARES_PER_TENANT
)
]
],
inst.context.get("tenants", {}).get("%s" % d, {}).get("shares")
)
self.assertEqual(expected_ctxt["task"], inst.context.get("task"))
self.assertEqual(expected_ctxt["config"], inst.context.get("config"))
self.assertEqual(expected_ctxt["users"], inst.context.get("users"))
if use_share_networks:
mock_calls = [
mock.call(
share_proto=ctxt["config"][consts.SHARES_CONTEXT_NAME][
"share_proto"],
size=ctxt["config"][consts.SHARES_CONTEXT_NAME]["size"],
share_type=ctxt["config"][consts.SHARES_CONTEXT_NAME][
"share_type"],
share_network=self.SHARE_NETWORKS[
int(t_id) % len(self.SHARE_NETWORKS)]["id"]
) for t_id in expected_ctxt["tenants"].keys()
]
else:
mock_calls = [
mock.call(
share_proto=ctxt["config"][consts.SHARES_CONTEXT_NAME][
"share_proto"],
size=ctxt["config"][consts.SHARES_CONTEXT_NAME]["size"],
share_type=ctxt["config"][consts.SHARES_CONTEXT_NAME][
"share_type"],
) for t_id in expected_ctxt["tenants"].keys()
]
mock_manila_scenario__create_share.assert_has_calls(
mock_calls, any_order=True)
@mock.patch(MANILA_UTILS_PATH + "_create_share")
@mock.patch("rally_openstack.task.cleanup.manager.cleanup")
def test_cleanup(
self,
mock_cleanup_manager_cleanup,
mock_manila_scenario__create_share):
ctxt = self._get_context()
inst = manila_shares.Shares(ctxt)
shares = [
Fake(id="fake_share_id_%d" % s_id)
for s_id in range(self.TENANTS_AMOUNT * self.SHARES_PER_TENANT)
]
mock_manila_scenario__create_share.side_effect = shares
inst.setup()
inst.cleanup()
mock_cleanup_manager_cleanup.assert_called_once_with(
names=["manila.shares"],
users=inst.context.get("users", []),
superclass=manila_utils.ManilaScenario,
task_id="foo_uuid")
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,639
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/common/services/image/image.py
|
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import cfg
from rally import exceptions
from rally.task import service
CONF = cfg.CONF
UnifiedImage = service.make_resource_cls(
"Image", properties=["id", "name", "visibility", "status"])
class VisibilityException(exceptions.RallyException):
"""Wrong visibility value exception.
"""
error_code = 531
class RemovePropsException(exceptions.RallyException):
"""Remove Props it not supported exception.
"""
error_code = 560
class Image(service.UnifiedService):
@classmethod
def is_applicable(cls, clients):
cloud_version = str(clients.glance().version).split(".")[0]
return cloud_version == cls._meta_get("impl")._meta_get("version")
@service.should_be_overridden
def create_image(self, image_name=None, container_format=None,
image_location=None, disk_format=None,
visibility="private", min_disk=0,
min_ram=0, properties=None):
"""Creates new image.
:param image_name: Image name for which need to be created
:param container_format: Container format
:param image_location: The new image's location
:param disk_format: Disk format
:param visibility: The access permission for the created image.
:param min_disk: The min disk of created images
:param min_ram: The min ram of created images
:param properties: Dict of image properties
"""
properties = properties or {}
image = self._impl.create_image(
image_name=image_name,
container_format=container_format,
image_location=image_location,
disk_format=disk_format,
visibility=visibility,
min_disk=min_disk,
min_ram=min_ram,
properties=properties)
return image
@service.should_be_overridden
def update_image(self, image_id, image_name=None,
min_disk=0, min_ram=0, remove_props=None):
"""Update image.
:param image_id: ID of image to update
:param image_name: Image name to be updated to
:param min_disk: The min disk of updated image
:param min_ram: The min ram of updated image
:param remove_props: List of property names to remove
"""
return self._impl.update_image(
image_id,
image_name=image_name,
min_disk=min_disk,
min_ram=min_ram,
remove_props=remove_props)
@service.should_be_overridden
def list_images(self, status="active", visibility=None, owner=None):
"""List images.
:param status: Filter in images for the specified status
:param visibility: Filter in images for the specified visibility
:param owner: Filter in images for tenant ID
"""
return self._impl.list_images(status=status,
visibility=visibility,
owner=owner)
@service.should_be_overridden
def set_visibility(self, image_id, visibility="public"):
"""Update visibility.
:param image_id: ID of image to update
:param visibility: The visibility of specified image
"""
self._impl.set_visibility(image_id, visibility=visibility)
@service.should_be_overridden
def get_image(self, image):
"""Get specified image.
:param image: ID or object with ID of image to obtain.
"""
return self._impl.get_image(image)
@service.should_be_overridden
def delete_image(self, image_id):
"""delete image."""
self._impl.delete_image(image_id)
@service.should_be_overridden
def download_image(self, image, do_checksum=True):
"""Download data for an image.
:param image: image object or id to look up
:param do_checksum: Enable/disable checksum validation
:rtype: iterable containing image data or None
"""
return self._impl.download_image(image, do_checksum=do_checksum)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,640
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/heat/test_stacks.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally import exceptions
from rally_openstack.task.scenarios.heat import stacks
from tests.unit import test
BASE = "rally_openstack.task.scenarios.heat.stacks"
class HeatStacksTestCase(test.ScenarioTestCase):
def setUp(self):
super(HeatStacksTestCase, self).setUp()
self.default_template = "heat_template_version: 2013-05-23"
self.default_parameters = {"dummy_param": "dummy_key"}
self.default_files = ["dummy_file.yaml"]
self.default_environment = {"env": "dummy_env"}
self.default_output_key = "dummy_output_key"
@mock.patch("%s.CreateAndListStack._list_stacks" % BASE)
@mock.patch("%s.CreateAndListStack._create_stack" % BASE)
def test_create_and_list_stack(self,
mock__create_stack,
mock__list_stacks):
stack = mock.Mock()
mock__create_stack.return_value = stack
mock__list_stacks.return_value = [stack] * 3
# Positive case:
stacks.CreateAndListStack(self.context).run(
template_path=self.default_template,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment)
mock__create_stack.assert_called_once_with(
self.default_template, self.default_parameters,
self.default_files, self.default_environment)
mock__list_stacks.assert_called_once_with()
# Negative case1: stack isn't created
mock__create_stack.return_value = None
self.assertRaises(exceptions.RallyAssertionError,
stacks.CreateAndListStack(self.context).run,
template_path=self.default_template,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment)
mock__create_stack.assert_called_with(
self.default_template, self.default_parameters,
self.default_files, self.default_environment)
# Negative case2: created stack not in the list of available stacks
fake_stack = mock.Mock()
mock__create_stack.return_value = fake_stack
self.assertRaises(exceptions.RallyAssertionError,
stacks.CreateAndListStack(self.context).run,
template_path=self.default_template,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment)
mock__create_stack.assert_called_with(
self.default_template, self.default_parameters,
self.default_files, self.default_environment)
mock__list_stacks.assert_called_with()
@mock.patch("%s.ListStacksAndResources._list_stacks" % BASE)
def test_list_stack_and_resources(self, mock__list_stacks):
stack = mock.Mock()
heat_scenario = stacks.ListStacksAndResources(self.context)
mock__list_stacks.return_value = [stack]
heat_scenario.run()
self.clients("heat").resources.list.assert_called_once_with(
stack.id)
self._test_atomic_action_timer(heat_scenario.atomic_actions(),
"heat.list_resources")
@mock.patch("%s.ListStacksAndEvents._list_stacks" % BASE)
def test_list_stack_and_events(self, mock__list_stacks):
stack = mock.Mock()
mock__list_stacks.return_value = [stack]
heat_scenario = stacks.ListStacksAndEvents(self.context)
heat_scenario.run()
self.clients("heat").events.list.assert_called_once_with(stack.id)
self._test_atomic_action_timer(
heat_scenario.atomic_actions(), "heat.list_events")
@mock.patch("%s.CreateAndDeleteStack._delete_stack" % BASE)
@mock.patch("%s.CreateAndDeleteStack._create_stack" % BASE)
@mock.patch("%s.CreateAndDeleteStack.generate_random_name" % BASE,
return_value="test-rally-stack")
def test_create_and_delete_stack(self,
mock_generate_random_name,
mock__create_stack,
mock__delete_stack):
fake_stack = object()
mock__create_stack.return_value = fake_stack
stacks.CreateAndDeleteStack(self.context).run(
template_path=self.default_template,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment)
mock__create_stack.assert_called_once_with(
self.default_template,
self.default_parameters,
self.default_files,
self.default_environment)
mock__delete_stack.assert_called_once_with(fake_stack)
@mock.patch("%s.CreateCheckDeleteStack._delete_stack" % BASE)
@mock.patch("%s.CreateCheckDeleteStack._check_stack" % BASE)
@mock.patch("%s.CreateCheckDeleteStack._create_stack" % BASE)
def test_create_check_delete_stack(self,
mock__create_stack,
mock__check_stack,
mock__delete_stack):
stacks.CreateCheckDeleteStack(self.context).run(
template_path=self.default_template,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment)
mock__create_stack.assert_called_once_with(
self.default_template, self.default_parameters,
self.default_files, self.default_environment)
mock__check_stack.assert_called_once_with(
mock__create_stack.return_value)
mock__delete_stack.assert_called_once_with(
mock__create_stack.return_value)
@mock.patch("%s.CreateUpdateDeleteStack._delete_stack" % BASE)
@mock.patch("%s.CreateUpdateDeleteStack._update_stack" % BASE)
@mock.patch("%s.CreateUpdateDeleteStack._create_stack" % BASE)
@mock.patch("%s.CreateUpdateDeleteStack.generate_random_name" % BASE,
return_value="test-rally-stack")
def test_create_update_delete_stack(self,
mock_generate_random_name,
mock__create_stack,
mock__update_stack,
mock__delete_stack):
fake_stack = object()
mock__create_stack.return_value = fake_stack
stacks.CreateUpdateDeleteStack(self.context).run(
template_path=self.default_template,
parameters=self.default_parameters,
updated_template_path=self.default_template,
files=self.default_files,
environment=self.default_environment
)
mock__create_stack.assert_called_once_with(
self.default_template,
self.default_parameters,
self.default_files,
self.default_environment)
mock__update_stack.assert_called_once_with(
fake_stack, self.default_template,
self.default_parameters,
self.default_files,
self.default_environment)
mock__delete_stack.assert_called_once_with(fake_stack)
def test_create_stack_and_scale(self):
heat_scenario = stacks.CreateStackAndScale(self.context)
stack = mock.Mock()
heat_scenario._create_stack = mock.Mock(return_value=stack)
heat_scenario._scale_stack = mock.Mock()
heat_scenario.run(
self.default_template, "key", -1,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment)
heat_scenario._create_stack.assert_called_once_with(
self.default_template,
self.default_parameters,
self.default_files,
self.default_environment)
heat_scenario._scale_stack.assert_called_once_with(
stack, "key", -1)
@mock.patch("%s.CreateSuspendResumeDeleteStack._delete_stack" % BASE)
@mock.patch("%s.CreateSuspendResumeDeleteStack._resume_stack" % BASE)
@mock.patch("%s.CreateSuspendResumeDeleteStack._suspend_stack" % BASE)
@mock.patch("%s.CreateSuspendResumeDeleteStack._create_stack" % BASE)
def test_create_suspend_resume_delete_stack(self,
mock__create_stack,
mock__suspend_stack,
mock__resume_stack,
mock__delete_stack):
stacks.CreateSuspendResumeDeleteStack(self.context).run(
template_path=self.default_template,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment)
mock__create_stack.assert_called_once_with(
self.default_template,
self.default_parameters,
self.default_files,
self.default_environment
)
mock__suspend_stack.assert_called_once_with(
mock__create_stack.return_value)
mock__resume_stack.assert_called_once_with(
mock__create_stack.return_value)
mock__delete_stack.assert_called_once_with(
mock__create_stack.return_value)
@mock.patch("%s.CreateSnapshotRestoreDeleteStack._delete_stack" % BASE)
@mock.patch("%s.CreateSnapshotRestoreDeleteStack._restore_stack" % BASE)
@mock.patch("%s.CreateSnapshotRestoreDeleteStack._snapshot_stack" % BASE,
return_value={"id": "dummy_id"})
@mock.patch("%s.CreateSnapshotRestoreDeleteStack._create_stack" % BASE,
return_value=object())
def test_create_snapshot_restore_delete_stack(self,
mock__create_stack,
mock__snapshot_stack,
mock__restore_stack,
mock__delete_stack):
stacks.CreateSnapshotRestoreDeleteStack(self.context).run(
template_path=self.default_template,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment)
mock__create_stack.assert_called_once_with(
self.default_template, self.default_parameters,
self.default_files, self.default_environment)
mock__snapshot_stack.assert_called_once_with(
mock__create_stack.return_value)
mock__restore_stack.assert_called_once_with(
mock__create_stack.return_value, "dummy_id")
mock__delete_stack.assert_called_once_with(
mock__create_stack.return_value)
@mock.patch("%s.CreateStackAndShowOutputViaAPI"
"._stack_show_output_via_API" % BASE)
@mock.patch("%s.CreateStackAndShowOutputViaAPI._create_stack" % BASE)
def test_create_and_show_output_via_API(self,
mock__create_stack,
mock__stack_show_output_api):
stacks.CreateStackAndShowOutputViaAPI(self.context).run(
template_path=self.default_template,
output_key=self.default_output_key,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment)
mock__create_stack.assert_called_once_with(
self.default_template, self.default_parameters,
self.default_files, self.default_environment)
mock__stack_show_output_api.assert_called_once_with(
mock__create_stack.return_value, self.default_output_key)
@mock.patch("%s.CreateStackAndShowOutput._stack_show_output" % BASE)
@mock.patch("%s.CreateStackAndShowOutput._create_stack" % BASE)
def test_create_and_show_output(self,
mock__create_stack,
mock__stack_show_output):
stacks.CreateStackAndShowOutput(self.context).run(
template_path=self.default_template,
output_key=self.default_output_key,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment)
mock__create_stack.assert_called_once_with(
self.default_template, self.default_parameters,
self.default_files, self.default_environment)
mock__stack_show_output.assert_called_once_with(
mock__create_stack.return_value, self.default_output_key)
@mock.patch("%s.CreateStackAndListOutputViaAPI"
"._stack_list_output_via_API" % BASE)
@mock.patch("%s.CreateStackAndListOutputViaAPI._create_stack" % BASE)
def test_create_and_list_output_via_API(self,
mock__create_stack,
mock__stack_list_output_api):
stacks.CreateStackAndListOutputViaAPI(self.context).run(
template_path=self.default_template,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment)
mock__create_stack.assert_called_once_with(
self.default_template, self.default_parameters,
self.default_files, self.default_environment)
mock__stack_list_output_api.assert_called_once_with(
mock__create_stack.return_value)
@mock.patch("%s.CreateStackAndListOutput._stack_list_output" % BASE)
@mock.patch("%s.CreateStackAndListOutput._create_stack" % BASE)
def test_create_and_list_output(self,
mock__create_stack,
mock__stack_list_output):
stacks.CreateStackAndListOutput(self.context).run(
template_path=self.default_template,
parameters=self.default_parameters,
files=self.default_files,
environment=self.default_environment)
mock__create_stack.assert_called_once_with(
self.default_template, self.default_parameters,
self.default_files, self.default_environment)
mock__stack_list_output.assert_called_once_with(
mock__create_stack.return_value)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,641
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/cleanup/base.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import cfg
from rally.task import utils
CONF = cfg.CONF
cleanup_group = cfg.OptGroup(name="cleanup", title="Cleanup Options")
# NOTE(andreykurilin): There are cases when there is no way to use any kind
# of "name" for resource as an identifier of alignment resource to the
# particular task run and even to Rally itself. Previously, we used empty
# strings as a workaround for name matching specific templates, but
# theoretically such behaviour can hide other cases when resource should have
# a name property, but it is missed.
# Let's use instances of specific class to return as a name of resources
# which do not have names at all.
class NoName(object):
def __init__(self, resource_type):
self.resource_type = resource_type
def __repr__(self):
return "<NoName %s resource>" % self.resource_type
def resource(service, resource, order=0, admin_required=False,
perform_for_admin_only=False, tenant_resource=False,
max_attempts=3, timeout=CONF.openstack.resource_deletion_timeout,
interval=1, threads=CONF.openstack.cleanup_threads):
"""Decorator that overrides resource specification.
Just put it on top of your resource class and specify arguments that you
need.
:param service: It is equal to client name for corresponding service.
E.g. "nova", "cinder" or "zaqar"
:param resource: Client manager name for resource. E.g. in case of
nova.servers you should write here "servers"
:param order: Used to adjust priority of cleanup for different resource
types
:param admin_required: Admin user is required
:param perform_for_admin_only: Perform cleanup for admin user only
:param tenant_resource: Perform deletion only 1 time per tenant
:param max_attempts: Max amount of attempts to delete single resource
:param timeout: Max duration of deletion in seconds
:param interval: Resource status pooling interval
:param threads: Amount of threads (workers) that are deleting resources
simultaneously
"""
def inner(cls):
# TODO(boris-42): This can be written better I believe =)
cls._service = service
cls._resource = resource
cls._order = order
cls._admin_required = admin_required
cls._perform_for_admin_only = perform_for_admin_only
cls._max_attempts = max_attempts
cls._timeout = timeout
cls._interval = interval
cls._threads = threads
cls._tenant_resource = tenant_resource
return cls
return inner
@resource(service=None, resource=None)
class ResourceManager(object):
"""Base class for cleanup plugins for specific resources.
You should use @resource decorator to specify major configuration of
resource manager. Usually you should specify: service, resource and order.
If project python client is very specific, you can override delete(),
list() and is_deleted() methods to make them fit to your case.
"""
def __init__(self, resource=None, admin=None, user=None, tenant_uuid=None):
self.admin = admin
self.user = user
self.raw_resource = resource
self.tenant_uuid = tenant_uuid
def _manager(self):
client = self._admin_required and self.admin or self.user
return getattr(getattr(client, self._service)(), self._resource)
def id(self):
"""Returns id of resource."""
return self.raw_resource.id
def name(self):
"""Returns name of resource."""
return self.raw_resource.name
def is_deleted(self):
"""Checks if the resource is deleted.
Fetch resource by id from service and check it status.
In case of NotFound or status is DELETED or DELETE_COMPLETE returns
True, otherwise False.
"""
try:
resource = self._manager().get(self.id())
except Exception as e:
return getattr(e, "code", getattr(e, "http_status", 400)) == 404
return utils.get_status(resource) in ("DELETED", "DELETE_COMPLETE")
def delete(self):
"""Delete resource that corresponds to instance of this class."""
self._manager().delete(self.id())
def list(self):
"""List all resources specific for admin or user."""
return self._manager().list()
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,642
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/ironic/utils.py
|
# Copyright 2015: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import string
from rally.common import cfg
from rally.task import atomic
from rally.task import utils
from rally_openstack.task import scenario
CONF = cfg.CONF
class IronicScenario(scenario.OpenStackScenario):
"""Base class for Ironic scenarios with basic atomic actions."""
# NOTE(stpierre): Ironic has two name checkers. The new-style
# checker, in API v1.10+, is quite relaxed and will Just Work with
# the default random name pattern. (See
# https://bugs.launchpad.net/ironic/+bug/1434376.) The old-style
# checker *claims* to implement RFCs 952 and 1123, but it doesn't
# actually. (See https://bugs.launchpad.net/ironic/+bug/1468508
# for details.) The default RESOURCE_NAME_FORMAT works fine for
# the new-style checker, but the old-style checker only allows
# underscores after the first dot, for reasons that I'm sure are
# entirely obvious, so we have to supply a bespoke format for
# Ironic names.
RESOURCE_NAME_FORMAT = "s-rally-XXXXXXXX-XXXXXXXX"
RESOURCE_NAME_ALLOWED_CHARACTERS = string.ascii_lowercase + string.digits
@atomic.action_timer("ironic.create_node")
def _create_node(self, driver, properties, **kwargs):
"""Create node immediately.
:param driver: The name of the driver used to manage this Node.
:param properties: Key/value pair describing the physical
characteristics of the node.
:param kwargs: optional parameters to create image
:returns: node object
"""
kwargs["name"] = self.generate_random_name()
node = self.admin_clients("ironic").node.create(driver=driver,
properties=properties,
**kwargs)
self.sleep_between(CONF.openstack.ironic_node_create_poll_interval)
node = utils.wait_for_status(
node,
ready_statuses=["AVAILABLE"],
update_resource=utils.get_from_manager(),
timeout=CONF.openstack.ironic_node_create_timeout,
check_interval=CONF.openstack.ironic_node_poll_interval,
id_attr="uuid", status_attr="provision_state"
)
return node
@atomic.action_timer("ironic.list_nodes")
def _list_nodes(self, associated=None, maintenance=None, detail=False,
sort_dir=None):
"""Return list of nodes.
:param associated: Optional. Either a Boolean or a string
representation of a Boolean that indicates whether
to return a list of associated (True or "True") or
unassociated (False or "False") nodes.
:param maintenance: Optional. Either a Boolean or a string
representation of a Boolean that indicates whether
to return nodes in maintenance mode (True or
"True"), or not in maintenance mode (False or
"False").
:param detail: Optional, boolean whether to return detailed information
about nodes.
:param sort_dir: Optional, direction of sorting, either 'asc' (the
default) or 'desc'.
:returns: A list of nodes.
"""
return self.admin_clients("ironic").node.list(
associated=associated, maintenance=maintenance, detail=detail,
sort_dir=sort_dir)
@atomic.action_timer("ironic.delete_node")
def _delete_node(self, node):
"""Delete the node with specific id.
:param node: Ironic node object
"""
self.admin_clients("ironic").node.delete(node.uuid)
utils.wait_for_status(
node,
ready_statuses=["deleted"],
check_deletion=True,
update_resource=utils.get_from_manager(),
timeout=CONF.openstack.ironic_node_delete_timeout,
check_interval=CONF.openstack.ironic_node_poll_interval,
id_attr="uuid", status_attr="provision_state"
)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,643
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/common/services/identity/test_identity.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
from rally_openstack.common.services.identity import identity
from tests.unit import test
@ddt.ddt
class IdentityTestCase(test.TestCase):
def setUp(self):
super(IdentityTestCase, self).setUp()
self.clients = mock.MagicMock()
def get_service_with_fake_impl(self):
path = "rally_openstack.common.services.identity.identity"
with mock.patch("%s.Identity.discover_impl" % path) as mock_discover:
mock_discover.return_value = mock.MagicMock(), None
service = identity.Identity(self.clients)
return service
def test_create_project(self):
service = self.get_service_with_fake_impl()
project_name = "name"
domain_name = "domain"
service.create_project(project_name, domain_name=domain_name)
service._impl.create_project.assert_called_once_with(
project_name, domain_name=domain_name)
def test_update_project(self):
service = self.get_service_with_fake_impl()
project_id = "id"
project_name = "name"
description = "descr"
enabled = False
service.update_project(project_id=project_id, name=project_name,
description=description, enabled=enabled)
service._impl.update_project.assert_called_once_with(
project_id, name=project_name, description=description,
enabled=enabled)
def test_delete_project(self):
service = self.get_service_with_fake_impl()
project = "id"
service.delete_project(project)
service._impl.delete_project.assert_called_once_with(project)
def test_list_projects(self):
service = self.get_service_with_fake_impl()
service.list_projects()
service._impl.list_projects.assert_called_once_with()
def test_get_project(self):
service = self.get_service_with_fake_impl()
project = "id"
service.get_project(project)
service._impl.get_project.assert_called_once_with(project)
def test_create_user(self):
service = self.get_service_with_fake_impl()
username = "username"
password = "password"
project_id = "project_id"
domain_name = "domain_name"
service.create_user(username=username, password=password,
project_id=project_id, domain_name=domain_name)
service._impl.create_user.assert_called_once_with(
username=username, password=password, project_id=project_id,
domain_name=domain_name, default_role="member")
def test_create_users(self):
service = self.get_service_with_fake_impl()
project_id = "project_id"
n = 3
user_create_args = {}
service.create_users(project_id, number_of_users=n,
user_create_args=user_create_args)
service._impl.create_users.assert_called_once_with(
project_id, number_of_users=n, user_create_args=user_create_args)
def test_delete_user(self):
service = self.get_service_with_fake_impl()
user_id = "fake_id"
service.delete_user(user_id)
service._impl.delete_user.assert_called_once_with(user_id)
def test_list_users(self):
service = self.get_service_with_fake_impl()
service.list_users()
service._impl.list_users.assert_called_once_with()
def test_update_user(self):
service = self.get_service_with_fake_impl()
user_id = "id"
user_name = "name"
email = "mail"
password = "pass"
enabled = False
service.update_user(user_id, name=user_name, password=password,
email=email, enabled=enabled)
service._impl.update_user.assert_called_once_with(
user_id, name=user_name, password=password, email=email,
enabled=enabled)
def test_get_user(self):
service = self.get_service_with_fake_impl()
user = "id"
service.get_user(user)
service._impl.get_user.assert_called_once_with(user)
def test_create_service(self):
service = self.get_service_with_fake_impl()
service_name = "name"
service_type = "service_type"
description = "descr"
service.create_service(service_name, service_type=service_type,
description=description)
service._impl.create_service.assert_called_once_with(
name=service_name, service_type=service_type,
description=description)
def test_delete_service(self):
service = self.get_service_with_fake_impl()
service_id = "id"
service.delete_service(service_id)
service._impl.delete_service.assert_called_once_with(service_id)
def test_list_services(self):
service = self.get_service_with_fake_impl()
service.list_services()
service._impl.list_services.assert_called_once_with()
def test_get_service(self):
service = self.get_service_with_fake_impl()
service_id = "id"
service.get_service(service_id)
service._impl.get_service.assert_called_once_with(service_id)
def test_get_service_by_name(self):
service = self.get_service_with_fake_impl()
service_name = "name"
service.get_service_by_name(service_name)
service._impl.get_service_by_name.assert_called_once_with(service_name)
def test_create_role(self):
service = self.get_service_with_fake_impl()
name = "name"
service.create_role(name)
service._impl.create_role.assert_called_once_with(
name=name, domain_name=None)
def test_add_role(self):
service = self.get_service_with_fake_impl()
role_id = "id"
user_id = "user_id"
project_id = "project_id"
service.add_role(role_id, user_id=user_id, project_id=project_id)
service._impl.add_role.assert_called_once_with(role_id=role_id,
user_id=user_id,
project_id=project_id)
def test_delete_role(self):
service = self.get_service_with_fake_impl()
role = "id"
service.delete_role(role)
service._impl.delete_role.assert_called_once_with(role)
def test_revoke_role(self):
service = self.get_service_with_fake_impl()
role_id = "id"
user_id = "user_id"
project_id = "project_id"
service.revoke_role(role_id, user_id=user_id, project_id=project_id)
service._impl.revoke_role.assert_called_once_with(
role_id=role_id, user_id=user_id, project_id=project_id)
@ddt.data((None, None, None), ("user_id", "project_id", "domain"))
def test_list_roles(self, params):
user, project, domain = params
service = self.get_service_with_fake_impl()
service.list_roles(user_id=user, project_id=project,
domain_name=domain)
service._impl.list_roles.assert_called_once_with(user_id=user,
project_id=project,
domain_name=domain)
def test_get_role(self):
service = self.get_service_with_fake_impl()
role = "id"
service.get_role(role)
service._impl.get_role.assert_called_once_with(role)
def test_create_ec2credentials(self):
service = self.get_service_with_fake_impl()
user_id = "id"
project_id = "project-id"
service.create_ec2credentials(user_id=user_id, project_id=project_id)
service._impl.create_ec2credentials.assert_called_once_with(
user_id=user_id, project_id=project_id)
def test_list_ec2credentials(self):
service = self.get_service_with_fake_impl()
user_id = "id"
service.list_ec2credentials(user_id=user_id)
service._impl.list_ec2credentials.assert_called_once_with(user_id)
def test_delete_ec2credential(self):
service = self.get_service_with_fake_impl()
user_id = "id"
access = "access"
service.delete_ec2credential(user_id=user_id, access=access)
service._impl.delete_ec2credential.assert_called_once_with(
user_id=user_id, access=access)
def test_fetch_token(self):
service = self.get_service_with_fake_impl()
service.fetch_token()
service._impl.fetch_token.assert_called_once_with()
def test_validate_token(self):
service = self.get_service_with_fake_impl()
token = "id"
service.validate_token(token)
service._impl.validate_token.assert_called_once_with(token)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,644
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/common/cfg/sahara.py
|
# Copyright 2013: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import cfg
OPTS = {"openstack": [
cfg.IntOpt("sahara_cluster_create_timeout",
default=1800,
deprecated_group="benchmark",
help="A timeout in seconds for a cluster create operation"),
cfg.IntOpt("sahara_cluster_delete_timeout",
default=900,
deprecated_group="benchmark",
help="A timeout in seconds for a cluster delete operation"),
cfg.IntOpt("sahara_cluster_check_interval",
default=5,
deprecated_group="benchmark",
help="Cluster status polling interval in seconds"),
cfg.IntOpt("sahara_job_execution_timeout",
default=600,
deprecated_group="benchmark",
help="A timeout in seconds for a Job Execution to complete"),
cfg.IntOpt("sahara_job_check_interval",
default=5,
deprecated_group="benchmark",
help="Job Execution status polling interval in seconds"),
cfg.IntOpt("sahara_workers_per_proxy",
default=20,
deprecated_group="benchmark",
help="Amount of workers one proxy should serve to.")
]}
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,645
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/network/test_network.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import ddt
import netaddr
from rally_openstack.task.contexts.network import networks as network_context
from tests.unit import test
PATH = "rally_openstack.task.contexts.network.networks"
@ddt.ddt
class NetworkTestCase(test.TestCase):
def get_context(self, **kwargs):
return {"task": {"uuid": "foo_task"},
"admin": {"credential": "foo_admin"},
"config": {"network": kwargs},
"users": [{"id": "foo_user", "tenant_id": "foo_tenant",
"credential": mock.MagicMock()},
{"id": "bar_user", "tenant_id": "bar_tenant",
"credential": mock.MagicMock()}],
"tenants": {"foo_tenant": {"networks": [{"id": "foo_net"}]},
"bar_tenant": {"networks": [{"id": "bar_net"}]}}}
def test_default_start_cidr_is_valid(self):
netaddr.IPNetwork(network_context.Network.DEFAULT_CONFIG["start_cidr"])
def test__init__default(self):
context = network_context.Network(self.get_context())
self.assertEqual(1, context.config["networks_per_tenant"])
self.assertEqual(network_context.Network.DEFAULT_CONFIG["start_cidr"],
context.config["start_cidr"])
def test__init__explicit(self):
context = network_context.Network(
self.get_context(start_cidr="foo_cidr", networks_per_tenant=42,
network_create_args={"fakearg": "fake"},
dns_nameservers=["1.2.3.4", "5.6.7.8"]))
self.assertEqual(42, context.config["networks_per_tenant"])
self.assertEqual("foo_cidr", context.config["start_cidr"])
self.assertEqual({"fakearg": "fake"},
context.config["network_create_args"])
self.assertEqual(("1.2.3.4", "5.6.7.8"),
context.config["dns_nameservers"])
def test_setup(self):
ctx = self.get_context(networks_per_tenant=1,
network_create_args={},
subnets_per_network=2,
dns_nameservers=None,
external=True)
user = ctx["users"][0]
nc = user["credential"].clients.return_value.neutron.return_value
network = {"id": "net-id", "name": "s-1"}
subnets = [
{"id": "subnet1-id", "name": "subnet1-name"},
{"id": "subnet2-id", "name": "subnet2-name"}
]
router = {"id": "router"}
nc.create_network.return_value = {"network": network.copy()}
nc.create_router.return_value = {"router": router.copy()}
nc.create_subnet.side_effect = [{"subnet": s} for s in subnets]
network_context.Network(ctx).setup()
ctx_data = ctx["tenants"][ctx["users"][0]["tenant_id"]]
self.assertEqual(
[{
"id": network["id"],
"name": network["name"],
"router_id": router["id"],
"subnets": [s["id"] for s in subnets]
}],
ctx_data["networks"]
)
nc.create_network.assert_called_once_with(
{"network": {"name": mock.ANY}})
nc.create_router.assert_called_once_with(
{"router": {"name": mock.ANY}})
self.assertEqual(
[
mock.call({"subnet": {
"name": mock.ANY, "network_id": network["id"],
"dns_nameservers": mock.ANY,
"ip_version": 4,
"cidr": mock.ANY}})
for i in range(2)],
nc.create_subnet.call_args_list
)
self.assertEqual(
[
mock.call(router["id"], {"subnet_id": subnets[0]["id"]}),
mock.call(router["id"], {"subnet_id": subnets[1]["id"]})
],
nc.add_interface_router.call_args_list
)
def test_setup_without_router(self):
dns_nameservers = ["1.2.3.4", "5.6.7.8"]
ctx = self.get_context(networks_per_tenant=1,
network_create_args={},
subnets_per_network=2,
router=None,
dns_nameservers=dns_nameservers)
user = ctx["users"][0]
nc = user["credential"].clients.return_value.neutron.return_value
network = {"id": "net-id", "name": "s-1"}
subnets = [
{"id": "subnet1-id", "name": "subnet1-name"},
{"id": "subnet2-id", "name": "subnet2-name"}
]
router = {"id": "router"}
nc.create_network.return_value = {"network": network.copy()}
nc.create_router.return_value = {"router": router.copy()}
nc.create_subnet.side_effect = [{"subnet": s} for s in subnets]
network_context.Network(ctx).setup()
ctx_data = ctx["tenants"][ctx["users"][0]["tenant_id"]]
self.assertEqual(
[{
"id": network["id"],
"name": network["name"],
"router_id": None,
"subnets": [s["id"] for s in subnets]
}],
ctx_data["networks"]
)
nc.create_network.assert_called_once_with(
{"network": {"name": mock.ANY}})
self.assertEqual(
[
mock.call({"subnet": {
"name": mock.ANY, "network_id": network["id"],
# rally.task.context.Context converts list to unchangeable
# collection - tuple
"dns_nameservers": tuple(dns_nameservers),
"ip_version": 4,
"cidr": mock.ANY}})
for i in range(2)],
nc.create_subnet.call_args_list
)
self.assertFalse(nc.create_router.called)
self.assertFalse(nc.add_interface_router.called)
@mock.patch("%s.resource_manager.cleanup" % PATH)
def test_cleanup(self, mock_cleanup):
ctx = self.get_context()
network_context.Network(ctx).cleanup()
mock_cleanup.assert_called_once_with(
names=["neutron.subnet", "neutron.network", "neutron.router",
"neutron.port"],
superclass=network_context.Network,
admin=ctx.get("admin"),
users=ctx.get("users", []),
task_id=ctx["task"]["uuid"]
)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,646
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/sahara/test_utils.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from oslo_utils import uuidutils
from saharaclient.api import base as sahara_base
from rally.common import cfg
from rally import exceptions
from rally_openstack.common import consts
from rally_openstack.task.scenarios.sahara import utils
from tests.unit import test
CONF = cfg.CONF
SAHARA_UTILS = "rally_openstack.task.scenarios.sahara.utils"
class SaharaScenarioTestCase(test.ScenarioTestCase):
# NOTE(stpierre): the Sahara utils generally do funny stuff with
# wait_for() calls -- frequently the is_ready and
# update_resource arguments are functions defined in the Sahara
# utils themselves instead of the more standard resource_is() and
# get_from_manager() calls. As a result, the tests below do more
# integrated/functional testing of wait_for() calls, and we can't
# just mock out wait_for and friends the way we usually do.
patch_task_utils = False
def setUp(self):
super(SaharaScenarioTestCase, self).setUp()
CONF.set_override("sahara_cluster_check_interval", 0, "openstack")
CONF.set_override("sahara_job_check_interval", 0, "openstack")
def test_list_node_group_templates(self):
ngts = []
self.clients("sahara").node_group_templates.list.return_value = ngts
scenario = utils.SaharaScenario(self.context)
return_ngts_list = scenario._list_node_group_templates()
self.assertEqual(ngts, return_ngts_list)
self._test_atomic_action_timer(scenario.atomic_actions(),
"sahara.list_node_group_templates")
@mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name",
return_value="random_name")
@mock.patch(SAHARA_UTILS + ".sahara_consts")
def test_create_node_group_templates(
self, mock_sahara_consts,
mock_generate_random_name):
scenario = utils.SaharaScenario(self.context)
mock_processes = {
"test_plugin": {
"test_version": {
"master": ["p1"],
"worker": ["p2"]
}
}
}
mock_sahara_consts.NODE_PROCESSES = mock_processes
scenario._create_master_node_group_template(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version",
use_autoconfig=True
)
scenario._create_worker_node_group_template(
flavor_id="test_flavor",
plugin_name="test_plugin",
hadoop_version="test_version",
use_autoconfig=True
)
create_calls = [
mock.call(
name="random_name",
plugin_name="test_plugin",
hadoop_version="test_version",
flavor_id="test_flavor",
node_processes=["p1"],
use_autoconfig=True),
mock.call(
name="random_name",
plugin_name="test_plugin",
hadoop_version="test_version",
flavor_id="test_flavor",
node_processes=["p2"],
use_autoconfig=True
)]
self.clients("sahara").node_group_templates.create.assert_has_calls(
create_calls)
self._test_atomic_action_timer(
scenario.atomic_actions(),
"sahara.create_master_node_group_template")
self._test_atomic_action_timer(
scenario.atomic_actions(),
"sahara.create_worker_node_group_template")
def test_delete_node_group_templates(self):
scenario = utils.SaharaScenario(self.context)
ng = mock.MagicMock(id=42)
scenario._delete_node_group_template(ng)
delete_mock = self.clients("sahara").node_group_templates.delete
delete_mock.assert_called_once_with(42)
self._test_atomic_action_timer(scenario.atomic_actions(),
"sahara.delete_node_group_template")
@mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name",
return_value="random_name")
@mock.patch(SAHARA_UTILS + ".sahara_consts")
def test_launch_cluster(self, mock_sahara_consts,
mock_generate_random_name):
self.context.update({
"tenant": {
"networks": [
{
"id": "test_neutron_id",
"router_id": "test_router_id"
}
]
}
})
self.clients("services").values.return_value = [
consts.Service.NEUTRON
]
scenario = utils.SaharaScenario(context=self.context)
mock_processes = {
"test_plugin": {
"test_version": {
"master": ["p1"],
"worker": ["p2"]
}
}
}
mock_configs = {
"test_plugin": {
"test_version": {
"target": "HDFS",
"config_name": "dfs.replication"
}
}
}
floating_ip_pool_uuid = uuidutils.generate_uuid()
node_groups = [
{
"name": "master-ng",
"flavor_id": "test_flavor_m",
"node_processes": ["p1"],
"floating_ip_pool": floating_ip_pool_uuid,
"count": 1,
"auto_security_group": True,
"security_groups": ["g1", "g2"],
"node_configs": {"HDFS": {"local_config": "local_value"}},
"use_autoconfig": True,
}, {
"name": "worker-ng",
"flavor_id": "test_flavor_w",
"node_processes": ["p2"],
"floating_ip_pool": floating_ip_pool_uuid,
"volumes_per_node": 5,
"volumes_size": 10,
"count": 42,
"auto_security_group": True,
"security_groups": ["g1", "g2"],
"node_configs": {"HDFS": {"local_config": "local_value"}},
"use_autoconfig": True,
}
]
mock_sahara_consts.NODE_PROCESSES = mock_processes
mock_sahara_consts.REPLICATION_CONFIGS = mock_configs
self.clients("sahara").clusters.create.return_value.id = (
"test_cluster_id")
self.clients("sahara").clusters.get.return_value.status = (
"active")
scenario._launch_cluster(
plugin_name="test_plugin",
hadoop_version="test_version",
master_flavor_id="test_flavor_m",
worker_flavor_id="test_flavor_w",
image_id="test_image",
floating_ip_pool=floating_ip_pool_uuid,
volumes_per_node=5,
volumes_size=10,
auto_security_group=True,
security_groups=["g1", "g2"],
workers_count=42,
node_configs={"HDFS": {"local_config": "local_value"}},
use_autoconfig=True
)
self.clients("sahara").clusters.create.assert_called_once_with(
name="random_name",
plugin_name="test_plugin",
hadoop_version="test_version",
node_groups=node_groups,
default_image_id="test_image",
cluster_configs={"HDFS": {"dfs.replication": 3}},
net_id="test_neutron_id",
anti_affinity=None,
use_autoconfig=True
)
self._test_atomic_action_timer(scenario.atomic_actions(),
"sahara.launch_cluster")
@mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name",
return_value="random_name")
@mock.patch(SAHARA_UTILS + ".sahara_consts")
def test_launch_cluster_with_proxy(self, mock_sahara_consts,
mock_generate_random_name):
context = {
"tenant": {
"networks": [
{
"id": "test_neutron_id",
"router_id": "test_router_id"
}
]
}
}
self.clients("services").values.return_value = [
consts.Service.NEUTRON
]
scenario = utils.SaharaScenario(context=context)
mock_processes = {
"test_plugin": {
"test_version": {
"master": ["p1"],
"worker": ["p2"]
}
}
}
mock_configs = {
"test_plugin": {
"test_version": {
"target": "HDFS",
"config_name": "dfs.replication"
}
}
}
floating_ip_pool_uuid = uuidutils.generate_uuid()
node_groups = [
{
"name": "master-ng",
"flavor_id": "test_flavor_m",
"node_processes": ["p1"],
"floating_ip_pool": floating_ip_pool_uuid,
"count": 1,
"auto_security_group": True,
"security_groups": ["g1", "g2"],
"node_configs": {"HDFS": {"local_config": "local_value"}},
"is_proxy_gateway": True,
"use_autoconfig": True,
}, {
"name": "worker-ng",
"flavor_id": "test_flavor_w",
"node_processes": ["p2"],
"volumes_per_node": 5,
"volumes_size": 10,
"count": 40,
"auto_security_group": True,
"security_groups": ["g1", "g2"],
"node_configs": {"HDFS": {"local_config": "local_value"}},
"use_autoconfig": True,
}, {
"name": "proxy-ng",
"flavor_id": "test_flavor_w",
"node_processes": ["p2"],
"floating_ip_pool": floating_ip_pool_uuid,
"volumes_per_node": 5,
"volumes_size": 10,
"count": 2,
"auto_security_group": True,
"security_groups": ["g1", "g2"],
"node_configs": {"HDFS": {"local_config": "local_value"}},
"is_proxy_gateway": True,
"use_autoconfig": True,
}
]
mock_sahara_consts.NODE_PROCESSES = mock_processes
mock_sahara_consts.REPLICATION_CONFIGS = mock_configs
self.clients("sahara").clusters.create.return_value = mock.MagicMock(
id="test_cluster_id")
self.clients("sahara").clusters.get.return_value = mock.MagicMock(
status="active")
scenario._launch_cluster(
plugin_name="test_plugin",
hadoop_version="test_version",
master_flavor_id="test_flavor_m",
worker_flavor_id="test_flavor_w",
image_id="test_image",
floating_ip_pool=floating_ip_pool_uuid,
volumes_per_node=5,
volumes_size=10,
auto_security_group=True,
security_groups=["g1", "g2"],
workers_count=42,
node_configs={"HDFS": {"local_config": "local_value"}},
enable_proxy=True,
use_autoconfig=True
)
self.clients("sahara").clusters.create.assert_called_once_with(
name="random_name",
plugin_name="test_plugin",
hadoop_version="test_version",
node_groups=node_groups,
default_image_id="test_image",
cluster_configs={"HDFS": {"dfs.replication": 3}},
net_id="test_neutron_id",
anti_affinity=None,
use_autoconfig=True
)
self._test_atomic_action_timer(scenario.atomic_actions(),
"sahara.launch_cluster")
@mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name",
return_value="random_name")
@mock.patch(SAHARA_UTILS + ".sahara_consts")
def test_launch_cluster_error(self, mock_sahara_consts,
mock_generate_random_name):
scenario = utils.SaharaScenario(self.context)
mock_processes = {
"test_plugin": {
"test_version": {
"master": ["p1"],
"worker": ["p2"]
}
}
}
mock_configs = {
"test_plugin": {
"test_version": {
"target": "HDFS",
"config_name": "dfs.replication"
}
}
}
mock_sahara_consts.NODE_PROCESSES = mock_processes
mock_sahara_consts.REPLICATION_CONFIGS = mock_configs
self.clients("sahara").clusters.create.return_value = mock.MagicMock(
id="test_cluster_id")
self.clients("sahara").clusters.get.return_value = mock.MagicMock(
status="error")
self.assertRaises(exceptions.GetResourceErrorStatus,
scenario._launch_cluster,
plugin_name="test_plugin",
hadoop_version="test_version",
master_flavor_id="test_flavor_m",
worker_flavor_id="test_flavor_w",
image_id="test_image",
floating_ip_pool="test_pool",
volumes_per_node=5,
volumes_size=10,
workers_count=42,
node_configs={"HDFS": {"local_config":
"local_value"}})
def test_scale_cluster(self):
scenario = utils.SaharaScenario(self.context)
cluster = mock.MagicMock(id=42, node_groups=[{
"name": "random_master",
"count": 1
}, {
"name": "random_worker",
"count": 41
}])
self.clients("sahara").clusters.get.return_value = mock.MagicMock(
id=42,
status="active")
expected_scale_object = {
"resize_node_groups": [{
"name": "random_worker",
"count": 42
}]
}
scenario._scale_cluster(cluster, 1)
self.clients("sahara").clusters.scale.assert_called_once_with(
42, expected_scale_object)
def test_delete_cluster(self):
scenario = utils.SaharaScenario(self.context)
cluster = mock.MagicMock(id=42)
self.clients("sahara").clusters.get.side_effect = [
cluster, sahara_base.APIException()
]
scenario._delete_cluster(cluster)
delete_mock = self.clients("sahara").clusters.delete
delete_mock.assert_called_once_with(42)
cl_get_expected = mock.call(42)
self.clients("sahara").clusters.get.assert_has_calls([cl_get_expected,
cl_get_expected])
self._test_atomic_action_timer(scenario.atomic_actions(),
"sahara.delete_cluster")
@mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name",
return_value="42")
def test_create_output_ds(self, mock_generate_random_name):
self.context.update({
"sahara": {
"output_conf": {
"output_type": "hdfs",
"output_url_prefix": "hdfs://test_out/"
}
}
})
scenario = utils.SaharaScenario(self.context)
scenario._create_output_ds()
self.clients("sahara").data_sources.create.assert_called_once_with(
name="42",
description="",
data_source_type="hdfs",
url="hdfs://test_out/42"
)
@mock.patch(SAHARA_UTILS + ".SaharaScenario.generate_random_name",
return_value="42")
def test_create_output_ds_swift(self, mock_generate_random_name):
self.context.update({
"sahara": {
"output_conf": {
"output_type": "swift",
"output_url_prefix": "swift://test_out/"
}
}
})
scenario = utils.SaharaScenario(self.context)
self.assertRaises(exceptions.RallyException,
scenario._create_output_ds)
def test_run_job_execution(self):
self.clients("sahara").job_executions.get.side_effect = [
mock.MagicMock(info={"status": "pending"}, id="42"),
mock.MagicMock(info={"status": "SUCCESS"}, id="42")]
self.clients("sahara").job_executions.create.return_value = (
mock.MagicMock(id="42"))
scenario = utils.SaharaScenario(self.context)
scenario._run_job_execution(job_id="test_job_id",
cluster_id="test_cluster_id",
input_id="test_input_id",
output_id="test_output_id",
configs={"k": "v"},
job_idx=0)
self.clients("sahara").job_executions.create.assert_called_once_with(
job_id="test_job_id",
cluster_id="test_cluster_id",
input_id="test_input_id",
output_id="test_output_id",
configs={"k": "v"}
)
je_get_expected = mock.call("42")
self.clients("sahara").job_executions.get.assert_has_calls(
[je_get_expected, je_get_expected]
)
def test_run_job_execution_fail(self):
    """A job that ends up in the "killed" state raises RallyException."""
    # The poll sequence goes pending -> killed; "killed" is a terminal
    # failure state, so the scenario must raise instead of returning.
    self.clients("sahara").job_executions.get.side_effect = [
        mock.MagicMock(info={"status": "pending"}, id="42"),
        mock.MagicMock(info={"status": "killed"}, id="42")]
    self.clients("sahara").job_executions.create.return_value = (
        mock.MagicMock(id="42"))
    scenario = utils.SaharaScenario(self.context)
    self.assertRaises(exceptions.RallyException,
                      scenario._run_job_execution,
                      job_id="test_job_id",
                      cluster_id="test_cluster_id",
                      input_id="test_input_id",
                      output_id="test_output_id",
                      configs={"k": "v"},
                      job_idx=0)
    # Even though the run fails, the creation call itself must have
    # happened exactly once with the given arguments.
    self.clients("sahara").job_executions.create.assert_called_once_with(
        job_id="test_job_id",
        cluster_id="test_cluster_id",
        input_id="test_input_id",
        output_id="test_output_id",
        configs={"k": "v"}
    )
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,647
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/contexts/keystone/test_users.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally import exceptions
from rally_openstack.common import credential as oscredential
from rally_openstack.task.contexts.keystone import users
from tests.unit import test
from rally_openstack.common import consts
CTX = "rally_openstack.task.contexts.keystone.users"
class UserGeneratorBaseTestCase(test.ScenarioTestCase):
    """Behaviour shared by both UserGenerator modes (new/existing users)."""

    def setUp(self):
        """Build a context with an admin credential and no existing users."""
        super(UserGeneratorBaseTestCase, self).setUp()
        # Patch osclients so no real OpenStack client is ever constructed.
        self.osclients_patcher = mock.patch("%s.osclients" % CTX)
        self.osclients = self.osclients_patcher.start()
        self.addCleanup(self.osclients_patcher.stop)
        self.deployment_uuid = "deployment_id"
        self.admin_cred = {
            "username": "root", "password": "qwerty",
            "auth_url": "https://example.com",
            "project_domain_name": "foo",
            "user_domain_name": "bar"}
        self.platforms = {
            "openstack": {
                "admin": self.admin_cred,
                "users": []
            }
        }
        self.context.update({
            "config": {"users": {}},
            "env": {"platforms": self.platforms},
            "task": {"uuid": "task_id",
                     "deployment_uuid": self.deployment_uuid}
        })

    def test___init__for_new_users(self):
        """New-users config: domains default to the admin credential's."""
        self.context["config"]["users"] = {
            "tenants": 1, "users_per_tenant": 1,
            "resource_management_workers": 1}
        user_generator = users.UserGenerator(self.context)
        self.assertEqual([], user_generator.existing_users)
        self.assertEqual(self.admin_cred["project_domain_name"],
                         user_generator.config["project_domain"])
        self.assertEqual(self.admin_cred["user_domain_name"],
                         user_generator.config["user_domain"])
        # the case #2 - existing users are presented in deployment but
        # the user forces to create new ones
        self.platforms["openstack"]["users"] = [mock.Mock()]
        user_generator = users.UserGenerator(self.context)
        self.assertEqual([], user_generator.existing_users)
        self.assertEqual(self.admin_cred["project_domain_name"],
                         user_generator.config["project_domain"])
        self.assertEqual(self.admin_cred["user_domain_name"],
                         user_generator.config["user_domain"])

    def test___init__for_existing_users(self):
        """Existing platform users are adopted; config gets its defaults."""
        foo_user = mock.Mock()
        self.platforms["openstack"]["users"] = [foo_user]
        user_generator = users.UserGenerator(self.context)
        self.assertEqual([foo_user], user_generator.existing_users)
        self.assertEqual({"user_choice_method": "random"},
                         user_generator.config)
        # the case #2: the config with `user_choice_method` option
        self.context["config"]["users"] = {"user_choice_method": "foo"}
        user_generator = users.UserGenerator(self.context)
        self.assertEqual([foo_user], user_generator.existing_users)
        self.assertEqual({"user_choice_method": "foo"}, user_generator.config)

    def test_setup(self):
        """setup() creates new users only when no existing ones are known."""
        user_generator = users.UserGenerator(self.context)
        user_generator.use_existing_users = mock.Mock()
        user_generator.create_users = mock.Mock()
        # no existing users -> new users should be created
        user_generator.existing_users = []
        user_generator.setup()
        user_generator.create_users.assert_called_once_with()
        self.assertFalse(user_generator.use_existing_users.called)
        user_generator.create_users.reset_mock()
        user_generator.use_existing_users.reset_mock()
        # existing_users is not empty -> existing users should be created
        user_generator.existing_users = [mock.Mock()]
        user_generator.setup()
        user_generator.use_existing_users.assert_called_once_with()
        self.assertFalse(user_generator.create_users.called)

    def test_cleanup(self):
        """cleanup() removes resources only for generator-created users."""
        user_generator = users.UserGenerator(self.context)
        user_generator._remove_default_security_group = mock.Mock()
        user_generator._delete_users = mock.Mock()
        user_generator._delete_tenants = mock.Mock()
        # In case if existing users nothing should be done
        user_generator.existing_users = [mock.Mock]
        user_generator.cleanup()
        self.assertFalse(user_generator._remove_default_security_group.called)
        self.assertFalse(user_generator._delete_users.called)
        self.assertFalse(user_generator._delete_tenants.called)
        # In case when new users were created, the proper cleanup should be
        # performed
        user_generator.existing_users = []
        user_generator.cleanup()
        user_generator._remove_default_security_group.assert_called_once_with()
        user_generator._delete_users.assert_called_once_with()
        user_generator._delete_tenants.assert_called_once_with()
class UserGeneratorForExistingUsersTestCase(test.ScenarioTestCase):
    """Tests for adopting pre-existing deployment users into the context."""

    def setUp(self):
        """Build a context whose platform initially lists no users."""
        super(UserGeneratorForExistingUsersTestCase, self).setUp()
        self.osclients_patcher = mock.patch("%s.osclients" % CTX)
        self.osclients = self.osclients_patcher.start()
        self.addCleanup(self.osclients_patcher.stop)
        self.deployment_uuid = "deployment_id"
        self.platforms = {
            "openstack": {
                "admin": {"username": "root",
                          "password": "qwerty",
                          "auth_url": "https://example.com"},
                "users": []
            }
        }
        self.context.update({
            "config": {"users": {}},
            "users": [],
            "env": {"platforms": self.platforms},
            "task": {"uuid": "task_id",
                     "deployment_uuid": self.deployment_uuid}
        })

    @mock.patch("%s.credential.OpenStackCredential" % CTX)
    @mock.patch("%s.osclients.Clients" % CTX)
    def test_use_existing_users(self, mock_clients,
                                mock_open_stack_credential):
        """Existing users are resolved via keystone and grouped by project."""
        user1 = {"tenant_name": "proj", "username": "usr",
                 "password": "pswd", "auth_url": "https://example.com"}
        user2 = {"tenant_name": "proj", "username": "usr",
                 "password": "pswd", "auth_url": "https://example.com"}
        user3 = {"tenant_name": "proj", "username": "usr",
                 "password": "pswd", "auth_url": "https://example.com"}
        user_list = [user1, user2, user3]

        # Fake auth_ref whose properties hand out a fresh user id on every
        # access and alternate project ids (p1, p0, p1, ...) so that the
        # three users are spread over two distinct tenants.
        class AuthRef(object):
            USER_ID_COUNT = 0
            PROJECT_ID_COUNT = 0

            @property
            def user_id(self):
                self.USER_ID_COUNT += 1
                return "u%s" % self.USER_ID_COUNT

            @property
            def project_id(self):
                self.PROJECT_ID_COUNT += 1
                return "p%s" % (self.PROJECT_ID_COUNT % 2)

        auth_ref = AuthRef()
        mock_clients.return_value.keystone.auth_ref = auth_ref
        self.platforms["openstack"]["users"] = user_list
        user_generator = users.UserGenerator(self.context)
        user_generator.setup()
        self.assertIn("users", self.context)
        self.assertIn("tenants", self.context)
        self.assertIn("user_choice_method", self.context)
        self.assertEqual("random", self.context["user_choice_method"])
        creds = mock_open_stack_credential.return_value
        # Ids come from the AuthRef counters above; every entry shares the
        # single mocked credential object.
        self.assertEqual(
            [{"id": "u1", "credential": creds, "tenant_id": "p1"},
             {"id": "u2", "credential": creds, "tenant_id": "p0"},
             {"id": "u3", "credential": creds, "tenant_id": "p1"}],
            self.context["users"]
        )
        self.assertEqual({"p0": {"id": "p0", "name": creds.tenant_name},
                          "p1": {"id": "p1", "name": creds.tenant_name}},
                         self.context["tenants"])
class UserGeneratorForNewUsersTestCase(test.ScenarioTestCase):
    """Tests for creating (and cleaning up) brand-new tenants and users."""

    # Sizing knobs shared by the tests below.
    tenants_num = 1
    users_per_tenant = 5
    users_num = tenants_num * users_per_tenant
    threads = 10

    def setUp(self):
        """Build a context that forces the new-user creation path."""
        super(UserGeneratorForNewUsersTestCase, self).setUp()
        self.osclients_patcher = mock.patch("%s.osclients" % CTX)
        self.osclients = self.osclients_patcher.start()
        self.addCleanup(self.osclients_patcher.stop)
        # Force the case of creating new users
        self.platforms = {
            "openstack": {
                "admin": {"username": "root",
                          "password": "qwerty",
                          "auth_url": "https://example.com"},
                "users": []
            }
        }
        self.context.update({
            "config": {
                "users": {
                    "tenants": self.tenants_num,
                    "users_per_tenant": self.users_per_tenant,
                    "resource_management_workers": self.threads,
                }
            },
            "env": {"platforms": self.platforms},
            "users": [],
            "task": {"uuid": "task_id", "deployment_uuid": "dep_uuid"}
        })

    def test__remove_default_security_group(self):
        """Default SGs of context tenants are deleted; others are kept."""
        self.context.update(
            tenants={
                "tenant-1": {},
                "tenant-2": {}
            }
        )
        self.osclients.Clients.return_value = mock.Mock()
        neutron = self.osclients.Clients.return_value.neutron.return_value
        neutron.list_extensions.return_value = {
            "extensions": [{"alias": "security-group"}]}
        neutron.list_security_groups.return_value = {
            "security_groups": [
                {"id": "id-1", "name": "default", "tenant_id": "tenant-1"},
                {"id": "id-2", "name": "default", "tenant_id": "tenant-2"},
                {"id": "id-3", "name": "default", "tenant_id": "tenant-3"}
            ]
        }
        users.UserGenerator(self.context)._remove_default_security_group()
        neutron.list_security_groups.assert_called_once_with(name="default")
        # id-3 belongs to a tenant outside the context and must survive.
        self.assertEqual(
            [mock.call("id-1"), mock.call("id-2")],
            neutron.delete_security_group.call_args_list
        )

    def test__remove_default_security_group_no_sg(self):
        """Without the security-group extension nothing is listed/deleted."""
        self.context.update(
            tenants={
                "tenant-1": {},
                "tenant-2": {}
            }
        )
        self.osclients.Clients.return_value = mock.Mock()
        neutron = self.osclients.Clients.return_value.neutron.return_value
        neutron.list_extensions.return_value = {"extensions": []}
        neutron.list_security_groups.return_value = {
            "security_groups": [
                {"id": "id-1", "name": "default", "tenant_id": "tenant-1"},
                {"id": "id-2", "name": "default", "tenant_id": "tenant-2"},
                {"id": "id-3", "name": "default", "tenant_id": "tenant-3"}
            ]
        }
        users.UserGenerator(self.context)._remove_default_security_group()
        self.assertFalse(neutron.list_security_groups.called)
        self.assertFalse(neutron.delete_security_group.called)

    @mock.patch("%s.identity" % CTX)
    def test__create_tenants(self, mock_identity):
        """_create_tenants returns the requested number of named tenants."""
        self.context["config"]["users"]["tenants"] = 1
        user_generator = users.UserGenerator(self.context)
        tenants = user_generator._create_tenants(1)
        self.assertEqual(1, len(tenants))
        id, tenant = tenants.popitem()
        self.assertIn("name", tenant)

    @mock.patch("%s.identity" % CTX)
    def test__create_users(self, mock_identity):
        """_create_users yields users with an id and a credential each."""
        self.context["config"]["users"]["users_per_tenant"] = 2
        user_generator = users.UserGenerator(self.context)
        user_generator.context["tenants"] = {"t1": {"id": "t1", "name": "t1"},
                                             "t2": {"id": "t2", "name": "t2"}}
        users_ = user_generator._create_users(4)
        self.assertEqual(4, len(users_))
        for user in users_:
            self.assertIn("id", user)
            self.assertIn("credential", user)

    @mock.patch("%s.identity" % CTX)
    def test__create_users_user_password(self, mock_identity):
        """An explicit user_password is propagated into each credential."""
        self.context["config"]["users"]["users_per_tenant"] = 2
        self.context["config"]["users"]["user_password"] = "TrustMe"
        user_generator = users.UserGenerator(self.context)
        user_generator.context["tenants"] = {"t1": {"id": "t1", "name": "t1"},
                                             "t2": {"id": "t2", "name": "t2"}}
        users_ = user_generator._create_users(4)
        self.assertEqual(4, len(users_))
        for user in users_:
            self.assertIn("id", user)
            self.assertIn("credential", user)
            self.assertEqual("TrustMe", user["credential"]["password"])

    @mock.patch("%s.identity" % CTX)
    def test__delete_tenants(self, mock_identity):
        """_delete_tenants empties the tenants mapping in the context."""
        user_generator = users.UserGenerator(self.context)
        user_generator.context["tenants"] = {"t1": {"id": "t1", "name": "t1"},
                                             "t2": {"id": "t2", "name": "t2"}}
        user_generator._delete_tenants()
        self.assertEqual(0, len(user_generator.context["tenants"]))

    @mock.patch("%s.identity" % CTX)
    def test__delete_tenants_failure(self, mock_identity):
        """Tenants are dropped from the context even if deletion raises."""
        identity_service = mock_identity.Identity.return_value
        identity_service.delete_project.side_effect = Exception()
        user_generator = users.UserGenerator(self.context)
        user_generator.context["tenants"] = {"t1": {"id": "t1", "name": "t1"},
                                             "t2": {"id": "t2", "name": "t2"}}
        user_generator._delete_tenants()
        self.assertEqual(0, len(user_generator.context["tenants"]))

    @mock.patch("%s.identity" % CTX)
    def test__delete_users(self, mock_identity):
        """_delete_users empties the users list in the context."""
        user_generator = users.UserGenerator(self.context)
        user1 = mock.MagicMock()
        user2 = mock.MagicMock()
        user_generator.context["users"] = [user1, user2]
        user_generator._delete_users()
        self.assertEqual(0, len(user_generator.context["users"]))

    @mock.patch("%s.identity" % CTX)
    def test__delete_users_failure(self, mock_identity):
        """Users are dropped from the context even if deletion raises."""
        identity_service = mock_identity.Identity.return_value
        identity_service.delete_user.side_effect = Exception()
        user_generator = users.UserGenerator(self.context)
        user1 = mock.MagicMock()
        user2 = mock.MagicMock()
        user_generator.context["users"] = [user1, user2]
        user_generator._delete_users()
        self.assertEqual(0, len(user_generator.context["users"]))

    @mock.patch("%s.identity" % CTX)
    def test_setup_and_cleanup(self, mock_identity):
        """Full lifecycle: setup populates the context, cleanup empties it."""
        with users.UserGenerator(self.context) as ctx:
            ctx.setup()
            self.assertEqual(self.users_num,
                             len(ctx.context["users"]))
            self.assertEqual(self.tenants_num,
                             len(ctx.context["tenants"]))
            self.assertEqual("random", ctx.context["user_choice_method"])
        # Cleanup (called by content manager)
        self.assertEqual(0, len(ctx.context["users"]))
        self.assertEqual(0, len(ctx.context["tenants"]))

    @mock.patch("rally.common.broker.LOG.warning")
    @mock.patch("%s.identity" % CTX)
    def test_setup_and_cleanup_with_error_during_create_user(
            self, mock_identity, mock_log_warning):
        """A create_user failure aborts setup but still cleans up tenants."""
        identity_service = mock_identity.Identity.return_value
        identity_service.create_user.side_effect = Exception()
        with users.UserGenerator(self.context) as ctx:
            self.assertRaises(exceptions.ContextSetupFailure, ctx.setup)
            mock_log_warning.assert_called_with(
                "Failed to consume a task from the queue: ")
        # Ensure that tenants get deleted anyway
        self.assertEqual(0, len(ctx.context["tenants"]))

    @mock.patch("%s.identity" % CTX)
    def test_users_and_tenants_in_context(self, mock_identity):
        """Generated users inherit non-identity credential fields."""
        identity_service = mock_identity.Identity.return_value
        credential = oscredential.OpenStackCredential(
            "foo_url", "foo", "foo_pass",
            https_insecure=True,
            https_cacert="cacert")
        tmp_context = dict(self.context)
        tmp_context["config"]["users"] = {"tenants": 1,
                                          "users_per_tenant": 2,
                                          "resource_management_workers": 1}
        tmp_context["env"]["platforms"]["openstack"]["admin"] = credential
        credential_dict = credential.to_dict()
        user_list = [mock.MagicMock(id="id_%d" % i)
                     for i in range(self.users_num)]
        identity_service.create_user.side_effect = user_list
        with users.UserGenerator(tmp_context) as ctx:
            ctx.generate_random_name = mock.Mock()
            ctx.setup()
            create_tenant_calls = []
            for i, t in enumerate(ctx.context["tenants"]):
                create_tenant_calls.append(
                    mock.call(ctx.generate_random_name.return_value,
                              ctx.config["project_domain"]))
            for user in ctx.context["users"]:
                self.assertEqual(set(["id", "credential", "tenant_id"]),
                                 set(user.keys()))
                user_credential_dict = user["credential"].to_dict()
                # Identity-specific fields differ per user; everything
                # else must be copied over from the admin credential.
                excluded_keys = ["auth_url", "username", "password",
                                 "tenant_name", "region_name",
                                 "project_domain_name",
                                 "user_domain_name", "permission"]
                for key in (set(credential_dict.keys()) - set(excluded_keys)):
                    self.assertEqual(credential_dict[key],
                                     user_credential_dict[key],
                                     "The key '%s' differs." % key)
            tenants_ids = []
            for t in ctx.context["tenants"].keys():
                tenants_ids.append(t)
            for (user, tenant_id, orig_user) in zip(ctx.context["users"],
                                                    tenants_ids, user_list):
                self.assertEqual(orig_user.id, user["id"])
                self.assertEqual(tenant_id, user["tenant_id"])

    @mock.patch("%s.identity" % CTX)
    def test_users_contains_correct_endpoint_type(self, mock_identity):
        """The admin's explicit endpoint_type is inherited by new users."""
        credential = oscredential.OpenStackCredential(
            "foo_url", "foo", "foo_pass",
            endpoint_type=consts.EndpointType.INTERNAL)
        config = {
            "config": {
                "users": {
                    "tenants": 1,
                    "users_per_tenant": 2,
                    "resource_management_workers": 1
                }
            },
            "env": {"platforms": {"openstack": {"admin": credential,
                                                "users": []}}},
            "task": {"uuid": "task_id", "deployment_uuid": "deployment_id"}
        }
        user_generator = users.UserGenerator(config)
        users_ = user_generator._create_users(2)
        for user in users_:
            self.assertEqual("internal", user["credential"].endpoint_type)

    @mock.patch("%s.identity" % CTX)
    def test_users_contains_default_endpoint_type(self, mock_identity):
        """Without an explicit endpoint_type new users default to public."""
        credential = oscredential.OpenStackCredential(
            "foo_url", "foo", "foo_pass")
        config = {
            "config": {
                "users": {
                    "tenants": 1,
                    "users_per_tenant": 2,
                    "resource_management_workers": 1
                }
            },
            "env": {"platforms": {"openstack": {"admin": credential,
                                                "users": []}}},
            "task": {"uuid": "task_id", "deployment_uuid": "deployment_id"}
        }
        user_generator = users.UserGenerator(config)
        users_ = user_generator._create_users(2)
        for user in users_:
            self.assertEqual("public", user["credential"].endpoint_type)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,648
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/common/services/identity/test_keystone_v2.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
import uuid
import ddt
from rally_openstack.common.services.identity import identity
from rally_openstack.common.services.identity import keystone_v2
from tests.unit import test
PATH = "rally_openstack.common.services.identity.keystone_v2"
@ddt.ddt
class KeystoneV2ServiceTestCase(test.TestCase):
    """Tests for the raw keystone v2 service wrapper."""

    def setUp(self):
        """Wire the service to a fully mocked keystone client."""
        super(KeystoneV2ServiceTestCase, self).setUp()
        self.clients = mock.MagicMock()
        self.kc = self.clients.keystone.return_value
        self.name_generator = mock.MagicMock()
        self.service = keystone_v2.KeystoneV2Service(
            self.clients, name_generator=self.name_generator)

    def test_create_tenant(self):
        """create_tenant forwards the name to tenants.create."""
        name = "name"
        tenant = self.service.create_tenant(name)
        self.assertEqual(tenant, self.kc.tenants.create.return_value)
        self.kc.tenants.create.assert_called_once_with(name)

    # ``True`` means "let the service generate the value" — the generated
    # names come from the mocked name_generator ("foo", "bar").
    @ddt.data({"tenant_id": "fake_id", "name": True, "enabled": True,
               "description": True},
              {"tenant_id": "fake_id", "name": "some", "enabled": False,
               "description": "descr"})
    @ddt.unpack
    def test_update_tenant(self, tenant_id, name, enabled, description):
        """update_tenant passes explicit or generated values through."""
        self.name_generator.side_effect = ("foo", "bar")
        self.service.update_tenant(tenant_id,
                                   name=name,
                                   description=description,
                                   enabled=enabled)
        name = "foo" if name is True else name
        description = "bar" if description is True else description
        self.kc.tenants.update.assert_called_once_with(
            tenant_id, name=name, description=description, enabled=enabled)

    def test_delete_tenant(self):
        """delete_tenant forwards the id to tenants.delete."""
        tenant_id = "fake_id"
        self.service.delete_tenant(tenant_id)
        self.kc.tenants.delete.assert_called_once_with(tenant_id)

    def test_list_tenants(self):
        """list_tenants returns exactly what the client lists."""
        self.assertEqual(self.kc.tenants.list.return_value,
                         self.service.list_tenants())
        self.kc.tenants.list.assert_called_once_with()

    def test_get_tenant(self):
        """get_tenant forwards the id to tenants.get."""
        tenant_id = "fake_id"
        self.service.get_tenant(tenant_id)
        self.kc.tenants.get.assert_called_once_with(tenant_id)

    def test_create_user(self):
        """create_user forwards all fields and enables the user."""
        name = "name"
        password = "passwd"
        email = "rally@example.com"
        tenant_id = "project"
        user = self.service.create_user(name, password=password, email=email,
                                        tenant_id=tenant_id)
        self.assertEqual(user, self.kc.users.create.return_value)
        self.kc.users.create.assert_called_once_with(
            name=name, password=password, email=email, tenant_id=tenant_id,
            enabled=True)

    def test_create_users(self):
        """create_users calls create_user once per requested user."""
        self.service.create_user = mock.MagicMock()
        n = 2
        tenant_id = "some"
        self.assertEqual([self.service.create_user.return_value] * n,
                         self.service.create_users(number_of_users=n,
                                                   tenant_id=tenant_id))
        self.assertEqual([mock.call(tenant_id=tenant_id)] * n,
                         self.service.create_user.call_args_list)

    def test_update_user_with_wrong_params(self):
        """Unknown keyword arguments to update_user are rejected."""
        user_id = "fake_id"
        card_with_cvv2 = "1234 5678 9000 0000 : 666"
        self.assertRaises(NotImplementedError, self.service.update_user,
                          user_id, card_with_cvv2=card_with_cvv2)

    def test_update_user(self):
        """update_user forwards the supported fields to users.update."""
        user_id = "fake_id"
        name = "new name"
        email = "new.name2016@example.com"
        enabled = True
        self.service.update_user(user_id, name=name, email=email,
                                 enabled=enabled)
        self.kc.users.update.assert_called_once_with(
            user_id, name=name, email=email, enabled=enabled)

    def test_update_user_password(self):
        """update_user_password delegates to users.update_password."""
        user_id = "fake_id"
        password = "qwerty123"
        self.service.update_user_password(user_id, password=password)
        self.kc.users.update_password.assert_called_once_with(
            user_id, password=password)

    @ddt.data({"name": None, "service_type": None, "description": None},
              {"name": "some", "service_type": "st", "description": "d"})
    @ddt.unpack
    def test_create_service(self, name, service_type, description):
        """create_service fills missing fields with generated defaults."""
        self.assertEqual(self.kc.services.create.return_value,
                         self.service.create_service(name=name,
                                                     service_type=service_type,
                                                     description=description))
        name = name or self.name_generator.return_value
        service_type = service_type or "rally_test_type"
        description = description or self.name_generator.return_value
        self.kc.services.create.assert_called_once_with(
            name, service_type=service_type, description=description)

    def test_create_role(self):
        """create_role forwards the name to roles.create."""
        name = "some"
        self.service.create_role(name)
        self.kc.roles.create.assert_called_once_with(name)

    def test_add_role(self):
        """add_role maps its arguments onto roles.add_user_role."""
        role_id = "fake_id"
        user_id = "user_id"
        tenant_id = "tenant_id"
        self.service.add_role(role_id, user_id=user_id, tenant_id=tenant_id)
        self.kc.roles.add_user_role.assert_called_once_with(
            user=user_id, role=role_id, tenant=tenant_id)

    def test_list_roles(self):
        """list_roles returns exactly what the client lists."""
        self.assertEqual(self.kc.roles.list.return_value,
                         self.service.list_roles())
        self.kc.roles.list.assert_called_once_with()

    def test_list_roles_for_user(self):
        """list_roles_for_user delegates to roles.roles_for_user."""
        user_id = "user_id"
        tenant_id = "tenant_id"
        self.assertEqual(self.kc.roles.roles_for_user.return_value,
                         self.service.list_roles_for_user(user_id,
                                                          tenant_id=tenant_id))
        self.kc.roles.roles_for_user.assert_called_once_with(user_id,
                                                             tenant_id)

    def test_revoke_role(self):
        """revoke_role maps its arguments onto roles.remove_user_role."""
        role_id = "fake_id"
        user_id = "user_id"
        tenant_id = "tenant_id"
        self.service.revoke_role(role_id, user_id=user_id,
                                 tenant_id=tenant_id)
        self.kc.roles.remove_user_role.assert_called_once_with(
            user=user_id, role=role_id, tenant=tenant_id)

    def test_create_ec2credentials(self):
        """create_ec2credentials delegates to the ec2 client manager."""
        user_id = "fake_id"
        tenant_id = "fake_id"
        self.assertEqual(self.kc.ec2.create.return_value,
                         self.service.create_ec2credentials(
                             user_id, tenant_id=tenant_id))
        self.kc.ec2.create.assert_called_once_with(user_id,
                                                   tenant_id=tenant_id)
@ddt.ddt
class UnifiedKeystoneV2ServiceTestCase(test.TestCase):
def setUp(self):
super(UnifiedKeystoneV2ServiceTestCase, self).setUp()
self.clients = mock.MagicMock()
self.service = keystone_v2.UnifiedKeystoneV2Service(self.clients)
self.service._impl = mock.MagicMock()
def test_init_identity_service(self):
self.clients.keystone.return_value.version = "v2.0"
self.assertIsInstance(identity.Identity(self.clients)._impl,
keystone_v2.UnifiedKeystoneV2Service)
def test__check_domain(self):
self.service._check_domain("Default")
self.service._check_domain("default")
self.assertRaises(NotImplementedError, self.service._check_domain,
"non-default")
def test__unify_tenant(self):
class KeystoneV2Tenant(object):
def __init__(self, domain_id="domain_id"):
self.id = str(uuid.uuid4())
self.name = str(uuid.uuid4())
self.domain_id = domain_id
tenant = KeystoneV2Tenant()
project = self.service._unify_tenant(tenant)
self.assertIsInstance(project, identity.Project)
self.assertEqual(tenant.id, project.id)
self.assertEqual(tenant.name, project.name)
self.assertEqual("default", project.domain_id)
self.assertNotEqual(tenant.domain_id, project.domain_id)
def test__unify_user(self):
class KeystoneV2User(object):
def __init__(self, tenantId=None):
self.id = str(uuid.uuid4())
self.name = str(uuid.uuid4())
if tenantId is not None:
self.tenantId = tenantId
user = KeystoneV2User()
unified_user = self.service._unify_user(user)
self.assertIsInstance(unified_user, identity.User)
self.assertEqual(user.id, unified_user.id)
self.assertEqual(user.name, unified_user.name)
self.assertEqual("default", unified_user.domain_id)
self.assertIsNone(unified_user.project_id)
tenant_id = "tenant_id"
user = KeystoneV2User(tenantId=tenant_id)
unified_user = self.service._unify_user(user)
self.assertIsInstance(unified_user, identity.User)
self.assertEqual(user.id, unified_user.id)
self.assertEqual(user.name, unified_user.name)
self.assertEqual("default", unified_user.domain_id)
self.assertEqual(tenant_id, unified_user.project_id)
@mock.patch("%s.UnifiedKeystoneV2Service._check_domain" % PATH)
@mock.patch("%s.UnifiedKeystoneV2Service._unify_tenant" % PATH)
def test_create_project(
self, mock_unified_keystone_v2_service__unify_tenant,
mock_unified_keystone_v2_service__check_domain):
mock_unify_tenant = mock_unified_keystone_v2_service__unify_tenant
mock_check_domain = mock_unified_keystone_v2_service__check_domain
name = "name"
self.assertEqual(mock_unify_tenant.return_value,
self.service.create_project(name))
mock_check_domain.assert_called_once_with("Default")
mock_unify_tenant.assert_called_once_with(
self.service._impl.create_tenant.return_value)
self.service._impl.create_tenant.assert_called_once_with(name)
def test_update_project(self):
tenant_id = "fake_id"
name = "name"
description = "descr"
enabled = False
self.service.update_project(project_id=tenant_id, name=name,
description=description, enabled=enabled)
self.service._impl.update_tenant.assert_called_once_with(
tenant_id=tenant_id, name=name, description=description,
enabled=enabled)
def test_delete_project(self):
tenant_id = "fake_id"
self.service.delete_project(tenant_id)
self.service._impl.delete_tenant.assert_called_once_with(tenant_id)
@mock.patch("%s.UnifiedKeystoneV2Service._unify_tenant" % PATH)
def test_get_project(self,
mock_unified_keystone_v2_service__unify_tenant):
mock_unify_tenant = mock_unified_keystone_v2_service__unify_tenant
tenant_id = "id"
self.assertEqual(mock_unify_tenant.return_value,
self.service.get_project(tenant_id))
mock_unify_tenant.assert_called_once_with(
self.service._impl.get_tenant.return_value)
self.service._impl.get_tenant.assert_called_once_with(tenant_id)
@mock.patch("%s.UnifiedKeystoneV2Service._unify_tenant" % PATH)
def test_list_projects(self,
mock_unified_keystone_v2_service__unify_tenant):
mock_unify_tenant = mock_unified_keystone_v2_service__unify_tenant
tenants = [mock.MagicMock()]
self.service._impl.list_tenants.return_value = tenants
self.assertEqual([mock_unify_tenant.return_value],
self.service.list_projects())
mock_unify_tenant.assert_called_once_with(tenants[0])
@mock.patch("%s.UnifiedKeystoneV2Service._check_domain" % PATH)
@mock.patch("%s.UnifiedKeystoneV2Service._unify_user" % PATH)
def test_create_user(self, mock_unified_keystone_v2_service__unify_user,
mock_unified_keystone_v2_service__check_domain):
mock_check_domain = mock_unified_keystone_v2_service__check_domain
mock_unify_user = mock_unified_keystone_v2_service__unify_user
name = "name"
password = "passwd"
tenant_id = "project"
self.assertEqual(mock_unify_user.return_value,
self.service.create_user(name, password=password,
project_id=tenant_id))
mock_check_domain.assert_called_once_with("Default")
mock_unify_user.assert_called_once_with(
self.service._impl.create_user.return_value)
self.service._impl.create_user.assert_called_once_with(
username=name, password=password, tenant_id=tenant_id,
enabled=True)
@mock.patch("%s.UnifiedKeystoneV2Service._check_domain" % PATH)
@mock.patch("%s.UnifiedKeystoneV2Service._unify_user" % PATH)
def test_create_users(self, mock_unified_keystone_v2_service__unify_user,
mock_unified_keystone_v2_service__check_domain):
mock_check_domain = mock_unified_keystone_v2_service__check_domain
tenant_id = "project"
n = 3
domain_name = "Default"
self.service.create_users(
tenant_id, number_of_users=3,
user_create_args={"domain_name": domain_name})
mock_check_domain.assert_called_once_with(domain_name)
self.service._impl.create_users.assert_called_once_with(
tenant_id=tenant_id, number_of_users=n,
user_create_args={"domain_name": domain_name})
@mock.patch("%s.UnifiedKeystoneV2Service._unify_user" % PATH)
def test_list_users(self, mock_unified_keystone_v2_service__unify_user):
mock_unify_user = mock_unified_keystone_v2_service__unify_user
users = [mock.MagicMock()]
self.service._impl.list_users.return_value = users
self.assertEqual([mock_unify_user.return_value],
self.service.list_users())
mock_unify_user.assert_called_once_with(users[0])
@ddt.data({"user_id": "id", "enabled": False, "name": "Fake",
"email": "badboy@example.com", "password": "pass"},
{"user_id": "id", "enabled": None, "name": None,
"email": None, "password": None})
@ddt.unpack
def test_update_user(self, user_id, enabled, name, email, password):
self.service.update_user(user_id, enabled=enabled, name=name,
email=email, password=password)
if password:
self.service._impl.update_user_password.assert_called_once_with(
user_id=user_id, password=password)
args = {}
if enabled is not None:
args["enabled"] = enabled
if name is not None:
args["name"] = name
if email is not None:
args["email"] = email
if args:
self.service._impl.update_user.assert_called_once_with(
user_id, **args)
@mock.patch("%s.UnifiedKeystoneV2Service._unify_service" % PATH)
def test_list_services(self,
mock_unified_keystone_v2_service__unify_service):
mock_unify_service = mock_unified_keystone_v2_service__unify_service
services = [mock.MagicMock()]
self.service._impl.list_services.return_value = services
self.assertEqual([mock_unify_service.return_value],
self.service.list_services())
mock_unify_service.assert_called_once_with(services[0])
@mock.patch("%s.UnifiedKeystoneV2Service._unify_role" % PATH)
def test_create_role(self, mock_unified_keystone_v2_service__unify_role):
mock_unify_role = mock_unified_keystone_v2_service__unify_role
name = "some"
self.assertEqual(mock_unify_role.return_value,
self.service.create_role(name))
self.service._impl.create_role.assert_called_once_with(name)
mock_unify_role.assert_called_once_with(
self.service._impl.create_role.return_value)
def test_add_role(self):
role_id = "fake_id"
user_id = "user_id"
project_id = "user_id"
self.service.add_role(role_id, user_id=user_id,
project_id=project_id)
self.service._impl.add_role.assert_called_once_with(
user_id=user_id, role_id=role_id, tenant_id=project_id)
def test_delete_role(self):
role_id = "fake_id"
self.service.delete_role(role_id)
self.service._impl.delete_role.assert_called_once_with(role_id)
def test_revoke_role(self):
role_id = "fake_id"
user_id = "user_id"
project_id = "user_id"
self.service.revoke_role(role_id, user_id=user_id,
project_id=project_id)
self.service._impl.revoke_role.assert_called_once_with(
user_id=user_id, role_id=role_id, tenant_id=project_id)
    @mock.patch("%s.UnifiedKeystoneV2Service._unify_role" % PATH)
    def test_list_roles(self, mock_unified_keystone_v2_service__unify_role):
        """list_roles picks list_roles vs list_roles_for_user depending on
        the arguments, unifies each role, and rejects domain filtering."""
        mock_unify_role = mock_unified_keystone_v2_service__unify_role
        roles = [mock.MagicMock()]
        another_roles = [mock.MagicMock()]
        self.service._impl.list_roles.return_value = roles
        self.service._impl.list_roles_for_user.return_value = another_roles
        # case 1: no filters -> plain list_roles, per-user listing untouched
        self.assertEqual([mock_unify_role.return_value],
                         self.service.list_roles())
        self.service._impl.list_roles.assert_called_once_with()
        mock_unify_role.assert_called_once_with(roles[0])
        self.assertFalse(self.service._impl.list_roles_for_user.called)
        # Reset call records so case 2 asserts only its own interactions.
        self.service._impl.list_roles.reset_mock()
        mock_unify_role.reset_mock()
        # case 2: user/project filter -> list_roles_for_user with tenant_id
        user = "user"
        project = "project"
        self.assertEqual([mock_unify_role.return_value],
                         self.service.list_roles(user_id=user,
                                                 project_id=project))
        self.service._impl.list_roles_for_user.assert_called_once_with(
            user, tenant_id=project)
        self.assertFalse(self.service._impl.list_roles.called)
        mock_unify_role.assert_called_once_with(another_roles[0])
        # case 3: domain filtering is not supported by keystone v2
        self.assertRaises(NotImplementedError, self.service.list_roles,
                          domain_name="some")
def test_create_ec2credentials(self):
user_id = "id"
tenant_id = "tenant-id"
self.assertEqual(self.service._impl.create_ec2credentials.return_value,
self.service.create_ec2credentials(
user_id=user_id, project_id=tenant_id))
self.service._impl.create_ec2credentials.assert_called_once_with(
user_id=user_id, tenant_id=tenant_id)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,649
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/contexts/network/existing_network.py
|
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.common import validation
from rally_openstack.common import consts
from rally_openstack.common import osclients
from rally_openstack.task import context
@validation.add("required_platform", platform="openstack", users=True)
@context.configure(name="existing_network", platform="openstack", order=349)
class ExistingNetwork(context.OpenStackContext):
    """Expose pre-existing Neutron networks/subnets to Rally scenarios.

    Intended for deployments with existing users: nothing is created,
    the context only discovers what is already there.
    """
    CONFIG_SCHEMA = {
        "type": "object",
        "$schema": consts.JSON_SCHEMA,
        "additionalProperties": False
    }
    def setup(self):
        """Record each tenant's existing networks and subnets in context."""
        for user, tenant_id in self._iterate_per_tenants():
            clients = osclients.Clients(user["credential"])
            tenant_ctx = self.context["tenants"][tenant_id]
            tenant_ctx["networks"] = (
                clients.neutron().list_networks()["networks"])
            tenant_ctx["subnets"] = (
                clients.neutron().list_subnets()["subnets"])
    def cleanup(self):
        """Networks were not created by Rally, so there is nothing to do."""
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,650
|
openstack/rally-openstack
|
refs/heads/master
|
/tests/unit/task/scenarios/watcher/test_utils.py
|
# Copyright 2016: Servionica LTD.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from rally.common import cfg
from rally_openstack.task.scenarios.watcher import utils
from tests.unit import test
CONF = cfg.CONF
class WatcherScenarioTestCase(test.ScenarioTestCase):
    """Unit tests for the Watcher scenario atomic-action helpers."""
    def test_create_audit_template(self):
        # _create_audit_template should create via the admin client using
        # the scenario's generated name and record an atomic action.
        watcher_scenario = utils.WatcherScenario(self.context)
        watcher_scenario.generate_random_name = mock.MagicMock(
            return_value="mock_name")
        watcher_scenario._create_audit_template("fake_goal", "fake_strategy")
        self.admin_clients(
            "watcher").audit_template.create.assert_called_once_with(
            goal="fake_goal", strategy="fake_strategy",
            name="mock_name")
        self._test_atomic_action_timer(watcher_scenario.atomic_actions(),
                                       "watcher.create_audit_template")
    def test_list_audit_templates(self):
        # The helper must return the client's list unchanged.
        audit_templates_list = []
        watcher_scenario = utils.WatcherScenario(self.context)
        self.admin_clients(
            "watcher").audit_template.list.return_value = audit_templates_list
        return_audit_templates_list = watcher_scenario._list_audit_templates()
        self.assertEqual(audit_templates_list, return_audit_templates_list)
        self._test_atomic_action_timer(watcher_scenario.atomic_actions(),
                                       "watcher.list_audit_templates")
    def test_delete_audit_template(self):
        watcher_scenario = utils.WatcherScenario(self.context)
        watcher_scenario._delete_audit_template("fake_audit_template")
        self.admin_clients(
            "watcher").audit_template.delete.assert_called_once_with(
            "fake_audit_template")
        self._test_atomic_action_timer(watcher_scenario.atomic_actions(),
                                       "watcher.delete_audit_template")
    def test_create_audit(self):
        # _create_audit should create a ONESHOT audit and wait until its
        # "state" attribute reaches SUCCEEDED (FAILED being terminal failure).
        mock_audit_template = mock.Mock()
        watcher_scenario = utils.WatcherScenario(self.context)
        audit = watcher_scenario._create_audit(mock_audit_template)
        self.mock_wait_for_status.mock.assert_called_once_with(
            audit,
            ready_statuses=["SUCCEEDED"],
            failure_statuses=["FAILED"],
            status_attr="state",
            update_resource=self.mock_get_from_manager.mock.return_value,
            check_interval=CONF.openstack.watcher_audit_launch_poll_interval,
            timeout=CONF.openstack.watcher_audit_launch_timeout,
            id_attr="uuid")
        self.mock_get_from_manager.mock.assert_called_once_with()
        self.admin_clients("watcher").audit.create.assert_called_once_with(
            audit_template_uuid=mock_audit_template, audit_type="ONESHOT")
        self._test_atomic_action_timer(watcher_scenario.atomic_actions(),
                                       "watcher.create_audit")
    def test_delete_audit(self):
        # Deletion is keyed by the audit object's uuid attribute.
        mock_audit = mock.Mock()
        watcher_scenario = utils.WatcherScenario(self.context)
        watcher_scenario._delete_audit(mock_audit)
        self.admin_clients("watcher").audit.delete.assert_called_once_with(
            mock_audit.uuid)
        self._test_atomic_action_timer(watcher_scenario.atomic_actions(),
                                       "watcher.delete_audit")
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,651
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/sahara/utils.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import random
from oslo_utils import uuidutils
from rally.common import cfg
from rally.common import logging
from rally.common import utils as rutils
from rally import exceptions
from rally.task import atomic
from rally.task import utils
from rally_openstack.common import consts
from rally_openstack.task import scenario
from rally_openstack.task.scenarios.sahara import consts as sahara_consts
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class SaharaScenario(scenario.OpenStackScenario):
    """Base class for Sahara scenarios with basic atomic actions."""
    # NOTE(sskripnick): Some sahara resource names are validated as hostnames.
    # Since underscores are not allowed in hostnames we should not use them.
    RESOURCE_NAME_FORMAT = "rally-sahara-XXXXXX-XXXXXXXXXXXXXXXX"
    @atomic.action_timer("sahara.list_node_group_templates")
    def _list_node_group_templates(self):
        """Return user Node Group Templates list."""
        return self.clients("sahara").node_group_templates.list()
    @atomic.action_timer("sahara.create_master_node_group_template")
    def _create_master_node_group_template(self, flavor_id, plugin_name,
                                           hadoop_version,
                                           use_autoconfig=True):
        """Create a master Node Group Template with a random name.
        :param flavor_id: The required argument for the Template
        :param plugin_name: Sahara provisioning plugin name
        :param hadoop_version: The version of Hadoop distribution supported by
                               the plugin
        :param use_autoconfig: If True, instances of the node group will be
                               automatically configured during cluster
                               creation. If False, the configuration values
                               should be specify manually
        :returns: The created Template
        """
        name = self.generate_random_name()
        # The master processes for the plugin/version pair come from the
        # static NODE_PROCESSES mapping shipped with the sahara scenarios.
        return self.clients("sahara").node_group_templates.create(
            name=name,
            plugin_name=plugin_name,
            hadoop_version=hadoop_version,
            flavor_id=flavor_id,
            node_processes=sahara_consts.NODE_PROCESSES[plugin_name]
            [hadoop_version]["master"],
            use_autoconfig=use_autoconfig)
    @atomic.action_timer("sahara.create_worker_node_group_template")
    def _create_worker_node_group_template(self, flavor_id, plugin_name,
                                           hadoop_version, use_autoconfig):
        """Create a worker Node Group Template with a random name.
        :param flavor_id: The required argument for the Template
        :param plugin_name: Sahara provisioning plugin name
        :param hadoop_version: The version of Hadoop distribution supported by
                               the plugin
        :param use_autoconfig: If True, instances of the node group will be
                               automatically configured during cluster
                               creation. If False, the configuration values
                               should be specify manually
        :returns: The created Template
        """
        name = self.generate_random_name()
        return self.clients("sahara").node_group_templates.create(
            name=name,
            plugin_name=plugin_name,
            hadoop_version=hadoop_version,
            flavor_id=flavor_id,
            node_processes=sahara_consts.NODE_PROCESSES[plugin_name]
            [hadoop_version]["worker"],
            use_autoconfig=use_autoconfig)
    @atomic.action_timer("sahara.delete_node_group_template")
    def _delete_node_group_template(self, node_group):
        """Delete a Node Group Template by id.
        :param node_group: The Node Group Template to be deleted
        """
        self.clients("sahara").node_group_templates.delete(node_group.id)
    def _wait_active(self, cluster_object):
        """Block until the cluster reaches "active" or fail on "error"."""
        utils.wait_for_status(
            resource=cluster_object, ready_statuses=["active"],
            failure_statuses=["error"], update_resource=self._update_cluster,
            timeout=CONF.openstack.sahara_cluster_create_timeout,
            check_interval=CONF.openstack.sahara_cluster_check_interval)
    def _setup_neutron_floating_ip_pool(self, name_or_id):
        """Resolve a Neutron floating IP pool to a network id.

        Accepts a UUID (returned as-is), a network name (resolved via the
        network list), or a falsy value (falls back to the external gateway
        network of the tenant's router from context).
        """
        if name_or_id:
            if uuidutils.is_uuid_like(name_or_id):
                # Looks like an id is provided Return as is.
                return name_or_id
            else:
                # It's a name. Changing to id.
                for net in self.clients("neutron").list_networks()["networks"]:
                    if net["name"] == name_or_id:
                        return net["id"]
                # If the name is not found in the list. Exit with error.
                raise exceptions.ContextSetupFailure(
                    ctx_name=self.get_name(),
                    msg="Could not resolve Floating IP Pool name %s to id"
                        % name_or_id)
        else:
            # Pool is not provided. Using the one set as GW for current router.
            net = self.context["tenant"]["networks"][0]
            router_id = net["router_id"]
            router = self.clients("neutron").show_router(router_id)["router"]
            net_id = router["external_gateway_info"]["network_id"]
            return net_id
    def _setup_nova_floating_ip_pool(self, name):
        """Return the given Nova floating IP pool name, or pick a random
        existing one; None when no pools exist (instances may be
        unreachable in that case)."""
        if name:
            # The name is provided returning it as is.
            return name
        else:
            # The name is not provided. Discovering
            LOG.debug("No Floating Ip Pool provided. Taking random.")
            pools = self.clients("nova").floating_ip_pools.list()
            if pools:
                return random.choice(pools).name
            else:
                LOG.warning("No Floating Ip Pools found. This may cause "
                            "instances to be unreachable.")
                return None
    def _setup_floating_ip_pool(self, node_groups, floating_ip_pool,
                                enable_proxy):
        """Assign a floating IP pool to node groups.

        The pool value is resolved via the Neutron or Nova helper depending
        on the detected networking backend. With enable_proxy only the
        master and proxy groups get the pool (and become proxy gateways);
        otherwise every group gets it.
        """
        if consts.Service.NEUTRON in self.clients("services").values():
            LOG.debug("Neutron detected as networking backend.")
            floating_ip_pool_value = self._setup_neutron_floating_ip_pool(
                floating_ip_pool)
        else:
            LOG.debug("Nova Network detected as networking backend.")
            floating_ip_pool_value = self._setup_nova_floating_ip_pool(
                floating_ip_pool)
        if floating_ip_pool_value:
            LOG.debug("Using floating ip pool %s." % floating_ip_pool_value)
            # If the pool is set by any means assign it to all node groups.
            # If the proxy node feature is enabled, Master Node Group and
            # Proxy Workers should have a floating ip pool set up
            if enable_proxy:
                proxy_groups = [x for x in node_groups
                                if x["name"] in ("master-ng", "proxy-ng")]
                for ng in proxy_groups:
                    ng["is_proxy_gateway"] = True
                    ng["floating_ip_pool"] = floating_ip_pool_value
            else:
                for ng in node_groups:
                    ng["floating_ip_pool"] = floating_ip_pool_value
        return node_groups
    def _setup_volumes(self, node_groups, volumes_per_node, volumes_size):
        """Attach Cinder volume settings to worker and proxy node groups."""
        if volumes_per_node:
            LOG.debug("Adding volumes config to Node Groups")
            for ng in node_groups:
                ng_name = ng["name"]
                if "worker" in ng_name or "proxy" in ng_name:
                    # NOTE: Volume storage is used only by HDFS Datanode
                    # process which runs on workers and proxies.
                    ng["volumes_per_node"] = volumes_per_node
                    ng["volumes_size"] = volumes_size
        return node_groups
    def _setup_security_groups(self, node_groups, auto_security_group,
                               security_groups):
        """Apply auto_security_group and/or explicit security groups to
        every node group."""
        if auto_security_group:
            LOG.debug("Auto security group enabled. Adding to Node Groups.")
        if security_groups:
            LOG.debug("Adding provided Security Groups to Node Groups.")
        for ng in node_groups:
            if auto_security_group:
                ng["auto_security_group"] = auto_security_group
            if security_groups:
                ng["security_groups"] = security_groups
        return node_groups
    def _setup_node_configs(self, node_groups, node_configs):
        """Apply the same Hadoop config dict to every node group."""
        if node_configs:
            LOG.debug("Adding Hadoop configs to Node Groups")
            for ng in node_groups:
                ng["node_configs"] = node_configs
        return node_groups
    def _setup_node_autoconfig(self, node_groups, node_autoconfig):
        """Set the use_autoconfig flag on every node group."""
        LOG.debug("Adding auto-config par to Node Groups")
        for ng in node_groups:
            ng["use_autoconfig"] = node_autoconfig
        return node_groups
    def _setup_replication_config(self, hadoop_version, workers_count,
                                  plugin_name):
        """Build the HDFS replication config for the plugin/version pair,
        capping the factor at the cluster's worker count."""
        replication_value = min(workers_count, 3)
        # 3 is a default Hadoop replication
        conf = sahara_consts.REPLICATION_CONFIGS[plugin_name][hadoop_version]
        LOG.debug("Using replication factor: %s" % replication_value)
        replication_config = {
            conf["target"]: {
                conf["config_name"]: replication_value
            }
        }
        return replication_config
    @logging.log_deprecated_args("`flavor_id` argument is deprecated. Use "
                                 "`master_flavor_id` and `worker_flavor_id` "
                                 "parameters.", rally_version="0.2.0",
                                 deprecated_args=["flavor_id"])
    @atomic.action_timer("sahara.launch_cluster")
    def _launch_cluster(self, plugin_name, hadoop_version, master_flavor_id,
                        worker_flavor_id, image_id, workers_count,
                        flavor_id=None,
                        floating_ip_pool=None, volumes_per_node=None,
                        volumes_size=None, auto_security_group=None,
                        security_groups=None, node_configs=None,
                        cluster_configs=None, enable_anti_affinity=False,
                        enable_proxy=False,
                        wait_active=True,
                        use_autoconfig=True):
        """Create a cluster and wait until it becomes Active.
        The cluster is created with two node groups. The master Node Group is
        created with one instance. The worker node group contains
        node_count - 1 instances.
        :param plugin_name: provisioning plugin name
        :param hadoop_version: Hadoop version supported by the plugin
        :param master_flavor_id: flavor which will be used to create master
                                 instance
        :param worker_flavor_id: flavor which will be used to create workers
        :param image_id: image id that will be used to boot instances
        :param workers_count: number of worker instances. All plugins will
                              also add one Master instance and some plugins
                              add a Manager instance.
        :param floating_ip_pool: floating ip pool name from which Floating
                                 IPs will be allocated
        :param volumes_per_node: number of Cinder volumes that will be
                                 attached to every cluster node
        :param volumes_size: size of each Cinder volume in GB
        :param auto_security_group: boolean value. If set to True Sahara will
                                    create a Security Group for each Node Group
                                    in the Cluster automatically.
        :param security_groups: list of security groups that will be used
                                while creating VMs. If auto_security_group is
                                set to True, this list can be left empty.
        :param node_configs: configs dict that will be passed to each Node
                             Group
        :param cluster_configs: configs dict that will be passed to the
                                Cluster
        :param enable_anti_affinity: If set to true the vms will be scheduled
                                     one per compute node.
        :param enable_proxy: Use Master Node of a Cluster as a Proxy node and
                             do not assign floating ips to workers.
        :param wait_active: Wait until a Cluster gets int "Active" state
        :param use_autoconfig: If True, instances of the node group will be
                               automatically configured during cluster
                               creation. If False, the configuration values
                               should be specify manually
        :returns: created cluster
        """
        if enable_proxy:
            # Some workers are repurposed as proxy nodes; the ratio is
            # controlled by the sahara_workers_per_proxy config option.
            proxies_count = int(
                workers_count / CONF.openstack.sahara_workers_per_proxy)
        else:
            proxies_count = 0
        if flavor_id:
            # Note: the deprecated argument is used. Falling back to single
            # flavor behavior.
            master_flavor_id = flavor_id
            worker_flavor_id = flavor_id
        node_groups = [
            {
                "name": "master-ng",
                "flavor_id": master_flavor_id,
                "node_processes": sahara_consts.NODE_PROCESSES[plugin_name]
                [hadoop_version]["master"],
                "count": 1
            }, {
                "name": "worker-ng",
                "flavor_id": worker_flavor_id,
                "node_processes": sahara_consts.NODE_PROCESSES[plugin_name]
                [hadoop_version]["worker"],
                "count": workers_count - proxies_count
            }
        ]
        if proxies_count:
            node_groups.append({
                "name": "proxy-ng",
                "flavor_id": worker_flavor_id,
                "node_processes": sahara_consts.NODE_PROCESSES[plugin_name]
                [hadoop_version]["worker"],
                "count": proxies_count
            })
        if "manager" in (sahara_consts.NODE_PROCESSES[plugin_name]
                         [hadoop_version]):
            # Adding manager group separately as it is supported only in
            # specific configurations.
            node_groups.append({
                "name": "manager-ng",
                "flavor_id": master_flavor_id,
                "node_processes": sahara_consts.NODE_PROCESSES[plugin_name]
                [hadoop_version]["manager"],
                "count": 1
            })
        node_groups = self._setup_floating_ip_pool(node_groups,
                                                   floating_ip_pool,
                                                   enable_proxy)
        neutron_net_id = self._get_neutron_net_id()
        node_groups = self._setup_volumes(node_groups, volumes_per_node,
                                          volumes_size)
        node_groups = self._setup_security_groups(node_groups,
                                                  auto_security_group,
                                                  security_groups)
        node_groups = self._setup_node_configs(node_groups, node_configs)
        node_groups = self._setup_node_autoconfig(node_groups, use_autoconfig)
        replication_config = self._setup_replication_config(hadoop_version,
                                                            workers_count,
                                                            plugin_name)
        # The replication factor should be set for small clusters. However the
        # cluster_configs parameter can override it
        merged_cluster_configs = self._merge_configs(replication_config,
                                                     cluster_configs)
        aa_processes = None
        if enable_anti_affinity:
            aa_processes = (sahara_consts.ANTI_AFFINITY_PROCESSES[plugin_name]
                            [hadoop_version])
        name = self.generate_random_name()
        cluster_object = self.clients("sahara").clusters.create(
            name=name,
            plugin_name=plugin_name,
            hadoop_version=hadoop_version,
            node_groups=node_groups,
            default_image_id=image_id,
            net_id=neutron_net_id,
            cluster_configs=merged_cluster_configs,
            anti_affinity=aa_processes,
            use_autoconfig=use_autoconfig
        )
        if wait_active:
            LOG.debug("Starting cluster `%s`" % name)
            self._wait_active(cluster_object)
        # Re-fetch so the returned object reflects the final server state.
        return self.clients("sahara").clusters.get(cluster_object.id)
    def _update_cluster(self, cluster):
        """Refresh the cluster object from the API (used while polling)."""
        return self.clients("sahara").clusters.get(cluster.id)
    def _scale_cluster(self, cluster, delta):
        """The scaling helper.
        This method finds the worker node group in a cluster, builds a
        scale_object required by Sahara API and waits for the scaling to
        complete.
        NOTE: This method is not meant to be called directly in scenarios.
        There two specific scaling methods of up and down scaling which have
        different atomic timers.
        """
        worker_node_group = [g for g in cluster.node_groups
                             if "worker" in g["name"]][0]
        scale_object = {
            "resize_node_groups": [
                {
                    "name": worker_node_group["name"],
                    "count": worker_node_group["count"] + delta
                }
            ]
        }
        self.clients("sahara").clusters.scale(cluster.id, scale_object)
        self._wait_active(cluster)
    @atomic.action_timer("sahara.scale_up")
    def _scale_cluster_up(self, cluster, delta):
        """Add a given number of worker nodes to the cluster.
        :param cluster: The cluster to be scaled
        :param delta: The number of workers to be added. (A positive number is
                      expected here)
        """
        self._scale_cluster(cluster, delta)
    @atomic.action_timer("sahara.scale_down")
    def _scale_cluster_down(self, cluster, delta):
        """Remove a given number of worker nodes from the cluster.
        :param cluster: The cluster to be scaled
        :param delta: The number of workers to be removed. (A negative number
                      is expected here)
        """
        self._scale_cluster(cluster, delta)
    @atomic.action_timer("sahara.delete_cluster")
    def _delete_cluster(self, cluster):
        """Delete cluster.
        :param cluster: cluster to delete
        """
        LOG.debug("Deleting cluster `%s`" % cluster.name)
        self.clients("sahara").clusters.delete(cluster.id)
        utils.wait_for(
            resource=cluster,
            timeout=CONF.openstack.sahara_cluster_delete_timeout,
            check_interval=CONF.openstack.sahara_cluster_check_interval,
            is_ready=self._is_cluster_deleted)
    def _is_cluster_deleted(self, cluster):
        """Return True once the cluster GET raises an API error (deleted)."""
        # Imported lazily so the module does not hard-depend on saharaclient
        # at import time.
        from saharaclient.api import base as sahara_base
        LOG.debug("Checking cluster `%s` to be deleted. Status: `%s`"
                  % (cluster.name, cluster.status))
        try:
            self.clients("sahara").clusters.get(cluster.id)
            return False
        except sahara_base.APIException:
            return True
    def _create_output_ds(self):
        """Create an output Data Source based on EDP context
        :returns: The created Data Source
        """
        ds_type = self.context["sahara"]["output_conf"]["output_type"]
        url_prefix = self.context["sahara"]["output_conf"]["output_url_prefix"]
        if ds_type == "swift":
            raise exceptions.RallyException(
                "Swift Data Sources are not implemented yet")
        url = url_prefix.rstrip("/") + "/%s" % self.generate_random_name()
        return self.clients("sahara").data_sources.create(
            name=self.generate_random_name(),
            description="",
            data_source_type=ds_type,
            url=url)
    def _run_job_execution(self, job_id, cluster_id, input_id, output_id,
                           configs, job_idx):
        """Run a Job Execution and wait until it completes or fails.
        The Job Execution is accepted as successful when Oozie reports
        "success" or "succeeded" status. The failure statuses are "failed" and
        "killed".
        The timeout and the polling interval may be configured through
        "sahara_job_execution_timeout" and "sahara_job_check_interval"
        parameters under the "benchmark" section.
        :param job_id: The Job id that will be executed
        :param cluster_id: The Cluster id which will execute the Job
        :param input_id: The input Data Source id
        :param output_id: The output Data Source id
        :param configs: The config dict that will be passed as Job Execution's
                        parameters.
        :param job_idx: The index of a job in a sequence
        """
        # The inner function exists only so the atomic action name can embed
        # the job's position in the sequence ("sahara.job_execution_<idx>").
        @atomic.action_timer("sahara.job_execution_%s" % job_idx)
        def run(self):
            job_execution = self.clients("sahara").job_executions.create(
                job_id=job_id,
                cluster_id=cluster_id,
                input_id=input_id,
                output_id=output_id,
                configs=configs)
            utils.wait_for(
                resource=job_execution.id,
                is_ready=self._job_execution_is_finished,
                timeout=CONF.openstack.sahara_job_execution_timeout,
                check_interval=CONF.openstack.sahara_job_check_interval)
        run(self)
    def _job_execution_is_finished(self, je_id):
        """Poll one job execution; True on success, raise on failure."""
        status = self.clients("sahara").job_executions.get(je_id).info[
            "status"].lower()
        LOG.debug("Checking for Job Execution %s to complete. Status: %s"
                  % (je_id, status))
        if status in ("success", "succeeded"):
            return True
        elif status in ("failed", "killed"):
            raise exceptions.RallyException(
                "Job execution %s has failed" % je_id)
        return False
    def _merge_configs(self, *configs):
        """Merge configs in special format.
        It supports merging of configs in the following format:
        applicable_target -> config_name -> config_value
        """
        result = {}
        for config_dict in configs:
            if config_dict:
                for a_target in config_dict:
                    if a_target not in result or not result[a_target]:
                        result[a_target] = {}
                    result[a_target].update(config_dict[a_target])
        return result
    def _get_neutron_net_id(self):
        """Get the Neutron Network id from context.
        If Nova Network is used as networking backend, None is returned.
        :returns: Network id for Neutron or None for Nova Networking.
        """
        if consts.Service.NEUTRON not in self.clients("services").values():
            return None
        # Taking net id from context.
        net = self.context["tenant"]["networks"][0]
        neutron_net_id = net["id"]
        LOG.debug("Using neutron network %s." % neutron_net_id)
        LOG.debug("Using neutron router %s." % net["router_id"])
        return neutron_net_id
def init_sahara_context(context_instance):
    """Ensure the global and per-tenant "sahara" sub-dicts exist.

    Existing entries are preserved; missing ones are initialized to an
    empty dict.
    """
    ctx = context_instance.context
    ctx["sahara"] = ctx.get("sahara", {})
    for _user, tenant_id in rutils.iterate_per_tenants(ctx["users"]):
        tenant_ctx = ctx["tenants"][tenant_id]
        tenant_ctx["sahara"] = tenant_ctx.get("sahara", {})
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,652
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/task/scenarios/designate/utils.py
|
# Copyright 2014 Hewlett-Packard Development Company, L.P.
#
# Author: Endre Karlson <endre.karlson@hp.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from rally.task import atomic
from rally_openstack.task import scenario
class DesignateScenario(scenario.OpenStackScenario):
    """Base class for Designate scenarios with basic atomic actions."""
    # valid domain name cannot contain underscore characters
    # which are used in default autogenerated names
    RESOURCE_NAME_FORMAT = "s-rally-XXXXXXXX-XXXXXXXX"
    @atomic.action_timer("designate.create_zone")
    def _create_zone(self, name=None, type_=None, email=None, description=None,
                     ttl=None):
        """Create zone.
        :param name: Zone name
        :param type_: Zone type, PRIMARY or SECONDARY
        :param email: Zone owner email
        :param description: Zone description
        :param ttl: Zone ttl - Time to live in seconds
        :returns: designate zone dict
        """
        type_ = type_ or "PRIMARY"
        if type_ == "PRIMARY":
            email = email or "root@random.name"
            # Name is only useful to be random for PRIMARY
            name = name or "%s.name." % self.generate_random_name()
        return self.clients("designate", version="2").zones.create(
            name=name,
            type_=type_,
            email=email,
            description=description,
            ttl=ttl
        )
    @atomic.action_timer("designate.list_zones")
    def _list_zones(self, criterion=None, marker=None, limit=None):
        """Return user zone list.
        :param criterion: API Criterion to filter by
        :param marker: UUID marker of the item to start the page from
        :param limit: How many items to return in the page.
        :returns: list of designate zones
        """
        # FIX: these arguments were previously accepted but silently
        # dropped, so filtering and pagination never reached the API.
        # Forward them as documented (mirrors _list_recordsets below).
        return self.clients("designate", version="2").zones.list(
            criterion=criterion, marker=marker, limit=limit)
    @atomic.action_timer("designate.delete_zone")
    def _delete_zone(self, zone_id):
        """Delete designate zone.
        :param zone_id: Zone ID
        """
        self.clients("designate", version="2").zones.delete(zone_id)
    @atomic.action_timer("designate.list_recordsets")
    def _list_recordsets(self, zone_id, criterion=None, marker=None,
                         limit=None):
        """List zone recordsets.
        :param zone_id: Zone ID
        :param criterion: API Criterion to filter by
        :param marker: UUID marker of the item to start the page from
        :param limit: How many items to return in the page.
        :returns: zone recordsets list
        """
        return self.clients("designate", version="2").recordsets.list(
            zone_id, criterion=criterion, marker=marker, limit=limit)
    @atomic.action_timer("designate.create_recordset")
    def _create_recordset(self, zone, recordset=None):
        """Create a recordset in a zone.
        :param zone: zone dict
        :param recordset: recordset dict
        :returns: Designate recordset dict
        """
        recordset = recordset or {}
        # The public dict key is "type"; the client keyword is "type_".
        recordset.setdefault("type_", recordset.pop("type", "A"))
        if "name" not in recordset:
            recordset["name"] = "%s.%s" % (self.generate_random_name(),
                                           zone["name"])
        if "records" not in recordset:
            recordset["records"] = ["10.0.0.1"]
        return self.clients("designate", version="2").recordsets.create(
            zone["id"], **recordset)
    @atomic.action_timer("designate.delete_recordset")
    def _delete_recordset(self, zone_id, recordset_id):
        """Delete a zone recordset.
        :param zone_id: Zone ID
        :param recordset_id: Recordset ID
        """
        self.clients("designate", version="2").recordsets.delete(
            zone_id, recordset_id)
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
24,653
|
openstack/rally-openstack
|
refs/heads/master
|
/rally_openstack/verification/tempest/config.py
|
# Copyright 2014: Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import configparser
import inspect
import io
import os
from rally.common import cfg
from rally.common import logging
from rally import exceptions
from rally.verification import utils
from rally_openstack.common import consts
from rally_openstack.common import credential
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class TempestConfigfileManager(object):
    """Build a Tempest config file from a Rally environment.

    Reads the admin credentials and the service catalog of the
    "openstack" platform of a Rally environment and fills a
    ``configparser.ConfigParser`` instance with the options Tempest
    needs (identity endpoints, enabled services, networking, roles).
    """

    def __init__(self, env):
        openstack_platform = env.data["platforms"]["openstack"]
        # BUG FIX: the original constructed the credential object first
        # and then tested ``if not self.credential`` — a freshly created
        # object is always truthy, so the ValidationError was unreachable
        # and a missing "admin" entry surfaced as a bare KeyError.
        # Check for admin credentials up front instead. (This also fixes
        # the unbalanced quote around {env} in the error message.)
        admin_creds = openstack_platform["platform_data"].get("admin")
        if not admin_creds:
            raise exceptions.ValidationError(
                f"Failed to configure 'tempest' for '{env}' since "
                "admin credentials for OpenStack platform is missed there."
            )
        self.credential = credential.OpenStackCredential(
            permission=consts.EndpointPermission.ADMIN, **admin_creds)
        self.clients = self.credential.clients()
        self.available_services = self.clients.services().values()

        self.conf = configparser.ConfigParser(allow_no_value=True)
        # Keep option names case-sensitive; ConfigParser lower-cases
        # them by default.
        self.conf.optionxform = str

    def _get_service_type_by_service_name(self, service_name):
        """Return the catalog service type for *service_name*, or None."""
        for s_type, s_name in self.clients.services().items():
            if s_name == service_name:
                return s_type

    def _configure_auth(self, section_name="auth"):
        """Fill the [auth] section with the admin credentials."""
        self.conf.set(section_name, "admin_username",
                      self.credential.username)
        self.conf.set(section_name, "admin_password",
                      self.credential.password)
        self.conf.set(section_name, "admin_project_name",
                      self.credential.tenant_name)
        # Keystone v3 related parameter
        self.conf.set(section_name, "admin_domain_name",
                      self.credential.user_domain_name or "Default")

    # Sahara has two service types: 'data_processing' and 'data-processing'.
    # 'data_processing' is deprecated, but it can be used in previous
    # OpenStack releases. So we need to configure the 'catalog_type' option
    # to support environments where 'data_processing' is used as service
    # type for Sahara.
    def _configure_data_processing(self, section_name="data-processing"):
        if "sahara" in self.available_services:
            self.conf.set(section_name, "catalog_type",
                          self._get_service_type_by_service_name("sahara"))

    def _configure_identity(self, section_name="identity"):
        """Fill the [identity] section.

        Discovers which keystone API versions are reachable behind the
        configured auth_url and derives ``uri``/``uri_v3`` plus the
        default ``auth_version`` from the result.
        """
        self.conf.set(section_name, "region",
                      self.credential.region_name)

        # discover keystone versions

        def get_versions(auth_url):
            from keystoneauth1 import discover
            from keystoneauth1 import session

            temp_session = session.Session(
                # Use the CA bundle if given, otherwise honour the
                # https_insecure flag.
                verify=(self.credential.https_cacert
                        or not self.credential.https_insecure),
                timeout=CONF.openstack_client_http_timeout)
            data = discover.Discover(temp_session, auth_url).version_data()
            # map major version number -> endpoint url
            return dict([(v["version"][0], v["url"]) for v in data])

        # check the original auth_url without cropping versioning to
        # identify the default version
        versions = get_versions(self.credential.auth_url)
        cropped_auth_url = self.clients.keystone._remove_url_version()
        if cropped_auth_url == self.credential.auth_url:
            # the given auth_url doesn't contain version
            if set(versions.keys()) == {2, 3}:
                # ok, both versions of keystone are enabled, we can take
                # urls there
                uri = versions[2]
                uri_v3 = versions[3]
                target_version = 3
            elif set(versions.keys()) == {2} or set(versions.keys()) == {3}:
                # only one version is available while discovering;
                # get the most recent version
                target_version = sorted(versions.keys())[-1]
                if target_version == 2:
                    uri = versions[2]
                    uri_v3 = os.path.join(cropped_auth_url, "v3")
                else:
                    # keystone v2 is disabled. let's do it explicitly
                    self.conf.set("identity-feature-enabled", "api_v2",
                                  "False")
                    uri_v3 = versions[3]
                    uri = os.path.join(cropped_auth_url, "v2.0")
            else:
                # Does Keystone released new version of API ?!
                LOG.debug("Discovered keystone versions: %s" % versions)
                raise exceptions.RallyException("Failed to discover keystone "
                                                "auth urls.")
        else:
            # auth_url already carries a version suffix; derive the
            # sibling endpoint by swapping the suffix.
            if self.credential.auth_url.rstrip("/").endswith("v2.0"):
                uri = self.credential.auth_url
                uri_v3 = uri.replace("/v2.0", "/v3")
                target_version = 2
            else:
                uri_v3 = self.credential.auth_url
                uri = uri_v3.replace("/v3", "/v2.0")
                target_version = 3

        self.conf.set(section_name, "auth_version", "v%s" % target_version)
        self.conf.set(section_name, "uri", uri)
        self.conf.set(section_name, "uri_v3", uri_v3)

        if self.credential.endpoint_type:
            self.conf.set(section_name, "v2_endpoint_type",
                          self.credential.endpoint_type)
            self.conf.set(section_name, "v3_endpoint_type",
                          self.credential.endpoint_type)

        self.conf.set(section_name, "disable_ssl_certificate_validation",
                      str(self.credential.https_insecure))
        self.conf.set(section_name, "ca_certificates_file",
                      self.credential.https_cacert)

    # The compute section is configured in context class for Tempest
    # resources. Options which are configured there: 'image_ref',
    # 'image_ref_alt', 'flavor_ref', 'flavor_ref_alt'.

    def _configure_network(self, section_name="network"):
        """Point Tempest at a usable public (neutron) or fixed network."""
        if "neutron" in self.available_services:
            neutronclient = self.clients.neutron()
            public_nets = [
                net for net in neutronclient.list_networks()["networks"]
                if net["status"] == "ACTIVE" and net["router:external"] is True
            ]
            if public_nets:
                net_id = public_nets[0]["id"]
                net_name = public_nets[0]["name"]
                self.conf.set(section_name, "public_network_id", net_id)
                self.conf.set(section_name, "floating_network_name", net_name)
        else:
            # nova-network deployment: pick the first named network
            novaclient = self.clients.nova()
            net_name = next(net.human_id for net in novaclient.networks.list()
                            if net.human_id is not None)
            self.conf.set("compute", "fixed_network_name", net_name)
            self.conf.set("validation", "network_for_ssh", net_name)

    def _configure_network_feature_enabled(
            self, section_name="network-feature-enabled"):
        """Advertise the enabled neutron API extension aliases."""
        if "neutron" in self.available_services:
            neutronclient = self.clients.neutron()
            extensions = neutronclient.list_ext("extensions", "/extensions",
                                                retrieve_all=True)
            aliases = [ext["alias"] for ext in extensions["extensions"]]
            aliases_str = ",".join(aliases)
            self.conf.set(section_name, "api_extensions", aliases_str)

    def _configure_object_storage(self, section_name="object-storage"):
        self.conf.set(section_name, "operator_role",
                      CONF.openstack.swift_operator_role)
        self.conf.set(section_name, "reseller_admin_role",
                      CONF.openstack.swift_reseller_admin_role)

    def _configure_service_available(self, section_name="service_available"):
        """Flag each well-known service as available or not."""
        services = ["cinder", "glance", "heat", "ironic", "neutron", "nova",
                    "sahara", "swift"]
        for service in services:
            # Convert boolean to string because ConfigParser fails
            # on attempt to get option with boolean value
            self.conf.set(section_name, service,
                          str(service in self.available_services))

    def _configure_validation(self, section_name="validation"):
        # Floating IPs only make sense when neutron manages them.
        if "neutron" in self.available_services:
            self.conf.set(section_name, "connect_method", "floating")
        else:
            self.conf.set(section_name, "connect_method", "fixed")

    def _configure_orchestration(self, section_name="orchestration"):
        self.conf.set(section_name, "stack_owner_role",
                      CONF.openstack.heat_stack_owner_role)
        self.conf.set(section_name, "stack_user_role",
                      CONF.openstack.heat_stack_user_role)

    def create(self, conf_path, extra_options=None):
        """Render the Tempest config file.

        :param conf_path: path to write the resulting config file to
        :param extra_options: dict of extra options to merge in
        :returns: the rendered configuration as a string
        """
        self.conf.read(os.path.join(os.path.dirname(__file__), "config.ini"))
        # run every _configure_* helper defined on this class
        for name, method in inspect.getmembers(self, inspect.ismethod):
            if name.startswith("_configure_"):
                method()

        if extra_options:
            utils.add_extra_options(extra_options, self.conf)

        with open(conf_path, "w") as configfile:
            self.conf.write(configfile)

        raw_conf = io.StringIO()
        raw_conf.write("# Some empty values of options will be replaced while "
                       "creating required resources (images, flavors, etc).\n")
        self.conf.write(raw_conf)
        return raw_conf.getvalue()
|
{"/tests/unit/doc/test_docker_readme.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_zuul_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/rally_jobs/test_jobs.py": ["/rally_openstack/__init__.py"], "/tests/unit/test__compat.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_task_samples.py": ["/rally_openstack/__init__.py"], "/tests/unit/task/test_scenario.py": ["/rally_openstack/common/credential.py"], "/tests/ci/playbooks/roles/list-os-resources/library/osresources.py": ["/rally_openstack/__init__.py"], "/tests/functional/test_certification_task.py": ["/rally_openstack/__init__.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.