hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
eaed664154984091886fa4b3077e8b70dd6988ad | 69,444 | py | Python | models/SPADE_related.py | yizhouzhao/3D_SLN | 5db241f1daa6e8095b69ff8467551ce374b598b6 | [
"Apache-2.0"
] | 39 | 2020-07-25T18:03:18.000Z | 2022-03-30T04:27:47.000Z | models/SPADE_related.py | yizhouzhao/3D_SLN | 5db241f1daa6e8095b69ff8467551ce374b598b6 | [
"Apache-2.0"
] | 1 | 2020-10-19T03:12:48.000Z | 2020-10-19T03:49:04.000Z | models/SPADE_related.py | yizhouzhao/3D_SLN | 5db241f1daa6e8095b69ff8467551ce374b598b6 | [
"Apache-2.0"
] | 6 | 2020-08-02T07:44:42.000Z | 2022-01-06T03:13:15.000Z | import torch
from torch import nn
import torch.nn.functional as F
import torch.nn.utils.spectral_norm as spectral_norm
import numpy as np
import re
# TODO: Use the default SPADE code, and/or release the SPADE training code
def padded_conv(in_channels, out_channels, kernel_size, stride=1,
                padding=0, dilation=1, groups=1, bias=True):
    """Reflection-pad the input, then run a Conv2d that itself never pads."""
    conv = nn.Conv2d(in_channels, out_channels, kernel_size=kernel_size,
                     stride=stride, padding=0, dilation=dilation,
                     groups=groups, bias=bias)
    return nn.Sequential(nn.ReflectionPad2d(padding), conv)
class Conv2dBlock(nn.Module):
    """Pad -> Conv2d -> (optional norm) -> (optional activation) building block.

    Padding is performed by a dedicated layer chosen via `pad_type`, so the
    convolution itself always uses padding=0. `norm='spectral'` wraps the conv
    in spectral normalization instead of adding a norm layer.
    """

    def __init__(self, input_dim, output_dim, kernel_size, stride,
                 padding=0, norm='none', activation='relu', pad_type='zero', use_bias=True):
        super(Conv2dBlock, self).__init__()
        self.use_bias = use_bias
        # Padding layer.
        if pad_type == 'reflect':
            self.pad = nn.ReflectionPad2d(padding)
        elif pad_type == 'zero':
            self.pad = nn.ZeroPad2d(padding)
        else:
            assert 0, "Unsupported padding type: {}".format(pad_type)
        # Convolution (no internal padding; see above).
        self.conv = nn.Conv2d(input_dim, output_dim, kernel_size, stride, bias=self.use_bias)
        # Normalization.
        if norm == 'spectral':
            # Spectral norm re-parameterizes the conv; no separate norm layer.
            self.norm = None
            self.conv = spectral_norm(self.conv)
        elif norm == 'batch':
            self.norm = nn.BatchNorm2d(output_dim)
        elif norm == 'inst':
            self.norm = nn.InstanceNorm2d(output_dim, track_running_stats=False)
        elif norm == 'none':
            self.norm = None
        else:
            assert 0, "Unsupported normalization: {}".format(norm)
        # Activation: instantiate lazily from a lookup so only the selected
        # module (and its parameters, e.g. PReLU) is created.
        act_table = {
            'relu': lambda: nn.ReLU(inplace=True),
            'lrelu': lambda: nn.LeakyReLU(0.2, inplace=True),
            'prelu': nn.PReLU,
            'selu': lambda: nn.SELU(inplace=True),
            'tanh': nn.Tanh,
            'none': lambda: None,
        }
        assert activation in act_table, "Unsupported activation: {}".format(activation)
        self.activation = act_table[activation]()

    def forward(self, x):
        out = self.conv(self.pad(x))
        if self.norm is not None:
            out = self.norm(out)
        if self.activation is not None:
            out = self.activation(out)
        return out
class SEBlock2(nn.Module):
    """Squeeze-and-Excitation: rescale channels by a gated global descriptor."""

    def __init__(self, channel, reduction=4):
        super(SEBlock2, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        hidden = channel // reduction
        self.fc = nn.Sequential(
            nn.Linear(channel, hidden, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(hidden, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, channels = x.shape[:2]
        squeezed = self.avg_pool(x).view(batch, channels)
        scale = self.fc(squeezed).view(batch, channels, 1, 1)
        return x * scale.expand_as(x)
class SEResBlock2(nn.Module):
    """Residual block: two 3x3 Conv2dBlocks plus SE gating, identity skip."""

    def __init__(self, dim, norm='inst', activation='relu', pad_type='reflect', nz=0):
        super(SEResBlock2, self).__init__()
        layers = [
            Conv2dBlock(dim + nz, dim, 3, 1, 1, norm=norm, activation=activation, pad_type=pad_type),
            Conv2dBlock(dim, dim + nz, 3, 1, 1, norm=norm, activation='none', pad_type=pad_type),
            SEBlock2(dim + nz, reduction=4),
        ]
        self.model = nn.Sequential(*layers)

    def forward(self, x):
        # Identity residual connection around the SE conv stack.
        return self.model(x) + x
class SEResBlock3(nn.Module):
    """SE residual block with an optional learned (strided) skip projection
    and a final non-inplace LeakyReLU."""

    def __init__(self, inplane, outplane, stride=1, norm='spectral', pad_type='reflect'):
        super(SEResBlock3, self).__init__()
        self.model = nn.Sequential(
            Conv2dBlock(inplane, outplane, 3, stride, 1, norm=norm,
                        activation='lrelu', pad_type=pad_type, use_bias=True),
            Conv2dBlock(outplane, outplane, 3, 1, 1, norm=norm,
                        activation='none', pad_type=pad_type, use_bias=True),
            SEBlock2(outplane, reduction=4),
        )
        # The residual needs a learned projection whenever it cannot be added
        # directly (channel count or spatial size changes).
        if (outplane != inplane) or (stride != 1):
            self.learned_skip = Conv2dBlock(inplane, outplane, 3, stride, 1, norm='none',
                                            activation='none', pad_type=pad_type, use_bias=False)
        else:
            self.learned_skip = None
        # TODO: REMOVE INPLACE (original author's note; activation kept non-inplace)
        self.final_act = nn.LeakyReLU(0.2, inplace=False)

    def forward(self, x):
        shortcut = x if self.learned_skip is None else self.learned_skip(x)
        return self.final_act(self.model(x) + shortcut)
class LayerNorm2D(nn.Module):
    """Per-sample layer normalization over all non-batch dimensions.

    Uses the (unbiased) std with eps added to the std itself, and an optional
    per-channel affine transform (gamma ~ U(0,1), beta = 0 at init).
    """

    def __init__(self, num_features, eps=1e-5, affine=True):
        super(LayerNorm2D, self).__init__()
        self.num_features = num_features
        self.affine = affine
        self.eps = eps
        if self.affine:
            self.gamma = nn.Parameter(torch.Tensor(num_features).uniform_())
            self.beta = nn.Parameter(torch.zeros(num_features))

    def forward(self, x):
        flat = x.view(x.size(0), -1)
        stat_shape = [-1] + [1] * (x.dim() - 1)
        mean = flat.mean(1).view(*stat_shape)
        std = flat.std(1).view(*stat_shape)
        normed = (x - mean) / (std + self.eps)
        if self.affine:
            # Broadcast the per-channel parameters over the spatial dims.
            param_shape = [1, -1] + [1] * (x.dim() - 2)
            normed = normed * self.gamma.view(*param_shape) + self.beta.view(*param_shape)
        return normed
class SPADEGenerator(nn.Module):
    """SPADE generator (16*nf base width).

    Starts from either a latent z (projected by a linear layer) or a
    downsampled segmentation map, then upsamples through SPADE residual
    blocks re-conditioned on the segmentation map at every stage.

    Args:
        semantic_nc: channels of the semantic conditioning map.
        target_nc: channels of the generated output.
        nz: latent dimension; 0 disables the z pathway.
        ngf: base number of generator filters.
        norm: SPADE norm config string, e.g. 'spectralspadeinstance3x3'.
        crop_size: output spatial resolution.
        n_up: 'normal' | 'more' | 'most' -- number of upsampling stages.
    """
    def __init__(self, semantic_nc, target_nc, nz, ngf, norm, crop_size, n_up):
        super().__init__()
        nf = ngf
        self.nf = ngf
        self.n_up = n_up
        self.sw, self.sh = self.compute_latent_vector_size(n_up, crop_size)
        self.has_z = nz > 0
        self.nz = nz
        if self.has_z:
            # In case of VAE, we will sample from random z vector
            self.fc = nn.Linear(self.nz, 16 * nf * self.sw * self.sh)
        else:
            # Otherwise, we make the network deterministic by starting with
            # downsampled segmentation map instead of random z
            self.fc = nn.Conv2d(semantic_nc, 16 * nf, 3, padding=1)
        self.head_0 = SPADEResnetBlock(16 * nf, 16 * nf, norm, semantic_nc)
        self.G_middle_0 = SPADEResnetBlock(16 * nf, 16 * nf, norm, semantic_nc)
        self.G_middle_1 = SPADEResnetBlock(16 * nf, 16 * nf, norm, semantic_nc)
        self.up_0 = SPADEResnetBlock(16 * nf, 8 * nf, norm, semantic_nc)
        self.up_1 = SPADEResnetBlock(8 * nf, 4 * nf, norm, semantic_nc)
        self.up_2 = SPADEResnetBlock(4 * nf, 2 * nf, norm, semantic_nc)
        self.up_3 = SPADEResnetBlock(2 * nf, 1 * nf, norm, semantic_nc)
        final_nc = nf
        if n_up == 'most':
            self.up_4 = SPADEResnetBlock(1 * nf, nf // 2, norm, semantic_nc)
            final_nc = nf // 2
        self.conv_img_pre = SEResBlock2(final_nc)
        self.conv_img = nn.Conv2d(final_nc, target_nc, 5, padding=2)
        self.up = nn.Upsample(scale_factor=2)

    def compute_latent_vector_size(self, n_up, crop_size):
        """Return the starting (sw, sh) spatial size implied by `n_up`."""
        if n_up == 'normal':
            num_up_layers = 5
        elif n_up == 'more':
            num_up_layers = 6
        elif n_up == 'most':
            num_up_layers = 7
        else:
            raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
                             n_up)
        sw = crop_size // (2**num_up_layers)
        sh = sw
        return sw, sh

    def forward(self, input, z=None):
        seg = input
        if self.has_z:
            # we sample z from unit normal and reshape the tensor
            if z is None:
                print("Missing z vector, sampling from normal")
                # FIX: use input.device instead of input.get_device() --
                # get_device() is only valid for CUDA tensors, so sampling a
                # missing z used to fail on CPU.
                z = torch.randn(input.size(0), self.nz,
                                dtype=torch.float32, device=input.device)
            x = self.fc(z)
            x = x.view(-1, 16 * self.nf, self.sh, self.sw)
        else:
            # we downsample segmap and run convolution
            x = F.interpolate(seg, size=(self.sh, self.sw))
            x = self.fc(x)
        seg_1 = F.interpolate(seg, size=[self.sh, self.sw])
        x = self.head_0(x, seg_1)
        x = self.up(x)
        x = self.G_middle_0(x, seg)
        if self.n_up == 'more' or \
           self.n_up == 'most':
            x = self.up(x)
        x = self.G_middle_1(x, seg)
        x = self.up(x)
        x = self.up_0(x, seg)
        x = self.up(x)
        x = self.up_1(x, seg)
        x = self.up(x)
        x = self.up_2(x, seg)
        x = self.up(x)
        x = self.up_3(x, seg)
        if self.n_up == 'most':
            x = self.up(x)
            x = self.up_4(x, seg)
        x = self.conv_img_pre(x)
        x = self.conv_img(F.leaky_relu(x, 2e-1, inplace=True))
        # FIX: F.tanh is deprecated; torch.tanh is the supported equivalent.
        x = torch.tanh(x)
        return x
class SPADEResnetBlock(nn.Module):
    """Residual block whose normalization layers (SPADE) are conditioned on a
    semantic segmentation map passed alongside the features."""

    def __init__(self, fin, fout, norm, semantic_nc):
        super().__init__()
        self.learned_shortcut = (fin != fout)
        self.semantic_nc = semantic_nc
        fmiddle = min(fin, fout)
        # Main-path convolutions (and a 1x1 projection when fin != fout).
        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
        # Optionally wrap every conv in spectral normalization.
        if 'spectral' in norm:
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)
        # SPADE layers consume the config string with 'spectral' stripped.
        spade_config_str = norm.replace('spectral', '')
        self.norm_0 = SPADE(spade_config_str, fin, self.semantic_nc)
        self.norm_1 = SPADE(spade_config_str, fmiddle, self.semantic_nc)
        if self.learned_shortcut:
            self.norm_s = SPADE(spade_config_str, fin, self.semantic_nc)

    def forward(self, x, seg):
        # The block takes |seg|, the semantic map, for every SPADE layer.
        x_s = self.shortcut(x, seg)
        dx = self.conv_0(self.actvn(self.norm_0(x, seg)))
        dx = self.conv_1(self.actvn(self.norm_1(dx, seg)))
        return x_s + dx

    def shortcut(self, x, seg):
        if not self.learned_shortcut:
            return x
        return self.conv_s(self.norm_s(x, seg))

    def actvn(self, x):
        return F.leaky_relu(x, 2e-1, inplace=True)
class SPADE(nn.Module):
    """Spatially-adaptive (de)normalization.

    Normalizes `x` with a parameter-free norm, then modulates it with
    per-pixel gamma/beta maps predicted from the (resized) segmentation map.

    Args:
        config_text: e.g. 'spadeinstance3x3' -> instance norm, 3x3 kernels.
        norm_nc: channels of the feature map being normalized.
        label_nc: channels of the segmentation map.
    """
    def __init__(self, config_text, norm_nc, label_nc):
        super().__init__()
        assert config_text.startswith('spade')
        # FIX: raw string for the regex -- '\D'/'\d' in a plain literal are
        # invalid escape sequences (SyntaxWarning on modern Python).
        parsed = re.search(r'spade(\D+)(\d)x\d', config_text)
        param_free_norm_type = str(parsed.group(1))
        ks = int(parsed.group(2))
        if param_free_norm_type == 'instance':
            self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
        elif param_free_norm_type == 'syncbatch':
            # Synchronized batch norm is intentionally unsupported here.
            raise ValueError
        elif param_free_norm_type == 'batch':
            self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
        else:
            raise ValueError('%s is not a recognized param-free norm type in SPADE'
                             % param_free_norm_type)
        # The dimension of the intermediate embedding space. Yes, hardcoded.
        nhidden = 128
        pw = ks // 2
        self.mlp_shared = nn.Sequential(
            nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),
            nn.ReLU(inplace=True)
        )
        self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
        self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)

    def forward(self, x, segmap):
        # Part 1. generate parameter-free normalized activations
        normalized = self.param_free_norm(x)
        # Part 2. produce scaling and bias conditioned on semantic map,
        # resized to the feature map's spatial size.
        segmap = F.interpolate(segmap, size=x.size()[2:], mode='bilinear')
        actv = self.mlp_shared(segmap)
        gamma = self.mlp_gamma(actv)
        beta = self.mlp_beta(actv)
        # apply scale and bias
        out = normalized * (1 + gamma) + beta
        return out
def get_nonspade_norm_layer(norm_type='instance'):
    """Return a function that wraps a conv layer with the requested norm.

    `norm_type` may be prefixed with 'spectral' (e.g. 'spectralinstance'): the
    layer is spectrally normalized first, and any zero padding it had is
    converted to reflection padding placed in front of it.
    """
    # helper function to get # output channels of the previous layer
    def get_out_channel(layer):
        if hasattr(layer, 'out_channels'):
            return getattr(layer, 'out_channels')
        return layer.weight.size(0)

    # this function will be returned
    def add_norm_layer(layer):
        nonlocal norm_type
        # FIX: old_padding/subnorm_type must exist on every path; previously
        # they were only assigned inside the 'spectral' branch, so plain norm
        # types ('instance', 'batch', ...) raised NameError below.
        old_padding = 0
        if norm_type.startswith('spectral'):
            if layer.padding != (0, 0):
                # Strip the conv's own padding; re-applied as reflection pad.
                old_padding = layer.padding[0]
                layer.padding = (0, 0)
            layer = spectral_norm(layer)
            subnorm_type = norm_type[len('spectral'):]
            if subnorm_type == 'none' or len(subnorm_type) == 0:
                return layer
        else:
            subnorm_type = norm_type
        # remove bias in the previous layer, which is meaningless
        # since it has no effect after normalization
        if getattr(layer, 'bias', None) is not None:
            delattr(layer, 'bias')
            layer.register_parameter('bias', None)
        if subnorm_type == 'batch':
            norm_layer = nn.BatchNorm2d(get_out_channel(layer), affine=True)
        elif subnorm_type == 'sync_batch':
            # Did not import the submodule containing syncbatch norm
            raise ValueError
        elif subnorm_type == 'instance':
            norm_layer = nn.InstanceNorm2d(get_out_channel(layer), affine=True)
        elif subnorm_type == 'layer':
            norm_layer = LayerNorm2D(get_out_channel(layer), affine=True)
        else:
            raise ValueError('normalization layer %s is not recognized' % subnorm_type)
        # FIX: nn.Sequential(None, ...) raises; only prepend the reflection
        # pad when padding was actually stripped from the conv above.
        if old_padding != 0:
            return nn.Sequential(nn.ReflectionPad2d(old_padding), layer, norm_layer)
        return nn.Sequential(layer, norm_layer)
    return add_norm_layer
# From SPADE
class MultiscaleDiscriminator(nn.Module):
    """Runs several NLayerDiscriminators on progressively downsampled input;
    each coarser-scale discriminator is one conv layer shallower."""

    def __init__(self, input_nc, conditional_nc, ndf, norm_layer, n_layers, num_D=2, use_feat_loss=True):
        super().__init__()
        self.use_feat_loss = use_feat_loss
        depth = n_layers
        for i in range(num_D):
            subnetD = self.create_single_discriminator(
                input_nc, conditional_nc, ndf, norm_layer, depth, use_feat_loss)
            self.add_module('discriminator_%d' % i, subnetD)
            depth = depth - 1

    def create_single_discriminator(self, input_nc, conditional_nc, ndf, norm_layer, n_layers, use_feat_loss):
        # Only the pix2pixHD-style n_layer sub-architecture is supported.
        subarch = 'n_layer'
        if subarch != 'n_layer':
            raise ValueError('unrecognized discriminator subarchitecture %s' % subarch)
        print("Selected n_layer pix2pixHD discrim")
        return NLayerDiscriminator(input_nc, conditional_nc, ndf, norm_layer, n_layers, use_feat_loss)

    def downsample(self, input):
        return F.avg_pool2d(input, kernel_size=3,
                            stride=2, padding=[1, 1],
                            count_include_pad=False)

    # Returns list of lists of discriminator outputs: one entry per
    # discriminator, each either the final score map (wrapped in a list) or,
    # when feature losses are enabled, all intermediate outputs.
    def forward(self, input):
        result = []
        for name, D in self.named_children():
            out = D(input)
            if not self.use_feat_loss:
                out = [out]
            result.append(out)
            input = self.downsample(input)
        return result
# From SPADE
class NLayerDiscriminator(nn.Module):
    """Pix2pixHD-style PatchGAN discriminator with n strided conv layers;
    each layer group is registered separately so intermediate activations
    can be collected for feature-matching losses."""

    def __init__(self, input_nc, conditional_nc, ndf, norm_layer, n_layers, use_feat_loss):
        super().__init__()
        self.use_feat_loss = use_feat_loss
        kw = 4
        padw = int(np.ceil((kw - 1.0) / 2))
        nf = ndf
        if conditional_nc <= 0:
            print("Creating Pix2PixHD discriminator")
            print("0 dimensional input set")
        input_nc_total = input_nc + conditional_nc
        wrap_with_norm = get_nonspade_norm_layer(norm_layer)
        groups = [[nn.Conv2d(input_nc_total, nf, kernel_size=kw, stride=2, padding=padw),
                   nn.LeakyReLU(0.2, True)]]
        for n in range(1, n_layers):
            nf_prev, nf = nf, min(nf * 2, 512)
            # Last conv layer keeps stride 1 to preserve resolution.
            stride_val = 1 if n == n_layers - 1 else 2
            groups.append([wrap_with_norm(nn.Conv2d(nf_prev, nf, kernel_size=kw,
                                                    stride=stride_val, padding=padw)),
                           nn.LeakyReLU(0.2, True)])
        # Final score head (NOTE(review): kernel_size=1 with padding=1 is kept
        # exactly as in the original code).
        groups.append([nn.Conv2d(nf, 1, kernel_size=1, stride=1, padding=1)])
        # We divide the layers into groups to extract intermediate layer outputs
        for idx, group in enumerate(groups):
            self.add_module('model' + str(idx), nn.Sequential(*group))

    def forward(self, input):
        results = [input]
        for submodel in self.children():
            results.append(submodel(results[-1]))
        if self.use_feat_loss:
            return results[1:]
        return results[-1]
# Version of GANLoss from SPADE
class GANLoss_2(nn.Module):
    """GAN objective supporting 'original' (BCE-with-logits), 'ls'/'lsgan'
    (MSE), 'hinge' and 'w' (Wasserstein) modes.

    Label tensors are created lazily, cached, and broadcast to each
    prediction's shape. Inputs may be single tensors or (nested) lists of
    tensors, as produced by a multiscale discriminator.
    """

    def __init__(self, gan_mode, target_real_label=1.0, target_fake_label=0.0,
                 tensor=torch.FloatTensor, opt=None):
        super(GANLoss_2, self).__init__()
        self.real_label = target_real_label
        self.fake_label = target_fake_label
        self.real_label_tensor = None
        self.fake_label_tensor = None
        self.zero_tensor = None
        self.Tensor = tensor
        self.gan_mode = gan_mode
        self.opt = opt
        if gan_mode not in ('ls', 'lsgan', 'original', 'w', 'hinge'):
            raise ValueError('Unexpected gan_mode {}'.format(gan_mode))

    def get_target_tensor(self, input, target_is_real):
        # Lazily build a 1-element label tensor per polarity and broadcast it.
        if target_is_real:
            if self.real_label_tensor is None:
                self.real_label_tensor = self.Tensor(1).fill_(self.real_label)
                self.real_label_tensor.requires_grad_(False)
            return self.real_label_tensor.expand_as(input)
        if self.fake_label_tensor is None:
            self.fake_label_tensor = self.Tensor(1).fill_(self.fake_label)
            self.fake_label_tensor.requires_grad_(False)
        return self.fake_label_tensor.expand_as(input)

    def get_zero_tensor(self, input):
        if self.zero_tensor is None:
            self.zero_tensor = self.Tensor(1).fill_(0)
            self.zero_tensor.requires_grad_(False)
        return self.zero_tensor.expand_as(input)

    def loss(self, input, target_is_real, for_discriminator=True):
        mode = self.gan_mode
        if mode == 'original':  # cross entropy loss
            target = self.get_target_tensor(input, target_is_real)
            return F.binary_cross_entropy_with_logits(input, target)
        if mode in ('ls', 'lsgan'):
            target = self.get_target_tensor(input, target_is_real)
            return F.mse_loss(input, target)
        if mode == 'hinge':
            if for_discriminator:
                shifted = input - 1 if target_is_real else -input - 1
                return -torch.mean(torch.min(shifted, self.get_zero_tensor(input)))
            assert target_is_real, "The generator's hinge loss must be aiming for real"
            return -torch.mean(input)
        # wgan
        return -input.mean() if target_is_real else input.mean()

    def __call__(self, input, target_is_real, for_discriminator=True):
        # computing loss is a bit complicated because |input| may not be
        # a tensor, but list of tensors in case of multiscale discriminator
        if not isinstance(input, list):
            return self.loss(input, target_is_real, for_discriminator)
        total = 0
        for pred_i in input:
            if isinstance(pred_i, list):
                pred_i = pred_i[-1]
            # NOTE(review): keeps the original length-2 unwrap as-is; it looks
            # like it expects (score, extra) pairs -- confirm against callers.
            if len(pred_i) == 2:
                pred_i = pred_i[0]
            loss_tensor = self.loss(pred_i, target_is_real, for_discriminator)
            bs = 1 if len(loss_tensor.size()) == 0 else loss_tensor.size(0)
            total += torch.mean(loss_tensor.view(bs, -1), dim=1)
        return total / len(input)
class ConvEncoder(nn.Module):
    """ Same architecture as the image discriminator """

    def __init__(self, input_nc, output_nc, nef, norm_layer_str, crop_size):
        super().__init__()
        self.crop_size = crop_size
        kw = 3
        pw = int(np.ceil((kw - 1.0) / 2))
        norm_layer = get_nonspade_norm_layer(norm_layer_str)
        # Five stride-2 conv stages (a sixth is added for large crop sizes).
        widths = [(input_nc, nef), (nef * 1, nef * 2), (nef * 2, nef * 4),
                  (nef * 4, nef * 8), (nef * 8, nef * 8)]
        for idx, (cin, cout) in enumerate(widths, start=1):
            self.add_module('layer%d' % idx,
                            norm_layer(nn.Conv2d(cin, cout, kw, stride=2, padding=pw)))
        self.pool_layer = nn.AdaptiveAvgPool2d(1)
        if self.crop_size >= 256:
            self.layer6 = norm_layer(nn.Conv2d(nef * 8, nef * 8, kw, stride=2, padding=pw))
        # Heads for the VAE posterior parameters.
        self.fc_mu = nn.Linear(nef * 8, output_nc)
        self.fc_var = nn.Linear(nef * 8, output_nc)
        self.actvn = nn.LeakyReLU(0.2, True)

    def forward(self, x):
        # The encoder always operates at 256x256.
        if x.size(2) != 256 or x.size(3) != 256:
            x = F.interpolate(x, size=(256, 256), mode='bilinear')
        x = self.layer1(x)
        for stage in (self.layer2, self.layer3, self.layer4, self.layer5):
            x = stage(self.actvn(x))
        if self.crop_size >= 256:
            x = self.layer6(self.actvn(x))
        x = self.actvn(self.pool_layer(x))
        flat = x.view(x.size(0), -1)
        return self.fc_mu(flat), self.fc_var(flat)
class SPADEGenerator2(nn.Module):
    """Variant of SPADEGenerator using a 12*nf base width and the
    depth-aware SPADEResnetBlock2 / SPADE2 conditioning blocks.

    Args:
        semantic_nc: channels of the semantic conditioning map.
        target_nc: channels of the generated output.
        nz: latent dimension; 0 disables the z pathway.
        ngf: base number of generator filters.
        norm: SPADE norm config string, e.g. 'spectralspadeinstance3x3'.
        crop_size: output spatial resolution.
        n_up: 'normal' | 'more' | 'most' -- number of upsampling stages.
    """
    def __init__(self, semantic_nc, target_nc, nz, ngf, norm, crop_size, n_up):
        super().__init__()
        nf = ngf
        self.nf = ngf
        self.n_up = n_up
        self.sw, self.sh = self.compute_latent_vector_size(n_up, crop_size)
        self.has_z = nz > 0
        self.nz = nz
        # todo: replace 8 with 16
        if self.has_z:
            # In case of VAE, we will sample from random z vector
            self.fc = nn.Linear(self.nz, 12 * nf * self.sw * self.sh)
        else:
            # Otherwise, we make the network deterministic by starting with
            # downsampled segmentation map instead of random z
            self.fc = nn.Conv2d(semantic_nc, 12 * nf, 3, padding=1)
        self.head_0 = SPADEResnetBlock2(12 * nf, 12 * nf, norm, semantic_nc)
        self.G_middle_0 = SPADEResnetBlock2(12 * nf, 12 * nf, norm, semantic_nc)
        self.G_middle_1 = SPADEResnetBlock2(12 * nf, 12 * nf, norm, semantic_nc)
        self.up_0 = SPADEResnetBlock2(12 * nf, 8 * nf, norm, semantic_nc)
        self.up_1 = SPADEResnetBlock2(8 * nf, 4 * nf, norm, semantic_nc)
        self.up_2 = SPADEResnetBlock2(4 * nf, 2 * nf, norm, semantic_nc)
        self.up_3 = SPADEResnetBlock2(2 * nf, 1 * nf, norm, semantic_nc)
        final_nc = nf
        if n_up == 'most':
            self.up_4 = SPADEResnetBlock2(1 * nf, nf // 2, norm, semantic_nc)
            final_nc = nf // 2
        self.conv_img_pre = SEResBlock2(final_nc)
        self.conv_img = nn.Conv2d(final_nc, target_nc, 5, padding=2)
        self.up = nn.Upsample(scale_factor=2)

    def compute_latent_vector_size(self, n_up, crop_size):
        """Return the starting (sw, sh) spatial size implied by `n_up`."""
        if n_up == 'normal':
            num_up_layers = 5
        elif n_up == 'more':
            num_up_layers = 6
        elif n_up == 'most':
            num_up_layers = 7
        else:
            raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
                             n_up)
        sw = crop_size // (2**num_up_layers)
        sh = sw
        return sw, sh

    def forward(self, input, z=None):
        seg = input
        if self.has_z:
            # we sample z from unit normal and reshape the tensor
            if z is None:
                print("Missing z vector, sampling from normal")
                # FIX: use input.device instead of input.get_device() --
                # get_device() is only valid for CUDA tensors, so sampling a
                # missing z used to fail on CPU.
                z = torch.randn(input.size(0), self.nz,
                                dtype=torch.float32, device=input.device)
            x = self.fc(z)
            x = x.view(-1, 12 * self.nf, self.sh, self.sw)
        else:
            # we downsample segmap and run convolution
            x = F.interpolate(seg, size=(self.sh, self.sw))
            x = self.fc(x)
        seg_1 = F.interpolate(seg, size=[self.sh, self.sw])
        x = self.head_0(x, seg_1)
        x = self.up(x)
        x = self.G_middle_0(x, seg)
        if self.n_up == 'more' or \
           self.n_up == 'most':
            x = self.up(x)
        x = self.G_middle_1(x, seg)
        x = self.up(x)
        x = self.up_0(x, seg)
        x = self.up(x)
        x = self.up_1(x, seg)
        x = self.up(x)
        x = self.up_2(x, seg)
        x = self.up(x)
        x = self.up_3(x, seg)
        if self.n_up == 'most':
            x = self.up(x)
            x = self.up_4(x, seg)
        x = self.conv_img_pre(x)
        x = self.conv_img(F.leaky_relu(x, 2e-1, inplace=True))
        # FIX: F.tanh is deprecated; torch.tanh is the supported equivalent.
        x = torch.tanh(x)
        return x
class SPADEResnetBlock2(nn.Module):
    """Residual block conditioned on a semantic map via SPADE2 normalization
    (the depth-aware SPADE variant)."""

    def __init__(self, fin, fout, norm, semantic_nc):
        super().__init__()
        self.learned_shortcut = (fin != fout)
        self.semantic_nc = semantic_nc
        fmiddle = min(fin, fout)
        # Main-path convolutions (and a 1x1 projection when fin != fout).
        self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=1)
        self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=1)
        if self.learned_shortcut:
            self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
        # Optionally wrap every conv in spectral normalization.
        if 'spectral' in norm:
            self.conv_0 = spectral_norm(self.conv_0)
            self.conv_1 = spectral_norm(self.conv_1)
            if self.learned_shortcut:
                self.conv_s = spectral_norm(self.conv_s)
        # SPADE2 layers consume the config string with 'spectral' stripped.
        spade_config_str = norm.replace('spectral', '')
        self.norm_0 = SPADE2(spade_config_str, fin, self.semantic_nc)
        self.norm_1 = SPADE2(spade_config_str, fmiddle, self.semantic_nc)
        if self.learned_shortcut:
            self.norm_s = SPADE2(spade_config_str, fin, self.semantic_nc)

    def forward(self, x, seg):
        # The block takes |seg|, the semantic map, for every SPADE2 layer.
        x_s = self.shortcut(x, seg)
        dx = self.conv_0(self.actvn(self.norm_0(x, seg)))
        dx = self.conv_1(self.actvn(self.norm_1(dx, seg)))
        return x_s + dx

    def shortcut(self, x, seg):
        if not self.learned_shortcut:
            return x
        return self.conv_s(self.norm_s(x, seg))

    def actvn(self, x):
        return F.leaky_relu(x, 2e-1, inplace=True)
class SPADE2(nn.Module):
    """SPADE variant that embeds channel 0 of the map (presumably depth --
    TODO confirm against callers) and the remaining label channels through
    separate convs before the shared MLP.

    Args:
        config_text: e.g. 'spadeinstance3x3' -> instance norm, 3x3 kernels.
        norm_nc: channels of the feature map being normalized.
        label_nc: channels of the conditioning map (1 depth + label_nc-1 labels).
    """
    def __init__(self, config_text, norm_nc, label_nc):
        super().__init__()
        assert config_text.startswith('spade')
        # FIX: raw string for the regex -- '\D'/'\d' in a plain literal are
        # invalid escape sequences (SyntaxWarning on modern Python).
        parsed = re.search(r'spade(\D+)(\d)x\d', config_text)
        param_free_norm_type = str(parsed.group(1))
        ks = int(parsed.group(2))
        if param_free_norm_type == 'instance':
            self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
        elif param_free_norm_type == 'syncbatch':
            # Synchronized batch norm is intentionally unsupported here.
            raise ValueError
        elif param_free_norm_type == 'batch':
            self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
        else:
            raise ValueError('%s is not a recognized param-free norm type in SPADE'
                             % param_free_norm_type)
        # The dimension of the intermediate embedding space. Yes, hardcoded.
        nhidden = 128
        pw = ks // 2
        self.mlp_preshared_depth = nn.Sequential(nn.Conv2d(1, nhidden//8, kernel_size=ks, padding=pw))
        self.mlp_preshared_label = nn.Sequential(nn.Conv2d(label_nc-1, nhidden//2, kernel_size=1, padding=0))
        self.mlp_shared = nn.Sequential(
            nn.Conv2d(nhidden//8+nhidden//2, nhidden, kernel_size=1, padding=0),
            nn.ReLU(inplace=True)
        )
        self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
        self.mlp_beta = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)

    def forward(self, x, segmap):
        # Part 1. generate parameter-free normalized activations
        normalized = self.param_free_norm(x)
        # Part 2. produce scaling and bias conditioned on semantic map:
        # channel 0 and the remaining channels are embedded separately,
        # concatenated, then fused.
        segmap = F.interpolate(segmap, size=x.size()[2:], mode='bilinear')
        preactv_depth = self.mlp_preshared_depth(segmap[:, 0:1, :, :])
        preactv_label = self.mlp_preshared_label(segmap[:, 1:, :, :])
        postactv_segmap = torch.cat((preactv_depth, preactv_label), dim=1)
        actv = self.mlp_shared(postactv_segmap)
        gamma = self.mlp_gamma(actv)
        beta = self.mlp_beta(actv)
        # apply scale and bias
        out = normalized * (1 + gamma) + beta
        return out
class PSPModule(nn.Module):
    """Pyramid scene parsing pooling: concatenate multi-scale pooled context
    with the input features, then fuse with a 1x1 conv and LeakyReLU."""

    def __init__(self, features, out_features=256, sizes=(1, 2, 4, 8)):
        super().__init__()
        # (removed dead `self.stages = []` that was immediately overwritten)
        self.stages = nn.ModuleList([self._make_stage(features, size) for size in sizes])
        # Input to the bottleneck: original features + one pooled copy per size.
        self.bottleneck = nn.Conv2d(features * (len(sizes) + 1), out_features, kernel_size=1)
        self.acti = nn.LeakyReLU(0.2, True)

    def _make_stage(self, features, size):
        # Pool to a fixed (size x size) grid, then project channel-wise.
        prior = nn.AdaptiveAvgPool2d(output_size=(size, size))
        conv = nn.Conv2d(features, features, kernel_size=1, bias=False)
        return nn.Sequential(prior, conv)

    def forward(self, feats):
        h, w = feats.size(2), feats.size(3)
        # FIX: F.upsample is deprecated; F.interpolate is the replacement
        # with identical default (align_corners=False) bilinear behavior.
        priors = [F.interpolate(stage(feats), size=(h, w), mode='bilinear')
                  for stage in self.stages] + [feats]
        bottle = self.bottleneck(torch.cat(priors, 1))
        return self.acti(bottle)
class ConvEncoder_PSP_SE(nn.Module):
    """ More powerful network as it seems simply increasing nz does not help """
    """ Try adding a SE and a PSP to model channel/spatial interactions???"""

    def __init__(self, input_nc, output_nc, nef, vae):
        super().__init__()
        self.vae = vae
        # SE residual trunk with a PSP context block after the third stage.
        self.layer1 = SEResBlock3(input_nc, nef, 1)
        self.layer2 = SEResBlock3(nef, nef * 2, 2)
        self.layer3 = SEResBlock3(nef * 2, nef * 4, 2)
        self.psp = PSPModule(nef * 4, nef * 8)
        self.layer4 = SEResBlock3(nef * 8, nef * 8, 2)
        self.layer5 = SEResBlock3(nef * 8, nef * 16, 2)
        self.pool_layer = nn.AdaptiveAvgPool2d(1)
        self.actvn = nn.LeakyReLU(0.2, True)
        # VAE heads (mu / logvar) or a single deterministic head.
        self.fc_mu = nn.Linear(nef * 16, output_nc)
        self.fc_var = nn.Linear(nef * 16, output_nc)
        self.fc_z = nn.Linear(nef * 16, output_nc)

    def forward(self, x):
        # The encoder always operates at 256x256.
        if x.size(2) != 256 or x.size(3) != 256:
            x = F.interpolate(x, size=(256, 256), mode='bilinear')
        for stage in (self.layer1, self.layer2, self.layer3, self.psp,
                      self.layer4, self.layer5):
            x = stage(x)
        x = self.actvn(self.pool_layer(x))
        flat = x.view(x.size(0), -1)
        if self.vae:
            return self.fc_mu(flat), self.fc_var(flat)
        return self.fc_z(flat)
class ConvEncoder_PSP_SE_MMD(nn.Module):
""" More powerful network as it seems simply increasing nz does not help """
""" Try adding a SE and a PSP to model channel/spatial interactions???"""
def __init__(self, input_nc, output_nc, nef):
super().__init__()
# ndf = opt.ngf
self.layer1 = SEResBlock3(input_nc, nef, 1)
self.layer2 = SEResBlock3(nef, nef*2, 2)
self.layer3 = SEResBlock3(nef*2, nef * 4, 2)
self.psp = PSPModule(nef * 4, nef * 8)
self.layer4 = SEResBlock3(nef * 8, nef * 8, 2)
self.layer5 = SEResBlock3(nef * 8, nef * 16, 2)
self.pool_layer = nn.AdaptiveAvgPool2d(1)
self.actvn = nn.LeakyReLU(0.2, True)
# self.fc_mu_pre = nn.Sequential(nn.Linear(nef * 16, 512), nn.ReLU(inplace=True))
# self.fc_mu = nn.Linear(512, output_nc)
#
# self.fc_var_pre = nn.Sequential(nn.Linear(nef * 16, 512), nn.ReLU(inplace=True))
# self.fc_var = nn.Linear(512, output_nc)
self.fc_z_pre = nn.Sequential(nn.Linear(nef * 16, 512), nn.ReLU(inplace=True))
self.fc_z = nn.Linear(512, output_nc)
def forward(self, x):
if x.size(2) != 256 or x.size(3) != 256:
x = F.interpolate(x, size=(256, 256), mode='bilinear')
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.psp(x)
x = self.layer4(x)
x = self.layer5(x)
x = self.pool_layer(x)
x = self.actvn(x)
x = x.view(x.size(0), -1)
return self.fc_z(self.fc_z_pre(x))#, self.fc_mu(self.fc_mu_pre(x)),self.fc_var(self.fc_var_pre(x))
class ConvEncoder_PSP_SE_MMD_2(nn.Module):
""" More powerful network as it seems simply increasing nz does not help """
""" Try adding a SE and a PSP to model channel/spatial interactions???"""
def __init__(self, input_nc, output_nc, nef):
super().__init__()
# ndf = opt.ngf
self.layer1 = SEResBlock3(input_nc, nef, 2)
self.layer2 = SEResBlock3(nef, nef*2, 2)
self.layer3 = SEResBlock3(nef*2, nef * 4, 2)
self.layer4 = SEResBlock3(nef * 4, nef * 8, 2)
self.layer5 = SEResBlock3(nef * 8, nef * 16, 2)
self.layer6 = SEResBlock3(nef * 16, nef * 16, 2)
self.actvn = nn.LeakyReLU(0.2, True)
self.fc_z_pre = nn.Sequential(nn.Linear(nef * 16 * 4 * 4, 512), nn.LeakyReLU(0.2, inplace=True))
self.fc_z = nn.Linear(512, output_nc)
def forward(self, x):
if x.size(2) != 256 or x.size(3) != 256:
x = F.interpolate(x, size=(256, 256), mode='bilinear')
x = self.layer1(x)
x = self.layer2(x)
x = self.layer3(x)
x = self.layer4(x)
x = self.layer5(x)
x = self.layer6(x)
x = self.actvn(x)
x = x.view(x.size(0), -1)
return self.fc_z(self.fc_z_pre(x))
class SPADE3(nn.Module):
def __init__(self, config_text, norm_nc, label_nc):
super().__init__()
assert config_text.startswith('spade')
parsed = re.search('spade(\D+)(\d)x\d', config_text)
param_free_norm_type = str(parsed.group(1))
ks = int(parsed.group(2))
if param_free_norm_type == 'instance':
self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'syncbatch':
raise ValueError
elif param_free_norm_type == 'batch':
self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
else:
raise ValueError('%s is not a recognized param-free norm type in SPADE'
% param_free_norm_type)
# The dimension of the intermediate embedding space. Yes, hardcoded.
nhidden = 128
pw = ks // 2
self.mlp_preshared_depth = nn.Sequential(nn.ReflectionPad2d(pw), nn.Conv2d(1, nhidden//8, kernel_size=ks, padding=0),nn.LeakyReLU(inplace=True))
self.mlp_preshared_label = nn.Sequential(nn.Conv2d(label_nc-1, nhidden//2, kernel_size=1, padding=0),nn.LeakyReLU(inplace=True))
self.mlp_shared = nn.Sequential(
nn.ReflectionPad2d(pw),
nn.Conv2d(nhidden//8+nhidden//2, nhidden, kernel_size=3, padding=0),
nn.ReLU(inplace=True)
)
self.mlp_gamma = nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=pw)
self.mlp_gamma = nn.Sequential(nn.ReflectionPad2d(pw), nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0))
self.mlp_beta = nn.Sequential(nn.ReflectionPad2d(pw), nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0))
def forward(self, x, segmap):
# Part 1. generate parameter-free normalized activations
normalized = self.param_free_norm(x)
# Part 2. produce scaling and bias conditioned on semantic map
segmap = F.interpolate(segmap, size=x.size()[2:], mode='bilinear')
preactv_depth = self.mlp_preshared_depth(segmap[:,0:1,:,:])
preactv_label = self.mlp_preshared_label(segmap[:,1:,:,:])
postactv_segmap = torch.cat((preactv_depth, preactv_label), dim=1)
actv = self.mlp_shared(postactv_segmap)
gamma = self.mlp_gamma(actv)
beta = self.mlp_beta(actv)
# apply scale and bias
out = normalized * (1 + gamma) + beta
return out
class SPADEResnetBlock3(nn.Module):
def __init__(self, fin, fout, norm, semantic_nc):
super().__init__()
# Attributes
self.learned_shortcut = (fin != fout)
self.semantic_nc = semantic_nc
fmiddle = min(fin, fout)
# create conv layers
self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=0)
self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=0)
self.se = SEBlock2(fout, reduction=8)
if self.learned_shortcut:
self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
# apply spectral norm if specified
if 'spectral' in norm:
self.conv_0 = nn.Sequential(nn.ReflectionPad2d(1),spectral_norm(self.conv_0))
self.conv_1 = nn.Sequential(nn.ReflectionPad2d(1),spectral_norm(self.conv_1))
if self.learned_shortcut:
self.conv_s = spectral_norm(self.conv_s)
# define normalization layers
spade_config_str = norm.replace('spectral', '')
self.norm_0 = SPADE3(spade_config_str, fin, self.semantic_nc)
self.norm_1 = SPADE3(spade_config_str, fmiddle, self.semantic_nc)
if self.learned_shortcut:
self.norm_s = SPADE3(spade_config_str, fin, self.semantic_nc)
# note the resnet block with SPADE also takes in |seg|,
# the semantic segmentation map as input
def forward(self, x, seg):
x_s = self.shortcut(x, seg)
dx = self.conv_0(self.actvn(self.norm_0(x, seg)))
dx = self.conv_1(self.actvn(self.norm_1(dx, seg)))
dx = self.se(dx)
out = x_s + dx
return out
def shortcut(self, x, seg):
if self.learned_shortcut:
x_s = self.conv_s(self.norm_s(x, seg))
else:
x_s = x
return x_s
def actvn(self, x):
return F.leaky_relu(x, 2e-1, inplace=True)
class SPADEGenerator3(nn.Module):
def __init__(self, semantic_nc, target_nc, nz, ngf, norm, crop_size, n_up):
super().__init__()
# self.opt = opt
nf = ngf
self.nf = ngf
self.n_up = n_up
self.sw, self.sh = self.compute_latent_vector_size(n_up, crop_size)
self.has_z = nz>0
self.nz = nz
# todo: replace 8 with 16
if self.has_z:
# In case of VAE, we will sample from random z vector
self.fc = nn.Linear(self.nz, 16 * nf * self.sw * self.sh)
else:
# Otherwise, we make the network deterministic by starting with
# downsampled segmentation map instead of random z
self.fc = nn.Conv2d(semantic_nc, 16 * nf, 3, padding=1)
self.head_0 = SPADEResnetBlock3(16 * nf, 16 * nf, norm, semantic_nc)
self.G_middle_0 = SPADEResnetBlock3(16 * nf, 16 * nf, norm, semantic_nc)
self.G_middle_1 = SPADEResnetBlock3(16 * nf, 16 * nf, norm, semantic_nc)
self.up_0 = SPADEResnetBlock3(16 * nf, 8 * nf, norm, semantic_nc)
self.up_1 = SPADEResnetBlock3(8 * nf, 4 * nf, norm, semantic_nc)
self.up_2 = SPADEResnetBlock3(4 * nf, 2 * nf, norm, semantic_nc)
self.up_3 = SPADEResnetBlock3(2 * nf, 1 * nf, norm, semantic_nc)
final_nc = nf
if n_up == 'most':
self.up_4 = SPADEResnetBlock3(1 * nf, nf // 2, norm,semantic_nc)
final_nc = nf // 2
self.conv_img = nn.Conv2d(final_nc, target_nc, 5, padding=2)
self.up = nn.Upsample(scale_factor=2)
def compute_latent_vector_size(self, n_up, crop_size):
if n_up == 'normal':
num_up_layers = 5
elif n_up == 'more':
num_up_layers = 6
elif n_up == 'most':
num_up_layers = 7
else:
raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
n_up)
sw = crop_size // (2**num_up_layers)
sh = sw
return sw, sh
def forward(self, input, z=None):
seg = input
if self.has_z:
# we sample z from unit normal and reshape the tensor
if z is None:
print("Missing z vector, sampling from normal")
z = torch.randn(input.size(0), self.nz,
dtype=torch.float32, device=input.get_device())
x = self.fc(z)
x = x.view(-1, 16 * self.nf, self.sh, self.sw)
else:
# we downsample segmap and run convolution
x = F.interpolate(seg, size=(self.sh, self.sw))
x = self.fc(x)
seg_1 = F.interpolate(seg, size=[self.sh, self.sw])
x = self.head_0(x, seg_1)
x = self.up(x)
x = self.G_middle_0(x, seg)
if self.n_up == 'more' or \
self.n_up == 'most':
x = self.up(x)
x = self.G_middle_1(x, seg)
x = self.up(x)
x = self.up_0(x, seg)
x = self.up(x)
x = self.up_1(x, seg)
x = self.up(x)
x = self.up_2(x, seg)
x = self.up(x)
x = self.up_3(x, seg)
if self.n_up == 'most':
x = self.up(x)
x = self.up_4(x, seg)
x = self.conv_img(F.leaky_relu(x, 2e-1, inplace=True))
x = F.tanh(x)
return x
class MultiscaleDiscriminator_MMD(nn.Module):
# @staticmethod
# def modify_commandline_options(parser, is_train):
# parser.add_argument('--netD_subarch', type=str, default='n_layer',
# help='architecture of each discriminator')
# parser.add_argument('--num_D', type=int, default=2,
# help='number of discriminators to be used in multiscale')
# opt, _ = parser.parse_known_args()
#
# # define properties of each discriminator of the multiscale discriminator
# subnetD = util.find_class_in_module(opt.netD_subarch + 'discriminator',
# 'models.networks.discriminator')
# subnetD.modify_commandline_options(parser, is_train)
#
# return parser
def __init__(self, input_nc, conditional_nc, ndf, norm_layer, n_layers, num_D=2, use_feat_loss=True):
super().__init__()
self.use_feat_loss = use_feat_loss
for i in range(num_D):
subnetD = self.create_single_discriminator(input_nc, conditional_nc, ndf, norm_layer, n_layers, use_feat_loss)
self.add_module('discriminator_%d' % i, subnetD)
n_layers = n_layers - 1
def create_single_discriminator(self, input_nc, conditional_nc, ndf, norm_layer, n_layers, use_feat_loss):
subarch = 'n_layer'
if subarch == 'n_layer':
print("Selected n_layer pix2pixHD discrim")
netD = NLayerDiscriminator_MMD(input_nc, conditional_nc, ndf, norm_layer, n_layers, use_feat_loss)
else:
raise ValueError('unrecognized discriminator subarchitecture %s' % subarch)
return netD
def downsample(self, input):
return F.avg_pool2d(input, kernel_size=3,
stride=2, padding=[1, 1],
count_include_pad=False)
# Returns list of lists of discriminator outputs.
# The final result is of size opt.num_D x opt.n_layers_D
def forward(self, input):
result = []
get_intermediate_features = self.use_feat_loss
for name, D in self.named_children():
out = D(input)
if not get_intermediate_features:
out = [out]
result.append(out)
input = self.downsample(input)
return result
# From SPADE
class NLayerDiscriminator_MMD(nn.Module):
# @staticmethod
# def modify_commandline_options(parser, is_train):
# parser.add_argument('--n_layers_D', type=int, default=3,
# help='# layers in each discriminator')
# return parser
def __init__(self, input_nc, conditional_nc, ndf, norm_layer, n_layers, use_feat_loss, nz=256):
super().__init__()
# self.opt = opt
kw = 4
padw = int(np.ceil((kw - 1.0) / 2))
nf = ndf
if conditional_nc <= 0:
print("Creating Pix2PixHD discriminator")
print("0 dimensional input set")
input_nc_total = input_nc + conditional_nc
norm_layer = get_nonspade_norm_layer(norm_layer)
sequence = [[nn.Conv2d(input_nc_total, nf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)]]
self.use_feat_loss = use_feat_loss
for n in range(1, n_layers):
nf_prev = nf
nf = min(nf * 2, 512)
stride_val = 1 if n == n_layers - 1 else 2
sequence += [[norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw,
stride=stride_val, padding=padw)),
nn.LeakyReLU(0.2, True)
]]
# sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
# sequence += [[nn.Conv2d(nf, 1, kernel_size=1, stride=1, padding=1)]]
self.decide = nn.Conv2d(nf, 1, kernel_size=1, stride=1, padding=0)
self.z_out = nn.Sequential(nn.Conv2d(nf, nf, kernel_size=1, stride=1, padding=0),nn.LeakyReLU(inplace=True),nn.Conv2d(nf, nz, kernel_size=1, stride=1, padding=0), nn.AdaptiveAvgPool2d(1))
# We divide the layers into groups to extract intermediate layer outputs
for n in range(len(sequence)):
self.add_module('model' + str(n), nn.Sequential(*sequence[n]))
# def compute_D_input_nc(self, opt):
# input_nc = opt.label_nc + opt.output_nc
# if opt.contain_dontcare_label:
# input_nc += 1
# if not opt.no_instance:
# input_nc += 1
# return input_nc
def forward(self, input):
results = [input]
for submodel in self.named_children():
if ("decide" not in submodel[0]) and ("z_out" not in submodel[0]):
intermediate_output = submodel[1](results[-1])
results.append(intermediate_output)
results.append((self.decide(results[-1]), self.z_out(results[-1])))
get_intermediate_features = self.use_feat_loss
if get_intermediate_features:
return results[1:]
else:
return results[-1]
class MultiscaleDiscriminator_MMD_2(nn.Module):
def __init__(self, input_nc, conditional_nc, ndf, norm_layer, n_layers, num_D=2, use_feat_loss=True):
super().__init__()
self.use_feat_loss = use_feat_loss
for i in range(num_D):
subnetD = self.create_single_discriminator(input_nc, conditional_nc, ndf, norm_layer, n_layers, use_feat_loss)
self.add_module('discriminator_%d' % i, subnetD)
n_layers = n_layers - 1
def create_single_discriminator(self, input_nc, conditional_nc, ndf, norm_layer, n_layers, use_feat_loss):
subarch = 'n_layer'
if subarch == 'n_layer':
print("Selected n_layer pix2pixHD discrim")
netD = NLayerDiscriminator_MMD_2(input_nc, conditional_nc, ndf, norm_layer, n_layers, use_feat_loss)
else:
raise ValueError('unrecognized discriminator subarchitecture %s' % subarch)
return netD
def downsample(self, input):
return F.avg_pool2d(input, kernel_size=3,
stride=2, padding=[1, 1],
count_include_pad=False)
# Returns list of lists of discriminator outputs.
# The final result is of size opt.num_D x opt.n_layers_D
def forward(self, input):
result = []
get_intermediate_features = self.use_feat_loss
for name, D in self.named_children():
out = D(input)
if not get_intermediate_features:
out = [out]
result.append(out)
input = self.downsample(input)
return result
# From SPADE
class NLayerDiscriminator_MMD_2(nn.Module):
# @staticmethod
# def modify_commandline_options(parser, is_train):
# parser.add_argument('--n_layers_D', type=int, default=3,
# help='# layers in each discriminator')
# return parser
def __init__(self, input_nc, conditional_nc, ndf, norm_layer, n_layers, use_feat_loss, nz=256):
super().__init__()
# self.opt = opt
kw = 4
padw = int(np.ceil((kw - 1.0) / 2))
nf = ndf
if conditional_nc <= 0:
print("Creating Pix2PixHD discriminator")
print("0 dimensional input set")
input_nc_total = input_nc + conditional_nc
norm_layer = get_nonspade_norm_layer(norm_layer)
sequence = [[nn.Conv2d(input_nc_total, nf, kernel_size=kw, stride=2, padding=padw),
nn.LeakyReLU(0.2, True)]]
self.use_feat_loss = use_feat_loss
for n in range(1, n_layers):
nf_prev = nf
nf = min(nf * 2, 512)
stride_val = 1 if n == n_layers - 1 else 2
sequence += [[norm_layer(nn.Conv2d(nf_prev, nf, kernel_size=kw,stride=stride_val, padding=padw)),
nn.LeakyReLU(0.2, True)
]]
# sequence += [[nn.Conv2d(nf, 1, kernel_size=kw, stride=1, padding=padw)]]
# sequence += [[nn.Conv2d(nf, 1, kernel_size=1, stride=1, padding=1)]]
self.decide = nn.Conv2d(nf, 1, kernel_size=1, stride=1, padding=0)
self.z_out = nn.Sequential(nn.Conv2d(nf, nf, kernel_size=1, stride=1, padding=0), nn.LeakyReLU(inplace=True),
nn.Conv2d(nf, nz, kernel_size=1, stride=1, padding=0), nn.AdaptiveAvgPool2d(1))
# We divide the layers into groups to extract intermediate layer outputs
for n in range(len(sequence)):
self.add_module('model' + str(n), nn.Sequential(*sequence[n]))
# def compute_D_input_nc(self, opt):
# input_nc = opt.label_nc + opt.output_nc
# if opt.contain_dontcare_label:
# input_nc += 1
# if not opt.no_instance:
# input_nc += 1
# return input_nc
def forward(self, input):
results = [input]
for submodel in self.named_children():
if ("decide" not in submodel[0]) and ("z_out" not in submodel[0]):
intermediate_output = submodel[1](results[-1])
results.append(intermediate_output)
results.append((self.decide(results[-1]), self.z_out(results[-1])))
get_intermediate_features = self.use_feat_loss
if get_intermediate_features:
return results[1:]
else:
return results[-1]
class SPADE4(nn.Module):
def __init__(self, config_text, norm_nc, label_nc):
super().__init__()
assert config_text.startswith('spade')
parsed = re.search('spade(\D+)(\d)x\d', config_text)
param_free_norm_type = str(parsed.group(1))
ks = int(parsed.group(2))
if param_free_norm_type == 'instance':
self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'syncbatch':
raise ValueError
elif param_free_norm_type == 'batch':
self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'layer':
self.param_free_norm = LayerNorm2D(norm_nc, affine=False)
else:
raise ValueError('%s is not a recognized param-free norm type in SPADE'
% param_free_norm_type)
# The dimension of the intermediate embedding space. Yes, hardcoded.
nhidden = 128
pw = ks // 2
self.mlp_preshared_depth = nn.Sequential(nn.ReflectionPad2d(pw), nn.Conv2d(1, nhidden//8, kernel_size=ks, padding=0),nn.LeakyReLU(inplace=True))
self.mlp_shared = nn.Sequential(
nn.ReflectionPad2d(pw),
nn.Conv2d(nhidden//8+label_nc-1, nhidden, kernel_size=3, padding=0),
nn.ReLU(inplace=True)
)
self.mlp_gamma = nn.Sequential(nn.ReflectionPad2d(pw), nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0))
self.mlp_beta = nn.Sequential(nn.ReflectionPad2d(pw), nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0))
def forward(self, x, segmap):
# Part 1. generate parameter-free normalized activations
normalized = self.param_free_norm(x)
# Part 2. produce scaling and bias conditioned on semantic map
segmap = F.interpolate(segmap, size=x.size()[2:], mode='bilinear')
preactv_depth = self.mlp_preshared_depth(segmap[:,0:1,:,:])
postactv_segmap = torch.cat((preactv_depth, segmap[:,1:,:,:]), dim=1)
actv = self.mlp_shared(postactv_segmap)
gamma = self.mlp_gamma(actv)
beta = self.mlp_beta(actv)
# apply scale and bias
out = normalized * (1 + gamma) + beta
return out
class SPADEResnetBlock4(nn.Module):
def __init__(self, fin, fout, norm, semantic_nc):
super().__init__()
# Attributes
self.learned_shortcut = (fin != fout)
self.semantic_nc = semantic_nc
fmiddle = min(fin, fout)
# create conv layers
self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=0)
self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=0)
self.se = SEBlock2(fout, reduction=8)
if self.learned_shortcut:
self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
# apply spectral norm if specified
if 'spectral' in norm:
self.conv_0 = nn.Sequential(nn.ReflectionPad2d(1),spectral_norm(self.conv_0))
self.conv_1 = nn.Sequential(nn.ReflectionPad2d(1),spectral_norm(self.conv_1))
if self.learned_shortcut:
self.conv_s = spectral_norm(self.conv_s)
# define normalization layers
spade_config_str = norm.replace('spectral', '')
self.norm_0 = SPADE4(spade_config_str, fin, self.semantic_nc)
self.norm_1 = SPADE4(spade_config_str, fmiddle, self.semantic_nc)
if self.learned_shortcut:
self.norm_s = SPADE4(spade_config_str, fin, self.semantic_nc)
# note the resnet block with SPADE also takes in |seg|,
# the semantic segmentation map as input
def forward(self, x, seg):
x_s = self.shortcut(x, seg)
dx = self.conv_0(self.actvn(self.norm_0(x, seg)))
dx = self.conv_1(self.actvn(self.norm_1(dx, seg)))
dx = self.se(dx)
out = x_s + dx
return out
def shortcut(self, x, seg):
if self.learned_shortcut:
x_s = self.conv_s(self.norm_s(x, seg))
else:
x_s = x
return x_s
def actvn(self, x):
return F.leaky_relu(x, 2e-1, inplace=True)
class SPADEGenerator4(nn.Module):
def __init__(self, semantic_nc, target_nc, nz, ngf, norm, crop_size, n_up):
super().__init__()
# self.opt = opt
nf = ngf
self.nf = ngf
self.n_up = n_up
self.sw, self.sh = self.compute_latent_vector_size(n_up, crop_size)
self.has_z = nz>0
self.nz = nz
# todo: replace 8 with 16
if self.has_z:
# In case of VAE, we will sample from random z vector
self.fc = nn.Linear(self.nz, 16 * nf * self.sw * self.sh)
else:
# Otherwise, we make the network deterministic by starting with
# downsampled segmentation map instead of random z
self.fc = nn.Conv2d(semantic_nc, 16 * nf, 3, padding=1)
self.head_0 = SPADEResnetBlock4(16 * nf, 16 * nf, norm, semantic_nc)
self.G_middle_0 = SPADEResnetBlock4(16 * nf, 16 * nf, norm, semantic_nc)
self.G_middle_1 = SPADEResnetBlock4(16 * nf, 16 * nf, norm, semantic_nc)
self.up_0 = SPADEResnetBlock4(16 * nf, 8 * nf, norm, semantic_nc)
self.up_1 = SPADEResnetBlock4(8 * nf, 4 * nf, norm, semantic_nc)
self.up_2 = SPADEResnetBlock4(4 * nf, 2 * nf, norm, semantic_nc)
self.up_3 = SPADEResnetBlock4(2 * nf, 1 * nf, norm, semantic_nc)
final_nc = nf
if n_up == 'most':
self.up_4 = SPADEResnetBlock4(1 * nf, nf // 2, norm,semantic_nc)
final_nc = nf // 2
self.conv_img = nn.Conv2d(final_nc, target_nc, 5, padding=2)
self.up_b = nn.Upsample(scale_factor=2, mode='bilinear')
self.up_n = nn.Upsample(scale_factor=2, mode='nearest')
def compute_latent_vector_size(self, n_up, crop_size):
if n_up == 'normal':
num_up_layers = 5
elif n_up == 'more':
num_up_layers = 6
elif n_up == 'most':
num_up_layers = 7
else:
raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
n_up)
sw = crop_size // (2**num_up_layers)
sh = sw
return sw, sh
def forward(self, input, z=None):
seg = input
if self.has_z:
# we sample z from unit normal and reshape the tensor
if z is None:
print("Missing z vector, sampling from normal")
z = torch.randn(input.size(0), self.nz,
dtype=torch.float32, device=input.get_device())
x = self.fc(z)
x = x.view(-1, 16 * self.nf, self.sh, self.sw)
else:
# we downsample segmap and run convolution
x = F.interpolate(seg, size=(self.sh, self.sw))
x = self.fc(x)
seg_1 = F.interpolate(seg, size=[self.sh, self.sw])
x = self.head_0(x, seg_1)
x = self.up_n(x)
x = self.G_middle_0(x, seg)
if self.n_up == 'more' or \
self.n_up == 'most':
x = self.up(x)
x = self.G_middle_1(x, seg)
x = self.up_n(x)
x = self.up_0(x, seg)
x = self.up_n(x)
x = self.up_1(x, seg)
x = self.up_n(x)
x = self.up_2(x, seg)
x = self.up_b(x)
x = self.up_3(x, seg)
if self.n_up == 'most':
x = self.up(x)
x = self.up_4(x, seg)
x = self.conv_img(F.leaky_relu(x, 2e-1, inplace=True))
x = F.tanh(x)
return x
class SPADE5(nn.Module):
def __init__(self, config_text, norm_nc, label_nc):
super().__init__()
assert config_text.startswith('spade')
parsed = re.search('spade(\D+)(\d)x\d', config_text)
param_free_norm_type = str(parsed.group(1))
ks = int(parsed.group(2))
if param_free_norm_type == 'instance':
self.param_free_norm = nn.InstanceNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'syncbatch':
raise ValueError
elif param_free_norm_type == 'batch':
self.param_free_norm = nn.BatchNorm2d(norm_nc, affine=False)
elif param_free_norm_type == 'layer':
self.param_free_norm = LayerNorm2D(norm_nc, affine=False)
else:
raise ValueError('%s is not a recognized param-free norm type in SPADE'
% param_free_norm_type)
# The dimension of the intermediate embedding space. Yes, hardcoded.
nhidden = 128
pw = ks // 2
self.mlp_preshared_depth = nn.Sequential(nn.ReflectionPad2d(pw),
nn.Conv2d(1, 40, kernel_size=ks, padding=0),
nn.Tanh())
self.mlp_shared = nn.Sequential(
nn.ReflectionPad2d(pw),
nn.Conv2d(80, nhidden, kernel_size=3, padding=0),
nn.LeakyReLU(inplace=True)
)
self.mlp_gamma = nn.Sequential(nn.ReflectionPad2d(pw), nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0))
self.mlp_beta = nn.Sequential(nn.ReflectionPad2d(pw), nn.Conv2d(nhidden, norm_nc, kernel_size=ks, padding=0))
def forward(self, x, segmap):
# Part 1. generate parameter-free normalized activations
normalized = self.param_free_norm(x)
# Part 2. produce scaling and bias conditioned on semantic map
segmap = F.interpolate(segmap, size=x.size()[2:], mode='bilinear')
preactv_depth = self.mlp_preshared_depth(segmap[:, 0:1, :, :])*segmap[:,1:,:,:]
postactv_segmap = torch.cat((preactv_depth, segmap[:,1:,:,:]), dim=1)
actv = self.mlp_shared(postactv_segmap)
gamma = self.mlp_gamma(actv)
beta = self.mlp_beta(actv)
# apply scale and bias
out = normalized * (1 + gamma) + beta
return out
class SPADEResnetBlock5(nn.Module):
def __init__(self, fin, fout, norm, semantic_nc):
super().__init__()
# Attributes
self.learned_shortcut = (fin != fout)
self.semantic_nc = semantic_nc
fmiddle = min(fin, fout)
# create conv layers
self.conv_0 = nn.Conv2d(fin, fmiddle, kernel_size=3, padding=0)
self.conv_1 = nn.Conv2d(fmiddle, fout, kernel_size=3, padding=0)
if self.learned_shortcut:
self.conv_s = nn.Conv2d(fin, fout, kernel_size=1, bias=False)
# apply spectral norm if specified
if 'spectral' in norm:
self.conv_0 = nn.Sequential(nn.ReflectionPad2d(1),spectral_norm(self.conv_0))
self.conv_1 = nn.Sequential(nn.ReflectionPad2d(1),spectral_norm(self.conv_1))
if self.learned_shortcut:
self.conv_s = spectral_norm(self.conv_s)
# define normalization layers
spade_config_str = norm.replace('spectral', '')
self.norm_0 = SPADE5(spade_config_str, fin, self.semantic_nc)
self.norm_1 = SPADE5(spade_config_str, fmiddle, self.semantic_nc)
if self.learned_shortcut:
self.norm_s = SPADE5(spade_config_str, fin, self.semantic_nc)
# note the resnet block with SPADE also takes in |seg|,
# the semantic segmentation map as input
def forward(self, x, seg):
x_s = self.shortcut(x, seg)
dx = self.conv_0(self.actvn(self.norm_0(x, seg)))
dx = self.conv_1(self.actvn(self.norm_1(dx, seg)))
out = x_s + dx
return out
def shortcut(self, x, seg):
if self.learned_shortcut:
x_s = self.conv_s(self.norm_s(x, seg))
else:
x_s = x
return x_s
def actvn(self, x):
return F.leaky_relu(x, 2e-1, inplace=True)
class SPADEGenerator5(nn.Module):
def __init__(self, semantic_nc, target_nc, nz, ngf, norm, crop_size, n_up):
super().__init__()
# self.opt = opt
nf = ngf
self.nf = ngf
self.n_up = n_up
self.sw, self.sh = self.compute_latent_vector_size(n_up, crop_size)
self.has_z = nz>0
self.nz = nz
# todo: replace 8 with 16
if self.has_z:
# In case of VAE, we will sample from random z vector
self.fc = nn.Linear(self.nz, 16 * nf * self.sw * self.sh)
else:
# Otherwise, we make the network deterministic by starting with
# downsampled segmentation map instead of random z
self.fc = nn.Conv2d(semantic_nc, 16 * nf, 3, padding=1)
self.head_0 = SPADEResnetBlock5(16 * nf, 16 * nf, norm, semantic_nc)
self.G_middle_0 = SPADEResnetBlock5(16 * nf, 16 * nf, norm, semantic_nc)
self.G_middle_1 = SPADEResnetBlock5(16 * nf, 16 * nf, norm, semantic_nc)
self.up_0 = SPADEResnetBlock5(16 * nf, 8 * nf, norm, semantic_nc)
self.up_1 = SPADEResnetBlock5(8 * nf, 4 * nf, norm, semantic_nc)
self.up_2 = SPADEResnetBlock5(4 * nf, 2 * nf, norm, semantic_nc)
self.up_3 = SPADEResnetBlock5(2 * nf, 1 * nf, norm, semantic_nc)
final_nc = nf
if n_up == 'most':
self.up_4 = SPADEResnetBlock4(1 * nf, nf // 2, norm,semantic_nc)
final_nc = nf // 2
self.conv_img = nn.Conv2d(final_nc, target_nc, 3, padding=1)
self.up_b = nn.Upsample(scale_factor=2, mode='bilinear')
self.up_n = nn.Upsample(scale_factor=2, mode='nearest')
def compute_latent_vector_size(self, n_up, crop_size):
if n_up == 'normal':
num_up_layers = 5
elif n_up == 'more':
num_up_layers = 6
elif n_up == 'most':
num_up_layers = 7
else:
raise ValueError('opt.num_upsampling_layers [%s] not recognized' %
n_up)
sw = crop_size // (2**num_up_layers)
sh = sw
return sw, sh
def forward(self, input, z=None):
seg = input
if self.has_z:
# we sample z from unit normal and reshape the tensor
if z is None:
print("Missing z vector, sampling from normal")
z = torch.randn(input.size(0), self.nz,
dtype=torch.float32, device=input.get_device())
x = self.fc(z)
x = x.view(-1, 16 * self.nf, self.sh, self.sw)
else:
# we downsample segmap and run convolution
x = F.interpolate(seg, size=(self.sh, self.sw))
x = self.fc(x)
seg_1 = F.interpolate(seg, size=[self.sh, self.sw])
x = self.head_0(x, seg_1)
x = self.up_n(x)
x = self.G_middle_0(x, seg)
if self.n_up == 'more' or \
self.n_up == 'most':
x = self.up(x)
x = self.G_middle_1(x, seg)
x = self.up_n(x)
x = self.up_0(x, seg)
x = self.up_n(x)
x = self.up_1(x, seg)
x = self.up_n(x)
x = self.up_2(x, seg)
x = self.up_b(x)
x = self.up_3(x, seg)
if self.n_up == 'most':
x = self.up(x)
x = self.up_4(x, seg)
x = self.conv_img(F.leaky_relu(x, 2e-1, inplace=True))
x = F.tanh(x)
return x | 38.515807 | 195 | 0.588359 | 9,528 | 69,444 | 4.085957 | 0.048594 | 0.016568 | 0.010018 | 0.014384 | 0.834502 | 0.816008 | 0.80206 | 0.787984 | 0.776964 | 0.768462 | 0 | 0.025949 | 0.297448 | 69,444 | 1,803 | 196 | 38.515807 | 0.772014 | 0.119751 | 0 | 0.733691 | 0 | 0 | 0.036634 | 0.00206 | 0 | 0 | 0 | 0.000555 | 0.006907 | 1 | 0.071374 | false | 0.00307 | 0.004605 | 0.00614 | 0.156562 | 0.010744 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
d828c065004c43772783e8432c8458c1c583d7b5 | 28,180 | py | Python | test/development/test_headers_based_rule.py | denz/ldp | e49cff6f39a4b6d68998d90b8c75158e5b9b450a | [
"BSD-3-Clause"
] | null | null | null | test/development/test_headers_based_rule.py | denz/ldp | e49cff6f39a4b6d68998d90b8c75158e5b9b450a | [
"BSD-3-Clause"
] | null | null | null | test/development/test_headers_based_rule.py | denz/ldp | e49cff6f39a4b6d68998d90b8c75158e5b9b450a | [
"BSD-3-Clause"
] | null | null | null | import json
from unittest import TestCase
from flask import Flask
from ldp.resource import Resource
from ldp.rule import match_headers
from ldp import LDPApp
class HeadersBasedRuleTest(TestCase):
def setUp(self):
self.app = LDPApp(__name__)
self.app.debug = True
self.c = self.app.test_client()
class TestHeaderMatch(HeadersBasedRuleTest):
def test_simple_matching(self):
@self.app.route(match_headers('/', Accept='application/json'))
def json():
return 'JSON'
@self.app.route('/')
def view():
return 'DEFAULT'
response = self.c.get('/', headers={})
self.assertEqual(response.data, b'DEFAULT')
response = self.c.get('/', headers={'Accept':'application/json'})
self.assertEqual(response.data, b'JSON')
def test_parameter_matching(self):
@self.app.route(match_headers('/', Accept='application/json', Test='x<int:i>'))
def json_p(**kwargs):
return 'JSON%s'%kwargs['i']
@self.app.route(match_headers('/', Accept='application/json'))
def json():
return 'JSON'
@self.app.route('/')
def view():
return 'DEFAULT'
response = self.c.get('/', headers={})
self.assertEqual(response.data, b'DEFAULT')
response = self.c.get('/', headers={'Accept':'application/json'})
self.assertEqual(response.data, b'JSON')
response = self.c.get('/', headers={'Accept':'application/json', 'Test':'x20'})
self.assertEqual(response.data, b'JSON20')
class TestArgumentsCombinations(HeadersBasedRuleTest):
def test_0(self):
@self.app.route('/')
def view():
return json.dumps({})
response = self.c.get('/')
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {})
response = self.c.get('/ttt')
self.assertTrue(response.status_code, 404)
def test_1(self):
@self.app.route(match_headers('/', **{'Accept': 'text/turtle'}))
def view():
return json.dumps({})
response = self.c.get('/', headers={'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {})
response = self.c.get('/', headers={})
self.assertTrue(response.status_code, 404)
def test_2(self):
@self.app.route(match_headers('/',
**{'Cache-Control': 'public, max-age=<int:max_age>'}))
def view(max_age):
return json.dumps({'max_age':max_age})
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=30'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=xx'})
self.assertTrue(response.status_code, 404)
def test_3(self):
@self.app.route(match_headers('/',
**{'Cache-Control': 'public,[ \t]max-age=<int:max_age>'}))
def view(max_age):
return json.dumps({'max_age':max_age})
response = self.c.get('/', headers={'Cache-Control':'public,\tmax-age=30'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=30'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/', headers={'Cache-Control': 'public,\tmax-age=xx'})
self.assertTrue(response.status_code, 404)
def test_4(self):
@self.app.route(match_headers('/',
**{'Cache-Control': 'public, max-age=<int:max_age>',
'Accept': 'text/*'}))
def view(max_age):
return json.dumps({'max_age':max_age})
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=30',
'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=30'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/', headers={'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=xx',
'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=30',
'Accept': 'application/turtle'})
self.assertTrue(response.status_code, 404)
def test_5(self):
@self.app.route(match_headers('/',
**{'Cache-Control': 'public,[ \t]max-age=<int:max_age>',
'Accept': 'text/*'}))
def view(max_age):
return json.dumps({'max_age':max_age})
response = self.c.get('/', headers={'Cache-Control': 'public,\tmax-age=30',
'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=30',
'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/', headers={'Cache-Control': 'public,\tmax-age=30'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/', headers={'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/', headers={'Cache-Control': 'public,\tmax-age=xx',
'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/', headers={'Cache-Control': 'public,\tmax-age=30',
'Accept': 'application/turtle'})
self.assertTrue(response.status_code, 404)
def test_6(self):
@self.app.route(match_headers('/',
**{'Cache-Control': 'public, max-age=<int:max_age>',
'Accept': 'text/turtle'}))
def view(max_age):
return json.dumps({'max_age':max_age})
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=30',
'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=30'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/', headers={'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=30',
'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 200)
def test_7(self):
@self.app.route(match_headers('/',
**{'Cache-Control': 'public,[ \t]max-age=<int:max_age>',
'Accept': 'text/turtle'}))
def view(max_age):
return json.dumps({'max_age':max_age})
response = self.c.get('/', headers={'Cache-Control': 'public,\tmax-age=30',
'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=30',
'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/', headers={'Cache-Control': 'public,\tmax-age=30'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/', headers={'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/', headers={'Cache-Control': 'public,\tmax-age=xx',
'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/', headers={'Cache-Control': 'public,\tmax-age=30',
'Accept': 'application/turtle'})
self.assertTrue(response.status_code, 404)
def test_8(self):
@self.app.route(match_headers('/',
**{'Cache-Control': 'public, max-age=<int:max_age>',
'Accept': 'text/<mime>'}))
def view(max_age, mime):
return json.dumps({'max_age':max_age,'mime':mime})
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=30',
'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30, 'mime':'turtle'})
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=30'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/', headers={'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=xx', 'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=30', 'Accept': 'application/turtle'})
self.assertTrue(response.status_code, 404)
def test_9(self):
@self.app.route(match_headers('/',
**{'Cache-Control': 'public,[ \t]max-age=<int:max_age>',
'Accept': 'text/<mime>'}))
def view(max_age, mime):
return json.dumps({'max_age':max_age,'mime':mime})
response = self.c.get('/', headers={'Cache-Control': 'public,\tmax-age=30',
'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30, 'mime':'turtle'})
response = self.c.get('/', headers={'Cache-Control': 'public, max-age=30',
'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30, 'mime':'turtle'})
def test_10(self):
@self.app.route('/test/')
def view():
return json.dumps({})
response = self.c.get('/test/', headers={'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {})
def test_11(self):
@self.app.route(match_headers('/test/',
**{'Accept': 'text/turtle'}))
def view():
return json.dumps({})
response = self.c.get('/test/', headers={'Accept': 'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {})
response = self.c.get('/test/', headers={'Accept': 'application/turtle'})
self.assertTrue(response.status_code, 404)
def test_12(self):
@self.app.route(match_headers('/test/',
**{'Cache-Control': 'public, max-age=<int:max_age>'}))
def view(max_age):
return json.dumps({'max_age':max_age})
response = self.c.get('/test/', headers={'Cache-Control': 'public, max-age=30'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/testz/', headers={'Cache-Control': 'public, max-age=30'})
self.assertTrue(response.status_code, 404)
def test_13(self):
@self.app.route(match_headers('/test/',
**{'Cache-Control': 'public,[ \t]max-age=<int:max_age>'}))
def view(max_age):
return json.dumps({'max_age':max_age})
response = self.c.get('/test/', headers={'Cache-Control': 'public,\tmax-age=30'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/test/', headers={'Cache-Control': 'public, max-age=30'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/testz/', headers={'Cache-Control': 'public, \tmax-age=30'})
self.assertTrue(response.status_code, 404)
def test_14(self):
@self.app.route(match_headers('/test/',
**{'Cache-Control': 'public, max-age=<int:max_age>',
'Accept': 'text/*'}))
def view(max_age):
return json.dumps({'max_age':max_age})
response = self.c.get('/test/', headers={'Cache-Control': 'public, max-age=30',
'Accept':'text/anything'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/test/', headers={'Cache-Control': 'public, max-age=30',
'Accept':'XXX/anything'})
self.assertTrue(response.status_code, 404)
def test_15(self):
@self.app.route(match_headers('/test/',
**{'Cache-Control': 'public,[ \t]max-age=<int:max_age>',
'Accept': 'text/*'}))
def view(max_age):
return json.dumps({'max_age':max_age})
response = self.c.get('/test/', headers={'Cache-Control': 'public,\tmax-age=30',
'Accept':'text/anything'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/test/', headers={'Cache-Control': 'public, max-age=30',
'Accept':'text/anything'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/test/', headers={'Cache-Control': 'public, \tmax-age=30',
'Accept':'text/anything'})
self.assertTrue(response.status_code, 404)
def test_16(self):
@self.app.route(match_headers('/test/',
**{'Cache-Control': 'public, max-age=<int:max_age>',
'Accept': 'text/turtle'}))
def view(max_age):
return json.dumps({'max_age':max_age})
response = self.c.get('/test/', headers={'Cache-Control': 'public, max-age=30',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/test/', headers={'Cache-Control': 'public, max-age=xx',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 404)
def test_17(self):
@self.app.route(match_headers('/test/',
**{'Cache-Control': 'public,[ \t]max-age=<int:max_age>',
'Accept': 'text/turtle'}))
def view(max_age):
return json.dumps({'max_age':max_age})
response = self.c.get('/test/', headers={'Cache-Control': 'public,\tmax-age=30',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/test/', headers={'Cache-Control': 'public, max-age=30',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30})
response = self.c.get('/test/', headers={'Cache-Control': 'public, \testmax-age=30',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 404)
def test_18(self):
@self.app.route(match_headers('/test/',
**{'Cache-Control': 'public, max-age=<int:max_age>',
'Accept': 'text/<mime>'}))
def view(max_age, mime):
return json.dumps({'max_age':max_age,'mime':mime})
response = self.c.get('/test/', headers={'Cache-Control': 'public, max-age=30',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30, 'mime':'turtle'})
def test_19(self):
@self.app.route(match_headers('/test/',
**{'Cache-Control': 'public,[ \t]max-age=<int:max_age>',
'Accept': 'text/<mime>'}))
def view(max_age, mime):
return json.dumps({'max_age':max_age,'mime':mime})
response = self.c.get('/test/', headers={'Cache-Control': 'public,\tmax-age=30',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30, 'mime':'turtle'})
response = self.c.get('/test/', headers={'Cache-Control': 'public, max-age=30',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'max_age':30, 'mime':'turtle'})
def test_20(self):
@self.app.route('/<int:x>')
def view(x,):
return json.dumps({'x':x})
response = self.c.get('/23')
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'x':23})
response = self.c.get('/xx')
self.assertTrue(response.status_code, 404)
def test_21(self):
@self.app.route(match_headers('/<int:x>',
**{'Accept': 'text/turtle'}))
def view(x,):
return json.dumps({'x':x})
response = self.c.get('/23', headers={'Accept':'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'x':23})
response = self.c.get('/23', headers={'Accept':'XXX/turtle'})
self.assertTrue(response.status_code, 404)
def test_22(self):
@self.app.route(match_headers('/<int:x>',
**{'Cache-Control': 'public, max-age=<int:max_age>'}))
def view(x, max_age,):
return json.dumps({'x':x,'max_age':max_age})
response = self.c.get('/23', headers={'Cache-Control': 'public, max-age=30'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'x':23, 'max_age':30})
response = self.c.get('/23', headers={'Cache-Control': 'public, max-age=xx'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/xx', headers={'Cache-Control': 'public, max-age=30'})
self.assertTrue(response.status_code, 404)
def test_23(self):
@self.app.route(match_headers('/<int:x>',
**{'Cache-Control': 'public,[ \t]max-age=<int:max_age>'}))
def view(x, max_age,):
return json.dumps({'x':x,'max_age':max_age})
response = self.c.get('/23', headers={'Cache-Control': 'public, max-age=30'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'x':23, 'max_age':30})
response = self.c.get('/23', headers={'Cache-Control': 'public,\tmax-age=30'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'x':23, 'max_age':30})
def test_24(self):
@self.app.route(match_headers('/<int:x>',
**{'Cache-Control': 'public, max-age=<int:max_age>',
'Accept': 'text/*'}))
def view(x, max_age,):
return json.dumps({'x':x,'max_age':max_age})
response = self.c.get('/23', headers={'Cache-Control': 'public, max-age=30',
'Accept':'text/xxx'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'x':23, 'max_age':30})
response = self.c.get('/23', headers={'Cache-Control': 'public, max-age=30',
'Accept':'XXX/turtle'})
self.assertTrue(response.status_code, 404)
def test_25(self):
@self.app.route(match_headers('/<int:x>',
**{'Cache-Control': 'public,[ \t]max-age=<int:max_age>',
'Accept': 'text/*'}))
def view(x, max_age,):
return json.dumps({'x':x,'max_age':max_age})
response = self.c.get('/23', headers={'Cache-Control': 'public, max-age=30',
'Accept':'text/xxx'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'x':23, 'max_age':30})
response = self.c.get('/23', headers={'Cache-Control': 'public,\tmax-age=30',
'Accept':'text/xxx'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'x':23, 'max_age':30})
response = self.c.get('/23', headers={'Cache-Control': 'public, max-age=30',
'Accept':'XXX/turtle'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/23', headers={'Cache-Control': 'public,\tmax-age=30',
'Accept':'XXX/turtle'})
self.assertTrue(response.status_code, 404)
def test_26(self):
@self.app.route(match_headers('/<int:x>',
**{'Cache-Control': 'public, max-age=<int:max_age>',
'Accept': 'text/turtle'}))
def view(x, max_age,):
return json.dumps({'x':x,'max_age':max_age})
response = self.c.get('/23', headers={'Cache-Control': 'public, max-age=30',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'x':23, 'max_age':30})
response = self.c.get('/23', headers={'Cache-Control': 'public, max-age=30',
'Accept':'text/xxx'})
self.assertTrue(response.status_code, 404)
def test_27(self):
@self.app.route(match_headers('/<int:x>',
**{'Cache-Control': 'public,[ \t]max-age=<int:max_age>',
'Accept': 'text/turtle'}))
def view(x, max_age,):
return json.dumps({'x':x,'max_age':max_age})
response = self.c.get('/23', headers={'Cache-Control': 'public, max-age=30',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'x':23, 'max_age':30})
response = self.c.get('/23', headers={'Cache-Control': 'public,\tmax-age=30',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'x':23, 'max_age':30})
response = self.c.get('/23', headers={'Cache-Control': 'public,\tmax-age=30',
'Accept':'text/xxx'})
self.assertTrue(response.status_code, 404)
def test_28(self):
@self.app.route(match_headers('/<int:x>',
**{'Cache-Control': 'public, max-age=<int:max_age>',
'Accept': 'text/<mime>'}))
def view(x, max_age, mime,):
return json.dumps({'x':x,'max_age':max_age,'mime':mime})
response = self.c.get('/23', headers={'Cache-Control': 'public, max-age=30',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'x':23,
'max_age':30,
'mime':'turtle'})
response = self.c.get('/xx', headers={'Cache-Control': 'public, max-age=30',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/23', headers={'Cache-Control': 'public, max-age=xx',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 404)
response = self.c.get('/23', headers={'Cache-Control': 'public, max-age=30',
'Accept':'XXX/turtle'})
self.assertTrue(response.status_code, 404)
def test_29(self):
@self.app.route(match_headers('/<int:x>',
**{'Cache-Control': 'public,[ \t]max-age=<int:max_age>', 'Accept': 'text/<mime>'}))
def view(x, max_age, mime,):
return json.dumps({'x':x,'max_age':max_age,'mime':mime})
response = self.c.get('/23', headers={'Cache-Control': 'public, max-age=30',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'x':23,
'max_age':30,
'mime':'turtle'})
response = self.c.get('/23', headers={'Cache-Control': 'public,\tmax-age=30',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 200)
self.assertEqual(json.loads(response.data.decode()), {'x':23,
'max_age':30,
'mime':'turtle'})
response = self.c.get('/23', headers={'Cache-Control': 'public, \tmax-age=30',
'Accept':'text/turtle'})
self.assertTrue(response.status_code, 404)
| 46.425041 | 121 | 0.521824 | 3,095 | 28,180 | 4.658481 | 0.033279 | 0.082813 | 0.116105 | 0.099875 | 0.96324 | 0.960744 | 0.958316 | 0.952767 | 0.94167 | 0.931613 | 0 | 0.029259 | 0.302626 | 28,180 | 606 | 122 | 46.50165 | 0.704407 | 0 | 0 | 0.835118 | 0 | 0 | 0.203903 | 0.019163 | 0 | 0 | 0 | 0 | 0.280514 | 1 | 0.14561 | false | 0 | 0.012848 | 0.074946 | 0.239829 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
dc51c395c4fc5b08d49a3b853a13bc5125e152f0 | 168 | py | Python | tests/test_version.py | LucaCappelletti94/userinput | 57d8d8a4d751627f550614b3c51fd195708ae73c | [
"MIT"
] | 1 | 2021-11-08T18:02:49.000Z | 2021-11-08T18:02:49.000Z | tests/test_version.py | LucaCappelletti94/userinput | 57d8d8a4d751627f550614b3c51fd195708ae73c | [
"MIT"
] | null | null | null | tests/test_version.py | LucaCappelletti94/userinput | 57d8d8a4d751627f550614b3c51fd195708ae73c | [
"MIT"
] | null | null | null | from validate_version_code import validate_version_code
from userinput.__version__ import __version__
def test_version():
assert validate_version_code(__version__) | 33.6 | 55 | 0.869048 | 21 | 168 | 6.047619 | 0.428571 | 0.354331 | 0.448819 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.095238 | 168 | 5 | 56 | 33.6 | 0.835526 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.25 | true | 0 | 0.5 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
dc988c4f18df064b0efff7d68d70414b544e73f4 | 3,303 | py | Python | FITS_tools/tests/test_hcongrid.py | e-koch/FITS_tools | 45473ccfeaa97c94fd40b34017a2da987e66ecee | [
"BSD-3-Clause"
] | null | null | null | FITS_tools/tests/test_hcongrid.py | e-koch/FITS_tools | 45473ccfeaa97c94fd40b34017a2da987e66ecee | [
"BSD-3-Clause"
] | null | null | null | FITS_tools/tests/test_hcongrid.py | e-koch/FITS_tools | 45473ccfeaa97c94fd40b34017a2da987e66ecee | [
"BSD-3-Clause"
] | null | null | null | import numpy as np
from astropy.io import fits
from astropy.tests.helper import pytest
header1 = """
SIMPLE = T / conforms to FITS standard
BITPIX = -64 / array data type
NAXIS = 2 / number of array dimensions
NAXIS1 = 128
NAXIS2 = 128
CRVAL1 = 0.0 / Value at ref. pixel on axis 1
CRVAL2 = 0.0 / Value at ref. pixel on axis 2
CTYPE1 = 'GLON-CAR' / Type of co-ordinate on axis 1
CTYPE2 = 'GLAT-CAR' / Type of co-ordinate on axis 2
CRPIX1 = 65.0 / Reference pixel on axis 1
CRPIX2 = 65.0 / Reference pixel on axis 2
CDELT1 = -0.005555555556 / Pixel size on axis 1
CDELT2 = 0.005555555556 / Pixel size on axis 2
END
""".strip().lstrip()
header2 = """
SIMPLE = T / conforms to FITS standard
BITPIX = -64 / array data type
NAXIS = 2 / number of array dimensions
NAXIS1 = 128
NAXIS2 = 128
CRVAL1 = 266.416816625 / Value at ref. pixel on axis 1
CRVAL2 = -29.007824972 / Value at ref. pixel on axis 2
CTYPE1 = 'RA---TAN' / Type of co-ordinate on axis 1
CTYPE2 = 'DEC--TAN' / Type of co-ordinate on axis 2
CRPIX1 = 65.0 / Reference pixel on axis 1
CRPIX2 = 65.0 / Reference pixel on axis 2
CDELT1 = -0.005555555556 / Pixel size on axis 1
CDELT2 = 0.005555555556 / Pixel size on axis 2
END
""".strip().lstrip()
header3 = """
SIMPLE = T / conforms to FITS standard
BITPIX = -64 / array data type
NAXIS = 2 / number of array dimensions
NAXIS1 = 128
NAXIS2 = 128
CRVAL1 = 266.416816625 / Value at ref. pixel on axis 1
CRVAL2 = -29.007824972 / Value at ref. pixel on axis 2
CTYPE1 = 'RA---TAN' / Type of co-ordinate on axis 1
CTYPE2 = 'DEC--TAN' / Type of co-ordinate on axis 2
CRPIX1 = 65.0 / Reference pixel on axis 1
CRPIX2 = 65.0 / Reference pixel on axis 2
CDELT1 = -0.00225 / Pixel size on axis 1
CDELT2 = 0.00225 / Pixel size on axis 2
END
""".strip().lstrip()
from ..hcongrid import hcongrid,wcsalign
@pytest.mark.parametrize(('h1','h2'),zip((header1,header2,header3),(header1,header2,header3)))
def test_wcsalign_gaussian_smallerpix(h1,h2):
"""
Reproject different coordinate systems
"""
x,y = np.mgrid[:128,:128]
r = ((x-63.5)**2 + (y-63.5)**2)**0.5
e = np.exp(-r**2/(2.*10.**2))
hdr1 = fits.Header().fromstring(h1,'\n')
hdu_in = fits.PrimaryHDU(data=e, header=hdr1)
hdr2 = fits.Header().fromstring(h2,'\n')
hdu_out = wcsalign(hdu_in, hdr2)
return hdu_out
def test_hcongrid_gaussian_smallerpix():
"""
Reproject RA/Dec -> RA/Dec with smaller pixels
"""
x,y = np.mgrid[:128,:128]
r = ((x-63.5)**2 + (y-63.5)**2)**0.5
e = np.exp(-r**2/(2.*10.**2))
hdr1 = fits.Header().fromstring(header2,'\n')
hdu_in = fits.PrimaryHDU(data=e, header=hdr1)
hdr2 = fits.Header().fromstring(header3,'\n')
hdu_out = wcsalign(hdu_in, hdr2)
return hdu_out
| 35.902174 | 94 | 0.552528 | 455 | 3,303 | 3.98022 | 0.230769 | 0.079514 | 0.072888 | 0.049696 | 0.799006 | 0.799006 | 0.793484 | 0.777471 | 0.711761 | 0.711761 | 0 | 0.127215 | 0.333636 | 3,303 | 91 | 95 | 36.296703 | 0.695593 | 0.025734 | 0 | 0.71831 | 0 | 0 | 0.674929 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028169 | false | 0 | 0.056338 | 0 | 0.112676 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
760566744f6484cde261f87f0d95a1182786779c | 10,970 | py | Python | configs/_base_/models/h3dnet.py | Guangyun-Xu/mmdetection3d | 75c5c6cd590386bd1539a686c5fd2cc45c5480d5 | [
"Apache-2.0"
] | 2,216 | 2020-07-09T19:10:11.000Z | 2022-03-31T12:39:26.000Z | configs/_base_/models/h3dnet.py | Guangyun-Xu/mmdetection3d | 75c5c6cd590386bd1539a686c5fd2cc45c5480d5 | [
"Apache-2.0"
] | 1,174 | 2020-07-10T07:02:28.000Z | 2022-03-31T12:38:56.000Z | configs/_base_/models/h3dnet.py | Guangyun-Xu/mmdetection3d | 75c5c6cd590386bd1539a686c5fd2cc45c5480d5 | [
"Apache-2.0"
] | 681 | 2020-07-09T19:40:06.000Z | 2022-03-31T11:02:24.000Z | primitive_z_cfg = dict(
type='PrimitiveHead',
num_dims=2,
num_classes=18,
primitive_mode='z',
upper_thresh=100.0,
surface_thresh=0.5,
vote_module_cfg=dict(
in_channels=256,
vote_per_seed=1,
gt_per_seed=1,
conv_channels=(256, 256),
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d'),
norm_feats=True,
vote_loss=dict(
type='ChamferDistance',
mode='l1',
reduction='none',
loss_dst_weight=10.0)),
vote_aggregation_cfg=dict(
type='PointSAModule',
num_point=1024,
radius=0.3,
num_sample=16,
mlp_channels=[256, 128, 128, 128],
use_xyz=True,
normalize_xyz=True),
feat_channels=(128, 128),
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d'),
objectness_loss=dict(
type='CrossEntropyLoss',
class_weight=[0.4, 0.6],
reduction='mean',
loss_weight=30.0),
center_loss=dict(
type='ChamferDistance',
mode='l1',
reduction='sum',
loss_src_weight=0.5,
loss_dst_weight=0.5),
semantic_reg_loss=dict(
type='ChamferDistance',
mode='l1',
reduction='sum',
loss_src_weight=0.5,
loss_dst_weight=0.5),
semantic_cls_loss=dict(
type='CrossEntropyLoss', reduction='sum', loss_weight=1.0),
train_cfg=dict(
dist_thresh=0.2,
var_thresh=1e-2,
lower_thresh=1e-6,
num_point=100,
num_point_line=10,
line_thresh=0.2))
primitive_xy_cfg = dict(
type='PrimitiveHead',
num_dims=1,
num_classes=18,
primitive_mode='xy',
upper_thresh=100.0,
surface_thresh=0.5,
vote_module_cfg=dict(
in_channels=256,
vote_per_seed=1,
gt_per_seed=1,
conv_channels=(256, 256),
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d'),
norm_feats=True,
vote_loss=dict(
type='ChamferDistance',
mode='l1',
reduction='none',
loss_dst_weight=10.0)),
vote_aggregation_cfg=dict(
type='PointSAModule',
num_point=1024,
radius=0.3,
num_sample=16,
mlp_channels=[256, 128, 128, 128],
use_xyz=True,
normalize_xyz=True),
feat_channels=(128, 128),
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d'),
objectness_loss=dict(
type='CrossEntropyLoss',
class_weight=[0.4, 0.6],
reduction='mean',
loss_weight=30.0),
center_loss=dict(
type='ChamferDistance',
mode='l1',
reduction='sum',
loss_src_weight=0.5,
loss_dst_weight=0.5),
semantic_reg_loss=dict(
type='ChamferDistance',
mode='l1',
reduction='sum',
loss_src_weight=0.5,
loss_dst_weight=0.5),
semantic_cls_loss=dict(
type='CrossEntropyLoss', reduction='sum', loss_weight=1.0),
train_cfg=dict(
dist_thresh=0.2,
var_thresh=1e-2,
lower_thresh=1e-6,
num_point=100,
num_point_line=10,
line_thresh=0.2))
primitive_line_cfg = dict(
type='PrimitiveHead',
num_dims=0,
num_classes=18,
primitive_mode='line',
upper_thresh=100.0,
surface_thresh=0.5,
vote_module_cfg=dict(
in_channels=256,
vote_per_seed=1,
gt_per_seed=1,
conv_channels=(256, 256),
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d'),
norm_feats=True,
vote_loss=dict(
type='ChamferDistance',
mode='l1',
reduction='none',
loss_dst_weight=10.0)),
vote_aggregation_cfg=dict(
type='PointSAModule',
num_point=1024,
radius=0.3,
num_sample=16,
mlp_channels=[256, 128, 128, 128],
use_xyz=True,
normalize_xyz=True),
feat_channels=(128, 128),
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d'),
objectness_loss=dict(
type='CrossEntropyLoss',
class_weight=[0.4, 0.6],
reduction='mean',
loss_weight=30.0),
center_loss=dict(
type='ChamferDistance',
mode='l1',
reduction='sum',
loss_src_weight=1.0,
loss_dst_weight=1.0),
semantic_reg_loss=dict(
type='ChamferDistance',
mode='l1',
reduction='sum',
loss_src_weight=1.0,
loss_dst_weight=1.0),
semantic_cls_loss=dict(
type='CrossEntropyLoss', reduction='sum', loss_weight=2.0),
train_cfg=dict(
dist_thresh=0.2,
var_thresh=1e-2,
lower_thresh=1e-6,
num_point=100,
num_point_line=10,
line_thresh=0.2))
model = dict(
type='H3DNet',
backbone=dict(
type='MultiBackbone',
num_streams=4,
suffixes=['net0', 'net1', 'net2', 'net3'],
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d', eps=1e-5, momentum=0.01),
act_cfg=dict(type='ReLU'),
backbones=dict(
type='PointNet2SASSG',
in_channels=4,
num_points=(2048, 1024, 512, 256),
radius=(0.2, 0.4, 0.8, 1.2),
num_samples=(64, 32, 16, 16),
sa_channels=((64, 64, 128), (128, 128, 256), (128, 128, 256),
(128, 128, 256)),
fp_channels=((256, 256), (256, 256)),
norm_cfg=dict(type='BN2d'),
sa_cfg=dict(
type='PointSAModule',
pool_mod='max',
use_xyz=True,
normalize_xyz=True))),
rpn_head=dict(
type='VoteHead',
vote_module_cfg=dict(
in_channels=256,
vote_per_seed=1,
gt_per_seed=3,
conv_channels=(256, 256),
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d'),
norm_feats=True,
vote_loss=dict(
type='ChamferDistance',
mode='l1',
reduction='none',
loss_dst_weight=10.0)),
vote_aggregation_cfg=dict(
type='PointSAModule',
num_point=256,
radius=0.3,
num_sample=16,
mlp_channels=[256, 128, 128, 128],
use_xyz=True,
normalize_xyz=True),
pred_layer_cfg=dict(
in_channels=128, shared_conv_channels=(128, 128), bias=True),
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d'),
objectness_loss=dict(
type='CrossEntropyLoss',
class_weight=[0.2, 0.8],
reduction='sum',
loss_weight=5.0),
center_loss=dict(
type='ChamferDistance',
mode='l2',
reduction='sum',
loss_src_weight=10.0,
loss_dst_weight=10.0),
dir_class_loss=dict(
type='CrossEntropyLoss', reduction='sum', loss_weight=1.0),
dir_res_loss=dict(
type='SmoothL1Loss', reduction='sum', loss_weight=10.0),
size_class_loss=dict(
type='CrossEntropyLoss', reduction='sum', loss_weight=1.0),
size_res_loss=dict(
type='SmoothL1Loss', reduction='sum', loss_weight=10.0),
semantic_loss=dict(
type='CrossEntropyLoss', reduction='sum', loss_weight=1.0)),
roi_head=dict(
type='H3DRoIHead',
primitive_list=[primitive_z_cfg, primitive_xy_cfg, primitive_line_cfg],
bbox_head=dict(
type='H3DBboxHead',
gt_per_seed=3,
num_proposal=256,
suface_matching_cfg=dict(
type='PointSAModule',
num_point=256 * 6,
radius=0.5,
num_sample=32,
mlp_channels=[128 + 6, 128, 64, 32],
use_xyz=True,
normalize_xyz=True),
line_matching_cfg=dict(
type='PointSAModule',
num_point=256 * 12,
radius=0.5,
num_sample=32,
mlp_channels=[128 + 12, 128, 64, 32],
use_xyz=True,
normalize_xyz=True),
feat_channels=(128, 128),
primitive_refine_channels=[128, 128, 128],
upper_thresh=100.0,
surface_thresh=0.5,
line_thresh=0.5,
conv_cfg=dict(type='Conv1d'),
norm_cfg=dict(type='BN1d'),
objectness_loss=dict(
type='CrossEntropyLoss',
class_weight=[0.2, 0.8],
reduction='sum',
loss_weight=5.0),
center_loss=dict(
type='ChamferDistance',
mode='l2',
reduction='sum',
loss_src_weight=10.0,
loss_dst_weight=10.0),
dir_class_loss=dict(
type='CrossEntropyLoss', reduction='sum', loss_weight=0.1),
dir_res_loss=dict(
type='SmoothL1Loss', reduction='sum', loss_weight=10.0),
size_class_loss=dict(
type='CrossEntropyLoss', reduction='sum', loss_weight=0.1),
size_res_loss=dict(
type='SmoothL1Loss', reduction='sum', loss_weight=10.0),
semantic_loss=dict(
type='CrossEntropyLoss', reduction='sum', loss_weight=0.1),
cues_objectness_loss=dict(
type='CrossEntropyLoss',
class_weight=[0.3, 0.7],
reduction='mean',
loss_weight=5.0),
cues_semantic_loss=dict(
type='CrossEntropyLoss',
class_weight=[0.3, 0.7],
reduction='mean',
loss_weight=5.0),
proposal_objectness_loss=dict(
type='CrossEntropyLoss',
class_weight=[0.2, 0.8],
reduction='none',
loss_weight=5.0),
primitive_center_loss=dict(
type='MSELoss', reduction='none', loss_weight=1.0))),
# model training and testing settings
train_cfg=dict(
rpn=dict(
pos_distance_thr=0.3, neg_distance_thr=0.6, sample_mod='vote'),
rpn_proposal=dict(use_nms=False),
rcnn=dict(
pos_distance_thr=0.3,
neg_distance_thr=0.6,
sample_mod='vote',
far_threshold=0.6,
near_threshold=0.3,
mask_surface_threshold=0.3,
label_surface_threshold=0.3,
mask_line_threshold=0.3,
label_line_threshold=0.3)),
test_cfg=dict(
rpn=dict(
sample_mod='seed',
nms_thr=0.25,
score_thr=0.05,
per_class_proposal=True,
use_nms=False),
rcnn=dict(
sample_mod='seed',
nms_thr=0.25,
score_thr=0.05,
per_class_proposal=True)))
| 32.076023 | 79 | 0.537739 | 1,304 | 10,970 | 4.263804 | 0.115031 | 0.103597 | 0.073381 | 0.085612 | 0.839568 | 0.819245 | 0.792986 | 0.792446 | 0.769784 | 0.750719 | 0 | 0.078879 | 0.336645 | 10,970 | 341 | 80 | 32.170088 | 0.685172 | 0.003191 | 0 | 0.789318 | 0 | 0 | 0.090186 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
761051f5e81d939056839c2c754f948d7bbe9efe | 86 | py | Python | goalrepresent/dnn/networks/__init__.py | flowersteam/holmes | e38fb8417ec56cfde8142eddd0f751e319e35d8c | [
"MIT"
] | 6 | 2020-12-19T00:16:16.000Z | 2022-01-28T14:59:21.000Z | goalrepresent/dnn/networks/__init__.py | Evolutionary-Intelligence/holmes | e38fb8417ec56cfde8142eddd0f751e319e35d8c | [
"MIT"
] | null | null | null | goalrepresent/dnn/networks/__init__.py | Evolutionary-Intelligence/holmes | e38fb8417ec56cfde8142eddd0f751e319e35d8c | [
"MIT"
] | 1 | 2021-05-24T14:58:26.000Z | 2021-05-24T14:58:26.000Z | import goalrepresent.dnn.networks.decoders
import goalrepresent.dnn.networks.encoders
| 28.666667 | 42 | 0.883721 | 10 | 86 | 7.6 | 0.6 | 0.5 | 0.578947 | 0.789474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046512 | 86 | 2 | 43 | 43 | 0.926829 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
520403f9436c965cc7354e0a5114f588f33aeeb5 | 22,845 | py | Python | infrastructure/tests.py | nuwainfo/treeio | f57bf9114d9774c11468a1b0e44614b04631beb1 | [
"MIT"
] | 242 | 2015-01-01T15:08:23.000Z | 2022-01-19T21:14:24.000Z | infrastructure/tests.py | nuwainfo/treeio | f57bf9114d9774c11468a1b0e44614b04631beb1 | [
"MIT"
] | 52 | 2015-01-05T09:13:17.000Z | 2018-12-26T14:52:43.000Z | infrastructure/tests.py | nuwainfo/treeio | f57bf9114d9774c11468a1b0e44614b04631beb1 | [
"MIT"
] | 99 | 2015-01-09T23:28:14.000Z | 2021-12-30T09:19:51.000Z | # encoding: utf-8
# Copyright 2011 Tree.io Limited
# This file is part of Treeio.
# License www.tree.io/license
"""
Infrastructure: test suites
"""
from django.test import TestCase
from django.test.client import Client
from django.core.urlresolvers import reverse
from django.contrib.auth.models import User as DjangoUser
from treeio.core.models import User, Group, Perspective, ModuleSetting
from treeio.infrastructure.models import Item, ItemValue, ItemField, ItemType, ItemStatus, ItemServicing
class InfrastructureModelsTest(TestCase):
    """Unit tests for the Infrastructure models.

    Each test performs a save / field-check / delete round-trip on one
    model class and verifies that a primary key was assigned on save.
    """

    def test_model_item_field(self):
        "Test item field model"
        obj = ItemField(name='test', label='test', field_type='text')
        obj.save()
        # assertEqual/assertNotEqual replace the deprecated
        # assertEquals/assertNotEquals aliases.
        self.assertEqual('test', obj.name)
        self.assertNotEqual(obj.id, None)
        obj.delete()

    def test_model_item_type(self):
        "Test item type model"
        obj = ItemType(name='test')
        obj.save()
        self.assertEqual('test', obj.name)
        self.assertNotEqual(obj.id, None)
        obj.delete()

    def test_model_item_status(self):
        "Test item status model"
        obj = ItemStatus(name='test')
        obj.save()
        self.assertEqual('test', obj.name)
        self.assertNotEqual(obj.id, None)
        obj.delete()

    def test_model_item(self):
        "Test item model"
        # Local renamed from `type`, which shadowed the builtin.
        item_type = ItemType(name='test')
        item_type.save()
        status = ItemStatus(name='test')
        status.save()
        obj = Item(name='test', item_type=item_type, status=status)
        obj.save()
        self.assertEqual('test', obj.name)
        self.assertNotEqual(obj.id, None)
        obj.delete()

    def test_model_item_value(self):
        "Test item value model"
        status = ItemStatus(name='test')
        status.save()
        # Local renamed from `type`, which shadowed the builtin.
        item_type = ItemType(name='test')
        item_type.save()
        item = Item(name='test', item_type=item_type, status=status)
        item.save()
        field = ItemField(name='test', label='test', field_type='text')
        field.save()
        obj = ItemValue(value='test', field=field, item=item)
        obj.save()
        self.assertEqual('test', obj.value)
        self.assertNotEqual(obj.id, None)
        obj.delete()

    def test_model_item_servicing(self):
        "Test item servicing model"
        obj = ItemServicing(name='test')
        obj.save()
        self.assertEqual('test', obj.name)
        self.assertNotEqual(obj.id, None)
        obj.delete()
class InfrastructureViewsTest(TestCase):
    """Functional tests for the infrastructure module views.

    Every infrastructure URL is exercised twice: once as an
    authenticated user (the view must respond with HTTP 200) and once
    anonymously (the view must redirect to the login page).
    """

    username = "test"
    password = "password"
    prepared = False

    def setUp(self):
        "Initial Setup"
        # NOTE(review): `prepared` is assigned on the instance, not the
        # class, so this guard never short-circuits across tests (each
        # test gets a fresh instance). Kept as-is to preserve behaviour.
        if not self.prepared:
            # A test group/user pair plus a default perspective give the
            # views a fully configured environment to render against.
            self.group, created = Group.objects.get_or_create(name='test')
            duser, created = DjangoUser.objects.get_or_create(
                username=self.username)
            duser.set_password(self.password)
            duser.save()
            self.user, created = User.objects.get_or_create(user=duser)
            self.user.save()
            perspective, created = Perspective.objects.get_or_create(
                name='default')
            perspective.set_default_user()
            perspective.save()
            ModuleSetting.set('default_perspective', perspective.id)
            # One saved instance of each infrastructure model under test.
            self.type = ItemType(name='test')
            self.type.set_default_user()
            self.type.save()
            self.status = ItemStatus(name='test')
            self.status.set_default_user()
            self.status.save()
            self.field = ItemField(
                name='test', label='test', field_type='text')
            self.field.set_default_user()
            self.field.save()
            self.item = Item(
                name='test', item_type=self.type, status=self.status)
            self.item.set_default_user()
            self.item.save()
            self.value = ItemValue(field=self.field, item=self.item)
            self.value.save()
            self.servicing = ItemServicing(name='test')
            self.servicing.set_default_user()
            self.servicing.save()
            self.client = Client()
            self.prepared = True

    # -- Private helpers -------------------------------------------------
    # The original repeated the same login/GET/assert boilerplate in
    # every one of the ~50 test methods below; it is factored out here.

    def _login(self):
        "Authenticate the test client and check the post-login redirect."
        response = self.client.post(
            '/accounts/login',
            {'username': self.username, 'password': self.password})
        self.assertRedirects(response, '/')

    def _get_ok(self, url):
        "GET `url` as an authenticated user and expect HTTP 200."
        self._login()
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)

    def _get_login_redirect(self, url):
        "GET `url` anonymously and expect a redirect to the login page."
        response = self.client.get(url)
        self.assertRedirects(response, reverse('user_login'))

    ######################################
    # Testing views when user is logged in
    ######################################

    def test_index_login(self):
        "Test index page with login at /infrastructure/"
        self._get_ok(reverse('infrastructure'))

    def test_index_infrastructure_login(self):
        "Test index page with login at /infrastructure/index/"
        self._get_ok(reverse('infrastructure_index'))

    def test_infrastructure_index_owned(self):
        "Test index page with login at /infrastructure/owned/"
        self._get_ok(reverse('infrastructure_index_owned'))

    # Type
    def test_infrastructure_type_add(self):
        "Test index page with login at /infrastructure/type/add/"
        self._get_ok(reverse('infrastructure_type_add'))

    def test_infrastructure_type_view(self):
        "Test index page with login at /infrastructure/type/view/<type_id>"
        self._get_ok(
            reverse('infrastructure_type_view', args=[self.type.id]))

    def test_infrastructure_type_edit(self):
        "Test index page with login at /infrastructure/type/edit/<type_id>"
        self._get_ok(
            reverse('infrastructure_type_edit', args=[self.type.id]))

    def test_infrastructure_type_delete(self):
        "Test index page with login at /infrastructure/type/delete/<type_id>"
        self._get_ok(
            reverse('infrastructure_type_delete', args=[self.type.id]))

    # Field
    def test_infrastructure_field_add(self):
        "Test index page with login at /infrastructure/field/add/"
        self._get_ok(reverse('infrastructure_field_add'))

    def test_infrastructure_field_view(self):
        "Test index page with login at /infrastructure/field/view/<field_id>"
        self._get_ok(
            reverse('infrastructure_field_view', args=[self.field.id]))

    def test_infrastructure_field_edit(self):
        "Test index page with login at /infrastructure/field/edit/<field_id>"
        self._get_ok(
            reverse('infrastructure_field_edit', args=[self.field.id]))

    def test_infrastructure_field_del(self):
        "Test index page with login at /infrastructure/field/delete/<field_id>"
        self._get_ok(
            reverse('infrastructure_field_delete', args=[self.field.id]))

    # Status
    def test_infrastructure_status_add(self):
        "Test index page with login at /infrastructure/status/add/"
        self._get_ok(reverse('infrastructure_status_add'))

    def test_infrastructure_status_view(self):
        "Test index page with login at /infrastructure/status/view/<status_id>"
        self._get_ok(
            reverse('infrastructure_status_view', args=[self.status.id]))

    def test_infrastructure_status_edit(self):
        "Test index page with login at /infrastructure/status/edit/<status_id>"
        self._get_ok(
            reverse('infrastructure_status_edit', args=[self.status.id]))

    def test_infrastructure_status_del(self):
        "Test index page with login at /infrastructure/status/delete/<status_id>"
        self._get_ok(
            reverse('infrastructure_status_delete', args=[self.status.id]))

    # Item
    def test_infrastructure_item_add(self):
        "Test index page with login at /infrastructure/item/add/"
        self._get_ok(reverse('infrastructure_item_add'))

    def test_infr_item_add_typed(self):
        "Test index page with login at /infrastructure/item/add/<type_id>"
        self._get_ok(
            reverse('infrastructure_item_add_typed', args=[self.type.id]))

    def test_infrastructure_item_view(self):
        "Test index page with login at /infrastructure/item/view/<item_id>"
        self._get_ok(
            reverse('infrastructure_item_view', args=[self.item.id]))

    def test_infrastructure_item_edit(self):
        "Test index page with login at /infrastructure/item/edit/<item_id>"
        self._get_ok(
            reverse('infrastructure_item_edit', args=[self.item.id]))

    def test_infrastructure_item_del(self):
        "Test index page with login at /infrastructure/item/delete/<item_id>"
        self._get_ok(
            reverse('infrastructure_item_delete', args=[self.item.id]))

    # Service Record
    def test_infr_service_record_index(self):
        "Test index page with login at /infrastructure/service_record/index/"
        self._get_ok(reverse('infrastructure_service_record_index'))

    def test_infr_service_record_add(self):
        "Test index page with login at /infrastructure/service_record/add/"
        self._get_ok(reverse('infrastructure_service_record_add'))

    def test_infr_service_record_view(self):
        "Test index page with login at /infrastructure/service_record/view/<service_record_id>"
        self._get_ok(
            reverse('infrastructure_service_record_view',
                    args=[self.servicing.id]))

    def test_infr_service_record_edit(self):
        "Test index page with login at /infrastructure/service_record/edit/<service_record_id>"
        self._get_ok(
            reverse('infrastructure_service_record_edit',
                    args=[self.servicing.id]))

    def test_infr_service_record_delete(self):
        "Test index page with login at /infrastructure/service_record/delete/<service_record_id>"
        self._get_ok(
            reverse('infrastructure_service_record_delete',
                    args=[self.servicing.id]))

    ######################################
    # Testing views when user is not logged in
    ######################################

    def test_index(self):
        "Testing /infrastructure/"
        # Redirects as unauthenticated
        self._get_login_redirect('/infrastructure/')

    def test_index_infrastructure_out(self):
        "Testing /infrastructure/index/"
        self._get_login_redirect(reverse('infrastructure_index'))

    def test_infrastructure_index_owned_out(self):
        "Testing /infrastructure/owned/"
        self._get_login_redirect(reverse('infrastructure_index_owned'))

    # Type
    def test_infrastructure_type_add_out(self):
        "Testing /infrastructure/type/add/"
        self._get_login_redirect(reverse('infrastructure_type_add'))

    def test_infrastructure_type_view_out(self):
        "Testing /infrastructure/type/view/<type_id>"
        self._get_login_redirect(
            reverse('infrastructure_type_view', args=[self.type.id]))

    def test_infrastructure_type_edit_out(self):
        "Testing /infrastructure/type/edit/<type_id>"
        self._get_login_redirect(
            reverse('infrastructure_type_edit', args=[self.type.id]))

    def test_infrastructure_type_delete_out(self):
        "Testing /infrastructure/type/delete/<type_id>"
        self._get_login_redirect(
            reverse('infrastructure_type_delete', args=[self.type.id]))

    # Field
    def test_infrastructure_field_add_out(self):
        "Testing /infrastructure/field/add/"
        self._get_login_redirect(reverse('infrastructure_field_add'))

    def test_infrastructure_field_view_out(self):
        "Testing /infrastructure/field/view/<field_id>"
        self._get_login_redirect(
            reverse('infrastructure_field_view', args=[self.field.id]))

    def test_infrastructure_field_edit_out(self):
        "Testing /infrastructure/field/edit/<field_id>"
        self._get_login_redirect(
            reverse('infrastructure_field_edit', args=[self.field.id]))

    def test_infrastructure_field_del_out(self):
        "Testing /infrastructure/field/delete/<field_id>"
        self._get_login_redirect(
            reverse('infrastructure_field_delete', args=[self.field.id]))

    # Status
    def test_infrastructure_status_add_out(self):
        "Testing /infrastructure/status/add/"
        self._get_login_redirect(reverse('infrastructure_status_add'))

    def test_infrastructure_status_view_out(self):
        "Testing /infrastructure/status/view/<status_id>"
        self._get_login_redirect(
            reverse('infrastructure_status_view', args=[self.status.id]))

    def test_infrastructure_status_edit_out(self):
        "Testing /infrastructure/status/edit/<status_id>"
        self._get_login_redirect(
            reverse('infrastructure_status_edit', args=[self.status.id]))

    def test_infrastructure_status_del_out(self):
        "Testing /infrastructure/status/delete/<status_id>"
        self._get_login_redirect(
            reverse('infrastructure_status_delete', args=[self.status.id]))

    # Item
    def test_infrastructure_item_add_out(self):
        "Testing /infrastructure/item/add/"
        self._get_login_redirect(reverse('infrastructure_item_add'))

    def test_infr_item_add_typed_out(self):
        "Testing /infrastructure/item/add/<type_id>"
        self._get_login_redirect(
            reverse('infrastructure_item_add_typed', args=[self.type.id]))

    def test_infrastructure_item_view_out(self):
        "Testing /infrastructure/item/view/<item_id>"
        self._get_login_redirect(
            reverse('infrastructure_item_view', args=[self.item.id]))

    def test_infrastructure_item_edit_out(self):
        "Testing /infrastructure/item/edit/<item_id>"
        self._get_login_redirect(
            reverse('infrastructure_item_edit', args=[self.item.id]))

    def test_infrastructure_item_del_out(self):
        "Testing /infrastructure/item/delete/<item_id>"
        self._get_login_redirect(
            reverse('infrastructure_item_delete', args=[self.item.id]))

    # Service Record
    def test_infr_service_record_index_out(self):
        "Testing /infrastructure/service_record/index/"
        self._get_login_redirect(
            reverse('infrastructure_service_record_index'))

    def test_infr_service_record_add_out(self):
        "Testing /infrastructure/service_record/add/"
        self._get_login_redirect(
            reverse('infrastructure_service_record_add'))

    def test_infr_service_record_view_out(self):
        "Testing /infrastructure/service_record/view/<service_record_id>"
        self._get_login_redirect(
            reverse('infrastructure_service_record_view',
                    args=[self.servicing.id]))

    def test_infr_service_record_edit_out(self):
        "Testing /infrastructure/service_record/edit/<service_record_id>"
        self._get_login_redirect(
            reverse('infrastructure_service_record_edit',
                    args=[self.servicing.id]))

    def test_infr_service_record_delete_out(self):
        "Testing /infrastructure/service_record/delete/<service_record_id>"
        self._get_login_redirect(
            reverse('infrastructure_service_record_delete',
                    args=[self.servicing.id]))
| 43.848369 | 104 | 0.642854 | 2,446 | 22,845 | 5.827882 | 0.045789 | 0.053315 | 0.094704 | 0.073658 | 0.876955 | 0.845879 | 0.795089 | 0.789548 | 0.737846 | 0.638232 | 0 | 0.004569 | 0.233574 | 22,845 | 520 | 105 | 43.932692 | 0.809584 | 0.144145 | 0 | 0.57971 | 0 | 0 | 0.243849 | 0.133578 | 0 | 0 | 0 | 0 | 0.210145 | 1 | 0.137681 | false | 0.065217 | 0.014493 | 0 | 0.164251 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 7 |
5243069a240063e5707e3179ee485a1719cb687d | 229,486 | py | Python | .venv/lib/python3.7/site-packages/looker_sdk/sdk/methods.py | jgaines13/looker-to-powerpoint | 031df2bb059295af1154299247bc5fbc776553f4 | [
"MIT"
] | null | null | null | .venv/lib/python3.7/site-packages/looker_sdk/sdk/methods.py | jgaines13/looker-to-powerpoint | 031df2bb059295af1154299247bc5fbc776553f4 | [
"MIT"
] | null | null | null | .venv/lib/python3.7/site-packages/looker_sdk/sdk/methods.py | jgaines13/looker-to-powerpoint | 031df2bb059295af1154299247bc5fbc776553f4 | [
"MIT"
] | null | null | null | # 328 API methods
# NOTE: Do not edit this source code file. It is generated by Looker SDK Codegen.
import datetime
from typing import MutableMapping, Optional, Sequence, Union
from looker_sdk.sdk import models
from looker_sdk.rtl import api_methods
from looker_sdk.rtl import transport
class LookerSDK(api_methods.APIMethods):
# POST /integration_hubs/{integration_hub_id}/accept_legal_agreement -> models.IntegrationHub
def accept_integration_hub_legal_agreement(
    self,
    # Id of integration_hub
    integration_hub_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.IntegrationHub:
    """Accept Integration Hub Legal Agreement"""
    response = self.post(
        f"/integration_hubs/{integration_hub_id}/accept_legal_agreement",
        models.IntegrationHub,
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard: fail fast if the transport did not
    # deserialize the payload into the declared model type.
    assert isinstance(response, models.IntegrationHub)
    return response
# GET /themes/active -> Sequence[models.Theme]
def active_themes(
    self,
    # Name of theme
    name: Optional[str] = None,
    # Timestamp representing the target datetime for the active period. Defaults to 'now'
    ts: Optional[datetime.datetime] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Theme]:
    """Get Active Themes"""
    response = self.get(
        f"/themes/active",
        Sequence[models.Theme],
        query_params={"name": name, "ts": ts, "fields": fields},
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard: sequence responses come back as lists.
    assert isinstance(response, list)
    return response
# POST /groups/{group_id}/groups -> models.Group
def add_group_group(
    self,
    # Id of group
    group_id: int,
    body: Optional[models.GroupIdForGroupInclusion] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Group:
    """Add a Group to Group"""
    response = self.post(
        f"/groups/{group_id}/groups",
        models.Group,
        body=body,
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard on the deserialized response type.
    assert isinstance(response, models.Group)
    return response
# POST /groups/{group_id}/users -> models.User
def add_group_user(
    self,
    # Id of group
    group_id: int,
    body: Optional[models.GroupIdForGroupUserInclusion] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.User:
    """Add a User to Group"""
    response = self.post(
        f"/groups/{group_id}/users",
        models.User,
        body=body,
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard on the deserialized response type.
    assert isinstance(response, models.User)
    return response
# GET /color_collections -> Sequence[models.ColorCollection]
def all_color_collections(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.ColorCollection]:
    """Get all Color Collections"""
    response = self.get(
        f"/color_collections",
        Sequence[models.ColorCollection],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard: sequence responses come back as lists.
    assert isinstance(response, list)
    return response
# GET /connections -> Sequence[models.DBConnection]
def all_connections(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.DBConnection]:
    """Get All Connections"""
    response = self.get(
        f"/connections",
        Sequence[models.DBConnection],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard: sequence responses come back as lists.
    assert isinstance(response, list)
    return response
# GET /content_metadata_access -> Sequence[models.ContentMetaGroupUser]
def all_content_metadata_accesses(
    self,
    # Id of content metadata
    content_metadata_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.ContentMetaGroupUser]:
    """Get All Content Metadata Accesses"""
    # Note: content_metadata_id is required but is sent as a query
    # parameter, not a path segment.
    response = self.get(
        f"/content_metadata_access",
        Sequence[models.ContentMetaGroupUser],
        query_params={"content_metadata_id": content_metadata_id, "fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /content_metadata -> Sequence[models.ContentMeta]
def all_content_metadatas(
    self,
    # Parent space of content.
    parent_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.ContentMeta]:
    """Get All Content Metadatas"""
    # Note: parent_id is required but travels as a query parameter.
    response = self.get(
        f"/content_metadata",
        Sequence[models.ContentMeta],
        query_params={"parent_id": parent_id, "fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /dashboards -> Sequence[models.DashboardBase]
def all_dashboards(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.DashboardBase]:
    """Get All Dashboards"""
    response = self.get(
        f"/dashboards",
        Sequence[models.DashboardBase],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard: sequence responses come back as lists.
    assert isinstance(response, list)
    return response
# GET /datagroups -> Sequence[models.Datagroup]
def all_datagroups(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> Sequence[models.Datagroup]:
    """Get All Datagroups"""
    response = self.get(
        f"/datagroups",
        Sequence[models.Datagroup],
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard: sequence responses come back as lists.
    assert isinstance(response, list)
    return response
# GET /dialect_info -> Sequence[models.DialectInfo]
def all_dialect_infos(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.DialectInfo]:
    """Get All Dialect Infos"""
    response = self.get(
        f"/dialect_info",
        Sequence[models.DialectInfo],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard: sequence responses come back as lists.
    assert isinstance(response, list)
    return response
# GET /folders -> Sequence[models.Folder]
def all_folders(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Folder]:
    """Get All Folders"""
    response = self.get(
        f"/folders",
        Sequence[models.Folder],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard: sequence responses come back as lists.
    assert isinstance(response, list)
    return response
# GET /projects/{project_id}/git_branches -> Sequence[models.GitBranch]
def all_git_branches(
    self,
    # Project Id
    project_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.GitBranch]:
    """Get All Git Branches"""
    response = self.get(
        f"/projects/{project_id}/git_branches",
        Sequence[models.GitBranch],
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard: sequence responses come back as lists.
    assert isinstance(response, list)
    return response
# GET /projects/{project_id}/git_connection_tests -> Sequence[models.GitConnectionTest]
def all_git_connection_tests(
    self,
    # Project Id
    project_id: str,
    # (Optional: leave blank for root project) The remote url for remote dependency to test.
    remote_url: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.GitConnectionTest]:
    """Get All Git Connection Tests"""
    response = self.get(
        f"/projects/{project_id}/git_connection_tests",
        Sequence[models.GitConnectionTest],
        query_params={"remote_url": remote_url},
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard: sequence responses come back as lists.
    assert isinstance(response, list)
    return response
# GET /groups/{group_id}/groups -> Sequence[models.Group]
def all_group_groups(
    self,
    # Id of group
    group_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Group]:
    """Get All Groups in Group"""
    response = self.get(
        f"/groups/{group_id}/groups",
        Sequence[models.Group],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard: sequence responses come back as lists.
    assert isinstance(response, list)
    return response
# GET /groups/{group_id}/users -> Sequence[models.User]
def all_group_users(
    self,
    # Id of group
    group_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    # Requested page.
    page: Optional[int] = None,
    # Results per page.
    per_page: Optional[int] = None,
    # Fields to sort by.
    sorts: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.User]:
    """Get All Users in Group"""
    # Pagination/sorting all travel as query parameters; None values are
    # passed through to the transport layer unchanged.
    response = self.get(
        f"/groups/{group_id}/users",
        Sequence[models.User],
        query_params={
            "fields": fields,
            "page": page,
            "per_page": per_page,
            "sorts": sorts,
        },
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /groups -> Sequence[models.Group]
def all_groups(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    # Requested page.
    page: Optional[int] = None,
    # Results per page.
    per_page: Optional[int] = None,
    # Fields to sort by.
    sorts: Optional[str] = None,
    # Optional of ids to get specific groups.
    ids: Optional[models.DelimSequence[int]] = None,
    # Id of content metadata to which groups must have access.
    content_metadata_id: Optional[int] = None,
    # Select only groups that either can/cannot be given access to content.
    can_add_to_content_metadata: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Group]:
    """Get All Groups"""
    # All filters travel as query parameters; None values are passed
    # through to the transport layer unchanged.
    response = self.get(
        f"/groups",
        Sequence[models.Group],
        query_params={
            "fields": fields,
            "page": page,
            "per_page": per_page,
            "sorts": sorts,
            "ids": ids,
            "content_metadata_id": content_metadata_id,
            "can_add_to_content_metadata": can_add_to_content_metadata,
        },
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /homepage_items -> Sequence[models.HomepageItem]
def all_homepage_items(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    # Fields to sort by.
    sorts: Optional[str] = None,
    # Filter to a specific homepage section
    homepage_section_id: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.HomepageItem]:
    """Get All Homepage Items"""
    response = self.get(
        f"/homepage_items",
        Sequence[models.HomepageItem],
        query_params={
            "fields": fields,
            "sorts": sorts,
            "homepage_section_id": homepage_section_id,
        },
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard: sequence responses come back as lists.
    assert isinstance(response, list)
    return response
# GET /homepage_sections -> Sequence[models.HomepageSection]
def all_homepage_sections(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    # Fields to sort by.
    sorts: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.HomepageSection]:
    """Get All Homepage sections"""
    response = self.get(
        f"/homepage_sections",
        Sequence[models.HomepageSection],
        query_params={"fields": fields, "sorts": sorts},
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard: sequence responses come back as lists.
    assert isinstance(response, list)
    return response
# GET /homepages -> Sequence[models.Homepage]
def all_homepages(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Homepage]:
    """Get All Homepages"""
    response = self.get(
        f"/homepages",
        Sequence[models.Homepage],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard: sequence responses come back as lists.
    assert isinstance(response, list)
    return response
# GET /integration_hubs -> Sequence[models.IntegrationHub]
def all_integration_hubs(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.IntegrationHub]:
    """Get All Integration Hubs"""
    response = self.get(
        f"/integration_hubs",
        Sequence[models.IntegrationHub],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    # Codegen-emitted runtime guard: sequence responses come back as lists.
    assert isinstance(response, list)
    return response
# GET /integrations -> Sequence[models.Integration]
def all_integrations(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    # Filter to a specific provider
    integration_hub_id: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Integration]:
    """Get All Integrations.

    GET /integrations — lists integrations, optionally restricted to
    ``fields`` and filtered to one integration hub.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/integrations",
        Sequence[models.Integration],
        query_params={"fields": fields, "integration_hub_id": integration_hub_id},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /legacy_features -> Sequence[models.LegacyFeature]
def all_legacy_features(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> Sequence[models.LegacyFeature]:
    """Get All Legacy Features.

    GET /legacy_features — lists all legacy feature flags.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/legacy_features",
        Sequence[models.LegacyFeature],
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /locales -> Sequence[models.Locale]
def all_locales(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> Sequence[models.Locale]:
    """Get All Locales.

    GET /locales — lists all supported locales.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/locales", Sequence[models.Locale], transport_options=transport_options
    )
    assert isinstance(response, list)
    return response
# GET /lookml_models -> Sequence[models.LookmlModel]
def all_lookml_models(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.LookmlModel]:
    """Get All LookML Models.

    GET /lookml_models — lists LookML models, optionally restricted
    to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/lookml_models",
        Sequence[models.LookmlModel],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /projects/{project_id}/lookml_tests -> Sequence[models.LookmlTest]
def all_lookml_tests(
    self,
    # Project Id
    project_id: str,
    # File Id
    file_id: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.LookmlTest]:
    """Get All LookML Tests for a project, optionally for one file."""
    path = f"/projects/{project_id}/lookml_tests"
    params = {"file_id": file_id}
    result = self.get(
        path,
        Sequence[models.LookmlTest],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# GET /looks -> Sequence[models.Look]
def all_looks(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Look]:
    """Get All Looks.

    GET /looks — lists looks visible to the caller, optionally
    restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/looks",
        Sequence[models.Look],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /model_sets -> Sequence[models.ModelSet]
def all_model_sets(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.ModelSet]:
    """Get All Model Sets.

    GET /model_sets — lists model sets, optionally restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/model_sets",
        Sequence[models.ModelSet],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /permission_sets -> Sequence[models.PermissionSet]
def all_permission_sets(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.PermissionSet]:
    """Get All Permission Sets.

    GET /permission_sets — lists permission sets, optionally
    restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/permission_sets",
        Sequence[models.PermissionSet],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /permissions -> Sequence[models.Permission]
def all_permissions(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> Sequence[models.Permission]:
    """Get All Permissions.

    GET /permissions — lists all available permissions.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/permissions",
        Sequence[models.Permission],
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /projects/{project_id}/files -> Sequence[models.ProjectFile]
def all_project_files(
    self,
    # Project Id
    project_id: str,
    # Requested fields
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.ProjectFile]:
    """Get All Project Files for the given project."""
    path = f"/projects/{project_id}/files"
    params = {"fields": fields}
    result = self.get(
        path,
        Sequence[models.ProjectFile],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# GET /projects -> Sequence[models.Project]
def all_projects(
    self,
    # Requested fields
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Project]:
    """Get All Projects.

    GET /projects — lists projects, optionally restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/projects",
        Sequence[models.Project],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /roles -> Sequence[models.Role]
def all_roles(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    # Optional list of ids to get specific roles.
    ids: Optional[models.DelimSequence[int]] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Role]:
    """Get All Roles.

    GET /roles — lists roles, optionally restricted to ``fields`` and
    filtered to the given ``ids``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/roles",
        Sequence[models.Role],
        query_params={"fields": fields, "ids": ids},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /running_queries -> Sequence[models.RunningQueries]
def all_running_queries(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> Sequence[models.RunningQueries]:
    """Get All Running Queries.

    GET /running_queries — lists currently running queries.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/running_queries",
        Sequence[models.RunningQueries],
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /scheduled_plans -> Sequence[models.ScheduledPlan]
def all_scheduled_plans(
    self,
    # Return scheduled plans belonging to this user_id. If not provided, returns scheduled plans owned by the caller.
    user_id: Optional[int] = None,
    # Comma delimited list of field names. If provided, only the fields specified will be included in the response
    fields: Optional[str] = None,
    # Return scheduled plans belonging to all users (caller needs see_schedules permission)
    all_users: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.ScheduledPlan]:
    """Get All Scheduled Plans.

    GET /scheduled_plans — lists scheduled plans for the caller, a
    specific ``user_id``, or ``all_users``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/scheduled_plans",
        Sequence[models.ScheduledPlan],
        query_params={"user_id": user_id, "fields": fields, "all_users": all_users},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /spaces -> Sequence[models.SpaceBase]
def all_spaces(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.SpaceBase]:
    """Get All Spaces.

    GET /spaces — lists spaces, optionally restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/spaces",
        Sequence[models.SpaceBase],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /themes -> Sequence[models.Theme]
def all_themes(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Theme]:
    """Get All Themes.

    GET /themes — lists themes, optionally restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/themes",
        Sequence[models.Theme],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /timezones -> Sequence[models.Timezone]
def all_timezones(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> Sequence[models.Timezone]:
    """Get All Timezones.

    GET /timezones — lists all supported timezones.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/timezones",
        Sequence[models.Timezone],
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /user_attributes/{user_attribute_id}/group_values -> Sequence[models.UserAttributeGroupValue]
def all_user_attribute_group_values(
    self,
    # Id of user attribute
    user_attribute_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.UserAttributeGroupValue]:
    """Get User Attribute Group Values for one user attribute."""
    path = f"/user_attributes/{user_attribute_id}/group_values"
    params = {"fields": fields}
    result = self.get(
        path,
        Sequence[models.UserAttributeGroupValue],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# GET /user_attributes -> Sequence[models.UserAttribute]
def all_user_attributes(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    # Fields to order the results by. Sortable fields include: name, label
    sorts: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.UserAttribute]:
    """Get All User Attributes.

    GET /user_attributes — lists user attributes, optionally restricted
    to ``fields`` and ordered by ``sorts``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/user_attributes",
        Sequence[models.UserAttribute],
        query_params={"fields": fields, "sorts": sorts},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /users/{user_id}/credentials_api3 -> Sequence[models.CredentialsApi3]
def all_user_credentials_api3s(
    self,
    # id of user
    user_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.CredentialsApi3]:
    """Get All API 3 Credentials for the given user."""
    path = f"/users/{user_id}/credentials_api3"
    params = {"fields": fields}
    result = self.get(
        path,
        Sequence[models.CredentialsApi3],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# GET /users/{user_id}/credentials_embed -> Sequence[models.CredentialsEmbed]
def all_user_credentials_embeds(
    self,
    # id of user
    user_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.CredentialsEmbed]:
    """Get All Embedding Credentials for the given user."""
    path = f"/users/{user_id}/credentials_embed"
    params = {"fields": fields}
    result = self.get(
        path,
        Sequence[models.CredentialsEmbed],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# GET /user_login_lockouts -> Sequence[models.UserLoginLockout]
def all_user_login_lockouts(
    self,
    # Include only these fields in the response
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.UserLoginLockout]:
    """Get All User Login Lockouts.

    GET /user_login_lockouts — lists user login lockouts, optionally
    restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/user_login_lockouts",
        Sequence[models.UserLoginLockout],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /users/{user_id}/sessions -> Sequence[models.Session]
def all_user_sessions(
    self,
    # id of user
    user_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Session]:
    """Get All Web Login Sessions for the given user."""
    path = f"/users/{user_id}/sessions"
    params = {"fields": fields}
    result = self.get(
        path,
        Sequence[models.Session],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# GET /users -> Sequence[models.User]
def all_users(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    # Requested page.
    page: Optional[int] = None,
    # Results per page.
    per_page: Optional[int] = None,
    # Fields to sort by.
    sorts: Optional[str] = None,
    # Optional list of ids to get specific users.
    ids: Optional[models.DelimSequence[int]] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.User]:
    """Get All Users.

    GET /users — lists users with optional paging (``page``,
    ``per_page``), sorting, field restriction, and id filtering.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/users",
        Sequence[models.User],
        query_params={
            "fields": fields,
            "page": page,
            "per_page": per_page,
            "sorts": sorts,
            "ids": ids,
        },
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /workspaces -> Sequence[models.Workspace]
def all_workspaces(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> Sequence[models.Workspace]:
    """Get All Workspaces.

    GET /workspaces — lists all workspaces.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/workspaces",
        Sequence[models.Workspace],
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /backup_configuration -> models.BackupConfiguration
def backup_configuration(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> models.BackupConfiguration:
    """Get Backup Configuration.

    GET /backup_configuration — returns the instance backup configuration.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/backup_configuration",
        models.BackupConfiguration,
        transport_options=transport_options,
    )
    assert isinstance(response, models.BackupConfiguration)
    return response
# GET /color_collections/{collection_id} -> models.ColorCollection
def color_collection(
    self,
    # Id of Color Collection
    collection_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ColorCollection:
    """Get Color Collection by ID."""
    path = f"/color_collections/{collection_id}"
    params = {"fields": fields}
    result = self.get(
        path,
        models.ColorCollection,
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(result, models.ColorCollection)
    return result
# GET /color_collections/custom -> Sequence[models.ColorCollection]
def color_collections_custom(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.ColorCollection]:
    """Get all Custom Color Collections.

    GET /color_collections/custom — lists custom color collections,
    optionally restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/color_collections/custom",
        Sequence[models.ColorCollection],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /color_collections/standard -> Sequence[models.ColorCollection]
def color_collections_standard(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.ColorCollection]:
    """Get all Standard Color Collections.

    GET /color_collections/standard — lists built-in color collections,
    optionally restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/color_collections/standard",
        Sequence[models.ColorCollection],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(response, list)
    return response
# GET /connections/{connection_name} -> models.DBConnection
def connection(
    self,
    # Name of connection
    connection_name: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DBConnection:
    """Get Connection by name."""
    path = f"/connections/{connection_name}"
    params = {"fields": fields}
    result = self.get(
        path,
        models.DBConnection,
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(result, models.DBConnection)
    return result
# GET /content_favorite/{content_favorite_id} -> models.ContentFavorite
def content_favorite(
    self,
    # Id of favorite content
    content_favorite_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ContentFavorite:
    """Get Favorite Content by id."""
    path = f"/content_favorite/{content_favorite_id}"
    params = {"fields": fields}
    result = self.get(
        path,
        models.ContentFavorite,
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(result, models.ContentFavorite)
    return result
# GET /content_metadata/{content_metadata_id} -> models.ContentMeta
def content_metadata(
    self,
    # Id of content metadata
    content_metadata_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ContentMeta:
    """Get Content Metadata by id."""
    path = f"/content_metadata/{content_metadata_id}"
    params = {"fields": fields}
    result = self.get(
        path,
        models.ContentMeta,
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(result, models.ContentMeta)
    return result
# GET /content_validation -> models.ContentValidation
def content_validation(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ContentValidation:
    """Validate Content.

    GET /content_validation — runs content validation and returns the
    results, optionally restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.get(
        "/content_validation",
        models.ContentValidation,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(response, models.ContentValidation)
    return response
# POST /color_collections -> models.ColorCollection
def create_color_collection(
    self,
    body: Optional[models.WriteColorCollection] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ColorCollection:
    """Create ColorCollection.

    POST /color_collections — creates a color collection from ``body``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/color_collections",
        models.ColorCollection,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.ColorCollection)
    return response
# POST /connections -> models.DBConnection
def create_connection(
    self,
    body: Optional[models.WriteDBConnection] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DBConnection:
    """Create Connection.

    POST /connections — creates a database connection from ``body``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/connections",
        models.DBConnection,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.DBConnection)
    return response
# POST /content_favorite -> models.ContentFavorite
def create_content_favorite(
    self,
    body: Optional[models.WriteContentFavorite] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ContentFavorite:
    """Create Favorite Content.

    POST /content_favorite — marks content as a favorite from ``body``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/content_favorite",
        models.ContentFavorite,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.ContentFavorite)
    return response
# POST /content_metadata_access -> models.ContentMetaGroupUser
def create_content_metadata_access(
    self,
    body: Optional[models.ContentMetaGroupUser] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ContentMetaGroupUser:
    """Create Content Metadata Access.

    POST /content_metadata_access — grants content metadata access
    described by ``body``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/content_metadata_access",
        models.ContentMetaGroupUser,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.ContentMetaGroupUser)
    return response
# POST /dashboards -> models.Dashboard
def create_dashboard(
    self,
    body: Optional[models.WriteDashboard] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Dashboard:
    """Create Dashboard.

    POST /dashboards — creates a dashboard from ``body``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/dashboards",
        models.Dashboard,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.Dashboard)
    return response
# POST /dashboard_elements -> models.DashboardElement
def create_dashboard_element(
    self,
    body: Optional[models.WriteDashboardElement] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DashboardElement:
    """Create DashboardElement.

    POST /dashboard_elements — creates a dashboard element from
    ``body``; the response is optionally restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/dashboard_elements",
        models.DashboardElement,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.DashboardElement)
    return response
# POST /dashboard_filters -> models.DashboardFilter
def create_dashboard_filter(
    self,
    body: models.WriteCreateDashboardFilter,
    # Requested fields
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DashboardFilter:
    """Create Dashboard Filter.

    POST /dashboard_filters — creates a dashboard filter from the
    required ``body``; response optionally restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/dashboard_filters",
        models.DashboardFilter,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.DashboardFilter)
    return response
# POST /dashboard_layouts -> models.DashboardLayout
def create_dashboard_layout(
    self,
    body: Optional[models.WriteDashboardLayout] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DashboardLayout:
    """Create DashboardLayout.

    POST /dashboard_layouts — creates a dashboard layout from ``body``;
    response optionally restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/dashboard_layouts",
        models.DashboardLayout,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.DashboardLayout)
    return response
# POST /render_tasks/dashboards/{dashboard_id}/{result_format} -> models.RenderTask
def create_dashboard_render_task(
    self,
    # Id of dashboard to render
    dashboard_id: int,
    # Output type: pdf, png, or jpg
    result_format: str,
    body: models.CreateDashboardRenderTask,
    # Output width in pixels
    width: int,
    # Output height in pixels
    height: int,
    # Requested fields.
    fields: Optional[str] = None,
    # Paper size for pdf
    pdf_paper_size: Optional[str] = None,
    # Whether to render pdf in landscape
    pdf_landscape: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.RenderTask:
    """Create Dashboard Render Task for the given dashboard and format."""
    path = f"/render_tasks/dashboards/{dashboard_id}/{result_format}"
    params = {
        "width": width,
        "height": height,
        "fields": fields,
        "pdf_paper_size": pdf_paper_size,
        "pdf_landscape": pdf_landscape,
    }
    task = self.post(
        path,
        models.RenderTask,
        query_params=params,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(task, models.RenderTask)
    return task
# POST /folders -> models.Folder
def create_folder(
    self,
    body: Optional[models.WriteFolder] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Folder:
    """Create Folder.

    POST /folders — creates a folder from ``body``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/folders", models.Folder, body=body, transport_options=transport_options
    )
    assert isinstance(response, models.Folder)
    return response
# POST /projects/{project_id}/git_branch -> models.GitBranch
def create_git_branch(
    self,
    # Project Id
    project_id: str,
    body: Optional[models.WriteGitBranch] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.GitBranch:
    """Checkout New Git Branch on the given project."""
    path = f"/projects/{project_id}/git_branch"
    branch = self.post(
        path,
        models.GitBranch,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(branch, models.GitBranch)
    return branch
# POST /projects/{project_id}/git/deploy_key -> str
def create_git_deploy_key(
    self,
    # Project Id
    project_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Create Deploy Key for the given project."""
    path = f"/projects/{project_id}/git/deploy_key"
    key = self.post(
        path,
        str,
        transport_options=transport_options,
    )
    assert isinstance(key, str)
    return key
# POST /groups -> models.Group
def create_group(
    self,
    body: Optional[models.WriteGroup] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Group:
    """Create Group.

    POST /groups — creates a group from ``body``; response optionally
    restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/groups",
        models.Group,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.Group)
    return response
# POST /homepages -> models.Homepage
def create_homepage(
    self,
    body: Optional[models.WriteHomepage] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Homepage:
    """Create Homepage.

    POST /homepages — creates a homepage from ``body``; response
    optionally restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/homepages",
        models.Homepage,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.Homepage)
    return response
# POST /homepage_items -> models.HomepageItem
def create_homepage_item(
    self,
    body: Optional[models.WriteHomepageItem] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.HomepageItem:
    """Create Homepage Item.

    POST /homepage_items — creates a homepage item from ``body``;
    response optionally restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/homepage_items",
        models.HomepageItem,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.HomepageItem)
    return response
# POST /homepage_sections -> models.HomepageSection
def create_homepage_section(
    self,
    body: Optional[models.WriteHomepageSection] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.HomepageSection:
    """Create Homepage section.

    POST /homepage_sections — creates a homepage section from ``body``;
    response optionally restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/homepage_sections",
        models.HomepageSection,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.HomepageSection)
    return response
# POST /integration_hubs -> models.IntegrationHub
def create_integration_hub(
    self,
    body: Optional[models.WriteIntegrationHub] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.IntegrationHub:
    """Create Integration Hub.

    POST /integration_hubs — creates an integration hub from ``body``;
    response optionally restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/integration_hubs",
        models.IntegrationHub,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.IntegrationHub)
    return response
# POST /looks -> models.LookWithQuery
def create_look(
    self,
    body: Optional[models.WriteLookWithQuery] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.LookWithQuery:
    """Create Look.

    POST /looks — creates a look from ``body``; response optionally
    restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/looks",
        models.LookWithQuery,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.LookWithQuery)
    return response
# POST /render_tasks/looks/{look_id}/{result_format} -> models.RenderTask
def create_look_render_task(
    self,
    # Id of look to render
    look_id: int,
    # Output type: png, or jpg
    result_format: str,
    # Output width in pixels
    width: int,
    # Output height in pixels
    height: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.RenderTask:
    """Create Look Render Task for the given look and format."""
    path = f"/render_tasks/looks/{look_id}/{result_format}"
    params = {"width": width, "height": height, "fields": fields}
    task = self.post(
        path,
        models.RenderTask,
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(task, models.RenderTask)
    return task
# POST /render_tasks/lookml_dashboards/{dashboard_id}/{result_format} -> models.RenderTask
def create_lookml_dashboard_render_task(
    self,
    # Id of lookml dashboard to render
    dashboard_id: str,
    # Output type: pdf, png, or jpg
    result_format: str,
    body: models.CreateDashboardRenderTask,
    # Output width in pixels
    width: int,
    # Output height in pixels
    height: int,
    # Requested fields.
    fields: Optional[str] = None,
    # Paper size for pdf
    pdf_paper_size: Optional[str] = None,
    # Whether to render pdf in landscape
    pdf_landscape: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.RenderTask:
    """Create Lookml Dashboard Render Task for the given dashboard and format."""
    path = f"/render_tasks/lookml_dashboards/{dashboard_id}/{result_format}"
    params = {
        "width": width,
        "height": height,
        "fields": fields,
        "pdf_paper_size": pdf_paper_size,
        "pdf_landscape": pdf_landscape,
    }
    task = self.post(
        path,
        models.RenderTask,
        query_params=params,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(task, models.RenderTask)
    return task
# POST /lookml_models -> models.LookmlModel
def create_lookml_model(
    self,
    body: Optional[models.WriteLookmlModel] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.LookmlModel:
    """Create LookML Model.

    POST /lookml_models — creates a LookML model from ``body``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/lookml_models",
        models.LookmlModel,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.LookmlModel)
    return response
# POST /merge_queries -> models.MergeQuery
def create_merge_query(
    self,
    body: Optional[models.WriteMergeQuery] = None,
    # Requested fields
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.MergeQuery:
    """Create Merge Query.

    POST /merge_queries — creates a merge query from ``body``; response
    optionally restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/merge_queries",
        models.MergeQuery,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.MergeQuery)
    return response
# POST /model_sets -> models.ModelSet
def create_model_set(
    self,
    body: Optional[models.WriteModelSet] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ModelSet:
    """Create Model Set.

    POST /model_sets — creates a model set from ``body``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/model_sets",
        models.ModelSet,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.ModelSet)
    return response
# POST /oidc_test_configs -> models.OIDCConfig
def create_oidc_test_config(
    self,
    body: models.WriteOIDCConfig,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.OIDCConfig:
    """Create OIDC Test Configuration.

    POST /oidc_test_configs — creates an OIDC test configuration from
    the required ``body``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/oidc_test_configs",
        models.OIDCConfig,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.OIDCConfig)
    return response
# POST /permission_sets -> models.PermissionSet
def create_permission_set(
    self,
    body: Optional[models.WritePermissionSet] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.PermissionSet:
    """Create Permission Set.

    POST /permission_sets — creates a permission set from ``body``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/permission_sets",
        models.PermissionSet,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.PermissionSet)
    return response
# POST /projects -> models.Project
def create_project(
    self,
    body: Optional[models.WriteProject] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Project:
    """Create Project.

    POST /projects — creates a project from ``body``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/projects", models.Project, body=body, transport_options=transport_options
    )
    assert isinstance(response, models.Project)
    return response
# POST /queries -> models.Query
def create_query(
    self,
    body: Optional[models.WriteQuery] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Query:
    """Create Query.

    POST /queries — creates a query from ``body``; response optionally
    restricted to ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/queries",
        models.Query,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.Query)
    return response
# POST /render_tasks/queries/{query_id}/{result_format} -> models.RenderTask
def create_query_render_task(
    self,
    # Id of the query to render
    query_id: int,
    # Output type: png or jpg
    result_format: str,
    # Output width in pixels
    width: int,
    # Output height in pixels
    height: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.RenderTask:
    """Create Query Render Task for the given query and format."""
    path = f"/render_tasks/queries/{query_id}/{result_format}"
    params = {"width": width, "height": height, "fields": fields}
    task = self.post(
        path,
        models.RenderTask,
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(task, models.RenderTask)
    return task
# POST /query_tasks -> models.QueryTask
def create_query_task(
    self,
    body: models.WriteCreateQueryTask,
    # Row limit (may override the limit in the saved query).
    limit: Optional[int] = None,
    # Apply model-specified formatting to each result.
    apply_formatting: Optional[bool] = None,
    # Apply visualization options to results.
    apply_vis: Optional[bool] = None,
    # Get results from cache if available.
    cache: Optional[bool] = None,
    # Render width for image formats.
    image_width: Optional[int] = None,
    # Render height for image formats.
    image_height: Optional[int] = None,
    # Generate drill links (only applicable to 'json_detail' format.
    generate_drill_links: Optional[bool] = None,
    # Force use of production models even if the user is in development mode.
    force_production: Optional[bool] = None,
    # Retrieve any results from cache even if the results have expired.
    cache_only: Optional[bool] = None,
    # Prefix to use for drill links (url encoded).
    path_prefix: Optional[str] = None,
    # Rebuild PDTS used in query.
    rebuild_pdts: Optional[bool] = None,
    # Perform table calculations on query results
    server_table_calcs: Optional[bool] = None,
    # Requested fields
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.QueryTask:
    """Run Query Async.

    POST /query_tasks — starts an asynchronous query task described by
    the required ``body``; the remaining keyword arguments tune caching,
    rendering, drill-link generation, and the returned ``fields``.
    """
    # Static endpoint: plain literal (was a placeholder-free f-string, F541).
    response = self.post(
        "/query_tasks",
        models.QueryTask,
        query_params={
            "limit": limit,
            "apply_formatting": apply_formatting,
            "apply_vis": apply_vis,
            "cache": cache,
            "image_width": image_width,
            "image_height": image_height,
            "generate_drill_links": generate_drill_links,
            "force_production": force_production,
            "cache_only": cache_only,
            "path_prefix": path_prefix,
            "rebuild_pdts": rebuild_pdts,
            "server_table_calcs": server_table_calcs,
            "fields": fields,
        },
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.QueryTask)
    return response
# POST /roles -> models.Role
def create_role(
    self,
    body: Optional[models.WriteRole] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Role:
    """Create Role: POST /roles, returns the created Role."""
    # Constant path: plain literal (original was a placeholder-free f-string, lint F541).
    response = self.post(
        "/roles", models.Role, body=body, transport_options=transport_options
    )
    assert isinstance(response, models.Role)
    return response
# POST /saml_test_configs -> models.SamlConfig
def create_saml_test_config(
    self,
    body: models.WriteSamlConfig,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.SamlConfig:
    """Create SAML Test Configuration: POST /saml_test_configs."""
    # Constant path: plain literal (original was a placeholder-free f-string, lint F541).
    response = self.post(
        "/saml_test_configs",
        models.SamlConfig,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.SamlConfig)
    return response
# POST /scheduled_plans -> models.ScheduledPlan
def create_scheduled_plan(
    self,
    body: Optional[models.WriteScheduledPlan] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ScheduledPlan:
    """Create Scheduled Plan: POST /scheduled_plans."""
    # Constant path: plain literal (original was a placeholder-free f-string, lint F541).
    response = self.post(
        "/scheduled_plans",
        models.ScheduledPlan,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.ScheduledPlan)
    return response
# POST /spaces -> models.Space
def create_space(
    self,
    body: models.WriteSpace,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Space:
    """Create Space: POST /spaces, returns the created Space."""
    # Constant path: plain literal (original was a placeholder-free f-string, lint F541).
    response = self.post(
        "/spaces", models.Space, body=body, transport_options=transport_options
    )
    assert isinstance(response, models.Space)
    return response
# POST /sql_queries -> models.SqlQuery
def create_sql_query(
    self,
    body: models.WriteSqlQueryCreate,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.SqlQuery:
    """Create SQL Runner Query: POST /sql_queries."""
    # Constant path: plain literal (original was a placeholder-free f-string, lint F541).
    response = self.post(
        "/sql_queries",
        models.SqlQuery,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.SqlQuery)
    return response
# POST /themes -> models.Theme
def create_theme(
    self,
    body: Optional[models.WriteTheme] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Theme:
    """Create Theme: POST /themes, returns the created Theme."""
    # Constant path: plain literal (original was a placeholder-free f-string, lint F541).
    response = self.post(
        "/themes", models.Theme, body=body, transport_options=transport_options
    )
    assert isinstance(response, models.Theme)
    return response
# POST /users -> models.User
def create_user(
    self,
    body: Optional[models.WriteUser] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.User:
    """Create User: POST /users, returns the created User."""
    # Constant path: plain literal (original was a placeholder-free f-string, lint F541).
    response = self.post(
        "/users",
        models.User,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.User)
    return response
# POST /user_attributes -> models.UserAttribute
def create_user_attribute(
    self,
    body: Optional[models.WriteUserAttribute] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.UserAttribute:
    """Create User Attribute: POST /user_attributes."""
    # Constant path: plain literal (original was a placeholder-free f-string, lint F541).
    response = self.post(
        "/user_attributes",
        models.UserAttribute,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.UserAttribute)
    return response
# POST /users/{user_id}/credentials_api3 -> models.CredentialsApi3
def create_user_credentials_api3(
    self,
    # id of user
    user_id: int,
    body: Optional[models.CredentialsApi3] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.CredentialsApi3:
    """Create an API 3 credential for the given user."""
    path = f"/users/{user_id}/credentials_api3"
    result = self.post(
        path,
        models.CredentialsApi3,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.CredentialsApi3)
    return result
# POST /users/{user_id}/credentials_email -> models.CredentialsEmail
def create_user_credentials_email(
    self,
    # id of user
    user_id: int,
    body: Optional[models.WriteCredentialsEmail] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.CredentialsEmail:
    """Create an email/password credential for the given user."""
    path = f"/users/{user_id}/credentials_email"
    result = self.post(
        path,
        models.CredentialsEmail,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.CredentialsEmail)
    return result
# POST /users/{user_id}/credentials_email/password_reset -> models.CredentialsEmail
def create_user_credentials_email_password_reset(
    self,
    # Id of user
    user_id: int,
    # Expiring token.
    expires: Optional[bool] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.CredentialsEmail:
    """Create a password-reset token for the given user's email credential."""
    path = f"/users/{user_id}/credentials_email/password_reset"
    params = {"expires": expires, "fields": fields}
    result = self.post(
        path,
        models.CredentialsEmail,
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(result, models.CredentialsEmail)
    return result
# POST /users/{user_id}/credentials_totp -> models.CredentialsTotp
def create_user_credentials_totp(
    self,
    # id of user
    user_id: int,
    body: Optional[models.CredentialsTotp] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.CredentialsTotp:
    """Create a two-factor (TOTP) credential for the given user."""
    path = f"/users/{user_id}/credentials_totp"
    result = self.post(
        path,
        models.CredentialsTotp,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.CredentialsTotp)
    return result
# GET /dashboards/{dashboard_id} -> models.Dashboard
def dashboard(
    self,
    # Id of dashboard
    dashboard_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Dashboard:
    """Fetch a single Dashboard by id."""
    path = f"/dashboards/{dashboard_id}"
    result = self.get(
        path,
        models.Dashboard,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.Dashboard)
    return result
# GET /dashboards/{dashboard_id}/dashboard_elements -> Sequence[models.DashboardElement]
def dashboard_dashboard_elements(
    self,
    # Id of dashboard
    dashboard_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.DashboardElement]:
    """List every DashboardElement belonging to a dashboard."""
    path = f"/dashboards/{dashboard_id}/dashboard_elements"
    result = self.get(
        path,
        Sequence[models.DashboardElement],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# GET /dashboards/{dashboard_id}/dashboard_filters -> Sequence[models.DashboardFilter]
def dashboard_dashboard_filters(
    self,
    # Id of dashboard
    dashboard_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.DashboardFilter]:
    """List every filter defined on a dashboard."""
    path = f"/dashboards/{dashboard_id}/dashboard_filters"
    result = self.get(
        path,
        Sequence[models.DashboardFilter],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# GET /dashboards/{dashboard_id}/dashboard_layouts -> Sequence[models.DashboardLayout]
def dashboard_dashboard_layouts(
    self,
    # Id of dashboard
    dashboard_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.DashboardLayout]:
    """List every DashboardLayout belonging to a dashboard."""
    path = f"/dashboards/{dashboard_id}/dashboard_layouts"
    result = self.get(
        path,
        Sequence[models.DashboardLayout],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# GET /dashboard_elements/{dashboard_element_id} -> models.DashboardElement
def dashboard_element(
    self,
    # Id of dashboard element
    dashboard_element_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DashboardElement:
    """Fetch a single DashboardElement by id."""
    path = f"/dashboard_elements/{dashboard_element_id}"
    result = self.get(
        path,
        models.DashboardElement,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.DashboardElement)
    return result
# GET /dashboard_filters/{dashboard_filter_id} -> models.DashboardFilter
def dashboard_filter(
    self,
    # Id of dashboard filters
    dashboard_filter_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DashboardFilter:
    """Fetch a single DashboardFilter by id."""
    path = f"/dashboard_filters/{dashboard_filter_id}"
    result = self.get(
        path,
        models.DashboardFilter,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.DashboardFilter)
    return result
# GET /dashboard_layouts/{dashboard_layout_id} -> models.DashboardLayout
def dashboard_layout(
    self,
    # Id of dashboard layouts
    dashboard_layout_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DashboardLayout:
    """Fetch a single DashboardLayout by id."""
    path = f"/dashboard_layouts/{dashboard_layout_id}"
    result = self.get(
        path,
        models.DashboardLayout,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.DashboardLayout)
    return result
# GET /dashboard_layout_components/{dashboard_layout_component_id} -> models.DashboardLayoutComponent
def dashboard_layout_component(
    self,
    # Id of dashboard layout component
    dashboard_layout_component_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DashboardLayoutComponent:
    """Fetch a single DashboardLayoutComponent by id."""
    path = f"/dashboard_layout_components/{dashboard_layout_component_id}"
    result = self.get(
        path,
        models.DashboardLayoutComponent,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.DashboardLayoutComponent)
    return result
# GET /dashboard_layouts/{dashboard_layout_id}/dashboard_layout_components -> Sequence[models.DashboardLayoutComponent]
def dashboard_layout_dashboard_layout_components(
    self,
    # Id of dashboard layout component
    dashboard_layout_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.DashboardLayoutComponent]:
    """List every component belonging to a dashboard layout."""
    path = f"/dashboard_layouts/{dashboard_layout_id}/dashboard_layout_components"
    result = self.get(
        path,
        Sequence[models.DashboardLayoutComponent],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# GET /datagroups/{datagroup_id} -> models.Datagroup
def datagroup(
    self,
    # ID of datagroup.
    datagroup_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Datagroup:
    """Fetch a single Datagroup by id."""
    path = f"/datagroups/{datagroup_id}"
    result = self.get(path, models.Datagroup, transport_options=transport_options)
    assert isinstance(result, models.Datagroup)
    return result
# GET /color_collections/default -> models.ColorCollection
def default_color_collection(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> models.ColorCollection:
    """Get Default Color Collection: GET /color_collections/default."""
    # Constant path: plain literal (original was a placeholder-free f-string, lint F541).
    response = self.get(
        "/color_collections/default",
        models.ColorCollection,
        transport_options=transport_options,
    )
    assert isinstance(response, models.ColorCollection)
    return response
# GET /themes/default -> models.Theme
def default_theme(
    self,
    # Timestamp representing the target datetime for the active period. Defaults to 'now'
    ts: Optional[datetime.datetime] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Theme:
    """Get Default Theme: GET /themes/default, optionally at a point in time `ts`."""
    # Constant path: plain literal (original was a placeholder-free f-string, lint F541).
    response = self.get(
        "/themes/default",
        models.Theme,
        query_params={"ts": ts},
        transport_options=transport_options,
    )
    assert isinstance(response, models.Theme)
    return response
# DELETE /color_collections/{collection_id} -> str
def delete_color_collection(
    self,
    # Id of Color Collection
    collection_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the color collection with the given id."""
    path = f"/color_collections/{collection_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /connections/{connection_name} -> str
def delete_connection(
    self,
    # Name of connection
    connection_name: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the connection with the given name."""
    path = f"/connections/{connection_name}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /connections/{connection_name}/connection_override/{override_context} -> str
def delete_connection_override(
    self,
    # Name of connection
    connection_name: str,
    # Context of connection override
    override_context: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove a connection override for the given connection and context."""
    path = f"/connections/{connection_name}/connection_override/{override_context}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /content_favorite/{content_favorite_id} -> str
def delete_content_favorite(
    self,
    # Id of favorite content
    content_favorite_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the favorite-content record with the given id."""
    path = f"/content_favorite/{content_favorite_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /content_metadata_access/{content_metadata_access_id} -> str
def delete_content_metadata_access(
    self,
    # Id of content metadata access
    content_metadata_access_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the content-metadata-access record with the given id."""
    path = f"/content_metadata_access/{content_metadata_access_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /dashboards/{dashboard_id} -> str
def delete_dashboard(
    self,
    # Id of dashboard
    dashboard_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the dashboard with the given id."""
    path = f"/dashboards/{dashboard_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /dashboard_elements/{dashboard_element_id} -> str
def delete_dashboard_element(
    self,
    # Id of dashboard element
    dashboard_element_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the dashboard element with the given id."""
    path = f"/dashboard_elements/{dashboard_element_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /dashboard_filters/{dashboard_filter_id} -> str
def delete_dashboard_filter(
    self,
    # Id of dashboard filter
    dashboard_filter_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the dashboard filter with the given id."""
    path = f"/dashboard_filters/{dashboard_filter_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /dashboard_layouts/{dashboard_layout_id} -> str
def delete_dashboard_layout(
    self,
    # Id of dashboard layout
    dashboard_layout_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the dashboard layout with the given id."""
    path = f"/dashboard_layouts/{dashboard_layout_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /folders/{folder_id} -> str
def delete_folder(
    self,
    # Id of folder
    folder_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the folder with the given id."""
    path = f"/folders/{folder_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /projects/{project_id}/git_branch/{branch_name} -> str
def delete_git_branch(
    self,
    # Project Id
    project_id: str,
    # Branch Name
    branch_name: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the named git branch from the given project."""
    path = f"/projects/{project_id}/git_branch/{branch_name}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /groups/{group_id} -> str
def delete_group(
    self,
    # Id of group
    group_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the group with the given id."""
    path = f"/groups/{group_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /groups/{group_id}/groups/{deleting_group_id} -> None
def delete_group_from_group(
    self,
    # Id of group
    group_id: int,
    # Id of group to delete
    deleting_group_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> None:
    """Remove a nested group from its parent group."""
    path = f"/groups/{group_id}/groups/{deleting_group_id}"
    result = self.delete(path, None, transport_options=transport_options)
    assert result is None
    return result
# DELETE /groups/{group_id}/users/{user_id} -> None
def delete_group_user(
    self,
    # Id of group
    group_id: int,
    # Id of user to remove from group
    user_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> None:
    """Remove a user from a group's membership."""
    path = f"/groups/{group_id}/users/{user_id}"
    result = self.delete(path, None, transport_options=transport_options)
    assert result is None
    return result
# DELETE /homepages/{homepage_id} -> str
def delete_homepage(
    self,
    # Id of homepage
    homepage_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the homepage with the given id."""
    path = f"/homepages/{homepage_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /homepage_items/{homepage_item_id} -> str
def delete_homepage_item(
    self,
    # Id of homepage_item
    homepage_item_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the homepage item with the given id."""
    path = f"/homepage_items/{homepage_item_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /homepage_sections/{homepage_section_id} -> str
def delete_homepage_section(
    self,
    # Id of homepage_section
    homepage_section_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the homepage section with the given id."""
    path = f"/homepage_sections/{homepage_section_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /integration_hubs/{integration_hub_id} -> str
def delete_integration_hub(
    self,
    # Id of integration_hub
    integration_hub_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the integration hub with the given id."""
    path = f"/integration_hubs/{integration_hub_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /looks/{look_id} -> str
def delete_look(
    self,
    # Id of look
    look_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the look with the given id."""
    path = f"/looks/{look_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /lookml_models/{lookml_model_name} -> str
def delete_lookml_model(
    self,
    # Name of lookml model.
    lookml_model_name: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the LookML model with the given name."""
    path = f"/lookml_models/{lookml_model_name}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /model_sets/{model_set_id} -> str
def delete_model_set(
    self,
    # id of model set
    model_set_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the model set with the given id."""
    path = f"/model_sets/{model_set_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /oidc_test_configs/{test_slug} -> str
def delete_oidc_test_config(
    self,
    # Slug of test config
    test_slug: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the OIDC test configuration with the given slug."""
    path = f"/oidc_test_configs/{test_slug}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /permission_sets/{permission_set_id} -> str
def delete_permission_set(
    self,
    # Id of permission set
    permission_set_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the permission set with the given id."""
    path = f"/permission_sets/{permission_set_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /projects/{root_project_id}/credential/{credential_id} -> str
def delete_repository_credential(
    self,
    # Root Project Id
    root_project_id: str,
    # Credential Id
    credential_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove a repository credential from the given root project."""
    path = f"/projects/{root_project_id}/credential/{credential_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /roles/{role_id} -> str
def delete_role(
    self,
    # id of role
    role_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the role with the given id."""
    path = f"/roles/{role_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /saml_test_configs/{test_slug} -> str
def delete_saml_test_config(
    self,
    # Slug of test config
    test_slug: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the SAML test configuration with the given slug."""
    path = f"/saml_test_configs/{test_slug}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /scheduled_plans/{scheduled_plan_id} -> str
def delete_scheduled_plan(
    self,
    # Scheduled Plan Id
    scheduled_plan_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the scheduled plan with the given id."""
    path = f"/scheduled_plans/{scheduled_plan_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /spaces/{space_id} -> str
def delete_space(
    self,
    # Id of space
    space_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the space with the given id."""
    path = f"/spaces/{space_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /themes/{theme_id} -> str
def delete_theme(
    self,
    # Id of theme
    theme_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the theme with the given id."""
    path = f"/themes/{theme_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /users/{user_id} -> str
def delete_user(
    self,
    # Id of user
    user_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the user with the given id."""
    path = f"/users/{user_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /user_attributes/{user_attribute_id} -> str
def delete_user_attribute(
    self,
    # Id of user_attribute
    user_attribute_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the user attribute with the given id."""
    path = f"/user_attributes/{user_attribute_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /groups/{group_id}/attribute_values/{user_attribute_id} -> None
def delete_user_attribute_group_value(
    self,
    # Id of group
    group_id: int,
    # Id of user attribute
    user_attribute_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> None:
    """Remove a user-attribute value set on a group."""
    path = f"/groups/{group_id}/attribute_values/{user_attribute_id}"
    result = self.delete(path, None, transport_options=transport_options)
    assert result is None
    return result
# DELETE /users/{user_id}/attribute_values/{user_attribute_id} -> None
def delete_user_attribute_user_value(
    self,
    # Id of user
    user_id: int,
    # Id of user attribute
    user_attribute_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> None:
    """Remove a user-attribute value set on a user."""
    path = f"/users/{user_id}/attribute_values/{user_attribute_id}"
    result = self.delete(path, None, transport_options=transport_options)
    assert result is None
    return result
# DELETE /users/{user_id}/credentials_api3/{credentials_api3_id} -> str
def delete_user_credentials_api3(
    self,
    # id of user
    user_id: int,
    # id of API 3 Credential
    credentials_api3_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove an API 3 credential from the given user."""
    path = f"/users/{user_id}/credentials_api3/{credentials_api3_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /users/{user_id}/credentials_email -> str
def delete_user_credentials_email(
    self,
    # id of user
    user_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the email/password credential from the given user."""
    path = f"/users/{user_id}/credentials_email"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /users/{user_id}/credentials_embed/{credentials_embed_id} -> str
def delete_user_credentials_embed(
    self,
    # id of user
    user_id: int,
    # id of Embedding Credential
    credentials_embed_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove an embedding credential from the given user."""
    path = f"/users/{user_id}/credentials_embed/{credentials_embed_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /users/{user_id}/credentials_google -> str
def delete_user_credentials_google(
    self,
    # id of user
    user_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the Google auth credential from the given user."""
    path = f"/users/{user_id}/credentials_google"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /users/{user_id}/credentials_ldap -> str
def delete_user_credentials_ldap(
    self,
    # id of user
    user_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the LDAP credential from the given user."""
    path = f"/users/{user_id}/credentials_ldap"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /users/{user_id}/credentials_looker_openid -> str
def delete_user_credentials_looker_openid(
    self,
    # id of user
    user_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the Looker OpenId credential from the given user."""
    path = f"/users/{user_id}/credentials_looker_openid"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /users/{user_id}/credentials_oidc -> str
def delete_user_credentials_oidc(
    self,
    # id of user
    user_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the OIDC auth credential from the given user."""
    path = f"/users/{user_id}/credentials_oidc"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /users/{user_id}/credentials_saml -> str
def delete_user_credentials_saml(
    self,
    # id of user
    user_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the SAML auth credential from the given user."""
    path = f"/users/{user_id}/credentials_saml"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /users/{user_id}/credentials_totp -> str
def delete_user_credentials_totp(
    self,
    # id of user
    user_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Remove the two-factor (TOTP) credential from the given user."""
    path = f"/users/{user_id}/credentials_totp"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /user_login_lockout/{key} -> str
def delete_user_login_lockout(
    self,
    # The key associated with the locked user
    key: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Clear the login lockout identified by the given key."""
    path = f"/user_login_lockout/{key}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# DELETE /users/{user_id}/sessions/{session_id} -> str
def delete_user_session(
    self,
    # id of user
    user_id: int,
    # id of Web Login Session
    session_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Terminate a web login session belonging to the given user."""
    path = f"/users/{user_id}/sessions/{session_id}"
    result = self.delete(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# POST /projects/{project_id}/deploy_to_production -> str
def deploy_to_production(
    self,
    # Id of project
    project_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Deploy the given project to production."""
    path = f"/projects/{project_id}/deploy_to_production"
    result = self.post(path, str, transport_options=transport_options)
    assert isinstance(result, str)
    return result
# POST /fetch_and_parse_saml_idp_metadata -> models.SamlMetadataParseResult
def fetch_and_parse_saml_idp_metadata(
    self, body: str, transport_options: Optional[transport.TransportSettings] = None
) -> models.SamlMetadataParseResult:
    """Fetch the IdP metadata from the URL in body and parse it."""
    resp = self.post(
        "/fetch_and_parse_saml_idp_metadata",
        models.SamlMetadataParseResult,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(resp, models.SamlMetadataParseResult)
    return resp
# POST /integrations/{integration_id}/form -> models.DataActionForm
def fetch_integration_form(
    self,
    # Id of the integration
    integration_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DataActionForm:
    """Fetch the remote form for the given integration."""
    path = f"/integrations/{integration_id}/form"
    resp = self.post(path, models.DataActionForm, transport_options=transport_options)
    assert isinstance(resp, models.DataActionForm)
    return resp
# POST /data_actions/form -> models.DataActionForm
def fetch_remote_data_action_form(
    self,
    body: MutableMapping[str, str],
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DataActionForm:
    """Fetch the form definition for a remote data action."""
    resp = self.post(
        "/data_actions/form",
        models.DataActionForm,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(resp, models.DataActionForm)
    return resp
# GET /projects/{project_id}/git_branch/{branch_name} -> models.GitBranch
def find_git_branch(
    self,
    # Id of the project
    project_id: str,
    # Name of the branch to look up
    branch_name: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.GitBranch:
    """Look up a specific git branch within the project."""
    path = f"/projects/{project_id}/git_branch/{branch_name}"
    resp = self.get(path, models.GitBranch, transport_options=transport_options)
    assert isinstance(resp, models.GitBranch)
    return resp
# GET /folders/{folder_id} -> models.Folder
def folder(
    self,
    # Id of the folder
    folder_id: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Folder:
    """Retrieve a single folder by its id."""
    path = f"/folders/{folder_id}"
    resp = self.get(
        path,
        models.Folder,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.Folder)
    return resp
# GET /folders/{folder_id}/ancestors -> Sequence[models.Folder]
def folder_ancestors(
    self,
    # Id of the folder
    folder_id: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Folder]:
    """List the ancestor folders of the given folder."""
    path = f"/folders/{folder_id}/ancestors"
    resp = self.get(
        path,
        Sequence[models.Folder],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /folders/{folder_id}/children -> Sequence[models.Space]
def folder_children(
    self,
    # Id of the folder
    folder_id: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    # Page number to return
    page: Optional[int] = None,
    # Number of results per page
    per_page: Optional[int] = None,
    # Fields to sort by
    sorts: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Space]:
    """List the child spaces of the given folder, with paging and sorting."""
    params = {
        "fields": fields,
        "page": page,
        "per_page": per_page,
        "sorts": sorts,
    }
    resp = self.get(
        f"/folders/{folder_id}/children",
        Sequence[models.Space],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /folders/{folder_id}/children/search -> Sequence[models.Folder]
def folder_children_search(
    self,
    # Id of the folder
    folder_id: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    # Fields to sort by
    sorts: Optional[str] = None,
    # Folder name to match
    name: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Folder]:
    """Search the child folders of the given folder by name."""
    params = {"fields": fields, "sorts": sorts, "name": name}
    resp = self.get(
        f"/folders/{folder_id}/children/search",
        Sequence[models.Folder],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /folders/{folder_id}/dashboards -> Sequence[models.Dashboard]
def folder_dashboards(
    self,
    # Id of the folder
    folder_id: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Dashboard]:
    """List the dashboards contained in the given folder."""
    path = f"/folders/{folder_id}/dashboards"
    resp = self.get(
        path,
        Sequence[models.Dashboard],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /folders/{folder_id}/looks -> Sequence[models.LookWithQuery]
def folder_looks(
    self,
    # Id of the folder
    folder_id: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.LookWithQuery]:
    """List the looks contained in the given folder."""
    path = f"/folders/{folder_id}/looks"
    resp = self.get(
        path,
        Sequence[models.LookWithQuery],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /folders/{folder_id}/parent -> models.Folder
def folder_parent(
    self,
    # Id of the folder
    folder_id: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Folder:
    """Retrieve the parent of the given folder."""
    path = f"/folders/{folder_id}/parent"
    resp = self.get(
        path,
        models.Folder,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.Folder)
    return resp
# PUT /password_config/force_password_reset_at_next_login_for_all_users -> str
def force_password_reset_at_next_login_for_all_users(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> str:
    """Force every user to reset their password at next login."""
    resp = self.put(
        "/password_config/force_password_reset_at_next_login_for_all_users",
        str,
        transport_options=transport_options,
    )
    assert isinstance(resp, str)
    return resp
# GET /projects/{root_project_id}/credentials -> Sequence[models.RepositoryCredential]
def get_all_repository_credentials(
    self,
    # Id of the root project
    root_project_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.RepositoryCredential]:
    """List all repository credentials for the given root project."""
    path = f"/projects/{root_project_id}/credentials"
    resp = self.get(
        path,
        Sequence[models.RepositoryCredential],
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /projects/{project_id}/git_branch -> models.GitBranch
def git_branch(
    self,
    # Id of the project
    project_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.GitBranch:
    """Retrieve the project's currently active git branch."""
    path = f"/projects/{project_id}/git_branch"
    resp = self.get(path, models.GitBranch, transport_options=transport_options)
    assert isinstance(resp, models.GitBranch)
    return resp
# GET /projects/{project_id}/git/deploy_key -> str
def git_deploy_key(
    self,
    # Id of the project
    project_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Retrieve the project's git deploy key."""
    path = f"/projects/{project_id}/git/deploy_key"
    resp = self.get(path, str, transport_options=transport_options)
    assert isinstance(resp, str)
    return resp
# GET /groups/{group_id} -> models.Group
def group(
    self,
    # Id of the group
    group_id: int,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Group:
    """Retrieve a single group by its id."""
    path = f"/groups/{group_id}"
    resp = self.get(
        path,
        models.Group,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.Group)
    return resp
# GET /homepages/{homepage_id} -> models.Homepage
def homepage(
    self,
    # Id of the homepage
    homepage_id: int,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Homepage:
    """Retrieve a single homepage by its id."""
    path = f"/homepages/{homepage_id}"
    resp = self.get(
        path,
        models.Homepage,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.Homepage)
    return resp
# GET /homepage_items/{homepage_item_id} -> models.HomepageItem
def homepage_item(
    self,
    # Id of the homepage item
    homepage_item_id: int,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.HomepageItem:
    """Retrieve a single homepage item by its id."""
    path = f"/homepage_items/{homepage_item_id}"
    resp = self.get(
        path,
        models.HomepageItem,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.HomepageItem)
    return resp
# GET /homepage_sections/{homepage_section_id} -> models.HomepageSection
def homepage_section(
    self,
    # Id of the homepage section
    homepage_section_id: int,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.HomepageSection:
    """Retrieve a single homepage section by its id."""
    path = f"/homepage_sections/{homepage_section_id}"
    resp = self.get(
        path,
        models.HomepageSection,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.HomepageSection)
    return resp
# POST /dashboards/{lookml_dashboard_id}/import/{space_id} -> models.Dashboard
def import_lookml_dashboard(
    self,
    # Id of the LookML dashboard to import
    lookml_dashboard_id: str,
    # Id of the destination space
    space_id: str,
    body: Optional[models.WriteDashboard] = None,
    # When true and the dashboard is localized, export raw (unlocalized) keys
    raw_locale: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Dashboard:
    """Import a LookML dashboard into a space as a user-defined dashboard."""
    path = f"/dashboards/{lookml_dashboard_id}/import/{space_id}"
    resp = self.post(
        path,
        models.Dashboard,
        query_params={"raw_locale": raw_locale},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(resp, models.Dashboard)
    return resp
# GET /integrations/{integration_id} -> models.Integration
def integration(
    self,
    # Id of the integration
    integration_id: int,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Integration:
    """Retrieve a single integration by its id."""
    path = f"/integrations/{integration_id}"
    resp = self.get(
        path,
        models.Integration,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.Integration)
    return resp
# GET /integration_hubs/{integration_hub_id} -> models.IntegrationHub
def integration_hub(
    self,
    # Id of the integration hub
    integration_hub_id: int,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.IntegrationHub:
    """Retrieve a single integration hub by its id."""
    path = f"/integration_hubs/{integration_hub_id}"
    resp = self.get(
        path,
        models.IntegrationHub,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.IntegrationHub)
    return resp
# GET /internal_help_resources_enabled -> models.InternalHelpResources
def internal_help_resources(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> models.InternalHelpResources:
    """Fetch whether internal help resources are enabled."""
    resp = self.get(
        "/internal_help_resources_enabled",
        models.InternalHelpResources,
        transport_options=transport_options,
    )
    assert isinstance(resp, models.InternalHelpResources)
    return resp
# GET /internal_help_resources_content -> models.InternalHelpResourcesContent
def internal_help_resources_content(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> models.InternalHelpResourcesContent:
    """Fetch the content of the internal help resources."""
    resp = self.get(
        "/internal_help_resources_content",
        models.InternalHelpResourcesContent,
        transport_options=transport_options,
    )
    assert isinstance(resp, models.InternalHelpResourcesContent)
    return resp
# DELETE /running_queries/{query_task_id} -> str
def kill_query(
    self,
    # Id of the query task to kill
    query_task_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Kill a currently running query task."""
    path = f"/running_queries/{query_task_id}"
    resp = self.delete(path, str, transport_options=transport_options)
    assert isinstance(resp, str)
    return resp
# GET /ldap_config -> models.LDAPConfig
def ldap_config(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> models.LDAPConfig:
    """Fetch the LDAP configuration."""
    resp = self.get(
        "/ldap_config", models.LDAPConfig, transport_options=transport_options
    )
    assert isinstance(resp, models.LDAPConfig)
    return resp
# GET /legacy_features/{legacy_feature_id} -> models.LegacyFeature
def legacy_feature(
    self,
    # Id of the legacy feature
    legacy_feature_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.LegacyFeature:
    """Retrieve a single legacy feature by its id."""
    path = f"/legacy_features/{legacy_feature_id}"
    resp = self.get(path, models.LegacyFeature, transport_options=transport_options)
    assert isinstance(resp, models.LegacyFeature)
    return resp
# login() using api3credentials is automated in the client
def login_user(self, user_id: int) -> api_methods.APIMethods:
    """Delegate sudo login for the given user to the base client."""
    session = super().login_user(user_id)
    return session
def logout(self) -> None:
    """Delegate logout to the base client."""
    super().logout()
# GET /looks/{look_id} -> models.LookWithQuery
def look(
    self,
    # Id of the look
    look_id: int,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.LookWithQuery:
    """Retrieve a single look (with its query) by id."""
    path = f"/looks/{look_id}"
    resp = self.get(
        path,
        models.LookWithQuery,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.LookWithQuery)
    return resp
# GET /lookml_models/{lookml_model_name} -> models.LookmlModel
def lookml_model(
    self,
    # Name of the LookML model
    lookml_model_name: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.LookmlModel:
    """Retrieve a LookML model by name."""
    path = f"/lookml_models/{lookml_model_name}"
    resp = self.get(
        path,
        models.LookmlModel,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.LookmlModel)
    return resp
# GET /lookml_models/{lookml_model_name}/explores/{explore_name} -> models.LookmlModelExplore
def lookml_model_explore(
    self,
    # Name of the LookML model
    lookml_model_name: str,
    # Name of the explore within the model
    explore_name: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.LookmlModelExplore:
    """Retrieve one explore of a LookML model."""
    path = f"/lookml_models/{lookml_model_name}/explores/{explore_name}"
    resp = self.get(
        path,
        models.LookmlModelExplore,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.LookmlModelExplore)
    return resp
# GET /projects/{project_id}/manifest -> models.Manifest
def manifest(
    self,
    # Id of the project
    project_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Manifest:
    """Retrieve the project's manifest."""
    path = f"/projects/{project_id}/manifest"
    resp = self.get(path, models.Manifest, transport_options=transport_options)
    assert isinstance(resp, models.Manifest)
    return resp
# GET /user -> models.User
def me(
    self,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.User:
    """Retrieve the currently authenticated user."""
    resp = self.get(
        "/user",
        models.User,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.User)
    return resp
# GET /merge_queries/{merge_query_id} -> models.MergeQuery
def merge_query(
    self,
    # Id of the merge query
    merge_query_id: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.MergeQuery:
    """Retrieve a single merge query by its id."""
    path = f"/merge_queries/{merge_query_id}"
    resp = self.get(
        path,
        models.MergeQuery,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.MergeQuery)
    return resp
# GET /model_sets/{model_set_id} -> models.ModelSet
def model_set(
    self,
    # Id of the model set
    model_set_id: int,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ModelSet:
    """Retrieve a single model set by its id."""
    path = f"/model_sets/{model_set_id}"
    resp = self.get(
        path,
        models.ModelSet,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.ModelSet)
    return resp
# GET /oidc_config -> models.OIDCConfig
def oidc_config(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> models.OIDCConfig:
    """Fetch the OIDC configuration."""
    resp = self.get(
        "/oidc_config", models.OIDCConfig, transport_options=transport_options
    )
    assert isinstance(resp, models.OIDCConfig)
    return resp
# GET /oidc_test_configs/{test_slug} -> models.OIDCConfig
def oidc_test_config(
    self,
    # Slug of the test configuration
    test_slug: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.OIDCConfig:
    """Fetch an OIDC test configuration by slug."""
    path = f"/oidc_test_configs/{test_slug}"
    resp = self.get(path, models.OIDCConfig, transport_options=transport_options)
    assert isinstance(resp, models.OIDCConfig)
    return resp
# POST /parse_saml_idp_metadata -> models.SamlMetadataParseResult
def parse_saml_idp_metadata(
    self, body: str, transport_options: Optional[transport.TransportSettings] = None
) -> models.SamlMetadataParseResult:
    """Parse the SAML IdP metadata XML supplied in body."""
    resp = self.post(
        "/parse_saml_idp_metadata",
        models.SamlMetadataParseResult,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(resp, models.SamlMetadataParseResult)
    return resp
# GET /password_config -> models.PasswordConfig
def password_config(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> models.PasswordConfig:
    """Fetch the password policy configuration."""
    resp = self.get(
        "/password_config",
        models.PasswordConfig,
        transport_options=transport_options,
    )
    assert isinstance(resp, models.PasswordConfig)
    return resp
# POST /data_actions -> models.DataActionResponse
def perform_data_action(
    self,
    body: models.WriteDataActionRequest,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DataActionResponse:
    """Send a data action request and return the action's response."""
    resp = self.post(
        "/data_actions",
        models.DataActionResponse,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(resp, models.DataActionResponse)
    return resp
# GET /permission_sets/{permission_set_id} -> models.PermissionSet
def permission_set(
    self,
    # Id of the permission set
    permission_set_id: int,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.PermissionSet:
    """Retrieve a single permission set by its id."""
    path = f"/permission_sets/{permission_set_id}"
    resp = self.get(
        path,
        models.PermissionSet,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.PermissionSet)
    return resp
# GET /projects/{project_id} -> models.Project
def project(
    self,
    # Id of the project
    project_id: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Project:
    """Retrieve a single project by its id."""
    path = f"/projects/{project_id}"
    resp = self.get(
        path,
        models.Project,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.Project)
    return resp
# GET /projects/{project_id}/files/file -> models.ProjectFile
def project_file(
    self,
    # Id of the project
    project_id: str,
    # Id of the file within the project
    file_id: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ProjectFile:
    """Retrieve metadata for one file within a project."""
    params = {"file_id": file_id, "fields": fields}
    resp = self.get(
        f"/projects/{project_id}/files/file",
        models.ProjectFile,
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, models.ProjectFile)
    return resp
# GET /projects/{project_id}/validate -> models.ProjectValidationCache
def project_validation_results(
    self,
    # Id of the project
    project_id: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ProjectValidationCache:
    """Fetch the cached validation results for the project."""
    path = f"/projects/{project_id}/validate"
    resp = self.get(
        path,
        models.ProjectValidationCache,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.ProjectValidationCache)
    return resp
# GET /projects/{project_id}/current_workspace -> models.ProjectWorkspace
def project_workspace(
    self,
    # Id of the project
    project_id: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ProjectWorkspace:
    """Fetch the project's current workspace."""
    path = f"/projects/{project_id}/current_workspace"
    resp = self.get(
        path,
        models.ProjectWorkspace,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.ProjectWorkspace)
    return resp
# GET /queries/{query_id} -> models.Query
def query(
    self,
    # Id of the query
    query_id: int,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Query:
    """Retrieve a single query by its id."""
    path = f"/queries/{query_id}"
    resp = self.get(
        path,
        models.Query,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.Query)
    return resp
# GET /queries/slug/{slug} -> models.Query
def query_for_slug(
    self,
    # Slug of the query
    slug: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Query:
    """Retrieve a query by its slug."""
    path = f"/queries/slug/{slug}"
    resp = self.get(
        path,
        models.Query,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.Query)
    return resp
# GET /query_tasks/{query_task_id} -> models.QueryTask
def query_task(
    self,
    # Id of the query task
    query_task_id: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.QueryTask:
    """Fetch information about an async query task."""
    path = f"/query_tasks/{query_task_id}"
    resp = self.get(
        path,
        models.QueryTask,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.QueryTask)
    return resp
# GET /query_tasks/multi_results -> MutableMapping[str, str]
def query_task_multi_results(
    self,
    # Ids of the query tasks to fetch results for
    query_task_ids: models.DelimSequence[str],
    transport_options: Optional[transport.TransportSettings] = None,
) -> MutableMapping[str, str]:
    """Fetch results for several async query tasks in one call."""
    resp = self.get(
        "/query_tasks/multi_results",
        MutableMapping[str, str],
        query_params={"query_task_ids": query_task_ids},
        transport_options=transport_options,
    )
    assert isinstance(resp, dict)
    return resp
# GET /query_tasks/{query_task_id}/results -> MutableMapping[str, str]
def query_task_results(
    self,
    # Id of the query task
    query_task_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> MutableMapping[str, str]:
    """Fetch the results of one async query task."""
    path = f"/query_tasks/{query_task_id}/results"
    resp = self.get(
        path,
        MutableMapping[str, str],
        transport_options=transport_options,
    )
    assert isinstance(resp, dict)
    return resp
# GET /render_tasks/{render_task_id} -> models.RenderTask
def render_task(
    self,
    # Id of the render task
    render_task_id: str,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.RenderTask:
    """Fetch information about a render task."""
    path = f"/render_tasks/{render_task_id}"
    resp = self.get(
        path,
        models.RenderTask,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.RenderTask)
    return resp
# GET /render_tasks/{render_task_id}/results -> bytes
def render_task_results(
    self,
    # Id of the render task
    render_task_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> bytes:
    """Fetch the rendered output of a render task as raw bytes."""
    path = f"/render_tasks/{render_task_id}/results"
    resp = self.get(path, bytes, transport_options=transport_options)
    assert isinstance(resp, bytes)
    return resp
# POST /projects/{project_id}/reset_to_production -> str
def reset_project_to_production(
    self,
    # Id of the project
    project_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Reset the project to its production state."""
    path = f"/projects/{project_id}/reset_to_production"
    resp = self.post(path, str, transport_options=transport_options)
    assert isinstance(resp, str)
    return resp
# POST /projects/{project_id}/reset_to_remote -> str
def reset_project_to_remote(
    self,
    # Id of the project
    project_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> str:
    """Reset the project to the state of its remote repository."""
    path = f"/projects/{project_id}/reset_to_remote"
    resp = self.post(path, str, transport_options=transport_options)
    assert isinstance(resp, str)
    return resp
# GET /roles/{role_id} -> models.Role
def role(
    self,
    # Id of the role
    role_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Role:
    """Retrieve a single role by its id."""
    path = f"/roles/{role_id}"
    resp = self.get(path, models.Role, transport_options=transport_options)
    assert isinstance(resp, models.Role)
    return resp
# GET /roles/{role_id}/groups -> Sequence[models.Group]
def role_groups(
    self,
    # Id of the role
    role_id: int,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Group]:
    """List the groups assigned to the given role."""
    path = f"/roles/{role_id}/groups"
    resp = self.get(
        path,
        Sequence[models.Group],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /roles/{role_id}/users -> Sequence[models.User]
def role_users(
    self,
    # Id of the role
    role_id: int,
    # Comma-separated list of fields to include
    fields: Optional[str] = None,
    # When true, include only users assigned directly to the role
    # (excluding those who have it only through group membership)
    direct_association_only: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.User]:
    """List the users who hold the given role."""
    params = {
        "fields": fields,
        "direct_association_only": direct_association_only,
    }
    resp = self.get(
        f"/roles/{role_id}/users",
        Sequence[models.User],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /projects/{project_id}/git_connection_tests/{test_id} -> models.GitConnectionTestResult
def run_git_connection_test(
    self,
    # Id of the project
    project_id: str,
    # Id of the connection test to run
    test_id: str,
    # Remote url of the dependency to test (leave blank for the root project)
    remote_url: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.GitConnectionTestResult:
    """Run one git connection test against the project's repository."""
    path = f"/projects/{project_id}/git_connection_tests/{test_id}"
    resp = self.get(
        path,
        models.GitConnectionTestResult,
        query_params={"remote_url": remote_url},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.GitConnectionTestResult)
    return resp
# POST /queries/run/{result_format} -> Union[str, bytes]
def run_inline_query(
    self,
    # Format to render the result in
    result_format: str,
    body: models.WriteQuery,
    # Row limit (may override the limit in the saved query)
    limit: Optional[int] = None,
    # Apply model-specified formatting to each result
    apply_formatting: Optional[bool] = None,
    # Apply visualization options to results
    apply_vis: Optional[bool] = None,
    # Serve results from cache when available
    cache: Optional[bool] = None,
    # Render width for image formats
    image_width: Optional[int] = None,
    # Render height for image formats
    image_height: Optional[int] = None,
    # Generate drill links (only applicable to 'json_detail' format)
    generate_drill_links: Optional[bool] = None,
    # Use production models even when the caller is in development mode
    force_production: Optional[bool] = None,
    # Serve cached results even if they have expired
    cache_only: Optional[bool] = None,
    # Url-encoded prefix to use for drill links
    path_prefix: Optional[str] = None,
    # Rebuild PDTs used in the query
    rebuild_pdts: Optional[bool] = None,
    # Perform table calculations on the results
    server_table_calcs: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Union[str, bytes]:
    """Run the query in body and return the result in result_format."""
    params = {
        "limit": limit,
        "apply_formatting": apply_formatting,
        "apply_vis": apply_vis,
        "cache": cache,
        "image_width": image_width,
        "image_height": image_height,
        "generate_drill_links": generate_drill_links,
        "force_production": force_production,
        "cache_only": cache_only,
        "path_prefix": path_prefix,
        "rebuild_pdts": rebuild_pdts,
        "server_table_calcs": server_table_calcs,
    }
    resp = self.post(
        f"/queries/run/{result_format}",
        Union[str, bytes],  # type: ignore
        query_params=params,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(resp, (str, bytes))
    return resp
# GET /looks/{look_id}/run/{result_format} -> Union[str, bytes]
def run_look(
    self,
    # Id of the look
    look_id: int,
    # Format to render the result in
    result_format: str,
    # Row limit (may override the limit in the saved query)
    limit: Optional[int] = None,
    # Apply model-specified formatting to each result
    apply_formatting: Optional[bool] = None,
    # Apply visualization options to results
    apply_vis: Optional[bool] = None,
    # Serve results from cache when available
    cache: Optional[bool] = None,
    # Render width for image formats
    image_width: Optional[int] = None,
    # Render height for image formats
    image_height: Optional[int] = None,
    # Generate drill links (only applicable to 'json_detail' format)
    generate_drill_links: Optional[bool] = None,
    # Use production models even when the caller is in development mode
    force_production: Optional[bool] = None,
    # Serve cached results even if they have expired
    cache_only: Optional[bool] = None,
    # Url-encoded prefix to use for drill links
    path_prefix: Optional[str] = None,
    # Rebuild PDTs used in the query
    rebuild_pdts: Optional[bool] = None,
    # Perform table calculations on the results
    server_table_calcs: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Union[str, bytes]:
    """Run the look's saved query and return the result in result_format."""
    params = {
        "limit": limit,
        "apply_formatting": apply_formatting,
        "apply_vis": apply_vis,
        "cache": cache,
        "image_width": image_width,
        "image_height": image_height,
        "generate_drill_links": generate_drill_links,
        "force_production": force_production,
        "cache_only": cache_only,
        "path_prefix": path_prefix,
        "rebuild_pdts": rebuild_pdts,
        "server_table_calcs": server_table_calcs,
    }
    resp = self.get(
        f"/looks/{look_id}/run/{result_format}",
        Union[str, bytes],  # type: ignore
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, (str, bytes))
    return resp
# GET /projects/{project_id}/lookml_tests/run -> Sequence[models.LookmlTestResult]
def run_lookml_test(
    self,
    # Id of the project
    project_id: str,
    # Name of the file to restrict tests to
    file_id: Optional[str] = None,
    # Name of the test to run
    test: Optional[str] = None,
    # Name of the model to restrict tests to
    model: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.LookmlTestResult]:
    """Run the project's LookML tests and return their results."""
    params = {"file_id": file_id, "test": test, "model": model}
    resp = self.get(
        f"/projects/{project_id}/lookml_tests/run",
        Sequence[models.LookmlTestResult],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /queries/{query_id}/run/{result_format} -> Union[str, bytes]
def run_query(
    self,
    # Id of query
    query_id: int,
    # Format of result
    result_format: str,
    # Row limit (may override the limit in the saved query).
    limit: Optional[int] = None,
    # Apply model-specified formatting to each result.
    apply_formatting: Optional[bool] = None,
    # Apply visualization options to results.
    apply_vis: Optional[bool] = None,
    # Get results from cache if available.
    cache: Optional[bool] = None,
    # Render width for image formats.
    image_width: Optional[int] = None,
    # Render height for image formats.
    image_height: Optional[int] = None,
    # Generate drill links (only applicable to 'json_detail' format).
    generate_drill_links: Optional[bool] = None,
    # Force use of production models even if the user is in development mode.
    force_production: Optional[bool] = None,
    # Retrieve any results from cache even if the results have expired.
    cache_only: Optional[bool] = None,
    # Prefix to use for drill links (url encoded).
    path_prefix: Optional[str] = None,
    # Rebuild PDTS used in query.
    rebuild_pdts: Optional[bool] = None,
    # Perform table calculations on query results.
    server_table_calcs: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Union[str, bytes]:
    """Run Query.

    GET /queries/{query_id}/run/{result_format}
    Returns the rendered result as text or raw bytes depending on format.
    """
    params = {
        "limit": limit,
        "apply_formatting": apply_formatting,
        "apply_vis": apply_vis,
        "cache": cache,
        "image_width": image_width,
        "image_height": image_height,
        "generate_drill_links": generate_drill_links,
        "force_production": force_production,
        "cache_only": cache_only,
        "path_prefix": path_prefix,
        "rebuild_pdts": rebuild_pdts,
        "server_table_calcs": server_table_calcs,
    }
    resp = self.get(
        f"/queries/{query_id}/run/{result_format}",
        Union[str, bytes],  # type: ignore
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, (str, bytes))
    return resp
# POST /sql_queries/{slug}/run/{result_format} -> Union[str, bytes]
def run_sql_query(
    self,
    # slug of query
    slug: str,
    # Format of result, options are: ["json", "json_detail", "json_fe", "csv", "html", "md", "txt", "xlsx", "gsxml"]
    result_format: str,
    # Defaults to false. If set to true, the HTTP response will have content-disposition and other headers set to make the HTTP response behave as a downloadable attachment instead of as inline content.
    download: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Union[str, bytes]:
    """Run SQL Runner Query.

    POST /sql_queries/{slug}/run/{result_format}
    """
    resp = self.post(
        f"/sql_queries/{slug}/run/{result_format}",
        Union[str, bytes],  # type: ignore
        query_params={"download": download},
        transport_options=transport_options,
    )
    assert isinstance(resp, (str, bytes))
    return resp
# GET /queries/models/{model_name}/views/{view_name}/run/{result_format} -> Union[str, bytes]
def run_url_encoded_query(
    self,
    # Model name
    model_name: str,
    # View name
    view_name: str,
    # Format of result
    result_format: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Union[str, bytes]:
    """Run Url Encoded Query.

    GET /queries/models/{model_name}/views/{view_name}/run/{result_format}
    """
    resp = self.get(
        f"/queries/models/{model_name}/views/{view_name}/run/{result_format}",
        Union[str, bytes],  # type: ignore
        transport_options=transport_options,
    )
    assert isinstance(resp, (str, bytes))
    return resp
# GET /saml_config -> models.SamlConfig
def saml_config(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> models.SamlConfig:
    """Get SAML Configuration.

    GET /saml_config
    """
    resp = self.get(
        "/saml_config", models.SamlConfig, transport_options=transport_options
    )
    assert isinstance(resp, models.SamlConfig)
    return resp
# GET /saml_test_configs/{test_slug} -> models.SamlConfig
def saml_test_config(
    self,
    # Slug of test config
    test_slug: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.SamlConfig:
    """Get SAML Test Configuration.

    GET /saml_test_configs/{test_slug}
    """
    resp = self.get(
        f"/saml_test_configs/{test_slug}",
        models.SamlConfig,
        transport_options=transport_options,
    )
    assert isinstance(resp, models.SamlConfig)
    return resp
# GET /scheduled_plans/{scheduled_plan_id} -> models.ScheduledPlan
def scheduled_plan(
    self,
    # Scheduled Plan Id
    scheduled_plan_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ScheduledPlan:
    """Get Scheduled Plan.

    GET /scheduled_plans/{scheduled_plan_id}
    """
    resp = self.get(
        f"/scheduled_plans/{scheduled_plan_id}",
        models.ScheduledPlan,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.ScheduledPlan)
    return resp
# POST /scheduled_plans/run_once -> models.ScheduledPlan
def scheduled_plan_run_once(
    self,
    body: Optional[models.WriteScheduledPlan] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ScheduledPlan:
    """Run Scheduled Plan Once.

    POST /scheduled_plans/run_once
    """
    resp = self.post(
        "/scheduled_plans/run_once",
        models.ScheduledPlan,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(resp, models.ScheduledPlan)
    return resp
# GET /scheduled_plans/dashboard/{dashboard_id} -> Sequence[models.ScheduledPlan]
def scheduled_plans_for_dashboard(
    self,
    # Dashboard Id
    dashboard_id: int,
    # User Id (default is requesting user if not specified)
    user_id: Optional[int] = None,
    # Return scheduled plans belonging to all users for the dashboard
    all_users: Optional[bool] = None,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.ScheduledPlan]:
    """Scheduled Plans for Dashboard.

    GET /scheduled_plans/dashboard/{dashboard_id}
    """
    params = {"user_id": user_id, "all_users": all_users, "fields": fields}
    resp = self.get(
        f"/scheduled_plans/dashboard/{dashboard_id}",
        Sequence[models.ScheduledPlan],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /scheduled_plans/look/{look_id} -> Sequence[models.ScheduledPlan]
def scheduled_plans_for_look(
    self,
    # Look Id
    look_id: int,
    # User Id (default is requesting user if not specified)
    user_id: Optional[int] = None,
    # Requested fields.
    fields: Optional[str] = None,
    # Return scheduled plans belonging to all users for the look
    all_users: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.ScheduledPlan]:
    """Scheduled Plans for Look.

    GET /scheduled_plans/look/{look_id}
    """
    params = {"user_id": user_id, "fields": fields, "all_users": all_users}
    resp = self.get(
        f"/scheduled_plans/look/{look_id}",
        Sequence[models.ScheduledPlan],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /scheduled_plans/lookml_dashboard/{lookml_dashboard_id} -> Sequence[models.ScheduledPlan]
def scheduled_plans_for_lookml_dashboard(
    self,
    # LookML Dashboard Id
    lookml_dashboard_id: int,
    # User Id (default is requesting user if not specified)
    user_id: Optional[int] = None,
    # Requested fields.
    fields: Optional[str] = None,
    # Return scheduled plans belonging to all users for the dashboard
    all_users: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.ScheduledPlan]:
    """Scheduled Plans for LookML Dashboard.

    GET /scheduled_plans/lookml_dashboard/{lookml_dashboard_id}
    """
    params = {"user_id": user_id, "fields": fields, "all_users": all_users}
    resp = self.get(
        f"/scheduled_plans/lookml_dashboard/{lookml_dashboard_id}",
        Sequence[models.ScheduledPlan],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /scheduled_plans/space/{space_id} -> Sequence[models.ScheduledPlan]
def scheduled_plans_for_space(
    self,
    # Space Id
    space_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.ScheduledPlan]:
    """Scheduled Plans for Space.

    GET /scheduled_plans/space/{space_id}
    """
    resp = self.get(
        f"/scheduled_plans/space/{space_id}",
        Sequence[models.ScheduledPlan],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /content_favorite/search -> Sequence[models.ContentFavorite]
def search_content_favorites(
    self,
    # Match content favorite id(s)
    id: Optional[int] = None,
    # Match user id(s)
    user_id: Optional[int] = None,
    # Match content metadata id(s)
    content_metadata_id: Optional[int] = None,
    # Match dashboard id(s)
    dashboard_id: Optional[int] = None,
    # Match look id(s)
    look_id: Optional[int] = None,
    # Number of results to return. (used with offset)
    limit: Optional[int] = None,
    # Number of results to skip before returning any. (used with limit)
    offset: Optional[int] = None,
    # Fields to sort by.
    sorts: Optional[str] = None,
    # Requested fields.
    fields: Optional[str] = None,
    # Combine given search criteria in a boolean OR expression
    filter_or: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.ContentFavorite]:
    """Search Favorite Contents.

    GET /content_favorite/search
    """
    params = {
        "id": id,
        "user_id": user_id,
        "content_metadata_id": content_metadata_id,
        "dashboard_id": dashboard_id,
        "look_id": look_id,
        "limit": limit,
        "offset": offset,
        "sorts": sorts,
        "fields": fields,
        "filter_or": filter_or,
    }
    resp = self.get(
        "/content_favorite/search",
        Sequence[models.ContentFavorite],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /content_view/search -> Sequence[models.ContentView]
def search_content_views(
    self,
    # Match view count
    view_count: Optional[int] = None,
    # Match Group Id
    group_id: Optional[int] = None,
    # Match look_id
    look_id: Optional[str] = None,
    # Match dashboard_id
    dashboard_id: Optional[str] = None,
    # Match content metadata id
    content_metadata_id: Optional[int] = None,
    # Match start of week date
    start_of_week_date: Optional[str] = None,
    # True if only all time view records should be returned
    all_time: Optional[bool] = None,
    # Match user id
    user_id: Optional[int] = None,
    # Requested fields
    fields: Optional[str] = None,
    # Number of results to return. Use with `offset` to manage pagination of results
    limit: Optional[int] = None,
    # Number of results to skip before returning data
    offset: Optional[int] = None,
    # Fields to sort by
    sorts: Optional[str] = None,
    # Combine given search criteria in a boolean OR expression
    filter_or: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.ContentView]:
    """Search Content Views.

    GET /content_view/search
    """
    params = {
        "view_count": view_count,
        "group_id": group_id,
        "look_id": look_id,
        "dashboard_id": dashboard_id,
        "content_metadata_id": content_metadata_id,
        "start_of_week_date": start_of_week_date,
        "all_time": all_time,
        "user_id": user_id,
        "fields": fields,
        "limit": limit,
        "offset": offset,
        "sorts": sorts,
        "filter_or": filter_or,
    }
    resp = self.get(
        "/content_view/search",
        Sequence[models.ContentView],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /dashboard_elements/search -> Sequence[models.DashboardElement]
def search_dashboard_elements(
    self,
    # Select elements that refer to a given dashboard id
    dashboard_id: Optional[int] = None,
    # Select elements that refer to a given look id
    look_id: Optional[int] = None,
    # Match the title of element
    title: Optional[str] = None,
    # Select soft-deleted dashboard elements
    deleted: Optional[bool] = None,
    # Requested fields.
    fields: Optional[str] = None,
    # Combine given search criteria in a boolean OR expression
    filter_or: Optional[bool] = None,
    # Fields to sort by. Sortable fields: [:look_id, :dashboard_id, :deleted, :title]
    sorts: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.DashboardElement]:
    """Search Dashboard Elements.

    GET /dashboard_elements/search
    """
    params = {
        "dashboard_id": dashboard_id,
        "look_id": look_id,
        "title": title,
        "deleted": deleted,
        "fields": fields,
        "filter_or": filter_or,
        "sorts": sorts,
    }
    resp = self.get(
        "/dashboard_elements/search",
        Sequence[models.DashboardElement],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /dashboards/search -> Sequence[models.Dashboard]
def search_dashboards(
    self,
    # Match dashboard id.
    id: Optional[int] = None,
    # Match dashboard slug.
    slug: Optional[str] = None,
    # Match Dashboard title.
    title: Optional[str] = None,
    # Match Dashboard description.
    description: Optional[str] = None,
    # Filter on a content favorite id.
    content_favorite_id: Optional[int] = None,
    # Filter on a particular space.
    space_id: Optional[str] = None,
    # Filter on dashboards deleted status.
    deleted: Optional[str] = None,
    # Filter on dashboards created by a particular user.
    user_id: Optional[str] = None,
    # Filter on a particular value of view_count
    view_count: Optional[str] = None,
    # Filter on a particular content metadata id.
    content_metadata_id: Optional[int] = None,
    # Requested fields.
    fields: Optional[str] = None,
    # Requested page.
    page: Optional[int] = None,
    # Results per page.
    per_page: Optional[int] = None,
    # Number of results to return. (used with offset and takes priority over page and per_page)
    limit: Optional[int] = None,
    # Number of results to skip before returning any. (used with limit and takes priority over page and per_page)
    offset: Optional[int] = None,
    # One or more fields to sort by. Sortable fields: [:title, :user_id, :id, :created_at, :space_id, :description, :view_count, :favorite_count, :slug, :content_favorite_id, :content_metadata_id, :deleted, :deleted_at, :last_viewed_at]
    sorts: Optional[str] = None,
    # Combine given search criteria in a boolean OR expression
    filter_or: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Dashboard]:
    """Search Dashboards.

    GET /dashboards/search
    """
    params = {
        "id": id,
        "slug": slug,
        "title": title,
        "description": description,
        "content_favorite_id": content_favorite_id,
        "space_id": space_id,
        "deleted": deleted,
        "user_id": user_id,
        "view_count": view_count,
        "content_metadata_id": content_metadata_id,
        "fields": fields,
        "page": page,
        "per_page": per_page,
        "limit": limit,
        "offset": offset,
        "sorts": sorts,
        "filter_or": filter_or,
    }
    resp = self.get(
        "/dashboards/search",
        Sequence[models.Dashboard],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /folders/search -> Sequence[models.Folder]
def search_folders(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    # Requested page.
    page: Optional[int] = None,
    # Results per page.
    per_page: Optional[int] = None,
    # Number of results to return. (used with offset and takes priority over page and per_page)
    limit: Optional[int] = None,
    # Number of results to skip before returning any. (used with limit and takes priority over page and per_page)
    offset: Optional[int] = None,
    # Fields to sort by.
    sorts: Optional[str] = None,
    # Match folder name.
    name: Optional[str] = None,
    # Match folder id
    id: Optional[int] = None,
    # Filter on children of a particular folder.
    parent_id: Optional[str] = None,
    # Filter on folders created by a particular user.
    creator_id: Optional[str] = None,
    # Combine given search criteria in a boolean OR expression
    filter_or: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Folder]:
    """Search Folders.

    GET /folders/search
    """
    params = {
        "fields": fields,
        "page": page,
        "per_page": per_page,
        "limit": limit,
        "offset": offset,
        "sorts": sorts,
        "name": name,
        "id": id,
        "parent_id": parent_id,
        "creator_id": creator_id,
        "filter_or": filter_or,
    }
    resp = self.get(
        "/folders/search",
        Sequence[models.Folder],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /homepages/search -> Sequence[models.Homepage]
def search_homepages(
    self,
    # Matches homepage title.
    title: Optional[str] = None,
    # Matches the timestamp for when the homepage was created.
    created_at: Optional[str] = None,
    # The first name of the user who created this homepage.
    first_name: Optional[str] = None,
    # The last name of the user who created this homepage.
    last_name: Optional[str] = None,
    # Requested fields.
    fields: Optional[str] = None,
    # Return favorited homepages when true.
    favorited: Optional[bool] = None,
    # Filter on homepages created by a particular user.
    creator_id: Optional[str] = None,
    # The fields to sort the results by
    sorts: Optional[str] = None,
    # The page to return.
    page: Optional[int] = None,
    # The number of items in the returned page.
    per_page: Optional[int] = None,
    # The number of items to skip before returning any. (used with limit and takes priority over page and per_page)
    offset: Optional[int] = None,
    # The maximum number of items to return. (used with offset and takes priority over page and per_page)
    limit: Optional[int] = None,
    # Combine given search criteria in a boolean OR expression
    filter_or: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Homepage]:
    """Search Homepages.

    GET /homepages/search
    """
    params = {
        "title": title,
        "created_at": created_at,
        "first_name": first_name,
        "last_name": last_name,
        "fields": fields,
        "favorited": favorited,
        "creator_id": creator_id,
        "sorts": sorts,
        "page": page,
        "per_page": per_page,
        "offset": offset,
        "limit": limit,
        "filter_or": filter_or,
    }
    resp = self.get(
        "/homepages/search",
        Sequence[models.Homepage],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /looks/search -> Sequence[models.Look]
def search_looks(
    self,
    # Match Look title.
    title: Optional[str] = None,
    # Match Look description.
    description: Optional[str] = None,
    # Select looks with a particular content favorite id
    content_favorite_id: Optional[int] = None,
    # Select looks in a particular space.
    space_id: Optional[str] = None,
    # Select looks created by a particular user.
    user_id: Optional[str] = None,
    # Select looks with particular view_count value
    view_count: Optional[str] = None,
    # Select soft-deleted looks
    deleted: Optional[bool] = None,
    # Select looks that reference a particular query by query_id
    query_id: Optional[int] = None,
    # Requested fields.
    fields: Optional[str] = None,
    # Requested page.
    page: Optional[int] = None,
    # Results per page.
    per_page: Optional[int] = None,
    # Number of results to return. (used with offset and takes priority over page and per_page)
    limit: Optional[int] = None,
    # Number of results to skip before returning any. (used with limit and takes priority over page and per_page)
    offset: Optional[int] = None,
    # One or more fields to sort results by. Sortable fields: [:title, :user_id, :id, :created_at, :space_id, :description, :updated_at, :last_updater_id, :view_count, :favorite_count, :content_favorite_id, :deleted, :deleted_at, :last_viewed_at, :query_id]
    sorts: Optional[str] = None,
    # Combine given search criteria in a boolean OR expression
    filter_or: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Look]:
    """Search Looks.

    GET /looks/search
    """
    params = {
        "title": title,
        "description": description,
        "content_favorite_id": content_favorite_id,
        "space_id": space_id,
        "user_id": user_id,
        "view_count": view_count,
        "deleted": deleted,
        "query_id": query_id,
        "fields": fields,
        "page": page,
        "per_page": per_page,
        "limit": limit,
        "offset": offset,
        "sorts": sorts,
        "filter_or": filter_or,
    }
    resp = self.get(
        "/looks/search",
        Sequence[models.Look],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /spaces/search -> Sequence[models.Space]
def search_spaces(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    # Requested page.
    page: Optional[int] = None,
    # Results per page.
    per_page: Optional[int] = None,
    # Number of results to return. (used with offset and takes priority over page and per_page)
    limit: Optional[int] = None,
    # Number of results to skip before returning any. (used with limit and takes priority over page and per_page)
    offset: Optional[int] = None,
    # Fields to sort by.
    sorts: Optional[str] = None,
    # Match Space title.
    name: Optional[str] = None,
    # Match Space id
    id: Optional[int] = None,
    # Filter on children of a particular space.
    parent_id: Optional[str] = None,
    # Filter on spaces created by a particular user.
    creator_id: Optional[str] = None,
    # Combine given search criteria in a boolean OR expression
    filter_or: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Space]:
    """Search Spaces.

    GET /spaces/search
    """
    params = {
        "fields": fields,
        "page": page,
        "per_page": per_page,
        "limit": limit,
        "offset": offset,
        "sorts": sorts,
        "name": name,
        "id": id,
        "parent_id": parent_id,
        "creator_id": creator_id,
        "filter_or": filter_or,
    }
    resp = self.get(
        "/spaces/search",
        Sequence[models.Space],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /themes/search -> Sequence[models.Theme]
def search_themes(
    self,
    # Match theme id.
    id: Optional[int] = None,
    # Match theme name.
    name: Optional[str] = None,
    # Timestamp for activation.
    begin_at: Optional[datetime.datetime] = None,
    # Timestamp for expiration.
    end_at: Optional[datetime.datetime] = None,
    # Number of results to return (used with `offset`).
    limit: Optional[int] = None,
    # Number of results to skip before returning any (used with `limit`).
    offset: Optional[int] = None,
    # Fields to sort by.
    sorts: Optional[str] = None,
    # Requested fields.
    fields: Optional[str] = None,
    # Combine given search criteria in a boolean OR expression
    filter_or: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Theme]:
    """Search Themes.

    GET /themes/search
    """
    params = {
        "id": id,
        "name": name,
        "begin_at": begin_at,
        "end_at": end_at,
        "limit": limit,
        "offset": offset,
        "sorts": sorts,
        "fields": fields,
        "filter_or": filter_or,
    }
    resp = self.get(
        "/themes/search",
        Sequence[models.Theme],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /user_login_lockouts/search -> Sequence[models.UserLoginLockout]
def search_user_login_lockouts(
    self,
    # Include only these fields in the response
    fields: Optional[str] = None,
    # Return only page N of paginated results
    page: Optional[int] = None,
    # Return N rows of data per page
    per_page: Optional[int] = None,
    # Fields to sort by.
    sorts: Optional[str] = None,
    # Auth type user is locked out for (email, ldap, totp, api)
    auth_type: Optional[str] = None,
    # Match name
    full_name: Optional[str] = None,
    # Match email
    email: Optional[str] = None,
    # Match remote LDAP ID
    remote_id: Optional[str] = None,
    # Combine given search criteria in a boolean OR expression
    filter_or: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.UserLoginLockout]:
    """Search User Login Lockouts.

    GET /user_login_lockouts/search
    """
    params = {
        "fields": fields,
        "page": page,
        "per_page": per_page,
        "sorts": sorts,
        "auth_type": auth_type,
        "full_name": full_name,
        "email": email,
        "remote_id": remote_id,
        "filter_or": filter_or,
    }
    resp = self.get(
        "/user_login_lockouts/search",
        Sequence[models.UserLoginLockout],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /users/search -> Sequence[models.User]
def search_users(
    self,
    # Include only these fields in the response
    fields: Optional[str] = None,
    # Return only page N of paginated results
    page: Optional[int] = None,
    # Return N rows of data per page
    per_page: Optional[int] = None,
    # Fields to sort by.
    sorts: Optional[str] = None,
    # Match User Id.
    id: Optional[int] = None,
    # Match First name.
    first_name: Optional[str] = None,
    # Match Last name.
    last_name: Optional[str] = None,
    # Search for user accounts associated with Looker employees
    verified_looker_employee: Optional[bool] = None,
    # Search for the user with this email address
    email: Optional[str] = None,
    # Search for disabled user accounts
    is_disabled: Optional[bool] = None,
    # Combine given search criteria in a boolean OR expression
    filter_or: Optional[bool] = None,
    # Search for users who have access to this content_metadata item
    content_metadata_id: Optional[int] = None,
    # Search for users who are direct members of this group
    group_id: Optional[int] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.User]:
    """Search Users.

    GET /users/search
    """
    params = {
        "fields": fields,
        "page": page,
        "per_page": per_page,
        "sorts": sorts,
        "id": id,
        "first_name": first_name,
        "last_name": last_name,
        "verified_looker_employee": verified_looker_employee,
        "email": email,
        "is_disabled": is_disabled,
        "filter_or": filter_or,
        "content_metadata_id": content_metadata_id,
        "group_id": group_id,
    }
    resp = self.get(
        "/users/search",
        Sequence[models.User],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /users/search/names/{pattern} -> Sequence[models.User]
def search_users_names(
    self,
    # Pattern to match
    pattern: str,
    # Include only these fields in the response
    fields: Optional[str] = None,
    # Return only page N of paginated results
    page: Optional[int] = None,
    # Return N rows of data per page
    per_page: Optional[int] = None,
    # Fields to sort by
    sorts: Optional[str] = None,
    # Match User Id
    id: Optional[int] = None,
    # Match First name
    first_name: Optional[str] = None,
    # Match Last name
    last_name: Optional[str] = None,
    # Match Verified Looker employee
    verified_looker_employee: Optional[bool] = None,
    # Match Email Address
    email: Optional[str] = None,
    # Include or exclude disabled accounts in the results
    is_disabled: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.User]:
    """Search User Names.

    GET /users/search/names/{pattern}
    """
    params = {
        "fields": fields,
        "page": page,
        "per_page": per_page,
        "sorts": sorts,
        "id": id,
        "first_name": first_name,
        "last_name": last_name,
        "verified_looker_employee": verified_looker_employee,
        "email": email,
        "is_disabled": is_disabled,
    }
    resp = self.get(
        f"/users/search/names/{pattern}",
        Sequence[models.User],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /session -> models.ApiSession
def session(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> models.ApiSession:
    """Get Session.

    GET /session
    """
    resp = self.get(
        "/session", models.ApiSession, transport_options=transport_options
    )
    assert isinstance(resp, models.ApiSession)
    return resp
# GET /session_config -> models.SessionConfig
def session_config(
    self, transport_options: Optional[transport.TransportSettings] = None
) -> models.SessionConfig:
    """Get Session Config.

    GET /session_config
    """
    resp = self.get(
        "/session_config",
        models.SessionConfig,
        transport_options=transport_options,
    )
    assert isinstance(resp, models.SessionConfig)
    return resp
# PUT /color_collections/default -> models.ColorCollection
def set_default_color_collection(
    self,
    # ID of color collection to set as default
    collection_id: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ColorCollection:
    """Set Default Color Collection.

    PUT /color_collections/default
    """
    resp = self.put(
        "/color_collections/default",
        models.ColorCollection,
        query_params={"collection_id": collection_id},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.ColorCollection)
    return resp
# PUT /themes/default -> models.Theme
def set_default_theme(
    self,
    # Name of theme to set as default
    name: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Theme:
    """Set Default Theme.

    PUT /themes/default
    """
    resp = self.put(
        "/themes/default",
        models.Theme,
        query_params={"name": name},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.Theme)
    return resp
# PUT /roles/{role_id}/groups -> Sequence[models.Group]
def set_role_groups(
    self,
    # Id of Role
    role_id: int,
    body: Sequence[int],
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Group]:
    """Update Role Groups.

    PUT /roles/{role_id}/groups
    """
    resp = self.put(
        f"/roles/{role_id}/groups",
        Sequence[models.Group],
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# PUT /roles/{role_id}/users -> Sequence[models.User]
def set_role_users(
    self,
    # id of role
    role_id: int,
    body: Sequence[int],
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.User]:
    """Update Role Users.

    PUT /roles/{role_id}/users
    """
    resp = self.put(
        f"/roles/{role_id}/users",
        Sequence[models.User],
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# POST /user_attributes/{user_attribute_id}/group_values -> Sequence[models.UserAttributeGroupValue]
def set_user_attribute_group_values(
    self,
    # Id of user attribute
    user_attribute_id: int,
    body: Sequence[models.UserAttributeGroupValue],
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.UserAttributeGroupValue]:
    """Set User Attribute Group Values.

    POST /user_attributes/{user_attribute_id}/group_values
    """
    resp = self.post(
        f"/user_attributes/{user_attribute_id}/group_values",
        Sequence[models.UserAttributeGroupValue],
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# PATCH /users/{user_id}/attribute_values/{user_attribute_id} -> models.UserAttributeWithValue
def set_user_attribute_user_value(
    self,
    # Id of user
    user_id: int,
    # Id of user attribute
    user_attribute_id: int,
    body: models.WriteUserAttributeWithValue,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.UserAttributeWithValue:
    """Set User Attribute User Value.

    PATCH /users/{user_id}/attribute_values/{user_attribute_id}
    """
    resp = self.patch(
        f"/users/{user_id}/attribute_values/{user_attribute_id}",
        models.UserAttributeWithValue,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(resp, models.UserAttributeWithValue)
    return resp
# PUT /users/{user_id}/roles -> Sequence[models.Role]
def set_user_roles(
    self,
    # id of user
    user_id: int,
    body: Sequence[int],
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Role]:
    """Set User Roles.

    PUT /users/{user_id}/roles
    """
    resp = self.put(
        f"/users/{user_id}/roles",
        Sequence[models.Role],
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /spaces/{space_id} -> models.Space
def space(
    self,
    # Id of space
    space_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Space:
    """Get Space.

    GET /spaces/{space_id}
    """
    resp = self.get(
        f"/spaces/{space_id}",
        models.Space,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, models.Space)
    return resp
# GET /spaces/{space_id}/ancestors -> Sequence[models.Space]
def space_ancestors(
    self,
    # Id of space
    space_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Space]:
    """Get Space Ancestors.

    GET /spaces/{space_id}/ancestors
    """
    resp = self.get(
        f"/spaces/{space_id}/ancestors",
        Sequence[models.Space],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /spaces/{space_id}/children -> Sequence[models.Space]
def space_children(
    self,
    # Id of space
    space_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    # Requested page.
    page: Optional[int] = None,
    # Results per page.
    per_page: Optional[int] = None,
    # Fields to sort by.
    sorts: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Space]:
    """Get Space Children.

    GET /spaces/{space_id}/children
    """
    params = {
        "fields": fields,
        "page": page,
        "per_page": per_page,
        "sorts": sorts,
    }
    resp = self.get(
        f"/spaces/{space_id}/children",
        Sequence[models.Space],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /spaces/{space_id}/children/search -> Sequence[models.Space]
def space_children_search(
    self,
    # Id of space
    space_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    # Fields to sort by.
    sorts: Optional[str] = None,
    # Match Space name.
    name: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Space]:
    """Search Space Children.

    GET /spaces/{space_id}/children/search
    """
    params = {"fields": fields, "sorts": sorts, "name": name}
    resp = self.get(
        f"/spaces/{space_id}/children/search",
        Sequence[models.Space],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(resp, list)
    return resp
# GET /spaces/{space_id}/dashboards -> Sequence[models.Dashboard]
def space_dashboards(
    self,
    # Id of space
    space_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Dashboard]:
    """Get Space Dashboards"""
    url = f"/spaces/{space_id}/dashboards"
    result = self.get(
        url,
        Sequence[models.Dashboard],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# GET /spaces/{space_id}/looks -> Sequence[models.LookWithQuery]
def space_looks(
    self,
    # Id of space
    space_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.LookWithQuery]:
    """Get Space Looks"""
    url = f"/spaces/{space_id}/looks"
    result = self.get(
        url,
        Sequence[models.LookWithQuery],
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# GET /spaces/{space_id}/parent -> models.Space
def space_parent(
    self,
    # Id of space
    space_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Space:
    """Get Space Parent"""
    url = f"/spaces/{space_id}/parent"
    result = self.get(
        url,
        models.Space,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.Space)
    return result
# GET /sql_queries/{slug} -> models.SqlQuery
def sql_query(
    self,
    # slug of query
    slug: str,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.SqlQuery:
    """Get SQL Runner Query"""
    url = f"/sql_queries/{slug}"
    result = self.get(
        url,
        models.SqlQuery,
        transport_options=transport_options,
    )
    assert isinstance(result, models.SqlQuery)
    return result
# PATCH /dashboards/{lookml_dashboard_id}/sync -> Sequence[int]
def sync_lookml_dashboard(
    self,
    # Id of LookML dashboard, in the form 'model::dashboardname'
    lookml_dashboard_id: str,
    body: models.WriteDashboard,
    # If true, and this dashboard is localized, export it with the raw keys, not localized.
    raw_locale: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[int]:
    """Sync LookML Dashboard"""
    url = f"/dashboards/{lookml_dashboard_id}/sync"
    result = self.patch(
        url,
        Sequence[int],
        query_params={"raw_locale": raw_locale},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# PUT /connections/{connection_name}/test -> Sequence[models.DBConnectionTestResult]
def test_connection(
    self,
    # Name of connection
    connection_name: str,
    # Array of names of tests to run
    tests: Optional[models.DelimSequence[str]] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.DBConnectionTestResult]:
    """Test Connection"""
    url = f"/connections/{connection_name}/test"
    result = self.put(
        url,
        Sequence[models.DBConnectionTestResult],
        query_params={"tests": tests},
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# PUT /connections/test -> Sequence[models.DBConnectionTestResult]
def test_connection_config(
    self,
    body: Optional[models.WriteDBConnection] = None,
    # Array of names of tests to run
    tests: Optional[models.DelimSequence[str]] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.DBConnectionTestResult]:
    """Test Connection Configuration"""
    url = f"/connections/test"
    result = self.put(
        url,
        Sequence[models.DBConnectionTestResult],
        query_params={"tests": tests},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# POST /integrations/{integration_id}/test -> models.IntegrationTestResult
def test_integration(
    self,
    # Id of Integration
    integration_id: int,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.IntegrationTestResult:
    """Test integration"""
    url = f"/integrations/{integration_id}/test"
    result = self.post(
        url,
        models.IntegrationTestResult,
        transport_options=transport_options,
    )
    assert isinstance(result, models.IntegrationTestResult)
    return result
# PUT /ldap_config/test_auth -> models.LDAPConfigTestResult
def test_ldap_config_auth(
    self,
    body: models.WriteLDAPConfig,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.LDAPConfigTestResult:
    """Test LDAP Auth"""
    url = f"/ldap_config/test_auth"
    result = self.put(
        url,
        models.LDAPConfigTestResult,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.LDAPConfigTestResult)
    return result
# PUT /ldap_config/test_connection -> models.LDAPConfigTestResult
def test_ldap_config_connection(
    self,
    body: models.WriteLDAPConfig,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.LDAPConfigTestResult:
    """Test LDAP Connection"""
    url = f"/ldap_config/test_connection"
    result = self.put(
        url,
        models.LDAPConfigTestResult,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.LDAPConfigTestResult)
    return result
# PUT /ldap_config/test_user_auth -> models.LDAPConfigTestResult
def test_ldap_config_user_auth(
    self,
    body: models.WriteLDAPConfig,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.LDAPConfigTestResult:
    """Test LDAP User Auth"""
    url = f"/ldap_config/test_user_auth"
    result = self.put(
        url,
        models.LDAPConfigTestResult,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.LDAPConfigTestResult)
    return result
# PUT /ldap_config/test_user_info -> models.LDAPConfigTestResult
def test_ldap_config_user_info(
    self,
    body: models.WriteLDAPConfig,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.LDAPConfigTestResult:
    """Test LDAP User Info"""
    url = f"/ldap_config/test_user_info"
    result = self.put(
        url,
        models.LDAPConfigTestResult,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.LDAPConfigTestResult)
    return result
# GET /themes/{theme_id} -> models.Theme
def theme(
    self,
    # Id of theme
    theme_id: str,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Theme:
    """Get Theme"""
    url = f"/themes/{theme_id}"
    result = self.get(
        url,
        models.Theme,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.Theme)
    return result
# GET /themes/theme_or_default -> models.Theme
def theme_or_default(
    self,
    # Name of theme
    name: str,
    # Timestamp representing the target datetime for the active period. Defaults to 'now'
    ts: Optional[datetime.datetime] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Theme:
    """Get Theme or Default"""
    url = f"/themes/theme_or_default"
    params = {"name": name, "ts": ts}
    result = self.get(
        url,
        models.Theme,
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(result, models.Theme)
    return result
# PATCH /backup_configuration -> models.BackupConfiguration
def update_backup_configuration(
    self,
    body: models.WriteBackupConfiguration,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.BackupConfiguration:
    """Update Backup Configuration"""
    url = f"/backup_configuration"
    result = self.patch(
        url,
        models.BackupConfiguration,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.BackupConfiguration)
    return result
# PATCH /color_collections/{collection_id} -> models.ColorCollection
def update_color_collection(
    self,
    # Id of Custom Color Collection
    collection_id: str,
    body: models.WriteColorCollection,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ColorCollection:
    """Update Custom Color collection"""
    url = f"/color_collections/{collection_id}"
    result = self.patch(
        url,
        models.ColorCollection,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.ColorCollection)
    return result
# PATCH /connections/{connection_name} -> models.DBConnection
def update_connection(
    self,
    # Name of connection
    connection_name: str,
    body: models.WriteDBConnection,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DBConnection:
    """Update Connection"""
    url = f"/connections/{connection_name}"
    result = self.patch(
        url,
        models.DBConnection,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.DBConnection)
    return result
# PATCH /content_metadata/{content_metadata_id} -> models.ContentMeta
def update_content_metadata(
    self,
    # Id of content metadata
    content_metadata_id: int,
    body: models.WriteContentMeta,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ContentMeta:
    """Update Content Metadata"""
    url = f"/content_metadata/{content_metadata_id}"
    result = self.patch(
        url,
        models.ContentMeta,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.ContentMeta)
    return result
# PUT /content_metadata_access/{content_metadata_access_id} -> models.ContentMetaGroupUser
def update_content_metadata_access(
    self,
    # Id of content metadata access
    content_metadata_access_id: int,
    body: models.ContentMetaGroupUser,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ContentMetaGroupUser:
    """Update Content Metadata Access"""
    url = f"/content_metadata_access/{content_metadata_access_id}"
    result = self.put(
        url,
        models.ContentMetaGroupUser,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.ContentMetaGroupUser)
    return result
# PATCH /dashboards/{dashboard_id} -> models.Dashboard
def update_dashboard(
    self,
    # Id of dashboard
    dashboard_id: str,
    body: models.WriteDashboard,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Dashboard:
    """Update Dashboard"""
    url = f"/dashboards/{dashboard_id}"
    result = self.patch(
        url,
        models.Dashboard,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.Dashboard)
    return result
# PATCH /dashboard_elements/{dashboard_element_id} -> models.DashboardElement
def update_dashboard_element(
    self,
    # Id of dashboard element
    dashboard_element_id: str,
    body: models.WriteDashboardElement,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DashboardElement:
    """Update DashboardElement"""
    url = f"/dashboard_elements/{dashboard_element_id}"
    result = self.patch(
        url,
        models.DashboardElement,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.DashboardElement)
    return result
# PATCH /dashboard_filters/{dashboard_filter_id} -> models.DashboardFilter
def update_dashboard_filter(
    self,
    # Id of dashboard filter
    dashboard_filter_id: str,
    body: models.WriteDashboardFilter,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DashboardFilter:
    """Update Dashboard Filter"""
    url = f"/dashboard_filters/{dashboard_filter_id}"
    result = self.patch(
        url,
        models.DashboardFilter,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.DashboardFilter)
    return result
# PATCH /dashboard_layouts/{dashboard_layout_id} -> models.DashboardLayout
def update_dashboard_layout(
    self,
    # Id of dashboard layout
    dashboard_layout_id: str,
    body: models.WriteDashboardLayout,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DashboardLayout:
    """Update DashboardLayout"""
    url = f"/dashboard_layouts/{dashboard_layout_id}"
    result = self.patch(
        url,
        models.DashboardLayout,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.DashboardLayout)
    return result
# PATCH /dashboard_layout_components/{dashboard_layout_component_id} -> models.DashboardLayoutComponent
def update_dashboard_layout_component(
    self,
    # Id of dashboard layout component
    dashboard_layout_component_id: str,
    body: models.WriteDashboardLayoutComponent,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.DashboardLayoutComponent:
    """Update DashboardLayoutComponent"""
    url = f"/dashboard_layout_components/{dashboard_layout_component_id}"
    result = self.patch(
        url,
        models.DashboardLayoutComponent,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.DashboardLayoutComponent)
    return result
# PATCH /datagroups/{datagroup_id} -> models.Datagroup
def update_datagroup(
    self,
    # ID of datagroup.
    datagroup_id: str,
    body: models.WriteDatagroup,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Datagroup:
    """Update Datagroup"""
    url = f"/datagroups/{datagroup_id}"
    result = self.patch(
        url,
        models.Datagroup,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.Datagroup)
    return result
# PATCH /folders/{folder_id} -> models.Folder
def update_folder(
    self,
    # Id of folder
    folder_id: str,
    body: models.WriteFolder,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Folder:
    """Update Folder"""
    url = f"/folders/{folder_id}"
    result = self.patch(
        url,
        models.Folder,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.Folder)
    return result
# PUT /projects/{project_id}/git_branch -> models.GitBranch
def update_git_branch(
    self,
    # Project Id
    project_id: str,
    body: models.WriteGitBranch,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.GitBranch:
    """Update Project Git Branch"""
    url = f"/projects/{project_id}/git_branch"
    result = self.put(
        url,
        models.GitBranch,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.GitBranch)
    return result
# PATCH /groups/{group_id} -> models.Group
def update_group(
    self,
    # Id of group
    group_id: int,
    body: models.WriteGroup,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Group:
    """Update Group"""
    url = f"/groups/{group_id}"
    result = self.patch(
        url,
        models.Group,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.Group)
    return result
# PATCH /homepages/{homepage_id} -> models.Homepage
def update_homepage(
    self,
    # Id of homepage
    homepage_id: int,
    body: models.WriteHomepage,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Homepage:
    """Update Homepage"""
    url = f"/homepages/{homepage_id}"
    result = self.patch(
        url,
        models.Homepage,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.Homepage)
    return result
# PATCH /homepage_items/{homepage_item_id} -> models.HomepageItem
def update_homepage_item(
    self,
    # Id of homepage item
    homepage_item_id: int,
    body: models.WriteHomepageItem,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.HomepageItem:
    """Update Homepage Item"""
    url = f"/homepage_items/{homepage_item_id}"
    result = self.patch(
        url,
        models.HomepageItem,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.HomepageItem)
    return result
# PATCH /homepage_sections/{homepage_section_id} -> models.HomepageSection
def update_homepage_section(
    self,
    # Id of homepage section
    homepage_section_id: int,
    body: models.WriteHomepageSection,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.HomepageSection:
    """Update Homepage section"""
    url = f"/homepage_sections/{homepage_section_id}"
    result = self.patch(
        url,
        models.HomepageSection,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.HomepageSection)
    return result
# PATCH /integrations/{integration_id} -> models.Integration
def update_integration(
    self,
    # Id of Integration
    integration_id: int,
    body: models.WriteIntegration,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Integration:
    """Update Integration"""
    url = f"/integrations/{integration_id}"
    result = self.patch(
        url,
        models.Integration,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.Integration)
    return result
# PATCH /integration_hubs/{integration_hub_id} -> models.IntegrationHub
def update_integration_hub(
    self,
    # Id of Integration Hub
    integration_hub_id: int,
    body: models.WriteIntegrationHub,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.IntegrationHub:
    """Update Integration Hub"""
    url = f"/integration_hubs/{integration_hub_id}"
    result = self.patch(
        url,
        models.IntegrationHub,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.IntegrationHub)
    return result
# PATCH /internal_help_resources -> models.InternalHelpResources
def update_internal_help_resources(
    self,
    body: models.WriteInternalHelpResources,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.InternalHelpResources:
    """Update internal help resources configuration"""
    url = f"/internal_help_resources"
    result = self.patch(
        url,
        models.InternalHelpResources,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.InternalHelpResources)
    return result
# PATCH /internal_help_resources_content -> models.InternalHelpResourcesContent
def update_internal_help_resources_content(
    self,
    body: models.WriteInternalHelpResourcesContent,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.InternalHelpResourcesContent:
    """Update internal help resources content"""
    url = f"/internal_help_resources_content"
    result = self.patch(
        url,
        models.InternalHelpResourcesContent,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.InternalHelpResourcesContent)
    return result
# PATCH /ldap_config -> models.LDAPConfig
def update_ldap_config(
    self,
    body: models.WriteLDAPConfig,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.LDAPConfig:
    """Update LDAP Configuration"""
    url = f"/ldap_config"
    result = self.patch(
        url,
        models.LDAPConfig,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.LDAPConfig)
    return result
# PATCH /legacy_features/{legacy_feature_id} -> models.LegacyFeature
def update_legacy_feature(
    self,
    # id of legacy feature
    legacy_feature_id: int,
    body: models.WriteLegacyFeature,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.LegacyFeature:
    """Update Legacy Feature"""
    url = f"/legacy_features/{legacy_feature_id}"
    result = self.patch(
        url,
        models.LegacyFeature,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.LegacyFeature)
    return result
# PATCH /looks/{look_id} -> models.LookWithQuery
def update_look(
    self,
    # Id of look
    look_id: int,
    body: models.WriteLookWithQuery,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.LookWithQuery:
    """Update Look"""
    url = f"/looks/{look_id}"
    result = self.patch(
        url,
        models.LookWithQuery,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.LookWithQuery)
    return result
# PATCH /lookml_models/{lookml_model_name} -> models.LookmlModel
def update_lookml_model(
    self,
    # Name of lookml model.
    lookml_model_name: str,
    body: models.WriteLookmlModel,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.LookmlModel:
    """Update LookML Model"""
    url = f"/lookml_models/{lookml_model_name}"
    result = self.patch(
        url,
        models.LookmlModel,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.LookmlModel)
    return result
# PATCH /model_sets/{model_set_id} -> models.ModelSet
def update_model_set(
    self,
    # id of model set
    model_set_id: int,
    body: models.WriteModelSet,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ModelSet:
    """Update Model Set"""
    url = f"/model_sets/{model_set_id}"
    result = self.patch(
        url,
        models.ModelSet,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.ModelSet)
    return result
# PATCH /oidc_config -> models.OIDCConfig
def update_oidc_config(
    self,
    body: models.WriteOIDCConfig,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.OIDCConfig:
    """Update OIDC Configuration"""
    url = f"/oidc_config"
    result = self.patch(
        url,
        models.OIDCConfig,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.OIDCConfig)
    return result
# PATCH /password_config -> models.PasswordConfig
def update_password_config(
    self,
    body: models.WritePasswordConfig,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.PasswordConfig:
    """Update Password Config"""
    url = f"/password_config"
    result = self.patch(
        url,
        models.PasswordConfig,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.PasswordConfig)
    return result
# PATCH /permission_sets/{permission_set_id} -> models.PermissionSet
def update_permission_set(
    self,
    # id of permission set
    permission_set_id: int,
    body: models.WritePermissionSet,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.PermissionSet:
    """Update Permission Set"""
    url = f"/permission_sets/{permission_set_id}"
    result = self.patch(
        url,
        models.PermissionSet,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.PermissionSet)
    return result
# PATCH /projects/{project_id} -> models.Project
def update_project(
    self,
    # Project Id
    project_id: str,
    body: models.WriteProject,
    # Requested fields
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Project:
    """Update Project"""
    url = f"/projects/{project_id}"
    result = self.patch(
        url,
        models.Project,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.Project)
    return result
# PUT /projects/{root_project_id}/credential/{credential_id} -> models.RepositoryCredential
def update_repository_credential(
    self,
    # Root Project Id
    root_project_id: str,
    # Credential Id
    credential_id: str,
    body: models.WriteRepositoryCredential,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.RepositoryCredential:
    # NOTE(review): docstring says "Create" although the method name says
    # "update" — this mirrors the upstream API operation summary; confirm
    # against the spec before renaming.
    """Create Repository Credential"""
    url = f"/projects/{root_project_id}/credential/{credential_id}"
    result = self.put(
        url,
        models.RepositoryCredential,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.RepositoryCredential)
    return result
# PATCH /roles/{role_id} -> models.Role
def update_role(
    self,
    # id of role
    role_id: int,
    body: models.WriteRole,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Role:
    """Update Role"""
    url = f"/roles/{role_id}"
    result = self.patch(
        url,
        models.Role,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.Role)
    return result
# PATCH /saml_config -> models.SamlConfig
def update_saml_config(
    self,
    body: models.WriteSamlConfig,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.SamlConfig:
    """Update SAML Configuration"""
    url = f"/saml_config"
    result = self.patch(
        url,
        models.SamlConfig,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.SamlConfig)
    return result
# PATCH /scheduled_plans/{scheduled_plan_id} -> models.ScheduledPlan
def update_scheduled_plan(
    self,
    # Scheduled Plan Id
    scheduled_plan_id: int,
    body: models.WriteScheduledPlan,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ScheduledPlan:
    """Update Scheduled Plan"""
    url = f"/scheduled_plans/{scheduled_plan_id}"
    result = self.patch(
        url,
        models.ScheduledPlan,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.ScheduledPlan)
    return result
# PATCH /session -> models.ApiSession
def update_session(
    self,
    body: models.WriteApiSession,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ApiSession:
    """Update Session"""
    url = f"/session"
    result = self.patch(
        url,
        models.ApiSession,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.ApiSession)
    return result
# PATCH /session_config -> models.SessionConfig
def update_session_config(
    self,
    body: models.WriteSessionConfig,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.SessionConfig:
    """Update Session Config"""
    url = f"/session_config"
    result = self.patch(
        url,
        models.SessionConfig,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.SessionConfig)
    return result
# PATCH /spaces/{space_id} -> models.Space
def update_space(
    self,
    # Id of space
    space_id: str,
    body: models.WriteSpace,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Space:
    """Update Space"""
    url = f"/spaces/{space_id}"
    result = self.patch(
        url,
        models.Space,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.Space)
    return result
# PATCH /themes/{theme_id} -> models.Theme
def update_theme(
    self,
    # Id of theme
    theme_id: str,
    body: models.WriteTheme,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Theme:
    """Update Theme"""
    url = f"/themes/{theme_id}"
    result = self.patch(
        url,
        models.Theme,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.Theme)
    return result
# PATCH /users/{user_id} -> models.User
def update_user(
    self,
    # Id of user
    user_id: int,
    body: models.WriteUser,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.User:
    """Update User"""
    url = f"/users/{user_id}"
    result = self.patch(
        url,
        models.User,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.User)
    return result
# PATCH /user_attributes/{user_attribute_id} -> models.UserAttribute
def update_user_attribute(
    self,
    # Id of user attribute
    user_attribute_id: int,
    body: models.WriteUserAttribute,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.UserAttribute:
    """Update User Attribute"""
    url = f"/user_attributes/{user_attribute_id}"
    result = self.patch(
        url,
        models.UserAttribute,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.UserAttribute)
    return result
# PATCH /groups/{group_id}/attribute_values/{user_attribute_id} -> models.UserAttributeGroupValue
def update_user_attribute_group_value(
    self,
    # Id of group
    group_id: int,
    # Id of user attribute
    user_attribute_id: int,
    body: models.UserAttributeGroupValue,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.UserAttributeGroupValue:
    """Set User Attribute Group Value"""
    url = f"/groups/{group_id}/attribute_values/{user_attribute_id}"
    result = self.patch(
        url,
        models.UserAttributeGroupValue,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.UserAttributeGroupValue)
    return result
# PATCH /users/{user_id}/credentials_email -> models.CredentialsEmail
def update_user_credentials_email(
    self,
    # id of user
    user_id: int,
    body: models.WriteCredentialsEmail,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.CredentialsEmail:
    """Update Email/Password Credential"""
    url = f"/users/{user_id}/credentials_email"
    result = self.patch(
        url,
        models.CredentialsEmail,
        query_params={"fields": fields},
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.CredentialsEmail)
    return result
# PUT /whitelabel_configuration -> models.WhitelabelConfiguration
def update_whitelabel_configuration(
    self,
    body: models.WriteWhitelabelConfiguration,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.WhitelabelConfiguration:
    """Update Whitelabel configuration"""
    url = f"/whitelabel_configuration"
    result = self.put(
        url,
        models.WhitelabelConfiguration,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(result, models.WhitelabelConfiguration)
    return result
# GET /users/{user_id} -> models.User
def user(
    self,
    # Id of user
    user_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.User:
    """Get User by Id"""
    url = f"/users/{user_id}"
    result = self.get(
        url,
        models.User,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.User)
    return result
# GET /user_attributes/{user_attribute_id} -> models.UserAttribute
def user_attribute(
    self,
    # Id of user attribute
    user_attribute_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.UserAttribute:
    """Get User Attribute"""
    url = f"/user_attributes/{user_attribute_id}"
    result = self.get(
        url,
        models.UserAttribute,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.UserAttribute)
    return result
# GET /users/{user_id}/attribute_values -> Sequence[models.UserAttributeWithValue]
def user_attribute_user_values(
    self,
    # Id of user
    user_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    # Specific user attributes to request. Omit or leave blank to request all user attributes.
    user_attribute_ids: Optional[models.DelimSequence[int]] = None,
    # If true, returns all values in the search path instead of just the first value found. Useful for debugging group precedence.
    all_values: Optional[bool] = None,
    # If true, returns an empty record for each requested attribute that has no user, group, or default value.
    include_unset: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.UserAttributeWithValue]:
    """Get User Attribute Values"""
    url = f"/users/{user_id}/attribute_values"
    params = {
        "fields": fields,
        "user_attribute_ids": user_attribute_ids,
        "all_values": all_values,
        "include_unset": include_unset,
    }
    result = self.get(
        url,
        Sequence[models.UserAttributeWithValue],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# GET /users/{user_id}/credentials_api3/{credentials_api3_id} -> models.CredentialsApi3
def user_credentials_api3(
    self,
    # Id of user
    user_id: int,
    # Id of API 3 Credential
    credentials_api3_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.CredentialsApi3:
    """Get API 3 Credential"""
    url = f"/users/{user_id}/credentials_api3/{credentials_api3_id}"
    result = self.get(
        url,
        models.CredentialsApi3,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.CredentialsApi3)
    return result
# GET /users/{user_id}/credentials_email -> models.CredentialsEmail
def user_credentials_email(
    self,
    # id of user
    user_id: int,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.CredentialsEmail:
    """Get Email/Password Credential"""
    url = f"/users/{user_id}/credentials_email"
    result = self.get(
        url,
        models.CredentialsEmail,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.CredentialsEmail)
    return result
# GET /users/{user_id}/credentials_embed/{credentials_embed_id} -> models.CredentialsEmbed
def user_credentials_embed(
    self,
    user_id: int,  # Id of user
    credentials_embed_id: int,  # Id of Embedding Credential
    fields: Optional[str] = None,  # Requested fields.
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.CredentialsEmbed:
    """Get Embedding Credential

    Fetch one embedding credential belonging to the given user.
    """
    url = f"/users/{user_id}/credentials_embed/{credentials_embed_id}"
    result = self.get(
        url,
        models.CredentialsEmbed,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.CredentialsEmbed)
    return result
# GET /users/{user_id}/credentials_google -> models.CredentialsGoogle
def user_credentials_google(
    self,
    user_id: int,  # id of user
    fields: Optional[str] = None,  # Requested fields.
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.CredentialsGoogle:
    """Get Google Auth Credential

    Fetch the Google auth credential record of the given user.
    """
    url = f"/users/{user_id}/credentials_google"
    result = self.get(
        url,
        models.CredentialsGoogle,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.CredentialsGoogle)
    return result
# GET /users/{user_id}/credentials_ldap -> models.CredentialsLDAP
def user_credentials_ldap(
    self,
    user_id: int,  # id of user
    fields: Optional[str] = None,  # Requested fields.
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.CredentialsLDAP:
    """Get LDAP Credential

    Fetch the LDAP credential record of the given user.
    """
    url = f"/users/{user_id}/credentials_ldap"
    result = self.get(
        url,
        models.CredentialsLDAP,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.CredentialsLDAP)
    return result
# GET /users/{user_id}/credentials_looker_openid -> models.CredentialsLookerOpenid
def user_credentials_looker_openid(
    self,
    user_id: int,  # id of user
    fields: Optional[str] = None,  # Requested fields.
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.CredentialsLookerOpenid:
    """Get Looker OpenId Credential

    Fetch the Looker OpenID credential record of the given user.
    """
    url = f"/users/{user_id}/credentials_looker_openid"
    result = self.get(
        url,
        models.CredentialsLookerOpenid,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.CredentialsLookerOpenid)
    return result
# GET /users/{user_id}/credentials_oidc -> models.CredentialsOIDC
def user_credentials_oidc(
    self,
    user_id: int,  # id of user
    fields: Optional[str] = None,  # Requested fields.
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.CredentialsOIDC:
    """Get OIDC Auth Credential

    Fetch the OIDC auth credential record of the given user.
    """
    url = f"/users/{user_id}/credentials_oidc"
    result = self.get(
        url,
        models.CredentialsOIDC,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.CredentialsOIDC)
    return result
# GET /users/{user_id}/credentials_saml -> models.CredentialsSaml
def user_credentials_saml(
    self,
    user_id: int,  # id of user
    fields: Optional[str] = None,  # Requested fields.
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.CredentialsSaml:
    """Get Saml Auth Credential

    Fetch the SAML auth credential record of the given user.
    """
    url = f"/users/{user_id}/credentials_saml"
    result = self.get(
        url,
        models.CredentialsSaml,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.CredentialsSaml)
    return result
# GET /users/{user_id}/credentials_totp -> models.CredentialsTotp
def user_credentials_totp(
    self,
    user_id: int,  # id of user
    fields: Optional[str] = None,  # Requested fields.
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.CredentialsTotp:
    """Get Two-Factor Credential

    Fetch the two-factor (TOTP) credential record of the given user.
    """
    url = f"/users/{user_id}/credentials_totp"
    result = self.get(
        url,
        models.CredentialsTotp,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.CredentialsTotp)
    return result
# GET /users/credential/{credential_type}/{credential_id} -> models.User
def user_for_credential(
    self,
    credential_type: str,  # Type name of credential
    credential_id: str,  # Id of credential
    fields: Optional[str] = None,  # Requested fields.
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.User:
    """Get User by Credential Id

    Look up the user that owns the given credential.
    """
    url = f"/users/credential/{credential_type}/{credential_id}"
    result = self.get(
        url,
        models.User,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.User)
    return result
# GET /users/{user_id}/roles -> Sequence[models.Role]
def user_roles(
    self,
    user_id: int,  # id of user
    fields: Optional[str] = None,  # Requested fields.
    # Get only roles associated directly with the user:
    # exclude those only associated through groups.
    direct_association_only: Optional[bool] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> Sequence[models.Role]:
    """Get User Roles

    Fetch the roles assigned to the given user.
    """
    params = {
        "fields": fields,
        "direct_association_only": direct_association_only,
    }
    result = self.get(
        f"/users/{user_id}/roles",
        Sequence[models.Role],
        query_params=params,
        transport_options=transport_options,
    )
    assert isinstance(result, list)
    return result
# GET /users/{user_id}/sessions/{session_id} -> models.Session
def user_session(
    self,
    user_id: int,  # Id of user
    session_id: int,  # Id of Web Login Session
    fields: Optional[str] = None,  # Requested fields.
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Session:
    """Get Web Login Session

    Fetch one web login session belonging to the given user.
    """
    url = f"/users/{user_id}/sessions/{session_id}"
    result = self.get(
        url,
        models.Session,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.Session)
    return result
# POST /projects/{project_id}/validate -> models.ProjectValidation
def validate_project(
    self,
    project_id: str,  # Project Id
    fields: Optional[str] = None,  # Requested fields
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ProjectValidation:
    """Validate Project

    Trigger a validation run for the given project and return its result.
    """
    url = f"/projects/{project_id}/validate"
    result = self.post(
        url,
        models.ProjectValidation,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(result, models.ProjectValidation)
    return result
# POST /themes/validate -> models.ValidationError
def validate_theme(
    self,
    # Theme definition to validate.
    body: Optional[models.WriteTheme] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ValidationError:
    """Validate Theme

    POST the (optional) theme payload to the server-side validator and
    return the validation result object.
    """
    response = self.post(
        # Fixed path: plain string literal (the original used an f-string
        # with no placeholders, flagged by linters as F541).
        "/themes/validate",
        models.ValidationError,
        body=body,
        transport_options=transport_options,
    )
    assert isinstance(response, models.ValidationError)
    return response
# GET /versions -> models.ApiVersion
def versions(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.ApiVersion:
    """Get ApiVersion

    Fetch the API version information supported by this server.
    """
    response = self.get(
        # Fixed path: plain string literal (the original used an f-string
        # with no placeholders, flagged by linters as F541).
        "/versions",
        models.ApiVersion,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(response, models.ApiVersion)
    return response
# GET /whitelabel_configuration -> models.WhitelabelConfiguration
def whitelabel_configuration(
    self,
    # Requested fields.
    fields: Optional[str] = None,
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.WhitelabelConfiguration:
    """Get Whitelabel configuration

    Fetch the instance-wide whitelabel configuration.
    """
    response = self.get(
        # Fixed path: plain string literal (the original used an f-string
        # with no placeholders, flagged by linters as F541).
        "/whitelabel_configuration",
        models.WhitelabelConfiguration,
        query_params={"fields": fields},
        transport_options=transport_options,
    )
    assert isinstance(response, models.WhitelabelConfiguration)
    return response
# GET /workspaces/{workspace_id} -> models.Workspace
def workspace(
    self,
    workspace_id: str,  # Id of the workspace
    transport_options: Optional[transport.TransportSettings] = None,
) -> models.Workspace:
    """Get Workspace

    Fetch one workspace by its id.
    """
    url = f"/workspaces/{workspace_id}"
    result = self.get(
        url,
        models.Workspace,
        transport_options=transport_options,
    )
    assert isinstance(result, models.Workspace)
    return result
| 35.712107 | 261 | 0.606956 | 22,002 | 229,486 | 6.159849 | 0.027588 | 0.115105 | 0.057552 | 0.079134 | 0.877849 | 0.827306 | 0.77306 | 0.735038 | 0.664242 | 0.616945 | 0 | 0.000249 | 0.299243 | 229,486 | 6,425 | 262 | 35.717665 | 0.84252 | 0.190421 | 0 | 0.761686 | 1 | 0 | 0.066912 | 0.041653 | 0 | 0 | 0 | 0 | 0.067821 | 1 | 0.068239 | false | 0.00313 | 0.001461 | 0.000209 | 0.137938 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
5279382a628e67109fcf50bbdfffedcd765594eb | 2,119 | py | Python | tests/test_expression.py | pwwang/dpipe | 4efafbb1b13f8a70cc692943473d716b66e9e947 | [
"MIT"
] | 21 | 2021-03-16T14:36:57.000Z | 2022-03-31T09:39:39.000Z | tests/test_expression.py | pwwang/dpipe | 4efafbb1b13f8a70cc692943473d716b66e9e947 | [
"MIT"
] | 3 | 2021-06-30T00:55:25.000Z | 2021-07-13T00:06:27.000Z | tests/test_expression.py | pwwang/dpipe | 4efafbb1b13f8a70cc692943473d716b66e9e947 | [
"MIT"
] | 3 | 2021-06-29T06:26:42.000Z | 2021-09-10T00:13:07.000Z | import pytest
from pipda.expression import Expression
from pipda.symbolic import ReferenceAttr, ReferenceItem
class Expr(Expression):
    """Minimal concrete Expression used as a test fixture.

    Provides a trivial ``_pipda_eval`` so instances can be constructed
    and their operator overloads exercised by the tests below.
    """
    def _pipda_eval(self, data, context):
        # No-op: the tests only build expression trees and never
        # evaluate them, so the (implicit None) result is irrelevant.
        ...
def test_expression():
    """Every operator applied to an Expression yields a new Expression.

    The original listed ~40 near-identical ``assert isinstance(...)``
    lines; they are collapsed into one data-driven loop (DRY) without
    changing what is exercised.
    """
    f = Expr()
    # hashable: an Expression must be usable as a dict key
    d = {f: 1}
    assert isinstance(f.a, ReferenceAttr)
    assert isinstance(f[1], ReferenceItem)
    # Binary operators (both normal and reflected forms), comparisons,
    # and unary operators must all build Expression nodes lazily.
    results = (
        f + 1, 1 + f, f - 1, 1 - f,
        f * 1, 1 * f, f @ 1, 1 @ f,
        f / 1, 1 / f, f // 1, 1 // f,
        f % 1, 1 % f,
        f << 1, 1 << f, f >> 1, 1 >> f,
        f & 1, 1 & f, f | 1, 1 | f, f ^ 1, 1 ^ f,
        f ** 1, 1 ** f,
        f > 1, 1 > f, f < 1, 1 < f,
        f == 1, 1 == f, f != 1, 1 != f,
        f >= 1, 1 >= f, f <= 1, 1 <= f,
        -f, +f, ~f,
    )
    for result in results:
        assert isinstance(result, Expression)
    # __index__ returns None here — presumably to opt out of integer
    # conversion; confirm against pipda's Expression implementation.
    assert f.__index__() is None
    # Expressions refuse iteration.
    with pytest.raises(TypeError):
        iter(f)
| 33.634921 | 55 | 0.67815 | 254 | 2,119 | 5.629921 | 0.11811 | 0.481119 | 0.727273 | 0.396504 | 0.804895 | 0.804895 | 0.804895 | 0.804895 | 0.804895 | 0.804895 | 0 | 0.024096 | 0.216612 | 2,119 | 62 | 56 | 34.177419 | 0.837349 | 0.003775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.8 | 1 | 0.036364 | false | 0 | 0.054545 | 0 | 0.109091 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
52870c05113e4cef0f70ce7a7946dd813da70e5b | 44,010 | py | Python | dcl/inflow_import/import_buy_backup.py | OlamideD/zutron | 42a3b360f7603fc4755d519904ecdb1712296ec2 | [
"MIT"
] | null | null | null | dcl/inflow_import/import_buy_backup.py | OlamideD/zutron | 42a3b360f7603fc4755d519904ecdb1712296ec2 | [
"MIT"
] | null | null | null | dcl/inflow_import/import_buy_backup.py | OlamideD/zutron | 42a3b360f7603fc4755d519904ecdb1712296ec2 | [
"MIT"
] | null | null | null | import frappe
from dateutil import parser
from frappe.model.rename_doc import rename_doc
from erpnext.buying.doctype.purchase_order.purchase_order import make_purchase_invoice
from dcl.inflow_import.stock import make_stock_entry
def truncate(f, n):
    """Truncate/pad a float ``f`` to ``n`` decimal places without rounding.

    The value is rendered with 12 decimal digits, split at the decimal
    point, and the fractional part cut (or zero-padded) to ``n`` digits,
    so the kept digits are never rounded.
    """
    whole, _, frac = ('%.12f' % f).partition('.')
    return float('%s.%s' % (whole, (frac + '0' * n)[:n]))
#dcl.inflow_import.import_buy.start_import
def start_import(file):
import csv
import os
current_customer = ""
current_order = ""
SI_dict = {}
last_single_SI_dict = {}
SI_items = []
last_single_SI_items = []
paid_and_fulfilled_items = []
last_single_paid_and_fulfilled_items = []
fulfilled_items = []
last_single_fulfilled_items = []
paid_items = []
last_single_paid_items = []
paid_pi = {}
# input_file = csv.DictReader(open(os.path.dirname(os.path.abspath(__file__))+'/data/inFlow_PurchaseOrder_test.csv'))
input_file = csv.DictReader(open(os.path.dirname(os.path.abspath(__file__))+'/data/'+file))
# current_customer = input_file[0]["Customer"]
income_accounts = "5111 - Cost of Goods Sold - DCL"
# income_accounts = "Sales - J"
cost_centers = "Main - DCL"
# cost_centers = "Main - J"
rows = list(input_file)
total_paid = 0.0
last_single_total_paid = 0.0
# print rows
totalrows = len(rows) - 1
for i,row in enumerate(rows):
# print row
if row["Location"].strip():
if row["Location"].strip() == "DCL House, Plot 1299 Fumilayo Ransome Kuti Way, Area 3, PMB 690 Garki, Abuja":
to_warehouse = "DCLWarehouse - Abuja - DCL"
elif row[
"Location"].strip() == "DCL Laboratory Products Ltd, Plot 5 Block 4 Etal Avenue off Kudirat Abiola Way by NNPC Lagos NG - DCL":
to_warehouse = "Lagos Warehouse - DCL"
else:
to_warehouse = row["Location"].strip() + " - DCL"
else:
to_warehouse = ""
#make item non stock
item_code1 = row["ItemName"].strip()
frappe.db.sql("""UPDATE `tabItem` SET is_stock_item=1 WHERE item_code=%s""", (item_code1))
frappe.db.commit()
to_warehouse = "DCLWarehouse - Abuja - DCL"
if row["Location"].strip():
exists_cat = frappe.db.sql("""SELECT Count(*) FROM `tabWarehouse` WHERE warehouse_name=%s""", (row["Location"].strip()))
# print exists_cat, row["Location"]
if exists_cat[0][0] == 0:
item_code = row["Location"]
SI = frappe.get_doc({"doctype": "Warehouse",
"warehouse_name": item_code.strip()
})
SI_created = SI.insert(ignore_permissions=True)
frappe.db.commit()
item_code1 = row["ItemName"].strip()
# if row[
# "ItemName"] == "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser.\nSupplied specifically without top plate (ring) for use only with the autoclave / steam sterilizer.":
if "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser." in item_code1:
item_code1 = "Kerosene Stove"
exists_cat = frappe.db.sql("""SELECT Count(*) FROM `tabItem` WHERE item_code=%s""", (item_code1))
# print exists_cat
if exists_cat[0][0] == 0:
SI = frappe.get_doc({"doctype": "Item",
"item_code": item_code1,
"description": row["ItemDescription"],
# "item_group": row["Category"].strip() + " Category"
"item_group": "All Item Groups"
})
SI_created = SI.insert(ignore_permissions=True)
frappe.db.commit()
#CREATE SUPPLIER IF NOT EXISTS
exists_supplier = frappe.db.sql("""SELECT Count(*) FROM `tabSupplier` WHERE name=%s""",(row["Vendor"].strip()))
if exists_supplier[0][0] == 0:
frappe.get_doc({"doctype":"Supplier","supplier_name":row["Vendor"].strip(),
"supplier_group":"All Supplier Groups","supplier_type":"Company"}).insert()
frappe.db.commit()
if i==0:
current_customer = row["Vendor"].strip()
current_order = row["OrderNumber"]
dt = parser.parse(row["OrderDate"])
currency = ""
conversion_rate = 0.0
if float(row["ExchangeRate"]) != 0.0 and float(row["ExchangeRate"]) != 1.0:
currency = row["CurrencyCode"]
conversion_rate = float(row["ExchangeRate"])
elif float(row["ExchangeRate"]) == 0.0 or float(row["ExchangeRate"]) == 1.0:
currency = "NGN"
conversion_rate = 0.0
po_status = ""
if row["InventoryStatus"] == "Fulfilled" and row["PaymentStatus"] == "Paid":
po_status = "Completed"
elif row["InventoryStatus"] == "Unfulfilled" and row["PaymentStatus"] == "Paid":
po_status = "To Receive"
elif row["InventoryStatus"] == "Fulfilled" and row["PaymentStatus"] == "Unpaid":
po_status = "To Bill"
SI_dict = {"doctype": "Purchase Order",
"title": current_customer,
"supplier": current_customer,
"posting_date": dt.date(),
"schedule_date": dt.date(), # TODO + 30 days
"transaction_date": dt.date(),
# "due_date": row["DueDate"],
"po_status":po_status,
"due_date": dt.date(),
"items": SI_items,
# "docstatus": 1,
"outstanding_amount": total_paid,
"name": row["OrderNumber"],
"OrderDate":dt,
"inflow_remarks":row["OrderRemarks"],
"inflow_file":file,
"currency": currency,
"conversion_rate":conversion_rate
}
# print(current_customer,row["Vendor"],totalrows)
print " ",totalrows,i
if current_customer != row["Vendor"].strip() or current_customer != row["Vendor"].strip() \
or current_order!= row["OrderNumber"] or totalrows == i:
if totalrows == i and current_customer == row["Vendor"]:
print "LAST ROW!"
item_code1 = row["ItemName"].strip()
# if row[
# "ItemName"] == "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser.\nSupplied specifically without top plate (ring) for use only with the autoclave / steam sterilizer.":
if "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser." in item_code1:
item_code1 = "Kerosene Stove"
print row["ItemName"]
SI_item = {
# "item_code": installment.item, # test
"description": row["ItemDescription"].strip() or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]),2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": float(row["ItemQuantity"]),
"received_qty": float(row["ItemQuantity"]),
# "warehouse":row["Location"].strip() +" - DCL",
"warehouse":to_warehouse,
"InventoryStatus":row["InventoryStatus"],
"PaymentStatus":row["PaymentStatus"],
"OrderDate":row["OrderDate"]
}
SI_items.append(SI_item)
if row["PaymentStatus"] == "Paid" and row["InventoryStatus"] == "Fulfilled":
paid_and_fulfilled_items.append({
# "item_code": installment.item, # test
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
if row["PaymentStatus"] == "Paid" and row["InventoryStatus"] != "Fulfilled":
paid_items.append({
# "item_code": installment.item, # test
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
if row["PaymentStatus"] != "Paid" and row["InventoryStatus"] == "Fulfilled":
fulfilled_items.append({
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
total_paid += float(row["ItemSubtotal"])
elif totalrows == i:
print "LAST SINGLE ROW!"
item_code1 = row["ItemName"].strip()
# if row[
# "ItemName"] == "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser.\nSupplied specifically without top plate (ring) for use only with the autoclave / steam sterilizer.":
if "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser." in item_code1:
item_code1 = "Kerosene Stove"
last_single_SI_items.append({
# "item_code": installment.item, # test
"description": row["ItemDescription"].strip() or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse":row["Location"].strip() +" - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"],
"OrderDate": row["OrderDate"]
})
print last_single_SI_items
last_single_SI_dict = {"doctype": "Purchase Order",
"title": current_customer,
"supplier": current_customer,
"posting_date": dt.date(),
"schedule_date": dt.date(), # TODO + 30 days
"transaction_date": dt.date(),
# "due_date": row["DueDate"],
"due_date": dt.date(),
"items": last_single_SI_items,
# "docstatus": 1,
"outstanding_amount": total_paid,
"name": row["OrderNumber"],
"OrderDate": dt,
"inflow_remarks": row["OrderRemarks"],
"currency": currency,
"conversion_rate": conversion_rate,
"inflow_file":file
}
if row["PaymentStatus"] == "Paid" and row["InventoryStatus"] == "Fulfilled":
last_single_paid_and_fulfilled_items.append({
# "item_code": installment.item, # test
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
if row["PaymentStatus"] == "Paid" and row["InventoryStatus"] != "Fulfilled":
last_single_paid_items.append({
# "item_code": installment.item, # test
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
if row["PaymentStatus"] != "Paid" and row["InventoryStatus"] == "Fulfilled":
last_single_fulfilled_items.append({
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
last_single_total_paid += float(row["ItemSubtotal"])
SI_dict.update({"outstanding_amount":total_paid,
"inflow_file":file,
"per_received":100.0,
"per_billed":100.0
})
print SI_dict["items"]
SI = frappe.get_doc(SI_dict)
# print SI_dict
print(" CURRENT:",current_order,SI_dict["po_status"])
SI_created = SI.insert(ignore_permissions=True)
SI_created.submit()
"""
To Receive and Bill
To Bill
To Receive
Completed
"""
# print " PO Status: ",SI_dict["po_status"]
# if SI_dict["po_status"] == "To Receive and Bill":
# print "To Receive and Bill"
# SI_created.db_set("per_received", 100, update_modified=False)
# SI_created.db_set("per_billed", 100, update_modified=False)
# elif SI_dict["po_status"] == "To Receive":
# print "To Receive"
# SI_created.db_set("per_billed", 100, update_modified=False)
# if SI_dict["po_status"] == "To Bill":
# print "To Bill"
# SI_created.db_set("per_received", 100, update_modified=False)
# SI_created.status = SI_dict["po_status"]
frappe.db.commit()
#/home/jvfiel/frappe-v11/apps/erpnext/erpnext/buying/doctype/purchase_order/purchase_order.py
from erpnext.buying.doctype.purchase_order.purchase_order import update_status
#/home/jvfiel/frappe-v11/apps/frappe/frappe/model/rename_doc.py
rename_doc("Purchase Order",SI_created.name,current_order,force=True)
frappe.db.commit()
# update_status(SI_dict["po_status"], current_order)
# SI_created.set_status(update=True, status=SI_dict["po_status"])
#self.db_set('status', self.status, update_modified = update_modified)
# SI_created.db_set(fieldname='status',value=SI_dict['po_status'])
# frappe.db.sql("""UPDATE `tabPurchase Order` SET status=%s WHERE name=%s""",(SI_dict["po_status"],current_order),debug=1)
#self.db_set("per_received", flt(received_qty / total_qty) * 100, update_modified=False)
# frappe.db.commit()
print paid_and_fulfilled_items
if paid_and_fulfilled_items:
pi = make_purchase_invoice(current_order)
if to_warehouse:
pi.update_stock = 1
pi.is_paid = 1
pi.items = []
pi.posting_date = SI_dict['OrderDate'].date()
pi.posting_time = str(SI_dict['OrderDate'].time())
pi_total = 0.0
if float(SI_dict["conversion_rate"]) != 0.0 and float(SI_dict["conversion_rate"]) != 1.0:
pi.currency = SI_dict["currency"]
pi.conversion_rate = float(SI_dict["conversion_rate"])
elif float(SI_dict["conversion_rate"]) == 0.0 or float(SI_dict["conversion_rate"]) == 1.0:
pi.currency = "NGN"
pi.conversion_rate = None
zeros = []
for item in paid_and_fulfilled_items:
# if float(item["rate"]) < 0:
# zeros.append(item)
# else:
nl = pi.append('items', {})
nl.description = item["description"]
nl.item_name = item["item_name"]
nl.item_code = item["item_name"]
nl.rate = float(item["rate"])
# nl.base_rate = float(item["rate"])
nl.conversion_factor = item["conversion_factor"]
nl.uom = item["uom"]
nl.expense_account = item["expense_account"]
nl.cost_center = item["cost_center"]
nl.qty = float(item["qty"])
nl.warehouse = item["warehouse"]
nl.purchase_order = current_order
pi_total += float(nl.rate) * float(nl.qty)
print(nl.rate)
# if pi.items:
pi.set_posting_time = 1
pi.cash_bank_account = "Access Bank - DCL"
pi.taxes_and_charges = ""
pi.taxes = []
pi.inflow_file = file
print " ", paid_and_fulfilled_items
print " Paid and Fulfilled PI Total", pi_total,current_order,pi.currency
# print " ", pi.as_dict()["items"]
if pi_total:
pi.mode_of_payment = "Cash"
# if pi.conversion_rate:
# print "<<<<",pi.grand_total,">>>>"
# print "<<<<",pi.conversion_rate,">>>>"
# print "<<<<",pi.grand_total * pi.conversion_rate,">>>>"
pi.paid_amount = pi.grand_total
pi.base_paid_amount = pi.outstanding_amount
pi.insert()
pi.save()
frappe.db.commit()
pi.submit()
frappe.db.commit()
else:
for item in zeros:
make_stock_entry(item_code=item["item_code"], qty=item['qty'],
to_warehouse=item["warehouse"],
valuation_rate=1, remarks="This is affected by data import. " + file,
posting_date=pi.posting_date,
posting_time=pi.posting_time,
set_posting_time=1, inflow_file=file)
frappe.db.commit()
print "Stock entry created."
if paid_items:
pi = make_purchase_invoice(current_order)
# pi.update_stock = 1
pi.is_paid = 1
pi.items = []
pi.posting_date = SI_dict['OrderDate'].date()
pi.posting_time = str(SI_dict['OrderDate'].time())
pi_total = 0.0
if float(SI_dict["conversion_rate"]) != 0.0 and float(SI_dict["conversion_rate"]) != 1.0:
pi.currency = SI_dict["currency"]
pi.conversion_rate = float(SI_dict["conversion_rate"])
elif float(SI_dict["conversion_rate"]) == 0.0 or float(SI_dict["conversion_rate"]) == 1.0:
pi.currency = "NGN"
pi.conversion_rate = None
zeros = []
for item in paid_items:
nl = pi.append('items', {})
nl.description = item["description"]
nl.item_name = item["item_name"]
nl.item_code = item["item_name"]
nl.rate = float(item["rate"])
nl.conversion_factor = item["conversion_factor"]
nl.uom = item["uom"]
nl.expense_account = item["expense_account"]
nl.cost_center = item["cost_center"]
nl.qty = float(item["qty"])
nl.warehouse = item["warehouse"]
nl.purchase_order = current_order
pi_total += float(nl.rate) * float(nl.qty)
# if pi.items:
pi.set_posting_time = 1
pi.cash_bank_account = "Access Bank - DCL"
pi.taxes_and_charges = ""
pi.taxes = []
pi.inflow_file = file
print " Paid Items:", paid_items
print " Paid Items Only PI Total", pi_total,current_order,pi.currency
# print " ", pi.as_dict()["items"]
if pi_total:
pi.mode_of_payment = "Cash"
pi.insert()
frappe.db.commit()
if pi.currency != "NGN":
pi.paid_amount = pi.grand_total
pi.base_paid_amount = pi.outstanding_amount
pi.save()
frappe.db.commit()
pi.submit()
frappe.db.commit()
else:
pass
if fulfilled_items:
pi = make_purchase_invoice(current_order)
if to_warehouse:
pi.update_stock = 1
# pi.is_paid = 1
pi.items = []
pi.posting_date = SI_dict['OrderDate'].date()
pi.posting_time = str(SI_dict['OrderDate'].time())
pi_total = 0.0
if float(SI_dict["conversion_rate"]) != 0.0 and float(
SI_dict["conversion_rate"]) != 1.0:
pi.currency = SI_dict["currency"]
pi.conversion_rate = float(SI_dict["conversion_rate"])
elif float(SI_dict["conversion_rate"]) == 0.0 or float(
SI_dict["conversion_rate"]) == 1.0:
pi.currency = "NGN"
pi.conversion_rate = None
zeros = []
for item in fulfilled_items:
nl = pi.append('items', {})
nl.description = item["description"]
nl.item_name = item["item_name"]
nl.item_code = item["item_name"]
nl.rate = float(item["rate"])
nl.conversion_factor = item["conversion_factor"]
nl.uom = item["uom"]
nl.expense_account = item["expense_account"]
nl.cost_center = item["cost_center"]
nl.qty = float(item["qty"])
nl.received_qty = float(item["qty"])
nl.warehouse = item["warehouse"]
nl.purchase_order = current_order
pi_total += abs(float(nl.rate) * float(nl.qty))
# print nl.rate
# if pi.items:
pi.set_posting_time = 1
pi.cash_bank_account = "Access Bank - DCL"
pi.taxes_and_charges = ""
pi.taxes = []
pi.inflow_file = file
print " ", fulfilled_items
print " Fulfilled Items Only PI Total", pi_total, current_order, pi.currency
print " conversion rate", pi.conversion_rate
if pi_total:
pi.mode_of_payment = "Cash"
pi.insert()
frappe.db.commit()
if pi.currency != "NGN":
# pi.paid_amount = pi.grand_total
# pi.base_paid_amount = pi.outstanding_amount
pi.rounding_adjustment = 0.0
pi.disable_rounded_total = 1
pi.save()
frappe.db.commit()
pi.submit()
frappe.db.commit()
else:
pass
current_customer = row["Vendor"].strip()
current_order = row["OrderNumber"]
dt = parser.parse(row["OrderDate"])
SI_items = []
currency = ""
conversion_rate = 0.0
if float(row["ExchangeRate"]) != 0.0 and float(row["ExchangeRate"]) != 1.0:
currency = row["CurrencyCode"]
conversion_rate = float(row["ExchangeRate"])
elif float(row["ExchangeRate"]) == 0.0 or float(row["ExchangeRate"]) == 1.0:
currency = "NGN"
conversion_rate = 0.0
po_status = ""
if row["InventoryStatus"] == "Fulfilled" and row["PaymentStatus"] == "Paid":
po_status = "Completed"
elif row["InventoryStatus"] == "Unfulfilled" and row["PaymentStatus"] == "Paid":
po_status = "To Receive"
elif row["InventoryStatus"] == "Fulfilled" and row["PaymentStatus"] == "Unpaid":
po_status = "To Bill"
SI_dict = {"doctype": "Purchase Order",
"title": current_customer,
"supplier": current_customer,
"posting_date": dt.date(),
"schedule_date": dt.date(), # TODO + 30 days
"transaction_date": dt.date(),
# "due_date": row["DueDate"],
"po_status":po_status,
"due_date": dt.date(),
"items": SI_items,
# "docstatus": 1,
"outstanding_amount": total_paid,
"name": row["OrderNumber"],
"OrderDate":dt,
"inflow_remarks": row["OrderRemarks"],
"inflow_file": file,
"currency": currency,
"conversion_rate": conversion_rate
}
paid_items = []
fulfilled_items = []
paid_and_fulfilled_items = []
# else:
item_code1 = row["ItemName"].strip()
# if row[
# "ItemName"] == "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser.\nSupplied specifically without top plate (ring) for use only with the autoclave / steam sterilizer.":
if "Kerosene stove, four burner pressure type for use with 39L autoclave / steriliser." in item_code1:
item_code1 = "Kerosene Stove"
SI_item = {
# "item_code": installment.item, # test
"description": row["ItemDescription"].strip() or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "warehouse": row["Location"].strip() +" - DCL",
"warehouse": to_warehouse,
"rate": float(row["ItemUnitPrice"]),
"conversion_factor":1,
"uom":"Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": float(row["ItemQuantity"]),
"received_qty": float(row["ItemQuantity"]),
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"],
"OrderDate":row["OrderDate"]
}
SI_items.append(SI_item)
if row["PaymentStatus"] == "Paid" and row["InventoryStatus"] == "Fulfilled":
paid_and_fulfilled_items.append({
# "item_code": installment.item, # test
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
if row["PaymentStatus"] == "Paid" and row["InventoryStatus"] != "Fulfilled":
paid_items.append({
# "item_code": installment.item, # test
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
# "rate": truncate(float(row["ItemSubtotal"]),2),
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
if row["PaymentStatus"] != "Paid" and row["InventoryStatus"] == "Fulfilled":
fulfilled_items.append({
"description": row["ItemDescription"] or row["ItemName"],
"item_name": item_code1,
"item_code": item_code1,
"rate": truncate(float(row["ItemUnitPrice"]), 2),
"conversion_factor": 1,
"uom": "Nos",
"expense_account": income_accounts,
"cost_center": cost_centers,
"qty": row["ItemQuantity"],
# "warehouse": row["Location"].strip() + " - DCL",
"warehouse": to_warehouse,
"InventoryStatus": row["InventoryStatus"],
"PaymentStatus": row["PaymentStatus"]
})
total_paid +=float(row["ItemSubtotal"])
if last_single_SI_dict != {}:
print "* END *", current_order
print last_single_SI_dict["items"]
SI = frappe.get_doc(last_single_SI_dict)
# print SI_dict
SI_created = SI.insert(ignore_permissions=True)
frappe.db.commit()
SI_created.submit()
frappe.db.commit()
rename_doc("Purchase Order", SI_created.name, current_order, force=True)
frappe.db.commit()
if last_single_paid_and_fulfilled_items:
pi = make_purchase_invoice(current_order)
pi.update_stock = 1
pi.is_paid = 1
pi.items = []
pi.posting_date = SI_dict['OrderDate'].date()
pi.posting_time = str(SI_dict['OrderDate'].time())
pi_total = 0.0
if float(last_single_SI_dict["conversion_rate"]) != 0.0 and float(last_single_SI_dict["conversion_rate"]) != 1.0:
pi.currency = SI_dict["currency"]
pi.conversion_rate = float(SI_dict["conversion_rate"])
elif float(last_single_SI_dict["conversion_rate"]) == 0.0 or float(last_single_SI_dict["conversion_rate"]) == 1.0:
pi.currency = "NGN"
pi.conversion_rate = None
zeros = []
for item in last_single_paid_and_fulfilled_items:
# if float(item["rate"]) < 0:
# zeros.append(item)
# else:
nl = pi.append('items', {})
nl.description = item["description"]
nl.item_name = item["item_name"]
nl.item_code = item["item_name"]
nl.rate = float(item["rate"])
# nl.base_rate = float(item["rate"])
nl.conversion_factor = item["conversion_factor"]
nl.uom = item["uom"]
nl.expense_account = item["expense_account"]
nl.cost_center = item["cost_center"]
nl.qty = float(item["qty"])
nl.warehouse = item["warehouse"]
nl.purchase_order = current_order
pi_total += float(nl.rate) * float(nl.qty)
# if pi.items:
pi.set_posting_time = 1
pi.cash_bank_account = "Access Bank - DCL"
pi.taxes_and_charges = ""
pi.taxes = []
pi.inflow_file = file
# print " ", paid_and_fulfilled_items
print " Paid and Fulfilled PI Total", pi_total, current_order, pi.currency
# print " ", pi.as_dict()["items"]
if pi_total:
pi.mode_of_payment = "Cash"
# if pi.conversion_rate:
# print "<<<<",pi.grand_total,">>>>"
# print "<<<<",pi.conversion_rate,">>>>"
# print "<<<<",pi.grand_total * pi.conversion_rate,">>>>"
pi.paid_amount = pi.grand_total
pi.base_paid_amount = pi.outstanding_amount
pi.insert()
pi.save()
frappe.db.commit()
pi.submit()
frappe.db.commit()
else:
for item in zeros:
make_stock_entry(item_code=item["item_code"], qty=item['qty'],
to_warehouse=item["warehouse"],
valuation_rate=1, remarks="This is affected by data import. " + file,
posting_date=pi.posting_date,
posting_time=pi.posting_time,
set_posting_time=1, inflow_file=file)
frappe.db.commit()
print "Stock entry created."
if last_single_paid_items:
pi = make_purchase_invoice(current_order)
# pi.update_stock = 1
pi.is_paid = 1
pi.items = []
pi.posting_date = last_single_SI_dict['OrderDate'].date()
pi.posting_time = str(last_single_SI_dict['OrderDate'].time())
pi_total = 0.0
if float(last_single_SI_dict["conversion_rate"]) != 0.0 and float(last_single_SI_dict["conversion_rate"]) != 1.0:
pi.currency = last_single_SI_dict["currency"]
pi.conversion_rate = float(last_single_SI_dict["conversion_rate"])
elif float(last_single_SI_dict["conversion_rate"]) == 0.0 or float(last_single_SI_dict["conversion_rate"]) == 1.0:
pi.currency = "NGN"
pi.conversion_rate = None
zeros = []
for item in last_single_paid_items:
nl = pi.append('items', {})
nl.description = item["description"]
nl.item_name = item["item_name"]
nl.item_code = item["item_name"]
nl.rate = float(item["rate"])
nl.conversion_factor = item["conversion_factor"]
nl.uom = item["uom"]
nl.expense_account = item["expense_account"]
nl.cost_center = item["cost_center"]
nl.qty = float(item["qty"])
nl.warehouse = item["warehouse"]
nl.purchase_order = current_order
pi_total += float(nl.rate) * float(nl.qty)
# if pi.items:
pi.set_posting_time = 1
pi.cash_bank_account = "Access Bank - DCL"
pi.taxes_and_charges = ""
pi.taxes = []
pi.inflow_file = file
# print " ", paid_items
print " Paid Items Only PI Total", pi_total, current_order, pi.currency
# print " ", pi.as_dict()["items"]
if pi_total:
pi.mode_of_payment = "Cash"
pi.insert()
frappe.db.commit()
if pi.currency != "NGN":
pi.paid_amount = pi.grand_total
pi.base_paid_amount = pi.outstanding_amount
pi.save()
frappe.db.commit()
pi.submit()
frappe.db.commit()
else:
pass
if last_single_fulfilled_items:
pi = make_purchase_invoice(current_order)
pi.update_stock = 1
# pi.is_paid = 1
pi.items = []
pi.posting_date = last_single_SI_dict['OrderDate'].date()
pi.posting_time = str(last_single_SI_dict['OrderDate'].time())
pi_total = 0.0
if float(last_single_SI_dict["conversion_rate"]) != 0.0 and float(
last_single_SI_dict["conversion_rate"]) != 1.0:
pi.currency = last_single_SI_dict["currency"]
pi.conversion_rate = float(last_single_SI_dict["conversion_rate"])
elif float(last_single_SI_dict["conversion_rate"]) == 0.0 or float(
last_single_SI_dict["conversion_rate"]) == 1.0:
pi.currency = "NGN"
pi.conversion_rate = None
zeros = []
for item in last_single_fulfilled_items:
nl = pi.append('items', {})
nl.description = item["description"]
nl.item_name = item["item_name"]
nl.item_code = item["item_name"]
nl.rate = float(item["rate"])
nl.conversion_factor = item["conversion_factor"]
nl.uom = item["uom"]
nl.expense_account = item["expense_account"]
nl.cost_center = item["cost_center"]
nl.qty = float(item["qty"])
nl.warehouse = item["warehouse"]
nl.purchase_order = current_order
pi_total += float(nl.rate) * float(nl.qty)
# if pi.items:
pi.set_posting_time = 1
pi.cash_bank_account = "Access Bank - DCL"
pi.taxes_and_charges = ""
pi.taxes = []
pi.inflow_file = file
# print " ", paid_items
print " Paid Items Only PI Total", pi_total, current_order, pi.currency
# print " ", pi.as_dict()["items"]
if pi_total:
pi.mode_of_payment = "Cash"
pi.insert()
frappe.db.commit()
if pi.currency != "NGN":
pi.paid_amount = pi.grand_total
pi.base_paid_amount = pi.outstanding_amount
pi.save()
frappe.db.commit()
pi.submit()
frappe.db.commit()
else:
pass
None
def remove_imported_data(file):
SIs = frappe.db.sql("""SELECT name FROM `tabPurchase Invoice` WHERE inflow_file=%s""",(file))
for si in SIs:
si_doc = frappe.get_doc("Purchase Invoice",si[0])
if si_doc.docstatus == 1:
si_doc.cancel()
si_doc.delete()
# SIs = frappe.db.sql("""SELECT name FROM `tabStock Entry` WHERE docstatus=1""")
#
# for si in SIs:
# si_doc = frappe.get_doc("Stock Entry", si[0])
# si_doc.cancel()
# si_doc.delete()
SIs = frappe.db.sql("""SELECT name FROM `tabPurchase Order` WHERE inflow_file=%s""",(file))
for si in SIs:
si_doc = frappe.get_doc("Purchase Order", si[0])
if si_doc.docstatus == 1:
si_doc.cancel()
si_doc.delete() | 47.62987 | 222 | 0.486708 | 4,214 | 44,010 | 4.862838 | 0.065971 | 0.020789 | 0.023424 | 0.02928 | 0.873902 | 0.84877 | 0.831837 | 0.821931 | 0.813732 | 0.806949 | 0 | 0.010337 | 0.395501 | 44,010 | 924 | 223 | 47.62987 | 0.759923 | 0.120427 | 0 | 0.831006 | 0 | 0.001397 | 0.191467 | 0 | 0 | 0 | 0 | 0.002165 | 0 | 0 | null | null | 0.005587 | 0.01676 | null | null | 0.032123 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
528d4593d90c039e41626284daf9e79b597350b7 | 18,007 | py | Python | envi/tests/test_arch_arm_cmp_flags.py | rnui2k/vivisect | b7b00f2d03defef28b4b8c912e3a8016e956c5f7 | [
"ECL-2.0",
"Apache-2.0"
] | 716 | 2015-01-01T14:41:11.000Z | 2022-03-28T06:51:50.000Z | envi/tests/test_arch_arm_cmp_flags.py | rnui2k/vivisect | b7b00f2d03defef28b4b8c912e3a8016e956c5f7 | [
"ECL-2.0",
"Apache-2.0"
] | 266 | 2015-01-01T15:07:27.000Z | 2022-03-30T15:19:26.000Z | envi/tests/test_arch_arm_cmp_flags.py | rnui2k/vivisect | b7b00f2d03defef28b4b8c912e3a8016e956c5f7 | [
"ECL-2.0",
"Apache-2.0"
] | 159 | 2015-01-01T16:19:44.000Z | 2022-03-21T21:55:34.000Z | cmp_tests = ( \
{ "setup" : ( ("r3",0x7fff),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },)
cmn_tests = ( \
{ "setup" : ( ("r3",0x7fff),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x90000000), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fff),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x90000000), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x90000000), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8001),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x0), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x90000000), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7ffffff),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000000),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x90000000), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x8000001),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x90000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x90000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x90000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x7fff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x8000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x8001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x90000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x90000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x90000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x7ffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x8000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x8000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x90000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x7fffffff),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x60000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x80000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x70000000), ), },
{ "setup" : ( ("r3",0x80000000),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x30000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x7fffffff), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x60000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x80000000), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x30000000), ), },
{ "setup" : ( ("r3",0x80000001),("r7",0x80000001), ("cpsr",0), ("r5",0)),
"tests" : ( ("cpsr", 0x30000000), ), },)
| 55.749226 | 73 | 0.451547 | 1,912 | 18,007 | 4.251569 | 0.012029 | 0.136917 | 0.136917 | 0.156477 | 0.996802 | 0.996802 | 0.996802 | 0.996802 | 0.995571 | 0.987698 | 0 | 0.246779 | 0.159494 | 18,007 | 322 | 74 | 55.92236 | 0.29032 | 0 | 0 | 0.975 | 0 | 0 | 0.211929 | 0 | 0 | 0 | 0.220982 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
bfdc4839f8ae569b7bc59048efef9037aecee673 | 51,263 | py | Python | Testing/Functional/RAS/lib/PLActions.py | rjwinchester/VistA | 6ada05a153ff670adcb62e1c83e55044a2a0f254 | [
"Apache-2.0"
] | 72 | 2015-02-03T02:30:45.000Z | 2020-01-30T17:20:52.000Z | Testing/Functional/RAS/lib/PLActions.py | rjwinchester/VistA | 6ada05a153ff670adcb62e1c83e55044a2a0f254 | [
"Apache-2.0"
] | 80 | 2016-04-19T12:04:06.000Z | 2020-01-31T14:35:19.000Z | Testing/Functional/RAS/lib/PLActions.py | rjwinchester/VistA | 6ada05a153ff670adcb62e1c83e55044a2a0f254 | [
"Apache-2.0"
] | 67 | 2015-01-27T16:47:56.000Z | 2020-02-12T21:23:56.000Z | #---------------------------------------------------------------------------
# Copyright 2013 PwC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#---------------------------------------------------------------------------
## @class PLActions
## Problem List Package Tests (Actions)
'''
Problem List Actions class. Extends Actions
Created on Mar 7, 2012
@author: pbradley
@copyright PwC
@license http://www.apache.org/licenses/LICENSE-2.0
'''
import time
import TestHelper
from Actions import Actions
import logging
class PLActions (Actions):
'''
This class extends the Actions class with methods specific to actions performed
through the Roll and Scroll interface for the Problem List package.
'''
def __init__(self, VistAconn, scheduling=None, user=None, code=None):
Actions.__init__(self, VistAconn, scheduling, user, code)
def signon (self):
''' This provides a signon via ^XUP or ^ZU depending on the value of acode'''
if self.acode is None:
self.VistA.write('S DUZ=1,DUZ(0)="@" D ^XUP')
self.VistA.wait('OPTION NAME:')
self.VistA.write('GMPL MGT MENU')
else:
self.VistA.write('D ^ZU')
self.VistA.wait('ACCESS CODE:')
self.VistA.write(self.acode)
self.VistA.wait('VERIFY CODE:')
self.VistA.write(self.vcode)
self.VistA.wait('//')
self.VistA.write('')
self.VistA.wait('Option:')
self.VistA.write('Problem List')
# def signoff(self):
# super(Actions,self).signoff(self.VistA, self.acode)
def write(self, string):
self.VistA.write(string)
def addcsv(self, ssn, pfile):
'''Add a list of problems to a patient's record'''
preader = TestHelper.CSVFileReader()
prec = preader.getfiledata(pfile)
for key in sorted(prec):
problem_data = prec[key]
self.VistA.wait('Problem List Mgt Menu')
self.VistA.write('Patient Problem List')
self.VistA.wait('PATIENT NAME')
self.VistA.write(ssn)
self.VistA.wait('Select Action')
self.VistA.write('AD')
self.VistA.wait('Clinic')
self.VistA.write(problem_data['clinic'].strip())
while True:
index = self.VistA.multiwait(['Select Item','PROBLEM:',"No items available"])
if index == 0:
self.VistA.write('AD')
else:
self.VistA.write('?')
probID =[problem_data['icd'].strip(),
problem_data['icd10'].strip(),
problem_data['snomed'].strip()]
valIndex = 0
while True:
index = self.VistA.multiwait(['Ok','PROBLEM:'])
if index == 1:
self.VistA.write(probID[valIndex])
valIndex += 1;
elif index == 0:
break
else:
self.VistA.write('?')
self.VistA.write('Yes')
# if self.acode is not None:
# self.VistA.wait('//'); self.VistA.write('')
index = self.VistA.multiwait(['COMMENT','already an ACTIVE problem'])
if index == 0:
self.VistA.write(problem_data['comment1'].strip())
self.VistA.wait('ANOTHER COMMENT')
self.VistA.write(problem_data['comment2'].strip())
self.VistA.wait('DATE OF ONSET')
self.VistA.write(problem_data['onsetdate'].strip())
self.VistA.wait('STATUS')
self.VistA.write(problem_data['status'].strip())
self.VistA.wait('hronic')
self.VistA.write(problem_data['acutechronic'].strip())
rval = self.VistA.multiwait(['service-connected condition',
'uit w/o saving'])
if rval == 0:
self.VistA.write(problem_data['service'].strip())
self.VistA.wait('uit w/o saving?')
self.VistA.write('Save')
elif rval == 1:
self.VistA.write('Save')
break
else:
self.VistA.write("")
break
self.VistA.wait('PROBLEM')
self.VistA.write('')
self.VistA.wait('Select Action')
self.VistA.write('QUIT')
self.VistA.wait('Print a new problem list')
self.VistA.write('N')
def addbyprobnum(self, ssn, clinic, comment, onsetdate, status, acutechronic,
        service, probnum, icd=None,icd10=None,snomed=None, evalue=None, verchknum=None):
    '''Add a problem by answering the selection-list prompt with *probnum*.

    icd/icd10/snomed/evalue are accepted but not used in this method.
    When *verchknum* is given, additionally check that the user who just
    entered the problem cannot also verify it ('does not require
    verification' message expected).
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Patient Problem List')
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn)
    self.VistA.wait('Select Action')
    self.VistA.write('AD')
    self.VistA.wait('Clinic')
    self.VistA.write(clinic)
    # Either prompt variant may appear; probnum answers whichever comes.
    index = self.VistA.multiwait(["Select I(TEM|tem)","PROBLEM:"])
    self.VistA.write(probnum)
    index = self.VistA.multiwait(['COMMENT','already an ACTIVE problem'])
    if index == 0:
        self.VistA.write(comment)
        self.VistA.wait('ANOTHER COMMENT')
        self.VistA.write('')
        self.VistA.wait('DATE OF ONSET')
        self.VistA.write(onsetdate)
        self.VistA.wait('STATUS')
        self.VistA.write(status)
        self.VistA.wait('hronic')
        self.VistA.write(acutechronic)
        rval = self.VistA.multiwait(['service-connected condition', 'uit w/o saving'])
        if rval == 0:
            self.VistA.write(service)
            self.VistA.wait('uit w/o saving')
            self.VistA.write('Save')
        elif rval == 1:
            self.VistA.write('Save')
    else:
        # Duplicate of an ACTIVE problem; accept the default response.
        self.VistA.write('')
    self.VistA.multiwait(["PROBLEM:","Select Item"])
    self.VistA.write('')
    self.VistA.wait('Select Action')
    # optionally, check to make sure user entering the data can't also verify it
    if verchknum is not None:
        self.VistA.write('$')
        self.VistA.wait('Select Problem')
        self.VistA.write(verchknum)
        self.VistA.wait('does not require verification')
        self.VistA.wait('Select Action')
    self.VistA.write('QUIT')
    self.VistA.wait('Print a new problem list')
    self.VistA.write('N')
def add(self, ssn, clinic, comment, onsetdate, status, acutechronic,
        service, probnum=None, icd=None,icd10=None,snomed=None, evalue=None, verchknum=None):
    '''Add a problem by code lookup (ICD, then ICD-10, then SNOMED).

    probnum/evalue are accepted but not used in this method.  When
    *verchknum* is given, additionally check that the user who entered
    the problem cannot also verify it.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Patient Problem List')
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn)
    self.VistA.wait('Select Action')
    self.VistA.write('AD')
    self.VistA.wait('Clinic')
    self.VistA.write(clinic)
    index = self.VistA.multiwait(["Select Item","PROBLEM:"])
    if (index == 0):
        # A selection list exists; answer AD to reach the PROBLEM: prompt.
        self.VistA.write('AD')
        self.VistA.wait('PROBLEM:')
    # Try each code in turn until the lookup succeeds ('Ok' prompt).
    # NOTE(review): raises IndexError if all three codes fail lookup.
    probList = [icd, icd10,snomed]
    probIndex = 0
    self.VistA.write('?')
    while True:
        index = self.VistA.multiwait(['PROBLEM:','Ok',"Select Item"])
        if index==0:
            self.VistA.write(probList[probIndex])
            probIndex += 1
        elif index == 1:
            break
        elif index == 2:
            self.VistA.write('AD')
        else:
            self.VistA.write('?')
    self.VistA.write('YES')
    index = self.VistA.multiwait(['COMMENT','already an ACTIVE problem'])
    if index == 0:
        self.VistA.write(comment)
        self.VistA.wait('ANOTHER COMMENT')
        self.VistA.write('')
        self.VistA.wait('DATE OF ONSET')
        self.VistA.write(onsetdate)
        self.VistA.wait('STATUS')
        self.VistA.write(status)
        self.VistA.wait('hronic')
        self.VistA.write(acutechronic)
        rval = self.VistA.multiwait(['service-connected condition', 'uit w/o saving'])
        if rval == 0:
            self.VistA.write(service)
            self.VistA.wait('uit w/o saving')
            self.VistA.write('Save')
        elif rval == 1:
            self.VistA.write('Save')
    else:
        # Duplicate of an ACTIVE problem; accept the default response.
        self.VistA.write('')
    # Answer intermediate prompts until the Select Action menu returns.
    while True:
        index = self.VistA.multiwait(["PROBLEM:","Select Item",'Select Action'])
        if index == 2:
            break
        self.VistA.write('')
    # optionally, check to make sure user entering the data can't also verify it
    if verchknum is not None:
        self.VistA.write('$')
        self.VistA.wait('Select Problem')
        self.VistA.write(verchknum)
        self.VistA.wait('does not require verification')
        self.VistA.wait('Select Action')
    self.VistA.write('QUIT')
    self.VistA.wait('Print a new problem list')
    self.VistA.write('N')
def addspec(self, ssn, clinic, comment, onsetdate, status, acutechronic,
        service, icd, prompt='yes', uselex='yes', screendups='yes', isdup=None, prob=None, vlist=None):
    '''Add a problem while exercising the Problem List site parameters.

    uselex='yes'    expect the lexicon 'Ok?' confirmation after the code.
    screendups/isdup both 'yes' expect the duplicate-screening warning for
                    *prob* and answer Yes to continue.
    prompt='yes'    expect the trailing 'Print a new problem list' prompt.
    vlist           optional list of strings to consume after saving; the
                    last element marks the end of the expected output.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Patient Problem List')
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn)
    self.VistA.wait('Select Action')
    self.VistA.write('AD')
    self.VistA.wait('Clinic')
    self.VistA.write(clinic)
    self.VistA.wait('PROBLEM')
    self.VistA.write(icd)
    # BUG FIX: was "uselex is 'yes'" -- identity comparison with a string
    # literal is implementation-dependent; use equality instead.
    if uselex == 'yes':
        self.VistA.wait('Ok?')
        self.VistA.write('YES')
    if screendups == isdup == 'yes':
        self.VistA.wait('>>> ' + prob)
        self.VistA.wait(' is already an')
        self.VistA.wait('Are you sure you want to continue')
        self.VistA.write('Yes')
    self.VistA.wait('COMMENT')
    self.VistA.write(comment)
    self.VistA.wait('ANOTHER COMMENT')
    self.VistA.write('')
    self.VistA.wait('DATE OF ONSET')
    self.VistA.write(onsetdate)
    self.VistA.wait('STATUS')
    self.VistA.write(status)
    self.VistA.wait('hronic')
    self.VistA.write(acutechronic)
    rval = self.VistA.multiwait(['service-connected condition', 'uit w/o saving'])
    if rval == 0:
        self.VistA.write(service)
        self.VistA.wait('uit w/o saving')
        self.VistA.write('Save')
    elif rval == 1:
        self.VistA.write('Save')
    self.VistA.wait('PROBLEM')
    self.VistA.write('')
    if vlist is not None:
        while True:
            index = self.VistA.multiwait(vlist)
            if index == len(vlist)-1:
                break
    self.VistA.wait('Select Action')
    self.VistA.write('QUIT')
    if prompt == 'yes':
        self.VistA.wait('Print a new problem list')
        self.VistA.write('N')
def dataentry(self, ssn, provider, clinic, problem, comment, onsetdate, status, acutechronic,
        service, probnum=None, icd=None, evalue=None):
    '''Add a problem (via data entry) using description or selection list.

    probnum controls the path taken: 'skip' bypasses an existing selection
    list and enters *icd* directly; None assumes no selection list and
    enters *problem* as free text; any other value picks that entry from
    the selection list.  evalue is accepted but not used here.
    '''
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn)
    self.VistA.wait('Provider:')
    self.VistA.write(provider)
    self.VistA.wait('Select Action')
    self.VistA.write('AD')
    self.VistA.wait('Clinic')
    self.VistA.write(clinic)
    if probnum == 'skip': # SL exists but don't use
        self.VistA.wait('Select Item')
        self.VistA.write('AD')
        self.VistA.wait('PROBLEM')
        self.VistA.write(icd)
    elif probnum is None : # SL doesn't exist
        self.VistA.wait('PROBLEM')
        self.VistA.write(problem)
    else : # Use SL
        self.VistA.wait('Select Item')
        self.VistA.write(probnum)
    self.VistA.wait('COMMENT')
    self.VistA.write(comment)
    self.VistA.wait('ANOTHER COMMENT')
    self.VistA.write('')
    self.VistA.wait('DATE OF ONSET')
    self.VistA.write(onsetdate)
    self.VistA.wait('STATUS')
    self.VistA.write(status)
    self.VistA.wait('hronic')
    self.VistA.write(acutechronic)
    rval = self.VistA.multiwait(['service-connected condition', 'uit w/o saving'])
    if rval == 0:
        self.VistA.write(service)
        self.VistA.wait('uit w/o saving')
        self.VistA.write('Save')
    elif rval == 1:
        self.VistA.write('Save')
    self.VistA.wait('PROBLEM:')
    self.VistA.write('')
    self.VistA.wait('Select Action')
    self.VistA.write('QUIT')
    self.VistA.wait('Print a new problem list')
    self.VistA.write('N')
def editsimple(self, ssn, probnum, itemnum, chgval,icd10='',snomed=''):
    '''Simple edit of problem *probnum*: change item 1, 2, 4, 5 or 6 to *chgval*.

    If the new value triggers code re-lookup prompts, icd10 and snomed are
    supplied in turn at each 'A suitable term' prompt.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Patient Problem List')
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn)
    self.VistA.wait('Select Action')
    self.VistA.write('ED')
    self.VistA.wait('Select Problem')
    self.VistA.write(probnum) # which patient problem
    self.VistA.wait('Select Item')
    self.VistA.write(itemnum) # select 1, 2,4,5,or6
    self.VistA.wait(':')
    self.VistA.write(chgval)
    valIndex=0
    valList = [icd10,snomed]
    # Answer follow-up prompts until the item menu returns, then save (SC).
    while True:
        rval = self.VistA.multiwait(['Select Item', 'Ok','A suitable term','STOP or Select'])
        if rval == 0:
            self.VistA.write('SC')
            break
        elif rval == 1:
            self.VistA.write('Yes')
        elif rval == 2:
            self.VistA.write(valList[valIndex])
            valIndex +=1
        elif rval == 3:
            self.VistA.write('1')
    self.VistA.wait('Select Action')
    self.VistA.write('QUIT')
    self.VistA.wait('Print a new problem list')
    self.VistA.write('N')
def editinactivate (self, ssn, probnum, resdate):
    '''Inactivate problem *probnum*, recording *resdate* as the date resolved.'''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Patient Problem List')
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn)
    self.VistA.wait('Select Action')
    self.VistA.write('ED')
    self.VistA.wait('Select Problem')
    self.VistA.write(probnum) # which patient problem
    self.VistA.wait('Select Item')
    self.VistA.write('3') # STATUS
    self.VistA.wait('STATUS')
    self.VistA.write('INACTIVE')
    self.VistA.wait('DATE RESOLVED')
    self.VistA.write(resdate)
    self.VistA.wait('Select Item')
    self.VistA.write('SC')  # save changes
    self.VistA.wait('Select Action')
    self.VistA.write('QUIT')
    self.VistA.wait('Print a new problem list')
    self.VistA.write('N')
def editactivate (self, ssn, probnum, acutechronic):
    '''Re-activate problem *probnum*, answering the acute/chronic prompt.'''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Patient Problem List')
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn)
    self.VistA.wait('Select Action')
    self.VistA.write('ED')
    self.VistA.wait('Select Problem')
    self.VistA.write(probnum) # which patient problem
    self.VistA.wait('Select Item')
    self.VistA.write('3') # STATUS
    self.VistA.wait('STATUS')
    self.VistA.write('ACTIVE')
    self.VistA.wait('hronic')
    self.VistA.write(acutechronic)
    self.VistA.wait('Select Item')
    self.VistA.write('SC')  # save changes
    self.VistA.wait('Select Action')
    self.VistA.write('QUIT')
    self.VistA.wait('Print a new problem list')
    self.VistA.write('N')
def verify(self, ssn, probnum, itemnum, evalue, view='AT'):
    '''Verify that item *itemnum* of problem *probnum* shows one of *evalue*.

    evalue is a list of expected strings passed to multiwait; *view* is the
    list view to select first (default 'AT').
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Patient Problem List')
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn)
    self.VistA.wait('Select Action')
    self.VistA.write('VW')
    self.VistA.wait('Select Item')
    self.VistA.write(view)
    self.VistA.wait('Select Action')
    self.VistA.write('ED')
    self.VistA.wait('Select Problem')
    self.VistA.write(probnum) # which patient problem
    self.VistA.wait('Select Item')
    self.VistA.write(itemnum) # which item to verify?
    self.VistA.multiwait(evalue)
    self.VistA.write('^')  # abort the edit; this is a read-only check
    self.VistA.wait('Select Item')
    self.VistA.write('QUIT')
    self.VistA.wait('Select Action')
    self.VistA.write('QUIT')
def comcm (self, ssn, probnum, comment):
    '''Append *comment* to active problem *probnum* via the CM action.'''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Patient Problem List')
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn)
    self.VistA.wait('Select Action')
    self.VistA.write('CM')
    self.VistA.wait('Select Problem')
    self.VistA.write(probnum) # which patient problem
    self.VistA.wait('COMMENT')
    self.VistA.write(comment)
    self.VistA.wait('ANOTHER COMMENT')
    self.VistA.write('')
    self.VistA.wait('Select Action')
    self.VistA.write('QUIT')
    self.VistA.wait('Print a new problem list')
    self.VistA.write('N')
def rem (self, ssn):
    '''Remove the first problem on the list (Active or Inactive).'''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Patient Problem List')
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn)
    self.VistA.wait('Select Action')
    self.VistA.write('VW')
    self.VistA.wait('Select Item')
    self.VistA.write('BO')  # view Both active and inactive problems
    self.VistA.wait('Select Action')
    self.VistA.write('RM')
    self.VistA.wait('Select Problem')
    self.VistA.write('1')
    self.VistA.wait('Are you sure')
    self.VistA.write('YES')
    self.VistA.wait('REASON FOR REMOVAL')
    self.VistA.write('testing')
    self.VistA.wait('Select Action')
    self.VistA.write('QUIT')
    self.VistA.wait('Print a new problem list')
    self.VistA.write('N')
def rem_all (self, ssn):
    '''Remove problems one at a time until the patient's list is empty.

    Repeats the RM action; the loop ends when RM goes straight back to
    the Select Action prompt (no problems left to remove).
    '''
    rval = 0
    # BUG FIX: was "rval is not 1" -- identity comparison with an int
    # literal only works by accident of CPython small-int caching.
    while rval != 1:
        self.VistA.wait('Problem List Mgt Menu')
        self.VistA.write('Patient Problem List')
        self.VistA.wait('PATIENT NAME')
        self.VistA.write(ssn)
        self.VistA.wait('Select Action')
        self.VistA.write('VW')
        self.VistA.wait('Select Item')
        self.VistA.write('BO')  # view Both active and inactive problems
        self.VistA.wait('Select Action')
        self.VistA.write('RM')
        rval = self.VistA.multiwait(['Select Problem', 'Select Action'])
        if rval == 0:
            # A problem is available: remove the first one.
            self.VistA.write('1')
            self.VistA.wait('Are you sure')
            self.VistA.write('YES')
            self.VistA.wait('REASON FOR REMOVAL')
            self.VistA.write('testing')
            self.VistA.wait('Select Action')
            self.VistA.write('QUIT')
            self.VistA.wait('Print a new problem list')
            self.VistA.write('N')
        elif rval == 1:
            # Nothing left to remove: quit out and let the loop end.
            self.VistA.write('QUIT')
            r2val = self.VistA.multiwait(['Print a new problem list', 'Problem List Mgt Menu'])
            if r2val == 0:
                self.VistA.write('N')
            elif r2val == 1:
                self.VistA.write('?')
            else:
                self.VistA.wait('SHOULDNOTGETHERE')
        else:
            self.VistA.wait('SHOULDNOTGETHERE')
def replace (self, ssn, probnum):
    '''Restore previously removed problem *probnum* via Replace Removed Problem.'''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Replace Removed Problem')
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn)
    self.VistA.wait('Select the problem')
    self.VistA.write(probnum)
    self.VistA.wait('Are you sure you want to do this?')
    self.VistA.write('YES')
    self.VistA.wait('to continue')
    self.VistA.write('')
def checkempty (self, ssn):
    '''Verify the patient's problem list is empty.

    An empty list makes 'Add New Problems' the default action, so waiting
    on that exact prompt text is the check.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Patient Problem List')
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn)
    self.VistA.wait('Select Action: Add New Problems//')
    self.VistA.write('QUIT')
def createsellist (self, listname, clinic):
    '''Create a problem selection list and associate it with *clinic*.

    Newer builds prompt for a CLASS instead of a CLINIC at creation time;
    in that case the list is created as Local and then assigned to the
    clinic through the SS action.
    '''
    needAssignedToClinic = False
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Create Problem Selection Lists')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('Build')
    self.VistA.wait('Select LIST NAME:')
    self.VistA.write(listname)
    self.VistA.wait('new PROBLEM SELECTION LIST')
    self.VistA.write('Yes')
    index = self.VistA.multiwait(['PROBLEM SELECTION LIST CLINIC:','PROBLEM SELECTION LIST CLASS'])
    if index == 0:
        self.VistA.write(clinic)
    else:
        needAssignedToClinic = True
        self.VistA.write("Local")
    self.VistA.wait('Select Action:')
    # assign to clinic
    if (needAssignedToClinic) and (clinic):
        self.VistA.write("SS")
        self.VistA.wait("Enter selection")
        self.VistA.write('2') # Assign to hospital location
        self.VistA.wait("HOSPITAL LOCATION NAME")
        self.VistA.write(clinic)
        self.VistA.wait("Selection List")
        self.VistA.write(listname)
        self.VistA.wait("Enter selection")
        self.VistA.write('')
        self.VistA.wait('Select Action:')
    self.VistA.write('SV')  # save the list
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('')
def createcat (self, listname, catname):
    '''Create category *catname* and add it to selection list *listname*.'''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Create Problem Selection Lists')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('Build Problem Selection List')
    self.VistA.wait('Select LIST NAME:')
    self.VistA.write(listname)
    self.VistA.wait('Select Action')
    self.VistA.write('EC')  # edit categories
    self.VistA.wait('Select CATEGORY NAME:')
    self.VistA.write(catname)
    self.VistA.wait('new PROBLEM SELECTION CATEGORY')
    self.VistA.write('Yes')
    # Newer builds ask for a CLASS before showing the item menu.
    index = self.VistA.multiwait(['Select Item','PROBLEM SELECTION CATEGORY CLASS'])
    if index == 1:
        self.VistA.write("Local")
        self.VistA.wait('Select Item')
    self.VistA.write('SV')  # save the category
    self.VistA.wait('Select Action')
    self.VistA.write('AD')  # add the category to the list
    self.VistA.wait('CATEGORY NAME')
    self.VistA.write(catname)
    index = self.VistA.multiwait(['HEADER','part of this list'])
    if index == 0:
        self.VistA.write('')
        self.VistA.wait('SEQUENCE')
        self.VistA.write('')
    self.VistA.wait('CATEGORY NAME')
    self.VistA.write('')
    self.VistA.wait('Select Action')
    self.VistA.write('SV')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('')
def catad (self, listname, catname, icd, snomed, spec='', dtext='', seqnum=''):
    '''Add a problem to category *catname* of list *listname*.

    The ICD code is tried first; if lookup offers 'A suitable term', the
    SNOMED code is entered instead.  dtext is the display text, seqnum the
    sequence number, and spec answers an optional specialty-subset prompt.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Create Problem Selection Lists')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('Build')
    self.VistA.wait('Select LIST NAME:')
    self.VistA.write(listname)
    self.VistA.wait('Select Action')
    self.VistA.write('EC')
    self.VistA.wait('Select CATEGORY NAME:')
    self.VistA.write(catname)
    self.VistA.wait('Select Item')
    self.VistA.write('AD')
    index = self.VistA.multiwait(['PROBLEM','Select Specialty Subset'])
    if index == 1:
        self.VistA.write(spec)
        self.VistA.wait('PROBLEM')
    self.VistA.write(icd)
    index = self.VistA.multiwait(['Ok', 'STOP or Select', 'A suitable term'])
    if index == 0:
        # Unique ICD match.
        self.VistA.write('')
        self.VistA.wait('DISPLAY TEXT')
        self.VistA.write(dtext)
        self.VistA.wait('ICD CODE')
        self.VistA.write(icd)
        self.VistA.wait('...OK')
        self.VistA.write('Yes')
        self.VistA.wait('SEQUENCE')
        self.VistA.write(seqnum)
        self.VistA.wait('PROBLEM')
        self.VistA.write('')
    elif index == 1:
        # Multiple matches: pick the first.
        self.VistA.write('1')
        self.VistA.wait('DISPLAY TEXT')
        self.VistA.write(dtext)
        self.VistA.wait('ICD CODE')
        self.VistA.write(icd)
        self.VistA.wait('...OK')
        self.VistA.write('Yes')
        self.VistA.wait('SEQUENCE')
        self.VistA.write(seqnum)
        self.VistA.wait('PROBLEM')
        self.VistA.write('')
    elif index == 2:
        # ICD lookup failed: retry with the SNOMED code.
        self.VistA.write(snomed)
        index = self.VistA.multiwait(['Ok', 'STOP or Select', 'A suitable term'])
        if index == 0:
            self.VistA.write('')
            self.VistA.wait('DISPLAY TEXT')
            self.VistA.write(dtext)
            self.VistA.multiwait(['... Ok','... Yes'])
            self.VistA.write('Yes')
            self.VistA.wait('SEQUENCE')
            self.VistA.write(seqnum)
            self.VistA.wait('PROBLEM')
        elif index == 1:
            self.VistA.write('1')
            self.VistA.wait('DISPLAY TEXT')
            self.VistA.write(dtext)
            self.VistA.multiwait(['... Ok','... Yes'])
            self.VistA.write('Yes')
            self.VistA.wait('SEQUENCE')
            self.VistA.write(seqnum)
            self.VistA.wait('PROBLEM')
        self.VistA.write('')
    self.VistA.wait('Select Item')
    self.VistA.write('SV')
    self.VistA.wait('Select Action')
    self.VistA.write('SV')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('')
def sellistad (self, listname, catname, hdrname='', seqnum=''):
    '''Add category *catname* to selection list *listname*.

    hdrname/seqnum answer the HEADER and SEQUENCE prompts when the
    category is not already part of the list.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Create Problem Selection Lists')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('Build')
    self.VistA.wait('Select LIST NAME:')
    self.VistA.write(listname)
    self.VistA.wait('Select Action')
    self.VistA.write('AD')
    self.VistA.wait('Select CATEGORY NAME:')
    self.VistA.write(catname)
    index = self.VistA.multiwait(['HEADER','part of this list'])
    if index == 0:
        self.VistA.write(hdrname)
        self.VistA.wait('SEQUENCE')
        self.VistA.write(seqnum)
    self.VistA.wait('Select CATEGORY NAME')
    self.VistA.write('')
    self.VistA.wait('Select Action')
    self.VistA.write('SV')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('')
def sellistss (self, listname, clinic, username):
    '''Assign selection list *listname* to *username* via the SS action.'''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Create Problem Selection Lists')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('Build')
    self.VistA.wait('Select LIST NAME:')
    self.VistA.write(listname)
    self.VistA.wait('Select Action')
    self.VistA.write('SS')
    self.VistA.wait('CLINIC:')
    self.VistA.write(clinic)
    self.VistA.wait('Select USER')
    self.VistA.write(username)
    self.VistA.wait('ANOTHER ONE')
    self.VistA.write('')
    self.VistA.wait('Are you ready')
    self.VistA.write('Yes')
    self.VistA.wait('Select Action')
    self.VistA.write('SV')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('')
def sellistgal (self, listname, username):
    '''Assign selection list *listname* to *username* via the Assign option.

    Newer builds present a numbered assignment sub-menu first ('System'
    banner / 'Enter Selection'); both dialog variants are handled.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Create Problem Selection Lists')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('Assign')
    index = self.VistA.multiwait(["System",'Enter Selection','Select LIST NAME:'])
    if index == 0:
        self.VistA.write("1")  # assign to a user (NEW PERSON)
        self.VistA.wait("NEW PERSON NAME")
        self.VistA.write(username)
        self.VistA.wait("Selection List")
        self.VistA.write(listname)
        self.VistA.wait("Enter selection")
        self.VistA.write('')
    else:
        self.VistA.write(listname)
        self.VistA.wait('Select USER')
        self.VistA.write(username)
        self.VistA.wait('ANOTHER ONE')
        self.VistA.write('')
        self.VistA.wait('Are you ready')
        self.VistA.write('Yes')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('')
def sellistrfu (self, listname, username):
    '''De-assign a selection list from *username* via the Remove option.

    Newer builds route removal through the Assign sub-menu ('@' deletes
    the assignment); both dialog variants are handled.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Create Problem Selection Lists')
    self.VistA.wait('Select Create Problem Selection Lists')
    self.VistA.write('Remove')
    index = self.VistA.multiwait(['Select LIST NAME:','Select Create Problem'])
    if index == 1:
        self.VistA.write('Assign')
        self.VistA.wait('Enter selection')
        self.VistA.write('1')  # user (NEW PERSON) assignments
        self.VistA.wait('NEW PERSON')
        self.VistA.write(username)
        self.VistA.wait('Selection List')
        self.VistA.write('@')  # delete the assignment
        self.VistA.wait('Enter selection')
        self.VistA.write('')
    else:
        self.VistA.write(listname)
        self.VistA.wait('Select USER')
        self.VistA.write(username)
        self.VistA.wait('ANOTHER ONE')
        self.VistA.write('')
        self.VistA.wait('Are you ready')
        self.VistA.write('Yes')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('')
def sellistrm (self, listname, catnum='1'):
    '''Remove category number *catnum* from selection list *listname*.'''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Create Problem Selection Lists')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('Build')
    self.VistA.wait('Select LIST NAME:')
    self.VistA.write(listname)
    self.VistA.wait('Select Action')
    self.VistA.write('RM')
    self.VistA.wait('Select Category')
    self.VistA.write(catnum)
    self.VistA.wait('Are you sure you want to remove')
    self.VistA.write('Yes')
    self.VistA.wait('Select Action')
    self.VistA.write('SV')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('')
def catdl (self, listname, catname):
    '''Delete category *catname* (and its contents) from list *listname*.'''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Create Problem Selection Lists')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('Build')
    self.VistA.wait('Select LIST NAME:')
    self.VistA.write(listname)
    self.VistA.wait('Select Action')
    self.VistA.write('EC')
    self.VistA.wait('Select CATEGORY NAME')
    self.VistA.write(catname)
    self.VistA.wait('Select Item')
    self.VistA.write('DL')
    self.VistA.wait('Are you sure you want to delete the entire')
    self.VistA.write('Yes')
    self.VistA.wait('Select CATEGORY NAME')
    self.VistA.write('')
    self.VistA.wait('Select Action')
    self.VistA.write('SV')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('')
def sellistdl (self, listname, clinic):
    '''Delete selection list *listname*, first removing its clinic assignment.'''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Create Problem Selection Lists')
    self.VistA.wait('Create Problem Selection Lists')
    # First remove assignments
    if clinic:
        self.VistA.write("ASSIGN")
        index = self.VistA.multiwait(["Enter selection","Select LIST NAME" ])
        if index == 0:
            self.VistA.write('2') # Assign to hospital location
            self.VistA.wait("HOSPITAL LOCATION NAME")
            self.VistA.write(clinic)
            self.VistA.wait("Selection List")
            self.VistA.write("@")  # delete the assignment
            self.VistA.wait("Enter selection")
            self.VistA.write('')
            self.VistA.wait('Create Problem Selection Lists')
        else:
            # Older dialog: back out, nothing to de-assign here.
            self.VistA.write('')
            self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('Delete')
    self.VistA.wait('Select LIST NAME:')
    self.VistA.write(listname)
    self.VistA.wait('Are you sure you want to delete this list')
    self.VistA.write('Yes')
    index = self.VistA.multiwait(['to continue','Create Problem Selection Lists'])
    if index == 0:
        self.VistA.write('')
        self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('')
def createibform (self, clinic, formname, groupname, plist, icd10list):
    '''Create an Integrated Billing encounter form holding the given problems.

    plist is a list of problem lookup values; icd10list supplies, by the
    same index, the ICD-10 code used when a lookup needs disambiguation.
    Embedded '\\r' sequences answer several prompts with defaults at once.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('')
    self.VistA.wait('Core Applications')
    self.VistA.write('IB')
    self.VistA.wait('Integrated Billing Master Menu')
    self.VistA.write('Encounter Forms')
    self.VistA.wait('Encounter Forms')
    self.VistA.write('Edit Encounter Forms')
    self.VistA.wait('Edit Encounter Forms')
    self.VistA.write('Clinic Setup')
    self.VistA.wait('WHICH CLINIC?')
    self.VistA.write(clinic)
    self.VistA.wait('Select Action:')
    self.VistA.write('Create Blank Form')
    self.VistA.wait('New Form Name')
    self.VistA.write(formname + '\r\r\r0\r\r\rTest Form\r1')
    self.VistA.wait('Select Action')
    self.VistA.write('Edit Form')
    self.VistA.wait('Select Action')
    self.VistA.write('Add Toolkit')
    self.VistA.wait('Select Action')
    self.VistA.write('Add Tool Kit Block')
    self.VistA.wait('Select TOOL KIT BLOCK:')
    self.VistA.write('8')
    self.VistA.wait('STARTING ROW:')
    self.VistA.write('\r\r\r')
    self.VistA.wait('Select Action')
    self.VistA.write('Fast Selection Edit')
    self.VistA.wait('Select Action:')
    self.VistA.write('Group Add')
    self.VistA.wait('HEADER')
    self.VistA.write(groupname + '\r1\r\r')
    for pitem in plist:
        self.VistA.wait('Select Action')
        self.VistA.write('Add Selection')
        self.VistA.wait('Select PROBLEM:')
        self.VistA.write(pitem)
        index = self.VistA.multiwait(['Select PROBLEM','Ok'])
        if index == 0:
            # Ambiguous lookup: narrow it with the matching ICD-10 code.
            self.VistA.write(icd10list[plist.index(pitem)])
            self.VistA.wait('Ok')
        self.VistA.write('\rGroup1\r\r^')
        index = self.VistA.multiwait(['NARRATIVE','Select Action'])
        if index == 0:
            self.VistA.write('TEST')
        else:
            self.VistA.write('?')
    self.VistA.wait('Select Action')
    self.VistA.write('QUIT\rYES')
    self.VistA.wait('Select Action')
    self.VistA.write('QUIT\r\r\r')
    self.VistA.wait('Integrated Billing Master Menu')
    self.VistA.write('Problem List')
def checkOutOfOrder (self, menuName):
    '''Check whether the Create Problem Selection Lists menu is out of order.

    Lists the menu's options with '?' and returns False when the output
    contains 'SNOMED CT' (menu working), True otherwise — presumably
    indicating the option is out of order (TODO confirm with the test
    that calls this).  *menuName* is currently unused; kept for
    interface compatibility.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Create Problem Selection Lists')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('?')
    index = self.VistA.multiwait(['SNOMED CT','Select Problem Selection Lists'])
    self.VistA.write('')
    return index != 0
def sellistib (self, formname, listname, clinic):
    '''Copy an IB encounter form into a problem selection list for *clinic*.'''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Create Problem Selection Lists')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('Copy Selection List from IB Encounter')
    self.VistA.wait('Select a FORM:')
    self.VistA.write(formname)
    self.VistA.wait('LIST NAME')
    self.VistA.write(listname)
    self.VistA.wait('CLINIC')
    self.VistA.write(clinic)
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('')
def versellist(self, ssn, clinic, vlist):
    '''Verify a clinic selection list: content and order.

    vlist is the ordered list of expected strings; its last element marks
    the end of the expected output.  'PROBLEM:' is prepended so the check
    also ends cleanly when no selection list is offered.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Patient Problem List')
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn)
    self.VistA.wait('Select Action')
    self.VistA.write('AD')
    self.VistA.wait('Clinic')
    self.VistA.write(clinic)
    vlist = ["PROBLEM:"] + vlist
    while True:
        index = self.VistA.multiwait(vlist)
        if (index == len(vlist)-1):
            # Saw the final expected item: back out of the selection list.
            self.VistA.wait('Select Item')
            self.VistA.write('Quit')
            break
        if index == 0:
            # No selection list; the bare PROBLEM: prompt came up instead.
            self.VistA.write('')
            break
    self.VistA.wait('Select Action')
    self.VistA.write('Quit')
def verplist(self, ssn, vlist):
    '''Verify a patient problem list: content and order.

    vlist is the ordered list of expected strings; its last element marks
    the end of the expected output.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Patient Problem List')
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn)
    while True:
        index = self.VistA.multiwait(vlist)
        if index == len(vlist)-1:
            break
    self.VistA.wait('Select Action')
    self.VistA.write('Quit')
def verlistpats(self, vlist):
    '''Verify the "List Patients with Problem List data" report output.

    vlist is the ordered list of expected strings; its last element marks
    the end of the expected output.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('List Patients with Problem List data')
    self.VistA.wait('//')
    self.VistA.write('')
    while True:
        index = self.VistA.multiwait(vlist)
        if index == len(vlist)-1:
            break
    self.VistA.wait('to exit:')
    self.VistA.write('')
def verpatsrch(self, prob, icd10,snomed, vlist):
    '''Verify the "Search for Patients having selected Problem" report.

    prob/icd10/snomed are tried in turn at the PROBLEM prompt until the
    lookup succeeds ('Ok').  vlist is the ordered list of expected report
    strings; its last element marks the end of the expected output.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Search for Patients having selected Problem')
    probList = [prob,icd10,snomed]
    probIndex =0
    while True:
        index = self.VistA.multiwait(['Ok','PROBLEM'])
        if index == 1:
            self.VistA.write(probList[probIndex])
            probIndex += 1
        elif index == 0:
            break
        else:
            self.VistA.write('?')
    self.VistA.write('')
    self.VistA.wait('Select STATUS:')
    self.VistA.write('')
    self.VistA.wait('DEVICE:')
    self.VistA.write('')
    while True:
        index = self.VistA.multiwait(vlist)
        if index == len(vlist)-1:
            break
    self.VistA.wait('to exit:')
    self.VistA.write('')
    self.VistA.wait('PROBLEM:')
    self.VistA.write('')
def detview (self, ssn, probnum, vlist1, vlist2):
    '''Check the Detailed View (DT) of problem *probnum*.

    vlist1 and vlist2 are the ordered expected strings for the two pages
    of the detailed display; the last element of each marks its end.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Patient Problem List')
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn)
    self.VistA.wait('Select Action')
    self.VistA.write('DT')
    self.VistA.wait('Select Problem')
    self.VistA.write(probnum) # which patient problem
    while True:
        index = self.VistA.multiwait(vlist1)
        if index == len(vlist1)-1:
            break
    self.VistA.wait('Select Action')
    self.VistA.write('')
    while True:
        index = self.VistA.multiwait(vlist2)
        if index == len(vlist2)-1:
            break
    self.VistA.wait('Select Action')
    self.VistA.write('')
    self.VistA.wait('Select Action')
    self.VistA.write('')
def verifyproblem(self, ssn, problem):
    '''Verify an unconfirmed ($-flagged) problem, then confirm it is verified.

    Expects *problem* to be shown with the '$' verify mark, verifies it
    with the '$' action, and checks a second '$' reports 'does not
    require verification'.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('1')
    self.VistA.wait('PATIENT NAME:')
    self.VistA.write(ssn)
    self.VistA.wait('$') # check for $ verify mark
    self.VistA.wait(problem) # check for $ verify mark
    self.VistA.wait('Select Action:')
    self.VistA.write('DT')
    self.VistA.wait('Select Problem')
    self.VistA.write('')
    self.VistA.wait('CLERK')
    self.VistA.write('q')
    self.VistA.wait('Select Action:')
    self.VistA.write('$')  # verify the problem
    self.VistA.wait('Select Problem')
    self.VistA.write('')
    self.VistA.wait('Select Action:')
    self.VistA.write('DT')
    self.VistA.wait('Select Problem')
    self.VistA.write('')
    self.VistA.wait('Select Action:')
    self.VistA.write('Q')
    # verify again and confirm previous verification worked
    self.VistA.wait('Select Action:')
    self.VistA.write('$')
    self.VistA.wait('Select Problem')
    self.VistA.write('')
    self.VistA.wait('does not require verification')
    self.VistA.wait('Select Action:')
    self.VistA.write('Q')
def selectnewpatient(self, ssn1, name1, ss2, name2):
    '''Check the Select new Patient (SP) action switches patients correctly.

    Looks up ssn1 and expects *name1* displayed, then SP to ss2 and
    expects *name2*.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Patient Problem List')
    self.VistA.wait('PATIENT NAME')
    self.VistA.write(ssn1)
    self.VistA.wait(name1)
    self.VistA.write('SP')
    self.VistA.wait('PATIENT NAME:')
    self.VistA.write(ss2)
    self.VistA.wait(name2)
    self.VistA.write('Q')
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('')
def printproblemlist(self, ssn, vlist):
    '''Check the Print (PP) action output of the patient's problem list.

    vlist is the ordered list of expected printed strings; its last
    element marks the end of the expected output.
    '''
    self.VistA.wait("Problem List Mgt Menu")
    self.VistA.write('Patient Problem List')
    self.VistA.wait('NAME:')
    self.VistA.write(ssn)
    self.VistA.wait('Select Action:')
    self.VistA.write('PP')
    self.VistA.wait('ll problems?')
    self.VistA.write('A')
    self.VistA.wait('DEVICE:')
    self.VistA.write('HOME')
    while True:
        index = self.VistA.multiwait(vlist)
        if index == len(vlist)-1:
            break
    self.VistA.wait('exit:')
    self.VistA.write('^')
    self.VistA.wait('Select Action')
    self.VistA.write('')
def resequencecat(self, listname, catnames):
    '''Test the re-sequence (SQ) action inside the selection-list builder.

    catnames is a two-element sequence; after each SQ the categories are
    expected to appear in the swapped order (checked with wait_re).
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Create Problem')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('Build')
    self.VistA.wait('LIST NAME:')
    self.VistA.write(listname)
    self.VistA.wait('Select Action:')
    self.VistA.write('SQ')
    self.VistA.wait('Select Category')
    self.VistA.write('1')
    self.VistA.wait('SEQUENCE')
    self.VistA.write('3')
    # First category moved after the second.
    self.VistA.wait_re(catnames[1])
    self.VistA.wait_re(catnames[0])
    self.VistA.write('SQ')
    self.VistA.wait('Select Category')
    self.VistA.write('2')
    self.VistA.wait('SEQUENCE')
    self.VistA.write('1')
    # Original order restored.
    self.VistA.wait_re(catnames[0])
    self.VistA.wait_re(catnames[1])
    self.VistA.wait('Select Action:')
    self.VistA.write('VW')
    self.VistA.wait('<1>')
    self.VistA.write('')
    self.VistA.wait('Save')
    self.VistA.write('Yes')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('')
def categorydisp(self, listname, catname):
    '''Test the category display (CD) action: set the header twice.

    First sets the header to the upper-cased category name and checks it
    is displayed, then sets it back to the original casing.
    '''
    self.VistA.wait('Problem List Mgt Menu')
    self.VistA.write('Create Problem')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('Build')
    self.VistA.wait('LIST NAME')
    self.VistA.write(listname)
    self.VistA.wait('Select Action:')
    self.VistA.write('CD')
    self.VistA.wait('Category')
    self.VistA.write('1')
    self.VistA.wait('HEADER:')
    self.VistA.write(catname.upper())
    self.VistA.wait('AUTOMATICALLY')
    self.VistA.write('Yes')
    self.VistA.wait(catname.upper())
    self.VistA.write('CD')
    self.VistA.wait('Category')
    self.VistA.write('1')
    self.VistA.wait('HEADER:')
    self.VistA.write(catname)
    self.VistA.wait('AUTOMATICALLY')
    self.VistA.write('Yes')
    self.VistA.wait('Select Action')
    self.VistA.write('SV')
    self.VistA.wait('Create Problem Selection Lists')
    self.VistA.write('')
def changesellist(self, list1, list2, category=None):
    """Changes the Selection List from *list1* to *list2* via CL.

    If *category* is None the new list is expected to be empty
    ('No items available.'); otherwise the named category must appear.
    """
    vista = self.VistA
    for prompt, reply in (
        ('Problem List Mgt Menu', 'Create Problem'),
        ('Create Problem Selection Lists', 'Build'),
        ('LIST NAME:', list1),
        ('Select Action:', 'CL'),
        ('LIST NAME:', list2),
    ):
        vista.wait(prompt)
        vista.write(reply)
    vista.wait_re(list2)
    # Empty lists show a placeholder; populated lists show the category.
    if category is None:
        vista.wait('No items available.')
    else:
        vista.wait(category)
    vista.write('')
    vista.wait('Create Problem Selection Lists')
    vista.write('')
def editpart1(self, ssn, probnum, itemnum, chgval):
    """Simple edit of problem, items 1,2,4,5 or 6 only (first half).

    Navigates to the patient's problem list, picks a problem and an item
    to edit, then stops — the edit itself is driven by editpart2.
    NOTE(review): chgval is accepted for signature symmetry but not used
    in this half of the flow.
    """
    vista = self.VistA
    for prompt, reply in (
        ('Problem List Mgt Menu', 'Patient Problem List'),
        ('PATIENT NAME', ssn),
        ('Select Action', 'ED'),
        ('Select Problem', probnum),   # which patient problem
        ('Select Item', itemnum),      # select 1, 2, 4, 5, or 6
    ):
        vista.wait(prompt)
        vista.write(reply)
def editpart2(self, ssn, probnum, itemnum, chgval, icd10='', snomed=''):
    """Edit for lock test (second half, continues from editpart1).

    Answers lexicon prompts with chgval, icd10 and snomed in that order,
    confirms any 'Ok' prompts, then quits without printing a new list.
    NOTE(review): ssn/probnum/itemnum are accepted for signature symmetry
    but not used in this half of the flow.
    """
    vista = self.VistA
    vista.wait(':')
    vista.write('')
    # Lexicon term replies, consumed in order each time a
    # 'A suitable term' prompt appears.
    terms = [chgval, icd10, snomed]
    consumed = 0
    while True:
        choice = vista.multiwait(['Select Item', 'Ok', 'A suitable term'])
        if choice == 0:
            vista.write('SC')
            break
        if choice == 1:
            vista.write('Yes')
        else:
            vista.write(terms[consumed])
            consumed += 1
    for prompt, reply in (
        ('Select Action', 'QUIT'),
        ('Print a new problem list', 'N'),
    ):
        vista.wait(prompt)
        vista.write(reply)
def badeditpart1(self, ssn, probnum, itemnum, chgval, icd10):
    """Attempted edit of a problem that may be locked by another user.

    Selects the problem and, if the problem prompt reappears, supplies
    the ICD-10 code and expects the 'edited by another user' lock
    message before quitting.
    NOTE(review): itemnum/chgval are accepted for signature symmetry
    but not used in this flow.
    """
    vista = self.VistA
    for prompt, reply in (
        ('Problem List Mgt Menu', 'Patient Problem List'),
        ('PATIENT NAME', ssn),
        ('Select Action', 'ED'),
        ('Select Problem', probnum),  # which patient problem
    ):
        vista.wait(prompt)
        vista.write(reply)
    # Either we are asked for the problem again, or the lock message
    # appears immediately.
    hit = vista.multiwait(['Select Problem', 'edited by another user'])
    if hit == 0:
        vista.write(icd10)
        vista.wait('edited by another user')
    vista.write('QUIT')
def editPLsite(self, ver, prompt, uselex, order, screendups):
    """Edits the Problem List site parameters.

    Walks the 'Edit PL Site Parameters' option and supplies a value for
    each of the five site settings in prompt order.
    """
    vista = self.VistA
    # Loop variable is named `expect` to avoid shadowing the `prompt`
    # parameter.
    for expect, reply in (
        ('Problem List Mgt Menu', 'Edit PL Site Parameters'),
        ('VERIFY TRANSCRIBED PROBLEMS:', ver),
        ('PROMPT FOR CHART COPY:', prompt),
        ('USE CLINICAL LEXICON:', uselex),
        ('DISPLAY ORDER:', order),
        ('SCREEN DUPLICATE ENTRIES:', screendups),
    ):
        vista.wait(expect)
        vista.write(reply)
def checkVerplsetting(self, ssn):
    """Check Verify PL site setting.

    Sends '$' at the action prompt and expects it to be rejected as an
    invalid selection before quitting.
    """
    vista = self.VistA
    for prompt, reply in (
        ('Problem List Mgt Menu', 'Patient Problem List'),
        ('PATIENT NAME', ssn),
        ('Select Action', '$'),
    ):
        vista.wait(prompt)
        vista.write(reply)
    # '$' must be refused at this prompt.
    vista.wait('$ is not a valid selection')
    vista.wait('Select Action')
    vista.write('Q')
def checkRMsellist(self, ssn, clinic):
    """Check to verify response when adding a problem via a clinic whose
    selection list has been removed.

    Either the system retrieves a (now empty) list and bounces back to
    the Problem List, or it falls through to the PROBLEM: prompt, which
    is dismissed with an empty reply.
    """
    vista = self.VistA
    for prompt, reply in (
        ('Problem List Mgt Menu', 'Patient Problem List'),
        ('PATIENT NAME', ssn),
        ('Select Action', 'AD'),
        ('Clinic', clinic),
    ):
        vista.wait(prompt)
        vista.write(reply)
    hit = vista.multiwait(['Retrieving list of problems ...', 'PROBLEM:'])
    if hit == 0:
        # Removed list: nothing to show, control returns automatically.
        vista.wait('No items available. Returning to Problem List ...')
    else:
        vista.write('')
    vista.wait('Select Action')
    vista.write('Q')
| 39.402767 | 109 | 0.57205 | 6,110 | 51,263 | 4.795417 | 0.072668 | 0.29273 | 0.229829 | 0.090785 | 0.810307 | 0.787986 | 0.755495 | 0.725256 | 0.686962 | 0.653072 | 0 | 0.005605 | 0.286503 | 51,263 | 1,300 | 110 | 39.433077 | 0.795467 | 0.074947 | 0 | 0.767703 | 0 | 0 | 0.199355 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040587 | false | 0 | 0.003454 | 0 | 0.046632 | 0.000864 | 0 | 0 | 0 | null | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
87158fdbe3626d7684269109af52553b70e7d540 | 47,732 | py | Python | tests/runtests.py | iptc/newsml-g2 | 3aa1bd36c7c8ddea2f410b878f478e3e4dc59aac | [
"CC-BY-4.0"
] | 8 | 2017-05-03T10:06:49.000Z | 2021-11-09T17:17:33.000Z | tests/runtests.py | iptc/newsml-g2 | 3aa1bd36c7c8ddea2f410b878f478e3e4dc59aac | [
"CC-BY-4.0"
] | 6 | 2019-06-21T08:24:19.000Z | 2020-04-29T13:51:35.000Z | tests/runtests.py | iptc/newsml-g2 | 3aa1bd36c7c8ddea2f410b878f478e3e4dc59aac | [
"CC-BY-4.0"
] | 2 | 2017-05-03T10:06:55.000Z | 2018-11-05T18:33:44.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Copyright (c) 2020 International Press Telecommunications Council (IPTC)
#
# The MIT License
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""
NewsML-G2 unit test runner
"""
import lxml
import lxml.etree
import unittest
import os
# Base directory of this test module; all paths below are relative to it.
DIRNAME = os.path.dirname(__file__)

# Current released NewsML-G2 schema ("All-Power" variant).
NEWSMLG2_SCHEMA = os.path.join(
    DIRNAME, '..', 'specification', 'NewsML-G2_2.30-spec-All-Power.xsd')

# Work-in-progress development schema.
NEWSMLG2_DEV_SCHEMA = os.path.join(
    DIRNAME, '..', 'dev-schema', 'NewsML-G2dev_0.5_nar230.xsd')

# Instance documents used by the unit tests, one folder per version.
TEST_FILES_FOLDER = os.path.join(DIRNAME, 'unit_test_files')

# Historical schema releases used for cross-version validation.
SCHEMA_FILES_FOLDER = os.path.join(DIRNAME, 'schema_versions')
SCHEMA_VERSIONS = {
"dev": {
"schema_file": NEWSMLG2_DEV_SCHEMA,
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, 'dev', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.30', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_pass'),
# os.path.join(TEST_FILES_FOLDER, '2.23', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, 'dev', 'should_fail')
],
},
"2.30": {
"schema_file": os.path.join(
DIRNAME, '..', 'specification', 'NewsML-G2_2.30-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.30', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_pass'),
# os.path.join(TEST_FILES_FOLDER, '2.23', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.30', 'should_fail')
],
},
"2.29": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.29-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_pass'),
# os.path.join(TEST_FILES_FOLDER, '2.23', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
],
},
"2.28": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.28-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_pass'),
# os.path.join(TEST_FILES_FOLDER, '2.23', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
],
},
"2.27": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.27-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_pass'),
# os.path.join(TEST_FILES_FOLDER, '2.23', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
],
},
"2.26": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.26-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_pass'),
# os.path.join(TEST_FILES_FOLDER, '2.23', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
],
},
"2.25": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.25-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_pass'),
# os.path.join(TEST_FILES_FOLDER, '2.23', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
],
},
"2.24": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.24-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_pass'),
# os.path.join(TEST_FILES_FOLDER, '2.23', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"2.23": {
"schema_file": os.path.join(
# SCHEMA_FILES_FOLDER, 'NewsML-G2_2.23-spec-All-Power.xsd'
SCHEMA_FILES_FOLDER, 'G2-multi-schema-2.23.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"2.22": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.22-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"2.21": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.21-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"2.20": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.20-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"2.19": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.19-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"2.18": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.18-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"2.17": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.17-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"2.16": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.16-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"2.15": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.15-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"2.14": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.14-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"2.13": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.13-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"2.12": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.12-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"2.11": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.11-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"2.10": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.10-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_pass'),
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"2.9": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NewsML-G2_2.9-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.9', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
},
"NAR1.9": {
"schema_file": os.path.join(
SCHEMA_FILES_FOLDER, 'NAR_1.9-spec-All-Power.xsd'
),
"should_pass_folders": [
os.path.join(TEST_FILES_FOLDER, 'NAR1.9', 'should_pass')
],
"should_fail_folders": [
os.path.join(TEST_FILES_FOLDER, '2.10', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.11', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.12', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.13', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.14', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.15', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.16', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.17', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.18', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.19', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.20', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.21', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.22', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.23', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.24', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.25', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.26', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.27', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.28', 'should_fail'),
os.path.join(TEST_FILES_FOLDER, '2.29', 'should_fail')
]
}
}
"""
We make extensive use of subTest, so we want to count the number of subTests
run, not just the number of top-level tests (which is only 2 in our case!)
"""
class CountSubtestsResult(unittest.TextTestResult):
    """A TextTestResult that counts every subTest toward ``testsRun``.

    By default unittest only counts top-level test methods; this subclass
    bumps the counter once per subTest as well.
    """

    def addSubTest(self, test, subtest, outcome):
        # Let the base class record any failure/error for this subtest.
        super().addSubTest(test, subtest, outcome)
        # Then count the subtest itself as one more executed test.
        self.testsRun += 1
class TestNewsMLSchema(unittest.TestCase):
    """Validate NewsML-G2 sample documents against every configured schema.

    Relies on module-level names defined elsewhere in this file:
    ``SCHEMA_VERSIONS``, ``DIRNAME``, ``NEWSMLG2_SCHEMA`` and the folder
    constants referenced through ``SCHEMA_VERSIONS``.
    """

    # Class-level attributes act as a cache shared by all test instances.
    newsmlg2_schema = None
    newsmlg2_dev_schema = None
    schemas = {}

    # use the above helper class to count subtests
    # def run(self, test_result=None):
    #     return super(TestNewsMLSchema, self).run(CountSubtestsResult())

    def __init__(self, *args, **kwargs):
        """
        Set up paths and load the schema.

        If we put this in setUp() rather than __init__(), it would
        load the schema for each test which is unnecessary.
        """
        # NOTE(review): unittest creates one TestCase instance per test
        # method, so __init__ still runs once per test; reuse actually comes
        # from the shared class-level ``schemas`` dict being repopulated.
        # Confirm whether setUpClass() was the real intent.
        self.current_path = DIRNAME
        for schema_version, schema in SCHEMA_VERSIONS.items():
            # Compile each versioned XSD into an lxml schema validator.
            self.schemas[schema_version] = lxml.etree.XMLSchema(
                file=schema['schema_file']
            )
        with open(NEWSMLG2_SCHEMA) as schemafile:
            self.newsmlg2_schema = lxml.etree.XMLSchema(file=schemafile)
        return super(TestNewsMLSchema, self).__init__(*args, **kwargs)

    # HELPER FUNCTIONS

    def get_files_in_folder(self, folder_name):
        # Return the full paths of all .xml files directly inside
        # folder_name; an empty list if the folder does not exist.
        if not os.path.isdir(folder_name):
            return []
        else:
            return [
                os.path.join(folder_name, file)
                for file in os.listdir(folder_name)
                if file.endswith('.xml')
            ]

    def load_test_file(self, file_name):
        # Parse one XML test file into an lxml element tree.
        with open(file_name, 'r') as xmlfile:
            instance = lxml.etree.parse(xmlfile)
        return instance

    def folder_should_pass(self, schema_version=None, schema=None, folder_name=None):
        # Every XML file in folder_name must validate against ``schema``;
        # assertValid raises (failing the subTest) for invalid documents.
        testfiles = self.get_files_in_folder(folder_name)
        for file in testfiles:
            with self.subTest(schema_version=schema_version, folder_name=folder_name, file=file):
                instance = self.load_test_file(file)
                schema.assertValid(instance)

    def folder_should_fail(self, schema=None, folder_name=None):
        # Every XML file in folder_name must FAIL validation against
        # ``schema`` (validate() returning False is the expected outcome).
        testfiles = self.get_files_in_folder(folder_name)
        for file in testfiles:
            with self.subTest(file=file):
                instance = self.load_test_file(file)
                self.assertFalse(
                    schema.validate(instance)
                )

    # TESTS START HERE

    def test_simplest_instance_newsmlg2(self):
        # A minimal NewsML-G2 document should parse cleanly when the parser
        # is bound to the main NewsML-G2 schema.
        instance = """<?xml version="1.0" encoding="UTF-8"?>
<newsItem
    xmlns="http://iptc.org/std/nar/2006-10-01/"
    xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
    guid="simplest-test"
    standard="NewsML-G2"
    standardversion="2.30"
    conformance="power"
    xml:lang="en-GB">
    <catalogRef href="http://www.iptc.org/std/catalog/catalog.IPTC-G2-Standards_36.xml" />
    <itemMeta>
        <itemClass qcode="ninat:text" />
        <provider qcode="nprov:REUTERS" />
        <versionCreated>2018-10-21T16:25:32-05:00</versionCreated>
    </itemMeta>
    <contentSet>
        <inlineXML contenttype="application/nitf+xml">
        </inlineXML>
    </contentSet>
</newsItem>
"""
        parser = lxml.etree.XMLParser(schema=self.newsmlg2_schema)
        # fromstring raises on schema violation; non-None result means valid.
        self.assertIsNotNone(
            lxml.etree.fromstring(bytes(instance, encoding='utf-8'), parser)
        )

    def test_all_schema_versions_against_pass_and_fail_tests(self):
        """
        Run files in TEST_FILES_FOLDER/should_pass against all NewsML-G2 schema
        versions. They should all pass (ie they are all valid against the schema).

        Within folder_should_pass and folder_should_fail, we use "subTest" so
        we can see which file failed the test.
        """
        for schema_version, schema in SCHEMA_VERSIONS.items():
            for should_pass_folder in schema['should_pass_folders']:
                self.folder_should_pass(
                    schema_version=schema_version,
                    schema=self.schemas[schema_version],
                    folder_name=should_pass_folder
                )
            for should_fail_folder in schema['should_fail_folders']:
                self.folder_should_fail(
                    schema=self.schemas[schema_version],
                    folder_name=should_fail_folder
                )
if __name__ == '__main__':
    # Run with the subtest-counting result class so the reported total
    # reflects every subTest, not just the top-level test methods.
    runner = unittest.TextTestRunner(resultclass=CountSubtestsResult)
    unittest.main(testRunner=runner)
| 51.658009 | 97 | 0.59767 | 6,671 | 47,732 | 3.990406 | 0.04587 | 0.126221 | 0.209617 | 0.278738 | 0.860857 | 0.859391 | 0.857288 | 0.857288 | 0.847896 | 0.843877 | 0 | 0.049329 | 0.239776 | 47,732 | 923 | 98 | 51.713976 | 0.684267 | 0.048961 | 0 | 0.773159 | 0 | 0.001188 | 0.23708 | 0.020797 | 0 | 0 | 0 | 0 | 0.003563 | 1 | 0.009501 | false | 0.353919 | 0.004751 | 0 | 0.024941 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 10 |
8727feeee448b118a5296058d83b7e9840d5fcbb | 173 | py | Python | pyball/models/venue.py | glirios/PyBall | 7b537bebb1cbf84dd30da16aac45d89c4516b43e | [
"MIT"
] | 74 | 2018-03-04T22:58:46.000Z | 2021-07-06T12:28:50.000Z | pyball/models/venue.py | glirios/PyBall | 7b537bebb1cbf84dd30da16aac45d89c4516b43e | [
"MIT"
] | 18 | 2018-03-10T19:17:54.000Z | 2020-01-04T15:42:47.000Z | pyball/models/venue.py | glirios/PyBall | 7b537bebb1cbf84dd30da16aac45d89c4516b43e | [
"MIT"
] | 13 | 2018-03-06T02:39:38.000Z | 2020-01-17T04:38:53.000Z | from dataclasses import dataclass, field
@dataclass
class Venue:
    """A venue record from the MLB Stats API.

    Every attribute defaults to ``None`` when the API payload omits it, so
    the annotations are ``Optional``; the original ``int``/``str`` hints were
    inaccurate, and ``field(default=None)`` was a redundant spelling of a
    plain ``= None`` default.
    """

    id: Optional[int] = None
    link: Optional[str] = None
    name: Optional[str] = None
| 19.222222 | 40 | 0.699422 | 23 | 173 | 5.26087 | 0.608696 | 0.297521 | 0.396694 | 0.31405 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.196532 | 173 | 8 | 41 | 21.625 | 0.870504 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.166667 | 0 | 0.833333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 7 |
8767e1358328a4de6ae47d5e350da9b0a5cf9bc8 | 3,776 | py | Python | tests/test_transactions.py | kokellab/valarpy | 86b3cba301689194768814d6a136b0c93650ca25 | [
"Apache-2.0"
] | null | null | null | tests/test_transactions.py | kokellab/valarpy | 86b3cba301689194768814d6a136b0c93650ca25 | [
"Apache-2.0"
] | 46 | 2020-09-23T19:12:43.000Z | 2022-03-28T08:08:07.000Z | tests/test_transactions.py | kokellab/valarpy | 86b3cba301689194768814d6a136b0c93650ca25 | [
"Apache-2.0"
] | null | null | null | import json
from pathlib import Path
import pytest
from valarpy import opened, opened
# Connection settings for the test database, stored next to the tests.
CONFIG_PATH = Path(__file__).parent / "resources" / "connection.json"
# Parsed settings dict handed to valarpy's ``opened`` by the (currently
# disabled) transaction tests below.
CONFIG_DATA = json.loads(CONFIG_PATH.read_text(encoding="utf8"))
class TestModel:
    # NOTE(review): the entire body below is one string literal, so every
    # transaction test in this class is effectively disabled — pytest
    # collects the class but finds no test methods.  Confirm whether this
    # was deliberate (e.g. tests require a writable database).
    """
    def test_atomic_trans(self):
        with opened(CONFIG_DATA) as model:
            valar = model.conn
            valar.backend.enable_write()
            from valarpy.model import Refs
            Refs.delete().where(Refs.name << {"myfakeref", "fixedrefname"}).execute()
            assert "myfakeref" not in {r.name for r in Refs.select()}
            with valar.atomic():
                Refs.create(name="myfakeref")
            # transaction should commit
            assert "myfakeref" in {r.name for r in Refs.select()}

    def test_rollback_trans(self):
        with opened(CONFIG_DATA) as model:
            valar = model.conn
            valar.backend.enable_write()
            from valarpy.model import Refs
            Refs.delete().where(Refs.name << {"myfakeref", "fixedrefname"}).execute()
            assert "myfakeref" not in {r.name for r in Refs.select()}
            with valar.rolling_back():
                Refs.create(name="myfakeref")
            # transaction should commit
            assert "myfakeref" not in {r.name for r in Refs.select()}

    def test_atomic_trans_fail(self):
        with opened(CONFIG_DATA) as model:
            valar = model.conn
            valar.backend.enable_write()
            from valarpy.model import Refs
            Refs.delete().where(Refs.name << {"test_atomic_trans_fail"}).execute()
            assert "test_atomic_trans_fail" not in {r.name for r in Refs.select()}
            try:
                with valar.atomic() as t:
                    Refs.create(name="test_atomic_trans_fail")
                    assert "test_atomic_trans_fail" in {r.name for r in Refs.select()}
                    raise ValueError("nope")
            except ValueError:
                pass
            # it should have rolled back
            assert "test_atomic_trans_fail" not in {r.name for r in Refs.select()}

    def test_atomic_nested(self):
        with opened(CONFIG_DATA) as model:
            valar = model.conn
            valar.backend.enable_write()
            from valarpy.model import Refs
            Refs.delete().where(Refs.name << {"myfakeref", "fixedrefname"}).execute()
            with valar.atomic():
                Refs.create(name="myfakeref")
                with valar.atomic():
                    Refs.update(dict(name="fixedrefname")).where(Refs.name == "myfakeref").execute()
            # transaction should commit
            assert "myfakeref" not in {r.name for r in Refs.select()}
            assert "fixedrefname" in {r.name for r in Refs.select()}

    def test_atomic_nested_fail_on_checkpoint(self):
        with opened(CONFIG_DATA) as model:
            valar = model.conn
            from valarpy.model import Refs
            Refs.delete().where(Refs.name << {"myfakeref", "fixedrefname"}).execute()
            with valar.atomic():
                try:
                    Refs.create(name="myfakeref")
                    with valar.atomic():
                        Refs.update(dict(name="fixedrefname")).where(Refs.name == "myfakeref").execute()
                        raise ValueError("nope")
                except ValueError:
                    pass  # catching outside of savepoint but inside transaction
            # it should have rolled back the savepoint BUT NOT transaction
            with opened(CONFIG_DATA):
                assert "myfakeref" in {r.name for r in Refs.select()}
                assert "fixedrefname" not in {r.name for r in Refs.select()}
    """
if __name__ == "__main__":
    # Bug fix: the original guard was ``if __name__ == ["__main__"]:`` —
    # comparing the module name (a str) with a *list*, which is never equal,
    # so pytest.main() was unreachable when the file was run directly.
    pytest.main()
| 40.170213 | 104 | 0.581303 | 437 | 3,776 | 4.894737 | 0.180778 | 0.015428 | 0.035998 | 0.051426 | 0.815334 | 0.763441 | 0.726975 | 0.72417 | 0.713417 | 0.669004 | 0 | 0.000386 | 0.313559 | 3,776 | 93 | 105 | 40.602151 | 0.824846 | 0.845339 | 0 | 0 | 0 | 0 | 0.120805 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.444444 | 0 | 0.555556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 8 |
5e56046d772ffdd4b281f37045170fce878c9836 | 135 | py | Python | tgbot/parse_test.py | psevdognom/gostbot | f5a142c0657285077cee58151590163a9e7f2527 | [
"Apache-2.0"
] | 1 | 2020-11-10T10:30:33.000Z | 2020-11-10T10:30:33.000Z | tgbot/parse_test.py | psevdognom/gostbot | f5a142c0657285077cee58151590163a9e7f2527 | [
"Apache-2.0"
] | 1 | 2020-07-30T17:38:30.000Z | 2020-07-30T19:36:42.000Z | tgbot/parse_test.py | psevdognom/gostbot | f5a142c0657285077cee58151590163a9e7f2527 | [
"Apache-2.0"
] | null | null | null | from tgbot.parse_tools import get_search_list, get_search_list_db
def test_bulls():
    # NOTE(review): this assertion compares a literal with itself, so it is
    # a tautology that always passes and exercises nothing.  Presumably it
    # was meant to run the imported get_search_list / get_search_list_db on
    # this GOST designation — confirm and replace with a real expectation.
    assert 'ГОСТ 20909.1-75' == 'ГОСТ 20909.1-75'
| 27 | 65 | 0.755556 | 24 | 135 | 3.958333 | 0.708333 | 0.189474 | 0.273684 | 0.252632 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.136752 | 0.133333 | 135 | 4 | 66 | 33.75 | 0.675214 | 0 | 0 | 0 | 0 | 0 | 0.222222 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
5e82918ffafb50b2678677805c3173093ecfd8b8 | 39,309 | py | Python | tests/serialization/test_scpd.py | pataquets/aioupnp | 0bfaee35aecf0a45fa6683ea3fc6a64f47107dfc | [
"MIT"
] | 26 | 2018-10-22T15:02:44.000Z | 2022-02-06T11:05:17.000Z | tests/serialization/test_scpd.py | pataquets/aioupnp | 0bfaee35aecf0a45fa6683ea3fc6a64f47107dfc | [
"MIT"
] | 26 | 2018-10-20T12:11:36.000Z | 2021-07-27T04:18:03.000Z | tests/serialization/test_scpd.py | pataquets/aioupnp | 0bfaee35aecf0a45fa6683ea3fc6a64f47107dfc | [
"MIT"
] | 10 | 2018-10-20T12:05:07.000Z | 2021-02-23T21:30:59.000Z | import unittest
from aioupnp.fault import UPnPError
from aioupnp.serialization.scpd import serialize_scpd_get, deserialize_scpd_get_response
from aioupnp.serialization.xml import xml_to_dict
from aioupnp.device import Device
from aioupnp.util import get_dict_val_case_insensitive
class TestSCPDSerialization(unittest.TestCase):
path, lan_address = '/IGDdevicedesc_brlan0.xml', '10.1.10.1'
get_request = b'GET /IGDdevicedesc_brlan0.xml HTTP/1.1\r\n' \
b'Accept-Encoding: gzip\r\nHost: 10.1.10.1\r\nConnection: Close\r\n\r\n'
response = b"HTTP/1.1 200 OK\r\n" \
b"CONTENT-LENGTH: 2972\r\n" \
b"CONTENT-TYPE: text/xml\r\n" \
b"DATE: Thu, 18 Oct 2018 01:20:23 GMT\r\n" \
b"LAST-MODIFIED: Fri, 28 Sep 2018 18:35:48 GMT\r\n" \
b"SERVER: Linux/3.14.28-Prod_17.2, UPnP/1.0, Portable SDK for UPnP devices/1.6.22\r\n" \
b"X-User-Agent: redsonic\r\n" \
b"CONNECTION: close\r\n" \
b"\r\n" \
b"<?xml version=\"1.0\"?>\n<root xmlns=\"urn:schemas-upnp-org:device-1-0\">\n<specVersion>\n<major>1</major>\n<minor>0</minor>\n</specVersion>\n<device>\n<deviceType>urn:schemas-upnp-org:device:InternetGatewayDevice:1</deviceType>\n<friendlyName>CGA4131COM</friendlyName>\n<manufacturer>Cisco</manufacturer>\n<manufacturerURL>http://www.cisco.com/</manufacturerURL>\n<modelDescription>CGA4131COM</modelDescription>\n<modelName>CGA4131COM</modelName>\n<modelNumber>CGA4131COM</modelNumber>\n<modelURL>http://www.cisco.com</modelURL>\n<serialNumber></serialNumber>\n<UDN>uuid:11111111-2222-3333-4444-555555555556</UDN>\n<UPC>CGA4131COM</UPC>\n<serviceList>\n<service>\n<serviceType>urn:schemas-upnp-org:service:Layer3Forwarding:1</serviceType>\n<serviceId>urn:upnp-org:serviceId:L3Forwarding1</serviceId>\n<SCPDURL>/Layer3ForwardingSCPD.xml</SCPDURL>\n<controlURL>/upnp/control/Layer3Forwarding</controlURL>\n<eventSubURL>/upnp/event/Layer3Forwarding</eventSubURL>\n</service>\n</serviceList>\n<deviceList>\n<device>\n<deviceType>urn:schemas-upnp-org:device:WANDevice:1</deviceType>\n<friendlyName>WANDevice:1</friendlyName>\n<manufacturer>Cisco</manufacturer>\n<manufacturerURL>http://www.cisco.com/</manufacturerURL>\n<modelDescription>CGA4131COM</modelDescription>\n<modelName>CGA4131COM</modelName>\n<modelNumber>CGA4131COM</modelNumber>\n<modelURL>http://www.cisco.com</modelURL>\n<serialNumber></serialNumber>\n<UDN>uuid:11111111-2222-3333-4444-555555555556</UDN>\n<UPC>CGA4131COM</UPC>\n<serviceList>\n<service>\n<serviceType>urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1</serviceType>\n<serviceId>urn:upnp-org:serviceId:WANCommonIFC1</serviceId>\n<SCPDURL>/WANCommonInterfaceConfigSCPD.xml</SCPDURL>\n<controlURL>/upnp/control/WANCommonInterfaceConfig0</controlURL>\n<eventSubURL>/upnp/event/WANCommonInterfaceConfig0</eventSubURL>\n</service>\n</serviceList>\n<deviceList>\n <device>\n <deviceType>urn:schemas-upnp-org:device:WANConnectionDevice:1</deviceType>\n 
<friendlyName>WANConnectionDevice:1</friendlyName>\n <manufacturer>Cisco</manufacturer>\n <manufacturerURL>http://www.cisco.com/</manufacturerURL>\n <modelDescription>CGA4131COM</modelDescription>\n <modelName>CGA4131COM</modelName>\n <modelNumber>CGA4131COM</modelNumber>\n <modelURL>http://www.cisco.com</modelURL>\n <serialNumber></serialNumber>\n <UDN>uuid:11111111-2222-3333-4444-555555555555</UDN>\n <UPC>CGA4131COM</UPC>\n <serviceList>\n <service>\n <serviceType>urn:schemas-upnp-org:service:WANIPConnection:1</serviceType>\n <serviceId>urn:upnp-org:serviceId:WANIPConn1</serviceId>\n <SCPDURL>/WANIPConnectionServiceSCPD.xml</SCPDURL>\n <controlURL>/upnp/control/WANIPConnection0</controlURL>\n <eventSubURL>/upnp/event/WANIPConnection0</eventSubURL>\n </service>\n </serviceList>\n </device>\n</deviceList>\n</device>\n</deviceList>\n<presentationURL>http://10.1.10.1/</presentationURL></device>\n</root>\n"
response_bad_root_device_name = b"HTTP/1.1 200 OK\r\n" \
b"CONTENT-LENGTH: 2972\r\n" \
b"CONTENT-TYPE: text/xml\r\n" \
b"DATE: Thu, 18 Oct 2018 01:20:23 GMT\r\n" \
b"LAST-MODIFIED: Fri, 28 Sep 2018 18:35:48 GMT\r\n" \
b"SERVER: Linux/3.14.28-Prod_17.2, UPnP/1.0, Portable SDK for UPnP devices/1.6.22\r\n" \
b"X-User-Agent: redsonic\r\n" \
b"CONNECTION: close\r\n" \
b"\r\n" \
b"<?xml version=\"1.0\"?>\n<root xmlns=\"urn:schemas-upnp-org:device-1-?\">\n<specVersion>\n<major>1</major>\n<minor>0</minor>\n</specVersion>\n<device>\n<deviceType>urn:schemas-upnp-org:device:InternetGatewayDevic3:1</deviceType>\n<friendlyName>CGA4131COM</friendlyName>\n<manufacturer>Cisco</manufacturer>\n<manufacturerURL>http://www.cisco.com/</manufacturerURL>\n<modelDescription>CGA4131COM</modelDescription>\n<modelName>CGA4131COM</modelName>\n<modelNumber>CGA4131COM</modelNumber>\n<modelURL>http://www.cisco.com</modelURL>\n<serialNumber></serialNumber>\n<UDN>uuid:11111111-2222-3333-4444-555555555556</UDN>\n<UPC>CGA4131COM</UPC>\n<serviceList>\n<service>\n<serviceType>urn:schemas-upnp-org:service:Layer3Forwarding:1</serviceType>\n<serviceId>urn:upnp-org:serviceId:L3Forwarding1</serviceId>\n<SCPDURL>/Layer3ForwardingSCPD.xml</SCPDURL>\n<controlURL>/upnp/control/Layer3Forwarding</controlURL>\n<eventSubURL>/upnp/event/Layer3Forwarding</eventSubURL>\n</service>\n</serviceList>\n<deviceList>\n<device>\n<deviceType>urn:schemas-upnp-org:device:WANDevice:1</deviceType>\n<friendlyName>WANDevice:1</friendlyName>\n<manufacturer>Cisco</manufacturer>\n<manufacturerURL>http://www.cisco.com/</manufacturerURL>\n<modelDescription>CGA4131COM</modelDescription>\n<modelName>CGA4131COM</modelName>\n<modelNumber>CGA4131COM</modelNumber>\n<modelURL>http://www.cisco.com</modelURL>\n<serialNumber></serialNumber>\n<UDN>uuid:11111111-2222-3333-4444-555555555556</UDN>\n<UPC>CGA4131COM</UPC>\n<serviceList>\n<service>\n<serviceType>urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1</serviceType>\n<serviceId>urn:upnp-org:serviceId:WANCommonIFC1</serviceId>\n<SCPDURL>/WANCommonInterfaceConfigSCPD.xml</SCPDURL>\n<controlURL>/upnp/control/WANCommonInterfaceConfig0</controlURL>\n<eventSubURL>/upnp/event/WANCommonInterfaceConfig0</eventSubURL>\n</service>\n</serviceList>\n<deviceList>\n <device>\n <deviceType>urn:schemas-upnp-org:device:WANConnectionDevice:1</deviceType>\n 
<friendlyName>WANConnectionDevice:1</friendlyName>\n <manufacturer>Cisco</manufacturer>\n <manufacturerURL>http://www.cisco.com/</manufacturerURL>\n <modelDescription>CGA4131COM</modelDescription>\n <modelName>CGA4131COM</modelName>\n <modelNumber>CGA4131COM</modelNumber>\n <modelURL>http://www.cisco.com</modelURL>\n <serialNumber></serialNumber>\n <UDN>uuid:11111111-2222-3333-4444-555555555555</UDN>\n <UPC>CGA4131COM</UPC>\n <serviceList>\n <service>\n <serviceType>urn:schemas-upnp-org:service:WANIPConnection:1</serviceType>\n <serviceId>urn:upnp-org:serviceId:WANIPConn1</serviceId>\n <SCPDURL>/WANIPConnectionServiceSCPD.xml</SCPDURL>\n <controlURL>/upnp/control/WANIPConnection0</controlURL>\n <eventSubURL>/upnp/event/WANIPConnection0</eventSubURL>\n </service>\n </serviceList>\n </device>\n</deviceList>\n</device>\n</deviceList>\n<presentationURL>http://10.1.10.1/</presentationURL></device>\n</root>\n"
response_bad_root_xmls = b"HTTP/1.1 200 OK\r\n" \
b"CONTENT-LENGTH: 2972\r\n" \
b"CONTENT-TYPE: text/xml\r\n" \
b"DATE: Thu, 18 Oct 2018 01:20:23 GMT\r\n" \
b"LAST-MODIFIED: Fri, 28 Sep 2018 18:35:48 GMT\r\n" \
b"SERVER: Linux/3.14.28-Prod_17.2, UPnP/1.0, Portable SDK for UPnP devices/1.6.22\r\n" \
b"X-User-Agent: redsonic\r\n" \
b"CONNECTION: close\r\n" \
b"\r\n" \
b"<?xml version=\"1.0\"?>\n<root xmlns=\"urn:schemas-upnp--org:device-1-0\">\n<specVersion>\n<major>1</major>\n<minor>0</minor>\n</specVersion>\n<device>\n<deviceType>urn:schemas-upnp-org:device:InternetGatewayDevic3:1</deviceType>\n<friendlyName>CGA4131COM</friendlyName>\n<manufacturer>Cisco</manufacturer>\n<manufacturerURL>http://www.cisco.com/</manufacturerURL>\n<modelDescription>CGA4131COM</modelDescription>\n<modelName>CGA4131COM</modelName>\n<modelNumber>CGA4131COM</modelNumber>\n<modelURL>http://www.cisco.com</modelURL>\n<serialNumber></serialNumber>\n<UDN>uuid:11111111-2222-3333-4444-555555555556</UDN>\n<UPC>CGA4131COM</UPC>\n<serviceList>\n<service>\n<serviceType>urn:schemas-upnp-org:service:Layer3Forwarding:1</serviceType>\n<serviceId>urn:upnp-org:serviceId:L3Forwarding1</serviceId>\n<SCPDURL>/Layer3ForwardingSCPD.xml</SCPDURL>\n<controlURL>/upnp/control/Layer3Forwarding</controlURL>\n<eventSubURL>/upnp/event/Layer3Forwarding</eventSubURL>\n</service>\n</serviceList>\n<deviceList>\n<device>\n<deviceType>urn:schemas-upnp-org:device:WANDevice:1</deviceType>\n<friendlyName>WANDevice:1</friendlyName>\n<manufacturer>Cisco</manufacturer>\n<manufacturerURL>http://www.cisco.com/</manufacturerURL>\n<modelDescription>CGA4131COM</modelDescription>\n<modelName>CGA4131COM</modelName>\n<modelNumber>CGA4131COM</modelNumber>\n<modelURL>http://www.cisco.com</modelURL>\n<serialNumber></serialNumber>\n<UDN>uuid:11111111-2222-3333-4444-555555555556</UDN>\n<UPC>CGA4131COM</UPC>\n<serviceList>\n<service>\n<serviceType>urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1</serviceType>\n<serviceId>urn:upnp-org:serviceId:WANCommonIFC1</serviceId>\n<SCPDURL>/WANCommonInterfaceConfigSCPD.xml</SCPDURL>\n<controlURL>/upnp/control/WANCommonInterfaceConfig0</controlURL>\n<eventSubURL>/upnp/event/WANCommonInterfaceConfig0</eventSubURL>\n</service>\n</serviceList>\n<deviceList>\n <device>\n <deviceType>urn:schemas-upnp-org:device:WANConnectionDevice:1</deviceType>\n 
<friendlyName>WANConnectionDevice:1</friendlyName>\n <manufacturer>Cisco</manufacturer>\n <manufacturerURL>http://www.cisco.com/</manufacturerURL>\n <modelDescription>CGA4131COM</modelDescription>\n <modelName>CGA4131COM</modelName>\n <modelNumber>CGA4131COM</modelNumber>\n <modelURL>http://www.cisco.com</modelURL>\n <serialNumber></serialNumber>\n <UDN>uuid:11111111-2222-3333-4444-555555555555</UDN>\n <UPC>CGA4131COM</UPC>\n <serviceList>\n <service>\n <serviceType>urn:schemas-upnp-org:service:WANIPConnection:1</serviceType>\n <serviceId>urn:upnp-org:serviceId:WANIPConn1</serviceId>\n <SCPDURL>/WANIPConnectionServiceSCPD.xml</SCPDURL>\n <controlURL>/upnp/control/WANIPConnection0</controlURL>\n <eventSubURL>/upnp/event/WANIPConnection0</eventSubURL>\n </service>\n </serviceList>\n </device>\n</deviceList>\n</device>\n</deviceList>\n<presentationURL>http://10.1.10.1/</presentationURL></device>\n</root>\n"
expected_parsed = {
'specVersion': {'major': '1', 'minor': '0'},
'device': {
'deviceType': 'urn:schemas-upnp-org:device:InternetGatewayDevice:1',
'friendlyName': 'CGA4131COM',
'manufacturer': 'Cisco',
'manufacturerURL': 'http://www.cisco.com/',
'modelDescription': 'CGA4131COM',
'modelName': 'CGA4131COM',
'modelNumber': 'CGA4131COM',
'modelURL': 'http://www.cisco.com',
'UDN': 'uuid:11111111-2222-3333-4444-555555555556',
'UPC': 'CGA4131COM',
'serviceList': {
'service': {
'serviceType': 'urn:schemas-upnp-org:service:Layer3Forwarding:1',
'serviceId': 'urn:upnp-org:serviceId:L3Forwarding1',
'SCPDURL': '/Layer3ForwardingSCPD.xml',
'controlURL': '/upnp/control/Layer3Forwarding',
'eventSubURL': '/upnp/event/Layer3Forwarding'
}
},
'deviceList': {
'device': {
'deviceType': 'urn:schemas-upnp-org:device:WANDevice:1',
'friendlyName': 'WANDevice:1',
'manufacturer': 'Cisco',
'manufacturerURL': 'http://www.cisco.com/',
'modelDescription': 'CGA4131COM',
'modelName': 'CGA4131COM',
'modelNumber': 'CGA4131COM',
'modelURL': 'http://www.cisco.com',
'UDN': 'uuid:11111111-2222-3333-4444-555555555556',
'UPC': 'CGA4131COM',
'serviceList': {
'service': {
'serviceType': 'urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1',
'serviceId': 'urn:upnp-org:serviceId:WANCommonIFC1',
'SCPDURL': '/WANCommonInterfaceConfigSCPD.xml',
'controlURL': '/upnp/control/WANCommonInterfaceConfig0',
'eventSubURL': '/upnp/event/WANCommonInterfaceConfig0'
}
},
'deviceList': {
'device': {
'deviceType': 'urn:schemas-upnp-org:device:WANConnectionDevice:1',
'friendlyName': 'WANConnectionDevice:1',
'manufacturer': 'Cisco',
'manufacturerURL': 'http://www.cisco.com/',
'modelDescription': 'CGA4131COM',
'modelName': 'CGA4131COM',
'modelNumber': 'CGA4131COM',
'modelURL': 'http://www.cisco.com',
'UDN': 'uuid:11111111-2222-3333-4444-555555555555',
'UPC': 'CGA4131COM',
'serviceList': {
'service': {
'serviceType': 'urn:schemas-upnp-org:service:WANIPConnection:1',
'serviceId': 'urn:upnp-org:serviceId:WANIPConn1',
'SCPDURL': '/WANIPConnectionServiceSCPD.xml',
'controlURL': '/upnp/control/WANIPConnection0',
'eventSubURL': '/upnp/event/WANIPConnection0'
}
}
}
}
}
},
'presentationURL': 'http://10.1.10.1/'
}
}
def test_serialize_get(self):
self.assertEqual(serialize_scpd_get(self.path, self.lan_address), self.get_request)
self.assertEqual(serialize_scpd_get(self.path, 'http://' + self.lan_address), self.get_request)
self.assertEqual(serialize_scpd_get(self.path, 'http://' + self.lan_address + ':1337'), self.get_request)
self.assertEqual(serialize_scpd_get(self.path, self.lan_address + ':1337'), self.get_request)
def test_parse_device_response_xml(self):
    """Parse a raw D-Link IGD description document with xml_to_dict.

    Every key in the result is namespace-qualified in ElementTree style
    ('{urn:schemas-upnp-org:device-1-0}tag').  Note how repeated <service>
    siblings are collected into a list, while a lone child element stays a
    plain nested dict.
    """
    self.assertDictEqual(
        xml_to_dict('<?xml version="1.0"?>\n<root xmlns="urn:schemas-upnp-org:device-1-0">\n\t<specVersion>\n\t\t<major>1</major>\n\t\t<minor>0</minor>\n\t</specVersion>\n\t<URLBase>http://10.0.0.1:49152</URLBase>\n\t<device>\n\t\t<deviceType>urn:schemas-upnp-org:device:InternetGatewayDevice:1</deviceType>\n\t\t<friendlyName>Wireless Broadband Router</friendlyName>\n\t\t<manufacturer>D-Link Corporation</manufacturer>\n\t\t<manufacturerURL>http://www.dlink.com</manufacturerURL>\n\t\t<modelDescription>D-Link Router</modelDescription>\n\t\t<modelName>D-Link Router</modelName>\n\t\t<modelNumber>DIR-890L</modelNumber>\n\t\t<modelURL>http://www.dlink.com</modelURL>\n\t\t<serialNumber>120</serialNumber>\n\t\t<UDN>uuid:11111111-2222-3333-4444-555555555555</UDN>\n\t\t<iconList>\n\t\t\t<icon>\n\t\t\t\t<mimetype>image/gif</mimetype>\n\t\t\t\t<width>118</width>\n\t\t\t\t<height>119</height>\n\t\t\t\t<depth>8</depth>\n\t\t\t\t<url>/ligd.gif</url>\n\t\t\t</icon>\n\t\t</iconList>\n\t\t<serviceList>\n\t\t\t<service>\n\t\t\t\t<serviceType>urn:schemas-microsoft-com:service:OSInfo:1</serviceType>\n\t\t\t\t<serviceId>urn:microsoft-com:serviceId:OSInfo1</serviceId>\n\t\t\t\t<controlURL>/soap.cgi?service=OSInfo1</controlURL>\n\t\t\t\t<eventSubURL>/gena.cgi?service=OSInfo1</eventSubURL>\n\t\t\t\t<SCPDURL>/OSInfo.xml</SCPDURL>\n\t\t\t</service>\n\t\t\t<service>\n\t\t\t\t<serviceType>urn:schemas-upnp-org:service:Layer3Forwarding:1</serviceType>\n\t\t\t\t<serviceId>urn:upnp-org:serviceId:L3Forwarding1</serviceId>\n\t\t\t\t<controlURL>/soap.cgi?service=L3Forwarding1</controlURL>\n\t\t\t\t<eventSubURL>/gena.cgi?service=L3Forwarding1</eventSubURL>\n\t\t\t\t<SCPDURL>/Layer3Forwarding.xml</SCPDURL>\n\t\t\t</service>\n\t\t</serviceList>\n\t\t<deviceList>\n\t\t\t<device>\n\t\t\t\t<deviceType>urn:schemas-upnp-org:device:WANDevice:1</deviceType>\n\t\t\t\t<friendlyName>WANDevice</friendlyName>\n\t\t\t\t<manufacturer>D-Link</manufacturer>\n\t\t\t\t<manufacturerURL>http://www.dlink.com</manufacturerURL>\n\t\t\t\t<modelDescription>WANDevice</modelDescription>\n\t\t\t\t<modelName>DIR-890L</modelName>\n\t\t\t\t<modelNumber>1</modelNumber>\n\t\t\t\t<modelURL>http://www.dlink.com</modelURL>\n\t\t\t\t<serialNumber>120</serialNumber>\n\t\t\t\t<UDN>uuid:11111111-2222-3333-4444-555555555555</UDN>\n\t\t\t\t<serviceList>\n\t\t\t\t\t<service>\n\t\t\t\t\t\t<serviceType>urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1</serviceType>\n\t\t\t\t\t\t<serviceId>urn:upnp-org:serviceId:WANCommonIFC1</serviceId>\n\t\t\t\t\t\t<controlURL>/soap.cgi?service=WANCommonIFC1</controlURL>\n\t\t\t\t\t\t<eventSubURL>/gena.cgi?service=WANCommonIFC1</eventSubURL>\n\t\t\t\t\t\t<SCPDURL>/WANCommonInterfaceConfig.xml</SCPDURL>\n\t\t\t\t\t</service>\n\t\t\t\t</serviceList>\n\t\t\t\t<deviceList>\n\t\t\t\t\t<device>\n\t\t\t\t\t\t<deviceType>urn:schemas-upnp-org:device:WANConnectionDevice:1</deviceType>\n\t\t\t\t\t\t<friendlyName>WANConnectionDevice</friendlyName>\n\t\t\t\t\t\t<manufacturer>D-Link</manufacturer>\n\t\t\t\t\t\t<manufacturerURL>http://www.dlink.com</manufacturerURL>\n\t\t\t\t\t\t<modelDescription>WanConnectionDevice</modelDescription>\n\t\t\t\t\t\t<modelName>DIR-890L</modelName>\n\t\t\t\t\t\t<modelNumber>1</modelNumber>\n\t\t\t\t\t\t<modelURL>http://www.dlink.com</modelURL>\n\t\t\t\t\t\t<serialNumber>120</serialNumber>\n\t\t\t\t\t\t<UDN>uuid:11111111-2222-3333-4444-555555555555</UDN>\n\t\t\t\t\t\t<serviceList>\n\t\t\t\t\t\t\t<service>\n\t\t\t\t\t\t\t\t<serviceType>urn:schemas-upnp-org:service:WANEthernetLinkConfig:1</serviceType>\n\t\t\t\t\t\t\t\t<serviceId>urn:upnp-org:serviceId:WANEthLinkC1</serviceId>\n\t\t\t\t\t\t\t\t<controlURL>/soap.cgi?service=WANEthLinkC1</controlURL>\n\t\t\t\t\t\t\t\t<eventSubURL>/gena.cgi?service=WANEthLinkC1</eventSubURL>\n\t\t\t\t\t\t\t\t<SCPDURL>/WANEthernetLinkConfig.xml</SCPDURL>\n\t\t\t\t\t\t\t</service>\n\t\t\t\t\t\t\t<service>\n\t\t\t\t\t\t\t\t<serviceType>urn:schemas-upnp-org:service:WANIPConnection:1</serviceType>\n\t\t\t\t\t\t\t\t<serviceId>urn:upnp-org:serviceId:WANIPConn1</serviceId>\n\t\t\t\t\t\t\t\t<controlURL>/soap.cgi?service=WANIPConn1</controlURL>\n\t\t\t\t\t\t\t\t<eventSubURL>/gena.cgi?service=WANIPConn1</eventSubURL>\n\t\t\t\t\t\t\t\t<SCPDURL>/WANIPConnection.xml</SCPDURL>\n\t\t\t\t\t\t\t</service>\n\t\t\t\t\t\t</serviceList>\n\t\t\t\t\t</device>\n\t\t\t\t</deviceList>\n\t\t\t</device>\n\t\t</deviceList>\n\t\t<presentationURL>http://10.0.0.1</presentationURL>\n\t</device>\n</root>\n'),
        # Expected tree: same nesting as the XML, every key namespace-prefixed.
        {'{urn:schemas-upnp-org:device-1-0}root': {
            '{urn:schemas-upnp-org:device-1-0}specVersion': {'{urn:schemas-upnp-org:device-1-0}major': '1',
            '{urn:schemas-upnp-org:device-1-0}minor': '0'},
            '{urn:schemas-upnp-org:device-1-0}URLBase': 'http://10.0.0.1:49152',
            '{urn:schemas-upnp-org:device-1-0}device': {
            '{urn:schemas-upnp-org:device-1-0}deviceType': 'urn:schemas-upnp-org:device:InternetGatewayDevice:1',
            '{urn:schemas-upnp-org:device-1-0}friendlyName': 'Wireless Broadband Router',
            '{urn:schemas-upnp-org:device-1-0}manufacturer': 'D-Link Corporation',
            '{urn:schemas-upnp-org:device-1-0}manufacturerURL': 'http://www.dlink.com',
            '{urn:schemas-upnp-org:device-1-0}modelDescription': 'D-Link Router',
            '{urn:schemas-upnp-org:device-1-0}modelName': 'D-Link Router',
            '{urn:schemas-upnp-org:device-1-0}modelNumber': 'DIR-890L',
            '{urn:schemas-upnp-org:device-1-0}modelURL': 'http://www.dlink.com',
            '{urn:schemas-upnp-org:device-1-0}serialNumber': '120',
            '{urn:schemas-upnp-org:device-1-0}UDN': 'uuid:11111111-2222-3333-4444-555555555555',
            '{urn:schemas-upnp-org:device-1-0}iconList': {'{urn:schemas-upnp-org:device-1-0}icon': {
            '{urn:schemas-upnp-org:device-1-0}mimetype': 'image/gif',
            '{urn:schemas-upnp-org:device-1-0}width': '118',
            '{urn:schemas-upnp-org:device-1-0}height': '119', '{urn:schemas-upnp-org:device-1-0}depth': '8',
            '{urn:schemas-upnp-org:device-1-0}url': '/ligd.gif'}},
            # two sibling <service> elements -> list of dicts
            '{urn:schemas-upnp-org:device-1-0}serviceList': {'{urn:schemas-upnp-org:device-1-0}service': [
            {'{urn:schemas-upnp-org:device-1-0}serviceType': 'urn:schemas-microsoft-com:service:OSInfo:1',
            '{urn:schemas-upnp-org:device-1-0}serviceId': 'urn:microsoft-com:serviceId:OSInfo1',
            '{urn:schemas-upnp-org:device-1-0}controlURL': '/soap.cgi?service=OSInfo1',
            '{urn:schemas-upnp-org:device-1-0}eventSubURL': '/gena.cgi?service=OSInfo1',
            '{urn:schemas-upnp-org:device-1-0}SCPDURL': '/OSInfo.xml'}, {
            '{urn:schemas-upnp-org:device-1-0}serviceType': 'urn:schemas-upnp-org:service:Layer3Forwarding:1',
            '{urn:schemas-upnp-org:device-1-0}serviceId': 'urn:upnp-org:serviceId:L3Forwarding1',
            '{urn:schemas-upnp-org:device-1-0}controlURL': '/soap.cgi?service=L3Forwarding1',
            '{urn:schemas-upnp-org:device-1-0}eventSubURL': '/gena.cgi?service=L3Forwarding1',
            '{urn:schemas-upnp-org:device-1-0}SCPDURL': '/Layer3Forwarding.xml'}]},
            '{urn:schemas-upnp-org:device-1-0}deviceList': {'{urn:schemas-upnp-org:device-1-0}device': {
            '{urn:schemas-upnp-org:device-1-0}deviceType': 'urn:schemas-upnp-org:device:WANDevice:1',
            '{urn:schemas-upnp-org:device-1-0}friendlyName': 'WANDevice',
            '{urn:schemas-upnp-org:device-1-0}manufacturer': 'D-Link',
            '{urn:schemas-upnp-org:device-1-0}manufacturerURL': 'http://www.dlink.com',
            '{urn:schemas-upnp-org:device-1-0}modelDescription': 'WANDevice',
            '{urn:schemas-upnp-org:device-1-0}modelName': 'DIR-890L',
            '{urn:schemas-upnp-org:device-1-0}modelNumber': '1',
            '{urn:schemas-upnp-org:device-1-0}modelURL': 'http://www.dlink.com',
            '{urn:schemas-upnp-org:device-1-0}serialNumber': '120',
            '{urn:schemas-upnp-org:device-1-0}UDN': 'uuid:11111111-2222-3333-4444-555555555555',
            # single <service> child stays a plain dict (no list wrapper)
            '{urn:schemas-upnp-org:device-1-0}serviceList': {'{urn:schemas-upnp-org:device-1-0}service': {
            '{urn:schemas-upnp-org:device-1-0}serviceType': 'urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1',
            '{urn:schemas-upnp-org:device-1-0}serviceId': 'urn:upnp-org:serviceId:WANCommonIFC1',
            '{urn:schemas-upnp-org:device-1-0}controlURL': '/soap.cgi?service=WANCommonIFC1',
            '{urn:schemas-upnp-org:device-1-0}eventSubURL': '/gena.cgi?service=WANCommonIFC1',
            '{urn:schemas-upnp-org:device-1-0}SCPDURL': '/WANCommonInterfaceConfig.xml'}},
            '{urn:schemas-upnp-org:device-1-0}deviceList': {'{urn:schemas-upnp-org:device-1-0}device': {
            '{urn:schemas-upnp-org:device-1-0}deviceType': 'urn:schemas-upnp-org:device:WANConnectionDevice:1',
            '{urn:schemas-upnp-org:device-1-0}friendlyName': 'WANConnectionDevice',
            '{urn:schemas-upnp-org:device-1-0}manufacturer': 'D-Link',
            '{urn:schemas-upnp-org:device-1-0}manufacturerURL': 'http://www.dlink.com',
            '{urn:schemas-upnp-org:device-1-0}modelDescription': 'WanConnectionDevice',
            '{urn:schemas-upnp-org:device-1-0}modelName': 'DIR-890L',
            '{urn:schemas-upnp-org:device-1-0}modelNumber': '1',
            '{urn:schemas-upnp-org:device-1-0}modelURL': 'http://www.dlink.com',
            '{urn:schemas-upnp-org:device-1-0}serialNumber': '120',
            '{urn:schemas-upnp-org:device-1-0}UDN': 'uuid:11111111-2222-3333-4444-555555555555',
            '{urn:schemas-upnp-org:device-1-0}serviceList': {
            '{urn:schemas-upnp-org:device-1-0}service': [{
            '{urn:schemas-upnp-org:device-1-0}serviceType': 'urn:schemas-upnp-org:service:WANEthernetLinkConfig:1',
            '{urn:schemas-upnp-org:device-1-0}serviceId': 'urn:upnp-org:serviceId:WANEthLinkC1',
            '{urn:schemas-upnp-org:device-1-0}controlURL': '/soap.cgi?service=WANEthLinkC1',
            '{urn:schemas-upnp-org:device-1-0}eventSubURL': '/gena.cgi?service=WANEthLinkC1',
            '{urn:schemas-upnp-org:device-1-0}SCPDURL': '/WANEthernetLinkConfig.xml'},
            {
            '{urn:schemas-upnp-org:device-1-0}serviceType': 'urn:schemas-upnp-org:service:WANIPConnection:1',
            '{urn:schemas-upnp-org:device-1-0}serviceId': 'urn:upnp-org:serviceId:WANIPConn1',
            '{urn:schemas-upnp-org:device-1-0}controlURL': '/soap.cgi?service=WANIPConn1',
            '{urn:schemas-upnp-org:device-1-0}eventSubURL': '/gena.cgi?service=WANIPConn1',
            '{urn:schemas-upnp-org:device-1-0}SCPDURL': '/WANIPConnection.xml'}]}}}}},
            '{urn:schemas-upnp-org:device-1-0}presentationURL': 'http://10.0.0.1'}}}
    )
def test_deserialize_get_response(self):
    """A full SCPD GET response body deserializes into the expected dict."""
    parsed = deserialize_scpd_get_response(self.response)
    self.assertDictEqual(parsed, self.expected_parsed)
def test_deserialize_blank(self):
    """An empty response body deserializes to an empty dict instead of raising."""
    parsed = deserialize_scpd_get_response(b'')
    self.assertDictEqual(parsed, {})
def test_fail_to_deserialize_invalid_root_device(self):
    """A response whose root device element has an unexpected name raises UPnPError."""
    self.assertRaises(
        UPnPError, deserialize_scpd_get_response, self.response_bad_root_device_name
    )
def test_fail_to_deserialize_invalid_root_xmls(self):
    """A response with a malformed/unexpected root XML namespace raises UPnPError."""
    self.assertRaises(
        UPnPError, deserialize_scpd_get_response, self.response_bad_root_xmls
    )
def test_deserialize_to_device_object(self):
    """Build a Device from the parsed description and round-trip via as_dict().

    Device gets the 'device' sub-dict (fetched case-insensitively) plus the
    two accumulator lists for nested devices/services.  Note the expected
    dict uses lowercase 'udn'/'upc' at the top level while the nested
    device dicts keep the XML's 'UDN'/'UPC' casing -- presumably Device
    normalizes only its own attribute names; confirm against the Device
    class.
    """
    devices = []
    services = []
    device = Device(devices, services, **get_dict_val_case_insensitive(self.expected_parsed, "device"))
    expected_result = {
        'deviceType': 'urn:schemas-upnp-org:device:InternetGatewayDevice:1',
        'friendlyName': 'CGA4131COM',
        'manufacturer': 'Cisco',
        'manufacturerURL': 'http://www.cisco.com/',
        'modelDescription': 'CGA4131COM',
        'modelName': 'CGA4131COM',
        'modelNumber': 'CGA4131COM',
        'modelURL': 'http://www.cisco.com',
        'udn': 'uuid:11111111-2222-3333-4444-555555555556',
        'upc': 'CGA4131COM',
        'serviceList': {
            'service': {
                'serviceType': 'urn:schemas-upnp-org:service:Layer3Forwarding:1',
                'serviceId': 'urn:upnp-org:serviceId:L3Forwarding1',
                'SCPDURL': '/Layer3ForwardingSCPD.xml',
                'controlURL': '/upnp/control/Layer3Forwarding',
                'eventSubURL': '/upnp/event/Layer3Forwarding'
            }
        },
        'deviceList': {
            'device': {
                'deviceType': 'urn:schemas-upnp-org:device:WANDevice:1',
                'friendlyName': 'WANDevice:1',
                'manufacturer': 'Cisco',
                'manufacturerURL': 'http://www.cisco.com/',
                'modelDescription': 'CGA4131COM',
                'modelName': 'CGA4131COM',
                'modelNumber': 'CGA4131COM',
                'modelURL': 'http://www.cisco.com',
                'UDN': 'uuid:11111111-2222-3333-4444-555555555556',
                'UPC': 'CGA4131COM',
                'serviceList': {
                    'service': {
                        'serviceType': 'urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1',
                        'serviceId': 'urn:upnp-org:serviceId:WANCommonIFC1',
                        'SCPDURL': '/WANCommonInterfaceConfigSCPD.xml',
                        'controlURL': '/upnp/control/WANCommonInterfaceConfig0',
                        'eventSubURL': '/upnp/event/WANCommonInterfaceConfig0'
                    }
                },
                'deviceList': {
                    'device': {
                        'deviceType': 'urn:schemas-upnp-org:device:WANConnectionDevice:1',
                        'friendlyName': 'WANConnectionDevice:1',
                        'manufacturer': 'Cisco',
                        'manufacturerURL': 'http://www.cisco.com/',
                        'modelDescription': 'CGA4131COM',
                        'modelName': 'CGA4131COM',
                        'modelNumber': 'CGA4131COM',
                        'modelURL': 'http://www.cisco.com',
                        'UDN': 'uuid:11111111-2222-3333-4444-555555555555',
                        'UPC': 'CGA4131COM',
                        'serviceList': {
                            'service': {
                                'serviceType': 'urn:schemas-upnp-org:service:WANIPConnection:1',
                                'serviceId': 'urn:upnp-org:serviceId:WANIPConn1',
                                'SCPDURL': '/WANIPConnectionServiceSCPD.xml',
                                'controlURL': '/upnp/control/WANIPConnection0',
                                'eventSubURL': '/upnp/event/WANIPConnection0'
                            }
                        }
                    }
                }
            }
        }, 'presentationURL': 'http://10.1.10.1/'
    }
    self.assertDictEqual(expected_result, device.as_dict())
def test_deserialize_another_device(self):
    """Deserialize a second (Cisco CGA4131COM) description document.

    This variant nests three device levels and contains empty
    <serialNumber></serialNumber> elements; the empty elements simply do
    not appear as keys in the parsed dict.
    """
    xml_bytes = b"<?xml version=\"1.0\"?>\n<root xmlns=\"urn:schemas-upnp-org:device-1-0\">\n<specVersion>\n<major>1</major>\n<minor>0</minor>\n</specVersion>\n<device>\n<deviceType>urn:schemas-upnp-org:device:InternetGatewayDevice:1</deviceType>\n<friendlyName>CGA4131COM</friendlyName>\n<manufacturer>Cisco</manufacturer>\n<manufacturerURL>http://www.cisco.com/</manufacturerURL>\n<modelDescription>CGA4131COM</modelDescription>\n<modelName>CGA4131COM</modelName>\n<modelNumber>CGA4131COM</modelNumber>\n<modelURL>http://www.cisco.com</modelURL>\n<serialNumber></serialNumber>\n<UDN>uuid:11111111-2222-3333-4444-555555555556</UDN>\n<UPC>CGA4131COM</UPC>\n<serviceList>\n<service>\n<serviceType>urn:schemas-upnp-org:service:Layer3Forwarding:1</serviceType>\n<serviceId>urn:upnp-org:serviceId:L3Forwarding1</serviceId>\n<SCPDURL>/Layer3ForwardingSCPD.xml</SCPDURL>\n<controlURL>/upnp/control/Layer3Forwarding</controlURL>\n<eventSubURL>/upnp/event/Layer3Forwarding</eventSubURL>\n</service>\n</serviceList>\n<deviceList>\n<device>\n<deviceType>urn:schemas-upnp-org:device:WANDevice:1</deviceType>\n<friendlyName>WANDevice:1</friendlyName>\n<manufacturer>Cisco</manufacturer>\n<manufacturerURL>http://www.cisco.com/</manufacturerURL>\n<modelDescription>CGA4131COM</modelDescription>\n<modelName>CGA4131COM</modelName>\n<modelNumber>CGA4131COM</modelNumber>\n<modelURL>http://www.cisco.com</modelURL>\n<serialNumber></serialNumber>\n<UDN>uuid:ebf5a0a0-1dd1-11b2-a92f-603d266f9915</UDN>\n<UPC>CGA4131COM</UPC>\n<serviceList>\n<service>\n<serviceType>urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1</serviceType>\n<serviceId>urn:upnp-org:serviceId:WANCommonIFC1</serviceId>\n<SCPDURL>/WANCommonInterfaceConfigSCPD.xml</SCPDURL>\n<controlURL>/upnp/control/WANCommonInterfaceConfig0</controlURL>\n<eventSubURL>/upnp/event/WANCommonInterfaceConfig0</eventSubURL>\n</service>\n</serviceList>\n<deviceList>\n    <device>\n        <deviceType>urn:schemas-upnp-org:device:WANConnectionDevice:1</deviceType>\n        <friendlyName>WANConnectionDevice:1</friendlyName>\n        <manufacturer>Cisco</manufacturer>\n        <manufacturerURL>http://www.cisco.com/</manufacturerURL>\n        <modelDescription>CGA4131COM</modelDescription>\n        <modelName>CGA4131COM</modelName>\n        <modelNumber>CGA4131COM</modelNumber>\n        <modelURL>http://www.cisco.com</modelURL>\n        <serialNumber></serialNumber>\n        <UDN>uuid:11111111-2222-3333-4444-555555555555</UDN>\n        <UPC>CGA4131COM</UPC>\n        <serviceList>\n            <service>\n                <serviceType>urn:schemas-upnp-org:service:WANIPConnection:1</serviceType>\n                <serviceId>urn:upnp-org:serviceId:WANIPConn1</serviceId>\n                <SCPDURL>/WANIPConnectionServiceSCPD.xml</SCPDURL>\n                <controlURL>/upnp/control/WANIPConnection0</controlURL>\n                <eventSubURL>/upnp/event/WANIPConnection0</eventSubURL>\n            </service>\n        </serviceList>\n    </device>\n</deviceList>\n</device>\n</deviceList>\n<presentationURL>http://10.1.10.1/</presentationURL></device>\n</root>\n"
    expected_parsed = {
        'specVersion': {'major': '1', 'minor': '0'},
        'device': {
            'deviceType': 'urn:schemas-upnp-org:device:InternetGatewayDevice:1',
            'friendlyName': 'CGA4131COM',
            'manufacturer': 'Cisco',
            'manufacturerURL': 'http://www.cisco.com/',
            'modelDescription': 'CGA4131COM',
            'modelName': 'CGA4131COM',
            'modelNumber': 'CGA4131COM',
            'modelURL': 'http://www.cisco.com',
            'UDN': 'uuid:11111111-2222-3333-4444-555555555556',
            'UPC': 'CGA4131COM',
            'serviceList': {
                'service': {
                    'serviceType': 'urn:schemas-upnp-org:service:Layer3Forwarding:1',
                    'serviceId': 'urn:upnp-org:serviceId:L3Forwarding1',
                    'SCPDURL': '/Layer3ForwardingSCPD.xml',
                    'controlURL': '/upnp/control/Layer3Forwarding',
                    'eventSubURL': '/upnp/event/Layer3Forwarding'
                }
            },
            'deviceList': {
                'device': {
                    'deviceType': 'urn:schemas-upnp-org:device:WANDevice:1',
                    'friendlyName': 'WANDevice:1',
                    'manufacturer': 'Cisco',
                    'manufacturerURL': 'http://www.cisco.com/',
                    'modelDescription': 'CGA4131COM',
                    'modelName': 'CGA4131COM',
                    'modelNumber': 'CGA4131COM',
                    'modelURL': 'http://www.cisco.com',
                    'UDN': 'uuid:ebf5a0a0-1dd1-11b2-a92f-603d266f9915',
                    'UPC': 'CGA4131COM',
                    'serviceList': {
                        'service': {
                            'serviceType': 'urn:schemas-upnp-org:service:WANCommonInterfaceConfig:1',
                            'serviceId': 'urn:upnp-org:serviceId:WANCommonIFC1',
                            'SCPDURL': '/WANCommonInterfaceConfigSCPD.xml',
                            'controlURL': '/upnp/control/WANCommonInterfaceConfig0',
                            'eventSubURL': '/upnp/event/WANCommonInterfaceConfig0'
                        }
                    },
                    'deviceList': {
                        'device': {
                            'deviceType': 'urn:schemas-upnp-org:device:WANConnectionDevice:1',
                            'friendlyName': 'WANConnectionDevice:1',
                            'manufacturer': 'Cisco',
                            'manufacturerURL': 'http://www.cisco.com/',
                            'modelDescription': 'CGA4131COM',
                            'modelName': 'CGA4131COM',
                            'modelNumber': 'CGA4131COM',
                            'modelURL': 'http://www.cisco.com',
                            'UDN': 'uuid:11111111-2222-3333-4444-555555555555',
                            'UPC': 'CGA4131COM',
                            'serviceList': {
                                'service': {
                                    'serviceType': 'urn:schemas-upnp-org:service:WANIPConnection:1',
                                    'serviceId': 'urn:upnp-org:serviceId:WANIPConn1',
                                    'SCPDURL': '/WANIPConnectionServiceSCPD.xml',
                                    'controlURL': '/upnp/control/WANIPConnection0',
                                    'eventSubURL': '/upnp/event/WANIPConnection0'
                                }
                            }
                        }
                    }
                }
            },
            'presentationURL': 'http://10.1.10.1/'
        }
    }
    self.assertDictEqual(expected_parsed, deserialize_scpd_get_response(xml_bytes))
| 107.991758 | 4,461 | 0.599354 | 4,404 | 39,309 | 5.324932 | 0.046094 | 0.027035 | 0.028911 | 0.026268 | 0.938638 | 0.925163 | 0.91753 | 0.889941 | 0.864782 | 0.857021 | 0 | 0.062812 | 0.236155 | 39,309 | 363 | 4,462 | 108.289256 | 0.718211 | 0 | 0 | 0.618911 | 0 | 0.025788 | 0.695337 | 0.251444 | 0 | 0 | 0 | 0 | 0.031519 | 1 | 0.022923 | false | 0 | 0.017192 | 0 | 0.057307 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
5eb2bd02ae912daf9e8739c88ec760070f72d8b8 | 47,362 | py | Python | RetangulosEngine.py | carolinamcg/RectanglesrFitting | 1f51a8b8becc64fcb06b26a874c0144e2b335135 | [
"MIT"
] | null | null | null | RetangulosEngine.py | carolinamcg/RectanglesrFitting | 1f51a8b8becc64fcb06b26a874c0144e2b335135 | [
"MIT"
] | null | null | null | RetangulosEngine.py | carolinamcg/RectanglesrFitting | 1f51a8b8becc64fcb06b26a874c0144e2b335135 | [
"MIT"
] | null | null | null | #-*-coding:utf-8 -*-'''
'''
Created on 29/11/2016
@author: Carolina
'''
class Figura():
def __init__(self, nome, l, a, r):
self.nome = nome
self.largura = l
self.altura = a
self.rodar = r
self.posx = -1
self.posy = -1
def move(self, px, py):
self.posx = px
self.posy = py
def getnome(self):
return self.nome
def setnome(self, nome):
self.nome = nome
def getposx(self):
return self.posx
def getposx2(self): # Correspondente ao canto inferior direito
if self.posx == -1:
return -1
else:
return self.posx + self.largura - 1
def setposx(self, px):
self.posx = px
def getposy(self):
return self.posy
def getposy2(self): # Correspondente ao canto inferior direito
if self.posy == -1:
return -1
else:
return self.posy + self.altura - 1
def setposy(self, py):
self.posy = py
def getwidth(self):
return self.largura
def setwidth(self, w):
self.largura = w
def getheight(self):
return self.altura
def setheight(self, h):
self.altura = h
def getArea(self):
return self.largura * self.altura
def getrodar (self):
return self.rodar
class RetangulosEngine:
def __init__(self, l, a):
self.largura = l
self.altura = a
self.figuras_colocadas = {}
self.figuras_nao_colocadas = {}
self.areas = {}
self.rest_DIR = []
self.rest_ESQ = []
self.rest_CIM = []
self.rest_BX = []
self.rest_CLD = []
self.rest_SEP = []
self.rest_DENTRO = []
self.rest_FORA = []
self.desfrsv=[]
self.figuras_anteriores={}
self.s = Stack()
t= ('first', 'DIM', 'no name')
self.s.push(t)
def novo_rect(self, nome, largura, altura, rodar):
if nome in self.figuras_nao_colocadas or nome in self.figuras_colocadas:
return "NÃO"
else:
f = Figura(nome, largura, altura, rodar)
self.figuras_nao_colocadas[nome] = f
return "SIM"
    def coloca(self, nome, px, py):
        """Place rectangle `nome` at (px, py), or move it there if already placed.

        Returns "SIM" on success and "NÃO" when the name is unknown or the
        target position violates any placement restriction.  Every successful
        placement is pushed onto the undo stack as (figura, 'COL', nome).
        """
        if nome in self.figuras_nao_colocadas:
            f = self.figuras_nao_colocadas[nome]
            if self.__valida_coloca(f, px, py) == False:
                return "NÃO"
            else:
                f.setposx(px)
                f.setposy(py)
                # first placement: promote the figure to the "placed" dict
                self.figuras_colocadas[nome] = f
                del self.figuras_nao_colocadas[nome]
                t = (f, 'COL', nome)
                self.s.push(t)
        elif nome in self.figuras_colocadas:  # here we change the rectangle's position
            f = self.figuras_colocadas[nome]
            if self.__valida_coloca(f, px, py) == False:
                return "NÃO"
            else:
                g = self.figuras_colocadas[nome]
                # figuras_anteriores keeps each figure's previous positions so
                # that undo can restore them: before moving, append the current
                # coordinates under the figure's name.  If this is not the first
                # move we just append; otherwise we create the entry.  The last
                # tuple in the list is therefore always the position the figure
                # held right before this move.
                if nome in self.figuras_anteriores:
                    self.figuras_anteriores[nome] += [(g.getposx(), g.getposy())]
                else:
                    self.figuras_anteriores[nome] = [(g.getposx(), g.getposy())]
                f.setposx(px)
                f.setposy(py)
                t = (f, 'COL', nome)
                self.s.push(t)
        elif nome not in self.figuras_nao_colocadas and nome not in self.figuras_colocadas:
            return 'NÃO'
        return "SIM"
def __valida_coloca(self, fig, posx, posy):
if not self.__valida_rest_dentro_sup(fig, posx, posy):
return False
if not self.__nao_sobrepoe(fig, posx, posy):
return False #vai sobrepor a nenhum outro rectangulo e cai dentro da dimensao
if not self.__valida_rest_dir(fig, posx, posy):
return False # valida a restrição do comando DIR
if not self.__valida_rest_esq(fig, posx, posy):
return False # valida a restrição do comando ESQ
if not self.__valida_rest_cim(fig, posx, posy):
return False # valida a restrição do comando CIM
if not self.__valida_rest_bx(fig, posx, posy):
return False # valida a restrição do comando BX
if not self.__valida_rest_CLD(fig, posx, posy):
return False # valida a restrição do comando CLD
if not self.__valida_rest_SEP(fig, posx, posy):
return False # valida a restrição do comando SEP
if not self.__valida_rest_dentro(fig, posx, posy):
return False # valida a restrição do comando DENTRO
if not self.__valida_rest_fora(fig, posx, posy):
return False # valida a restrição do comando FORA
return True
def __valida_rest_dentro_sup(self, f, x, y):
if x >= 1 and y >= 1 and x + f.getwidth() - 1 <= self.largura and y + f.getheight() - 1 <= self.altura:
return True
else:
return False
def __valida_rest_dir(self, fig, x, y):
valido = True
for (fA, fB) in self.rest_DIR: # cada elemento desta lista é um tuplo do genero (figA,figB) em que a figA fica à direita de figB
if fig.getnome() == fA: # encontrei a figura na lista de restrições como uma que tinha de estar à direita
if fB in self.figuras_colocadas: # pois se fB não estiver colocada não vai haver qualquer problema em colocar fA
figuraB = self.figuras_colocadas[fB]
# testar que a instancia fig está à direita da instancia figuraB
if x <= figuraB.getposx2():
valido = False
break
elif fig.getnome() == fB: # encontrei a figura na lista de restrições como uma que tinha de estar à esquerda
if fA in self.figuras_colocadas:
figuraA = self.figuras_colocadas[fA]
# testar que a instancia fig está à esquerda da instancia figuraA
if figuraA.getposx() <= x + fig.getwidth() - 1:
valido = False
break
return valido
def __valida_rest_esq(self, fig, x, y):
valido = True
for (fA, fB) in self.rest_ESQ: # cada elemento desta lista é um tuplo do genero (figA,figB) em que a figA fica à esquerda de figB
if fig.getnome() == fA: # encontrei a figura na lista de restrições como uma que tinha de estar à esq
if fB in self.figuras_colocadas: # pois se fB não estiver colocada não vai haver qualquer problema em colocar fA
figuraB = self.figuras_colocadas[fB]
# testar que a instancia fig está à esquerda da instancia figuraB
if figuraB.getposx() <= x + fig.getwidth() - 1:
valido = False
break
elif fig.getnome() == fB: # encontrei a figura na lista de restrições como uma que tinha de estar à direita
if fA in self.figuras_colocadas:
figuraA = self.figuras_colocadas[fA]
# testar que a instancia fig está à direita da instancia figuraA
if x <= figuraA.getposx2():
valido = False
break
return valido
def __valida_rest_cim(self, fig, x, y):
valido = True
for (fA, fB) in self.rest_CIM: # cada elemento desta lista é um tuplo do genero (figA,figB) em que a figA fica acima de figB
if fig.getnome() == fA: # encontrei a figura na lista de restrições como uma que tinha de estar acima de outra
if fB in self.figuras_colocadas: # pois se fB não estiver colocada não vai haver qualquer problema em colocar fA
figuraB = self.figuras_colocadas[fB]
# testar que a instancia fig está acima da instancia figuraB
if y + fig.getheight() - 1 >= figuraB.getposy():
valido = False
break
elif fig.getnome() == fB: # encontrei a figura na lista de restrições como uma que tinha de estar abaixo
if fA in self.figuras_colocadas:
figuraA = self.figuras_colocadas[fA]
# testar que a instancia fig está abaixo da instancia figuraA
if y <= figuraA.getposy2():
valido = False
break
return valido
def __valida_rest_bx(self, fig, x, y):
valido = True
for (fA, fB) in self.rest_BX: # cada elemento desta lista é um tuplo do genero (figA,figB) em que a figA fica abaixo de figB
if fig.getnome() == fA: # encontrei a figura na lista de restrições como uma que tinha de estar abaixo
if fB in self.figuras_colocadas: # pois se fB não estiver colocada não vai haver qualquer problema em colocar fA
figuraB = self.figuras_colocadas[fB]
# testar que a instancia fig está abaixo da instancia figuraB
if y <= figuraB.getposy2():
valido = False
break
elif fig.getnome() == fB: # encontrei a figura na lista de restrições como uma que tinha de estar acima
if fA in self.figuras_colocadas:
figuraA = self.figuras_colocadas[fA]
# testar que a instancia fig está acima da instancia figuraA
if y +fig.getheight() -1 >= figuraA.getposy():
valido = False
break
return valido
def __valida_rest_CLD(self, fig, x, y): #verificador da restrição em colado
valido = False
if len(self.rest_CLD)==0: #se nao existir nenhuma restriçao guardada na lista
return True
for (fA, fB) in self.rest_CLD: # cada elemento desta lista é um tuplo do genero (figA,figB) em que a um lado da figA fica colado a um lado da figB
if fig.getnome() == fA: # encontrei a figura na lista de restrições como uma que tinha de estar colada a figB
if fB in self.figuras_colocadas: # verifica se fB já foi colocada
if fA in self.figuras_colocadas: #se estivermos a mudar a posição de uma figura ja colocada
figuraB = self.figuras_colocadas[fB]
figuraA = self.figuras_colocadas[fA]
if x == figuraB.getposx() + figuraB.getwidth() or x + figuraA.getwidth()-2 == figuraB.getposx(): #verifica um dos lados verticais em posições seguidas
if y<=figuraB.getposy()<=y+figuraA.getheight()-1 or y<=figuraB.getposy()+figuraB.getheight()-1<=y+figuraA.getheight()-1 or figuraB.getposy()<=y<=figuraB.getposy() + figuraB.getheight()-1 or figuraB.getposy()<=figuraA.getheight()+y - 1<=figuraB.getposy() + figuraB.getheight()-1: #verifica se esses lados estão mesmo colados
return True
break
if y == figuraB.getposy() + figuraB.getheight() or y + figuraA.getheight()-1 == figuraB.getposy()-1: #mesmo raciocínio para os lados horizontais
if x<=figuraB.getposx()<=x+figuraA.getwidth()-1 or x<=figuraB.getposx()+figuraB.getwidth()-1<=x+figuraA.getwidth()-1 or figuraB.getposx()<=x<=figuraB.getposx() + figuraB.getwith()-1 or figuraB.getposx()<=figuraA.getwidth()+x-1<=figuraB.getposx() + figuraB.getwith()-1:
return True
break
elif fA in self.figuras_nao_colocadas:
figuraB = self.figuras_colocadas[fB]
figuraA = self.figuras_nao_colocadas[fA]
if x == figuraB.getposx() + figuraB.getwidth() or x + figuraA.getwidth()-2 == figuraB.getposx():
if y<=figuraB.getposy()<=y+figuraA.getheight()-1 or y<=figuraB.getposy()+figuraB.getheight()-1<=y+figuraA.getheight()-1 or figuraB.getposy()<=y<=figuraB.getposy() + figuraB.getheight()-1 or figuraB.getposy()<=figuraA.getheight()+y - 1<=figuraB.getposy() + figuraB.getheight()-1:
return True
break
if y == figuraB.getposy() + figuraB.getheight() or y + figuraA.getheight()-1 == figuraB.getposy()-1:
if x<=figuraB.getposx()<=x+figuraA.getwidth()-1 or x<=figuraB.getposx()+figuraB.getwidth()-1<=x+figuraA.getwidth()-1 or figuraB.getposx()<=x<=figuraB.getposx() + figuraB.getwith()-1 or figuraB.getposx()<=figuraA.getwidth()+x-1<=figuraB.getposx() + figuraB.getwith()-1:
return True
break
elif fB in self.figuras_nao_colocadas: # verifica se fB ainda não foi colocada
return True
elif fig.getnome() == fB: # encontrei a figura na lista de restrições como uma que tinha de estar colada
if fA in self.figuras_colocadas: # verifica se fA já foi colocada
if fB in self.figuras_colocadas:
figuraA = self.figuras_colocadas[fA]
figuraB = self.figuras_colocadas[fB]
if x == figuraA.getposx() + figuraA.getwidth() or x + figuraB.getwidth()-2 == figuraA.getposx():
if y<=figuraA.getposy()<=y+figuraB.getheight()-1 or y<=figuraA.getposy()+figuraA.getheight()-1<=y+figuraB.getheight()-1 or figuraA.getposy()<=y<=figuraA.getposy() + figuraA.getheight()-1 or figuraA.getposy()<=figuraB.getheight()+y - 1<=figuraA.getposy() + figuraA.getheight()-1:
return True
break
if y == figuraA.getposy() + figuraA.getheight() or y + figuraB.getheight()-1 == figuraA.getposy()-1:
if x<=figuraA.getposx()<=x+figuraB.getwidth()-1 or x<=figuraA.getposx()+figuraA.getwidth()-1<=x+figuraB.getwidth()-1 or figuraA.getposx()<=x<=figuraA.getposx() + figuraA.getwith()-1 or figuraA.getposx()<=figuraB.getwidth()+x-1<=figuraA.getposx() + figuraA.getwith()-1:
return True
break
if fB in self.figuras_nao_colocadas:
figuraA = self.figuras_colocadas[fA]
figuraB = self.figuras_nao_colocadas[fB]
if x == figuraA.getposx() + figuraA.getwidth() or x + figuraB.getwidth()-2 == figuraA.getposx():
if y<=figuraA.getposy()<=y+figuraB.getheight()-1 or y<=figuraA.getposy()+figuraA.getheight()-1<=y+figuraB.getheight()-1 or figuraA.getposy()<=y<=figuraA.getposy() + figuraA.getheight()-1 or figuraA.getposy()<=figuraB.getheight()+y - 1<=figuraA.getposy() + figuraA.getheight()-1:
return True
break
if y == figuraA.getposy() + figuraA.getheight() or y + figuraB.getheight()-1 == figuraA.getposy()-1:
if x<=figuraA.getposx()<=x+figuraB.getwidth()-1 or x<=figuraA.getposx()+figuraA.getwidth()-1<=x+figuraB.getwidth()-1 or figuraA.getposx()<=x<=figuraA.getposx() + figuraA.getwith()-1 or figuraA.getposx()<=figuraB.getwidth()+x-1<=figuraA.getposx() + figuraA.getwith()-1:
return True
break
elif fA in self.figuras_nao_colocadas: # verifica se fA ainda não foi colocada
return True
elif fig.getnome() != fA and fig.getnome() != fB: #verifica se fig pertence a alguma das restrições da lista
return True
return valido #se não estiver de acordo com a restrição, retorna False
def __valida_rest_SEP(self, fig, x, y): # validator for the SEP ("separated") restriction
    """Return False when placing figure ``fig`` at (x, y) would make it share an
    edge with a figure it must stay separated from; True otherwise.

    NOTE(review): indentation was reconstructed from a whitespace-mangled dump;
    ``break`` statements that directly follow a ``return`` are unreachable dead
    code and are kept verbatim.
    NOTE(review): ``getwith()`` looks like a typo for ``getwidth()`` -- it only
    works if Figura really defines a ``getwith`` method; confirm.
    NOTE(review): the ``-2`` in the vertical-adjacency tests differs from the
    ``-1`` used in the horizontal tests -- possible off-by-one; confirm.
    """
    valido = True
    # Each element of rest_SEP is a tuple (figA, figB): figA must not touch figB.
    for (fA, fB) in self.rest_SEP:
        if fig.getnome() == fA: # fig is the A side of a separation restriction
            if fB in self.figuras_colocadas: # the other figure is already on the board
                if fA in self.figuras_colocadas: # we are moving an already-placed figure
                    figuraB = self.figuras_colocadas[fB]
                    figuraA = self.figuras_colocadas[fA]
                    # Candidate column flush with figuraB's right edge, or candidate's
                    # right edge flush with figuraB's left edge.
                    if x == figuraB.getposx() + figuraB.getwidth() or x + figuraA.getwidth()-2 == figuraB.getposx():
                        # Vertical ranges overlap, so the flush sides actually touch.
                        if y<=figuraB.getposy()<=y+figuraA.getheight()-1 or y<=figuraB.getposy()+figuraB.getheight()-1<=y+figuraA.getheight()-1 or figuraB.getposy()<=y<=figuraB.getposy() + figuraB.getheight()-1 or figuraB.getposy()<=figuraA.getheight()+y - 1<=figuraB.getposy() + figuraB.getheight()-1:
                            return False
                            break # unreachable (follows return)
                    # Same reasoning for horizontally adjacent rows.
                    if y == figuraB.getposy() + figuraB.getheight() or y + figuraA.getheight()-1 == figuraB.getposy()-1:
                        if x<=figuraB.getposx()<=x+figuraA.getwidth()-1 or x<=figuraB.getposx()+figuraB.getwidth()-1<=x+figuraA.getwidth()-1 or figuraB.getposx()<=x<=figuraB.getposx() + figuraB.getwith()-1 or figuraB.getposx()<=figuraA.getwidth()+x-1<=figuraB.getposx() + figuraB.getwith()-1:
                            return False
                            break # unreachable (follows return)
                elif fA in self.figuras_nao_colocadas: # placing fA for the first time
                    figuraB = self.figuras_colocadas[fB]
                    figuraA = self.figuras_nao_colocadas[fA]
                    if x == figuraB.getposx() + figuraB.getwidth() or x + figuraA.getwidth()-2 == figuraB.getposx():
                        if y<=figuraB.getposy()<=y+figuraA.getheight()-1 or y<=figuraB.getposy()+figuraB.getheight()-1<=y+figuraA.getheight()-1 or figuraB.getposy()<=y<=figuraB.getposy() + figuraB.getheight()-1 or figuraB.getposy()<=figuraA.getheight()+y - 1<=figuraB.getposy() + figuraB.getheight()-1:
                            return False
                            break # unreachable (follows return)
                    if y == figuraB.getposy() + figuraB.getheight() or y + figuraA.getheight()-1 == figuraB.getposy()-1:
                        if x<=figuraB.getposx()<=x+figuraA.getwidth()-1 or x<=figuraB.getposx()+figuraB.getwidth()-1<=x+figuraA.getwidth()-1 or figuraB.getposx()<=x<=figuraB.getposx() + figuraB.getwith()-1 or figuraB.getposx()<=figuraA.getwidth()+x-1<=figuraB.getposx() + figuraB.getwith()-1:
                            return False
                            break # unreachable (follows return)
            elif fB in self.figuras_nao_colocadas: # fB not placed yet: nothing can be violated
                return True
        elif fig.getnome() == fB: # fig is the B side of a separation restriction
            if fA in self.figuras_colocadas: # the other figure is already on the board
                if fB in self.figuras_colocadas: # we are moving an already-placed figure
                    figuraA = self.figuras_colocadas[fA]
                    figuraB = self.figuras_colocadas[fB]
                    if x == figuraA.getposx() + figuraA.getwidth() or x + figuraB.getwidth()-2 == figuraA.getposx():
                        if y<=figuraA.getposy()<=y+figuraB.getheight()-1 or y<=figuraA.getposy()+figuraA.getheight()-1<=y+figuraB.getheight()-1 or figuraA.getposy()<=y<=figuraA.getposy() + figuraA.getheight()-1 or figuraA.getposy()<=figuraB.getheight()+y - 1<=figuraA.getposy() + figuraA.getheight()-1:
                            return False
                            break # unreachable (follows return)
                    if y == figuraA.getposy() + figuraA.getheight() or y + figuraB.getheight()-1 == figuraA.getposy()-1:
                        if x<=figuraA.getposx()<=x+figuraB.getwidth()-1 or x<=figuraA.getposx()+figuraA.getwidth()-1<=x+figuraB.getwidth()-1 or figuraA.getposx()<=x<=figuraA.getposx() + figuraA.getwith()-1 or figuraA.getposx()<=figuraB.getwidth()+x-1<=figuraA.getposx() + figuraA.getwith()-1:
                            return False
                            break # unreachable (follows return)
                if fB in self.figuras_nao_colocadas: # placing fB for the first time
                    figuraA = self.figuras_colocadas[fA]
                    figuraB = self.figuras_nao_colocadas[fB]
                    if x == figuraA.getposx() + figuraA.getwidth() or x + figuraB.getwidth()-2 == figuraA.getposx():
                        if y<=figuraA.getposy()<=y+figuraB.getheight()-1 or y<=figuraA.getposy()+figuraA.getheight()-1<=y+figuraB.getheight()-1 or figuraA.getposy()<=y<=figuraA.getposy() + figuraA.getheight()-1 or figuraA.getposy()<=figuraB.getheight()+y - 1<=figuraA.getposy() + figuraA.getheight()-1:
                            return False
                            break # unreachable (follows return)
                    if y == figuraA.getposy() + figuraA.getheight() or y + figuraB.getheight()-1 == figuraA.getposy()-1:
                        if x<=figuraA.getposx()<=x+figuraB.getwidth()-1 or x<=figuraA.getposx()+figuraA.getwidth()-1<=x+figuraB.getwidth()-1 or figuraA.getposx()<=x<=figuraA.getposx() + figuraA.getwith()-1 or figuraA.getposx()<=figuraB.getwidth()+x-1<=figuraA.getposx() + figuraA.getwith()-1:
                            return False
                            break # unreachable (follows return)
            elif fA in self.figuras_nao_colocadas: # fA not placed yet: nothing can be violated
                return True
    return valido # no separation restriction was violated
def __valida_rest_dentro(self, fig, x, y):
valido = True
for (f, a) in self.rest_DENTRO: # cada elemento desta lista é um tuplo do genero (f,a) em que a f tem de estar dentro de a
if fig.getnome() == f:
if a in self.areas: # pois se a não estiver definida não vai haver qualquer problema em colocar fig
if f in self.figuras_colocadas:
a = self.areas[a]
fA=self.figuras_colocadas[f]
# testar que a instancia fig tem de estar dentro de a
if x >= a.getx() and y >= a.gety() and x + fA.getwidth() -1 <= a.getx() + a.gettamx() - 1 and y + fA.getheight() - 1 <= a.gety() + a.gettamy() - 1:
valido = True
break
else:
valido = False
break
if f in self.figuras_nao_colocadas:
a = self.areas[a]
fA=self.figuras_nao_colocadas[f]
if x >= a.getx() and y >= a.gety() and x + fA.getwidth() -1 <= a.getx() + a.gettamx() - 1 and y + fA.getheight() - 1 <= a.gety() + a.gettamy() - 1:
valido = True
break
else:
valido = False
break
return valido
def __valida_rest_fora(self, fig, x, y):
    """Return False when placing ``fig`` at (x, y) violates a FORA ("outside")
    restriction, i.e. the figure would fall on an area it must avoid.

    NOTE(review): the test below ORs the per-axis interval checks, so a figure
    whose x-range overlaps the area is rejected even when its y-range is
    completely disjoint (and vice versa).  That is stricter than a true
    rectangle-overlap test -- confirm this is the intended semantics.
    """
    valido = True
    # Each element of rest_FORA is a tuple (f, a): figure f must stay outside area a.
    for (f, a) in self.rest_FORA:
        if fig.getnome() == f:
            if a in self.areas: # if the area is undefined there is nothing to violate
                if f in self.figuras_colocadas:
                    a = self.areas[a] # rebind: a is now the Area object
                    fA=self.figuras_colocadas[f]
                    if (x >= a.getx() and x <= a.getx() + a.gettamx() -1) or (y >= a.gety() and y<=a.gety() + a.gettamy()-1) or (x + fA.getwidth() -1 >= a.getx() and x + fA.getwidth() -1 <= a.getx() + a.gettamx() - 1) or (y + fA.getheight() -1 >= a.gety() and y + fA.getheight() - 1 <= a.gety() + a.gettamy() - 1):
                        valido = False
                        break
                    else:
                        valido=True
                        break
                elif f in self.figuras_nao_colocadas: # same check for a not-yet-placed figure
                    a = self.areas[a]
                    fA=self.figuras_nao_colocadas[f]
                    if (x >= a.getx() and x <= a.getx() + a.gettamx() -1) or (y >= a.gety() and y<=a.gety() + a.gettamy()-1) or (x + fA.getwidth() -1 >= a.getx() and x + fA.getwidth() -1 <= a.getx() + a.gettamx() - 1) or (y + fA.getheight() -1 >= a.gety() and y + fA.getheight() - 1 <= a.gety() + a.gettamy() - 1):
                        valido = False
                        break
                    else:
                        valido=True
                        break
    return valido
def rest_dir(self, nomeA, nomeB):
    """Register the restriction "nomeA to the right of (a direita de) nomeB".

    Returns 'SIM' when the restriction is accepted and recorded, 'NÃO' when it
    contradicts an existing "left of" restriction or the two figures are
    already placed in an incompatible way.  When at least one figure is not
    placed yet, the restriction is recorded unconditionally; the same
    reasoning is used by the sibling rest_* methods.
    """
    par = (nomeA, nomeB)
    if par in self.rest_ESQ:  # would contradict an existing "left of" restriction
        return 'NÃO'
    if nomeA in self.figuras_colocadas and nomeB in self.figuras_colocadas:
        direita = self.figuras_colocadas[nomeA]
        esquerda = self.figuras_colocadas[nomeB]
        # nomeA must start strictly past nomeB's right edge.
        if direita.getposx() <= esquerda.getposx2():
            return 'NÃO'
        self.rest_DIR.append(par)
        return 'SIM'
    # At least one figure is not placed yet: just record the restriction.
    self.rest_DIR.append(par)
    return "SIM"
def rest_esq(self, nomeA, nomeB):
    """Register the restriction "nomeA to the left of (a esquerda de) nomeB".

    Returns 'SIM' when the restriction is accepted and recorded, 'NÃO' when it
    contradicts an existing "right of" restriction or the figures are already
    placed in an incompatible way.
    """
    par = (nomeA, nomeB)
    if par in self.rest_DIR:  # would contradict an existing "right of" restriction
        return 'NÃO'
    if nomeA in self.figuras_colocadas and nomeB in self.figuras_colocadas:
        esquerda = self.figuras_colocadas[nomeA]
        direita = self.figuras_colocadas[nomeB]
        # nomeB must start strictly past nomeA's right edge.
        if direita.getposx() <= esquerda.getposx2():
            return 'NÃO'
        self.rest_ESQ.append(par)
        return 'SIM'
    # At least one figure is not placed yet: just record the restriction.
    self.rest_ESQ.append(par)
    return "SIM"
def rest_cim(self, nomeA, nomeB):
    """Register the restriction "nomeA above (acima de) nomeB".

    Returns 'SIM' when the restriction is accepted and recorded, 'NÃO' when it
    contradicts an existing "below" restriction or the figures are already
    placed in an incompatible way.
    """
    par = (nomeA, nomeB)
    if par in self.rest_BX:  # would contradict an existing "below" restriction
        return 'NÃO'
    if nomeA in self.figuras_colocadas and nomeB in self.figuras_colocadas:
        de_cima = self.figuras_colocadas[nomeA]
        de_baixo = self.figuras_colocadas[nomeB]
        # nomeA's bottom edge must end strictly above nomeB's top edge.
        if de_cima.getposy2() >= de_baixo.getposy():
            return 'NÃO'
        self.rest_CIM.append(par)
        return 'SIM'
    # At least one figure is not placed yet: just record the restriction.
    self.rest_CIM.append(par)
    return "SIM"
def rest_bx(self, nomeA, nomeB):
    """Register the restriction "nomeA below (abaixo de) nomeB".

    Returns 'SIM' when the restriction is accepted and recorded, 'NÃO' when it
    contradicts an existing "above" restriction or the figures are already
    placed in an incompatible way.
    """
    par = (nomeA, nomeB)
    if par in self.rest_CIM:  # would contradict an existing "above" restriction
        return 'NÃO'
    if nomeA in self.figuras_colocadas and nomeB in self.figuras_colocadas:
        de_baixo = self.figuras_colocadas[nomeA]
        de_cima = self.figuras_colocadas[nomeB]
        # nomeA must start strictly past nomeB's bottom edge.
        if de_baixo.getposy() <= de_cima.getposy2():
            return 'NÃO'
        self.rest_BX.append(par)
        return 'SIM'
    # At least one figure is not placed yet: just record the restriction.
    self.rest_BX.append(par)
    return "SIM"
def rest_cld(self, nomeA, nomeB):
    """Register the restriction "nomeA glued to (colado a) nomeB".

    Returns 'SIM' when the restriction is accepted and recorded, 'NÃO' when it
    contradicts an existing "separated" restriction or the two figures are
    already placed without touching.

    NOTE(review): indentation reconstructed from a whitespace-mangled dump;
    the attachment of the final ``else`` is ambiguous.  As reconstructed,
    when an adjacency test passes but the touch test fails, control falls
    through to the bottom append and returns "SIM" -- confirm against the
    original file.
    """
    tup = (nomeA, nomeB)
    if tup in self.rest_SEP: # contradiction with an existing SEP restriction
        return 'NÃO'
    if nomeA in self.figuras_colocadas: # both figures already placed: check now
        if nomeB in self.figuras_colocadas:
            figuraA=self.figuras_colocadas[nomeA]
            figuraB=self.figuras_colocadas[nomeB]
            x=figuraA.getposx()
            y=figuraA.getposy()
            if x == figuraB.getposx() + figuraB.getwidth() or x + figuraA.getwidth()-2 == figuraB.getposx(): # vertical sides in consecutive columns
                if y<=figuraB.getposy()<=y+figuraA.getheight()-1 or y<=figuraB.getposy()+figuraB.getheight()-1<=y+figuraA.getheight()-1 or figuraB.getposy()<=y<=figuraB.getposy() + figuraB.getheight()-1 or figuraB.getposy()<=figuraA.getheight()+y - 1<=figuraB.getposy() + figuraB.getheight()-1: # those sides really touch
                    self.rest_CLD.append(tup)
                    return 'SIM'
            if y == figuraB.getposy() + figuraB.getheight() or y + figuraA.getheight()-1 == figuraB.getposy()-1: # same reasoning for the horizontal sides
                if x<=figuraB.getposx()<=x+figuraA.getwidth()-1 or x<=figuraB.getposx()+figuraB.getwidth()-1<=x+figuraA.getwidth()-1 or figuraB.getposx()<=x<=figuraB.getposx() + figuraB.getwith()-1 or figuraB.getposx()<=figuraA.getwidth()+x-1<=figuraB.getposx() + figuraB.getwith()-1:
                    self.rest_CLD.append(tup)
                    return 'SIM'
            else: # placed but not adjacent: the figures cannot be glued
                return 'NÃO'
    self.rest_CLD.append(tup) # at least one figure not placed yet: record without checking
    return "SIM"
def rest_sep(self, nomeA, nomeB):
    """Register the restriction "nomeA separated from (separado de) nomeB".

    Returns 'SIM' when the restriction is accepted and recorded, 'NÃO' when it
    contradicts an existing "glued" restriction or the two figures are already
    placed touching each other.

    NOTE(review): indentation reconstructed from a whitespace-mangled dump;
    the attachment of the final ``else`` is ambiguous.
    NOTE(review): the ``self.rest_CLD.append(tup)`` calls on the rejection
    paths look like a copy-paste slip from rest_cld -- they record a "glued"
    restriction that was never requested; confirm.
    """
    tup = (nomeA, nomeB)
    if tup in self.rest_CLD: # contradiction with an existing "glued" restriction
        return 'NÃO'
    if nomeA in self.figuras_colocadas: # both figures already placed: check now
        if nomeB in self.figuras_colocadas:
            figuraA=self.figuras_colocadas[nomeA]
            figuraB=self.figuras_colocadas[nomeB]
            x=figuraA.getposx()
            y=figuraA.getposy()
            if x == figuraB.getposx() + figuraB.getwidth() or x + figuraA.getwidth()-2 == figuraB.getposx(): # vertical sides in consecutive columns
                if y<=figuraB.getposy()<=y+figuraA.getheight()-1 or y<=figuraB.getposy()+figuraB.getheight()-1<=y+figuraA.getheight()-1 or figuraB.getposy()<=y<=figuraB.getposy() + figuraB.getheight()-1 or figuraB.getposy()<=figuraA.getheight()+y - 1<=figuraB.getposy() + figuraB.getheight()-1: # those sides really touch
                    self.rest_CLD.append(tup)
                    return 'NÃO'
            if y == figuraB.getposy() + figuraB.getheight() or y + figuraA.getheight()-1 == figuraB.getposy()-1: # same reasoning for the horizontal sides
                if x<=figuraB.getposx()<=x+figuraA.getwidth()-1 or x<=figuraB.getposx()+figuraB.getwidth()-1<=x+figuraA.getwidth()-1 or figuraB.getposx()<=x<=figuraB.getposx() + figuraB.getwith()-1 or figuraB.getposx()<=figuraA.getwidth()+x-1<=figuraB.getposx() + figuraB.getwith()-1:
                    self.rest_CLD.append(tup)
                    return 'NÃO'
            else: # placed and not touching: the separation holds, record it
                self.rest_SEP.append(tup)
                return 'SIM'
    self.rest_SEP.append(tup) # at least one figure not placed yet: record without checking
    return "SIM"
def rest_dentro(self, nomeRect, nomeArea):
    """Register the restriction "nomeRect inside (dentro de) nomeArea".

    Returns 'SIM' when the restriction is accepted and recorded, 'NÃO' when it
    contradicts an existing "outside" restriction or the rectangle is already
    placed outside the area.  When the rectangle is not placed yet or the
    area is undefined, the restriction is recorded unconditionally.
    """
    par = (nomeRect, nomeArea)
    if par in self.rest_FORA:  # would contradict an existing "outside" restriction
        return 'NÃO'
    if nomeRect in self.figuras_colocadas and nomeArea in self.areas:
        rect = self.figuras_colocadas[nomeRect]
        zona = self.areas[nomeArea]
        # The whole rectangle (inclusive coordinates) must fit inside the area.
        dentro = (
            rect.getposx() >= zona.getx()
            and rect.getposy() >= zona.gety()
            and rect.getposx() + rect.getwidth() - 1 <= zona.getx() + zona.gettamx() - 1
            and rect.getposy() + rect.getheight() - 1 <= zona.gety() + zona.gettamy() - 1
        )
        if not dentro:
            return 'NÃO'
        self.rest_DENTRO.append(par)
        return 'SIM'
    self.rest_DENTRO.append(par)
    return "SIM"
def rest_fora(self, nomeRect, nomeArea):
    """Register the restriction "nomeRect outside (fora de) nomeArea".

    Returns 'SIM' when the restriction is accepted and recorded, 'NÃO' when it
    contradicts an existing "inside" restriction or the rectangle is already
    placed on the area.  When the rectangle is not placed yet or the area is
    undefined, the restriction is recorded unconditionally.

    NOTE(review): the overlap test ORs the per-axis interval checks, matching
    the original code (and __valida_rest_fora) verbatim -- it rejects a
    rectangle that overlaps the area's x-range even when its y-range is
    disjoint; confirm this is the intended semantics.
    """
    par = (nomeRect, nomeArea)
    if par in self.rest_DENTRO:  # would contradict an existing "inside" restriction
        return 'NÃO'
    if nomeRect in self.figuras_colocadas and nomeArea in self.areas:
        rect = self.figuras_colocadas[nomeRect]
        zona = self.areas[nomeArea]
        x1 = rect.getposx()
        y1 = rect.getposy()
        x2 = rect.getposx() + rect.getwidth() - 1
        y2 = rect.getposy() + rect.getheight() - 1
        ax1 = zona.getx()
        ay1 = zona.gety()
        ax2 = zona.getx() + zona.gettamx() - 1
        ay2 = zona.gety() + zona.gettamy() - 1
        if ((ax1 <= x1 <= ax2) or (ay1 <= y1 <= ay2)
                or (ax1 <= x2 <= ax2) or (ay1 <= y2 <= ay2)):
            return 'NÃO'
        self.rest_FORA.append(par)
        return 'SIM'
    self.rest_FORA.append(par)
    return "SIM"
def coloca_rodar(self, nome, px, py):
    """Place figure ``nome`` rotated 90 degrees at (px, py) (the "COLR" command).

    Returns "SIM" on success and "NÃO" on failure.  When the figure exists but
    is not rotatable (``rodar != 's'``) the method falls off the end and
    returns None -- NOTE(review): probably should be "NÃO"; confirm.
    NOTE(review): validation is done with the figure's pre-rotation
    dimensions; confirm that __valida_coloca is meant to see them un-rotated.
    """
    if nome in self.figuras_nao_colocadas:
        f = self.figuras_nao_colocadas[nome]
        if f.rodar=='s': # the figure is allowed to rotate
            if self.__valida_coloca(f, px, py) == False:
                return "NÃO"
            else:
                # Build the rotated copy: height and width are swapped.
                g= Figura(nome, f.getheight(), f.getwidth(), f.getrodar())
                g.setposx(px)
                g.setposy(py)
                self.figuras_colocadas[nome] = g
                del self.figuras_nao_colocadas[nome]
                t=(g, 'COLR', nome)
                self.s.push(t) # record the command on the undo stack
                return "SIM"
    elif nome in self.figuras_colocadas: # here we move an already-placed rectangle
        f = self.figuras_colocadas[nome]
        if self.__valida_coloca(f, px, py) == False:
            return "NÃO"
        else:
            h= f.altura
            f.setheight(f.largura) # re-issuing COLR not only moves the figure but rotates it again, back to its original shape
            f.setwidth(h)
            if nome in self.figuras_anteriores: # dictionary of previous positions per figure, used to restore a position on undo
                self.figuras_anteriores[nome]+=[(f.getposx(), f.getposy())] # not the first move: append the current coordinates before changing them
            else: # the last tuple of the list (keyed by figure name) is the last position occupied before the change
                self.figuras_anteriores[nome]=[(f.getposx(), f.getposy())]
            f.setposx(px)
            f.setposy(py)
            t=(f, 'COLR', nome)
            self.s.push(t)
            return "SIM"
    elif nome not in self.figuras_nao_colocadas and nome not in self.figuras_colocadas:
        return 'NÃO'
def __nao_sobrepoe(self,g, x, y):
for f in self.figuras_colocadas.values(): #ver se a fig cai em cima de alguma figura f da lista de figuras co,ocadas
if f.getnome()!= g.getnome(): #para, no caso de estarmos a mudar a posição de uma figura já colocada, a posião atual desta não interferir
if x>= f.getposx() and x<= f.getposx2() and y>= f.posy and y<= f.getposy2(): #primeiro vertice (x,y)
return False
if x+g.getwidth() - 1>= f.getposx() and x+g.getwidth()-1<= f.getposx2() and y>= f.getposy() and y<= f.getposy2(): #segundo vertice (x2,y)
return False
if x>= f.getposx() and x<= f.getposx2() and y+g.getheight()-1>= f.getposy() and y+g.getheight()-1<= f.getposy2(): # terceiro vertice (x,y2)
return False
if x+g.getwidth()-1>= f.getposx() and x+g.getwidth()-1<= f.getposx2() and y+g.getheight()-1>= f.getposy() and y+g.getheight()-1<= f.getposy2(): #quarto vertice (x2,y2)
return False
#verifica se a figura ja existente fica contida dentro da que queremos colocar
if f.getposx()>= x and f.getposx()<= x+g.getwidth()-1 and f.getposy()>= y and f.getposy()<= y+g.getheight()-1: #primeiro vertice (ver rascunho tarefa 9 no caderno)
return False
if f.getposx2()>= x and f.getposx2()<= x+g.getwidth()-1 and f.getposy()>= y and f.getposy()<= y+g.getheight()-1: #segundo vertice
return False
if f.getposx()>= x and f.getposx()<= x+g.getwidth()-1 and f.getposy2()>= y and f.getposy2()<= y+g.getheight()-1: # terceiro vertice
return False
if f.getposx2()>= x and f.getposx2()<= x+g.getwidth()-1 and f.getposy2()>= y and f.getposy2()<= y+g.getheight()-1: #quarto vertice
return False
return True
def __valida_rest_dentro_sup_areas(self, x, y, tamx, tamy):
if x >= 1 and y >= 1 and x + tamx - 1 <= self.largura and y + tamy - 1 <= self.altura:
return True
else:
return False
def nova_area(self, nome, x, y, tamx, tamy):
if nome in self.areas:
return "NÃO"
if self.__valida_rest_dentro_sup_areas( x, y,tamx, tamy)==True: #verificar se a area esta dentro da dimensao
f= Area(nome, x, y, tamx, tamy)
self.areas[nome]=f
t=(f, 'AREA', nome)
self.s.push(t)
return "SIM"
else:
return 'NÃO'
def getlargura(self):
    """Return the board width (largura)."""
    return self.largura
def getaltura(self):
    """Return the board height (altura)."""
    return self.altura
def getfiguras_colocadas(self):
    """Return the dict of already-placed figures, keyed by figure name."""
    return self.figuras_colocadas
def getfiguras_nao_colocadas(self):
    """Return the dict of not-yet-placed figures, keyed by figure name."""
    return self.figuras_nao_colocadas
def stack_to_list(self, stack):
    """Return the stack's contents as a list (top element first) without
    destroying the stack.

    The stack is drained into both the result list and a temporary stack,
    then restored from the temporary stack so the caller's stack is left
    exactly as it was.
    """
    drained = []
    backup = Stack()
    # Pop everything: record each item and park it on the backup stack.
    while not stack.is_empty():
        item = stack.pop()
        drained.append(item)
        backup.push(item)
    # Push everything back, restoring the original order.
    while not backup.is_empty():
        stack.push(backup.pop())
    return drained
def rest_undo(self):
    """Undo the most recent command recorded on the stack ``self.s``.

    Each stack entry is a tuple (object, command, name) where command is one
    of 'COL', 'COLR' or 'AREA'.  Returns 'SIM' on success, 'E_RESTR' when the
    restored position violates a restriction, 'DIM' when only the board
    dimension entry remains, 'NÃO' otherwise.

    NOTE(review): indentation reconstructed from a whitespace-mangled dump.
    The ``print`` calls look like leftover debugging aids, and the ``break``
    statements that follow a ``return`` are unreachable dead code.  The final
    ``break`` of the 'COLR' branch is only syntactically valid inside the
    ``for`` loop, so the un-place tail of that branch was reconstructed
    inside the loop -- unlike the 'COL' branch; confirm against the original.
    """
    if not self.s.is_empty():
        T=self.s.pop()
        f= T[0] # the figure, area or dimension, depending on the last command
        com= T[1] # the last command executed
        nome = T[2]
        L= self.stack_to_list(self.s)
        print(L) # NOTE(review): debug leftover
        if com == 'COL':
            for el in L:
                if el[2] == f.getnome(): # the first entry with the figure's name holds its last state (position/size) before it was moved
                    nome= el[2]
                    l= len(self.figuras_anteriores[nome])
                    t=self.figuras_anteriores[nome][l-1] # most recently recorded position
                    x = t[0]
                    y= t[1]
                    print(x,y) # NOTE(review): debug leftover
                    f.setposx(x)
                    f.setposy(y)
                    del self.figuras_anteriores[nome][l-1]
                    if not self.__valida_coloca(f, x, y):
                        return 'E_RESTR'
                        break # unreachable (follows return)
                    else:
                        return 'SIM'
                        break # unreachable (follows return)
            # No earlier placement found: the figure goes back to "not placed".
            f.setposx(-1)
            f.setposy(-1)
            self.figuras_nao_colocadas[f.getnome()]=f
            del self.figuras_colocadas[f.getnome()]
            return 'SIM'
        if com == 'COLR':
            for el in L:
                if el[2] == f.getnome():
                    print('aaaa') # NOTE(review): debug leftover
                    nome=el[2]
                    l= len(self.figuras_anteriores[nome])
                    t=self.figuras_anteriores[nome][l-1]
                    x = t[0] # original note (translated): "THIS IS WHERE IT GOES WRONG"
                    y= t[1]
                    print(x,y) # NOTE(review): debug leftover
                    f.setposx(x)
                    f.setposy(y)
                    h= f.altura
                    f.setheight(f.largura) # also undo the rotation
                    f.setwidth(h)
                    del self.figuras_anteriores[nome][l-1]
                    if not self.__valida_coloca(f, x, y):
                        return 'E_RESTR'
                        break # unreachable (follows return)
                    else:
                        return 'SIM'
                        break # unreachable (follows return)
                # No matching earlier placement: un-place the figure and undo
                # the rotation it suffered.
                f.setposx(-1)
                f.setposy(-1)
                h= f.altura
                f.setheight(f.largura)
                f.setwidth(h)
                self.figuras_nao_colocadas[f.getnome()]=f
                del self.figuras_colocadas[f.getnome()]
                return 'SIM'
                break # unreachable (follows return)
        elif com == 'AREA':
            del self.areas[nome]
            return 'SIM'
        elif L==[]: # the first stack element is the dimension, so after popping it L is empty
            return 'DIM'
        else:
            return 'NÃO'
def __valida_rsv(self, fig, posx, posy):
if not self.__valida_rest_dentro_sup(fig, posx, posy):
return False
if not self.__nao_sobrepoe(fig, posx, posy):
return False #vai sobrepor a nenhum outro rectangulo e cai dentro da dimensao
def coloca_auto(self, f, px, py):
    """Helper for resolve(): try to place figure ``f`` at (px, py).

    Returns True on success, 'E_RESTR' when the placement violates a
    restriction, False when the spot is invalid (outside the board or
    overlapping).

    NOTE(review): ensure __valida_rsv explicitly returns True on success --
    this ``== True`` comparison fails for any other truthy/None result.
    NOTE(review): the figure is added to figuras_colocadas but not removed
    from figuras_nao_colocadas here; resolve() cleans that up afterwards.
    On the 'E_RESTR' path the figure is left placed -- confirm intended.
    """
    if RetangulosEngine.__valida_rsv(self, f, px, py)==True:
        f = self.figuras_nao_colocadas[f.nome]
        f.setposx(px)
        f.setposy(py)
        self.figuras_colocadas[f.nome] = f
        if not self.__valida_coloca(f, px, py):
            return 'E_RESTR'
        else:
            return True
    else:
        return False
def coloca_rodar_auto(self, f, px, py):
    """Helper for resolve(): try to place figure ``f`` rotated 90 degrees at
    (px, py).

    Returns True on success, 'E_RESTR' when the placement violates a
    restriction, False when the spot is invalid.  When the figure is not
    rotatable (``rodar != 's'``) the method falls off the end and returns
    None -- NOTE(review): probably should be False; confirm.
    NOTE(review): __valida_coloca is called with the un-rotated ``f`` rather
    than the rotated ``g`` -- confirm intended.
    """
    f=self.figuras_nao_colocadas[f.nome]
    if f.rodar=='s': # the figure is allowed to rotate
        # Rotated copy: height and width swapped.
        g= Figura(f.nome, f.altura, f.largura, f.rodar)
        g.setposx(px)
        g.setposy(py)
        # Temporarily substitute the rotated copy in the not-placed dict.
        del self.figuras_nao_colocadas[f.nome]
        self.figuras_nao_colocadas[f.nome] = g
        if RetangulosEngine.__valida_rsv(self, g, px, py)==True:
            self.figuras_colocadas[f.nome] = g
            if not self.__valida_coloca(f, px, py):
                return 'E_RESTR'
            else:
                return True
        else:
            # Placement failed: restore the original (un-rotated) figure.
            del self.figuras_nao_colocadas[f.nome]
            self.figuras_nao_colocadas[f.nome] = f
            return False
def resolve(self):
    """Try to place every not-yet-placed figure automatically (the "RSV"
    command).

    The first pass tries each figure in its original orientation at every
    candidate board position; the second pass retries the remaining figures
    rotated 90 degrees.  Figures placed by either pass are moved out of
    ``figuras_nao_colocadas`` (and recorded in ``desfrsv``).  Returns "SIM"
    when everything was placed, "NÃO HÁ SOLUÇÃO" otherwise.

    NOTE(review): the upper bounds ``largura-g.largura+1`` etc. appear to
    exclude the last column/row where the figure would still fit -- possible
    off-by-one; confirm.
    NOTE(review): the ``break`` on "already placed" stops the whole pass
    instead of skipping to the next figure -- ``continue`` may have been
    intended; confirm.
    NOTE(review): coloca_rodar_auto deletes and re-inserts entries of
    figuras_nao_colocadas while the second pass iterates over it -- confirm
    this cannot raise "dictionary changed size during iteration".
    """
    for f in self.figuras_nao_colocadas.values(): # walk the not-yet-placed figures
        g=self.figuras_nao_colocadas[f.nome]
        if f.nome in self.figuras_colocadas: # already placed meanwhile
            break
        else:
            for i in range (1, (self.largura-g.largura+1)): # candidate x
                j=1
                if f.nome in self.figuras_colocadas:
                    break
                else:
                    while(1<=j< (self.altura-g.altura+1)): # candidate y
                        if RetangulosEngine.coloca_auto(self, g, i, j)==True:
                            break
                        else:
                            j+=1
    for f in self.figuras_colocadas.values(): # after a figure is placed
        if f.nome in self.figuras_nao_colocadas: # it must be removed from the not-placed dict
            self.desfrsv.append(f.nome)
            del self.figuras_nao_colocadas[f.nome]
    for f in self.figuras_nao_colocadas.values(): # retry the rest, rotating them as well
        g=self.figuras_nao_colocadas[f.nome]
        if f.nome in self.figuras_colocadas:
            break
        else:
            for i in range (1, (self.largura-g.altura+1)): # rotated: the width is g.altura
                j=1
                if f.nome in self.figuras_colocadas:
                    break
                else:
                    while(1<=j<(self.altura-g.largura+1)):
                        if RetangulosEngine.coloca_rodar_auto(self, g, i, j)==True:
                            break
                        else:
                            j+=1
    for f in self.figuras_colocadas.values():
        if f.nome in self.figuras_nao_colocadas:
            self.desfrsv.append(f.nome)
            del self.figuras_nao_colocadas[f.nome]
    if len(self.figuras_nao_colocadas)>0:
        return "NÃO HÁ SOLUÇÃO"
    else:
        return "SIM"
class Area():
    """Named rectangular region of the board.

    Anchored at its top-left cell (x, y) with size tamx x tamy; all
    coordinates are 1-based and inclusive.
    """

    def __init__(self, nome, x, y, tamx, tamy):
        self.nome = nome
        self.x = x
        self.y = y
        self.tamx = tamx
        self.tamy = tamy

    def getNome(self):
        """Return the area's name."""
        return self.nome

    def getx(self):
        """Return the x coordinate of the top-left corner."""
        return self.x

    def gety(self):
        """Return the y coordinate of the top-left corner."""
        return self.y

    def getx2(self):
        """Return the x coordinate of the bottom-right corner (inclusive)."""
        return self.x + self.tamx - 1

    def gety2(self):
        """Return the y coordinate of the bottom-right corner (inclusive)."""
        return self.y + self.tamy - 1

    def gettamx(self):
        """Return the horizontal size."""
        return self.tamx

    def gettamy(self):
        """Return the vertical size."""
        return self.tamy
class Stack:
    """Simple LIFO stack backed by a Python list (last element is the top)."""

    def __init__(self):
        self.items = []

    def is_empty(self):
        """Return True when the stack holds no items."""
        return not self.items

    def push(self, item):
        """Place ``item`` on top of the stack."""
        self.items.append(item)

    def pop(self):
        """Remove and return the top item."""
        return self.items.pop()

    def top(self):
        """Return the top item without removing it."""
        return self.items[-1]

    def size(self):
        """Return the number of items on the stack."""
        return len(self.items)
| 54.31422 | 399 | 0.524366 | 5,678 | 47,362 | 4.299401 | 0.062698 | 0.067139 | 0.074554 | 0.042397 | 0.835532 | 0.800385 | 0.784327 | 0.740947 | 0.713665 | 0.697239 | 0 | 0.010114 | 0.373696 | 47,362 | 872 | 400 | 54.31422 | 0.812865 | 0.154111 | 0 | 0.704336 | 0 | 0 | 0.006509 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085414 | false | 0 | 0 | 0.028909 | 0.285151 | 0.005256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
5ec217907e6e8a00235e98b0208de9841e46411c | 32,953 | py | Python | tests/test_callable.py | spack971/none | 6313dd7d7095b301e8d49a38d1b39c9080008ae0 | [
"MIT"
] | 1 | 2020-09-28T17:57:33.000Z | 2020-09-28T17:57:33.000Z | tests/test_callable.py | spack971/none | 6313dd7d7095b301e8d49a38d1b39c9080008ae0 | [
"MIT"
] | 5 | 2020-09-02T15:30:39.000Z | 2020-10-15T09:52:35.000Z | tests/test_callable.py | spack971/none | 6313dd7d7095b301e8d49a38d1b39c9080008ae0 | [
"MIT"
] | 1 | 2020-09-19T05:10:02.000Z | 2020-09-19T05:10:02.000Z | # tests/test_callable.py
# ======================
#
# Copying
# -------
#
# Copyright (c) 2020 none authors and contributors.
#
# This file is part of the *none* project.
#
# None is a free software project. You can redistribute it and/or
# modify it following the terms of the MIT License.
#
# This software project is distributed *as is*, WITHOUT WARRANTY OF ANY
# KIND; including but not limited to the WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE and NONINFRINGEMENT.
#
# You should have received a copy of the MIT License along with
# *none*. If not, see <http://opensource.org/licenses/MIT>.
#
"""Test cases for :mod:`none.callable`."""
from contextlib import suppress
import pytest
from hypothesis import given, assume, strategies as st
import none
#: Maximum amount of times a function can be tried again.
MAX_RETRY = (2 ** 13) - 1
class TestCatchHook(object):
"""Test cases for :class:`none.callable.catch` and
:class:`none.callable.hook`.
"""
def test_hook___init___function_isinstance_hook(self):
"""Ensure the a hooked function becomes a :class:`none.callable.hook`
instance.
"""
@none.callable.hook
def my_hook():
pass
assert isinstance(my_hook, none.callable.hook)
def test_hook___init___method_isinstance_hook(self):
"""Ensure the a hooked method becomes a :class:`none.callable.hook`
instance.
"""
class Hookable(object):
@none.callable.hook
def my_hook(self):
pass
assert isinstance(Hookable.my_hook, none.callable.hook)
def test_hook___call___run_other_functions(self):
"""Test that by calling the hooked function, all hanging functions are
also executed.
"""
stack = set()
@none.callable.hook
def my_hook():
stack.add(0)
@none.callable.catch(my_hook)
def my_catch1():
stack.add(1)
@none.callable.catch(my_hook)
def my_catch2():
stack.add(2)
my_hook()
assert stack == {0, 1, 2}
def test_hook___call___run_other_methods(self):
"""Test that by calling the hooked method, all hanging methods are also
executed.
"""
stack = set()
class Hookable(object):
@none.callable.hook
def my_hook(self):
stack.add(0)
@none.callable.catch("my_hook")
def my_catch1(self):
stack.add(1)
@none.callable.catch("my_hook")
def my_catch2(self):
stack.add(2)
h = Hookable()
h.my_hook()
assert stack == {0, 1, 2}
def test_hook___call___run_other_methods_with_inheritance(self):
"""Test that by calling the hooked method, all hanging methods are also
executed even when inheritance is involved.
"""
stack = set()
class Hookable(object):
@none.callable.hook
def my_hook(self):
stack.add(0)
class Catching(Hookable):
@none.callable.catch("my_hook")
def my_catch1(self):
stack.add(1)
@none.callable.catch("my_hook")
def my_catch2(self):
stack.add(2)
c = Catching()
c.my_hook()
assert stack == {0, 1, 2}
def test_hook___call___run_only_inherited_methods(self):
"""Make sure that only hanging methods within the class context are
executed.
"""
stack = set()
class Hookable(object):
@none.callable.hook
def my_hook(self):
stack.add(0)
class Catching(Hookable):
@none.callable.catch("my_hook")
def my_catch1(self):
stack.add(1)
@none.callable.catch("my_hook")
def my_catch2(self):
stack.add(2)
class _NOOP(Hookable):
@none.callable.catch("my_hook")
def my_noop_catch(self):
stack.add("--noop--")
c = Catching()
c.my_hook()
assert stack == {0, 1, 2}
def test_hook_hangers_register_same_hanger_only_once(self):
"""Ensure that adding the same catch function twice is only
registered once.
"""
@none.callable.hook
def my_hook():
pass
# First registration via decorator.
@none.callable.catch(my_hook)
def my_catch():
pass
# Force direct registration.
my_hook.hanging.add(my_catch)
assert len(my_hook.hanging) == 1
def test_catch___init___function_isinstance_catch_and_hook(self):
"""Ensure the a catching function becomes an instance of
:class:`none.callable.catch` and :class:`none.callable.hook`.
"""
@none.callable.hook
def my_hook():
pass
@none.callable.catch(my_hook)
def my_catch():
pass
assert isinstance(my_catch, none.callable.catch)
assert isinstance(my_catch, none.callable.hook)
def test_catch___init___method_isinstance_catch_and_hook(self):
"""Ensure the a catching method becomes an instance of
:class:`none.callable.catch` and :class:`none.callable.hook`.
"""
class Hookable(object):
@none.callable.hook
def my_hook(self):
pass
@none.callable.catch("my_hook")
def my_catch(self):
pass
assert isinstance(Hookable.my_catch, none.callable.catch)
assert isinstance(Hookable.my_catch, none.callable.hook)
def test_catch___call___update_hook_from_function(self):
"""Ensure that catching a hook from a function updates its list of
hanging functions.
"""
@none.callable.hook
def my_hook():
pass
@none.callable.catch(my_hook)
def my_catch():
pass
assert my_catch in my_hook.hanging
def test_catch___call___function_can_also_be_a_hook(self):
"""Ensure that by catching another catch function, all functions in the
chain are ran.
"""
stack = set()
@none.callable.hook
def my_hook():
stack.add(0)
@none.callable.catch(my_hook)
def my_catch1():
stack.add(1)
@none.callable.catch(my_catch1)
def my_catch2():
stack.add(2)
my_hook()
assert stack == {0, 1, 2}
def test_hook___call___method_can_also_be_a_hook(self):
"""Ensure that by catching another catch method, all methods in the
chain are ran.
"""
stack = set()
class Hookable(object):
@none.callable.hook
def my_hook(self):
stack.add(0)
@none.callable.catch("my_hook")
def my_catch1(self):
stack.add(1)
@none.callable.catch("my_catch1")
def my_catch2(self):
stack.add(2)
h = Hookable()
h.my_hook()
assert stack == {0, 1, 2}
def test_hook___call___method_with_inheritance_can_also_be_a_hook(self):
"""Ensure that by catching another catch method, all methods in the
chain are ran even when inheritance is involved.
"""
stack = set()
class Hookable(object):
@none.callable.hook
def my_hook(self):
stack.add(0)
@none.callable.catch("my_hook")
def my_catch1(self):
stack.add(1)
class Catcher(Hookable):
@none.callable.catch("my_catch1")
def my_catch2(self):
stack.add(2)
c = Catcher()
c.my_hook()
assert stack == {0, 1, 2}
class TestAsyncCatchHook(object):
"""Test cases for :class:`none.callable.asynccatch` and
:class:`none.callable.asynchook`.
"""
def test_asynchook___init___coroutine_isinstance_asynchook_and_hook(self):
"""Ensure the a hooked coroutine becomes an
:class:`none.callable.asynchook` and :class:`none.callable.hook`
instance.
"""
@none.callable.asynchook
async def my_hook():
pass
assert isinstance(my_hook, none.callable.asynchook)
assert isinstance(my_hook, none.callable.hook)
def test_asynchook___init___method_isinstance_asynchook_and_hook(self):
"""Ensure the a hooked method becomes an :class:`none.callable.asynchook`
and :class:`none.callable.hook` instance.
"""
class AsyncHookable(object):
@none.callable.asynchook
async def my_hook(self):
pass
assert isinstance(AsyncHookable.my_hook, none.callable.asynchook)
assert isinstance(AsyncHookable.my_hook, none.callable.hook)
@pytest.mark.asyncio
async def test_asynchook___call___run_other_coroutines(self):
"""Test that by calling the hooked coroutine, all hanging coroutines are
also called.
"""
stack = set()
@none.callable.asynchook
async def my_hook():
stack.add(0)
@none.callable.asynccatch(my_hook)
async def my_catch1():
stack.add(1)
@none.callable.asynccatch(my_hook)
async def my_catch2():
stack.add(2)
await my_hook()
assert stack == {0, 1, 2}
@pytest.mark.asyncio
async def test_asynchook___call___run_other_methods(self):
"""Test that by calling the hooked method, all hanging methods are also
executed.
"""
stack = set()
class AsyncHookable(object):
@none.callable.asynchook
async def my_hook(self):
stack.add(0)
@none.callable.asynccatch("my_hook")
async def my_catch1(self):
stack.add(1)
@none.callable.asynccatch("my_hook")
async def my_catch2(self):
stack.add(2)
h = AsyncHookable()
await h.my_hook()
assert stack == {0, 1, 2}
@pytest.mark.asyncio
async def test_asynchook___call___run_other_methods_with_inheritance(self):
"""Test that by calling the hooked method, all hanging methods are also
executed even when inheritance is involved.
"""
stack = set()
class AsyncHookable(object):
@none.callable.asynchook
async def my_hook(self):
stack.add(0)
class AsyncCatching(AsyncHookable):
@none.callable.asynccatch("my_hook")
async def my_catch1(self):
stack.add(1)
@none.callable.asynccatch("my_hook")
async def my_catch2(self):
stack.add(2)
c = AsyncCatching()
await c.my_hook()
assert stack == {0, 1, 2}
@pytest.mark.asyncio
async def test_asynchook___call___run_only_inherited_methods(self):
"""Make sure that only hanging methods within the class context are
executed.
"""
stack = set()
class AsyncHookable(object):
@none.callable.asynchook
async def my_hook(self):
stack.add(0)
class AsyncCatching(AsyncHookable):
@none.callable.asynccatch("my_hook")
async def my_catch1(self):
stack.add(1)
@none.callable.asynccatch("my_hook")
async def my_catch2(self):
stack.add(2)
class _NOOP(AsyncHookable):
@none.callable.asynccatch("my_hook")
def my_noop_catch(self):
stack.add("--noop--")
c = AsyncCatching()
await c.my_hook()
assert stack == {0, 1, 2}
def test_asynchook_hangers_register_same_hanger_only_once(self):
"""Ensure that adding the same catch coroutine twice is only
registered once.
"""
@none.callable.asynchook
async def my_hook():
pass
# First registration via decorator.
@none.callable.asynccatch(my_hook)
async def my_catch():
pass
# Force direct registration.
my_hook.hanging.add(my_catch)
assert len(my_hook.hanging) == 1
def test_asynccatch___init___coroutine_isinstance_catch_hook_asynccatch_and_asynchook(
self,
):
"""Ensure the a catching coroutine becomes an instance of
:class:`none.callable.asynccatch`, :class:`none.callable.asynchook` but
also :class:`none.callable.catch` and :class:`none.callable.hook`.
"""
@none.callable.asynchook
def my_hook():
pass
@none.callable.asynccatch(my_hook)
def my_catch():
pass
assert isinstance(my_catch, none.callable.asynccatch)
assert isinstance(my_catch, none.callable.catch)
assert isinstance(my_catch, none.callable.asynchook)
assert isinstance(my_catch, none.callable.hook)
def test_asynccatch___init___method_isinstance_catch_hook_asynccatch_and_asynchook(
self,
):
"""Ensure the a catching method becomes an instance of
:class:`none.callable.asynccatch`, :class:`none.callable.asynchook` but
also :class:`none.callable.catch` and :class:`none.callable.hook`.
"""
class AsyncHookable(object):
@none.callable.asynchook
async def my_hook(self):
pass
@none.callable.asynccatch("my_hook")
async def my_catch(self):
pass
assert isinstance(AsyncHookable.my_catch, none.callable.asynccatch)
assert isinstance(AsyncHookable.my_catch, none.callable.catch)
assert isinstance(AsyncHookable.my_catch, none.callable.asynchook)
assert isinstance(AsyncHookable.my_catch, none.callable.hook)
def test_asynccatch___call___update_hook_from_coroutine(self):
"""Ensure that catching a hook from a function updates its list of
hanging coroutines.
"""
@none.callable.asynchook
async def my_hook():
pass
@none.callable.asynccatch(my_hook)
async def my_catch():
pass
assert my_catch in my_hook.hanging
@pytest.mark.asyncio
async def test_asynccatch___call___function_can_also_be_a_hook(self):
"""Ensure that by catching another catch coroutine, all coroutines in
the chain are ran.
"""
stack = set()
@none.callable.asynchook
async def my_hook():
stack.add(0)
@none.callable.asynccatch(my_hook)
async def my_catch1():
stack.add(1)
@none.callable.asynccatch(my_catch1)
async def my_catch2():
stack.add(2)
await my_hook()
assert stack == {0, 1, 2}
@pytest.mark.asyncio
async def test_asynccatch___call___method_can_also_be_a_hook(self):
"""Ensure that by catching another catch method, all methods in the
chain are ran.
"""
stack = set()
class AsyncHookable(object):
@none.callable.asynchook
async def my_hook(self):
stack.add(0)
@none.callable.asynccatch("my_hook")
async def my_catch1(self):
stack.add(1)
@none.callable.asynccatch("my_catch1")
async def my_catch2(self):
stack.add(2)
h = AsyncHookable()
await h.my_hook()
assert stack == {0, 1, 2}
@pytest.mark.asyncio
async def test_asynccatch___call___method_with_inheritance_can_also_be_a_hook(self):
"""Ensure that by catching another catch method, all methods in the
chain are ran even when inheritance is involved.
"""
stack = set()
class AsyncHookable(object):
@none.callable.asynchook
async def my_hook(self):
stack.add(0)
@none.callable.asynccatch("my_hook")
async def my_catch1(self):
stack.add(1)
class AsyncCatcher(AsyncHookable):
@none.callable.asynccatch("my_catch1")
async def my_catch2(self):
stack.add(2)
c = AsyncCatcher()
await c.my_hook()
assert stack == {0, 1, 2}
class TestDelay(object):
"""Test cases for :class:`none.callable.delay`."""
def test_delay_no_parenthesis_valueerror(self):
"""Calling the ``delay`` decorator with no parenthesis should raise a
``ValueError``.
"""
with pytest.raises(ValueError):
@none.callable.delay
def noop():
pass
def test_delay_no_parameters_valueerror(self):
"""Calling the ``delay`` decorator with no parameters should raise a
``ValueError``.
"""
with pytest.raises(ValueError):
@none.callable.delay()
def noop():
pass
def test_delay_extra_parameters_valueerror(self):
"""Unexpected parameters should raise a ``ValueError``."""
with pytest.raises(ValueError):
@none.callable.delay(0, 0, 0, "--INVALID--")
def noop():
pass
@pytest.mark.parametrize("args", ((None,), (0, None), (0, None, 0)))
def test_delay_high_none_valueerror(self, args):
"""Setting ``high`` to ``None`` should raise a ``ValueError``."""
with pytest.raises(ValueError):
@none.callable.delay(*args)
def noop():
pass
@given(
low=st.floats(max_value=0, exclude_max=True),
high=st.floats(max_value=0, exclude_max=True),
)
def test_delay_negative_low_or_high_valueerror(self, low, high):
"""Having ``low`` or ``high`` parameters lower than ``0`` should raise a
``ValueError``.
"""
with pytest.raises(ValueError):
@none.callable.delay(low, high)
def noop():
pass
@given(low=st.floats(min_value=0), high=st.floats(min_value=0))
def test_delay_low_higher_than_high_valueerror(self, low, high):
"""Having ``low`` parameter higher than ``high`` should raise a
``ValueError``.
"""
assume(low > high)
with pytest.raises(ValueError):
@none.callable.delay(low, high)
def noop():
pass
@given(
low=st.floats(min_value=0),
high=st.floats(min_value=0),
mode=st.floats(min_value=0),
)
def test_delay_mode_out_of_bounds_valueerror(self, low, high, mode):
"""Having ``mode`` parameter out of bounds should raise a
``ValueError``.
"""
assume(low <= high)
assume(low > mode or mode > high)
with pytest.raises(ValueError):
@none.callable.delay(low, high, mode)
def noop():
pass
@given(high=st.floats(min_value=0))
def test_delay_high_only(self, monkeypatch, high):
"""When setting a high delay value only, the sleep time must match the
provided value.
"""
import time
sentinel = object()
stack = []
with monkeypatch.context() as m:
m.setattr(time, "sleep", lambda x: stack.append(x))
@none.callable.delay(high)
def append_sentinel():
stack.append(sentinel)
append_sentinel()
assert stack == [high, sentinel]
@given(high=st.floats(min_value=0))
def test_delay_low_set_to_none_and_high(self, monkeypatch, high):
"""When setting a high delay value with ``low`` set to ``None``, the
``high`` value is used.
"""
import time
sentinel = object()
stack = []
with monkeypatch.context() as m:
m.setattr(time, "sleep", lambda x: stack.append(x))
@none.callable.delay(None, high)
def append_sentinel():
stack.append(sentinel)
append_sentinel()
assert stack == [high, sentinel]
@given(low=st.floats(min_value=0), high=st.floats(min_value=0))
def test_delay_with_both_low_and_high(self, monkeypatch, low, high):
"""When both ``low`` and ``high`` values are set, a random value in
between is chosen.
"""
assume(low <= high)
import math
import time
sentinel = object()
stack = []
with monkeypatch.context() as m:
m.setattr(time, "sleep", lambda x: stack.append(x))
@none.callable.delay(low, high)
def append_sentinel():
stack.append(sentinel)
append_sentinel()
# By providing ``float("inf")`` to ``random.triangular``,
# ``float("nan")`` will be returned.
#
# As ``time.sleep`` will complain we let the user deal with it.
if not math.isnan(stack[0]):
assert low <= stack[0] <= high
assert stack[-1] == sentinel
@given(
low=st.floats(min_value=0),
high=st.floats(min_value=0),
mode=st.floats(min_value=0),
)
def test_delay_with_low_high_and_mode(self, monkeypatch, low, high, mode):
"""Test ``delay`` will all accepted parameters."""
assume(low <= high)
assume(low <= mode <= high)
import math
import time
sentinel = object()
stack = []
with monkeypatch.context() as m:
m.setattr(time, "sleep", lambda x: stack.append(x))
@none.callable.delay(low, high, mode)
def append_sentinel():
stack.append(sentinel)
append_sentinel()
# By providing ``float("inf")`` to ``random.triangular``,
# ``float("nan")`` will be returned.
#
# As ``time.sleep`` will complain we let the user deal with it.
if not math.isnan(stack[0]):
assert low <= stack[0] <= high
assert stack[-1] == sentinel
class TestAsyncDelay(object):
"""Test cases for :class:`none.callable.adelay`."""
def test_adelay_no_parenthesis_valueerror(self):
"""Calling the ``adelay`` decorator with no parenthesis should raise a
``ValueError``.
"""
with pytest.raises(ValueError):
@none.callable.adelay
async def noop():
pass
def test_adelay_no_parameters_valueerror(self):
"""Calling the ``delay`` decorator with no parameters should raise a
``ValueError``.
"""
with pytest.raises(ValueError):
@none.callable.adelay()
async def noop():
pass
def test_adelay_extra_parameters_valueerror(self):
"""Unexpected parameters should raise a ``ValueError``."""
with pytest.raises(ValueError):
@none.callable.adelay(0, 0, 0, "--INVALID--")
async def noop():
pass
@pytest.mark.parametrize("args", ((None,), (0, None), (0, None, 0)))
def test_adelay_high_none_valueerror(self, args):
"""Setting ``high`` to ``None`` should raise a ``ValueError``."""
with pytest.raises(ValueError):
@none.callable.adelay(*args)
async def noop():
pass
@given(
low=st.floats(max_value=0, exclude_max=True),
high=st.floats(max_value=0, exclude_max=True),
)
def test_adelay_negative_low_or_high_valueerror(self, low, high):
"""Having ``low`` or ``high`` parameters lower than ``0`` should raise a
``ValueError``.
"""
with pytest.raises(ValueError):
@none.callable.adelay(low, high)
async def noop():
pass
@given(low=st.floats(min_value=0), high=st.floats(min_value=0))
def test_adelay_low_higher_than_high_valueerror(self, low, high):
"""Having ``low`` parameter higher than ``high`` should raise a
``ValueError``.
"""
assume(low > high)
with pytest.raises(ValueError):
@none.callable.adelay(low, high)
async def noop():
pass
@given(
low=st.floats(min_value=0),
high=st.floats(min_value=0),
mode=st.floats(min_value=0),
)
def test_adelay_mode_out_of_bounds_valueerror(self, low, high, mode):
"""Having ``mode`` parameter out of bounds should raise a
``ValueError``.
"""
assume(low <= high)
assume(low > mode or mode > high)
with pytest.raises(ValueError):
@none.callable.adelay(low, high, mode)
async def noop():
pass
@pytest.mark.asyncio
@given(high=st.floats(min_value=0))
async def test_adelay_high_only(self, monkeypatch, high):
"""When setting a high delay value only, the sleep time must match the
provided value.
"""
import asyncio
sentinel = object()
stack = []
async def fake_sleep(x):
stack.append(x)
with monkeypatch.context() as m:
m.setattr(asyncio, "sleep", fake_sleep)
@none.callable.adelay(high)
async def append_sentinel():
stack.append(sentinel)
await append_sentinel()
assert stack == [high, sentinel]
@pytest.mark.asyncio
@given(high=st.floats(min_value=0))
async def test_adelay_low_set_to_none_and_high(self, monkeypatch, high):
"""When setting a high delay value with ``low`` set to ``None``, the
``high`` value is used.
"""
import asyncio
sentinel = object()
stack = []
async def fake_sleep(x):
stack.append(x)
with monkeypatch.context() as m:
m.setattr(asyncio, "sleep", fake_sleep)
@none.callable.adelay(None, high)
async def append_sentinel():
stack.append(sentinel)
await append_sentinel()
assert stack == [high, sentinel]
@pytest.mark.asyncio
@given(low=st.floats(min_value=0), high=st.floats(min_value=0))
async def test_adelay_with_both_low_and_high(self, monkeypatch, low, high):
"""When both ``low`` and ``high`` values are set, a random value in
between is chosen.
"""
assume(low <= high)
import math
import asyncio
sentinel = object()
stack = []
async def fake_sleep(x):
stack.append(x)
with monkeypatch.context() as m:
m.setattr(asyncio, "sleep", fake_sleep)
@none.callable.adelay(low, high)
async def append_sentinel():
stack.append(sentinel)
await append_sentinel()
# By providing ``float("inf")`` to ``random.triangular``,
# ``float("nan")`` will be returned.
#
# As ``time.sleep`` will complain we let the user deal with it.
if not math.isnan(stack[0]):
assert low <= stack[0] <= high
assert stack[-1] == sentinel
@pytest.mark.asyncio
@given(
low=st.floats(min_value=0),
high=st.floats(min_value=0),
mode=st.floats(min_value=0),
)
async def test_adelay_with_low_high_and_mode(self, monkeypatch, low, high, mode):
"""Test ``delay`` will all accepted parameters."""
assume(low <= high)
assume(low <= mode <= high)
import math
import asyncio
sentinel = object()
stack = []
async def fake_sleep(x):
stack.append(x)
with monkeypatch.context() as m:
m.setattr(asyncio, "sleep", fake_sleep)
@none.callable.adelay(low, high, mode)
async def append_sentinel():
stack.append(sentinel)
await append_sentinel()
# By providing ``float("inf")`` to ``random.triangular``,
# ``float("nan")`` will be returned.
#
# As ``time.sleep`` will complain we let the user deal with it.
if not math.isnan(stack[0]):
assert low <= stack[0] <= high
assert stack[-1] == sentinel
class TestRetry(object):
"""Test cases for :class:`none.callable.retry`."""
def test_retry_no_exception_on_success(self):
"""The decorated function should not raise on success."""
errors = (TypeError, ValueError)
errors_it = iter(errors)
# Ensure the function executed.
stack = []
@none.callable.retry(*errors)
def throw():
# Raise retryable exceptions and complete successfully on
# exhaustion.
with suppress(StopIteration):
stack.append(1)
raise next(errors_it)
throw()
assert sum(stack) == len(errors) + 1
def test_retry_given_exceptions_only(self):
"""Ensure the decorated function is retried on provided exceptions."""
errors = (TypeError, ValueError, EOFError)
errors_it = iter(errors)
# Ensure the function executed.
stack = []
@none.callable.retry(*errors[:-1])
def throw():
stack.append(1)
raise next(errors_it)
with pytest.raises(errors[-1]):
throw()
assert sum(stack) == len(errors)
@given(attempts=st.integers(max_value=0))
def test_retry_negative_attempts_valueeror(self, attempts):
"""Providing a negative attempts value should raise a ``ValueError``."""
assume(attempts < 0)
with pytest.raises(ValueError):
@none.callable.retry(EOFError, attempts=attempts)
def noop():
pass
@given(attempts=st.integers(min_value=0, max_value=MAX_RETRY))
def test_retry_up_to_max_retry(self, attempts):
"""Ensure the decorated function is retried up to the maximum of allowed
attempts.
"""
stack = []
@none.callable.retry(ValueError, attempts=attempts)
def throw():
stack.append(1)
raise ValueError
# On ``attempts`` set to ``0`` the function is not executed.
if attempts == 0:
throw()
else:
with pytest.raises(ValueError):
throw()
assert sum(stack) == attempts
class TestAsyncRetry(object):
"""Test cases for :class:`none.callable.aretry`."""
@pytest.mark.asyncio
async def test_aretry_no_exception_on_success(self):
"""The decorated function should not raise on success."""
errors = (TypeError, ValueError)
errors_it = iter(errors)
# Ensure the function executed.
stack = []
@none.callable.aretry(*errors)
async def throw():
# Raise retryable exceptions and complete successfully on
# exhaustion.
with suppress(StopIteration):
stack.append(1)
raise next(errors_it)
await throw()
assert sum(stack) == len(errors) + 1
@pytest.mark.asyncio
async def test_aretry_given_exceptions_only(self):
"""Ensure the decorated function is retried on provided exceptions."""
errors = (TypeError, ValueError, EOFError)
errors_it = iter(errors)
# Ensure the function executed.
stack = []
@none.callable.aretry(*errors[:-1])
async def throw():
stack.append(1)
raise next(errors_it)
with pytest.raises(errors[-1]):
await throw()
assert sum(stack) == len(errors)
@given(attempts=st.integers(max_value=0))
def test_aretry_negative_attempts_valueeror(self, attempts):
"""Providing a negative attempts value should raise a ``ValueError``."""
assume(attempts < 0)
with pytest.raises(ValueError):
@none.callable.aretry(EOFError, attempts=attempts)
async def noop():
pass
@pytest.mark.asyncio
@given(attempts=st.integers(min_value=0, max_value=MAX_RETRY))
async def test_aretry_up_to_max_retry(self, attempts):
"""Ensure the decorated function is retried up to the maximum of allowed
attempts.
"""
stack = []
@none.callable.aretry(ValueError, attempts=attempts)
async def throw():
stack.append(1)
raise ValueError
# On ``attempts`` set to ``0`` the function is not executed.
if attempts == 0:
await throw()
else:
with pytest.raises(ValueError):
await throw()
assert sum(stack) == attempts
| 28.068995 | 90 | 0.577428 | 3,821 | 32,953 | 4.810783 | 0.069877 | 0.090741 | 0.02089 | 0.02089 | 0.917963 | 0.912306 | 0.90371 | 0.845066 | 0.821891 | 0.793983 | 0 | 0.009037 | 0.314933 | 32,953 | 1,173 | 91 | 28.092924 | 0.805227 | 0.173793 | 0 | 0.824962 | 0 | 0 | 0.010529 | 0 | 0 | 0 | 0 | 0 | 0.082192 | 1 | 0.14003 | false | 0.054795 | 0.024353 | 0 | 0.207002 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
0da85a41394aa78ad303aa9a4998ec82785b636a | 147 | py | Python | conan/tools/meson/__init__.py | ShuangLiu1992/conan | b420ec1601febfa97f1f61d8da9ba083928ca7ea | [
"MIT"
] | null | null | null | conan/tools/meson/__init__.py | ShuangLiu1992/conan | b420ec1601febfa97f1f61d8da9ba083928ca7ea | [
"MIT"
] | null | null | null | conan/tools/meson/__init__.py | ShuangLiu1992/conan | b420ec1601febfa97f1f61d8da9ba083928ca7ea | [
"MIT"
] | null | null | null | from conan.tools.meson.toolchain import MesonToolchain
from conan.tools.meson.meson import Meson
from conan.tools.meson.layout import meson_layout
| 36.75 | 54 | 0.857143 | 22 | 147 | 5.681818 | 0.363636 | 0.216 | 0.336 | 0.456 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.081633 | 147 | 3 | 55 | 49 | 0.925926 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
21c33bf2fca543e985f3af33a63225f0566ac954 | 330 | py | Python | cdg/changedetection/__init__.py | dan-zam/cdg | f3e31b9dd96e97bc7a4a36c93d0b5318ea9b3d61 | [
"BSD-3-Clause"
] | 13 | 2018-07-02T17:42:15.000Z | 2019-09-05T07:36:58.000Z | cdg/changedetection/__init__.py | dzambon/cdg | 733ac7af7919b07c6ac9dae299b3289afd9e7d83 | [
"BSD-3-Clause"
] | 1 | 2018-07-02T17:18:28.000Z | 2018-07-02T17:18:28.000Z | cdg/changedetection/__init__.py | dzambon/cdg | 733ac7af7919b07c6ac9dae299b3289afd9e7d83 | [
"BSD-3-Clause"
] | 3 | 2019-10-30T08:40:15.000Z | 2020-09-10T07:37:47.000Z | # --------------------------------------------------------------------------------
# Copyright (c) 2017-2019, Daniele Zambon, All rights reserved.
# --------------------------------------------------------------------------------
from .changedetection import *
from .cusum import *
from .twosampletest import *
from .cpm import *
| 41.25 | 82 | 0.375758 | 21 | 330 | 5.904762 | 0.714286 | 0.241935 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026578 | 0.087879 | 330 | 7 | 83 | 47.142857 | 0.385382 | 0.675758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
21ea526a885a2c05c16dac03020428a40e73d2f4 | 5,792 | py | Python | tests/test_lib_cli.py | kusanagi/kusanagi-sdk-python | 4fd6843f39fa271afc7588b5ec58e7758b09fd61 | [
"MIT"
] | 1 | 2022-02-24T19:02:28.000Z | 2022-02-24T19:02:28.000Z | tests/test_lib_cli.py | jeronimoalbi/kusanagi-sdk-python | 4fd6843f39fa271afc7588b5ec58e7758b09fd61 | [
"MIT"
] | null | null | null | tests/test_lib_cli.py | jeronimoalbi/kusanagi-sdk-python | 4fd6843f39fa271afc7588b5ec58e7758b09fd61 | [
"MIT"
] | 1 | 2021-05-16T17:40:21.000Z | 2021-05-16T17:40:21.000Z | # Python 3 SDK for the KUSANAGI(tm) framework (http://kusanagi.io)
# Copyright (c) 2016-2021 KUSANAGI S.L. All rights reserved.
#
# Distributed under the MIT license.
#
# For the full copyright and license information, please view the LICENSE
# file that was distributed with this source code.
import pytest
def test_lib_parse_args(mocker):
from kusanagi.sdk.lib import logging
from kusanagi.sdk.lib.cli import PARSER
from kusanagi.sdk.lib.cli import parse_args
mocker.patch('inspect.getouterframes', return_value=[['', 'test.py']])
class Namespace(object):
pass
namespace = Namespace()
namespace.component = 'service'
namespace.name = 'foo'
namespace.version = '1.0.0'
namespace.framework_version = '3.0.0'
namespace.socket = '@kusanagi-1.2.3.4-77'
namespace.timeout = 10000
namespace.debug = True
namespace.var = ['foo=bar', 'bar=baz']
namespace.tcp = None
namespace.log_level = 7 # SYSLOG_NUMERIC[7] = DEBUG
PARSER.parse_args = mocker.Mock(return_value=namespace)
input_ = parse_args()
assert input_.get_path() == 'test.py'
assert input_.get_component() == 'service'
assert input_.get_name() == 'foo'
assert input_.get_version() == '1.0.0'
assert input_.get_framework_version() == '3.0.0'
assert input_.get_socket() == '@kusanagi-1.2.3.4-77'
assert input_.get_tcp() == 0
assert not input_.is_tcp_enabled()
assert input_.get_channel() == 'ipc://@kusanagi-1.2.3.4-77'
assert input_.get_timeout() == 10000
assert input_.is_debug()
assert input_.has_variable('foo')
assert not input_.has_variable('invalid')
assert input_.get_variable('foo') == 'bar'
assert input_.get_variables() == {'foo': 'bar', 'bar': 'baz'}
assert input_.has_logging()
assert input_.get_log_level() == logging.DEBUG
def test_lib_parse_key_value_list():
from kusanagi.sdk.lib.cli import parse_key_value_list
assert parse_key_value_list([]) == {}
assert parse_key_value_list(['foo=bar', 'bar=baz']) == {'foo': 'bar', 'bar': 'baz'}
with pytest.raises(ValueError):
parse_key_value_list([''])
def test_lib_input_ipc():
from kusanagi.sdk.lib import logging
from kusanagi.sdk.lib.cli import Input
variables = {'foo': 'bar', 'bar': 'baz'}
input_ = Input(
'test.py',
component='service',
name='foo',
version='1.0.0',
framework_version='3.0.0',
socket='@kusanagi-1.2.3.4-77',
timeout=10000,
debug=True,
var=variables,
tcp=None,
log_level=7, # SYSLOG_NUMERIC[7] = DEBUG
)
assert input_.get_path() == 'test.py'
assert input_.get_component() == 'service'
assert input_.get_name() == 'foo'
assert input_.get_version() == '1.0.0'
assert input_.get_framework_version() == '3.0.0'
assert input_.get_socket() == '@kusanagi-1.2.3.4-77'
assert input_.get_tcp() == 0
assert not input_.is_tcp_enabled()
assert input_.get_channel() == 'ipc://@kusanagi-1.2.3.4-77'
assert input_.get_timeout() == 10000
assert input_.is_debug()
assert input_.has_variable('foo')
assert not input_.has_variable('invalid')
assert input_.get_variable('foo') == 'bar'
assert input_.get_variables() == variables
assert input_.has_logging()
assert input_.get_log_level() == logging.DEBUG
def test_lib_input_ipc_default():
from kusanagi.sdk.lib import logging
from kusanagi.sdk.lib.cli import Input
variables = {'foo': 'bar', 'bar': 'baz'}
input_ = Input(
'test.py',
component='service',
name='foo',
version='1.0.0',
framework_version='3.0.0',
socket=None,
timeout=10000,
debug=True,
var=variables,
tcp=None,
log_level=7, # SYSLOG_NUMERIC[7] = DEBUG
)
assert input_.get_path() == 'test.py'
assert input_.get_component() == 'service'
assert input_.get_name() == 'foo'
assert input_.get_version() == '1.0.0'
assert input_.get_framework_version() == '3.0.0'
assert input_.get_socket() == '@kusanagi-service-foo-1-0-0'
assert input_.get_tcp() == 0
assert not input_.is_tcp_enabled()
assert input_.get_channel() == 'ipc://@kusanagi-service-foo-1-0-0'
assert input_.get_timeout() == 10000
assert input_.is_debug()
assert input_.has_variable('foo')
assert not input_.has_variable('invalid')
assert input_.get_variable('foo') == 'bar'
assert input_.get_variables() == variables
assert input_.has_logging()
assert input_.get_log_level() == logging.DEBUG
def test_lib_input_tcp():
from kusanagi.sdk.lib import logging
from kusanagi.sdk.lib.cli import Input
variables = {'foo': 'bar', 'bar': 'baz'}
input_ = Input(
'test.py',
component='service',
name='foo',
version='1.0.0',
framework_version='3.0.0',
socket=None,
timeout=10000,
debug=True,
var=variables,
tcp=77,
log_level=7, # SYSLOG_NUMERIC[7] = DEBUG
)
assert input_.get_path() == 'test.py'
assert input_.get_component() == 'service'
assert input_.get_name() == 'foo'
assert input_.get_version() == '1.0.0'
assert input_.get_framework_version() == '3.0.0'
assert input_.get_socket() == ''
assert input_.get_tcp() == 77
assert input_.is_tcp_enabled()
assert input_.get_channel() == 'tcp://127.0.0.1:77'
assert input_.get_timeout() == 10000
assert input_.is_debug()
assert input_.has_variable('foo')
assert not input_.has_variable('invalid')
assert input_.get_variable('foo') == 'bar'
assert input_.get_variables() == variables
assert input_.has_logging()
assert input_.get_log_level() == logging.DEBUG
| 33.287356 | 87 | 0.645028 | 782 | 5,792 | 4.523018 | 0.130435 | 0.189709 | 0.189992 | 0.050891 | 0.797569 | 0.782867 | 0.782867 | 0.748657 | 0.737913 | 0.712751 | 0 | 0.035317 | 0.208046 | 5,792 | 173 | 88 | 33.479769 | 0.735775 | 0.066126 | 0 | 0.719178 | 0 | 0 | 0.111173 | 0.024829 | 0 | 0 | 0 | 0 | 0.479452 | 1 | 0.034247 | false | 0.006849 | 0.075342 | 0 | 0.116438 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
21f59dfd7cb12eee3098305a7f06dc982671438a | 21,328 | py | Python | kingpin/tests/kazoo_utils/test_hosts.py | fakeNetflix/pinterest-repo-kingpin | baea08ae941a4e57edb9129658fe3e7d40e4d0c3 | [
"Apache-2.0"
] | 76 | 2016-01-27T21:16:53.000Z | 2021-09-23T02:23:49.000Z | kingpin/tests/kazoo_utils/test_hosts.py | fakeNetflix/pinterest-repo-kingpin | baea08ae941a4e57edb9129658fe3e7d40e4d0c3 | [
"Apache-2.0"
] | 2 | 2016-02-26T02:37:46.000Z | 2018-02-23T09:03:41.000Z | kingpin/tests/kazoo_utils/test_hosts.py | fakeNetflix/pinterest-repo-kingpin | baea08ae941a4e57edb9129658fe3e7d40e4d0c3 | [
"Apache-2.0"
] | 22 | 2016-01-27T21:16:58.000Z | 2020-12-24T11:26:01.000Z | #!/usr/bin/python
#
# Copyright 2016 Pinterest, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for classes is common/hosts.py."""
from collections import Counter
from mock import Mock, patch
import os
import tempfile
import testutil
import time
from unittest import TestCase
from kingpin.kazoo_utils import KazooClientManager, ServerSet, hosts, FileWatch
from kingpin.kazoo_utils.hosts import (BaseHostSelector, HostsProvider, RandomHostSelector)
ZK_HOSTS = ["datazk001:2181", "datazk002:2181"]
class HostSelectorTestCase(TestCase):
    """Tests for HostsProvider, BaseHostSelector and RandomHostSelector."""

    # Static fallback hosts used when no server set endpoints are live.
    HOST_LIST = ["host1:8080", "host2:8181"]
    PORT_LIST = [8080, 8181]
    HOST_PROVIDER_NAME = "test"
    SERVER_SET_PATH = "/test_host_selector"

    def setUp(self):
        super(HostSelectorTestCase, self).setUp()
        # Tests assume zookeeper-based discovery is enabled; re-enable it
        # here because some tests (e.g. test_invalid_use_zk_for_discovery)
        # flip the module-level flag off.
        hosts.USE_ZOOKEEPER_FOR_DISCOVERY = True

    def test_init_base_host_selector_class(self):
        """Test base initialization and functionality."""
        host_provider = HostsProvider([])
        base_host_selector = BaseHostSelector(host_provider)
        # Check that some base states are set.
        self.assertTrue(base_host_selector._last is None)
        self.assertTrue(base_host_selector._current is None)
        self.assertTrue(base_host_selector._select_time is None)
        self.assertEqual(base_host_selector._bad_hosts, {})
        self.assertEqual(base_host_selector._retry_time, 60)
        self.assertTrue(base_host_selector._host_provider is host_provider)
        # This is an abstract class. _chose_host() should raise an exception.
        self.assertRaises(NotImplementedError, base_host_selector._choose_host)

    def test_retrieving_and_invalidation(self):
        """Test host retrieval."""
        host_provider = HostsProvider(HostSelectorTestCase.HOST_LIST)
        base_host_selector = BaseHostSelector(
            host_provider, expire_time=0, retry_time=0,
            invalidation_threshold=1.0)
        self.assertTrue(base_host_selector.get_last_host() is None)
        with patch(hosts.__name__ + ".BaseHostSelector._choose_host",
                   new=Mock(return_value=HostSelectorTestCase.HOST_LIST[0])):
            # Get one host.
            host1 = base_host_selector.get_host()
            self.assertEqual(host1, HostSelectorTestCase.HOST_LIST[0])
            # If invalidated the state of the object changes.
            self.assertTrue(host1 not in base_host_selector._bad_hosts)
            base_host_selector.invalidate()
            self.assertTrue(host1 in base_host_selector._bad_hosts)
        # If called again, with retry_time being set to 0 bad hosts should be
        # invalidated.
        with patch(hosts.__name__ + ".BaseHostSelector._choose_host",
                   new=Mock(return_value=HostSelectorTestCase.HOST_LIST[1])):
            host2 = base_host_selector.get_host()
            # Now bad hosts should be empty
            self.assertTrue(not base_host_selector._bad_hosts)
            self.assertEqual(host2, HostSelectorTestCase.HOST_LIST[1])
            base_host_selector.invalidate()
            self.assertTrue(host2 in base_host_selector._bad_hosts)

    def test_reject_invalidation(self):
        """Test rejecting invalidation."""
        host_provider = HostsProvider(HostSelectorTestCase.HOST_LIST)
        base_host_selector = BaseHostSelector(host_provider, expire_time=0,
                                              retry_time=0)
        with patch(hosts.__name__ + ".BaseHostSelector._choose_host",
                   new=Mock(return_value=HostSelectorTestCase.HOST_LIST[0])):
            # Get one host.
            host1 = base_host_selector.get_host()
            self.assertEqual(host1, HostSelectorTestCase.HOST_LIST[0])
            # If invalidated the state of the object changes.
            self.assertTrue(host1 not in base_host_selector._bad_hosts)
            base_host_selector.invalidate()
            # Because 1 is larger than 2 * 0.2 = 0.4
            self.assertTrue(host1 not in base_host_selector._bad_hosts)
            base_host_selector._invalidation_threshold = 0.5
            host1 = base_host_selector.get_host()
            self.assertEqual(host1, HostSelectorTestCase.HOST_LIST[0])
            base_host_selector.invalidate()
            # Because 1 <= 2 * 0.5 = 1.0
            self.assertTrue(host1 in base_host_selector._bad_hosts)

    def test_random_host_selector(self):
        """Test the RandomHostSelector."""
        host_provider = HostsProvider(HostSelectorTestCase.HOST_LIST)
        random_host_selector = RandomHostSelector(
            host_provider, expire_time=0, retry_time=0,
            invalidation_threshold=1.0)
        # Note that we didn't have to mock _chose_host() call this time,
        # it should be im RandomHostSelector class already.
        some_host = random_host_selector.get_host()
        self.assertTrue(some_host in HostSelectorTestCase.HOST_LIST)
        self.assertEqual(random_host_selector._current, some_host)
        no_of_iterations = 250
        # If I run get_host() about 100 times I expect to have relatively
        # even distribution and all hosts in the host_list returned by now.
        returned_hosts = [random_host_selector.get_host()
                          for i in xrange(no_of_iterations)]
        host_counter = Counter(returned_hosts)
        # We expect that all calls happened.
        self.assertEqual(sum(host_counter.itervalues()), no_of_iterations)
        # We should have seen all the elements.
        self.assertEqual(set(host_counter),
                         set(HostSelectorTestCase.HOST_LIST))
        # But if we had left large expire_time only one host would be picked
        # up all the time, and we'll show that here.
        random_host_selector = RandomHostSelector(host_provider,
                                                  invalidation_threshold=1.0)
        returned_hosts = [random_host_selector.get_host()
                          for i in xrange(no_of_iterations)]
        host_counter = Counter(returned_hosts)
        self.assertEqual(len(list(host_counter)), 1)
        # Test invalidation.
        #
        # NOTE: named ``host_sequence`` (not ``hosts``) so the imported
        # ``hosts`` module used elsewhere in this class is not shadowed.
        host_sequence = [HostSelectorTestCase.HOST_LIST[0]]
        for i in xrange(4):
            host_sequence.append(HostSelectorTestCase.HOST_LIST[1])

        def random_select(*args):
            # Pop from the end: serves HOST_LIST[1] four times, then
            # HOST_LIST[0] once.
            return host_sequence.pop()

        choice_mock = Mock(side_effect=random_select)
        with patch("random.choice", new=choice_mock):
            random_host_selector = RandomHostSelector(
                host_provider, expire_time=0, retry_time=60,
                invalidation_threshold=1.0)
            host = random_host_selector.get_host()
            self.assertEqual(host, HostSelectorTestCase.HOST_LIST[1])
            random_host_selector.invalidate()
            # Because mock will return the bad host three times in a row,
            # this will force it to compute the set of good hosts
            host = random_host_selector.get_host()
            self.assertEqual(host, HostSelectorTestCase.HOST_LIST[0])
            # At this point, random.choice should have been called 5 times
            self.assertEqual(choice_mock.call_count, 5)

    @patch("kazoo.client.KazooClient.__new__",
           new=Mock(side_effect=testutil.get_mock_kazoo_client))
    def test_random_host_selector_with_serverset(self):
        testutil.initialize_kazoo_client_manager(ZK_HOSTS)
        kazoo_client = KazooClientManager().get_client()
        kazoo_client.ensure_path(HostSelectorTestCase.SERVER_SET_PATH)
        host_provider = HostsProvider(HostSelectorTestCase.PORT_LIST,
                                      HostSelectorTestCase.SERVER_SET_PATH)
        self.assertTrue(host_provider.initialized)
        self.assertTrue(host_provider.hosts)
        # Since there is no live hosts in the server set, host provider should
        # still use the static host list.
        self.assertEqual(host_provider._current_host_tuple,
                         host_provider._static_host_tuple)
        random_host_selector = RandomHostSelector(
            host_provider, expire_time=0, retry_time=0,
            invalidation_threshold=1.0)
        self.assertTrue(random_host_selector.get_host() in
                        HostSelectorTestCase.PORT_LIST)
        server_set = ServerSet(HostSelectorTestCase.SERVER_SET_PATH, ZK_HOSTS)
        g = server_set.join(HostSelectorTestCase.PORT_LIST[0], use_ip=False)
        g.get()
        no_of_iterations = 100
        # After the first endpoint joins, random host selector should only
        # start to use hosts in the server set.
        returned_hosts = [random_host_selector.get_host()
                          for i in xrange(no_of_iterations)]
        self.assertEqual(len(set(returned_hosts)), 1)
        self.assertEqual(len(host_provider.hosts), 1)
        g = server_set.join(HostSelectorTestCase.PORT_LIST[1], use_ip=False)
        g.get()
        # After the second endpoint joins the server set, random host selector
        # should return both endpoints now.
        returned_hosts = [random_host_selector.get_host()
                          for i in xrange(no_of_iterations)]
        self.assertEqual(len(set(returned_hosts)), 2)
        self.assertEqual(len(host_provider.hosts), 2)

    def test_invalid_use_zk_for_discovery(self):
        """ Testing invalid USE_ZOOKEEPER_FOR_DISCOVERY """
        hosts.USE_ZOOKEEPER_FOR_DISCOVERY = False
        # Passing a server set path while zookeeper discovery is disabled
        # must be rejected by HostsProvider.
        self.assertRaises(Exception, HostsProvider,
                          HostSelectorTestCase.HOST_LIST, "/")
class HostSelectorWithLocalFileTestCase(TestCase):
    """
    This class has exact test set as the class above. Every time a
    HostProvider is initialized, it takes an additional file path
    argument. Although adding this file path argument does not change
    the code path of all unit tests, we want to keep the exact test set
    here to make sure having the local file does not change any behavior
    of HostProvider.
    """
    HOST_LIST = ["host11:8080", "host12:8181"]
    HOST_PROVIDER_NAME = "test_provider"
    # Initialize a singleton file watch with low wait time
    FILE_WATCH = FileWatch(polling_wait_in_seconds=0.5)

    def setUp(self):
        super(HostSelectorWithLocalFileTestCase, self).setUp()
        hosts.USE_ZOOKEEPER_FOR_DISCOVERY = True

    def _create_host_file(self, host_lines=None):
        """Create a temporary host file and return its path.

        mkstemp() returns an already-open descriptor; it is closed right
        away so the tests do not leak file descriptors. ``host_lines``,
        when given, are written newline-separated.
        """
        fd, tmp_file = tempfile.mkstemp()
        os.close(fd)
        if host_lines:
            with open(tmp_file, 'w') as f:
                f.write('\n'.join(host_lines))
        return tmp_file

    def _cleanup_host_file(self, tmp_file):
        """Drop all file watches and remove the temporary host file."""
        HostSelectorWithLocalFileTestCase.FILE_WATCH._clear_all_watches()
        os.remove(tmp_file)

    def test_init_base_host_selector_class(self):
        """Test base initialization and functionality."""
        tmp_file = self._create_host_file()
        host_provider = HostsProvider([], file_path=tmp_file)
        base_host_selector = BaseHostSelector(host_provider)
        # Check that some base states are set.
        self.assertTrue(base_host_selector._last is None)
        self.assertTrue(base_host_selector._current is None)
        self.assertTrue(base_host_selector._select_time is None)
        self.assertEqual(base_host_selector._bad_hosts, {})
        self.assertEqual(base_host_selector._retry_time, 60)
        self.assertTrue(base_host_selector._host_provider is host_provider)
        # This is an abstract class. _choose_host() should raise an exception.
        self.assertRaises(NotImplementedError, base_host_selector._choose_host)
        self._cleanup_host_file(tmp_file)

    def test_retrieving_and_invalidation(self):
        """Test host retrieval."""
        tmp_file = self._create_host_file(
            HostSelectorWithLocalFileTestCase.HOST_LIST)
        host_provider = HostsProvider(
            HostSelectorWithLocalFileTestCase.HOST_LIST, file_path=tmp_file)
        base_host_selector = BaseHostSelector(
            host_provider, expire_time=0, retry_time=0,
            invalidation_threshold=1.0)
        self.assertTrue(base_host_selector.get_last_host() is None)
        with patch(hosts.__name__ + ".BaseHostSelector._choose_host",
                   new=Mock(return_value=HostSelectorWithLocalFileTestCase.HOST_LIST[0])):
            # Get one host.
            host1 = base_host_selector.get_host()
            self.assertEqual(host1, HostSelectorWithLocalFileTestCase.HOST_LIST[0])
            # If invalidated the state of the object changes.
            self.assertTrue(host1 not in base_host_selector._bad_hosts)
            base_host_selector.invalidate()
            self.assertTrue(host1 in base_host_selector._bad_hosts)
        # If called again, with retry_time being set to 0 bad hosts should be
        # invalidated.
        with patch(hosts.__name__ + ".BaseHostSelector._choose_host",
                   new=Mock(return_value=HostSelectorWithLocalFileTestCase.HOST_LIST[1])):
            host2 = base_host_selector.get_host()
            # Now bad hosts should be empty
            self.assertTrue(not base_host_selector._bad_hosts)
            self.assertEqual(host2, HostSelectorWithLocalFileTestCase.HOST_LIST[1])
            base_host_selector.invalidate()
            self.assertTrue(host2 in base_host_selector._bad_hosts)
        self._cleanup_host_file(tmp_file)

    def test_reject_invalidation(self):
        """Test rejecting invalidation."""
        tmp_file = self._create_host_file(
            HostSelectorWithLocalFileTestCase.HOST_LIST)
        host_provider = HostsProvider(
            HostSelectorWithLocalFileTestCase.HOST_LIST, file_path=tmp_file)
        base_host_selector = BaseHostSelector(host_provider, expire_time=0, retry_time=0)
        with patch(hosts.__name__ + ".BaseHostSelector._choose_host",
                   new=Mock(return_value=HostSelectorWithLocalFileTestCase.HOST_LIST[0])):
            # Get one host.
            host1 = base_host_selector.get_host()
            self.assertEqual(host1, HostSelectorWithLocalFileTestCase.HOST_LIST[0])
            # If invalidated the state of the object changes.
            self.assertTrue(host1 not in base_host_selector._bad_hosts)
            base_host_selector.invalidate()
            # Rejected: 1 bad host is larger than 2 hosts * 0.2 = 0.4
            # (presumably 0.2 is the default invalidation_threshold — verify).
            self.assertTrue(host1 not in base_host_selector._bad_hosts)
            base_host_selector._invalidation_threshold = 0.5
            host1 = base_host_selector.get_host()
            self.assertEqual(host1, HostSelectorWithLocalFileTestCase.HOST_LIST[0])
            base_host_selector.invalidate()
            # Accepted: 1 <= 2 * 0.5 = 1.0
            self.assertTrue(host1 in base_host_selector._bad_hosts)
        self._cleanup_host_file(tmp_file)

    def test_random_host_selector(self):
        """Test the RandomHostSelector."""
        tmp_file = self._create_host_file(
            HostSelectorWithLocalFileTestCase.HOST_LIST)
        host_provider = HostsProvider(HostSelectorWithLocalFileTestCase.HOST_LIST,
                                      file_path=tmp_file)
        random_host_selector = RandomHostSelector(
            host_provider, expire_time=0, retry_time=0,
            invalidation_threshold=1.0)
        # No need to mock _choose_host() here; RandomHostSelector provides it.
        some_host = random_host_selector.get_host()
        self.assertTrue(some_host in HostSelectorWithLocalFileTestCase.HOST_LIST)
        self.assertEqual(random_host_selector._current, some_host)
        no_of_iterations = 250
        # With enough get_host() calls we expect a relatively even
        # distribution covering every host in the host list.
        returned_hosts = [random_host_selector.get_host()
                          for i in xrange(no_of_iterations)]
        host_counter = Counter(returned_hosts)
        # We expect that all calls happened.
        self.assertEqual(sum(host_counter.itervalues()), no_of_iterations)
        # We should have seen all the elements.
        self.assertEqual(set(host_counter),
                         set(HostSelectorWithLocalFileTestCase.HOST_LIST))
        # But if we had left large expire_time only one host would be picked
        # up all the time, and we'll show that here.
        random_host_selector = RandomHostSelector(host_provider,
                                                  invalidation_threshold=1.0)
        returned_hosts = [random_host_selector.get_host()
                          for i in xrange(no_of_iterations)]
        host_counter = Counter(returned_hosts)
        self.assertEqual(len(list(host_counter)), 1)
        # Test invalidation. The list is named selection_stack (not "hosts")
        # so it does not shadow the imported hosts module.
        selection_stack = [HostSelectorWithLocalFileTestCase.HOST_LIST[0]]
        for i in xrange(4):
            selection_stack.append(HostSelectorWithLocalFileTestCase.HOST_LIST[1])

        def random_select(*args):
            return selection_stack.pop()
        mock = Mock(side_effect=random_select)
        with patch("random.choice", new=mock):
            random_host_selector = RandomHostSelector(
                host_provider, expire_time=0, retry_time=60,
                invalidation_threshold=1.0)
            host = random_host_selector.get_host()
            self.assertEqual(host, HostSelectorWithLocalFileTestCase.HOST_LIST[1])
            random_host_selector.invalidate()
            # Because mock will return the bad host three times in a row,
            # this will force it to compute the set of good hosts
            host = random_host_selector.get_host()
            self.assertEqual(host, HostSelectorWithLocalFileTestCase.HOST_LIST[0])
            # At this point, random.choice should have been called 5 times
            self.assertEqual(mock.call_count, 5)
        self._cleanup_host_file(tmp_file)

    def test_random_host_selector_with_serverset(self):
        """Selector must follow host additions in the local server set file."""
        # Start with a single host in the local server set file to simulate
        # the first endpoint joining.
        tmp_file = self._create_host_file(
            [HostSelectorWithLocalFileTestCase.HOST_LIST[0]])
        HostSelectorWithLocalFileTestCase.FILE_WATCH._check_file_updates()
        host_provider = HostsProvider(
            HostSelectorWithLocalFileTestCase.HOST_LIST, file_path=tmp_file)
        self.assertTrue(host_provider.initialized)
        self.assertTrue(host_provider.hosts)
        self.assertEqual(host_provider._current_host_tuple,
                         (HostSelectorWithLocalFileTestCase.HOST_LIST[0],))
        random_host_selector = RandomHostSelector(
            host_provider, expire_time=0, retry_time=0,
            invalidation_threshold=1.0)
        self.assertTrue(random_host_selector.get_host() in
                        HostSelectorWithLocalFileTestCase.HOST_LIST)
        no_of_iterations = 100
        # After the first endpoint joins, random host selector should only
        # start to use hosts in the server set.
        returned_hosts = [random_host_selector.get_host()
                          for i in xrange(no_of_iterations)]
        self.assertEqual(len(set(returned_hosts)), 1)
        self.assertEqual(len(host_provider.hosts), 1)
        # Sleep so the file mtime changes, then append a second endpoint.
        time.sleep(1)
        with open(tmp_file, 'a') as f:
            f.write('\n' + HostSelectorWithLocalFileTestCase.HOST_LIST[1])
        HostSelectorWithLocalFileTestCase.FILE_WATCH._check_file_updates()
        # After the second endpoint joins the server set, random host selector
        # should return both endpoints now.
        returned_hosts = [random_host_selector.get_host()
                          for i in xrange(no_of_iterations)]
        self.assertEqual(len(set(returned_hosts)), 2)
        self.assertEqual(len(host_provider.hosts), 2)
        self._cleanup_host_file(tmp_file)

    def test_invalid_use_zk_for_discovery(self):
        """Test invalid USE_ZOOKEEPER_FOR_DISCOVERY setting."""
        tmp_file = self._create_host_file()
        hosts.USE_ZOOKEEPER_FOR_DISCOVERY = False
        self.assertRaises(Exception, HostsProvider,
                          HostSelectorWithLocalFileTestCase.HOST_LIST,
                          file_path=tmp_file)
        self._cleanup_host_file(tmp_file)

    def test_both_zk_and_file_paths(self):
        """Passing both a ZK path and a file path must be rejected."""
        tmp_file = self._create_host_file()
        hosts.USE_ZOOKEEPER_FOR_DISCOVERY = False
        self.assertRaises(Exception, HostsProvider,
                          HostSelectorWithLocalFileTestCase.HOST_LIST,
                          "/foo",
                          file_path=tmp_file)
        self._cleanup_host_file(tmp_file)
| 49.029885 | 102 | 0.677935 | 2,500 | 21,328 | 5.5076 | 0.124 | 0.081052 | 0.065074 | 0.033118 | 0.822282 | 0.80427 | 0.800421 | 0.788075 | 0.762873 | 0.749655 | 0 | 0.013707 | 0.247468 | 21,328 | 434 | 103 | 49.142857 | 0.844174 | 0.188438 | 0 | 0.772109 | 0 | 0 | 0.02112 | 0.012369 | 0 | 0 | 0 | 0 | 0.248299 | 1 | 0.057823 | false | 0 | 0.030612 | 0.006803 | 0.12585 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
21ff8f7567a94caf5d4df95ec5833d927cd6d817 | 570 | py | Python | c19_synthesis/None.py | octaviomtz/nbdev_c19_synthesis | 45079757af6c05c3763d5c7147f566862171de9b | [
"Apache-2.0"
] | null | null | null | c19_synthesis/None.py | octaviomtz/nbdev_c19_synthesis | 45079757af6c05c3763d5c7147f566862171de9b | [
"Apache-2.0"
] | null | null | null | c19_synthesis/None.py | octaviomtz/nbdev_c19_synthesis | 45079757af6c05c3763d5c7147f566862171de9b | [
"Apache-2.0"
] | null | null | null |
# NOTE(review): auto-generated nbdev export of notebook cells. The lines
# beginning with "!" are IPython shell magics and are NOT valid plain
# Python — this module only executes inside an IPython/Jupyter session.
# The clone/import pairs repeat because each notebook re-run re-exported
# the same cells; regenerating the export from a clean notebook would
# deduplicate them.

# Cell
!git clone -q https://github.com/octaviomtz/nbdev_c19_synthesis
# Cell
from nbdev_c19_synthesis.c19_synthesis.core import *
# Cell
!git clone -q https://github.com/octaviomtz/nbdev_c19_synthesis
# Cell
from nbdev_c19_synthesis.c19_synthesis.core import *
# Cell
!git clone -q https://github.com/octaviomtz/nbdev_c19_synthesis
# Cell
import numpy as np
from nbdev_c19_synthesis.c19_synthesis.core import *
# Cell
!git clone -q https://github.com/octaviomtz/nbdev_c19_synthesis
# Cell
import numpy as np
from nbdev_c19_synthesis.c19_synthesis.core import *
1d05fb9df5809909e0df1ffb7f2114bc7641c74f | 15,421 | py | Python | papercode/ealstm.py | lixx5000/Global-deep-learning-regionalization-from-physical-descriptors-to-random-vectors | 0b03362d8c1692adcd374fa871d6b77dd4c05212 | [
"MIT"
] | null | null | null | papercode/ealstm.py | lixx5000/Global-deep-learning-regionalization-from-physical-descriptors-to-random-vectors | 0b03362d8c1692adcd374fa871d6b77dd4c05212 | [
"MIT"
] | null | null | null | papercode/ealstm.py | lixx5000/Global-deep-learning-regionalization-from-physical-descriptors-to-random-vectors | 0b03362d8c1692adcd374fa871d6b77dd4c05212 | [
"MIT"
] | null | null | null | """
This file is part of the accompanying code to our manuscript (currently under review):
Xiang Li, Ankush Khandelwal, Xiaowei Jia, Kelly Cutler, Rahul Ghosh, Arvind Renganathan, Kshitij Tayal, Shaoming Xu,
John Nieber, Christopher Duffy, Michael Steinbach, Vipin Kumar. 2022.
“Regionalization in a global hydrologic deep learning model: from physical descriptors to random vectors”
Water Resources Research (Under review). Preprint is available: https://www.essoar.org/doi/10.1002/essoar.10510083.1
A majority of this code is built on the Kratzert. et al (2019), see their github repo: https://github.com/kratzert/ealstm_regional_modeling
"""
from typing import Tuple
import torch
import torch.nn as nn
class EALSTM(nn.Module):
    """Entity-Aware LSTM (EA-LSTM).

    A standard LSTM cell in which the input gate is not recomputed at every
    time step from the dynamic inputs; instead it is derived once per sample
    from a set of static features and reused across the whole sequence.

    Parameters
    ----------
    input_size_dyn : int
        Number of dynamic features, passed to the LSTM at each time step.
    input_size_stat : int
        Number of static features, used to compute the (fixed) input gate.
    hidden_size : int
        Number of hidden/memory cells.
    batch_first : bool, optional
        If True, inputs are expected as [batch, seq, features]; otherwise
        [seq, batch, features]. Defaults to True.
    initial_forget_bias : int, optional
        Initial value of the forget gate bias, by default 0.
    """

    def __init__(self,
                 input_size_dyn: int,
                 input_size_stat: int,
                 hidden_size: int,
                 batch_first: bool = True,
                 initial_forget_bias: int = 0):
        super(EALSTM, self).__init__()
        self.input_size_dyn = input_size_dyn
        self.input_size_stat = input_size_stat
        self.hidden_size = hidden_size
        self.batch_first = batch_first
        self.initial_forget_bias = initial_forget_bias

        # Learnable parameters. weight_ih/weight_hh hold the forget, output
        # and cell-input gates stacked side by side (hence 3 * hidden_size);
        # weight_sh maps the static features to the time-invariant input gate.
        self.weight_ih = nn.Parameter(torch.FloatTensor(input_size_dyn, 3 * hidden_size))
        self.weight_hh = nn.Parameter(torch.FloatTensor(hidden_size, 3 * hidden_size))
        self.weight_sh = nn.Parameter(torch.FloatTensor(input_size_stat, hidden_size))
        self.bias = nn.Parameter(torch.FloatTensor(3 * hidden_size))
        self.bias_s = nn.Parameter(torch.FloatTensor(hidden_size))

        self.reset_parameters()

    def reset_parameters(self):
        """(Re-)initialize all learnable parameters of the LSTM."""
        nn.init.orthogonal_(self.weight_ih.data)
        nn.init.orthogonal_(self.weight_sh)
        # Identity initialization of the recurrent weights of every gate.
        hh_init = torch.eye(self.hidden_size)
        hh_init = hh_init.repeat(1, 3)
        self.weight_hh.data = hh_init
        nn.init.constant_(self.bias.data, val=0)
        nn.init.constant_(self.bias_s.data, val=0)
        if self.initial_forget_bias != 0:
            self.bias.data[:self.hidden_size] = self.initial_forget_bias

    def forward(self, x_d: torch.Tensor, x_s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Run the EA-LSTM over one batch of sequences.

        Parameters
        ----------
        x_d : torch.Tensor
            Batch of dynamic-feature sequences; layout follows batch_first.
        x_s : torch.Tensor
            Batch of static features, one row per sample.

        Returns
        -------
        h_n : torch.Tensor
            Hidden state of every time step of every sample.
        c_n : torch.Tensor
            Cell state of every time step of every sample.
        """
        if self.batch_first:
            x_d = x_d.transpose(0, 1)

        seq_len, batch_size, _ = x_d.size()
        h_t = x_d.new_zeros(batch_size, self.hidden_size)
        c_t = x_d.new_zeros(batch_size, self.hidden_size)

        # Per-step hidden/cell states collected here, then stacked.
        hidden_states = []
        cell_states = []

        # Broadcast the bias vectors across the batch once.
        bias_batch = self.bias.unsqueeze(0).expand(batch_size, *self.bias.size())
        bias_s_batch = self.bias_s.unsqueeze(0).expand(batch_size, *self.bias_s.size())
        # The input gate depends only on the static features, so it is
        # computed a single time and reused at every step.
        input_gate = torch.sigmoid(torch.addmm(bias_s_batch, x_s, self.weight_sh))

        for step in range(seq_len):
            preact = (torch.addmm(bias_batch, h_t, self.weight_hh) +
                      torch.mm(x_d[step], self.weight_ih))
            forget_gate, out_gate, cell_input = preact.chunk(3, 1)
            c_t = torch.sigmoid(forget_gate) * c_t + input_gate * torch.tanh(cell_input)
            h_t = torch.sigmoid(out_gate) * torch.tanh(c_t)
            hidden_states.append(h_t)
            cell_states.append(c_t)

        h_n = torch.stack(hidden_states, 0)
        c_n = torch.stack(cell_states, 0)

        if self.batch_first:
            h_n = h_n.transpose(0, 1)
            c_n = c_n.transpose(0, 1)
        return h_n, c_n
class SRLSTM_EA(nn.Module):
    """Entity-Aware LSTM with an embedding layer on the static inputs.

    Identical to the EA-LSTM except that the static features are first
    passed through one fully connected layer with ReLU activation (width
    ``ann_1``) before being mapped to the input gate.

    Parameters
    ----------
    input_size_dyn : int
        Number of dynamic features, which are those, passed to the LSTM at each time step.
    input_size_stat : int
        Number of static features, which are those that are used to modulate the input gate.
    hidden_size : int
        Number of hidden/memory cells.
    ann_1 : int
        Width of the embedding (fully connected) layer applied to the
        static features before the input gate is computed.
    batch_first : bool, optional
        If True, expects the batch inputs to be of shape [batch, seq, features] otherwise, the
        shape has to be [seq, batch, features], by default True.
    initial_forget_bias : int, optional
        Value of the initial forget gate bias, by default 0
    """

    def __init__(self,
                 input_size_dyn: int,
                 input_size_stat: int,
                 hidden_size: int,
                 ann_1: int,
                 batch_first: bool = True,
                 initial_forget_bias: int = 0):
        super(SRLSTM_EA, self).__init__()
        self.input_size_dyn = input_size_dyn
        self.input_size_stat = input_size_stat
        self.hidden_size = hidden_size
        self.batch_first = batch_first
        self.initial_forget_bias = initial_forget_bias
        self.ann_1 = ann_1

        # Learnable parameters: the static embedding (weight_ann_1/bias_ann_1),
        # its projection to the input gate (weight_sh/bias_s), and the
        # standard LSTM weights with 3 gates stacked side by side.
        self.weight_ann_1 = nn.Parameter(torch.FloatTensor(input_size_stat, ann_1))
        self.bias_ann_1 = nn.Parameter(torch.FloatTensor(ann_1))
        self.weight_sh = nn.Parameter(torch.FloatTensor(ann_1, hidden_size))
        self.bias_s = nn.Parameter(torch.FloatTensor(hidden_size))
        self.weight_ih = nn.Parameter(torch.FloatTensor(input_size_dyn, 3 * hidden_size))
        self.weight_hh = nn.Parameter(torch.FloatTensor(hidden_size, 3 * hidden_size))
        self.bias = nn.Parameter(torch.FloatTensor(3 * hidden_size))

        # initialize parameters
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize all learnable parameters of the LSTM"""
        nn.init.orthogonal_(self.weight_ih.data)
        nn.init.orthogonal_(self.weight_ann_1)
        nn.init.orthogonal_(self.weight_sh)
        # Identity initialization of the recurrent weights of every gate.
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 3)
        self.weight_hh.data = weight_hh_data
        nn.init.constant_(self.bias.data, val=0)
        nn.init.constant_(self.bias_s.data, val=0)
        nn.init.constant_(self.bias_ann_1.data, val=0)
        if self.initial_forget_bias != 0:
            self.bias.data[:self.hidden_size] = self.initial_forget_bias

    def forward(self, x_d: torch.Tensor, x_s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Run the network over one batch of sequences.

        Parameters
        ----------
        x_d : torch.Tensor
            Tensor, containing a batch of sequences of the dynamic features. Shape has to match
            the format specified with batch_first.
        x_s : torch.Tensor
            Tensor, containing a batch of static features.

        Returns
        -------
        h_n : torch.Tensor
            The hidden states of each time step of each sample in the batch.
        c_n : torch.Tensor
            The cell states of each time step of each sample in the batch.
        """
        if self.batch_first:
            x_d = x_d.transpose(0, 1)

        seq_len, batch_size, _ = x_d.size()
        h_0 = x_d.data.new(batch_size, self.hidden_size).zero_()
        c_0 = x_d.data.new(batch_size, self.hidden_size).zero_()
        h_x = (h_0, c_0)

        # empty lists to temporally store all intermediate hidden/cell states
        h_n, c_n = [], []

        # expand bias vectors to batch size
        bias_batch = (self.bias.unsqueeze(0).expand(batch_size, *self.bias.size()))

        # The input gate depends only on static features, so it is computed
        # once: static embedding (ReLU) followed by sigmoid projection.
        bias_ann_1_batch = (self.bias_ann_1.unsqueeze(0).expand(batch_size, *self.bias_ann_1.size()))
        bias_s_batch = (self.bias_s.unsqueeze(0).expand(batch_size, *self.bias_s.size()))
        # ReLU (rather than tanh) chosen for the static embedding per review advice.
        s_ann_1 = torch.nn.functional.relu(torch.addmm(bias_ann_1_batch, x_s, self.weight_ann_1))
        # Project the embedding onto the input-gate dimension.
        i = torch.sigmoid(torch.addmm(bias_s_batch, s_ann_1, self.weight_sh))

        # perform forward steps over input sequence
        for t in range(seq_len):
            h_0, c_0 = h_x

            # calculate gates
            gates = (torch.addmm(bias_batch, h_0, self.weight_hh) +
                     torch.mm(x_d[t], self.weight_ih))
            f, o, g = gates.chunk(3, 1)

            c_1 = torch.sigmoid(f) * c_0 + i * torch.tanh(g)
            h_1 = torch.sigmoid(o) * torch.tanh(c_1)

            # store intermediate hidden/cell state in list
            h_n.append(h_1)
            c_n.append(c_1)

            h_x = (h_1, c_1)

        h_n = torch.stack(h_n, 0)
        c_n = torch.stack(c_n, 0)

        if self.batch_first:
            h_n = h_n.transpose(0, 1)
            c_n = c_n.transpose(0, 1)
        return h_n, c_n
class FMLSTM(nn.Module):
    """Feature-Modulation LSTM (FM-LSTM).

    A standard 4-gate LSTM whose per-step hidden output is multiplied by a
    static modulation vector ``p = sigmoid(W_s x_s + b_s)`` computed once
    per sample from the static features. Note that the recurrence itself
    uses the *unmodulated* hidden state (see forward()).

    Parameters
    ----------
    input_size_dyn : int
        Number of dynamic features, which are those, passed to the LSTM at each time step.
    input_size_stat : int
        Number of static features, used to compute the output-modulation vector.
    hidden_size : int
        Number of hidden/memory cells.
    batch_first : bool, optional
        If True, expects the batch inputs to be of shape [batch, seq, features] otherwise, the
        shape has to be [seq, batch, features], by default True.
    initial_forget_bias : int, optional
        Value of the initial forget gate bias, by default 0
    """

    def __init__(self,
                 input_size_dyn: int,
                 input_size_stat: int,
                 hidden_size: int,
                 batch_first: bool = True,
                 initial_forget_bias: int = 0):
        super(FMLSTM, self).__init__()
        self.input_size_dyn = input_size_dyn
        self.input_size_stat = input_size_stat
        self.hidden_size = hidden_size
        self.batch_first = batch_first
        self.initial_forget_bias = initial_forget_bias

        # Learnable parameters: unlike the EA-LSTM, all 4 gates (input,
        # forget, output, cell input) come from the dynamic/recurrent
        # weights; weight_sh produces the static modulation vector.
        self.weight_ih = nn.Parameter(torch.FloatTensor(input_size_dyn, 4 * hidden_size))
        self.weight_hh = nn.Parameter(torch.FloatTensor(hidden_size, 4 * hidden_size))
        self.weight_sh = nn.Parameter(torch.FloatTensor(input_size_stat, hidden_size))
        self.bias = nn.Parameter(torch.FloatTensor(4 * hidden_size))
        self.bias_s = nn.Parameter(torch.FloatTensor(hidden_size))

        # initialize parameters
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize all learnable parameters of the LSTM"""
        nn.init.orthogonal_(self.weight_ih.data)
        nn.init.orthogonal_(self.weight_sh)
        # Identity initialization of the recurrent weights of every gate.
        weight_hh_data = torch.eye(self.hidden_size)
        weight_hh_data = weight_hh_data.repeat(1, 4)
        self.weight_hh.data = weight_hh_data
        nn.init.constant_(self.bias.data, val=0)
        nn.init.constant_(self.bias_s.data, val=0)
        if self.initial_forget_bias != 0:
            self.bias.data[:self.hidden_size] = self.initial_forget_bias

    def forward(self, x_d: torch.Tensor, x_s: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Run the FM-LSTM over one batch of sequences.

        Parameters
        ----------
        x_d : torch.Tensor
            Tensor, containing a batch of sequences of the dynamic features. Shape has to match
            the format specified with batch_first.
        x_s : torch.Tensor
            Tensor, containing a batch of static features.

        Returns
        -------
        h_n : torch.Tensor
            The modulated hidden states of each time step of each sample in the batch.
        c_n : torch.Tensor
            The cell states of each time step of each sample in the batch.
        """
        if self.batch_first:
            x_d = x_d.transpose(0, 1)

        seq_len, batch_size, _ = x_d.size()
        h_0 = x_d.data.new(batch_size, self.hidden_size).zero_()
        c_0 = x_d.data.new(batch_size, self.hidden_size).zero_()
        h_x = (h_0, c_0)

        # empty lists to temporally store all intermediate hidden/cell states
        h_n, c_n = [], []

        # expand bias vectors to batch size
        bias_batch = (self.bias.unsqueeze(0).expand(batch_size, *self.bias.size()))

        # Compute the static output-modulation vector only once, because the
        # static inputs do not change over the sequence.
        bias_s_batch = (self.bias_s.unsqueeze(0).expand(batch_size, *self.bias_s.size()))
        p = torch.sigmoid(torch.addmm(bias_s_batch, x_s, self.weight_sh))

        # perform forward steps over input sequence
        for t in range(seq_len):
            h_0, c_0 = h_x

            # calculate gates
            gates = (torch.addmm(bias_batch, h_0, self.weight_hh) +
                     torch.mm(x_d[t], self.weight_ih))
            i, f, o, g = gates.chunk(4, 1)

            c_1 = torch.sigmoid(f) * c_0 + torch.sigmoid(i) * torch.tanh(g)
            h_1 = torch.sigmoid(o) * torch.tanh(c_1)
            # Modulate the hidden output with the static vector; only the
            # modulated state h_r is returned.
            h_r = p * h_1

            # store intermediate hidden/cell state in list
            h_n.append(h_r)
            c_n.append(c_1)

            # The recurrence continues from the unmodulated hidden state h_1.
            h_x = (h_1, c_1)

        h_n = torch.stack(h_n, 0)
        c_n = torch.stack(c_n, 0)

        if self.batch_first:
            h_n = h_n.transpose(0, 1)
            c_n = c_n.transpose(0, 1)
        return h_n, c_n
| 38.076543 | 158 | 0.609169 | 2,162 | 15,421 | 4.118871 | 0.115634 | 0.047165 | 0.034363 | 0.051544 | 0.899045 | 0.891522 | 0.886693 | 0.869175 | 0.860865 | 0.860865 | 0 | 0.014161 | 0.299397 | 15,421 | 404 | 159 | 38.170792 | 0.81007 | 0.341677 | 0 | 0.838889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007426 | 0 | 1 | 0.05 | false | 0 | 0.016667 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
1d09e9bcd4e047b15787fb294fa4c6d80d96598c | 392 | py | Python | plugins/infoblox/icon_infoblox/actions/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 46 | 2019-06-05T20:47:58.000Z | 2022-03-29T10:18:01.000Z | plugins/infoblox/icon_infoblox/actions/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 386 | 2019-06-07T20:20:39.000Z | 2022-03-30T17:35:01.000Z | plugins/infoblox/icon_infoblox/actions/__init__.py | lukaszlaszuk/insightconnect-plugins | 8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | [
"MIT"
] | 43 | 2019-07-09T14:13:58.000Z | 2022-03-28T12:04:46.000Z | # GENERATED BY KOMAND SDK - DO NOT EDIT
from .add_fixed_address.action import AddFixedAddress
from .add_host.action import AddHost
from .delete_host.action import DeleteHost
from .get_host.action import GetHost
from .modify_host.action import ModifyHost
from .search_by_ip.action import SearchByIp
from .search_by_mac.action import SearchByMac
from .search_by_name.action import SearchByName
| 39.2 | 53 | 0.84949 | 59 | 392 | 5.440678 | 0.491525 | 0.299065 | 0.199377 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.104592 | 392 | 9 | 54 | 43.555556 | 0.91453 | 0.094388 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
df03cc163de764958e8cbc94efa443ab1829c4e5 | 12,084 | py | Python | addons/mrp_account/tests/test_valuation_layers.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/mrp_account/tests/test_valuation_layers.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | addons/mrp_account/tests/test_valuation_layers.py | SHIVJITH/Odoo_Machine_Test | 310497a9872db7844b521e6dab5f7a9f61d365a4 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
""" Implementation of "INVENTORY VALUATION TESTS (With valuation layers)" spreadsheet. """
from odoo.addons.stock_account.tests.test_stockvaluationlayer import TestStockValuationCommon
from odoo.tests import Form
class TestMrpValuationCommon(TestStockValuationCommon):
    """Shared fixtures/helpers for the MRP valuation-layer tests."""

    @classmethod
    def setUpClass(cls):
        """Create a storable component and a one-line BoM producing product1."""
        super(TestMrpValuationCommon, cls).setUpClass()
        cls.component_category = cls.env['product.category'].create(
            {'name': 'category2'}
        )
        component_vals = {
            'name': 'component1',
            'type': 'product',
            'categ_id': cls.component_category.id,
        }
        cls.component = cls.env['product.product'].create(component_vals)
        bom_vals = {
            'product_id': cls.product1.id,
            'product_tmpl_id': cls.product1.product_tmpl_id.id,
            'product_uom_id': cls.uom_unit.id,
            'product_qty': 1.0,
            'type': 'normal',
            'bom_line_ids': [
                (0, 0, {'product_id': cls.component.id, 'product_qty': 1})
            ],
        }
        cls.bom = cls.env['mrp.bom'].create(bom_vals)

    def _make_mo(self, bom, quantity=1):
        """Create and confirm a manufacturing order for `quantity` of `bom`."""
        form = Form(self.env['mrp.production'])
        form.product_id = bom.product_id
        form.bom_id = bom
        form.product_qty = quantity
        production = form.save()
        production.action_confirm()
        return production

    def _produce(self, mo, quantity=0):
        """Register a produced quantity on `mo`; defaults to the remaining qty."""
        form = Form(mo)
        form.qty_producing += quantity or (mo.product_qty - mo.qty_produced)
        form.save()
class TestMrpValuationStandard(TestMrpValuationCommon):
    def test_fifo_fifo_1(self):
        """FIFO component / FIFO product: produce 2 units via a backorder."""
        self.component.product_tmpl_id.categ_id.property_cost_method = 'fifo'
        self.product1.product_tmpl_id.categ_id.property_cost_method = 'fifo'
        # Two component receipts at different unit costs: 10, then 20.
        self._make_in_move(self.component, 1, 10)
        self._make_in_move(self.component, 1, 20)
        mo = self._make_mo(self.bom, 2)
        self._produce(mo, 1)
        # Validating a partial production opens the backorder wizard.
        action = mo.button_mark_done()
        backorder = Form(self.env['mrp.production.backorder'].with_context(**action['context']))
        backorder.save().action_backorder()
        mo = mo.procurement_group_id.mrp_production_ids[-1]
        # The first unit consumed the 10$ layer (FIFO), leaving the 20$ one.
        self.assertEqual(self.component.value_svl, 20)
        self.assertEqual(self.product1.value_svl, 10)
        self.assertEqual(self.component.quantity_svl, 1)
        self.assertEqual(self.product1.quantity_svl, 1)
        # Finish the backorder: the remaining 20$ component flows into the product.
        self._produce(mo)
        mo.button_mark_done()
        self.assertEqual(self.component.value_svl, 0)
        self.assertEqual(self.product1.value_svl, 30)
        self.assertEqual(self.component.quantity_svl, 0)
        self.assertEqual(self.product1.quantity_svl, 2)
    def test_fifo_fifo_2(self):
        """FIFO component / FIFO product: produce in one go, then deliver one."""
        self.component.product_tmpl_id.categ_id.property_cost_method = 'fifo'
        self.product1.product_tmpl_id.categ_id.property_cost_method = 'fifo'
        self._make_in_move(self.component, 1, 10)
        self._make_in_move(self.component, 1, 20)
        mo = self._make_mo(self.bom, 2)
        self._produce(mo)
        mo.button_mark_done()
        # Both component layers (10 + 20) moved into the finished product.
        self.assertEqual(self.component.value_svl, 0)
        self.assertEqual(self.product1.value_svl, 30)
        self.assertEqual(self.component.quantity_svl, 0)
        self.assertEqual(self.product1.quantity_svl, 2)
        # Delivering one unit removes half of the single 30$/2-unit layer.
        self._make_out_move(self.product1, 1)
        self.assertEqual(self.product1.value_svl, 15)
    def test_fifo_avco_1(self):
        """FIFO component / AVCO product: produce 2 units via a backorder."""
        self.component.product_tmpl_id.categ_id.property_cost_method = 'fifo'
        self.product1.product_tmpl_id.categ_id.property_cost_method = 'average'
        # Two component receipts at different unit costs: 10, then 20.
        self._make_in_move(self.component, 1, 10)
        self._make_in_move(self.component, 1, 20)
        mo = self._make_mo(self.bom, 2)
        self._produce(mo, 1)
        # Validating a partial production opens the backorder wizard.
        action = mo.button_mark_done()
        backorder = Form(self.env['mrp.production.backorder'].with_context(**action['context']))
        backorder.save().action_backorder()
        mo = mo.procurement_group_id.mrp_production_ids[-1]
        # The first unit consumed the 10$ layer (FIFO on the component side).
        self.assertEqual(self.component.value_svl, 20)
        self.assertEqual(self.product1.value_svl, 10)
        self.assertEqual(self.component.quantity_svl, 1)
        self.assertEqual(self.product1.quantity_svl, 1)
        self._produce(mo)
        mo.button_mark_done()
        self.assertEqual(self.component.value_svl, 0)
        self.assertEqual(self.product1.value_svl, 30)
        self.assertEqual(self.component.quantity_svl, 0)
        self.assertEqual(self.product1.quantity_svl, 2)
def test_fifo_avco_2(self):
self.component.product_tmpl_id.categ_id.property_cost_method = 'fifo'
self.product1.product_tmpl_id.categ_id.property_cost_method = 'average'
self._make_in_move(self.component, 1, 10)
self._make_in_move(self.component, 1, 20)
mo = self._make_mo(self.bom, 2)
self._produce(mo)
mo.button_mark_done()
self.assertEqual(self.component.value_svl, 0)
self.assertEqual(self.product1.value_svl, 30)
self.assertEqual(self.component.quantity_svl, 0)
self.assertEqual(self.product1.quantity_svl, 2)
self._make_out_move(self.product1, 1)
self.assertEqual(self.product1.value_svl, 15)
    def test_fifo_std_1(self):
        """FIFO component -> standard-priced product, produced in two steps.

        Regardless of the FIFO component cost consumed, each finished unit is
        valued at the product's standard price (8.8).
        """
        self.component.product_tmpl_id.categ_id.property_cost_method = 'fifo'
        self.product1.product_tmpl_id.categ_id.property_cost_method = 'standard'
        self.product1.standard_price = 8.8
        self._make_in_move(self.component, 1, 10)
        self._make_in_move(self.component, 1, 20)
        mo = self._make_mo(self.bom, 2)
        # Produce and post only the first unit; the MO stays open.
        self._produce(mo, 1)
        mo._post_inventory()
        self.assertEqual(self.component.value_svl, 20)
        self.assertEqual(self.product1.value_svl, 8.8)
        self.assertEqual(self.component.quantity_svl, 1)
        self.assertEqual(self.product1.quantity_svl, 1)
        self._produce(mo)
        mo.button_mark_done()
        self.assertEqual(self.component.value_svl, 0)
        self.assertEqual(self.product1.value_svl, 8.8 * 2)
        self.assertEqual(self.component.quantity_svl, 0)
        self.assertEqual(self.product1.quantity_svl, 2)
def test_fifo_std_2(self):
self.component.product_tmpl_id.categ_id.property_cost_method = 'fifo'
self.product1.product_tmpl_id.categ_id.property_cost_method = 'standard'
self.product1.standard_price = 8.8
self._make_in_move(self.component, 1, 10)
self._make_in_move(self.component, 1, 20)
mo = self._make_mo(self.bom, 2)
self._produce(mo)
mo.button_mark_done()
self.assertEqual(self.component.value_svl, 0)
self.assertEqual(self.product1.value_svl, 8.8 * 2)
self.assertEqual(self.component.quantity_svl, 0)
self.assertEqual(self.product1.quantity_svl, 2)
self._make_out_move(self.product1, 1)
self.assertEqual(self.product1.value_svl, 8.8)
    def test_std_avco_1(self):
        """Standard-priced component -> AVCO product, produced in two steps.

        Each component unit is valued at its standard price (8.8), and that
        value flows one-to-one onto the finished product.
        """
        self.component.product_tmpl_id.categ_id.property_cost_method = 'standard'
        self.product1.product_tmpl_id.categ_id.property_cost_method = 'average'
        self.component.standard_price = 8.8
        self._make_in_move(self.component, 1)
        self._make_in_move(self.component, 1)
        mo = self._make_mo(self.bom, 2)
        # Produce and post only the first unit; the MO stays open.
        self._produce(mo, 1)
        mo._post_inventory()
        self.assertEqual(self.component.value_svl, 8.8)
        self.assertEqual(self.product1.value_svl, 8.8)
        self.assertEqual(self.component.quantity_svl, 1)
        self.assertEqual(self.product1.quantity_svl, 1)
        self._produce(mo)
        mo.button_mark_done()
        self.assertEqual(self.component.value_svl, 0)
        self.assertEqual(self.product1.value_svl, 8.8 * 2)
        self.assertEqual(self.component.quantity_svl, 0)
        self.assertEqual(self.product1.quantity_svl, 2)
def test_std_avco_2(self):
self.component.product_tmpl_id.categ_id.property_cost_method = 'standard'
self.product1.product_tmpl_id.categ_id.property_cost_method = 'average'
self.component.standard_price = 8.8
self._make_in_move(self.component, 1)
self._make_in_move(self.component, 1)
mo = self._make_mo(self.bom, 2)
self._produce(mo)
mo.button_mark_done()
self.assertEqual(self.component.value_svl, 0)
self.assertEqual(self.product1.value_svl, 8.8 * 2)
self.assertEqual(self.component.quantity_svl, 0)
self.assertEqual(self.product1.quantity_svl, 2)
self._make_out_move(self.product1, 1)
self.assertEqual(self.product1.value_svl, 8.8)
    def test_std_std_1(self):
        """Standard component -> standard product, produced in two steps.

        Component units are valued at 8.8 and finished units at the product's
        own standard price of 7.2, independently of the consumed cost.
        """
        self.component.product_tmpl_id.categ_id.property_cost_method = 'standard'
        self.product1.product_tmpl_id.categ_id.property_cost_method = 'standard'
        self.component.standard_price = 8.8
        self.product1.standard_price = 7.2
        self._make_in_move(self.component, 1)
        self._make_in_move(self.component, 1)
        mo = self._make_mo(self.bom, 2)
        # Produce and post only the first unit; the MO stays open.
        self._produce(mo, 1)
        mo._post_inventory()
        self.assertEqual(self.component.value_svl, 8.8)
        self.assertEqual(self.product1.value_svl, 7.2)
        self.assertEqual(self.component.quantity_svl, 1)
        self.assertEqual(self.product1.quantity_svl, 1)
        self._produce(mo)
        mo.button_mark_done()
        self.assertEqual(self.component.value_svl, 0)
        self.assertEqual(self.product1.value_svl, 7.2 * 2)
        self.assertEqual(self.component.quantity_svl, 0)
        self.assertEqual(self.product1.quantity_svl, 2)
def test_std_std_2(self):
self.component.product_tmpl_id.categ_id.property_cost_method = 'standard'
self.product1.product_tmpl_id.categ_id.property_cost_method = 'standard'
self.component.standard_price = 8.8
self.product1.standard_price = 7.2
self._make_in_move(self.component, 1)
self._make_in_move(self.component, 1)
mo = self._make_mo(self.bom, 2)
self._produce(mo)
mo.button_mark_done()
self.assertEqual(self.component.value_svl, 0)
self.assertEqual(self.product1.value_svl, 7.2 * 2)
self.assertEqual(self.component.quantity_svl, 0)
self.assertEqual(self.product1.quantity_svl, 2)
self._make_out_move(self.product1, 1)
self.assertEqual(self.product1.value_svl, 7.2)
    def test_avco_avco_1(self):
        """AVCO component -> AVCO product, produced in two steps.

        The component's average cost over the 10 and 20 receipts is 15, so
        each finished unit is valued at 15.
        """
        self.component.product_tmpl_id.categ_id.property_cost_method = 'average'
        self.product1.product_tmpl_id.categ_id.property_cost_method = 'average'
        self._make_in_move(self.component, 1, 10)
        self._make_in_move(self.component, 1, 20)
        mo = self._make_mo(self.bom, 2)
        # Produce and post only the first unit; the MO stays open.
        self._produce(mo, 1)
        mo._post_inventory()
        self.assertEqual(self.component.value_svl, 15)
        self.assertEqual(self.product1.value_svl, 15)
        self.assertEqual(self.component.quantity_svl, 1)
        self.assertEqual(self.product1.quantity_svl, 1)
        self._produce(mo)
        mo.button_mark_done()
        self.assertEqual(self.component.value_svl, 0)
        self.assertEqual(self.product1.value_svl, 30)
        self.assertEqual(self.component.quantity_svl, 0)
        self.assertEqual(self.product1.quantity_svl, 2)
def test_avco_avco_2(self):
self.component.product_tmpl_id.categ_id.property_cost_method = 'average'
self.product1.product_tmpl_id.categ_id.property_cost_method = 'average'
self._make_in_move(self.component, 1, 10)
self._make_in_move(self.component, 1, 20)
mo = self._make_mo(self.bom, 2)
self._produce(mo)
mo.button_mark_done()
self.assertEqual(self.component.value_svl, 0)
self.assertEqual(self.product1.value_svl, 30)
self.assertEqual(self.component.quantity_svl, 0)
self.assertEqual(self.product1.quantity_svl, 2)
self._make_out_move(self.product1, 1)
self.assertEqual(self.product1.value_svl, 15)
| 43.311828 | 96 | 0.678252 | 1,634 | 12,084 | 4.735006 | 0.064259 | 0.151221 | 0.191547 | 0.146568 | 0.874111 | 0.863901 | 0.863642 | 0.85886 | 0.858343 | 0.858343 | 0 | 0.03144 | 0.210361 | 12,084 | 278 | 97 | 43.467626 | 0.779396 | 0.014813 | 0 | 0.79918 | 0 | 0 | 0.034042 | 0.004035 | 0 | 0 | 0 | 0 | 0.319672 | 1 | 0.061475 | false | 0 | 0.008197 | 0 | 0.081967 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
10c1116bcced00291a3879d4ce0c596d1de61e9f | 1,961 | py | Python | src/the_tale/the_tale/common/bbcode/renderers.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/common/bbcode/renderers.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null | src/the_tale/the_tale/common/bbcode/renderers.py | al-arz/the-tale | 542770257eb6ebd56a5ac44ea1ef93ff4ab19eb5 | [
"BSD-3-Clause"
] | null | null | null |
import smart_imports
smart_imports.all()
# Tag groups shared between the renderers below.  Order matters: each
# renderer's tag list preserves the original declaration order.
_TEXT_TAGS = [tags.TAG.b,
              tags.TAG.i,
              tags.TAG.u,
              tags.TAG.s,
              tags.TAG.quote,
              tags.TAG.img,
              tags.TAG.url]
_LIST_TAGS = [tags.TAG.list,
              tags.TAG.list_id]
_BRACKET_TAGS = [tags.TAG.lsb,
                 tags.TAG.rsb,
                 tags.TAG.rl]

# Full-featured renderer.
default = renderer.Renderer(tags=(_TEXT_TAGS +
                                  [tags.TAG.spoiler] +
                                  _LIST_TAGS +
                                  [tags.TAG.hr] +
                                  _BRACKET_TAGS +
                                  [tags.TAG.youtube,
                                   tags.TAG.center,
                                   tags.TAG.size,
                                   tags.TAG.color,
                                   tags.TAG.pre]))

# Like ``default`` but with the safe spoiler variant and without the
# media/layout tags (youtube, center, size, color, pre).
safe = renderer.Renderer(tags=(_TEXT_TAGS +
                               [tags.TAG.safe_spoiler] +
                               _LIST_TAGS +
                               [tags.TAG.hr] +
                               _BRACKET_TAGS))

# Minimal renderer.
chronicle = renderer.Renderer(tags=([tags.TAG.i,
                                     tags.TAG.url] +
                                    _LIST_TAGS +
                                    _BRACKET_TAGS))
| 40.020408 | 53 | 0.27231 | 142 | 1,961 | 3.71831 | 0.204225 | 0.530303 | 0.125 | 0.136364 | 0.778409 | 0.706439 | 0.706439 | 0.706439 | 0.706439 | 0.606061 | 0 | 0 | 0.655788 | 1,961 | 48 | 54 | 40.854167 | 0.782222 | 0 | 0 | 0.690476 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.047619 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
10d262cb917a73ef1338285960bacfe1dd6e25c3 | 359 | py | Python | pyunitwizard/configure/__init__.py | uibcdf/pyunitwizard | 54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7 | [
"MIT"
] | 2 | 2021-07-01T14:33:58.000Z | 2022-03-19T19:19:09.000Z | pyunitwizard/configure/__init__.py | uibcdf/pyunitwizard | 54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7 | [
"MIT"
] | 15 | 2021-02-11T18:54:16.000Z | 2022-03-18T17:38:03.000Z | pyunitwizard/configure/__init__.py | uibcdf/pyunitwizard | 54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7 | [
"MIT"
] | 2 | 2021-06-17T18:56:02.000Z | 2022-03-08T05:02:17.000Z | from .configure import get_libraries_loaded, get_libraries_supported, get_libraries_found, load_library
from .configure import get_parsers_loaded, get_parsers_supported
from .configure import get_default_form, set_default_form, get_default_parser, set_default_parser
from .configure import get_standard_units, set_standard_units
from .configure import reset
| 51.285714 | 103 | 0.883008 | 51 | 359 | 5.764706 | 0.352941 | 0.221088 | 0.323129 | 0.29932 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.08078 | 359 | 6 | 104 | 59.833333 | 0.890909 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
33bccd5c2e355de133a95f53679e06120f85f67b | 2,684 | py | Python | scripts/extents.py | brickbitbot/cheatsheets | c3b4509bf76fc180621ca1e6433d42742a656759 | [
"BSD-2-Clause"
] | 2 | 2021-12-09T21:56:18.000Z | 2022-02-22T20:52:58.000Z | scripts/extents.py | brickbitbot/cheatsheets | c3b4509bf76fc180621ca1e6433d42742a656759 | [
"BSD-2-Clause"
] | 2 | 2021-05-05T01:05:10.000Z | 2021-05-05T01:05:32.000Z | scripts/extents.py | brickbitbot/cheatsheets | c3b4509bf76fc180621ca1e6433d42742a656759 | [
"BSD-2-Clause"
] | 1 | 2021-12-03T14:43:11.000Z | 2021-12-03T14:43:11.000Z | # -----------------------------------------------------------------------------
# Matplotlib cheat sheet
# Released under the BSD License
# -----------------------------------------------------------------------------
# Scripts to generate all the basic plots
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
Z = np.arange(5 * 5).reshape(5, 5)
fig = plt.figure(figsize=(8, 5))


def _show_extent(index, extent, origin, xticks, yticks):
    """Draw Z in subplot *index* and annotate where pixels (0,0) and (4,4) land.

    The corner positions depend only on *extent* (horizontal direction) and
    *origin* (vertical direction), which is exactly what this figure is meant
    to demonstrate.
    """
    ax = fig.add_subplot(2, 2, index)
    ax.imshow(Z, extent=extent, interpolation="nearest", origin=origin)
    ax.set_xlim(-1, 11), ax.set_xticks(xticks)
    ax.set_ylim(-1, 6), ax.set_yticks(yticks)
    # Pixel (0,0) sits on the side of extent[0]; its height follows origin.
    x0 = 1 if extent[0] < extent[1] else 9
    y0 = 4.5 if origin == "upper" else 0.5
    style = dict(ha="center", va="center", size="large")
    ax.text(x0, y0, "(0,0)", color="white", **style)
    ax.text(10 - x0, 5 - y0, "(4,4)", color="black", **style)
    ax.text(5.0, 5.5, 'origin="%s"' % origin, color="black", **style)
    ax.text(5.0, -0.5, "extent=[%d,%d,%d,%d]" % tuple(extent),
            color="black", **style)


# Same order and tight_layout placement as the original hand-unrolled code.
_show_extent(1, [0, 10, 0, 5], "upper", [], [0, 5])
_show_extent(3, [0, 10, 0, 5], "lower", [0, 10], [0, 5])
_show_extent(4, [10, 0, 0, 5], "lower", [0, 10], [])
plt.tight_layout()
_show_extent(2, [10, 0, 0, 5], "upper", [], [])
plt.tight_layout()
plt.savefig("../figures/extents.pdf", dpi=600)
# plt.show()
| 40.059701 | 79 | 0.579359 | 465 | 2,684 | 3.296774 | 0.152688 | 0.028702 | 0.104371 | 0.166993 | 0.851924 | 0.851924 | 0.829746 | 0.829746 | 0.829746 | 0.829746 | 0 | 0.067973 | 0.106557 | 2,684 | 66 | 80 | 40.666667 | 0.571309 | 0.09687 | 0 | 0.541667 | 0 | 0 | 0.242449 | 0.009102 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
d509e206f0f05dd1dd9a40115b7287e7ffdfe1cb | 159 | py | Python | xv_leak_tools/test_components/vpn_application/ios/ios_vpn_application.py | UAEKondaya1/expressvpn_leak_testing | 9e4cee899ac04f7820ac351fa55efdc0c01370ba | [
"MIT"
] | 219 | 2017-12-12T09:42:46.000Z | 2022-03-13T08:25:13.000Z | xv_leak_tools/test_components/vpn_application/ios/ios_vpn_application.py | UAEKondaya1/expressvpn_leak_testing | 9e4cee899ac04f7820ac351fa55efdc0c01370ba | [
"MIT"
] | 11 | 2017-12-14T08:14:51.000Z | 2021-08-09T18:37:45.000Z | xv_leak_tools/test_components/vpn_application/ios/ios_vpn_application.py | UAEKondaya1/expressvpn_leak_testing | 9e4cee899ac04f7820ac351fa55efdc0c01370ba | [
"MIT"
] | 45 | 2017-12-14T07:26:36.000Z | 2022-03-11T09:36:56.000Z | from xv_leak_tools.test_components.vpn_application.mobile_vpn_application import MobileVPNApplication
class IOSVPNApplication(MobileVPNApplication):
    """iOS VPN application component; all behaviour is inherited unchanged
    from MobileVPNApplication."""
    pass
| 31.8 | 101 | 0.886792 | 17 | 159 | 7.941176 | 0.823529 | 0.207407 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.075472 | 159 | 4 | 102 | 39.75 | 0.918367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 7 |
1d153c8e4f8b38131b06a88d9a9390f16fbcea4f | 8,173 | py | Python | tests/structured_concurrency/or/test_simple_situation.py | gottadiveintopython/asyncgui | abfe7759f189321ad24e5711149b85a14062cb3c | [
"MIT"
] | null | null | null | tests/structured_concurrency/or/test_simple_situation.py | gottadiveintopython/asyncgui | abfe7759f189321ad24e5711149b85a14062cb3c | [
"MIT"
] | null | null | null | tests/structured_concurrency/or/test_simple_situation.py | gottadiveintopython/asyncgui | abfe7759f189321ad24e5711149b85a14062cb3c | [
"MIT"
] | null | null | null | import pytest
async def finish_immediately(e=None):
    # Child that completes as soon as it starts; the optional event is ignored.
    pass
async def fail_immediately(e=None):
    # Child that raises as soon as it starts; the optional event is ignored.
    raise ZeroDivisionError
async def finish_soon(e):
    # Child that completes once the given event is set.
    await e.wait()
async def fail_soon(e):
    # Child that raises once the given event is set.
    await e.wait()
    raise ZeroDivisionError
async def fail_on_cancel(e=None):
    # Child that sleeps forever and raises while being cancelled
    # (the finally clause runs during cancellation).
    import asyncgui as ag
    try:
        await ag.sleep_forever()
    finally:
        raise ZeroDivisionError
async def finish_soon_but_protected(e):
    # Child that waits for the event inside a cancel-protected scope, so the
    # wait cannot be interrupted by cancellation.
    import asyncgui as ag
    async with ag.cancel_protection():
        await e.wait()
def test_no_child():
    """or_() with no children completes immediately with an empty task list."""
    import asyncgui as ag
    from asyncgui.structured_concurrency import or_

    async def root_fn():
        done_tasks = await or_()
        assert done_tasks == []

    root = ag.start(root_fn())
    assert root.done
def test_one_child_finishes_immediately():
    """A single already-finished child lets or_() return right away."""
    import asyncgui as ag
    from asyncgui.structured_concurrency import or_

    async def root_fn():
        children = await or_(finish_immediately())
        assert [child.done for child in children] == [True, ]

    root = ag.start(root_fn())
    assert root.done
def test_multiple_children_finish_immediately():
    """Two immediately-finishing children both end up done."""
    import asyncgui as ag
    from asyncgui.structured_concurrency import or_

    async def root_fn():
        children = await or_(finish_immediately(), finish_immediately())
        assert [child.done for child in children] == [True, True, ]

    root = ag.start(root_fn())
    assert root.done
def test_one_child_fails_immediately():
    """An immediately-failing child propagates its exception out of or_()."""
    import asyncgui as ag
    from asyncgui.structured_concurrency import or_

    async def root_fn():
        with pytest.raises(ZeroDivisionError):
            await or_(fail_immediately())

    root = ag.start(root_fn())
    assert root.done
def test_multiple_children_fail_immediately():
    """Two immediately-failing children are reported together as a MultiError."""
    import asyncgui as ag
    from asyncgui.structured_concurrency import or_

    async def root_fn():
        with pytest.raises(ag.MultiError) as excinfo:
            await or_(fail_immediately(), fail_immediately())
        exc_types = [type(exc) for exc in excinfo.value.exceptions]
        assert exc_types == [ZeroDivisionError, ZeroDivisionError]

    root = ag.start(root_fn())
    assert root.done
def test_one_child_finishes_soon():
    """or_() stays pending until its only child finishes on the event."""
    import asyncgui as ag
    from asyncgui.structured_concurrency import or_

    async def root_fn(ev):
        children = await or_(finish_soon(ev))
        assert [child.done for child in children] == [True, ]

    ev = ag.Event()
    root = ag.start(root_fn(ev))
    assert not root.done        # still waiting on the event
    ev.set()
    assert root.done
def test_multiple_children_finish_soon():
    """When one waiting child finishes, its sibling gets cancelled."""
    import asyncgui as ag
    from asyncgui.structured_concurrency import or_
    TS = ag.TaskState

    async def root_fn(ev):
        children = await or_(finish_soon(ev), finish_soon(ev))
        assert [child.state for child in children] == [TS.DONE, TS.CANCELLED]

    ev = ag.Event()
    root = ag.start(root_fn(ev))
    assert not root.done        # still waiting on the event
    ev.set()
    assert root.done
def test_one_child_fails_soon():
    """A child failure after the event is set propagates out of or_()."""
    import asyncgui as ag
    from asyncgui.structured_concurrency import or_

    async def root_fn(ev):
        with pytest.raises(ZeroDivisionError):
            await or_(fail_soon(ev))

    ev = ag.Event()
    root = ag.start(root_fn(ev))
    assert not root.done        # still waiting on the event
    ev.set()
    assert root.done
def test_multiple_children_fail_soon():
    '''
    One might expect a MultiError here, but as soon as the first child
    raises, the second one is cancelled immediately, so the second child
    never gets to raise.
    '''
    import asyncgui as ag
    from asyncgui.structured_concurrency import or_

    async def main(e):
        with pytest.raises(ZeroDivisionError):
            await or_(fail_soon(e), fail_soon(e))

    e = ag.Event()
    main_task = ag.start(main(e))
    assert not main_task.done
    e.set()
    assert main_task.done
def test_multiple_children_fail():
    '''
    The exception in the first child causes the second child to be
    cancelled; the second child raises too while being cancelled, so a
    MultiError is produced.
    '''
    import asyncgui as ag
    from asyncgui.structured_concurrency import or_

    async def main(e):
        with pytest.raises(ag.MultiError) as excinfo:
            await or_(fail_soon(e), fail_on_cancel())
        assert [ZeroDivisionError, ZeroDivisionError] == \
            [type(e) for e in excinfo.value.exceptions]

    e = ag.Event()
    main_task = ag.start(main(e))
    assert not main_task.done
    e.set()
    assert main_task.done
def test_cancel_all_children():
    """or_() finishes only after every one of its children is cancelled."""
    import asyncgui as ag
    from asyncgui.structured_concurrency import or_
    TS = ag.TaskState

    async def root_fn():
        for child in await or_(first_child, second_child):
            assert child.cancelled

    first_child = ag.Task(ag.sleep_forever())
    second_child = ag.Task(ag.sleep_forever())
    root = ag.start(root_fn())
    assert root.state is TS.STARTED
    first_child.cancel()
    assert root.state is TS.STARTED    # one child is still alive
    second_child.cancel()
    assert root.state is TS.DONE
def test_必ず例外を起こす子_を複数持つ親を中断():
    # Cancel a parent that has multiple children which always raise on
    # cancellation.  Both children raise ZeroDivisionError while being
    # cancelled, so the parent observes them wrapped in a MultiError; the
    # parent itself then ends up CANCELLED without reaching pytest.fail().
    import asyncgui as ag
    from asyncgui.structured_concurrency import or_
    TS = ag.TaskState

    async def main(e):
        with pytest.raises(ag.MultiError) as excinfo:
            await or_(fail_on_cancel(), fail_on_cancel())
        assert [ZeroDivisionError, ZeroDivisionError] == \
            [type(e) for e in excinfo.value.exceptions]
        await e.wait()
        pytest.fail("Failed to cancel")

    e = ag.Event()
    main_task = ag.Task(main(e))
    ag.start(main_task)
    assert main_task.state is TS.STARTED
    main_task.cancel()
    assert main_task.state is TS.CANCELLED
def test_必ず例外を起こす子_を複数持つ親を中断_2():
    # Cancel a parent with multiple always-raising children (variant 2).
    # Unlike the test above, main() has no pytest.raises around or_(), so
    # the MultiError escapes through main_task.cancel() itself.
    import asyncgui as ag
    from asyncgui.structured_concurrency import or_
    TS = ag.TaskState

    async def main():
        await or_(fail_on_cancel(), fail_on_cancel())
        pytest.fail("Failed to cancel")

    main_task = ag.Task(main())
    ag.start(main_task)
    assert main_task.state is TS.STARTED
    with pytest.raises(ag.MultiError) as excinfo:
        main_task.cancel()
    assert [ZeroDivisionError, ZeroDivisionError] == \
        [type(e) for e in excinfo.value.exceptions]
    assert main_task.state is TS.CANCELLED
def test_例外を起こさない子_を一つ持つ親を中断():
    # Cancel a parent that has a single non-raising child.
    # The sleeping child is cancelled cleanly, and the parent ends up
    # CANCELLED without reaching pytest.fail().
    import asyncgui as ag
    from asyncgui.structured_concurrency import or_
    TS = ag.TaskState

    async def main():
        await or_(ag.sleep_forever())
        pytest.fail()

    main_task = ag.Task(main())
    ag.start(main_task)
    assert main_task.state is TS.STARTED
    main_task.cancel()
    assert main_task.state is TS.CANCELLED
def test_例外を起こさない子_を複数持つ親を中断():
    # Cancel a parent that has multiple non-raising children.
    # Both sleeping children are cancelled cleanly, and the parent ends up
    # CANCELLED without reaching pytest.fail().
    import asyncgui as ag
    from asyncgui.structured_concurrency import or_
    TS = ag.TaskState

    async def main():
        await or_(ag.sleep_forever(), ag.sleep_forever())
        pytest.fail()

    main_task = ag.Task(main())
    ag.start(main_task)
    assert main_task.state is TS.STARTED
    main_task.cancel()
    assert main_task.state is TS.CANCELLED
class Test_cancel_protection:
    """A child wrapped in ``cancel_protection`` must be allowed to finish,
    whether a sibling fails or the parent itself is cancelled."""

    @pytest.mark.parametrize(
        'other_child', (fail_on_cancel, fail_immediately))
    def test_other_child_fails(self, other_child):
        """The sibling's ZeroDivisionError cannot interrupt the protected wait."""
        import asyncgui as ag
        from asyncgui.structured_concurrency import or_

        async def main(e):
            with pytest.raises(ZeroDivisionError):
                await or_(finish_soon_but_protected(e), other_child(e))

        e = ag.Event()
        main_task = ag.Task(main(e))
        ag.start(main_task)
        assert not main_task.done
        # Cancellation is deferred while the protected child is still waiting.
        main_task.cancel()
        assert not main_task.done
        e.set()
        assert main_task.done

    @pytest.mark.parametrize('other_child',
                             (fail_soon, finish_immediately, finish_soon,
                              finish_soon_but_protected))
    def test_other_child_does_not_fail(self, other_child):
        """The parent stays alive until the protected child ends, then the
        pending cancellation takes effect during sleep_forever()."""
        import asyncgui as ag
        from asyncgui.structured_concurrency import or_

        async def main(e):
            # Return value of or_() is intentionally ignored; only the
            # cancellation timing matters here.
            await or_(finish_soon_but_protected(e), other_child(e))
            await ag.sleep_forever()
            pytest.fail("Failed to cancel")

        e = ag.Event()
        main_task = ag.Task(main(e))
        ag.start(main_task)
        assert not main_task.cancelled
        main_task.cancel()
        assert not main_task.cancelled
        e.set()
        assert main_task.cancelled
| 25.863924 | 75 | 0.669399 | 1,087 | 8,173 | 4.819687 | 0.080037 | 0.093148 | 0.061462 | 0.06528 | 0.849017 | 0.819049 | 0.787173 | 0.750716 | 0.733728 | 0.727238 | 0 | 0.002098 | 0.242016 | 8,173 | 315 | 76 | 25.946032 | 0.843584 | 0.014682 | 0 | 0.735683 | 0 | 0 | 0.008735 | 0 | 0 | 0 | 0 | 0 | 0.185022 | 1 | 0.07489 | false | 0.004405 | 0.162996 | 0 | 0.242291 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
1d415b4d03171234d3668af5ab3bda8c91f22d3c | 2,171 | py | Python | mypy_drf_plugin/transformers/validation.py | rafales/djangorestframework-stubs | e73110c5e35f9aae22f19fed4f71dcb440bd3619 | [
"MIT"
] | 47 | 2018-11-23T19:54:53.000Z | 2020-11-09T22:21:55.000Z | mypy_drf_plugin/transformers/validation.py | rafales/djangorestframework-stubs | e73110c5e35f9aae22f19fed4f71dcb440bd3619 | [
"MIT"
] | 13 | 2019-02-26T09:09:54.000Z | 2019-04-07T17:12:28.000Z | mypy_drf_plugin/transformers/validation.py | rafales/djangorestframework-stubs | e73110c5e35f9aae22f19fed4f71dcb440bd3619 | [
"MIT"
] | 6 | 2019-02-26T08:24:09.000Z | 2019-06-24T08:53:56.000Z | from mypy.plugin import MethodContext
from mypy.types import Instance, Type
from mypy_drf_plugin import helpers
def return_typeddict_from_to_representation(ctx: MethodContext) -> Type:
    """Type ``to_representation`` as the serializer's corresponding TypedDict.

    Primitive value types are used, since the data has been rendered for
    output.  Falls back to the declared return type for non-Instance callees.
    """
    if not isinstance(ctx.type, Instance):
        return ctx.default_return_type
    return helpers.get_corresponding_typeddict(ctx.type, ctx.api,
                                               use_primitive_types=True)
def return_list_of_typeddict_for_list_serializer_from_to_representation(ctx: MethodContext) -> Type:
    """Type a list serializer's ``to_representation`` as ``list`` of the
    child serializer's TypedDict (with primitive value types).

    Falls back to the declared return type when the callee is not an
    Instance or its ``child`` attribute cannot be resolved.
    """
    if not isinstance(ctx.type, Instance):
        return ctx.default_return_type
    child = ctx.type.type.get('child')
    if child is None or not isinstance(child.type, Instance):
        return ctx.default_return_type
    item_type = helpers.get_corresponding_typeddict(child.type, ctx.api,
                                                    use_primitive_types=True)
    return ctx.api.named_generic_type('builtins.list', [item_type])
def return_typeddict_from_to_internal_value(ctx: MethodContext) -> Type:
    """Type ``to_internal_value`` as the serializer's corresponding TypedDict.

    Non-primitive (field-level) value types are used for internal data.
    Falls back to the declared return type for non-Instance callees.
    """
    if not isinstance(ctx.type, Instance):
        return ctx.default_return_type
    return helpers.get_corresponding_typeddict(ctx.type, ctx.api,
                                               use_primitive_types=False)
def return_list_of_typeddict_for_list_serializer_from_to_internal_value(ctx: MethodContext) -> Type:
    """Type a list serializer's ``to_internal_value`` as ``list`` of the
    child serializer's TypedDict (with non-primitive value types).

    Falls back to the declared return type when the callee is not an
    Instance or its ``child`` attribute cannot be resolved.
    """
    if not isinstance(ctx.type, Instance):
        return ctx.default_return_type
    child = ctx.type.type.get('child')
    if child is None or not isinstance(child.type, Instance):
        return ctx.default_return_type
    item_type = helpers.get_corresponding_typeddict(child.type, ctx.api,
                                                    use_primitive_types=False)
    return ctx.api.named_generic_type('builtins.list', [item_type])
| 40.962264 | 100 | 0.703823 | 261 | 2,171 | 5.494253 | 0.157088 | 0.117155 | 0.07113 | 0.087866 | 0.934449 | 0.909344 | 0.909344 | 0.909344 | 0.896792 | 0.896792 | 0 | 0 | 0.233994 | 2,171 | 52 | 101 | 41.75 | 0.862297 | 0 | 0 | 0.810811 | 0 | 0 | 0.016582 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.081081 | 0 | 0.459459 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
d5178e620171130d36aa64b52a2399e4bd5b3911 | 132 | py | Python | bflib/monsters/insects/base.py | ChrisLR/BasicDungeonRL | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | [
"MIT"
] | 3 | 2017-10-28T11:28:38.000Z | 2018-09-12T09:47:00.000Z | bflib/monsters/insects/base.py | ChrisLR/BasicDungeonRL | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | [
"MIT"
] | null | null | null | bflib/monsters/insects/base.py | ChrisLR/BasicDungeonRL | b293d40bd9a0d3b7aec41b5e1d58441165997ff1 | [
"MIT"
] | null | null | null | from bflib.monsters import listing
from bflib.monsters.base import Monster
@listing.register_type
class Insect(Monster):
    """Base class for insect monsters, registered in the monster listing."""
    pass
| 16.5 | 39 | 0.80303 | 18 | 132 | 5.833333 | 0.666667 | 0.171429 | 0.32381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.136364 | 132 | 7 | 40 | 18.857143 | 0.921053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.2 | 0.4 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 7 |
d532d608e30fba6b8d2fcaa4a74170559087e41a | 223 | py | Python | hand_eye/src/hand_eye/__init__.py | SamKaiYang/timda_dual_arm | 8582945cb7bc9d955d224bffb5af2c207bbb311a | [
"MIT"
] | 1 | 2021-07-02T12:37:35.000Z | 2021-07-02T12:37:35.000Z | hand_eye/src/hand_eye/__init__.py | SamKaiYang/timda_dual_arm | 8582945cb7bc9d955d224bffb5af2c207bbb311a | [
"MIT"
] | null | null | null | hand_eye/src/hand_eye/__init__.py | SamKaiYang/timda_dual_arm | 8582945cb7bc9d955d224bffb5af2c207bbb311a | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from hand_eye import HandEyeTrans
# from .hand_eye_connector import HandEyeConnector
# from .MarkerPosture import MarkerPosture
from hand_eye import HandEyeConnector
from hand_eye import MarkerPosture | 37.166667 | 50 | 0.856502 | 29 | 223 | 6.413793 | 0.413793 | 0.172043 | 0.236559 | 0.274194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005 | 0.103139 | 223 | 6 | 51 | 37.166667 | 0.925 | 0.497758 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
d546cee2cc7b3a28420d5467506d189f277fd4ec | 8,392 | py | Python | runtime/bamboo-pipeline/test/pipeline_test_use/component_tests/test_call_assertion_component.py | DomineCore/bamboo-engine | fb4583e70f9e1e87d9d48c2393db8d8104306f37 | [
"MIT"
] | 55 | 2021-09-07T11:50:35.000Z | 2022-03-23T13:19:38.000Z | runtime/bamboo-pipeline/test/pipeline_test_use/component_tests/test_call_assertion_component.py | DomineCore/bamboo-engine | fb4583e70f9e1e87d9d48c2393db8d8104306f37 | [
"MIT"
] | 64 | 2021-09-07T12:04:12.000Z | 2022-03-29T03:47:18.000Z | runtime/bamboo-pipeline/test/pipeline_test_use/component_tests/test_call_assertion_component.py | DomineCore/bamboo-engine | fb4583e70f9e1e87d9d48c2393db8d8104306f37 | [
"MIT"
] | 20 | 2021-09-07T11:52:08.000Z | 2022-03-28T08:05:22.000Z | from django.test import TestCase
from pipeline_test_use.components.collections.experience import TheCallAssertionComponent
from pipeline.component_framework.test import (
Call,
CallAssertion,
ComponentTestCase,
ComponentTestMixin,
ExecuteAssertion,
Patcher,
ScheduleAssertion,
)
class TheCallAssertionComponentTest(TestCase, ComponentTestMixin):
    """Exercise TheCallAssertionComponent, asserting how often the two patched
    helper functions are invoked during execute() and the schedule() polls."""

    # Dotted paths of the functions the component may invoke.
    _PATCH_1 = "pipeline_test_use.components.collections.experience.need_patch_1"
    _PATCH_2 = "pipeline_test_use.components.collections.experience.need_patch_2"

    def _case(self, name, inputs, e1=0, e2=0, s1=0, s2=0):
        """Build one ComponentTestCase.

        e1/e2: expected call counts of patch 1/2 during execute().
        s1/s2: expected call counts of patch 1/2 across the schedule() polls.
        """
        return ComponentTestCase(
            name=name,
            inputs=inputs,
            parent_data={},
            execute_assertion=ExecuteAssertion(success=True, outputs={}),
            # Every case polls schedule() three times; the third poll finishes.
            schedule_assertion=[
                ScheduleAssertion(success=True, outputs={"count": 1}, callback_data=None),
                ScheduleAssertion(success=True, outputs={"count": 2}, callback_data=None),
                ScheduleAssertion(success=True, schedule_finished=True, outputs={"count": 2}, callback_data=None),
            ],
            patchers=[
                Patcher(target=self._PATCH_1),
                Patcher(target=self._PATCH_2),
            ],
            execute_call_assertion=[
                CallAssertion(func=self._PATCH_1, calls=[Call()] * e1),
                CallAssertion(func=self._PATCH_2, calls=[Call()] * e2),
            ],
            schedule_call_assertion=[
                CallAssertion(func=self._PATCH_1, calls=[Call()] * s1),
                CallAssertion(func=self._PATCH_2, calls=[Call()] * s2),
            ],
        )

    def cases(self):
        """Return the test matrix; the shared boilerplate lives in _case()."""
        return [
            self._case("not call any case", {}),
            self._case("execute call 1 case", {"e_call_1": True}, e1=1),
            self._case("schedule call 1 case", {"s_call_1": True}, s1=3),
            self._case("call 1 case", {"s_call_1": True, "e_call_1": True}, e1=1, s1=3),
            self._case(
                "all call case",
                {"s_call_1": True, "e_call_1": True, "s_call_2": True, "e_call_2": True},
                e1=1, e2=1, s1=3, s2=3,
            ),
        ]

    def component_cls(self):
        return TheCallAssertionComponent
| 53.452229 | 118 | 0.565419 | 707 | 8,392 | 6.435644 | 0.077793 | 0.081758 | 0.102198 | 0.17033 | 0.906593 | 0.906593 | 0.896484 | 0.896484 | 0.891868 | 0.891868 | 0 | 0.009948 | 0.329242 | 8,392 | 156 | 119 | 53.794872 | 0.798366 | 0 | 0 | 0.809211 | 0 | 0 | 0.254886 | 0.228789 | 0 | 0 | 0 | 0 | 0.401316 | 1 | 0.013158 | false | 0 | 0.019737 | 0.013158 | 0.052632 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
6370e434055c71ebd4aba6b10c9a55615b40dcdb | 8,765 | py | Python | dark-ddos.py | dark-hacker-bd/darkddos | f865181cca7240feb3a98379ef50dce14873c5ca | [
"Apache-2.0"
] | null | null | null | dark-ddos.py | dark-hacker-bd/darkddos | f865181cca7240feb3a98379ef50dce14873c5ca | [
"Apache-2.0"
] | null | null | null | dark-ddos.py | dark-hacker-bd/darkddos | f865181cca7240feb3a98379ef50dce14873c5ca | [
"Apache-2.0"
] | null | null | null | # Obfuscated by Py Compile
# Created by HTR-TECH (https://github.com/htr-tech)
# Instagram : @tahmid.rayat
import marshal,zlib,base64
exec(marshal.loads(zlib.decompress(base64.b64decode("eJztPVtsHNd1d5ZLUlyJelqSZTvxSLFMUuHO7vIhvvQ09bQlmV1JprKmvDvcuSRnHzOrmVmJVOhAifQhoEGtNLKdBgWSNv1o0dhJ0DZAkgIFigL9aD+CfvW3Sn9bJECB9qNAe865d3ZndpeSSLNGA+xy9u59nHvOuefec+7cJ/NMfqLwPQVf9x+6GDPgUViJsUzNr7CM4vsjLBPx/R0s0+H7oywT9f2dLNPp+7tYpsv3d7NMt+/fwjJbfH8Py/T4/hjLxHz/VpbZ6vu3scw239/LMr3kj7DSdlbewTI7mLIOvjeLv5Y8gX8ny+xsklEjrx2stIuVd7PMbhGOImx5D8vsYQp/gXGF3VfAp7DCXmZ0ikAHK+xh96GI+xjfxwr7GX9RJEDgAMPkl1jhZYQwoFDdkKYoxkW2qCB49hX2LLoYjqF8yiSizeXjzTofnLHCF5ixVWR6kWW+yIxt7AEgfJXxHlZQmdHL+KuAQMkcZPwgKxxCZAqPsMKXmLFd5HuN8u2gfIeZsZM8rzNjF3n6mLGbPP3M2OPDvODD7PVh9vkw+1lmIMyY8SI7f4FD0gERfEmCYtTLggUB/ooIQPQXgtFfrEW/2jpabR19sDnaOOTTDjH4pVaxiOK1IAoJfJiAjzDjdZb5MlsE/yC5cXI1chPkJpnRxzIpZvSzzFADyQGQiXGEZYaZ8WWWGWHGIMuMsuVvMD6K/mKEOT+L8qPMiCN8ZoxxeIYp/k8oXqP4ccbhGaH4D6JGAiUtKjcp6KQQ7a2fRo0hTArK4tZ41BhuETkbZlSo3wTLTLQyB0GVDpoAofYjLDNJ7hTKQz9G7nGUnH6C3JPknmLGKAn1NDOOkucNZoyRZ5oZ4+Q5w4wJ8pxlxiR5zjFjijznmXGMPBeYcVy2+wXg8ARGQnP/SuYiM04GEk7VEt5k/C1UzcIlLCzIrMiY88tOfl6oCSRcJp/17xIKlPCAcZrE/Z8twP6eGW+gSINIpwk61SURJAHBGT9qmrLdSnVZLEAAhMYnWeEKM876bAF4rgsiIWY/JCP4FlZ4mxnnEPQ+WOUZxmdY4XdkxYPEl/sYTzPjPAbP3DzAMlcZv0jWJmgKbv1e1yx/synauEASvYaVebX/LejUzP+Bz5V+BbxeDJxrSw7XjRnbLom4HeBM25bF855pW2cdx3ZEQjc4bzj2HZc7HvaQVW9h3NsCnrK+nPXMMjcRzEWc1wEmfnqRW56bhuDbFe7oiQltPKn2n7YMxzaNKZUi1cumZSaGh7SkNjQ0OpIYH9XU61OqaQyoMw53PTsxpKWGtJGhYfUd7rjAUAKCqaOCIyRu6B5H4l4nBPIlrjt57MIxfSuWBFnywFkuM/gpKCgI6HvA7IOlB/stDLIBjbwHbf1iTIj5BHsfCEBtnWAzUEkemPAoK3SiNq0yth81SsZ1kZxBOoUtaLRJ+TQ2C/pztb8XCF9xb4D78rupqZHx8pOPvk/PH23Kk0oeriGfSNWQbw6JoSDy4Qbkn5XEcBD5SEvkGycxEkQ++hTkGyExGkR+9JnI10fiaBD52HMif14SY0HkQ+tC/mwS489uLRsnMQHI31mrnX+G5+F3U0lA/Sqg7pWMq+HPJVs3TGtRXRQDg1+e7O9Bw4OO6YEdBKvpCsuzAraHzKJhWxxMA5idFdfrIvNj2FWPoO44kIt8C6Wqu0TwNePlljivCMOGTr4fKZLjRhDvIllg3TLLYPIOYfQ2snQjSpcSVV5QehUi5+nOIvckw9FTeX9Qo/jWcB+jrv38BWEPCxHRUaC9ipC9+kK9BX5VivygDL+vvqHni/1YPA+BbTeN9p4KwpdNr85ziHHkq8hLVd1JYmyU+OlU8oocbG3xebsqeHufbPT7EbY8ieb2zM0h9n4HWyWzvar4hjyKPKzS+zgY8heh8zywf7WD7R+DjLf2sFmPstzHjg8ZuELyKy8
V5yv5GewShJwPmuilvquf6gNhHd0y7DJlQK9peVSlJejRqLa5pxcFNOZbJvcOuQa5ZrMMEFrP68VRv5NkUG9dUGvJSE0O3b4cvlaTw/JpLOKZmxMoDvAVmCwUCkTIIYIx+0A0IA4QCooA0veBPKAPu9XLZgEOBDQGIZmji0YulF7r2RCoW/Rs1BawTq6ksbG4WPSDh136PexOmagw6f1+2Q4mXeQ/RkIm8ZiWwZeF8HilpOelPngONZY09svpbb6809jc0l9ER/UFWmgSX/oV+DmB4e1SdL3KDqU/ElN2i2aEYB2++NBZPkZFUtj+MzeTKMlacxdNRpHhDtF/i949IvrvrbL/7iB9oNIt/mbpVw//6r/+9QQVs6Egaezo08hZGl+fSI+oIHfJ5c2tAcVU0Eu6dREjt1CZdik7oVQbUNitrGYbz6ISItvpPTU2XwjpZEiqe+FnZt0q6bVQSeILVbLjqSoZbaGS+pJn3C3kqUWlX8T2U9fIzloLOYDOS+i8jM4rNX1FuHnRgF71W1FzSQ/Cz/Um1RPl7dyA6kWfpXpRoXpRoXpRP0dN9aIB1Yu2Ur2BoOp1UxWD8qXKpH5UYS+/myw3q2Aa+4b0l9B5rbmpCokeCelc+rAv5dZ6d7NJ75TPT++++sKn5//l7oOTz6t36dfR6WtZGAwsNiqc+zqoSqyh4+9LyFdTShgYnKulGLpTVOftFfW5P4nBfsSSyGb7tOfPFfxofZCZOMkNoNMX19S5Z+QJMqBq8b4cePr7YiKsqlktHu+bU4McDTZkC4S1PjXRF49rWQqtEpY6A1kNORrMgoNSG06VVxNzIj07mBUUE3Or8n0ceMkOIkMSG6UL+ecoD6T50QGMq4BwVYsfguhDcW0VUK6uypdwDUsiPigZzJjzK3RAlNwvhapO1hECjrg2pyYSWhy5reGbREQ1wUDZ+mO+INUQLoiQ2OayKuLzhxwffyDjc+/1NcUhKTWbkMQSQlB1egM+61jXWUFHkoM606Z8kqtz2eyhbDbhc61OYXIAVx8KWFY58lrDhfLN+lKuMTWXeG8O6snnS7BAVS6FCghragJRWWgTNdFSXQewxbV4EFM2CAe13IfkQzqXOyZwDGrYMPtyq0HecodyQbZWc33YGDXRttQTubD2ZkGguRwCZBFgLljMXF8uWD7ShCySzOVQQiFECVFMQSQHzWBKxBLCKWhXovTZprL0SUURv9RoVuPxOH1VwZEv/qa8gpjE0JdQ6U9mRp0C2clsObU5Yw3BQE7Nkf6QloM/159rgGmRG6xMFlsUlVJDOWKroRJja8tms2F4KVTZwtfkZq1PiIMnHz188tE9//lWq6eW+rAR07oyx5qytczw9CfMxWdEFAviefyNDeGhB/M+3FRkAda++ZmxPdpUZLGg8CHu8ZPHjzaEDHj5Dvkfbi7Cv/1pA4sbQ1rnr85iZRMxtmBSpD8IPw1owqkhLh5uOsJQM5QN4GHT880AtkctAGSTexTmcBMQhhTYb58ByKBdqhVcmqM6LYnt4aYiC9XuvSeP7wWqvuF51FTmYJt5jHkDdm8TEYZ4DDaMWnv4dqCcQVP77VbN6cEmo2vuMx74xW5uJs3lf+gX9UFzn7FxRLEGTAFARPnDVs+9MJFQN7phJA2iCRZBoPjrVs8Pw/3lo2bRbBBRo1wCNYnS+8tWT7D9Pmgll40gaX67aTZ5zU8j+fXmbXyr8zM3P00vUc/M0IibXv/knHlD2pMPP3ny+Ovy+fDTJx/+JPB8UotRg4GmZ82kYLYQnSYufkxgBPz4fggLBP0YNRhofNZOCmYL0WniwseCzw98fv+C3D8nDJiR0AGielrQ48M1JIWyiUL6z1O5uB8QyieIHUvwI3jUxrQ60NfrcIIdGfNJU7ancfEJ0RFYmrLIav1UXYPVHwdofhrigurBlwXi/kGQTot2UWs1j38hIWse31XrgSBQc7ZQzE+C6H8RaJ0/iqm1IZOcDcB5gQ8fbc7zMaj
ozzcLW53NoGa76Q4x0zzsj/bWHoI2fgZhQP5U6EGavYCh+GBOUh9QB0TE2vkIfIDgh8rqXL8Kw9wEYkqAb81sACbJAYk5NZGDH8T0NA6npLXTslkEh1E5uBQ3VkaESDo7UJ+9aUkUB/Pwq/XhHBYAw5PNan1rlzALbMrSaYggMOOSG0xgIQbXpIkcyUYGHM8N5tTjx2kiRzt+vDYJQiIbjE+txcBUfBAgoGBfO/o1nAdIkAcjBp5SWsyizmWxxPhBauMj5QEZhDLjTIPWYuaDPn0g40FZ6vignElU+8R8VWJOfWo9IWmcbkS/mK/Aucin1gxRxMkxyDSXlXUKSGgGSFubT/+D00dyAjKRk828sOg+NQ/OzODUD8LSEob6+Uynf/cP8POzTZtO/0nTdPppJm1ECm3Ek49+GHx+de93xSMb4FLpjjrv2KruunpJL1fvgLtSrJbdU1h2YzFuV7ilLnlexZ1MJO7cuaMt6Hk+b9tFLW+XExXHXjBLXKssVU6axvFUMpkcSyXHR8fGksnxpDu2HlZMVS/XZ/fd460YWLGrXnWeE+38km5ZvJS4Pl28sbJspkbKK4XzS2+cmy5WSxfc66fdN9dDXcW9BKpse7e549qW31ektJSWVN1j60Fn2JanLukO4F0yXULu3l0PAmLHM1WnaqlF29EdtWBbFlRUxdIN7gQ0oupyx9LL3FF1y1ArUJN3bMdQTatS9TCrx9Ule57/djbvv2ts3r+di17/2KSlL9UbQ73JcBlzkBahQ6U8x1h969xiB24dWE6KzRn9tDgcxaLuPx9aoxYrnHKX9K19sqC08ksLmNgbCY9W82iqiwu8/oacmRLXXa7O6qanyqh6JXeGpdNqFR5BPNMzi2Rk7eaVeNrLYhb/mdXWhneAfLawXrEM7faSoEAoV2xPfadastweETNUppC3GyqsvVrYXi0Un/ZqYXu1sL1a+KzVQhysrffTXi38Vnu1cOPI2quFm4OxvVr4mRG2Vws/E8L2amF7tbC9WtiS/Hrz/l+vFjaNYNtLhu0lw/aS4fqWDAOfQFAcNtOdIs1gzdsr4jSZU+Xue6y+oV2eC1HlvILUP98uN7yf3JucnJQD3ms4/3tdTu36WCZVeXhQkAjhTpXDGv6cJGb8+eImEqo/QznmjxVjcpatLgSzNse3oJdcTlPu6rM/ciQ669jWYpgDGJjmbcvT855a3hi6kMxC6OiAS+14T8SfWcUJS48mV8d0uqpDHusRB2BqZ7Idfguq1svm7arlcWeNk3Y4D2paeR/qlCKTmDhk5n+n/dnOVcZ8ohgvz4VjS1oo6YsihO3rtl5qJoYTyi73sgh6JkDJL2KIEpRvreIhOVdf4GuUySeDIBdakKlJ8vtCkuKQd33WXU60y0BHMBANBjqDga5goDsY2BIM9AQDsWBgazCwzQ/4Z8LxWPxl+65ZKumJUS2p9t9Ipabw6Psl06ouq8vjR7NHR6ZUbsWvX51SnduTKW1CS2nDA+p5ni/aiaFkciI5kRpWz5kOX7CXE8PaqDbsFhrRzpqWYd9xCbX0q1euqUe1FCJfE/P40EgYs9qvXTl7TZ2+lFYpnBwbmhhwy89JblQbelZZnpPijfUVMEwxFaI4lhoPUUy57nMXx0c+oJ6uVEp8ls+/ZXqJ0eEhLaX2v3Xh2uVLg2rJLHJBb0CdXnLsMk+M4D0HqQntqHpVX9AdU+QIiXEE6ebtckX3zPkSn1IvX714Vh3Xks3Fm317FtvINcc0uOVhzin16qXpaZB1TXR4s8IoiG5sSr1oLdgzurekDQ24H26AYrIFqVRrUrU4XLocGR4KclSrzFBc0q/gt9bNGTUuCKMslhsEMuAaG8CXaizpO2uUMyjSiac1HiI01kqkohm52jPZFFUuc9+YGXDxNGzwZo1GkQBZpxq4RwMih+q3aEAZUmKFbDc4SxwXdd0sruXqdG8HHYPWKxVuGWuYZey3avDZkul6l9E4v0QmuVdp/Vcz2rW
FxVMbMNpehO0Hc72/ZlTFkiIeEcWVerlTYNG2F0tirf7krePuQDi16uqebegrlO7ivSFLCRBVteS5CN1fh+bWom4sck8TUJpulwKZELi7DkzLjAlxQn5nQLJgZ7gDnvSU3+Mt2e5a5+W3UX9PGUiw7yn+4qQU7N76Qd0oC6/Q4sHVCXqhiGDvfj8iVmgj/hFduvgED6CKc7sdeGgV5DgG+XCFdhXP5uIL1RV6XRFHcXFLh5lh/olbTHZ0a5HXD+XSS0J+yREA1KGbdzkd77Wr0HV7Tv10t95cYCzbfNUsGfMlO19crBVX6VL2KbuVF1qcin6RyVPRUE5wOtChILaHKLWHSVZb446v/+Me8bMPl88u62Ww85NqZcVbsq0hWteNG4btapUV9VjVKZ1wX6tTq4hF64ZNGOUV2ofRUOdNx8KhYYJGKbUF+076az6o/KFCikPr7Io4Ek61j2f8I1J/IKYYZc4QVjs0AatLHuIWC/OrdKB5P1AFbS90yxtx7lPO/fAGtEaKPAHdQTfedBNED94lBPEAocgEulEnEI7RK1E9vM3PSC9KdUq9QUp18O1++g682QzT62m78GqzQHgPAw3C8MouKmmvbOPKrPUmi2LMdlaMMeemorwfBentwIvQ6PT1XmxA+1CK1oiA3EmQqxJyFyvsFjdE7WU3vD21TQ41CqQ+qDhXSHOovZ8k93UTVcSM+bHHG29KwhfDaT2/xOPT0G4cu0T1b9nxPEbSMOF0Ps8rXnwa2hS8ELt4gPvi1bfj4+OjE/HUYNVbiI9P3Tqe1MYGj4hfUsC0sCQmXTKCRN/ivBI/XTJvc9NCnpaZvAuqfu8TwRURTkc40ugLYLDEHU/jTI6L5BjnDO5yOO3BOKeoysHdVU93vF9/7/fv/fp73/o5fH/26+995yH8/o35H4CiH+8kSOOOpvQp36LQkIUYBnUqmfNDknka9qTRugXMDbKnG0ZWmFfqrPJLtpnnaVT6NPaN6ZNM3loF+HAXF13bcuHatRm61CqNm8NobHE9fUnE1G5LSI8weVKe7BrkJwHkbYOTba7ojl7OFmzTAtringvBZcsdOEQEu4e8XirdRsUeIsXuhr8uMOU9kS5lWjmg7IS/hPIK/XYou5TdkRgofg/8bo904/UuMcm/uLIrZBLfQHK7wOFM3uqG135FWFqM68h6dEgLQtYjB87KObZ8Uo498cK0M9gyPXHFgYK9AxiLIhkLMB/gOeV0oW2X/fStLgbP7Ky1BzSlk9TkXaaAcVHAeN2om2Eao1OtiyEfbWnCW1DS2KOk8WKfNDYoUSFYR2eXsZlDKxTdKPUmvLSQnmXy2hm+3Hofj1O1vqPU7iroUHYo26D/2Is3FnT4tZ3N4ug8myVpZrNl26iWIJjOrmmV0+/Cz8f+wLNL8VBTL9uW6dnOBqsitLUKr1eASsCO+xjWANZGBGtD1MB+SIPaEffeFDugFnqwPiCwj64EwY67B59Zj16OMO9X0Iiev2DJAXdXzSCZ+D7s4ovJYUOVuuWqV0E5qJpo61UsHhf7lqRGnzMt013ihhqPi81mw34NiuBNX7QVh9827arbLEEU7g/qNdMFNfIStOxeqB9xWwk2x7SOzlPqYR5+/jhQD/QWxUsVd2CnmPCK+btk2tux2tuxBFPt7Vjt7Vjt7Vjt7ViN6NrbsdaLrb0dq70dq70dy/e0t2O1t2O1t2PVSaqbgqi9Hatl5k3djhUiIsc3Br+NU2SO/1IYGidLGDoRWN/ZIIBoDlwCBNrSxx/8/zzWDozFaB5WzDjhGU9aSOinA54n5yYTiYGT/e++l7h5ZCBxUjtC01afz5G///6z3/zT5fmZzTjyR5Nobsm+U3FMy/u3+lKCOPtH2T+ne/o/R1J4u2vg1tvt/mximeNJXfMuT9N8Mp0ZxIlkmrhd0t2lkjlPs4oOJ3iPpvRMS1zZXHBtiyAXuYdnW2mqWUxQE3TetosmxxBOHJerJc+sOHaeuy5g0CqgM1h
cMT8mZ4ldb08goHF/ttNNY0tI06IsXULNS7Zu0Pqsyz2DL+iAnVt5m5hDsHnH2yVSs1BEo8Szjj1ve+JK8nO4I8rPXUvnCyDEJQLIYqMntnFSOS1SZgTztuNPscsFTHGV+Yrr8XIay53e5bMppkBpTUPccu7i1D81VJLgNafK6aJhuoOXLpSlM6mUWLIX7bTmh+b1fJEkNc8dqBmzRIE8L1Zs/wpuu+hS0U2D0nB1dNGpVoiT29WSZXuECb3yX1k4Ds97/q6sYJy/8UsyYleoQh39TpbOLhN+/3CzmN+UGdJvM3/iGtcYAksOtZlRYV9wGYGmtdO4O6C+3iFWM84zfw4cJ5hpdpPuNhZXhTuLt8VaxBVf0GK9mQRdJkEvOna1ksbFWXGNL2drLiwi+8fEXPeJbooHNH8K5uAPablhZ0eMJmN7lVeUbRGKUQIx4HZFenp2Uxz4Ont6erb2dMPvlp5Yz2iX0ru7E9J6ujE9+O1QupXeSE93r4JfiOno6epSQn+R3kRnBH9xUrhD6QFq5EY6IX8PuDvhu6ujN7JNkX8d4fw9Ufjr7NkFPHUDPwcPbD2wdS/R3YPulzuVvcoeuWreq+wGKnsA/3b4xiL/C186CUY=")))) | 1,460.833333 | 8,630 | 0.960068 | 306 | 8,765 | 27.5 | 0.980392 | 0.001664 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.156036 | 0.00194 | 8,765 | 6 | 8,630 | 1,460.833333 | 0.805898 | 0.011409 | 0 | 0 | 0 | 0.5 | 0.98961 | 0.98961 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 10 |
639cd43b1d54662183c6d41529246589ce399da4 | 12,195 | py | Python | rvpvp/isa/rvv/vfxxx_vf.py | ultrafive/riscv-pvp | 843e38422c3d545352b955764927d5e7847e5453 | [
"Unlicense"
] | 5 | 2021-05-10T09:57:00.000Z | 2021-10-05T14:39:20.000Z | rvpvp/isa/rvv/vfxxx_vf.py | ultrafive/riscv-pvp | 843e38422c3d545352b955764927d5e7847e5453 | [
"Unlicense"
] | null | null | null | rvpvp/isa/rvv/vfxxx_vf.py | ultrafive/riscv-pvp | 843e38422c3d545352b955764927d5e7847e5453 | [
"Unlicense"
] | 1 | 2021-05-14T20:24:11.000Z | 2021-05-14T20:24:11.000Z | from ...isa.inst import *
import numpy as np
import struct
import ctypes
# glibc <fenv.h> rounding-mode macro values, passed to libm's fesetround().
FE_TONEAREST = 0x0000
FE_DOWNWARD = 0x0400
FE_UPWARD = 0x0800
FE_TOWARDZERO = 0x0c00
# Host C math library, used to switch the process-wide FP rounding mode.
libm = ctypes.CDLL('libm.so.6')
# Map the instruction's `frm` field to the matching fenv rounding mode.
# NOTE(review): assumes frm encoding 0=RNE, 1=RTZ, 2=RDN, 3=RUP — confirm
# against the RISC-V F-extension spec.
round_dict = { 0:FE_TONEAREST , 1:FE_TOWARDZERO , 2:FE_DOWNWARD , 3:FE_UPWARD }
class Vfadd_vf(Inst):
    """Golden model for `vfadd.vf`: element-wise `vd[i] = rs1 + vs2[i]`."""
    name = 'vfadd.vf'
    def golden(self):
        """Return the expected destination vector, or 0 when no operand is given."""
        # Nothing to model without a source vector operand.
        if 'vs2' not in self:
            return 0
        # Honour the requested FP rounding mode while computing.
        if 'frm' in self:
            libm.fesetround(round_dict[self['frm']])
        try:
            # Destination starts from its previous contents when supplied.
            if 'orig' in self:
                result = self['orig'].copy()
            else:
                result = np.zeros(self['vl'], dtype=self['vs2'].dtype)
            vstart = self['vstart'] if 'vstart' in self else 0
            if vstart >= self['vl']:
                return result
            result[vstart:self['vl']] = self.masked(
                self['rs1'] + self['vs2'][vstart:self['vl']],
                self['orig'][vstart:self['vl']] if 'orig' in self else 0,
                vstart)
            return result
        finally:
            # Bug fix: the original leaked the process-wide rounding mode on the
            # early vstart >= vl return; always restore the default (0 == RNE).
            if 'frm' in self:
                libm.fesetround(0)
class Vfsub_vf(Inst):
    """Golden model for `vfsub.vf`: element-wise `vd[i] = vs2[i] - rs1`."""
    name = 'vfsub.vf'
    def golden(self):
        """Return the expected destination vector, or 0 when no operand is given."""
        if 'vs2' not in self:
            return 0
        # Honour the requested FP rounding mode while computing.
        if 'frm' in self:
            libm.fesetround(round_dict[self['frm']])
        try:
            # Destination starts from its previous contents when supplied.
            if 'orig' in self:
                result = self['orig'].copy()
            else:
                result = np.zeros(self['vl'], dtype=self['vs2'].dtype)
            vstart = self['vstart'] if 'vstart' in self else 0
            if vstart >= self['vl']:
                return result
            result[vstart:self['vl']] = self.masked(
                self['vs2'][vstart:self['vl']] - self['rs1'],
                self['orig'][vstart:self['vl']] if 'orig' in self else 0,
                vstart)
            return result
        finally:
            # Bug fix: the original leaked the process-wide rounding mode on the
            # early vstart >= vl return; always restore the default (0 == RNE).
            if 'frm' in self:
                libm.fesetround(0)
class Vfrsub_vf(Inst):
    """Golden model for `vfrsub.vf`: element-wise `vd[i] = rs1 - vs2[i]`."""
    name = 'vfrsub.vf'
    def golden(self):
        """Return the expected destination vector, or 0 when no operand is given."""
        if 'vs2' not in self:
            return 0
        # Honour the requested FP rounding mode while computing.
        if 'frm' in self:
            libm.fesetround(round_dict[self['frm']])
        try:
            # Destination starts from its previous contents when supplied.
            if 'orig' in self:
                result = self['orig'].copy()
            else:
                result = np.zeros(self['vl'], dtype=self['vs2'].dtype)
            vstart = self['vstart'] if 'vstart' in self else 0
            if vstart >= self['vl']:
                return result
            result[vstart:self['vl']] = self.masked(
                self['rs1'] - self['vs2'][vstart:self['vl']],
                self['orig'][vstart:self['vl']] if 'orig' in self else 0,
                vstart)
            return result
        finally:
            # Bug fix: the original leaked the process-wide rounding mode on the
            # early vstart >= vl return; always restore the default (0 == RNE).
            if 'frm' in self:
                libm.fesetround(0)
class Vfmul_vf(Inst):
    """Golden model for `vfmul.vf`: element-wise `vd[i] = rs1 * vs2[i]`."""
    name = 'vfmul.vf'
    def golden(self):
        """Return the expected destination vector, or 0 when no operand is given."""
        if 'vs2' not in self:
            return 0
        # Honour the requested FP rounding mode while computing.
        if 'frm' in self:
            libm.fesetround(round_dict[self['frm']])
        try:
            # Destination starts from its previous contents when supplied.
            if 'orig' in self:
                result = self['orig'].copy()
            else:
                result = np.zeros(self['vl'], dtype=self['vs2'].dtype)
            vstart = self['vstart'] if 'vstart' in self else 0
            if vstart >= self['vl']:
                return result
            result[vstart:self['vl']] = self.masked(
                self['rs1'] * self['vs2'][vstart:self['vl']],
                self['orig'][vstart:self['vl']] if 'orig' in self else 0,
                vstart)
            return result
        finally:
            # Bug fix: the original leaked the process-wide rounding mode on the
            # early vstart >= vl return; always restore the default (0 == RNE).
            if 'frm' in self:
                libm.fesetround(0)
class Vfdiv_vf(Inst):
    """Golden model for `vfdiv.vf`: element-wise `vd[i] = vs2[i] / rs1`."""
    name = 'vfdiv.vf'
    def golden(self):
        """Return the expected destination vector, or 0 when no operand is given."""
        if 'vs2' not in self:
            return 0
        # Honour the requested FP rounding mode while computing.
        if 'frm' in self:
            libm.fesetround(round_dict[self['frm']])
        try:
            # Destination starts from its previous contents when supplied.
            if 'orig' in self:
                result = self['orig'].copy()
            else:
                result = np.zeros(self['vl'], dtype=self['vs2'].dtype)
            vstart = self['vstart'] if 'vstart' in self else 0
            if vstart >= self['vl']:
                return result
            result[vstart:self['vl']] = self.masked(
                self['vs2'][vstart:self['vl']] / self['rs1'],
                self['orig'][vstart:self['vl']] if 'orig' in self else 0,
                vstart)
            return result
        finally:
            # Bug fix: the original leaked the process-wide rounding mode on the
            # early vstart >= vl return; always restore the default (0 == RNE).
            if 'frm' in self:
                libm.fesetround(0)
class Vfrdiv_vf(Inst):
    """Golden model for `vfrdiv.vf`: element-wise `vd[i] = rs1 / vs2[i]`."""
    name = 'vfrdiv.vf'
    def golden(self):
        """Return the expected destination vector, or 0 when no operand is given."""
        if 'vs2' not in self:
            return 0
        # Honour the requested FP rounding mode while computing.
        if 'frm' in self:
            libm.fesetround(round_dict[self['frm']])
        try:
            # Destination starts from its previous contents when supplied.
            if 'orig' in self:
                result = self['orig'].copy()
            else:
                result = np.zeros(self['vl'], dtype=self['vs2'].dtype)
            vstart = self['vstart'] if 'vstart' in self else 0
            if vstart >= self['vl']:
                return result
            result[vstart:self['vl']] = self.masked(
                self['rs1'] / self['vs2'][vstart:self['vl']],
                self['orig'][vstart:self['vl']] if 'orig' in self else 0,
                vstart)
            return result
        finally:
            # Bug fix: the original leaked the process-wide rounding mode on the
            # early vstart >= vl return; always restore the default (0 == RNE).
            if 'frm' in self:
                libm.fesetround(0)
def max( a, b ):
    """Element-wise maximum of scalar *a* and vector *b* that ignores NaN
    operands (RVV vfmax semantics: a NaN input yields the other operand).

    NOTE: intentionally shadows the builtin ``max``; Vfmax_vf below relies on
    this name.
    """
    result = np.zeros( b.size, dtype=b.dtype )
    if np.isnan( a ):
        # A NaN scalar is ignored: the vector operand wins everywhere.
        result[:] = b
    else:
        nan_mask = np.isnan( b )
        result[nan_mask] = a
        result[~nan_mask] = np.maximum( a, b[~nan_mask] )
    return result
class Vfmax_vf(Inst):
    """Golden model for `vfmax.vf`: element-wise NaN-ignoring max(rs1, vs2[i])."""
    name = 'vfmax.vf'
    def golden(self):
        """Return the expected destination vector, or 0 when no operand is given."""
        if 'vs2' not in self:
            return 0
        # Honour the requested FP rounding mode while computing.
        if 'frm' in self:
            libm.fesetround(round_dict[self['frm']])
        try:
            # Destination starts from its previous contents when supplied.
            if 'orig' in self:
                result = self['orig'].copy()
            else:
                result = np.zeros(self['vl'], dtype=self['vs2'].dtype)
            vstart = self['vstart'] if 'vstart' in self else 0
            if vstart >= self['vl']:
                return result
            # `max` is the NaN-ignoring helper defined above in this module.
            result[vstart:self['vl']] = self.masked(
                max(self['rs1'], self['vs2'][vstart:self['vl']]),
                self['orig'][vstart:self['vl']] if 'orig' in self else 0,
                vstart)
            return result
        finally:
            # Bug fix: the original leaked the process-wide rounding mode on the
            # early vstart >= vl return; always restore the default (0 == RNE).
            if 'frm' in self:
                libm.fesetround(0)
def min_vf( a, b ):
    """Element-wise minimum of scalar *a* and vector *b* that ignores NaN
    operands (RVV vfmin semantics: a NaN input yields the other operand)."""
    result = np.zeros( b.size, dtype=b.dtype )
    if np.isnan( a ):
        # A NaN scalar is ignored: the vector operand wins everywhere.
        result[:] = b
    else:
        nan_mask = np.isnan( b )
        result[nan_mask] = a
        result[~nan_mask] = np.minimum( a, b[~nan_mask] )
    return result
class Vfmin_vf(Inst):
    """Golden model for `vfmin.vf`: element-wise NaN-ignoring min(rs1, vs2[i])."""
    name = 'vfmin.vf'
    def golden(self):
        """Return the expected destination vector, or 0 when no operand is given."""
        if 'vs2' not in self:
            return 0
        # Honour the requested FP rounding mode while computing.
        if 'frm' in self:
            libm.fesetround(round_dict[self['frm']])
        try:
            # Destination starts from its previous contents when supplied.
            if 'orig' in self:
                result = self['orig'].copy()
            else:
                result = np.zeros(self['vl'], dtype=self['vs2'].dtype)
            vstart = self['vstart'] if 'vstart' in self else 0
            if vstart >= self['vl']:
                return result
            # `min_vf` is the NaN-ignoring helper defined above in this module.
            result[vstart:self['vl']] = self.masked(
                min_vf(self['rs1'], self['vs2'][vstart:self['vl']]),
                self['orig'][vstart:self['vl']] if 'orig' in self else 0,
                vstart)
            return result
        finally:
            # Bug fix: the original leaked the process-wide rounding mode on the
            # early vstart >= vl return; always restore the default (0 == RNE).
            if 'frm' in self:
                libm.fesetround(0)
class Vfsgnj_vf(Inst):
    """Golden model for `vfsgnj.vf`: vd[i] = |vs2[i]| carrying the sign of rs1."""
    name = 'vfsgnj.vf'
    def golden(self):
        if 'vs2' in self:
            # Pick struct codes and sign-bit position for the element width.
            if self['vs2'].dtype == np.float16:
                str_int = '<H'
                str_float = '<e'
                signal_bit = 15
            elif self['vs2'].dtype == np.float32:
                str_int = '<I'
                str_float = '<f'
                signal_bit = 31
            elif self['vs2'].dtype == np.float64:
                str_int = '<Q'
                str_float = '<d'
                signal_bit = 63
            # NOTE(review): any other dtype leaves str_int unbound (NameError
            # below); presumably callers only pass fp16/32/64.
            if 'orig' in self:
                result = self['orig'].copy()
            else:
                result = np.zeros( self['vl'], dtype = self['vs2'].dtype )
            if 'vstart' in self:
                # Nothing to do when the start index is past the vector length.
                if self['vstart'] >= self['vl']:
                    return result
                vstart = self['vstart']
            else:
                vstart = 0
            # Reinterpret rs1's bits to read its raw IEEE-754 sign bit, then
            # apply that sign to the magnitude of every active vs2 element.
            vd = np.where( struct.unpack( str_int, struct.pack( str_float, self['rs1'] ) )[0] >> signal_bit, - abs( self['vs2'][vstart:self['vl']] ), abs( self['vs2'][vstart:self['vl']] ) )
            result[vstart:self['vl']] = self.masked( vd, self['orig'][vstart:self['vl']] if 'orig' in self else 0, vstart )
            return result
        else:
            # No source vector operand supplied.
            return 0
class Vfsgnjn_vf(Inst):
    """Golden model for `vfsgnjn.vf`: vd[i] = |vs2[i]| carrying the NEGATED sign of rs1."""
    name = 'vfsgnjn.vf'
    def golden(self):
        if 'vs2' in self:
            # Pick struct codes and sign-bit position for the element width.
            if self['vs2'].dtype == np.float16:
                str_int = '<H'
                str_float = '<e'
                signal_bit = 15
            elif self['vs2'].dtype == np.float32:
                str_int = '<I'
                str_float = '<f'
                signal_bit = 31
            elif self['vs2'].dtype == np.float64:
                str_int = '<Q'
                str_float = '<d'
                signal_bit = 63
            # NOTE(review): any other dtype leaves str_int unbound (NameError
            # below); presumably callers only pass fp16/32/64.
            if 'orig' in self:
                result = self['orig'].copy()
            else:
                result = np.zeros( self['vl'], dtype = self['vs2'].dtype )
            if 'vstart' in self:
                # Nothing to do when the start index is past the vector length.
                if self['vstart'] >= self['vl']:
                    return result
                vstart = self['vstart']
            else:
                vstart = 0
            # Read rs1's raw sign bit; a set bit (negative rs1) yields +|vs2|,
            # a clear bit yields -|vs2| — i.e. the inverted sign of rs1.
            vd = np.where( struct.unpack( str_int, struct.pack( str_float, self['rs1'] ) )[0] >> signal_bit, abs( self['vs2'][vstart:self['vl']] ), - abs( self['vs2'][vstart:self['vl']] ) )
            result[vstart:self['vl']] = self.masked( vd, self['orig'][vstart:self['vl']] if 'orig' in self else 0, vstart )
            return result
        else:
            # No source vector operand supplied.
            return 0
class Vfsgnjx_vf(Inst):
    """Golden model for `vfsgnjx.vf`: vd[i] = |vs2[i]| with sign = sign(rs1) XOR sign(vs2[i])."""
    name = 'vfsgnjx.vf'
    def golden(self):
        if 'vs2' in self:
            # Pick struct codes and sign-bit position for the element width.
            if self['vs2'].dtype == np.float16:
                str_int = '<H'
                str_float = '<e'
                signal_bit = 15
            elif self['vs2'].dtype == np.float32:
                str_int = '<I'
                str_float = '<f'
                signal_bit = 31
            elif self['vs2'].dtype == np.float64:
                str_int = '<Q'
                str_float = '<d'
                signal_bit = 63
            # NOTE(review): any other dtype leaves str_int unbound (NameError
            # below); presumably callers only pass fp16/32/64.
            if 'orig' in self:
                result = self['orig'].copy()
            else:
                result = np.zeros( self['vl'], dtype = self['vs2'].dtype )
            if 'vstart' in self:
                # Nothing to do when the start index is past the vector length.
                if self['vstart'] >= self['vl']:
                    return result
                vstart = self['vstart']
            else:
                vstart = 0
            vd = np.zeros( self['vl'] - vstart, dtype = self['vs2'].dtype )
            # Per element: equal raw sign bits (XOR == 0) give +|v|,
            # differing sign bits give -|v|.
            for i, v in enumerate(self['vs2'][vstart:self['vl']]):
                vd[i] = np.where( struct.unpack( str_int, struct.pack( str_float, self['rs1'] ) )[0] >> signal_bit ==
                                  struct.unpack( str_int, struct.pack( str_float, v ) )[0] >> signal_bit, abs( v ), - abs( v ) )
            result[vstart:self['vl']] = self.masked( vd, self['orig'][vstart:self['vl']] if 'orig' in self else 0, vstart )
            return result
        else:
            # No source vector operand supplied.
            return 0
63a4a184ad5a183e0a7364b8443c80a529644d0a | 366 | py | Python | neurodsp/tests/utils/test_sim.py | elybrand/neurodsp | 96355f4c75e1eedef2a77a8bfafc718f80b8dae3 | [
"Apache-2.0"
] | 1 | 2020-01-04T18:15:49.000Z | 2020-01-04T18:15:49.000Z | neurodsp/tests/test_utils_sim.py | josepfont65/neurodsp | a7c5b72665eed6368e29bf4f15443a28a2e18732 | [
"Apache-2.0"
] | null | null | null | neurodsp/tests/test_utils_sim.py | josepfont65/neurodsp | a7c5b72665eed6368e29bf4f15443a28a2e18732 | [
"Apache-2.0"
] | null | null | null | """Tests for simulation related utility functions."""
from neurodsp.utils.sim import *
###################################################################################################
###################################################################################################
def test_set_random_seed():
    """Smoke test: seeding succeeds both without and with an explicit value."""
    for seed_args in ((), (100,)):
        set_random_seed(*seed_args)
| 30.5 | 99 | 0.336066 | 23 | 366 | 5.043478 | 0.73913 | 0.232759 | 0.336207 | 0.275862 | 0.336207 | 0.336207 | 0 | 0 | 0 | 0 | 0 | 0.008876 | 0.076503 | 366 | 11 | 100 | 33.272727 | 0.33432 | 0.128415 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | true | 0 | 0.25 | 0 | 0.5 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
63b38b5e1e67782d6c6c1c71ef64c28c0dbea25e | 2,833 | py | Python | gates/__init__.py | radosnystudent/Quantum-algorithms | abc0603b9c85d628434300bc71e76d56164cf949 | [
"MIT"
] | null | null | null | gates/__init__.py | radosnystudent/Quantum-algorithms | abc0603b9c85d628434300bc71e76d56164cf949 | [
"MIT"
] | null | null | null | gates/__init__.py | radosnystudent/Quantum-algorithms | abc0603b9c85d628434300bc71e76d56164cf949 | [
"MIT"
] | null | null | null | import numpy as np
from math import sqrt, sin, cos
def X_gate(alfa: complex, beta: complex):
    """Apply the Pauli-X (bit-flip) gate to the state alfa|0> + beta|1>."""
    pauli_x = np.matrix([[complex(0, 0), complex(1, 0)],
                         [complex(1, 0), complex(0, 0)]], dtype=complex)
    state = np.matrix([[alfa], [beta]])
    return pauli_x * state
def Y_gate(alfa: complex, beta: complex):
    """Apply the Pauli-Y gate to the state alfa|0> + beta|1>."""
    pauli_y = np.matrix([[complex(0, 0), complex(0, -1)],
                         [complex(0, 1), complex(0, 0)]], dtype=complex)
    state = np.matrix([[alfa], [beta]])
    return pauli_y * state
def Z_gate(alfa: complex, beta: complex):
    """Apply the Pauli-Z (phase-flip) gate to the state alfa|0> + beta|1>."""
    pauli_z = np.matrix([[complex(1, 0), complex(0, 0)],
                         [complex(0, 0), complex(-1, 0)]], dtype=complex)
    state = np.matrix([[alfa], [beta]])
    return pauli_z * state
def S_gate(alfa: complex, beta: complex):
    """Apply the S (phase, sqrt-of-Z) gate to the state alfa|0> + beta|1>."""
    s_matrix = np.matrix([[complex(1, 0), complex(0, 0)],
                          [complex(0, 0), complex(0, 1)]], dtype=complex)
    state = np.matrix([[alfa], [beta]])
    return s_matrix * state
def St_gate(alfa: complex, beta: complex):
    """Apply the S-dagger (inverse phase) gate to the state alfa|0> + beta|1>."""
    s_dagger = np.matrix([[complex(1, 0), complex(0, 0)],
                          [complex(0, 0), complex(0, -1)]], dtype=complex)
    state = np.matrix([[alfa], [beta]])
    return s_dagger * state
def M_gate(alfa: complex, beta: complex):
    """Apply the sqrt-of-NOT gate 0.5*[[1+i, 1-i], [1-i, 1+i]] to alfa|0> + beta|1>."""
    sqrt_not = np.matrix([[complex(0.5, 0.5), complex(0.5, -0.5)],
                          [complex(0.5, -0.5), complex(0.5, 0.5)]], dtype=complex)
    state = np.matrix([[alfa], [beta]])
    return sqrt_not * state
def H_gate(alfa: complex, beta: complex):
    """Apply the Hadamard gate (1/sqrt(2))*[[1, 1], [1, -1]] to alfa|0> + beta|1>."""
    inv_root2 = 1 / sqrt(2)
    hadamard = np.matrix([[complex(inv_root2, 0), complex(inv_root2, 0)],
                          [complex(inv_root2, 0), complex(-inv_root2, 0)]], dtype=complex)
    state = np.matrix([[alfa], [beta]])
    return hadamard * state
def T_gate(alfa: complex, beta: complex):
    """Apply the T (pi/8) gate: phases |1> by exp(i*pi/4)."""
    t_matrix = np.matrix([[complex(1, 0), complex(0, 0)],
                          [complex(0, 0), np.exp(complex(0, 1) * np.pi / 4)]], dtype=complex)
    state = np.matrix([[alfa], [beta]])
    return t_matrix * state
def Tt_gate(alfa: complex, beta: complex):
    """Apply the T-dagger gate: phases |1> by exp(-i*pi/4)."""
    t_dagger = np.matrix([[complex(1, 0), complex(0, 0)],
                          [complex(0, 0), np.exp(complex(0, -1) * np.pi / 4)]], dtype=complex)
    state = np.matrix([[alfa], [beta]])
    return t_dagger * state
def Rx_gate(alfa: complex, beta: complex, fi: float):
    """Rotate the state alfa|0> + beta|1> about the X axis by angle fi
    (convention used here: cos(fi/2)*I + i*sin(fi/2)*X)."""
    identity = np.matrix('1 0; 0 1')
    pauli_x = np.matrix([[complex(0, 0), complex(1, 0)],
                         [complex(1, 0), complex(0, 0)]], dtype=complex)
    rotation = cos(fi / 2) * identity + complex(0, 1) * sin(fi / 2) * pauli_x
    return rotation * np.matrix([[alfa], [beta]])
def Ry_gate(alfa: complex, beta: complex, fi: float):
    """Rotate the state alfa|0> + beta|1> about the Y axis by angle fi
    (convention used here: cos(fi/2)*I + i*sin(fi/2)*Y)."""
    identity = np.matrix('1 0; 0 1')
    pauli_y = np.matrix([[complex(0, 0), complex(0, -1)],
                         [complex(0, 1), complex(0, 0)]], dtype=complex)
    rotation = cos(fi / 2) * identity + complex(0, 1) * sin(fi / 2) * pauli_y
    return rotation * np.matrix([[alfa], [beta]])
def Rz_gate(alfa: complex, beta: complex, fi: float):
    """Rotate the state alfa|0> + beta|1> about the Z axis by angle fi
    (convention used here: cos(fi/2)*I + i*sin(fi/2)*Z)."""
    identity = np.matrix('1 0; 0 1')
    pauli_z = np.matrix([[complex(1, 0), complex(0, 0)],
                         [complex(0, 0), complex(-1, 0)]], dtype=complex)
    rotation = cos(fi / 2) * identity + complex(0, 1) * sin(fi / 2) * pauli_z
    return rotation * np.matrix([[alfa], [beta]])
# Dispatch table: gate mnemonic -> gate function. The R* entries take an
# extra angle argument (fi); the rest take only (alfa, beta).
gate_list = {
    'X': X_gate,
    'Y': Y_gate,
    'Z': Z_gate,
    'S': S_gate,
    'St': St_gate,
    'T': T_gate,
    'Tt': Tt_gate,
    'M': M_gate,
    'H': H_gate,
    'Rx': Rx_gate,
    'Ry': Ry_gate,
    'Rz': Rz_gate,
}
63b46dfae20e65c0103b398fc36a4d880c2d8fca | 65,071 | py | Python | playground/plotting/PlotOBands30mGen3/jessepickerdata/dnafiles/OttBands30mGen3dnas.py | ysdede/jesse_strategies | ade9f4ba42cec11207c766d267b9d8feb8bce648 | [
"CC0-1.0"
] | 38 | 2021-09-18T15:33:28.000Z | 2022-02-21T17:29:08.000Z | playground/plotting/PlotOBands30mGen3/jessepickerdata/dnafiles/OttBands30mGen3dnas.py | ysdede/jesse_strategies | ade9f4ba42cec11207c766d267b9d8feb8bce648 | [
"CC0-1.0"
] | 4 | 2022-01-02T14:46:12.000Z | 2022-02-16T18:39:41.000Z | playground/plotting/PlotOBands30mGen3/jessepickerdata/dnafiles/OttBands30mGen3dnas.py | ysdede/jesse_strategies | ade9f4ba42cec11207c766d267b9d8feb8bce648 | [
"CC0-1.0"
] | 11 | 2021-10-19T06:21:43.000Z | 2022-02-21T17:29:10.000Z | dnas = [
['T<BD?iv', 64, 115, 107.86, 53, 13, 7.89, {'ott_ubw': 134, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 49}],
['[<5D?iq', 60, 104, 103.02, 50, 12, 5.33, {'ott_ubw': 152, 'ott_dbw': 72, 'long_tps_qty_index': 21, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['\\</D?iq', 61, 105, 101.66, 50, 12, 4.94, {'ott_ubw': 155, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['S<BD?iv', 63, 117, 100.58, 53, 13, 7.89, {'ott_ubw': 132, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 49}],
['W<HD?iv', 63, 111, 99.12, 53, 13, 6.12, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 51, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 49}],
['U<@D?iv', 62, 113, 99.02, 53, 13, 8.3, {'ott_ubw': 137, 'ott_dbw': 72, 'long_tps_qty_index': 38, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 49}],
['X<BD?iv', 62, 112, 99.59, 53, 13, 7.89, {'ott_ubw': 145, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 49}],
['U<BD?iv', 63, 114, 95.03, 53, 13, 7.89, {'ott_ubw': 137, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 49}],
['W<=D?iq', 62, 108, 91.55, 53, 13, 7.69, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 33, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['\\<)D3in', 60, 99, 91.46, 50, 12, 5.33, {'ott_ubw': 155, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['\\<)D.in', 60, 99, 91.46, 50, 12, 5.33, {'ott_ubw': 155, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['[<)D.in', 60, 101, 90.82, 50, 12, 5.33, {'ott_ubw': 152, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
[']<HD?iv', 65, 103, 90.54, 50, 12, 3.15, {'ott_ubw': 158, 'ott_dbw': 72, 'long_tps_qty_index': 51, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 49}],
['[<)D8in', 60, 101, 90.82, 50, 12, 5.33, {'ott_ubw': 152, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['[<)D>in', 60, 101, 90.82, 50, 12, 5.33, {'ott_ubw': 152, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['[<)D.im', 60, 101, 90.82, 50, 12, 5.33, {'ott_ubw': 152, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['V<HD?iq', 63, 109, 89.14, 53, 13, 6.12, {'ott_ubw': 139, 'ott_dbw': 72, 'long_tps_qty_index': 51, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['\\<9D=in', 60, 99, 88.11, 50, 12, 4.54, {'ott_ubw': 155, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['\\<9D4in', 60, 99, 88.11, 50, 12, 4.54, {'ott_ubw': 155, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['W<HD?iq', 62, 108, 88.39, 53, 13, 6.12, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 51, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['U<BD<in', 63, 104, 88.79, 53, 13, 7.89, {'ott_ubw': 137, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['U<;D?in', 64, 104, 88.16, 53, 13, 8.7, {'ott_ubw': 137, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['V<9D4in', 62, 106, 87.01, 53, 13, 9.11, {'ott_ubw': 139, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['V<AD?iq', 62, 110, 87.43, 53, 13, 8.5, {'ott_ubw': 139, 'ott_dbw': 72, 'long_tps_qty_index': 40, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['V</D?iq', 62, 110, 87.38, 53, 13, 9.92, {'ott_ubw': 139, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['V<BD?in', 63, 106, 87.14, 53, 13, 7.89, {'ott_ubw': 139, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['[<;D?in', 62, 99, 87.57, 50, 12, 4.34, {'ott_ubw': 152, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['\\<;D>in', 62, 99, 87.19, 50, 12, 4.34, {'ott_ubw': 155, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['\\<;D8in', 62, 99, 87.19, 50, 12, 4.34, {'ott_ubw': 155, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['V<)D?in', 62, 106, 87.72, 53, 13, 10.73, {'ott_ubw': 139, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['V<)D8in', 62, 106, 87.72, 53, 13, 10.73, {'ott_ubw': 139, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['V<)D3in', 62, 106, 87.72, 53, 13, 10.73, {'ott_ubw': 139, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['Z<9Dniu', 60, 100, 86.19, 54, 11, 6.15, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 23, 'chop_bandwidth': 190, 'max_risk': 49}],
['T<=D?in', 64, 106, 86.63, 53, 13, 7.69, {'ott_ubw': 134, 'ott_dbw': 72, 'long_tps_qty_index': 33, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['V<;D8in', 63, 106, 86.5, 53, 13, 8.7, {'ott_ubw': 139, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['V<;D>in', 63, 106, 86.5, 53, 13, 8.7, {'ott_ubw': 139, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['V<;D?in', 63, 106, 86.5, 53, 13, 8.7, {'ott_ubw': 139, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['R<-D<in', 62, 111, 86.92, 50, 14, 7.35, {'ott_ubw': 129, 'ott_dbw': 72, 'long_tps_qty_index': 8, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['V<;DDin', 63, 106, 86.24, 53, 13, 8.7, {'ott_ubw': 139, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 12, 'chop_bandwidth': 190, 'max_risk': 45}],
['R<)D.in', 61, 111, 85.88, 50, 14, 8.31, {'ott_ubw': 129, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['V<2D?iq', 62, 112, 85.48, 53, 13, 8.3, {'ott_ubw': 139, 'ott_dbw': 72, 'long_tps_qty_index': 16, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['[<BD<in', 62, 99, 85.01, 50, 12, 3.94, {'ott_ubw': 152, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['[<BD?in', 62, 99, 85.01, 50, 12, 3.94, {'ott_ubw': 152, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['R<;D?in', 64, 109, 85.76, 50, 14, 6.38, {'ott_ubw': 129, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['R<;D>in', 64, 109, 85.76, 50, 14, 6.38, {'ott_ubw': 129, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['R<)D?in', 61, 111, 85.88, 50, 14, 8.31, {'ott_ubw': 129, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['R<)D0in', 61, 111, 85.88, 50, 14, 8.31, {'ott_ubw': 129, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['R<)D<in', 61, 111, 85.88, 50, 14, 8.31, {'ott_ubw': 129, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['W<)D.in', 61, 102, 84.53, 53, 13, 10.73, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['Z<9Dqiw', 59, 99, 84.86, 58, 12, 7.27, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 23, 'chop_bandwidth': 190, 'max_risk': 50}],
['R</D?iq', 62, 116, 84.15, 50, 14, 7.54, {'ott_ubw': 129, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['W<)D3in', 61, 102, 84.53, 53, 13, 10.73, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['U<=D?iq', 62, 113, 84.58, 53, 13, 7.69, {'ott_ubw': 137, 'ott_dbw': 72, 'long_tps_qty_index': 33, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['\\<BD?in', 62, 99, 84.84, 50, 12, 3.94, {'ott_ubw': 155, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['U<>D?in', 63, 107, 84.12, 53, 13, 6.51, {'ott_ubw': 137, 'ott_dbw': 72, 'long_tps_qty_index': 35, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['V<=D?in', 63, 106, 84.62, 53, 13, 7.69, {'ott_ubw': 139, 'ott_dbw': 72, 'long_tps_qty_index': 33, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['W<9D.in', 61, 102, 83.28, 53, 13, 9.11, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['T:Ue*Xf', 60, 105, 83.62, 53, 13, 9.22, {'ott_ubw': 134, 'ott_dbw': 67, 'long_tps_qty_index': 71, 'short_tps_qty_index': 97, 'chop_rsi_len': 6, 'chop_bandwidth': 147, 'max_risk': 41}],
['\\<HD?iq', 63, 104, 83.94, 50, 12, 3.15, {'ott_ubw': 155, 'ott_dbw': 72, 'long_tps_qty_index': 51, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['Z</D?iq', 60, 105, 83.19, 50, 12, 4.94, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['^</D>iq', 60, 102, 83.95, 50, 12, 4.94, {'ott_ubw': 160, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['[<=D?in', 62, 99, 83.19, 50, 12, 3.84, {'ott_ubw': 152, 'ott_dbw': 72, 'long_tps_qty_index': 33, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['T<;D>in', 63, 109, 83.22, 53, 13, 8.7, {'ott_ubw': 134, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['T<;D?in', 63, 109, 83.22, 53, 13, 8.7, {'ott_ubw': 134, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['U<)D.in', 61, 107, 82.8, 53, 13, 10.73, {'ott_ubw': 137, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['U<)D>in', 61, 107, 82.8, 53, 13, 10.73, {'ott_ubw': 137, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['T<2D?iq', 62, 115, 82.78, 53, 13, 8.3, {'ott_ubw': 134, 'ott_dbw': 72, 'long_tps_qty_index': 16, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['`<BD?iv', 63, 99, 82.06, 46, 13, 3.68, {'ott_ubw': 165, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 49}],
['T<BD?in', 62, 109, 82.44, 53, 13, 7.89, {'ott_ubw': 134, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['S<=D?in', 63, 107, 82.69, 53, 13, 7.69, {'ott_ubw': 132, 'ott_dbw': 72, 'long_tps_qty_index': 33, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['W<;D>in', 62, 102, 82.8, 53, 13, 8.7, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['W<;D?in', 62, 102, 82.8, 53, 13, 8.7, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['T<;DDin', 63, 109, 82.81, 53, 13, 8.7, {'ott_ubw': 134, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 12, 'chop_bandwidth': 190, 'max_risk': 45}],
['T<9D4in', 61, 109, 81.81, 53, 13, 9.11, {'ott_ubw': 134, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['T<9D.in', 61, 109, 81.81, 53, 13, 9.11, {'ott_ubw': 134, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['Y</D?iq', 59, 110, 81.09, 50, 12, 4.94, {'ott_ubw': 147, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['\\<+D<in', 60, 101, 81.58, 50, 12, 4.44, {'ott_ubw': 155, 'ott_dbw': 72, 'long_tps_qty_index': 5, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['R<9D4in', 61, 112, 80.16, 50, 14, 6.77, {'ott_ubw': 129, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['T<)D8in', 61, 107, 80.32, 53, 13, 10.73, {'ott_ubw': 134, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['T<)D0in', 61, 107, 80.32, 53, 13, 10.73, {'ott_ubw': 134, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['Q<@D?iv', 61, 117, 80.1, 50, 14, 7.59, {'ott_ubw': 126, 'ott_dbw': 72, 'long_tps_qty_index': 38, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 49}],
['W<+D<in', 61, 102, 80.72, 53, 13, 8.91, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 5, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['W<=D?in', 62, 102, 80.16, 53, 13, 7.69, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 33, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['W<+D1in', 61, 102, 80.72, 53, 13, 8.91, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 5, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
[']<)D.in', 61, 95, 80.83, 50, 12, 5.33, {'ott_ubw': 158, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['^<)D>in', 61, 95, 80.83, 50, 12, 5.33, {'ott_ubw': 160, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
[']<)D>in', 61, 95, 80.83, 50, 12, 5.33, {'ott_ubw': 158, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['Z<)D<in', 59, 103, 79.9, 50, 12, 5.33, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['R<BD?in', 62, 112, 79.69, 50, 14, 5.61, {'ott_ubw': 129, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['Z<)D?in', 59, 103, 79.9, 50, 12, 5.33, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['Z<)D8in', 59, 103, 79.9, 50, 12, 5.33, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['Z<)D>in', 59, 103, 79.9, 50, 12, 5.33, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['Z<)D.in', 59, 103, 79.9, 50, 12, 5.33, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['S<9D.in', 60, 110, 78.03, 53, 13, 9.11, {'ott_ubw': 132, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['Z<9D4in', 59, 103, 78.1, 50, 12, 4.54, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['X</D?iq', 60, 109, 78.33, 53, 13, 9.92, {'ott_ubw': 145, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['S</D?iq', 61, 114, 78.4, 53, 13, 9.92, {'ott_ubw': 132, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['X<YD?iq', 60, 110, 78.18, 53, 13, 7.29, {'ott_ubw': 145, 'ott_dbw': 72, 'long_tps_qty_index': 78, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['S<;DDin', 62, 110, 78.96, 53, 13, 8.7, {'ott_ubw': 132, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 12, 'chop_bandwidth': 190, 'max_risk': 45}],
['S<-D<in', 61, 108, 77.81, 53, 13, 9.71, {'ott_ubw': 132, 'ott_dbw': 72, 'long_tps_qty_index': 8, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['^<2D?iq', 61, 102, 77.04, 50, 12, 4.14, {'ott_ubw': 160, 'ott_dbw': 72, 'long_tps_qty_index': 16, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['S<HD?iq', 62, 116, 77.42, 53, 13, 6.12, {'ott_ubw': 132, 'ott_dbw': 72, 'long_tps_qty_index': 51, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['X<tDiiv', 65, 104, 77.63, 53, 13, 5.13, {'ott_ubw': 145, 'ott_dbw': 72, 'long_tps_qty_index': 120, 'short_tps_qty_index': 44, 'chop_rsi_len': 21, 'chop_bandwidth': 190, 'max_risk': 49}],
['Z<;D?in', 61, 103, 77.38, 50, 12, 4.34, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['Z<;D>in', 61, 103, 77.38, 50, 12, 4.34, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['Z<;D8in', 61, 103, 77.38, 50, 12, 4.34, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
[']<9D4in', 61, 95, 76.86, 50, 12, 4.54, {'ott_ubw': 158, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['S<)D.in', 61, 108, 76.35, 53, 13, 10.73, {'ott_ubw': 132, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['S<)D?in', 61, 108, 76.35, 53, 13, 10.73, {'ott_ubw': 132, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['S<)D3in', 61, 108, 76.35, 53, 13, 10.73, {'ott_ubw': 132, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['S<)D>in', 61, 108, 76.35, 53, 13, 10.73, {'ott_ubw': 132, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['_</D?iq', 60, 101, 76.06, 50, 12, 4.94, {'ott_ubw': 163, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['Z<BD?in', 61, 103, 76.83, 50, 12, 3.94, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['^<9D=in', 61, 95, 76.86, 50, 12, 4.54, {'ott_ubw': 160, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['><BDiiv', 58, 151, 75.49, 54, 24, 3.2, {'ott_ubw': 77, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 21, 'chop_bandwidth': 190, 'max_risk': 49}],
[']<tDiiv', 65, 99, 75.57, 50, 12, 2.66, {'ott_ubw': 158, 'ott_dbw': 72, 'long_tps_qty_index': 120, 'short_tps_qty_index': 44, 'chop_rsi_len': 21, 'chop_bandwidth': 190, 'max_risk': 49}],
[']<;D?in', 61, 97, 74.28, 50, 12, 4.34, {'ott_ubw': 158, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
[']<.D<in', 60, 97, 74.04, 50, 12, 4.54, {'ott_ubw': 158, 'ott_dbw': 72, 'long_tps_qty_index': 9, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['Y<D;7Qr', 57, 107, 74.87, 50, 12, 3.29, {'ott_ubw': 147, 'ott_dbw': 72, 'long_tps_qty_index': 44, 'short_tps_qty_index': 30, 'chop_rsi_len': 9, 'chop_bandwidth': 129, 'max_risk': 47}],
['Y<HD?iq', 60, 110, 74.82, 50, 12, 3.15, {'ott_ubw': 147, 'ott_dbw': 72, 'long_tps_qty_index': 51, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['X<HD?iq', 61, 109, 74.4, 53, 13, 6.12, {'ott_ubw': 145, 'ott_dbw': 72, 'long_tps_qty_index': 51, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['G<BDiiv', 59, 141, 74.83, 45, 22, -0.64, {'ott_ubw': 100, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 21, 'chop_bandwidth': 190, 'max_risk': 49}],
['F<@D?iv', 57, 148, 74.47, 45, 22, -0.39, {'ott_ubw': 98, 'ott_dbw': 72, 'long_tps_qty_index': 38, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 49}],
[']<;D>in', 61, 97, 74.28, 50, 12, 4.34, {'ott_ubw': 158, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['^<;D>in', 61, 97, 74.28, 50, 12, 4.34, {'ott_ubw': 160, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['^<;D8in', 61, 97, 74.28, 50, 12, 4.34, {'ott_ubw': 160, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['Y<=D?iq', 60, 110, 73.38, 50, 12, 3.84, {'ott_ubw': 147, 'ott_dbw': 72, 'long_tps_qty_index': 33, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['X<BD<in', 60, 102, 73.65, 53, 13, 7.89, {'ott_ubw': 145, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['X<BD?in', 60, 102, 73.65, 53, 13, 7.89, {'ott_ubw': 145, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['X<)D3in', 59, 102, 72.08, 53, 13, 10.73, {'ott_ubw': 145, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['X<)D<in', 59, 102, 72.08, 53, 13, 10.73, {'ott_ubw': 145, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['U<9D?ia', 62, 91, 72.08, 53, 13, 9.11, {'ott_ubw': 137, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 39}],
['T<MD+i_', 60, 92, 72.17, 53, 13, 8.68, {'ott_ubw': 134, 'ott_dbw': 72, 'long_tps_qty_index': 59, 'short_tps_qty_index': 44, 'chop_rsi_len': 6, 'chop_bandwidth': 190, 'max_risk': 38}],
['Z<9Dqir', 58, 98, 72.82, 54, 11, 6.15, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 23, 'chop_bandwidth': 190, 'max_risk': 47}],
['6<MDai_', 52, 159, 72.3, 50, 24, 6.44, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 59, 'short_tps_qty_index': 44, 'chop_rsi_len': 19, 'chop_bandwidth': 190, 'max_risk': 38}],
['`</D?iq', 60, 99, 72.23, 50, 12, 4.94, {'ott_ubw': 165, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['^<BD?in', 61, 97, 72.54, 50, 12, 3.94, {'ott_ubw': 160, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['S<+D<in', 60, 110, 72.02, 53, 13, 8.9, {'ott_ubw': 132, 'ott_dbw': 72, 'long_tps_qty_index': 5, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['r4)D3in', 57, 105, 72.15, 35, 14, -2.33, {'ott_ubw': 212, 'ott_dbw': 51, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['W<9D?ia', 62, 89, 71.59, 53, 13, 9.11, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 39}],
['T<9DDi_', 60, 92, 71.82, 53, 13, 9.08, {'ott_ubw': 134, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 12, 'chop_bandwidth': 190, 'max_risk': 38}],
['W<MDEi_', 60, 87, 71.14, 53, 13, 8.68, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 59, 'short_tps_qty_index': 44, 'chop_rsi_len': 12, 'chop_bandwidth': 190, 'max_risk': 38}],
['r4+D<in', 57, 103, 71.19, 35, 14, -2.33, {'ott_ubw': 212, 'ott_dbw': 51, 'long_tps_qty_index': 5, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['r4)D<in', 57, 103, 71.18, 35, 14, -2.33, {'ott_ubw': 212, 'ott_dbw': 51, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['T<5D?i_', 60, 93, 70.81, 53, 13, 10.7, {'ott_ubw': 134, 'ott_dbw': 72, 'long_tps_qty_index': 21, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['6<MDdi_', 52, 156, 70.54, 50, 24, 6.44, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 59, 'short_tps_qty_index': 44, 'chop_rsi_len': 20, 'chop_bandwidth': 190, 'max_risk': 38}],
['^<ZD?iq', 61, 102, 70.3, 50, 12, 3.35, {'ott_ubw': 160, 'ott_dbw': 72, 'long_tps_qty_index': 79, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
[']<HD?iq', 62, 102, 70.72, 50, 12, 3.15, {'ott_ubw': 158, 'ott_dbw': 72, 'long_tps_qty_index': 51, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['_<2D?iq', 61, 101, 70.02, 50, 12, 4.14, {'ott_ubw': 163, 'ott_dbw': 72, 'long_tps_qty_index': 16, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['X<;D>in', 60, 102, 70.17, 53, 13, 8.7, {'ott_ubw': 145, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['_<)D8in', 60, 96, 70.68, 50, 12, 5.33, {'ott_ubw': 163, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['_<)D.in', 60, 96, 70.68, 50, 12, 5.33, {'ott_ubw': 163, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['U<XD+i_', 60, 89, 69.07, 53, 13, 7.67, {'ott_ubw': 137, 'ott_dbw': 72, 'long_tps_qty_index': 76, 'short_tps_qty_index': 44, 'chop_rsi_len': 6, 'chop_bandwidth': 190, 'max_risk': 38}],
['r:)D3in', 66, 83, 69.62, 50, 12, 1.85, {'ott_ubw': 212, 'ott_dbw': 67, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['i<)D0in', 62, 89, 68.62, 45, 11, 0.16, {'ott_ubw': 189, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['_<9D=in', 60, 96, 68.61, 50, 12, 4.54, {'ott_ubw': 163, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['T<9DAi_', 60, 93, 68.37, 53, 13, 9.08, {'ott_ubw': 134, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['c<ID+iw', 65, 98, 68.99, 50, 14, 2.7, {'ott_ubw': 173, 'ott_dbw': 72, 'long_tps_qty_index': 52, 'short_tps_qty_index': 44, 'chop_rsi_len': 6, 'chop_bandwidth': 190, 'max_risk': 50}],
['W<XD+i_', 60, 87, 68.94, 53, 13, 7.67, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 76, 'short_tps_qty_index': 44, 'chop_rsi_len': 6, 'chop_bandwidth': 190, 'max_risk': 38}],
['W<ID?i_', 62, 88, 68.26, 53, 13, 7.06, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 52, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['_<=D?iq', 61, 101, 68.49, 50, 12, 3.84, {'ott_ubw': 163, 'ott_dbw': 72, 'long_tps_qty_index': 33, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['h<)D8in', 62, 88, 68.56, 45, 11, 0.16, {'ott_ubw': 186, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['h<)D3in', 62, 88, 68.56, 45, 11, 0.16, {'ott_ubw': 186, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['h<,D8in', 62, 88, 68.6, 45, 11, 0.16, {'ott_ubw': 186, 'ott_dbw': 72, 'long_tps_qty_index': 6, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['h<)D.in', 62, 88, 68.56, 45, 11, 0.16, {'ott_ubw': 186, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['S<5D?i_', 59, 94, 67.06, 53, 13, 10.7, {'ott_ubw': 132, 'ott_dbw': 72, 'long_tps_qty_index': 21, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['^<>D?in', 61, 97, 67.81, 50, 12, 3.35, {'ott_ubw': 160, 'ott_dbw': 72, 'long_tps_qty_index': 35, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['_<;D?in', 61, 96, 67.59, 50, 12, 4.34, {'ott_ubw': 163, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['_<;D2in', 61, 96, 67.59, 50, 12, 4.34, {'ott_ubw': 163, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['i<9D4in', 62, 89, 66.94, 45, 11, 0.16, {'ott_ubw': 189, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['U<9D;i_', 60, 90, 66.08, 53, 13, 9.09, {'ott_ubw': 137, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 38}],
['U<MD?i_', 60, 90, 66.03, 53, 13, 8.68, {'ott_ubw': 137, 'ott_dbw': 72, 'long_tps_qty_index': 59, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['a</D?iq', 60, 97, 66.19, 50, 12, 4.94, {'ott_ubw': 168, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['_<BD?in', 61, 96, 66.69, 50, 12, 3.94, {'ott_ubw': 163, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['r:;D?in', 65, 85, 66.35, 50, 12, 1.85, {'ott_ubw': 212, 'ott_dbw': 67, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['i<;D>in', 64, 89, 66.44, 45, 11, 0.16, {'ott_ubw': 189, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['i<;D4in', 64, 89, 66.44, 45, 11, 0.16, {'ott_ubw': 189, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['i<;D?in', 64, 89, 66.44, 45, 11, 0.16, {'ott_ubw': 189, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['r:;D>in', 65, 85, 66.35, 50, 12, 1.85, {'ott_ubw': 212, 'ott_dbw': 67, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['h<;D>in', 63, 88, 66.23, 45, 11, 0.16, {'ott_ubw': 186, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['r:;D6in', 65, 85, 66.35, 50, 12, 1.85, {'ott_ubw': 212, 'ott_dbw': 67, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['i<;D8in', 64, 89, 66.44, 45, 11, 0.16, {'ott_ubw': 189, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['s:;D>in', 65, 85, 66.35, 50, 12, 1.85, {'ott_ubw': 215, 'ott_dbw': 67, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['r:;D8in', 65, 85, 66.35, 50, 12, 1.85, {'ott_ubw': 212, 'ott_dbw': 67, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['r:;D2in', 65, 85, 66.35, 50, 12, 1.85, {'ott_ubw': 212, 'ott_dbw': 67, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['`<)D8in', 60, 94, 66.42, 50, 12, 5.33, {'ott_ubw': 165, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['`<)D?in', 60, 94, 66.42, 50, 12, 5.33, {'ott_ubw': 165, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['`<)D.in', 60, 94, 66.42, 50, 12, 5.33, {'ott_ubw': 165, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['`<)D0in', 60, 94, 66.42, 50, 12, 5.33, {'ott_ubw': 165, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['`<)D.im', 60, 94, 66.42, 50, 12, 5.33, {'ott_ubw': 165, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['F<kDliw', 59, 142, 65.99, 47, 23, 0.16, {'ott_ubw': 98, 'ott_dbw': 72, 'long_tps_qty_index': 106, 'short_tps_qty_index': 44, 'chop_rsi_len': 22, 'chop_bandwidth': 190, 'max_risk': 50}],
['Q<BD?iv', 60, 124, 65.01, 50, 14, 7.18, {'ott_ubw': 126, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 49}],
['Y<BD?in', 59, 104, 65.93, 50, 12, 3.94, {'ott_ubw': 147, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['i<BD<in', 64, 89, 65.97, 45, 11, 0.16, {'ott_ubw': 189, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['i<BD?in', 64, 89, 65.97, 45, 11, 0.16, {'ott_ubw': 189, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['i<=D?in', 64, 89, 65.83, 45, 11, 0.16, {'ott_ubw': 189, 'ott_dbw': 72, 'long_tps_qty_index': 33, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['Y<;D>in', 59, 104, 65.16, 50, 12, 4.34, {'ott_ubw': 147, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['Y<;D?in', 59, 104, 65.16, 50, 12, 4.34, {'ott_ubw': 147, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['Y<;D6in', 59, 104, 65.16, 50, 12, 4.34, {'ott_ubw': 147, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['Y<;D2in', 59, 104, 65.16, 50, 12, 4.34, {'ott_ubw': 147, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['S<9DAi_', 59, 94, 64.89, 53, 13, 9.08, {'ott_ubw': 132, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['];@ELwq', 62, 97, 64.2, 58, 12, 6.4, {'ott_ubw': 158, 'ott_dbw': 69, 'long_tps_qty_index': 38, 'short_tps_qty_index': 46, 'chop_rsi_len': 14, 'chop_bandwidth': 225, 'max_risk': 47}],
['`<;D>in', 61, 94, 64.21, 50, 12, 4.34, {'ott_ubw': 165, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['`<;D8in', 61, 94, 64.21, 50, 12, 4.34, {'ott_ubw': 165, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['`<;D2in', 61, 94, 64.21, 50, 12, 4.34, {'ott_ubw': 165, 'ott_dbw': 72, 'long_tps_qty_index': 30, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['Y<)D8in', 57, 104, 64.28, 50, 12, 5.33, {'ott_ubw': 147, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 45}],
['Y<)D.in', 57, 104, 64.28, 50, 12, 5.33, {'ott_ubw': 147, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['Y<9D4in', 57, 104, 64.94, 50, 12, 4.54, {'ott_ubw': 147, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 45}],
['r<.D<is', 64, 85, 63.4, 45, 11, 0.16, {'ott_ubw': 212, 'ott_dbw': 72, 'long_tps_qty_index': 9, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 48}],
['Z<9Dqim', 57, 92, 63.94, 54, 11, 6.15, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 23, 'chop_bandwidth': 190, 'max_risk': 45}],
['c<MD+iv', 60, 97, 63.38, 46, 13, 2.76, {'ott_ubw': 173, 'ott_dbw': 72, 'long_tps_qty_index': 59, 'short_tps_qty_index': 44, 'chop_rsi_len': 6, 'chop_bandwidth': 190, 'max_risk': 49}],
['6<MDhi_', 52, 155, 63.08, 44, 25, 2.16, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 59, 'short_tps_qty_index': 44, 'chop_rsi_len': 21, 'chop_bandwidth': 190, 'max_risk': 38}],
['6<MDfi_', 52, 155, 63.08, 44, 25, 2.16, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 59, 'short_tps_qty_index': 44, 'chop_rsi_len': 21, 'chop_bandwidth': 190, 'max_risk': 38}],
['W<YD;i_', 60, 88, 63.3, 53, 13, 7.26, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 78, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 38}],
['X<MD+i_', 60, 85, 62.22, 53, 13, 8.68, {'ott_ubw': 145, 'ott_dbw': 72, 'long_tps_qty_index': 59, 'short_tps_qty_index': 44, 'chop_rsi_len': 6, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<8D`i_', 53, 127, 62.07, 50, 22, 4.98, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 25, 'short_tps_qty_index': 44, 'chop_rsi_len': 19, 'chop_bandwidth': 190, 'max_risk': 38}],
['U<YD;i_', 60, 90, 62.84, 53, 13, 7.26, {'ott_ubw': 137, 'ott_dbw': 72, 'long_tps_qty_index': 78, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 38}],
['N<BDiiv', 60, 123, 62.82, 53, 15, 7.39, {'ott_ubw': 119, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 21, 'chop_bandwidth': 190, 'max_risk': 49}],
['h<+D<in', 61, 88, 62.29, 45, 11, 0.16, {'ott_ubw': 186, 'ott_dbw': 72, 'long_tps_qty_index': 5, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['h<+D1in', 61, 88, 62.29, 45, 11, 0.16, {'ott_ubw': 186, 'ott_dbw': 72, 'long_tps_qty_index': 5, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['s:)D.in', 65, 86, 62.82, 50, 12, 1.85, {'ott_ubw': 215, 'ott_dbw': 67, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['r:)D.in', 65, 86, 62.82, 50, 12, 1.85, {'ott_ubw': 212, 'ott_dbw': 67, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['r<)D.is', 64, 87, 61.31, 45, 11, 0.16, {'ott_ubw': 212, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 48}],
['c<8D?is', 61, 95, 61.43, 46, 13, 2.25, {'ott_ubw': 173, 'ott_dbw': 72, 'long_tps_qty_index': 25, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 48}],
['r<)D.it', 64, 87, 61.31, 45, 11, 0.16, {'ott_ubw': 212, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 48}],
['r<)D?it', 64, 87, 61.31, 45, 11, 0.16, {'ott_ubw': 212, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 48}],
['r<)D>it', 64, 87, 61.31, 45, 11, 0.16, {'ott_ubw': 212, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 48}],
['r<)D3is', 64, 87, 61.31, 45, 11, 0.16, {'ott_ubw': 212, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 8, 'chop_bandwidth': 190, 'max_risk': 48}],
['r<9D.is', 64, 87, 60.69, 45, 11, 0.16, {'ott_ubw': 212, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 48}],
['r<9D.it', 64, 87, 60.69, 45, 11, 0.16, {'ott_ubw': 212, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 48}],
['RN2)F-e', 61, 59, 60.38, 83, 6, 17.32, {'ott_ubw': 129, 'ott_dbw': 119, 'long_tps_qty_index': 16, 'short_tps_qty_index': 2, 'chop_rsi_len': 13, 'chop_bandwidth': 38, 'max_risk': 41}],
['Q<9D?ic', 59, 106, 60.58, 50, 14, 8.39, {'ott_ubw': 126, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 40}],
['c:mD?i_', 64, 74, 60.81, 50, 12, 3.32, {'ott_ubw': 173, 'ott_dbw': 67, 'long_tps_qty_index': 109, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['c<8D?ir', 60, 94, 60.47, 50, 12, 4.14, {'ott_ubw': 173, 'ott_dbw': 72, 'long_tps_qty_index': 25, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['c<ID+iu', 63, 97, 60.87, 46, 13, 1.65, {'ott_ubw': 173, 'ott_dbw': 72, 'long_tps_qty_index': 52, 'short_tps_qty_index': 44, 'chop_rsi_len': 6, 'chop_bandwidth': 190, 'max_risk': 49}],
['X<XD+i_', 60, 85, 60.85, 53, 13, 7.67, {'ott_ubw': 145, 'ott_dbw': 72, 'long_tps_qty_index': 76, 'short_tps_qty_index': 44, 'chop_rsi_len': 6, 'chop_bandwidth': 190, 'max_risk': 38}],
['6<8D+i_', 51, 178, 60.0, 47, 23, 5.86, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 25, 'short_tps_qty_index': 44, 'chop_rsi_len': 6, 'chop_bandwidth': 190, 'max_risk': 38}],
['6<7D+i_', 51, 178, 60.53, 50, 24, 7.17, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 24, 'short_tps_qty_index': 44, 'chop_rsi_len': 6, 'chop_bandwidth': 190, 'max_risk': 38}],
['g</D?iq', 61, 93, 60.61, 45, 11, 0.16, {'ott_ubw': 183, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['r</D>it', 64, 87, 60.89, 45, 11, 0.16, {'ott_ubw': 212, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 48}],
['r</D?is', 64, 87, 60.89, 45, 11, 0.16, {'ott_ubw': 212, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 48}],
['f</D?iq', 61, 93, 60.61, 45, 11, 0.16, {'ott_ubw': 181, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['r:/D?iq', 65, 89, 60.43, 50, 12, 1.85, {'ott_ubw': 212, 'ott_dbw': 67, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['q</D?it', 63, 88, 60.78, 45, 11, 0.16, {'ott_ubw': 209, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 48}],
['h<BD?in', 62, 88, 60.43, 45, 11, 0.16, {'ott_ubw': 186, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['c<qD?iv', 63, 97, 60.58, 46, 13, 1.89, {'ott_ubw': 173, 'ott_dbw': 72, 'long_tps_qty_index': 116, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 49}],
['h<BD<in', 62, 88, 60.43, 45, 11, 0.16, {'ott_ubw': 186, 'ott_dbw': 72, 'long_tps_qty_index': 41, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['h<=D?in', 62, 88, 60.48, 45, 11, 0.16, {'ott_ubw': 186, 'ott_dbw': 72, 'long_tps_qty_index': 33, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 45}],
['j<+D<in', 62, 87, 60.29, 45, 11, 0.16, {'ott_ubw': 191, 'ott_dbw': 72, 'long_tps_qty_index': 5, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 45}],
['j<)D.in', 62, 87, 60.12, 45, 11, 0.16, {'ott_ubw': 191, 'ott_dbw': 72, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['r5)D.in', 58, 98, 60.42, 38, 13, -2.95, {'ott_ubw': 212, 'ott_dbw': 54, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['s5)D.in', 58, 98, 60.42, 38, 13, -2.95, {'ott_ubw': 215, 'ott_dbw': 54, 'long_tps_qty_index': 2, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 45}],
['b<2D?iq', 62, 95, 59.55, 50, 12, 4.14, {'ott_ubw': 171, 'ott_dbw': 72, 'long_tps_qty_index': 16, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['c</D?iq', 60, 95, 59.67, 50, 12, 4.94, {'ott_ubw': 173, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['W<gD?i_', 60, 88, 58.9, 53, 13, 5.9, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 100, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['c:ID+i_', 64, 74, 58.42, 50, 12, 4.02, {'ott_ubw': 173, 'ott_dbw': 67, 'long_tps_qty_index': 52, 'short_tps_qty_index': 44, 'chop_rsi_len': 6, 'chop_bandwidth': 190, 'max_risk': 38}],
['b</D?iq', 60, 96, 58.82, 50, 12, 4.94, {'ott_ubw': 171, 'ott_dbw': 72, 'long_tps_qty_index': 11, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 47}],
['X<9D?ic', 59, 92, 57.87, 53, 13, 9.11, {'ott_ubw': 145, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 40}],
['6<XD`i_', 53, 160, 57.18, 54, 24, 7.58, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 76, 'short_tps_qty_index': 44, 'chop_rsi_len': 19, 'chop_bandwidth': 190, 'max_risk': 38}],
['6<XD_i_', 53, 160, 57.18, 54, 24, 7.58, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 76, 'short_tps_qty_index': 44, 'chop_rsi_len': 19, 'chop_bandwidth': 190, 'max_risk': 38}],
['6<XD^i_', 53, 160, 57.18, 54, 24, 7.58, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 76, 'short_tps_qty_index': 44, 'chop_rsi_len': 19, 'chop_bandwidth': 190, 'max_risk': 38}],
['S<vDEi]', 60, 87, 57.85, 53, 13, 5.11, {'ott_ubw': 132, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 12, 'chop_bandwidth': 190, 'max_risk': 37}],
['_<9D?ic', 60, 84, 57.25, 50, 12, 4.54, {'ott_ubw': 163, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 40}],
['c<8D?ig', 62, 83, 57.88, 50, 12, 4.14, {'ott_ubw': 173, 'ott_dbw': 72, 'long_tps_qty_index': 25, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 42}],
['6<XDei_', 53, 157, 56.97, 54, 24, 7.58, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 76, 'short_tps_qty_index': 44, 'chop_rsi_len': 20, 'chop_bandwidth': 190, 'max_risk': 38}],
['6<XDdi_', 53, 157, 56.97, 54, 24, 7.58, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 76, 'short_tps_qty_index': 44, 'chop_rsi_len': 20, 'chop_bandwidth': 190, 'max_risk': 38}],
['6<XDbi_', 53, 157, 56.97, 54, 24, 7.58, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 76, 'short_tps_qty_index': 44, 'chop_rsi_len': 20, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<9D\\i[', 54, 131, 56.74, 50, 24, 6.21, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 18, 'chop_bandwidth': 190, 'max_risk': 36}],
['\\<8D?i_', 59, 82, 56.17, 50, 12, 4.12, {'ott_ubw': 155, 'ott_dbw': 72, 'long_tps_qty_index': 25, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['Z<vDnif', 62, 82, 56.62, 54, 11, 4.27, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 23, 'chop_bandwidth': 190, 'max_risk': 41}],
['+<5D^i_', 53, 126, 55.02, 50, 22, 7.6, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 21, 'short_tps_qty_index': 44, 'chop_rsi_len': 19, 'chop_bandwidth': 190, 'max_risk': 38}],
['6<1D+i_', 51, 178, 55.95, 50, 24, 7.67, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 14, 'short_tps_qty_index': 44, 'chop_rsi_len': 6, 'chop_bandwidth': 190, 'max_risk': 38}],
['6<XDub_', 53, 157, 55.57, 54, 24, 7.58, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 76, 'short_tps_qty_index': 44, 'chop_rsi_len': 24, 'chop_bandwidth': 172, 'max_risk': 38}],
['+<9Dei_', 55, 122, 55.42, 50, 22, 6.73, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 20, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<sD_i_', 56, 127, 55.19, 52, 23, 6.53, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 119, 'short_tps_qty_index': 44, 'chop_rsi_len': 19, 'chop_bandwidth': 190, 'max_risk': 38}],
['V<vDEi]', 62, 82, 55.26, 53, 13, 5.11, {'ott_ubw': 139, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 12, 'chop_bandwidth': 190, 'max_risk': 37}],
['+<8Dii_', 54, 119, 55.62, 50, 22, 4.98, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 25, 'short_tps_qty_index': 44, 'chop_rsi_len': 21, 'chop_bandwidth': 190, 'max_risk': 38}],
['Z<vDniq', 61, 98, 55.05, 54, 11, 4.27, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 23, 'chop_bandwidth': 190, 'max_risk': 47}],
['c<8D?ie', 62, 82, 55.68, 50, 12, 4.14, {'ott_ubw': 173, 'ott_dbw': 72, 'long_tps_qty_index': 25, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 41}],
['X<8D?i_', 58, 86, 54.18, 53, 13, 8.28, {'ott_ubw': 145, 'ott_dbw': 72, 'long_tps_qty_index': 25, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['Z<9D/i_', 59, 83, 53.05, 50, 12, 4.52, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 7, 'chop_bandwidth': 190, 'max_risk': 38}],
['Z<9D_i_', 59, 76, 53.83, 50, 12, 4.52, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 19, 'chop_bandwidth': 190, 'max_risk': 38}],
['Z<9D7i_', 59, 83, 53.05, 50, 12, 4.52, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 38}],
['Z<9D:i_', 59, 83, 53.05, 50, 12, 4.52, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 10, 'chop_bandwidth': 190, 'max_risk': 38}],
['Z<9D6i_', 59, 83, 53.05, 50, 12, 4.52, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 9, 'chop_bandwidth': 190, 'max_risk': 38}],
['Z<vDnih', 61, 84, 53.48, 54, 11, 4.27, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 23, 'chop_bandwidth': 190, 'max_risk': 42}],
['Z<8D?i_', 59, 83, 53.69, 50, 12, 4.12, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 25, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['c<MD+id', 62, 78, 53.51, 50, 12, 4.34, {'ott_ubw': 173, 'ott_dbw': 72, 'long_tps_qty_index': 59, 'short_tps_qty_index': 44, 'chop_rsi_len': 6, 'chop_bandwidth': 190, 'max_risk': 40}],
['X<MD?i_', 58, 86, 52.85, 53, 13, 8.68, {'ott_ubw': 145, 'ott_dbw': 72, 'long_tps_qty_index': 59, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<`Dhi_', 55, 120, 52.47, 52, 23, 7.31, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 89, 'short_tps_qty_index': 44, 'chop_rsi_len': 21, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<9D`i_', 54, 127, 52.52, 50, 22, 6.73, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 19, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<9D_i_', 54, 127, 52.52, 50, 22, 6.73, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 19, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<9Dai_', 54, 127, 52.52, 50, 22, 6.73, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 19, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<9Dqb_', 53, 132, 52.73, 50, 22, 6.73, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 23, 'chop_bandwidth': 172, 'max_risk': 38}],
['Z<9D(i_', 59, 81, 52.33, 54, 11, 6.13, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 5, 'chop_bandwidth': 190, 'max_risk': 38}],
['Z<vD)i_', 59, 81, 52.08, 54, 11, 4.24, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 5, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<`D_i_', 54, 127, 51.33, 52, 23, 7.31, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 89, 'short_tps_qty_index': 44, 'chop_rsi_len': 19, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<bD_i_', 53, 127, 51.36, 52, 23, 7.09, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 92, 'short_tps_qty_index': 44, 'chop_rsi_len': 19, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<9Deic', 56, 124, 51.4, 50, 22, 6.75, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 20, 'chop_bandwidth': 190, 'max_risk': 40}],
['W<vDEi]', 60, 81, 51.73, 53, 13, 5.11, {'ott_ubw': 142, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 12, 'chop_bandwidth': 190, 'max_risk': 37}],
['U<9Dqi_', 58, 79, 50.0, 58, 12, 10.7, {'ott_ubw': 137, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 23, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<9Dfi_', 55, 120, 50.83, 50, 22, 6.73, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 21, 'chop_bandwidth': 190, 'max_risk': 38}],
['R<9D?i_', 58, 97, 50.37, 50, 14, 6.84, {'ott_ubw': 129, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<9Dgi_', 55, 120, 50.83, 50, 22, 6.73, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 21, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<9Dhi_', 55, 120, 50.83, 50, 22, 6.73, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 21, 'chop_bandwidth': 190, 'max_risk': 38}],
['6<XD+t_', 51, 178, 49.27, 56, 23, 9.12, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 76, 'short_tps_qty_index': 44, 'chop_rsi_len': 6, 'chop_bandwidth': 217, 'max_risk': 38}],
['Z<9Dqii', 57, 85, 49.94, 54, 11, 6.15, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 23, 'chop_bandwidth': 190, 'max_risk': 43}],
['Z<9Dqij', 57, 85, 49.94, 54, 11, 6.15, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 23, 'chop_bandwidth': 190, 'max_risk': 43}],
['c<8D?id', 62, 79, 49.86, 50, 12, 4.14, {'ott_ubw': 173, 'ott_dbw': 72, 'long_tps_qty_index': 25, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 40}],
['c<8D?ic', 62, 79, 49.86, 50, 12, 4.14, {'ott_ubw': 173, 'ott_dbw': 72, 'long_tps_qty_index': 25, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 40}],
['+<gDki_', 56, 111, 48.96, 52, 23, 7.58, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 100, 'short_tps_qty_index': 44, 'chop_rsi_len': 22, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<sDfi_', 56, 120, 48.82, 52, 23, 6.53, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 119, 'short_tps_qty_index': 44, 'chop_rsi_len': 21, 'chop_bandwidth': 190, 'max_risk': 38}],
[',<vDfi]', 57, 120, 47.44, 61, 21, 8.16, {'ott_ubw': 30, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 21, 'chop_bandwidth': 190, 'max_risk': 37}],
['O<tD?i_', 60, 109, 47.61, 53, 15, 6.03, {'ott_ubw': 121, 'ott_dbw': 72, 'long_tps_qty_index': 120, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
[',<vD_i]', 56, 125, 46.29, 61, 21, 8.16, {'ott_ubw': 30, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 19, 'chop_bandwidth': 190, 'max_risk': 37}],
[',<vDci]', 57, 121, 46.73, 61, 21, 8.16, {'ott_ubw': 30, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 20, 'chop_bandwidth': 190, 'max_risk': 37}],
[',<vDai]', 56, 125, 46.29, 61, 21, 8.16, {'ott_ubw': 30, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 19, 'chop_bandwidth': 190, 'max_risk': 37}],
[',<vDdi]', 57, 121, 46.73, 61, 21, 8.16, {'ott_ubw': 30, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 20, 'chop_bandwidth': 190, 'max_risk': 37}],
[':<9D?i_', 52, 170, 46.1, 52, 23, 7.04, {'ott_ubw': 67, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<9Dqf_', 55, 119, 46.17, 50, 22, 6.73, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 23, 'chop_bandwidth': 182, 'max_risk': 38}],
['6<<Dui_', 54, 139, 46.49, 50, 24, 5.38, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 32, 'short_tps_qty_index': 44, 'chop_rsi_len': 24, 'chop_bandwidth': 190, 'max_risk': 38}],
['Y<9D?ic', 57, 90, 46.22, 50, 12, 4.54, {'ott_ubw': 147, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 40}],
[']<9D?i_', 59, 79, 46.37, 50, 12, 4.52, {'ott_ubw': 158, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<9DZi_', 52, 136, 45.27, 50, 24, 6.21, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 18, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<9D\\i_', 52, 136, 45.27, 50, 24, 6.21, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 18, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<9D[i_', 52, 136, 45.27, 50, 24, 6.21, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 18, 'chop_bandwidth': 190, 'max_risk': 38}],
['6<XDZi_', 51, 162, 44.12, 54, 24, 7.58, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 76, 'short_tps_qty_index': 44, 'chop_rsi_len': 18, 'chop_bandwidth': 190, 'max_risk': 38}],
['6<XD[i_', 51, 162, 44.12, 54, 24, 7.58, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 76, 'short_tps_qty_index': 44, 'chop_rsi_len': 18, 'chop_bandwidth': 190, 'max_risk': 38}],
['6<XD\\i_', 51, 162, 44.12, 54, 24, 7.58, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 76, 'short_tps_qty_index': 44, 'chop_rsi_len': 18, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<9Dqe_', 54, 123, 44.35, 50, 22, 6.73, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 23, 'chop_bandwidth': 179, 'max_risk': 38}],
['Z<vDnip', 60, 95, 44.31, 54, 11, 4.27, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 23, 'chop_bandwidth': 190, 'max_risk': 46}],
['+<p\\\\i_', 54, 136, 43.21, 50, 24, 4.73, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 114, 'short_tps_qty_index': 82, 'chop_rsi_len': 18, 'chop_bandwidth': 190, 'max_risk': 38}],
['6<DDDi_', 51, 180, 42.04, 58, 24, 9.08, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 44, 'short_tps_qty_index': 44, 'chop_rsi_len': 12, 'chop_bandwidth': 190, 'max_risk': 38}],
['Z<vDnii', 60, 85, 42.55, 54, 11, 4.27, {'ott_ubw': 150, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 23, 'chop_bandwidth': 190, 'max_risk': 43}],
['+<9Dgia', 55, 120, 41.5, 50, 22, 6.75, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 21, 'chop_bandwidth': 190, 'max_risk': 39}],
['M<vDEi]', 57, 107, 41.4, 60, 15, 5.57, {'ott_ubw': 116, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 12, 'chop_bandwidth': 190, 'max_risk': 37}],
['d<9D?i_', 60, 74, 41.48, 50, 12, 4.54, {'ott_ubw': 176, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
[',<vDki]', 57, 111, 40.56, 61, 21, 8.16, {'ott_ubw': 30, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 22, 'chop_bandwidth': 190, 'max_risk': 37}],
['+<pk\\i_', 55, 136, 40.11, 50, 24, 4.3, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 114, 'short_tps_qty_index': 106, 'chop_rsi_len': 18, 'chop_bandwidth': 190, 'max_risk': 38}],
['/<sD?i_', 51, 166, 39.71, 56, 23, 8.66, {'ott_ubw': 38, 'ott_dbw': 72, 'long_tps_qty_index': 119, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 38}],
['.<vSEi]', 50, 157, 39.37, 60, 23, 7.55, {'ott_ubw': 36, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 68, 'chop_rsi_len': 12, 'chop_bandwidth': 190, 'max_risk': 37}],
['+<9Dki_', 55, 110, 38.36, 50, 22, 6.73, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 22, 'chop_bandwidth': 190, 'max_risk': 38}],
['/<9D?ic', 50, 170, 36.02, 50, 22, 9.51, {'ott_ubw': 38, 'ott_dbw': 72, 'long_tps_qty_index': 27, 'short_tps_qty_index': 44, 'chop_rsi_len': 11, 'chop_bandwidth': 190, 'max_risk': 40}],
['+<5Dji_', 54, 111, 36.38, 50, 22, 7.6, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 21, 'short_tps_qty_index': 44, 'chop_rsi_len': 22, 'chop_bandwidth': 190, 'max_risk': 38}],
['+<5Dki_', 54, 111, 36.38, 50, 22, 7.6, {'ott_ubw': 28, 'ott_dbw': 72, 'long_tps_qty_index': 21, 'short_tps_qty_index': 44, 'chop_rsi_len': 22, 'chop_bandwidth': 190, 'max_risk': 38}],
['d;?sahl', 61, 84, 32.78, 53, 13, 5.76, {'ott_ubw': 176, 'ott_dbw': 69, 'long_tps_qty_index': 36, 'short_tps_qty_index': 119, 'chop_rsi_len': 19, 'chop_bandwidth': 187, 'max_risk': 44}],
['6<vDEi]', 53, 177, 31.75, 58, 24, 5.69, {'ott_ubw': 56, 'ott_dbw': 72, 'long_tps_qty_index': 123, 'short_tps_qty_index': 44, 'chop_rsi_len': 12, 'chop_bandwidth': 190, 'max_risk': 37}],
['_NYtD?e', 64, 39, 28.35, 100, 4, 9.78, {'ott_ubw': 163, 'ott_dbw': 119, 'long_tps_qty_index': 78, 'short_tps_qty_index': 120, 'chop_rsi_len': 12, 'chop_bandwidth': 83, 'max_risk': 41}],
]
| 185.387464 | 189 | 0.644511 | 12,660 | 65,071 | 2.974803 | 0.026145 | 0.110884 | 0.203287 | 0.138605 | 0.916333 | 0.914022 | 0.898171 | 0.893763 | 0.892409 | 0.88909 | 0 | 0.188888 | 0.11237 | 65,071 | 350 | 190 | 185.917143 | 0.463149 | 0 | 0 | 0 | 0 | 0 | 0.492278 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
63d557cf129ce661034520774b605b1ed5242b76 | 19,191 | py | Python | Classification/load_data_30_turn.py | sansastra/Anomaly-Detection | 012ba6633ef5d53566e06acef1a6495179109fd4 | [
"MIT"
] | 5 | 2021-05-19T02:17:29.000Z | 2022-03-30T09:22:30.000Z | Classification/load_data_30_turn.py | sansastra/Anomaly-Detection | 012ba6633ef5d53566e06acef1a6495179109fd4 | [
"MIT"
] | null | null | null | Classification/load_data_30_turn.py | sansastra/Anomaly-Detection | 012ba6633ef5d53566e06acef1a6495179109fd4 | [
"MIT"
] | 2 | 2021-01-26T07:39:23.000Z | 2021-01-26T18:12:26.000Z | import numpy as np
import pandas as pd
from math import sin, cos, sqrt, atan2, radians
import matplotlib.pyplot as plt
import os
import h5py
import pickle
from PIL import Image
from datetime import date, time, datetime
from itertools import groupby
from operator import itemgetter
import math
from os.path import join
interactive = True
headers=['index1', 'ship_nr','id','repeat_indicator','mmsi','nav_status','rot_over_range','rot','sog','position_accuracy','x','y','cog','true_heading',
'timestamp','special_manoeuvre','spare','raim','sync_state','slot_timeout','slot_offset', 'abs_time', 'date', 'time']
ROSTOCK = (12.114733, 54.145409)
Dummy_Nr = -1
CLASSES = 2
EXTRA_FEATURES = 0 # one for distance, and another for time to last seen
SAMPLING_TIME = 3 # seconds
MINSOG = 7
MINCOGCHANGE = 30
nr_of_vessels = 10 # -1
def load_all_data(timesteps, dim, features, CLASSES): # without taking time into account
    """Build (X, Y) training samples for turn/anomaly classification from raw AIS data.

    Each sample row packs `timesteps` consecutive AIS messages (each of `dim`
    features) of one vessel. A row is labelled anomalous when the course over
    ground ("cog") within the window changes by more than MINCOGCHANGE degrees,
    after discarding false positives caused by the 360->0 degree wrap-around.
    Normal rows are randomly down-sampled so the two classes end up balanced.

    Arguments:
        timesteps: number of consecutive messages per sample window.
        dim: number of features per message.
        features: list of column names to extract; must contain "cog".
        CLASSES: number of output classes (one-hot width of Y).

    Returns:
        (overall_data, Y_data): X matrix of shape (n_samples, dim*timesteps)
        and one-hot label matrix of shape (n_samples, CLASSES). Both are also
        written to X_data.csv / Y_data.csv in the working directory.
    """
    np.random.seed(10)
    column_len = dim * timesteps
    # Locate "cog" dynamically so reordering `features` does not break the
    # column arithmetic below.
    cog_index = features.index("cog")
    with open('/home/sing_sd/Desktop/anomaly_detection/PythonCode/Resources/ais_data_rostock.csv', 'rb') as f: #
        data = pd.read_csv(f)
    data1 = data.mmsi.unique()
    overall_data = np.full(shape=(data.shape[0], column_len), fill_value=np.nan)
    startIndex = 0
    for mmsi in data1[:nr_of_vessels]:
        decoded_mmsi = data[data['mmsi'] == mmsi]
        decoded_mmsi = decoded_mmsi.reset_index(drop=True)
        decoded_mmsi = decoded_mmsi[decoded_mmsi['sog'] > MINSOG]  # keep only moving-vessel messages
        if decoded_mmsi.shape[0] > timesteps:
            data_per_track = decoded_mmsi.shape[0]
            overall_data[startIndex:startIndex + data_per_track, 0:dim] = decoded_mmsi[features]
            # Shift the track down by one message per column group so every row
            # ends up holding `timesteps` consecutive messages.
            for clm_nr in range(1, timesteps):
                overall_data[startIndex : startIndex + data_per_track - clm_nr,
                clm_nr * dim:(clm_nr + 1) * dim] = overall_data[startIndex + 1 : startIndex + data_per_track - clm_nr + 1, (clm_nr - 1) * dim:clm_nr * dim]
            # The last rows of a track have incomplete windows; discard them.
            overall_data[startIndex + data_per_track -timesteps +1 : startIndex + data_per_track,:] = np.nan
            startIndex += data_per_track - timesteps +1
    # Drop unused/NaN rows (NaN >= 0 compares False).
    overall_data = overall_data[np.where(overall_data[:, 0 :1] >= 0)[0]]
    # Per-row min/max of cog across the window.
    clm_ind = range(cog_index, timesteps*dim, dim)  # cog_index is the column offset inside each message
    max_cog_val = np.nanmax(overall_data[:, clm_ind], axis=1)
    min_cog_val = np.nanmin(overall_data[:, clm_ind], axis=1)
    max_cog_val = max_cog_val.T
    min_cog_val = min_cog_val.T
    # Candidate anomalies: large cog change within the window.
    ind_row1 = np.where(((abs(max_cog_val - min_cog_val) > MINCOGCHANGE) ))[0]
    # Candidate false anomalies: windows straddling the 360/0 wrap-around.
    ind_ano1 = ind_row1[(max_cog_val[ind_row1] > 360 - MINCOGCHANGE) & (min_cog_val[ind_row1] < MINCOGCHANGE)]
    # Bugfix: an empty np.array([]) defaults to float64, and np.delete rejects
    # non-integer index arrays on modern NumPy — force an integer dtype.
    false_ano = np.array([], dtype=int)
    for j in ind_ano1:
        # NOTE(review): this scans ALL columns of the row, not only the cog
        # columns — possibly intentional, but verify against clm_ind above.
        aa = np.where((overall_data[j, :] - MINCOGCHANGE <= 0))[0]
        bb = np.where((overall_data[j, :] - MINCOGCHANGE > 0))[0]
        max_cog_val_j = np.nanmax(overall_data[j, aa]) # finds max cog in 360+ data = angle rightside of North
        min_cog_val_j = abs(360-np.nanmin(overall_data[j, bb])) # finds min cog in 360- data and then angle leftside
        if max_cog_val_j + min_cog_val_j <= 30:
            false_ano = np.append(false_ano, np.where((ind_row1 == j))[0])
    # delete false anomalies
    ind_ano = np.delete(ind_row1, false_ano)
    ind_row_normal = np.delete(np.where(overall_data[:, 0:1] >= 0)[0], ind_ano)
    # Randomly drop normal rows so normals == anomalies (class balancing).
    # NOTE(review): assumes overall_data.shape[0] - 2*len(ind_ano) <= len(ind_row_normal).
    delete_normal = np.random.choice(np.arange(len(ind_row_normal)), overall_data.shape[0] - 2 * len(ind_ano),
                                     replace=False)
    overall_data[ind_row_normal[delete_normal], 0] = np.nan
    # assign one-hot target values: [1, 0] = normal, [0, 1] = anomaly
    Y_data = np.zeros((overall_data.shape[0], CLASSES))
    Y_data[:, 0] = 1
    Y_data[ind_ano] = [0, 1]
    where_are_NaNs = np.isnan(overall_data[:, 0])
    Y_data[where_are_NaNs, 0] = np.nan
    overall_data = overall_data[~where_are_NaNs]
    Y_data = Y_data[~where_are_NaNs]
    print('total number of normal samples is ', len(Y_data) - len(ind_ano))
    print('total number of anomaly samples is ', len(ind_ano))
    np.savetxt("X_data.csv", overall_data, delimiter=",")
    np.savetxt("Y_data.csv", Y_data, delimiter=",")
    return overall_data, Y_data
def load_data(timesteps, dim, features, CLASSES): # this is with dummy number, or time
    """Build (X, Y) training samples from AIS data resampled on a fixed time grid.

    Unlike load_all_data, each vessel track is first expanded onto a regular
    SAMPLING_TIME grid (gaps forward-filled), so consecutive columns of a
    sample are equally spaced in time. Labelling and class balancing follow
    the same cog-change rule as load_all_data.

    Arguments:
        timesteps: number of consecutive time slots per sample window.
        dim: number of features per slot.
        features: list of column names to extract; must contain "cog".
        CLASSES: number of output classes (one-hot width of Y).

    Returns:
        (overall_data, Y_data) as numpy arrays.
    """
    np.random.seed(10)
    column_len = dim * timesteps
    cog_index = features.index("cog") # if features is changed then cog position might change so later it is used correctly
    with open('/home/sing_sd/Desktop/anomaly_detection/PythonCode/Resources/ais_data_rostock.csv', 'rb') as f: #
        data = pd.read_csv(f)
    data1 = data.mmsi.unique()
    # First pass: count the total number of time slots so overall_data can be
    # allocated once.
    total_rows_data = 0
    for mmsi in data1[:nr_of_vessels]:
        decoded_mmsi = data[data['mmsi'] == mmsi]
        decoded_mmsi = decoded_mmsi[decoded_mmsi['sog'] > MINSOG]
        decoded_mmsi = decoded_mmsi.reset_index(drop=True)
        if decoded_mmsi.shape[0] > timesteps:
            total_rows_data += int((decoded_mmsi.iloc[-1]['time']- decoded_mmsi.iloc[0]['time'])// SAMPLING_TIME + 1)
    overall_data = np.full(shape=(total_rows_data, column_len), fill_value=np.nan)
    startIndex = 0
    # Second pass: resample each track onto the SAMPLING_TIME grid.
    for mmsi in data1[:nr_of_vessels]:
        decoded_mmsi = data[data['mmsi'] == mmsi]
        decoded_mmsi = decoded_mmsi[decoded_mmsi['sog'] > MINSOG]
        decoded_mmsi = decoded_mmsi.reset_index(drop=True)
        if decoded_mmsi.shape[0] > timesteps:
            data_per_track = int((np.array(decoded_mmsi.iloc[-1]['time'])- decoded_mmsi.iloc[0]['time'])// SAMPLING_TIME + 1)
            # time_0 becomes the slot index of each message relative to the
            # first message of the track.
            decoded_mmsi['time_0'] = decoded_mmsi.iloc[0]['time']
            decoded_mmsi['time_0'] = (decoded_mmsi['time'] - decoded_mmsi['time_0'])/SAMPLING_TIME
            # Bugfix: np.float was removed in NumPy 1.24; the builtin float is
            # the documented replacement.
            temp_data = pd.DataFrame(index=range(data_per_track), columns=features, dtype=float)
            temp_data.iloc[np.array(decoded_mmsi['time_0'], dtype=int), 0:dim] = np.array(decoded_mmsi[features])
            # Forward-fill the gaps between messages. fillna(method="ffill") is
            # deprecated in pandas; DataFrame.ffill() is the equivalent call.
            temp_data = temp_data.ffill()
            temp_data.loc[temp_data["cog"] > 360,"cog"] = 360  # clamp out-of-range course values
            overall_data[startIndex: startIndex+data_per_track, 0:dim] = temp_data[features]
            # shift from top and put on remaining columns
            for clm_nr in range(1, timesteps):
                overall_data[startIndex : startIndex + data_per_track - clm_nr,
                clm_nr * dim:(clm_nr + 1) * dim] = overall_data[startIndex + 1 : startIndex + data_per_track - clm_nr + 1, (clm_nr - 1) * dim:clm_nr * dim]
            # The last rows of a track have incomplete windows; discard them.
            overall_data[startIndex + data_per_track -timesteps +1 : startIndex + data_per_track,:] = np.nan
            startIndex += data_per_track - timesteps + 1
    # Drop unused/NaN rows (NaN >= 0 compares False).
    overall_data = overall_data[np.where( overall_data[:, 0:1] >= 0)[0]]
    # Per-row min/max of cog across the window.
    clm_ind = range(cog_index, timesteps*dim, dim)
    max_cog_val = np.nanmax(overall_data[:, clm_ind], 1)
    min_cog_val = np.nanmin(overall_data[:, clm_ind], 1)
    max_cog_val = max_cog_val.T
    min_cog_val = min_cog_val.T
    # Candidate anomalies: large cog change within the window.
    ind_row1 = np.where(((abs(max_cog_val - min_cog_val) > MINCOGCHANGE) ))[0]
    # Candidate false anomalies: windows straddling the 360/0 wrap-around.
    ind_ano1 = ind_row1[(max_cog_val[ind_row1] > 360 - MINCOGCHANGE) & (min_cog_val[ind_row1] < MINCOGCHANGE)]
    # Bugfix: force an integer dtype — np.delete rejects float index arrays
    # on modern NumPy.
    false_ano = np.array([], dtype=int)
    for j in ind_ano1:
        aa = np.where((overall_data[j, :] - MINCOGCHANGE <= 0))[0]
        bb = np.where((overall_data[j, :] - MINCOGCHANGE > 0))[0]
        max_cog_val_j = np.nanmax(overall_data[j, aa]) # finds max cog in 360+ data = angle rightside of North
        min_cog_val_j = abs(360 - np.nanmin(overall_data[j, bb])) # finds min cog in 360- data and then angle leftside
        if max_cog_val_j + min_cog_val_j <= 30:
            false_ano = np.append(false_ano, np.where((ind_row1 == j))[0])
    # delete false anomalies
    ind_ano = np.delete(ind_row1, false_ano)
    ind_row_normal = np.delete(np.where(overall_data[:, 0:1] >= 0)[0], ind_ano)
    # Randomly drop normal rows so the two classes are balanced.
    delete_normal = np.random.choice(np.arange(len(ind_row_normal)),overall_data.shape[0]-2*len(ind_ano), replace=False)
    overall_data[ind_row_normal[delete_normal], 0] = np.nan
    ind_row_normal = np.delete(ind_row_normal, delete_normal)
    # assign one-hot target values: [1, 0] = normal, [0, 1] = anomaly
    Y_data = np.zeros((overall_data.shape[0], CLASSES))
    Y_data[:, 0] = 1
    Y_data[ind_ano] = [0, 1]
    where_are_NaNs = np.isnan(overall_data[:, 0])
    Y_data[ where_are_NaNs, 0] = np.nan
    overall_data = overall_data[~where_are_NaNs]
    Y_data = Y_data[~where_are_NaNs]
    print('total number of normal samples is ', len(Y_data) - len(ind_ano))
    print('total number of anomaly samples is ', len(ind_ano))
    return overall_data, Y_data
def load_test_data(timesteps, dim, track_to_check):
    """Load one pre-recorded track pickle and build labelled test samples.

    Windows of `timesteps` consecutive messages are labelled with the same
    cog-change rule as load_all_data. The one-hot label width comes from the
    module-level CLASSES constant.

    Arguments:
        timesteps: number of consecutive messages per sample window.
        dim: number of features per message.
        track_to_check: integer id of the track pickle to load.

    Returns:
        (overall_data, Y_data), or (0, 0) if the pickle file is missing.
    """
    path = '/home/sing_sd/Desktop/anomaly_detection/PythonCode/Resources/track_pickle/'
    filename = 'track{}'.format(track_to_check)
    try:
        data = pd.read_pickle(path + filename + '.pkl')
    except IOError:
        print("Error: File does not appear to exist for track ", track_to_check)
        return 0, 0
    # without interpolation
    data_per_track = data.shape[0]
    overall_data = np.full(shape=(data_per_track, timesteps*dim ), fill_value=np.nan)
    # NOTE(review): feature columns are hard-coded as iloc[:, 2:6] — assumes
    # the pickle layout matches dim == 4; confirm against the pickle writer.
    overall_data[:, 0:dim] = data.iloc[:, 2:6]
    # shift from top and put on remaining columns
    # NOTE(review): the row range uses "- 1" for every shift, unlike the
    # "- clm_nr" used in load_all_data; kept as-is to preserve behaviour.
    for clm_nr in range(1, timesteps):
        overall_data[0: data_per_track - 1, clm_nr * dim:(clm_nr + 1) * dim] = overall_data[1: data_per_track, (clm_nr - 1) * dim:clm_nr * dim]
    overall_data = overall_data[np.where(overall_data[:, -1] >= 0)[0]]
    # Per-row min/max of cog across the window (cog assumed at offset 0 here).
    clm_ind = range(0, timesteps * dim, dim)
    max_cog_val = np.nanmax(overall_data[:, clm_ind], axis=1)
    min_cog_val = np.nanmin(overall_data[:, clm_ind], axis=1)
    max_cog_val = max_cog_val.T
    min_cog_val = min_cog_val.T
    # Candidate anomalies: large cog change within the window.
    ind_row1 = np.where(((abs(max_cog_val - min_cog_val) > MINCOGCHANGE)))[0]
    # Candidate false anomalies: windows straddling the 360/0 wrap-around.
    ind_ano1 = ind_row1[(max_cog_val[ind_row1] > 360 - MINCOGCHANGE) & (min_cog_val[ind_row1] < MINCOGCHANGE)]
    # Bugfix: force an integer dtype — np.delete rejects float index arrays
    # on modern NumPy.
    false_ano = np.array([], dtype=int)
    for j in ind_ano1:
        aa = np.where((overall_data[j, :] - MINCOGCHANGE <= 0))[0]
        bb = np.where((overall_data[j, :] - MINCOGCHANGE > 0))[0]
        max_cog_val_j = np.nanmax(overall_data[j, aa]) # finds max cog in 360+ data = angle rightside of North
        min_cog_val_j = abs(360 - np.nanmin(overall_data[j, bb])) # finds min cog in 360- data and then angle leftside
        if max_cog_val_j + min_cog_val_j <= 30:
            false_ano = np.append(false_ano, np.where((ind_row1 == j))[0])
    # delete false anomalies
    ind_ano = np.delete(ind_row1, false_ano)
    ind_row_normal = np.delete(np.where(overall_data[:, 0:1] >= 0)[0], ind_ano)
    # Randomly drop normal rows so the two classes are balanced.
    delete_normal = np.random.choice(np.arange(len(ind_row_normal)), overall_data.shape[0] - 2 * len(ind_ano),
                                     replace=False)
    overall_data[ind_row_normal[delete_normal], 0] = np.nan
    # assign one-hot target values: [1, 0] = normal, [0, 1] = anomaly
    Y_data = np.zeros((overall_data.shape[0], CLASSES))
    Y_data[:, 0] = 1
    Y_data[ind_ano] = [0, 1]
    where_are_NaNs = np.isnan(overall_data[:, 0])
    Y_data[where_are_NaNs, 0] = np.nan
    overall_data = overall_data[~where_are_NaNs]
    Y_data = Y_data[~where_are_NaNs]
    print('total number of normal samples is ', len(Y_data) - len(ind_ano))
    print('total number of anomaly samples is ', len(ind_ano))
    return overall_data, Y_data
def load_test_data_time(timesteps, dim, features, track_to_check):
    """Load one track pickle, resample it on the SAMPLING_TIME grid and label it.

    Timestamps are parsed from the 'date' and 'time' string columns, the track
    is expanded onto a regular time grid (features forward-filled, positions
    linearly interpolated), and windows are labelled by the cog-change rule.

    Arguments:
        timesteps: number of consecutive time slots per sample window.
        dim: number of features per slot.
        features: list of feature column names.
        track_to_check: integer id of the track pickle to load.

    Returns:
        (original_data, position_interpolated, overall_data, Y_data), or
        (0, 0) if the pickle file is missing.
    """
    path = '/home/sing_sd/Desktop/anomaly_detection/PythonCode/Resources/track_pickle/'
    filename = 'track{}'.format(track_to_check)
    try:
        data = pd.read_pickle(path + filename + '.pkl')
    except IOError:
        print("Error: File does not appear to exist for track ", track_to_check)
        return 0, 0
    data = data[data['sog'] > MINSOG]  # keep only moving-vessel messages
    data = data.reset_index(drop=True)
    original_data = data
    start_time = datetime.strptime(data.iloc[0]['date'] + ' ' + data.iloc[0]['time'],
                                   '%m/%d/%Y %H:%M:%S')
    end_time = datetime.strptime(data.iloc[-1]['date'] + ' ' + data.iloc[-1]['time'],
                                 '%m/%d/%Y %H:%M:%S')
    data_per_track = int((end_time - start_time).total_seconds() // SAMPLING_TIME + 1)
    overall_data = np.full(shape=(data_per_track, timesteps * dim), fill_value=np.nan)
    # Bugfix: np.float was removed in NumPy 1.24; the builtin float is the
    # documented replacement.
    temp_data = pd.DataFrame(index=range(data_per_track), columns=features, dtype=float)
    position_interpolated = pd.DataFrame(index=range(data_per_track), columns=["x","y"], dtype=float)
    for slot_index in range(0, data.shape[0]):
        current_time = datetime.strptime(data.iloc[slot_index]['date'] + ' ' + data.iloc[slot_index]['time'],
                                         '%m/%d/%Y %H:%M:%S')
        index1 = int((current_time - start_time).total_seconds()) // SAMPLING_TIME
        # NOTE(review): integer slices (0:dim) against string column labels rely
        # on pandas' positional-slice fallback — verify on the pandas version in use.
        temp_data.loc[index1, 0:dim] = data.loc[slot_index, features]
        position_interpolated.loc[index1, 0:2] = data.loc[slot_index, ["x","y"]]
    # Forward-fill feature gaps; interpolate positions linearly.
    # fillna(method=...) is deprecated in pandas; DataFrame.ffill() is equivalent.
    temp_data = temp_data.ffill()
    position_interpolated = position_interpolated.interpolate(method='linear', limit_direction='forward', axis=0)
    overall_data[:, 0:dim] = temp_data.iloc[:, 0:dim]
    # shift from top and put on remaining columns
    for clm_nr in range(1, timesteps):
        overall_data[0: data_per_track - 1, clm_nr * dim:(clm_nr + 1) * dim] = overall_data[1: data_per_track, (clm_nr - 1) * dim:clm_nr * dim]
    overall_data = overall_data[np.where(overall_data[:, -1] >= 0)[0]]
    # Per-row min/max of cog across the window (cog assumed at offset 2 here).
    clm_ind = range(2, timesteps * dim, dim)
    max_cog_val = np.nanmax(overall_data[:, clm_ind], 1)
    min_cog_val = np.nanmin(overall_data[:, clm_ind], 1)
    max_cog_val = max_cog_val.T
    min_cog_val = min_cog_val.T
    # Anomalies: large cog change that is not explained by the 360/0 wrap-around.
    ind_row1 = np.where(((abs(max_cog_val - min_cog_val) > MINCOGCHANGE)))[0]
    ind_ano = ind_row1[np.where((abs(360. - max_cog_val[ind_row1] - min_cog_val[ind_row1]) > MINCOGCHANGE))[0]]
    # assign one-hot target values: [1, 0] = normal, [0, 1] = anomaly
    Y_data = np.zeros((overall_data.shape[0], CLASSES))
    Y_data[:, 0] = 1
    Y_data[ind_ano] = [0, 1]
    where_are_NaNs = np.isnan(overall_data[:, 0])
    overall_data = overall_data[~where_are_NaNs]
    Y_data = Y_data[~where_are_NaNs]
    print('total number of normal samples is ', len(Y_data) - len(ind_ano))
    print('total number of anomaly samples is ', len(ind_ano))
    overall_data[np.isnan(overall_data)] = Dummy_Nr  # replace remaining NaNs with the dummy marker
    return original_data, np.array(position_interpolated), overall_data, Y_data
def load_saved_data():
    """Reload the X/Y matrices previously exported to X_data.csv / Y_data.csv.

    Both files are headerless comma-separated numeric matrices in the current
    working directory.

    Returns:
        (X_data, Y_data) as numpy arrays.
    """
    def _read_matrix(filename):
        # Each CSV has no header row; every cell is treated as data.
        with open(filename, 'rb') as handle:
            frame = pd.read_csv(handle, sep=",", header=None)
        return np.array(frame)

    return _read_matrix("X_data.csv"), _read_matrix("Y_data.csv")
# path = '/home/sing_sd/Desktop/anomaly_detection/PythonCode/Resources/track_pickle/'
# for track in range(1,229):
# filename = 'track{}'.format(track)
# try:
# data = pd.read_pickle(path + filename + '.pkl')
# data.to_csv(filename+".csv", index = False)
# except IOError:
# print("Error: File does not appear to exist for track ", track) | 46.355072 | 156 | 0.645146 | 2,891 | 19,191 | 4.035282 | 0.112764 | 0.092405 | 0.023144 | 0.021601 | 0.804389 | 0.77833 | 0.760415 | 0.744471 | 0.728184 | 0.704612 | 0 | 0.026768 | 0.217446 | 19,191 | 414 | 157 | 46.355072 | 0.750033 | 0.229847 | 0 | 0.70082 | 0 | 0 | 0.079179 | 0.021799 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020492 | false | 0 | 0.053279 | 0 | 0.102459 | 0.040984 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
63d62e14d60dad0eb69d5d2c6e1189c394e2d4d3 | 510 | py | Python | mlprogram/synthesizers/__init__.py | HiroakiMikami/mlprogram | 573e94c567064705fa65267dd83946bf183197de | [
"MIT"
] | 9 | 2020-05-24T11:25:01.000Z | 2022-03-28T15:32:10.000Z | mlprogram/synthesizers/__init__.py | HiroakiMikami/mlprogram | 573e94c567064705fa65267dd83946bf183197de | [
"MIT"
] | 87 | 2020-05-09T08:56:55.000Z | 2022-03-31T14:46:45.000Z | mlprogram/synthesizers/__init__.py | HiroakiMikami/NL2Prog | 573e94c567064705fa65267dd83946bf183197de | [
"MIT"
] | 3 | 2021-02-22T20:38:29.000Z | 2021-11-11T18:48:44.000Z | from mlprogram.synthesizers.beam_search import BeamSearch # noqa
from mlprogram.synthesizers.dfs import DFS # noqa
from mlprogram.synthesizers.filtered_synthesizer import FilteredSynthesizer # noqa
from mlprogram.synthesizers.reinforce_synthesizer import REINFORCESynthesizer # noqa
from mlprogram.synthesizers.smc import SMC # noqa
from mlprogram.synthesizers.synthesizer import Result, Synthesizer # noqa
from mlprogram.synthesizers.synthesizer_with_timeout import \
SynthesizerWithTimeout # noqa
| 56.666667 | 85 | 0.847059 | 55 | 510 | 7.763636 | 0.345455 | 0.213115 | 0.409836 | 0.407494 | 0.187354 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107843 | 510 | 8 | 86 | 63.75 | 0.938462 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.875 | 0 | 0.875 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
898ded8dd7167d6f65c4f491528aa2e66baa82e8 | 198 | py | Python | packages/dht/dht/mock_Adafruit_DHT.py | mattcontinisio/meat-curing-chamber | 41eefc52c0e30af315843ca507d299b7d58a1570 | [
"MIT"
] | 2 | 2020-03-02T17:49:35.000Z | 2020-03-02T20:46:37.000Z | packages/dht/dht/mock_Adafruit_DHT.py | mattcontinisio/meat-curing-chamber | 41eefc52c0e30af315843ca507d299b7d58a1570 | [
"MIT"
] | 2 | 2021-10-06T12:58:43.000Z | 2022-02-13T07:24:02.000Z | packages/dht/dht/mock_Adafruit_DHT.py | mattcontinisio/meat-curing-chamber | 41eefc52c0e30af315843ca507d299b7d58a1570 | [
"MIT"
] | null | null | null | import random
def read(sensor_type, pin):
    """Mock of Adafruit_DHT.read: return a random two-value reading.

    Presumably the pair is (humidity, temperature) in Adafruit_DHT's return
    order — first value uniform in [60, 90], second in [0, 30]. The
    sensor_type and pin arguments are accepted for API compatibility only.
    """
    first = random.uniform(60, 90)
    second = random.uniform(0, 30)
    return (first, second)
def read_retry(sensor_type, pin):
    """Mock of Adafruit_DHT.read_retry: behaves exactly like read().

    The real library retries until a reading succeeds; a mock always
    "succeeds", so it simply returns one random two-value reading.
    """
    first = random.uniform(60, 90)
    second = random.uniform(0, 30)
    return (first, second)
| 19.8 | 58 | 0.69697 | 31 | 198 | 4.354839 | 0.451613 | 0.385185 | 0.192593 | 0.281481 | 0.77037 | 0.77037 | 0.77037 | 0.77037 | 0.77037 | 0.77037 | 0 | 0.083333 | 0.151515 | 198 | 9 | 59 | 22 | 0.720238 | 0 | 0 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.4 | false | 0 | 0.2 | 0.4 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 12 |
89a0022e2095a363be97b2501441763d13473032 | 93 | py | Python | riot_api/api/post/__init__.py | Alex-Weatherhead/riot_api | 2d589f57cd46e0f7c54de29245078c730acd710f | [
"MIT"
] | null | null | null | riot_api/api/post/__init__.py | Alex-Weatherhead/riot_api | 2d589f57cd46e0f7c54de29245078c730acd710f | [
"MIT"
] | null | null | null | riot_api/api/post/__init__.py | Alex-Weatherhead/riot_api | 2d589f57cd46e0f7c54de29245078c730acd710f | [
"MIT"
] | null | null | null | from . import tournament_stub_v4 as tournament_stub
from . import tournament_v4 as tournament | 46.5 | 51 | 0.860215 | 14 | 93 | 5.428571 | 0.428571 | 0.263158 | 0.526316 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02439 | 0.11828 | 93 | 2 | 52 | 46.5 | 0.902439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
982ff0af4194f7804a0117714835e6bc18baf849 | 139,806 | py | Python | functions/player_functions.py | mtasa-typescript/mtasa-wiki-dump | edea1746850fb6c99d6155d1d7891e2cceb33a5c | [
"MIT"
] | null | null | null | functions/player_functions.py | mtasa-typescript/mtasa-wiki-dump | edea1746850fb6c99d6155d1d7891e2cceb33a5c | [
"MIT"
] | 1 | 2021-02-24T21:50:18.000Z | 2021-02-24T21:50:18.000Z | functions/player_functions.py | mtasa-typescript/mtasa-wiki-dump | edea1746850fb6c99d6155d1d7891e2cceb33a5c | [
"MIT"
] | null | null | null | # Autogenerated file. ANY CHANGES WILL BE OVERWRITTEN
from to_python.core.types import FunctionType, \
FunctionArgument, \
FunctionArgumentValues, \
FunctionReturnTypes, \
FunctionSignature, \
FunctionDoc, \
FunctionData, \
CompoundFunctionData
DUMP_PARTIAL = [
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='forcePlayerMap',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='forceOn',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to forcefully show a players radar map.' ,
arguments={
"thePlayer": """: A player object referencing the specified player """,
"forceOn": """: A boolean value representing whether or not the players radar map will be forced on """
},
result='' ,
),
url='forcePlayerMap',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='forcePlayerMap',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='forceOn',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to forcefully show a players radar map.' ,
arguments={
"forceOn": """: A boolean value representing whether or not the players radar map will be forced on """
},
result='' ,
),
url='forcePlayerMap',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getAlivePlayers',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['table'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns a table of all the alive players on the server. Opposite function of getDeadPlayers.' ,
arguments={
},
result='returns a table of all the alive players.' ,
),
url='getAlivePlayers',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getDeadPlayers',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['table'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns a table of all currently dead players on the server.' ,
arguments={
},
result='returns a table of all the dead players.' ,
),
url='getDeadPlayers',
)
],
client=[
],
),
CompoundFunctionData(
server=[
],
client=[
FunctionData(
signature=FunctionSignature(
name='getLocalPlayer',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['player'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets the player element of the client running the current script.\nYou can use the predefined variable localPlayer instead of typing getLocalPlayer()' ,
arguments={
},
result='returns the local player element.' ,
),
url='getLocalPlayer',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerACInfo',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['table'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns anti-cheat info for a player. The info returned by this function can change over time, so use the server event onPlayerACInfo instead.' ,
arguments={
"thePlayer": """The player whose anti-cheat info you want to check. """
},
result='returns a table with the following entries:\n* detectedac: a string containing a comma separated list of anti-cheat_guide|anti-cheat codes the player has triggered.\n*d3d9size: a number representing the file size of any custom d3d9.dll the player may have installed.\n*d3d9md5: a string containing the md5 of any custom d3d9.dll the player may have installed.\n*d3d9sha256: a string containing the sha256 of any custom d3d9.dll the player may have installed.' ,
),
url='getPlayerACInfo',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerAnnounceValue',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='key',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='' ,
arguments={
"thePlayer": """This is the Player whos value you want to retrieve. """,
"key": """The name of the key. """
},
result='this function returns a string containing the requested value if a valid key was specified or false otherwise.' ,
),
url='getPlayerAnnounceValue',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerBlurLevel',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows you to check the current blur level of a specified player.' ,
arguments={
"thePlayer": """The player whose blur level you want to check. """
},
result='returns the players blur level if successful, false if an invalid player was given.' ,
),
url='getPlayerBlurLevel',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='getBlurLevel',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows you to check the current blur level of a specified player.' ,
arguments={
},
result='returns the local blur level.' ,
),
url='getPlayerBlurLevel',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerCount',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the number of players currently connected to the server.' ,
arguments={
},
result='returns the number of players connected to the server as an int.' ,
),
url='getPlayerCount',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerFromName',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['player'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='playerName',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns a player element for the player with the name passed to the function.' ,
arguments={
"playerName": """: A string containing the name of the player you want to reference """
},
result='returns a player element for the player with the nickname provided. if there is no player with that name, false is returned.' ,
),
url='getPlayerFromName',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='getPlayerFromName',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['player'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='playerName',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns a player element for the player with the name passed to the function.' ,
arguments={
"playerName": """: A string containing the name of the player you want to reference """
},
result='returns a player element for the player with the nickname provided. if there is no player with that name, false is returned.' ,
),
url='getPlayerFromName',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerIdleTime',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets the amount of time in milliseconds that a players position has not changed.' ,
arguments={
"thePlayer": """: The player you wish to get the idle time of. """
},
result='returns the amount of time in milliseconds that a player has been idle, false otherwise.' ,
),
url='getPlayerIdleTime',
)
],
client=[
],
),
# getPlayerIP: server-only function entry (client list is empty).
# Fix: result string said "playerss" — corrected to "players" to match
# the phrasing of sibling entries (e.g. getPlayerName's result text).
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerIP',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns a string containing the IP address of the player.' ,
arguments={
"thePlayer": """The player element you want to get the IP of. """
},
result='returns a string containing the requested players ip, or false if the player passed to the function is invalid.' ,
),
url='getPlayerIP',
)
],
client=[
],
),
CompoundFunctionData(
server=[
],
client=[
FunctionData(
signature=FunctionSignature(
name='getPlayerMapBoundingBox',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets the GUI bounding box of the radar map texture.' ,
arguments={
},
result='* if the players map is showing, it returns four integers: minx, miny, maxx, maxy. these are absolute position coordinates of where the players map is drawn on the screen.\n** minx, miny represent the world coordinates -3000, 3000 (upper-left corner of the world map).\n** maxx, maxy represent the world coordinates 3000, -3000 (lower-right corner of the world map).\n** negative values may be returned if these coordinates are off screen.\n* if the map is not showing, a false boolean value is returned.' ,
),
url='getPlayerMapBoundingBox',
)
],
),
CompoundFunctionData(
server=[
],
client=[
FunctionData(
signature=FunctionSignature(
name='getPlayerMapOpacity',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='' ,
arguments={
},
result='returns an integer with a value from 0 to 255, where 0 is fully transparent and 255 is fully opaque.' ,
),
url='getPlayerMapOpacity',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerMoney',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Returns the amount of money a player currently has.' ,
arguments={
"thePlayer": """The player you wish the retrieve the amount of money from. """
},
result='returns an integer with the amount of money the specified player has, false if the player is invalid.' ,
),
url='getPlayerMoney',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='getPlayerMoney',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Returns the amount of money a player currently has.' ,
arguments={
},
result='returns an integer with the amount of money the local player has.' ,
),
url='getPlayerMoney',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerName',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns a string containing the name of the specified player.' ,
arguments={
"thePlayer": """the player you want to get the name of """
},
result='returns a string containing the requested players name, or false if the player passed to the function is invalid.' ,
),
url='getPlayerName',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='getPlayerName',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns a string containing the name of the specified player.' ,
arguments={
"thePlayer": """the player you want to get the name of """
},
result='returns a string containing the requested players name, or false if the player passed to the function is invalid.' ,
),
url='getPlayerName',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerNametagColor',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets the current color of a players name tag as RGB values. These are in the range 0-255.' ,
arguments={
"thePlayer": """The player whose name tag RGB color values you wish to retrieve. """
},
result='returns red, green and blue values if an existent player was specified, false otherwise.' ,
),
url='getPlayerNametagColor',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='getPlayerNametagColor',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
),
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets the current color of a players name tag as RGB values. These are in the range 0-255.' ,
arguments={
"thePlayer": """The player whose name tag RGB color values you wish to retrieve. """
},
result='returns red, green and blue values if an existent player was specified, false otherwise.' ,
),
url='getPlayerNametagColor',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerNametagText',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This will allow you to retrieve the name tag a player is currently using.' ,
arguments={
"thePlayer": """The person whose name tag you want to retrieve """
},
result='returns a string with the nametag text, false if the player is invalid.' ,
),
url='getPlayerNametagText',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='getPlayerNametagText',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This will allow you to retrieve the name tag a player is currently using.' ,
arguments={
"thePlayer": """The person whose name tag you want to retrieve """
},
result='returns a string with the nametag text, false if the player is invalid.' ,
),
url='getPlayerNametagText',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerPing',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the ping of a specified player. The ping is the number of milliseconds that data takes to travel from the players client to the server or vice versa. If a player is using a VPN their ping will still be returned correctly.' ,
arguments={
"thePlayer": """: The player whose ping you want to determine. """
},
result='returns the ping as an int, or false if the player is invalid.' ,
),
url='getPlayerPing',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='getPlayerPing',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the ping of a specified player. The ping is the number of milliseconds that data takes to travel from the players client to the server or vice versa. If a player is using a VPN their ping will still be returned correctly.' ,
arguments={
"thePlayer": """: The player whose ping you want to determine. """
},
result='returns the ping as an int, or false if the player is invalid.' ,
),
url='getPlayerPing',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerScriptDebugLevel',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This will allow you to retrieve the player current debug script level.' ,
arguments={
"thePlayer": """The person whose debug script level you want """
},
result='returns an int with the player debug script level, false if the player is invalid.' ,
),
url='getPlayerScriptDebugLevel',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerSerial',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns the serial for a specified player.' ,
arguments={
"thePlayer": """A player object referencing the specified player. """
},
result='returns the serial as a string if it was found, false otherwise.' ,
),
url='getPlayerSerial',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerVersion',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['string'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets the client version of the specified player as a sortable string. The string is always 15 characters long and is formatted as follows:\n* 1 character representing the major version\n* 1 dot character\n* 1 character representing the minor version\n* 1 dot character\n* 1 character representing the maintenance version\n* 1 dash character\n* 1 character representing the build type\n* 1 dot character\n* 5 characters representing the build number\n* 1 dot character\n* 1 character representing the build revision\nAn example of a version string would be: 1.0.4-9.01746.0\nWhere the first three numbers represent the major/minor/maintenance version, i.e. 1.0.4<br>\nThe fourth number is 9, which means its a release build, (Development and beta builds have lower numbers here)<br>\nAnd the fifth and sixth numbers represent the build number.' ,
arguments={
"thePlayer": """The player whose client version you wish to get. """
},
result='returns a string containing the client version, or false if the player is invalid.' ,
),
url='getPlayerVersion',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getPlayerWantedLevel',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets a players current wanted level. The wanted level is indicated by the amount of stars a player has on the GTA HUD.' ,
arguments={
"thePlayer": """The player whose wanted level you wish to get """
},
result='' ,
),
url='getPlayerWantedLevel',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='getPlayerWantedLevel',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['int'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function gets a players current wanted level. The wanted level is indicated by the amount of stars a player has on the GTA HUD.' ,
arguments={
},
result='' ,
),
url='getPlayerWantedLevel',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='getRandomPlayer',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['player'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function returns a random player.' ,
arguments={
},
result='returns a random player, false if the server is empty.' ,
),
url='getRandomPlayer',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='givePlayerMoney',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='amount',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function adds money to a players current money amount. To set absolute values, setPlayerMoney can be used.<br>' ,
arguments={
"thePlayer": """the player you are giving the money to. """,
"amount": """a positive integer number specifying the amount of money to give to the player. """
},
result='' ,
),
url='givePlayerMoney',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='givePlayerMoney',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='amount',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function adds money to a players current money amount. To set absolute values, setPlayerMoney can be used.<br>' ,
arguments={
"amount": """a positive integer number specifying the amount of money to give to the player. """
},
result='' ,
),
url='givePlayerMoney',
)
],
),
# isPlayerHudComponentVisible: client-only function entry (server list is empty).
# Fix: "visable" misspelled in both the description and the result strings —
# corrected to "visible".
CompoundFunctionData(
server=[
],
client=[
FunctionData(
signature=FunctionSignature(
name='isPlayerHudComponentVisible',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='component',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function can be used to check whether an hud component is visible or not.' ,
arguments={
"component": """The component you wish to check. Valid values are: """,
"ammo": """The display showing how much ammo the player has in their weapon """,
"area_name": """The text that appears containing the name of the area a player has entered """,
"armour": """The display showing the players armor """,
"breath": """The display showing the players breath """,
"clock": """The display showing the in-game time """,
"health": """The display showing the players health """,
"money": """The display showing how much money the player has """,
"radar": """The bottom-left corner miniradar """,
"vehicle_name": """The text that appears containing the players vehicle name when the player enters a vehicle """,
"weapon": """The display showing the players weapon """,
"radio": """The display showing the radio label """,
"wanted": """The display showing the players wanted level """,
"crosshair": """The weapon crosshair and sniper scope """
},
result='returns true if the component is visible, false if not.' ,
),
url='isPlayerHudComponentVisible',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='isPlayerMapForced',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function checks if the specified players radar map has been forced on or not.' ,
arguments={
"thePlayer": """A player object referencing the specified player """
},
result='returns true if the players radar map is forced on, false otherwise.' ,
),
url='isPlayerMapForced',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='isPlayerMapForced',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function checks if the specified players radar map has been forced on or not.' ,
arguments={
},
result='returns true if the local players radar map is forced on, false otherwise.' ,
),
url='isPlayerMapForced',
)
],
),
CompoundFunctionData(
server=[
],
client=[
FunctionData(
signature=FunctionSignature(
name='isPlayerMapVisible',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function checks if the local player has their map showing.' ,
arguments={
},
result='returns true if the player has the map visible, false otherwise.' ,
),
url='isPlayerMapVisible',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='isPlayerMuted',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Use this function to check if a player has been muted.' ,
arguments={
"thePlayer": """The player you are checking. """
},
result='returns true if the player is muted and false otherwise.' ,
),
url='isPlayerMuted',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='isPlayerNametagShowing',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function will allow you to determine if a players name tag is currently showing.' ,
arguments={
"thePlayer": """The player whose current name tag condition you want to check """
},
result='returns true if the players name tag is being shown, false otherwise.' ,
),
url='isPlayerNametagShowing',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='isPlayerNametagShowing',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function will allow you to determine if a players name tag is currently showing.' ,
arguments={
"thePlayer": """The player whose current name tag condition you want to check """
},
result='returns true if the players name tag is being shown, false otherwise.' ,
),
url='isPlayerNametagShowing',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='isVoiceEnabled',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Added to client side.\nThis function allows you to make the server reveal whether or not voice is currently enabled.' ,
arguments={
},
result='returns true if the voice is enabled on the server, false otherwise.' ,
),
url='isVoiceEnabled',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='isVoiceEnabled',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Added to client side.\nThis function allows you to make the server reveal whether or not voice is currently enabled.' ,
arguments={
},
result='returns true if the voice is enabled on the server, false otherwise.' ,
),
url='isVoiceEnabled',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='redirectPlayer',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='serverIP',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value='""',
)
],
[
FunctionArgument(
name='serverPort',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value='0',
)
],
[
FunctionArgument(
name='serverPassword',
argument_type=FunctionType(
names=['string'],
is_optional=True,
),
default_value='""',
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function redirects the player to a specified server.' ,
arguments={
"thePlayer": """The player you want to redirect """,
"serverIP": """The IP address (or domain name that resolves to the IP address) of the server you want to redirect the player to. Use an empty string to reconnect to the same server. """,
"serverPort": """The game port of the server you want to redirect the player to, this is usually 22003. Set to zero to use the same port as the current server. """,
"serverPassword": """The password for the server if its protected """
},
result='returns true if the player was redirected successfully, false if bad arguments were passed.' ,
),
url='redirectPlayer',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='resendPlayerACInfo',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function will force the specified player to resend their AC info, triggering the onPlayerACInfo event again.' ,
arguments={
"thePlayer": """: A player object referencing the specified player """
},
result='returns true if the ac info will be resent, false otherwise.' ,
),
url='resendPlayerACInfo',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='resendPlayerModInfo',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function will force the specified player to resend their mod info, triggering the onPlayerModInfo event again.' ,
arguments={
"thePlayer": """: A player object referencing the specified player """
},
result='returns true if the mod info will be resent, false otherwise.' ,
),
url='resendPlayerModInfo',
)
],
client=[
],
),
# setPlayerAnnounceValue: server-only function entry (client list is empty).
# Fixes: "whos" -> "whose" in the thePlayer argument doc, and
# "succesfully" -> "successfully" in the result string.
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setPlayerAnnounceValue',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='key',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='value',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows you to change ASE announce values for any player using a specified key.\nAs an example this can be used to change the score value which will be shown at https://www.game-state.com/ game-state.coms server list.\nFor server-wide changes you can use setRuleValue!' ,
arguments={
"thePlayer": """The player whose announce value you wish to change. """,
"key": """The key which the value will be stored at. """,
"value": """The value you wish to store. """
},
result='returns true if the value was set successfully, false otherwise.' ,
),
url='setPlayerAnnounceValue',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setPlayerBlurLevel',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='level',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Sets the motion blur level on the clients screen. Accepts a value between 0 and 255.' ,
arguments={
"thePlayer": """The player whose blur level will be changed. """,
"level": """The level to set the blur to (default: 36) """
},
result='' ,
),
url='setPlayerBlurLevel',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='setBlurLevel',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='level',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Sets the motion blur level on the clients screen. Accepts a value between 0 and 255.' ,
arguments={
"level": """The level to set the blur to (default: 36) """
},
result='' ,
),
url='setPlayerBlurLevel',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setPlayerHudComponentVisible',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='component',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='show',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function will show or hide a part of the players HUD.' ,
arguments={
"thePlayer": """The player element for which you wish to show/hide a HUD component """,
"component": """The component you wish to show or hide. Valid values are: """,
"all": """All of the following at the same time """,
"ammo": """The display showing how much ammo the player has in their weapon """,
"area_name": """The text that appears containing the name of the area a player has entered """,
"armour": """The display showing the players armor """,
"breath": """The display showing the players breath """,
"clock": """The display showing the in-game time """,
"health": """The display showing the players health """,
"money": """The display showing how much money the player has """,
"radar": """The bottom-left corner miniradar """,
"vehicle_name": """The text that appears containing the players vehicle name when the player enters a vehicle """,
"weapon": """The display showing the players weapon """,
"radio": """The display showing the radio label """,
"wanted": """The display showing the players wanted level """,
"crosshair": """The weapon crosshair and sniper scope """,
"show": """Specify if the component should be shown (true) or hidden (false) """
},
result='' ,
),
url='setPlayerHudComponentVisible',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='setPlayerHudComponentVisible',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='component',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='show',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function will show or hide a part of the players HUD.' ,
arguments={
"component": """The component you wish to show or hide. Valid values are: """,
"all": """All of the following at the same time """,
"ammo": """The display showing how much ammo the player has in their weapon """,
"area_name": """The text that appears containing the name of the area a player has entered """,
"armour": """The display showing the players armor """,
"breath": """The display showing the players breath """,
"clock": """The display showing the in-game time """,
"health": """The display showing the players health """,
"money": """The display showing how much money the player has """,
"radar": """The bottom-left corner miniradar """,
"vehicle_name": """The text that appears containing the players vehicle name when the player enters a vehicle """,
"weapon": """The display showing the players weapon """,
"radio": """The display showing the radio label """,
"wanted": """The display showing the players wanted level """,
"crosshair": """The weapon crosshair and sniper scope """,
"show": """Specify if the component should be shown (true) or hidden (false) """
},
result='' ,
),
url='setPlayerHudComponentVisible',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setPlayerMoney',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='amount',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='instant',
argument_type=FunctionType(
names=['bool'],
is_optional=True,
),
default_value='false',
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Sets a players money to a certain value, regardless of current player money. It should be noted that setting negative values does not work and in fact gives the player large amounts of money.' ,
arguments={
"thePlayer": """Which player to set the money of. """,
"amount": """A whole integer specifying the new amount of money the player will have. """,
"instant": """If set to true money will be set instantly without counting up/down like in singleplayer.}} """
},
result='' ,
),
url='setPlayerMoney',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='setPlayerMoney',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='amount',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='instant',
argument_type=FunctionType(
names=['bool'],
is_optional=True,
),
default_value='false',
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Sets a players money to a certain value, regardless of current player money. It should be noted that setting negative values does not work and in fact gives the player large amounts of money.' ,
arguments={
"amount": """A whole integer specifying the new amount of money the local player will have. """,
"instant": """If set to true money will be set instantly without counting up/down like in singleplayer.}} """
},
result='' ,
),
url='setPlayerMoney',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setPlayerMuted',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='state',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Use this function to mute or unmute the player.' ,
arguments={
"thePlayer": """The player you are muting or unmuting. """,
"state": """Use true to mute and false to unmute the player. """
},
result='returns true if the player was successfully muted or unmuted, false otherwise.' ,
),
url='setPlayerMuted',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setPlayerName',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='newName',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function changes the specified players name. Note that any change made to a players name with this function is not saved in their settings so the name change only lasts till they disconnect.' ,
arguments={
"thePlayer": """the player that will have its name set. """,
"newName": """the new name to set for the player. """
},
result='returns true if the player name was changed succesfully, false if invalid arguments are specified.' ,
),
url='setPlayerName',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setPlayerNametagColor',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='r',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='g',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='b',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This allows you to change the RGB color mixture in the name tags of players.' ,
arguments={
"thePlayer": """The player whose name tag text you wish to change the color of """,
"r": """The amount of red you want in the mixture of RGB (0-255 is valid) """,
"g": """The amount of green you want in the mixture of RGB (0-255 is valid) """,
"b": """The amount of blue you want in the mixture of RGB (0-255 is valid) """,
"false": """If false is specified instead of the colors, the nametag color will reset to defaulting to your team color. """
},
result='returns true if the function was successful, false otherwise.' ,
),
url='setPlayerNametagColor',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='setPlayerNametagColor',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='r',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='g',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='b',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This allows you to change the RGB color mixture in the name tags of players.' ,
arguments={
"thePlayer": """The player whose name tag text you wish to change the color of """,
"r": """The amount of red you want in the mixture of RGB (0-255 is valid) """,
"g": """The amount of green you want in the mixture of RGB (0-255 is valid) """,
"b": """The amount of blue you want in the mixture of RGB (0-255 is valid) """,
"false": """If false is specified instead of the colors, the nametag color will reset to defaulting to your team color. """
},
result='returns true if the function was successful, false otherwise.' ,
),
url='setPlayerNametagColor',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setPlayerNametagShowing',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='showing',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Use this to define whether the players name tag is visible or invisible.' ,
arguments={
"thePlayer": """Define the player whos tag visiblity status you want to change """,
"showing": """Use true or false to show/hide the tag """
},
result='returns true if successful, false otherwise' ,
),
url='setPlayerNametagShowing',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='setPlayerNametagShowing',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='showing',
argument_type=FunctionType(
names=['bool'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='Use this to define whether the players name tag is visible or invisible.' ,
arguments={
"thePlayer": """Define the player whos tag visiblity status you want to change """,
"showing": """Use true or false to show/hide the tag """
},
result='returns true if successful, false otherwise' ,
),
url='setPlayerNametagShowing',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setPlayerNametagText',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='text',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This will change the text of a players nickname in the world to something besides the nickname he chose. This will not change the players actual nickname, it only changes the visible aspect inside the world (you will see his original nickname in the scoreboard and will refer to his original name in scripts).' ,
arguments={
"thePlayer": """The player whose nickname text you wish to change """,
"text": """The new nickname text that will be displayed """
},
result='returns true if successful, false otherwise.' ,
),
url='setPlayerNametagText',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='setPlayerNametagText',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='text',
argument_type=FunctionType(
names=['string'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This will change the text of a players nickname in the world to something besides the nickname he chose. This will not change the players actual nickname, it only changes the visible aspect inside the world (you will see his original nickname in the scoreboard and will refer to his original name in scripts).' ,
arguments={
"thePlayer": """The player whose nickname text you wish to change """,
"text": """The new nickname text that will be displayed """
},
result='returns true if successful, false otherwise.' ,
),
url='setPlayerNametagText',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setPlayerScriptDebugLevel',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='level',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This will set players debug level, equivalent to Debugging|debugscript <level>.' ,
arguments={
"thePlayer": """The player whose debug level you wish to change """,
"level": """0: close debug console, 1: only errors, 2: errors and warnings, 3: errors, warnings and info messages """
},
result='returns true if successful, false otherwise.' ,
),
url='setPlayerScriptDebugLevel',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setPlayerVoiceBroadcastTo',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='broadcastTo',
argument_type=FunctionType(
names=['mixed'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows you to change who can hear the voice of a player.' ,
arguments={
"thePlayer": """The player you wish to change """,
"broadcastTo": """Element or table of elements who should hear the voice from this player """
},
result='returns true if the value was set successfully, false otherwise.' ,
),
url='setPlayerVoiceBroadcastTo',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setPlayerVoiceIgnoreFrom',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['element'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='ignoreFrom',
argument_type=FunctionType(
names=['mixed'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function allows you to mute voices for a player.' ,
arguments={
"thePlayer": """The player you wish to change """,
"ignoreFrom": """Element or table of elements which the player should not hear voices from. Use nil if no one should be ignored. """
},
result='returns true if the value was set successfully, false otherwise.' ,
),
url='setPlayerVoiceIgnoreFrom',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='setPlayerWantedLevel',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='stars',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function is used to set a players wanted level. The wanted level is indicated by the amount of stars a player has on the GTA HUD.' ,
arguments={
"thePlayer": """The player whose wanted level is to be set """,
"stars": """An integer from 0 to 6 representing the wanted level """
},
result='returns true if the wanted level was set successfully, false if any of the arguments were invalid.' ,
),
url='setPlayerWantedLevel',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='spawnPlayer',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='x',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='y',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='z',
argument_type=FunctionType(
names=['float'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='rotation',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='0',
)
],
[
FunctionArgument(
name='skinID',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='0',
)
],
[
FunctionArgument(
name='interior',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='0',
)
],
[
FunctionArgument(
name='dimension',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='0',
)
],
[
FunctionArgument(
name='theTeam',
argument_type=FunctionType(
names=['team'],
is_optional=True,
),
default_value='getPlayerTeam(thePlayer)',
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function spawns the player at an arbitary point on the map.<br>' ,
arguments={
"thePlayer": """The player you want to spawn. """,
"x": """The x co-ordinate to spawn the player at. """,
"y": """The y co-ordinate to spawn the player at. """,
"z": """The z co-ordinate to spawn the player at. """,
"rotation": """rotation of the player on spawn. """,
"skinID": """players skin on spawn. Character Skins """,
"interior": """interior the player will spawn into. Interior IDs """,
"dimension": """The ID of the dimension that the player should be in. """,
"theTeam": """the team the player will join. """
},
result='returns true if the player was spawned successfully, false otherwise.' ,
),
url='spawnPlayer',
)
],
client=[
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='takePlayerMoney',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='amount',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function subtracts money from a players current money amount.' ,
arguments={
"thePlayer": """the player you are taking the money from. """,
"amount": """an integer number specifying the amount of money to take from the player. """
},
result='' ,
),
url='takePlayerMoney',
)
],
client=[
FunctionData(
signature=FunctionSignature(
name='takePlayerMoney',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='amount',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function subtracts money from a players current money amount.' ,
arguments={
"amount": """an integer number specifying the amount of money to take from the player. """
},
result='' ,
),
url='takePlayerMoney',
)
],
),
CompoundFunctionData(
server=[
FunctionData(
signature=FunctionSignature(
name='takePlayerScreenShot',
return_types=FunctionReturnTypes(
return_types=[
FunctionType(
names=['bool'],
is_optional=False,
)
],
variable_length=False,
),
arguments=FunctionArgumentValues(
arguments=[
[
FunctionArgument(
name='thePlayer',
argument_type=FunctionType(
names=['player'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='width',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='height',
argument_type=FunctionType(
names=['int'],
is_optional=False,
),
default_value=None,
)
],
[
FunctionArgument(
name='tag',
argument_type=FunctionType(
names=['string'],
is_optional=True,
),
default_value='""',
)
],
[
FunctionArgument(
name='quality',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='30',
)
],
[
FunctionArgument(
name='maxBandwith',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='5000',
)
],
[
FunctionArgument(
name='maxPacketSize',
argument_type=FunctionType(
names=['int'],
is_optional=True,
),
default_value='500',
)
]
],
variable_length=False,
),
generic_types=[
],
),
docs=FunctionDoc(
description='This function forces a client to capture the current screen output and send it back to the server. The image will contain the GTA HUD and the output of any dxDraw functions that are not flagged as post GUI. The image specifically excludes the chat box and all GUI (including the client console). The result is received with the event onPlayerScreenShot.' ,
arguments={
"thePlayer": """the player to get the screen capture from. """,
"width": """the width of the capture image. """,
"height": """the height of the capture image. """,
"tag": """A string to help identify the screen capture. The string is passed to the matching onPlayerScreenShot event for your personal convenience. """,
"quality": """Quality of the final JPEG image from 0 to 100. A lower value can reduce the memory used by the image considerably which will result in faster and less intrusive uploads. """,
"maxBandwith": """The amount of client upload bandwidth to use (in bytes per second) when sending the image.
*'''maxPacketSize: ''' The maximum size of one packet. """
},
result='returns true if the function was successfully, false if invalid arguments are specified.' ,
),
url='takePlayerScreenShot',
)
],
client=[
],
)
]
| 39.415281 | 888 | 0.3556 | 7,792 | 139,806 | 6.286191 | 0.076617 | 0.061431 | 0.050529 | 0.059797 | 0.861745 | 0.846393 | 0.813952 | 0.769058 | 0.757809 | 0.755584 | 0 | 0.002443 | 0.578402 | 139,806 | 3,546 | 889 | 39.426396 | 0.828577 | 0.000365 | 0 | 0.801053 | 1 | 0.010825 | 0.18444 | 0.00737 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.002633 | 0.000293 | 0 | 0.000293 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
7f453709a33f2392d8ed93298a72ffc964f91114 | 35 | py | Python | micsv/__init__.py | kklot/MICS | 5b13b8c047b23f94ad60b264cfd1d246b86110dd | [
"MIT"
] | null | null | null | micsv/__init__.py | kklot/MICS | 5b13b8c047b23f94ad60b264cfd1d246b86110dd | [
"MIT"
] | null | null | null | micsv/__init__.py | kklot/MICS | 5b13b8c047b23f94ad60b264cfd1d246b86110dd | [
"MIT"
] | null | null | null | from micsv.run_mics import run_mics | 35 | 35 | 0.885714 | 7 | 35 | 4.142857 | 0.714286 | 0.482759 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085714 | 35 | 1 | 35 | 35 | 0.90625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
7f80b6905feacf141b9bb4db677bebed11e01a1d | 1,395 | py | Python | tests/fixtures/server.py | Kosinkadink/ceptic | 06d03ffbad6c28e40c541053218dbea7383eea1c | [
"MIT"
] | 2 | 2017-07-18T03:12:12.000Z | 2019-11-21T20:00:25.000Z | tests/fixtures/server.py | Kosinkadink/ceptic | 06d03ffbad6c28e40c541053218dbea7383eea1c | [
"MIT"
] | null | null | null | tests/fixtures/server.py | Kosinkadink/ceptic | 06d03ffbad6c28e40c541053218dbea7383eea1c | [
"MIT"
] | null | null | null | import pytest
import contextlib
from ceptic.server import CepticServer, server_settings
@pytest.fixture(scope="function")
@pytest.mark.usefixtures("locations")
def server_all_files(locations):
@contextlib.contextmanager
def _real_func(settings=None):
if settings is None:
settings = server_settings()
app = CepticServer(settings, locations.s_certfile, locations.s_keyfile, locations.s_cafile)
yield app
# cleanup
if not app.is_stopped():
app.stop()
return _real_func
@pytest.fixture(scope="function")
@pytest.mark.usefixtures("locations")
def server_certfile_keyfile_only(locations):
@contextlib.contextmanager
def _real_func(settings=None):
if settings is None:
settings = server_settings()
app = CepticServer(settings, locations.s_certfile, locations.s_keyfile)
yield app
# cleanup
if not app.is_stopped():
app.stop()
return _real_func
@pytest.fixture(scope="function")
@pytest.mark.usefixtures("locations")
def server_not_secure():
@contextlib.contextmanager
def _real_func(settings=None):
if settings is None:
settings = server_settings()
app = CepticServer(settings, secure=False)
yield app
# cleanup
if not app.is_stopped():
app.stop()
return _real_func
| 27.9 | 99 | 0.670968 | 157 | 1,395 | 5.764331 | 0.235669 | 0.053039 | 0.059669 | 0.086188 | 0.846409 | 0.846409 | 0.846409 | 0.846409 | 0.846409 | 0.846409 | 0 | 0 | 0.237276 | 1,395 | 49 | 100 | 28.469388 | 0.850564 | 0.016487 | 0 | 0.769231 | 0 | 0 | 0.037281 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.076923 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
7f9d29463bd86cf3b1b8c75329831f24ba8598b3 | 40 | py | Python | rpp/__init__.py | davidkant/rust-party-python | 5c5162cedf7db6edc99b76932033e560f7eabefd | [
"Apache-2.0"
] | 1 | 2020-02-01T10:34:28.000Z | 2020-02-01T10:34:28.000Z | rpp/__init__.py | davidkant/rust-party-python | 5c5162cedf7db6edc99b76932033e560f7eabefd | [
"Apache-2.0"
] | 1 | 2019-07-07T17:57:39.000Z | 2019-07-07T17:57:39.000Z | rpp/__init__.py | davidkant/rust-party-python | 5c5162cedf7db6edc99b76932033e560f7eabefd | [
"Apache-2.0"
] | null | null | null | from . import spec
from . import params
| 13.333333 | 20 | 0.75 | 6 | 40 | 5 | 0.666667 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.2 | 40 | 2 | 21 | 20 | 0.9375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
f6e7d04da3dc92f7e38d7f33155237a05a90b779 | 91 | py | Python | tests/test_version.py | viagostini/url-shortener | f374addcafe90c8d87686c1d9ef5e740859e9a4e | [
"MIT"
] | 2 | 2020-07-18T19:11:58.000Z | 2020-07-18T19:12:04.000Z | tests/test_version.py | viagostini/url_shortener | f374addcafe90c8d87686c1d9ef5e740859e9a4e | [
"MIT"
] | null | null | null | tests/test_version.py | viagostini/url_shortener | f374addcafe90c8d87686c1d9ef5e740859e9a4e | [
"MIT"
] | null | null | null | import url_shortener
def test_version():
assert url_shortener.__version__ == "0.1.0"
| 15.166667 | 47 | 0.736264 | 13 | 91 | 4.615385 | 0.692308 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038961 | 0.153846 | 91 | 5 | 48 | 18.2 | 0.74026 | 0 | 0 | 0 | 0 | 0 | 0.054945 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
100c545292fd8e08ca116b859e0c91298359bfbe | 240 | py | Python | 03_GraphBasedPlanner/graph_ltpl/__init__.py | f1tenth/ESweek2021_educationclassA3 | 7620a36d21c1824efba8a83f0671926bf8e028f3 | [
"MIT"
] | 15 | 2021-10-09T13:48:49.000Z | 2022-03-27T04:36:44.000Z | 03_GraphBasedPlanner/graph_ltpl/__init__.py | yinflight/ESweek2021_educationclassA3 | 7a32bacdb7f3154a773d28b6b6abffdaa154a526 | [
"MIT"
] | 1 | 2021-11-27T01:47:25.000Z | 2021-11-27T02:44:04.000Z | 03_GraphBasedPlanner/graph_ltpl/__init__.py | yinflight/ESweek2021_educationclassA3 | 7a32bacdb7f3154a773d28b6b6abffdaa154a526 | [
"MIT"
] | 2 | 2021-11-03T19:32:55.000Z | 2021-11-27T02:43:13.000Z | import graph_ltpl.data_objects
import graph_ltpl.helper_funcs.src
import graph_ltpl.imp_global_traj.src
import graph_ltpl.offline_graph.src
import graph_ltpl.online_graph.src
import graph_ltpl.testing_tools.src
import graph_ltpl.Graph_LTPL
| 30 | 37 | 0.891667 | 41 | 240 | 4.853659 | 0.365854 | 0.361809 | 0.527638 | 0.452261 | 0.231156 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.058333 | 240 | 7 | 38 | 34.285714 | 0.880531 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
63da9a3435141f05b1dad5a099df836934a9c9db | 62 | py | Python | easyturk/__init__.py | kayburns/easyturk | 6d2078af88d5196d809ab068a9fd4b1f96a43414 | [
"MIT"
] | null | null | null | easyturk/__init__.py | kayburns/easyturk | 6d2078af88d5196d809ab068a9fd4b1f96a43414 | [
"MIT"
] | null | null | null | easyturk/__init__.py | kayburns/easyturk | 6d2078af88d5196d809ab068a9fd4b1f96a43414 | [
"MIT"
] | null | null | null | from .easyturk import EasyTurk
from easyturk import interface
| 20.666667 | 30 | 0.854839 | 8 | 62 | 6.625 | 0.5 | 0.45283 | 0.679245 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.129032 | 62 | 2 | 31 | 31 | 0.981481 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
121faa0468c7f3ff15abe4289990e3ddba7e558b | 26,340 | py | Python | proteus/UnstructuredFMMandFSWsolvers.py | robertsawko/proteus | 6f1e4c2ca1af85a906b35a5162430006f0343861 | [
"NASA-1.3"
] | null | null | null | proteus/UnstructuredFMMandFSWsolvers.py | robertsawko/proteus | 6f1e4c2ca1af85a906b35a5162430006f0343861 | [
"NASA-1.3"
] | null | null | null | proteus/UnstructuredFMMandFSWsolvers.py | robertsawko/proteus | 6f1e4c2ca1af85a906b35a5162430006f0343861 | [
"NASA-1.3"
] | null | null | null | """
Fast marching and fast sweeping solvers
"""
import numpy
import math
import sys,atexit
import FemTools,MeshTools,EGeometry
import StupidHeap as SHeap
########################################################################
#solvers
########################################################################
class FMMEikonalSolver:
    r"""
    Encapsulate a naive implementation of the Fast Marching Method on
    unstructured grids for the Eikonal equation

        \|\grad T\| = 1/F,    T = 0 on \Gamma

    1d local solver is the standard upwind approximation.
    2d local solver variations:
       acute triangulations version 1 or version 2 from Qian, Zhang et al. '07;
       obtuse triangulations are not implemented.
    3d local solver variations: not fully checked.
    For now, the input should be non-negative!

    TODO:
      3D version needs to be tested more
    """
    from proteus import cfmmfsw
    def __init__(self,mesh,dofMap,nSpace,localSolverType='QianEtalV2',frontInitType='magnitudeOnly',
                 debugLevel=3):
        self.mesh   = mesh
        self.nSpace = nSpace
        self.orderApprox = 1   # only first order is implemented
        self.debugLevel= debugLevel
        #reality check
        assert 1 <= self.nSpace and self.nSpace <= 3, "1d,2d, and 3d only right now"
        assert self.orderApprox == 1, "first order only for now"
        #default (unit) speeds for the Eikonal equation
        import numpy
        self.unitNodalSpeeds = numpy.ones((self.mesh.nNodes_global,),'d')
        # 0 --> initialize front from magnitude only, 1 --> use signed values
        self.frontInitFlag = 1
        if frontInitType == 'magnitudeOnly':
            self.frontInitFlag = 0
        #could pass in frontInit type here
        self.csolver = FMMEikonalSolver.cfmmfsw.FMMEikonalSolver(self.nSpace,self.mesh.cmesh)
        #
        self.localPWLreconstruction = FMMEikonalSolver.cfmmfsw.localPWLreconstruction
    def solve(self,phi0,T,nodalSpeeds=None,zeroTol=1.0e-4,trialTol=1.0e-1,verbose=0):
        r"""
        First order fast marching method for the eikonal equation

            \|\grad T\| = 1/F,   T = 0 on \Gamma

        assuming phi0 describes the initial location of the interface \Gamma
        and has reasonable (absolute) values for T close to \Gamma. T can be
        interpreted as the travel time from \Gamma.

        Right now assumes global node numbers <--> global dofs, but this can
        be fixed easily.

        Input
          phi0 : dof array of a P1 C0 FiniteElementFunction (initial condition)
          T    : dof array of a P1 C0 FiniteElementFunction (solution output)
          nodalSpeeds : optional per-node speeds F; unit speeds when None
          zeroTol, trialTol : tolerances forwarded to the C solver

        Returns True if the underlying C solver reported failure.
        """
        assert len(T) == len(phi0), "phi0 and T must be same dimensionality"
        assert len(T) == self.mesh.nNodes_global, "FemSpaces must be C0 P1"
        failed = False
        # 'is None' rather than '== None': comparing a numpy array with '=='
        # yields an elementwise result whose truth value is ambiguous
        if nodalSpeeds is None:
            speed = self.unitNodalSpeeds
        else:
            speed = nodalSpeeds
        assert len(speed) == self.mesh.nNodes_global, "nodalSpeed dim= %s must be %s " % (len(speed),self.mesh.nNodes_global)
        failed = self.csolver.solve(phi0,speed,T,zeroTol=zeroTol,trialTol=trialTol,
                                    initFlag=self.frontInitFlag,verbose=verbose)
        return bool(failed)
    #solve
#class
class FSWEikonalSolver:
    r"""
    Encapsulate a naive implementation of the Fast Sweeping Method on
    unstructured grids for the Eikonal equation

        \|\grad T\| = 1/F,    T = 0 on \Gamma

    1d local solver is the standard upwind approximation.
    2d local solver variations:
       acute triangulations version 1 or version 2 from Qian, Zhang et al. '07;
       obtuse triangulations are not implemented.
    3d local solver variations: not fully checked.
    For now, the input should be non-negative!

    TODO:
      3D version needs to be tested more
    """
    from proteus import cfmmfsw
    def __init__(self,mesh,dofMap,nSpace,iterAtol=1.0e-8,iterRtol=0.0,maxIts=100,
                 localSolverType='QianEtalV2',frontInitType='magnitudeOnly',
                 refPoints=None,
                 orderApprox=1,LARGE=1.234e28,debugLevel=3):
        self.mesh   = mesh
        self.nSpace = nSpace
        self.iterAtol= iterAtol
        self.iterRtol= iterRtol
        self.maxIts  = maxIts
        self.orderApprox = 1   # only first order is implemented
        self.LARGE   = LARGE
        self.debugLevel = debugLevel
        # optional reference points used to generate the sweep orderings
        self.xRefOrderingPoints = refPoints
        self.nRefOrderingPoints = None
        # 'is not None' rather than '!= None': refPoints may be a numpy array
        # and '!= None' triggers an ambiguous elementwise comparison
        if self.xRefOrderingPoints is not None:
            self.nRefOrderingPoints = len(self.xRefOrderingPoints)
        #reality check
        assert 1 <= self.nSpace and self.nSpace <= 3, "1d,2d, and 3d only right now"
        assert self.orderApprox == 1, "first order only for now"
        #default (unit) speeds for the Eikonal equation
        import numpy
        self.unitNodalSpeeds = numpy.ones((self.mesh.nNodes_global,),'d')
        # 0 --> initialize front from magnitude only, 1 --> use signed values
        self.frontInitFlag = 1
        if frontInitType == 'magnitudeOnly':
            self.frontInitFlag = 0
        self.csolver = None
        if self.xRefOrderingPoints is None:
            self.csolver = FSWEikonalSolver.cfmmfsw.FSWEikonalSolver(self.nSpace,self.mesh.cmesh,
                                                                     atol=self.iterAtol,rtol=self.iterRtol,
                                                                     maxIts=self.maxIts,
                                                                     initFlag=self.frontInitFlag)
        else:
            self.csolver = FSWEikonalSolver.cfmmfsw.FSWEikonalSolver(self.nSpace,self.mesh.cmesh,
                                                                     atol=self.iterAtol,rtol=self.iterRtol,
                                                                     maxIts=self.maxIts,
                                                                     initFlag=self.frontInitFlag,
                                                                     nRefPoints=self.nRefOrderingPoints,
                                                                     refPoints=self.xRefOrderingPoints)
        #
        self.localPWLreconstruction = FSWEikonalSolver.cfmmfsw.localPWLreconstruction
    #end init
    def solve(self,phi0,T,nodalSpeeds=None,zeroTol=1.0e-4,trialTol=1.0e-1,verbose=0):
        r"""
        First order fast sweeping method for the eikonal equation

            \|\grad T\| = 1/F,   T = 0 on \Gamma

        assuming phi0 describes the initial location of the interface \Gamma
        and has reasonable (absolute) values for T close to \Gamma. T can be
        interpreted as the travel time from \Gamma.

        Right now assumes global node numbers <--> global dofs, but this can
        be fixed easily.

        Input
          phi0 : dof array of a P1 C0 FiniteElementFunction (initial condition)
          T    : dof array of a P1 C0 FiniteElementFunction (solution output)
          nodalSpeeds : optional per-node speeds F; unit speeds when None
          zeroTol, trialTol : tolerances forwarded to the C solver

        Returns True if the underlying C solver reported failure.
        """
        assert len(T) == len(phi0), "phi0 and T must be same dimensionality"
        assert len(T) == self.mesh.nNodes_global, "FemSpaces must be C0 P1"
        failed = False
        # 'is None': see __init__ -- avoids elementwise numpy comparison
        if nodalSpeeds is None:
            speed = self.unitNodalSpeeds
        else:
            speed = nodalSpeeds
        assert len(speed) == len(T), "nodalSpeed dim= %s must be %s " % (len(speed),len(T))
        # Bug fix: verbose was hard-wired to 1 (marked "#mwf hack"), ignoring
        # the caller's argument; honor the requested verbosity instead.
        failed = self.csolver.solve(phi0,speed,T,zeroTol=zeroTol,trialTol=trialTol,
                                    initFlag=self.frontInitFlag,verbose=verbose)
        return bool(failed)
    #solve
#class
########################################################################
#test codes
########################################################################
def unstructuredEx1d(initFunc,Lx,nx,method='FMM',verbose=0):
    """
    Run a couple of redistancing examples in 1d (e.g. circle and two circles).

    initFunc -- callable f(x) giving the initial level-set values
    Lx       -- domain length
    nx       -- number of nodes in the edge mesh
    method   -- 'FSW' uses fast sweeping, anything else uses fast marching
    verbose  -- verbosity level forwarded to the solver

    Writes phi0.dat (initial condition), T.dat (travel time from the positive
    part) and phi.dat (Tp - Tm) as "x value" rows in the working directory.
    """
    import numpy
    mesh = MeshTools.EdgeMesh()
    mesh.generateEdgeMeshFromRectangularGrid(nx,Lx)
    femSpace = FemTools.C0_AffineLinearOnSimplexWithNodalBasis(mesh)
    FemPhi0 = FemTools.FiniteElementFunction(femSpace,name="phi0")
    FemPhi0p = FemTools.FiniteElementFunction(femSpace,name="phi0p")
    FemPhi0m = FemTools.FiniteElementFunction(femSpace,name="phi0m")
    FemTp = FemTools.FiniteElementFunction(femSpace,name="Tp")
    FemTm = FemTools.FiniteElementFunction(femSpace,name="Tm")
    phi0 = FemPhi0.dof ; phi0p = FemPhi0p.dof ; phi0m = FemPhi0m.dof ;
    Tp = FemTp.dof; Tm = FemTm.dof
    icout = open("phi0.dat",'w')
    #construct initial level set, short cut assuming dofs <--> node numbers
    for I in range(mesh.nNodes_global):
        x = mesh.nodeArray[I,0]
        phi0[I] = initFunc(x)
        phi0p[I]= max(phi0[I],0.0)       # positive part of phi0
        phi0m[I]= abs(min(phi0[I],0.0))  # magnitude of the negative part
        icout.write("%g %g \n" % (x,phi0[I]))
    #
    failed = False
    # solve separately for the positive (Tp) and negative (Tm) sides
    if method == 'FSW':
        solver = FSWEikonalSolver(mesh,FemPhi0.femSpace.dofMap.l2g,1,iterAtol=1.0e-8,maxIts=100)
        print "calling FSWEikonalSolver.solve for + ..."
        failed = solver.solve(FemPhi0p.dof,FemTp.dof,verbose=verbose)
        print "back. calling FSWEikonalSolver.solve for - ..."
        failed = solver.solve(FemPhi0m.dof,FemTm.dof,verbose=verbose)
        print "back."
    else:
        solver = FMMEikonalSolver(mesh,FemPhi0.femSpace.dofMap.l2g,1)
        print "calling FMMEikonalSolver.solve for + ..."
        failed = solver.solve(FemPhi0p.dof,FemTp.dof,verbose=verbose)
        print "back. calling FMMEikonalSolver.solve for - ..."
        failed = solver.solve(FemPhi0m.dof,FemTm.dof,verbose=verbose)
        print "back."
    fout = open("T.dat",'w')
    phout= open("phi.dat",'w')
    for I in range(mesh.nNodes_global):
        x = mesh.nodeArray[I,0]
        fout.write("%g %g \n" % (x,Tp[I]))
        phout.write("%g %g \n" % (x,Tp[I]-Tm[I]))
    icout.close()
    fout.close()
    phout.close()
def unstructuredEx2d(initFunc,Lx,Ly,nx,ny,method='FMM',verbose=0):
    """
    Run a couple of redistancing examples in 2d on a structured triangulation
    of [0,Lx] x [0,Ly].

    initFunc -- callable f(x,y) giving the initial level-set values
    nx,ny    -- number of nodes in each direction
    method   -- 'FSW' uses fast sweeping, anything else uses fast marching
    verbose  -- verbosity level forwarded to the solver

    Writes phi0.dat (initial condition), T.dat (travel time from the positive
    part) and phi.dat (Tp - Tm) as "x y value" rows.
    """
    import numpy
    mesh = MeshTools.TriangularMesh()
    mesh.generateTriangularMeshFromRectangularGrid(nx,ny,Lx,Ly)
    femSpace = FemTools.C0_AffineLinearOnSimplexWithNodalBasis(mesh)
    FemPhi0 = FemTools.FiniteElementFunction(femSpace,name="phi0")
    FemPhi0p = FemTools.FiniteElementFunction(femSpace,name="phi0p")
    FemPhi0m = FemTools.FiniteElementFunction(femSpace,name="phi0m")
    FemTp = FemTools.FiniteElementFunction(femSpace,name="Tp")
    FemTm = FemTools.FiniteElementFunction(femSpace,name="Tm")
    phi0 = FemPhi0.dof ; phi0p = FemPhi0p.dof ; phi0m = FemPhi0m.dof ;
    Tp = FemTp.dof; Tm = FemTm.dof
    icout = open("phi0.dat",'w')
    #construct initial level set, short cut assuming dofs <--> node numbers
    for I in range(mesh.nNodes_global):
        x = mesh.nodeArray[I,0]; y = mesh.nodeArray[I,1]
        phi0[I] = initFunc(x,y)
        phi0p[I]= max(phi0[I],0.0)       # positive part of phi0
        phi0m[I]= abs(min(phi0[I],0.0))  # magnitude of the negative part
        icout.write("%g %g %g \n" % (x,y,phi0[I]))
    #
    failed = False
    # solve separately for the positive (Tp) and negative (Tm) sides
    if method == 'FSW':
        #test different ref nodes (say just 3 in middle of domain?
        refNodes = numpy.array([[0.25,0.25,0.0],[0.5,0.5,0.0],[0.75,0.75,0.0]])
        #solver = FSWEikonalSolver(mesh,FemPhi0.femSpace.dofMap.l2g,2,iterAtol=1.0e-8,refPoints=refNodes,maxIts=100)
        solver = FSWEikonalSolver(mesh,FemPhi0.femSpace.dofMap.l2g,2,iterAtol=1.0e-8,maxIts=100)
        print "calling FSWEikonalSolver.solve for + ..."
        failed = solver.solve(FemPhi0p.dof,FemTp.dof,verbose=verbose)
        print "back. calling FSWEikonalSolver.solve for - ..."
        failed = solver.solve(FemPhi0m.dof,FemTm.dof,verbose=verbose)
        print "back."
    else:
        solver = FMMEikonalSolver(mesh,FemPhi0.femSpace.dofMap.l2g,2)
        print "calling FMMEikonalSolver.solve for + ..."
        failed = solver.solve(FemPhi0p.dof,FemTp.dof,verbose=verbose)
        print "back. calling FMMEikonalSolver.solve for - ..."
        failed = solver.solve(FemPhi0m.dof,FemTm.dof,verbose=verbose)
        print "back."
    #meth switch
    fout = open("T.dat",'w')
    phout= open("phi.dat",'w')
    for I in range(mesh.nNodes_global):
        x = mesh.nodeArray[I,0]; y = mesh.nodeArray[I,1]
        fout.write("%g %g %g \n" % (x,y,Tp[I]))
        phout.write("%g %g %g \n" % (x,y,Tp[I]-Tm[I]))
    icout.close()
    fout.close()
    phout.close()
def unstructuredEx3d(initFunc,Lx,Ly,Lz,nx,ny,nz,method='FMM',verbose=0):
    """
    Run a redistancing example in 3d on a structured tetrahedralization of
    [0,Lx] x [0,Ly] x [0,Lz].

    initFunc -- callable f(x,y,z) giving the initial level-set values
    method   -- 'FSW' uses fast sweeping, anything else uses fast marching
    verbose  -- verbosity level forwarded to the solver

    Writes phi0.dat, T.dat and phi.dat as "x y z value" rows.
    """
    import numpy
    mesh = MeshTools.TetrahedralMesh()
    mesh.generateTetrahedralMeshFromRectangularGrid(nx,ny,nz,Lx,Ly,Lz)
    femSpace = FemTools.C0_AffineLinearOnSimplexWithNodalBasis(mesh)
    FemPhi0 = FemTools.FiniteElementFunction(femSpace,name="phi0")
    FemPhi0p = FemTools.FiniteElementFunction(femSpace,name="phi0p")
    FemPhi0m = FemTools.FiniteElementFunction(femSpace,name="phi0m")
    FemTp = FemTools.FiniteElementFunction(femSpace,name="Tp")
    FemTm = FemTools.FiniteElementFunction(femSpace,name="Tm")
    phi0 = FemPhi0.dof ; phi0p = FemPhi0p.dof ; phi0m = FemPhi0m.dof ;
    Tp = FemTp.dof; Tm = FemTm.dof
    icout = open("phi0.dat",'w')
    #construct initial level set, short cut assuming dofs <--> node numbers
    for I in range(mesh.nNodes_global):
        x = mesh.nodeArray[I,0]; y = mesh.nodeArray[I,1]; z=mesh.nodeArray[I,2]
        phi0[I] = initFunc(x,y,z)
        phi0p[I]= max(phi0[I],0.0)       # positive part of phi0
        phi0m[I]= abs(min(phi0[I],0.0))  # magnitude of the negative part
        icout.write("%g %g %g %g \n" % (x,y,z,phi0[I]))
    #
    failed = False
    # solve separately for the positive (Tp) and negative (Tm) sides
    if method == 'FSW':
        solver = FSWEikonalSolver(mesh,FemPhi0.femSpace.dofMap.l2g,3,iterAtol=1.0e-8,maxIts=100)
        print "calling FSWEikonalSolver.solve for + ..."
        failed = solver.solve(FemPhi0p.dof,FemTp.dof,verbose=verbose)
        print "back. calling FSWEikonalSolver.solve for - ..."
        failed = solver.solve(FemPhi0m.dof,FemTm.dof,verbose=verbose)
        print "back."
    else:
        solver = FMMEikonalSolver(mesh,FemPhi0.femSpace.dofMap.l2g,3)
        print "calling FMMEikonalSolver.solve for + ..."
        failed = solver.solve(FemPhi0p.dof,FemTp.dof,verbose=verbose)
        print "back. calling FMMEikonalSolver.solve for - ..."
        failed = solver.solve(FemPhi0m.dof,FemTm.dof,verbose=verbose)
        print "back."
    #method switch
    fout = open("T.dat",'w')
    phout= open("phi.dat",'w')
    for I in range(mesh.nNodes_global):
        x = mesh.nodeArray[I,0]; y = mesh.nodeArray[I,1]; z=mesh.nodeArray[I,2]
        fout.write("%g %g %g %g \n" % (x,y,z,Tp[I]))
        phout.write("%g %g %g %g \n" % (x,y,z,Tp[I]-Tm[I]))
    icout.close()
    fout.close()
    phout.close()
########################################################################
#try to test out 3d versions
def test3dLocalSolver(verbose=0):
    """
    Exercise a 3d local solver on a reference tetrahedron with a known
    causal ordering of nodes.

    NOTE(review): this calls qianZhangLocalSolver3d, which is not defined in
    this module -- presumably it existed in an earlier pure-Python version;
    as written this function will raise NameError. Confirm before use.
    """
    #try some simple configurations that I can back out soln for
    import numpy,math
    nNodes=4; nSpace=3;
    nodes = numpy.zeros((nNodes,nSpace),'d')
    #reference tet
    nodes[1,:]=[1.0,0.0,0.0]; nodes[2,:]=[0.0,1.0,0.0]; nodes[3,:]=[0.0,0.0,1.0]
    T = numpy.zeros((nNodes,),'d')
    sqrt3 = math.sqrt(3.)
    # planar wave front travelling along (-1,1,1)/sqrt(3) at unit speed
    waveNormal = 1.0/sqrt3*numpy.array([-1.,1.,1.])
    eikSpeed=1.0
    eN = 0;
    #nodes with causal ordering
    N_A = 1; N_B=0; N_C=2; N_D=3
    #generic node numbering
    N = [0,1,2];
    # travel times consistent with the wave front above
    T[N_A]=0; T[N_B]=sqrt3/3.0; T[N_C]=2.0*sqrt3/3.0
    print "calling qianZhangLocalSolver\n\t nodes=%s \n N=%s \n\t T=%s " % (nodes,N,T)
    T_D = qianZhangLocalSolver3d(eN,N_D,N[0],N[1],N[2],nodes,T,eikSpeed,verbose=verbose)
    print "T_D= %s " % T_D
def unstructuredEx1dInCpp(initFunc,Lx,nx,method='FMM',verbose=0):
    """
    Run a couple of redistancing examples in 1d (circle and two circles)
    driving the cfmmfsw C++ solver interface directly, rather than going
    through the FMMEikonalSolver/FSWEikonalSolver wrapper classes.

    Writes phi0.dat, T.dat and phi.dat as "x value" rows.
    """
    import numpy
    from proteus import cfmmfsw
    mesh = MeshTools.EdgeMesh()
    mesh.generateEdgeMeshFromRectangularGrid(nx,Lx)
    femSpace = FemTools.C0_AffineLinearOnSimplexWithNodalBasis(mesh)
    FemPhi0 = FemTools.FiniteElementFunction(femSpace,name="phi0")
    FemPhi0p = FemTools.FiniteElementFunction(femSpace,name="phi0p")
    FemPhi0m = FemTools.FiniteElementFunction(femSpace,name="phi0m")
    FemTp = FemTools.FiniteElementFunction(femSpace,name="Tp")
    FemTm = FemTools.FiniteElementFunction(femSpace,name="Tm")
    phi0 = FemPhi0.dof ; phi0p = FemPhi0p.dof ; phi0m = FemPhi0m.dof ;
    Tp = FemTp.dof; Tm = FemTm.dof
    icout = open("phi0.dat",'w')
    #construct initial level set, short cut assuming dofs <--> node numbers
    for I in range(mesh.nNodes_global):
        x = mesh.nodeArray[I,0]
        phi0[I] = initFunc(x)
        phi0p[I]= max(phi0[I],0.0)       # positive part of phi0
        phi0m[I]= abs(min(phi0[I],0.0))  # magnitude of the negative part
        icout.write("%g %g \n" % (x,phi0[I]))
    #
    failed = False
    nd = 1
    # unit speeds; the C++ interface requires an explicit speed array
    nodalSpeeds = numpy.ones((mesh.nNodes_global,),'d')
    if method == 'FSW':
        solver = cfmmfsw.FSWEikonalSolver(nd,mesh.cmesh,atol=1.0e-8,rtol=1.0e-8,maxIts=100,
                                          initFlag=0)
        print "calling FSWEikonalSolver.solve for + ..."
        failed = solver.solve(FemPhi0p.dof,nodalSpeeds,FemTp.dof,initFlag=0,verbose=verbose)
        print "back. calling FSWEikonalSolver.solve for - ..."
        failed = solver.solve(FemPhi0m.dof,nodalSpeeds,FemTm.dof,initFlag=0,verbose=verbose)
        print "back."
    else:
        solver = cfmmfsw.FMMEikonalSolver(nd,mesh.cmesh)
        print "calling FMMEikonalSolver.solve for + ..."
        failed = solver.solve(FemPhi0p.dof,nodalSpeeds,FemTp.dof,initFlag=0,verbose=verbose)
        print "back. calling FMMEikonalSolver.solve for - ..."
        failed = solver.solve(FemPhi0m.dof,nodalSpeeds,FemTm.dof,initFlag=0,verbose=verbose)
        print "back."
    fout = open("T.dat",'w')
    phout= open("phi.dat",'w')
    for I in range(mesh.nNodes_global):
        x = mesh.nodeArray[I,0]
        fout.write("%g %g \n" % (x,Tp[I]))
        phout.write("%g %g \n" % (x,Tp[I]-Tm[I]))
    icout.close()
    fout.close()
    phout.close()
def unstructuredEx2dInCpp(initFunc,Lx,Ly,nx,ny,method='FMM',verbose=0):
    """
    Run a couple of redistancing examples in 2d driving the cfmmfsw C++
    solver interface directly, rather than going through the
    FMMEikonalSolver/FSWEikonalSolver wrapper classes.

    Writes phi0.dat, T.dat and phi.dat as "x y value" rows.
    """
    import numpy
    from proteus import cfmmfsw
    mesh = MeshTools.TriangularMesh()
    mesh.generateTriangularMeshFromRectangularGrid(nx,ny,Lx,Ly)
    femSpace = FemTools.C0_AffineLinearOnSimplexWithNodalBasis(mesh)
    FemPhi0 = FemTools.FiniteElementFunction(femSpace,name="phi0")
    FemPhi0p = FemTools.FiniteElementFunction(femSpace,name="phi0p")
    FemPhi0m = FemTools.FiniteElementFunction(femSpace,name="phi0m")
    FemTp = FemTools.FiniteElementFunction(femSpace,name="Tp")
    FemTm = FemTools.FiniteElementFunction(femSpace,name="Tm")
    phi0 = FemPhi0.dof ; phi0p = FemPhi0p.dof ; phi0m = FemPhi0m.dof ;
    Tp = FemTp.dof; Tm = FemTm.dof
    icout = open("phi0.dat",'w')
    #construct initial level set, short cut assuming dofs <--> node numbers
    for I in range(mesh.nNodes_global):
        x = mesh.nodeArray[I,0]; y = mesh.nodeArray[I,1]
        phi0[I] = initFunc(x,y)
        phi0p[I]= max(phi0[I],0.0)       # positive part of phi0
        phi0m[I]= abs(min(phi0[I],0.0))  # magnitude of the negative part
        icout.write("%g %g %g \n" % (x,y,phi0[I]))
    #
    failed = False
    nd = 2
    # unit speeds; the C++ interface requires an explicit speed array
    nodalSpeeds = numpy.ones((mesh.nNodes_global,),'d')
    if method == 'FSW':
        solver = cfmmfsw.FSWEikonalSolver(nd,mesh.cmesh,atol=1.0e-8,rtol=1.0e-8,maxIts=100,
                                          initFlag=0)
        print "calling FSWEikonalSolver.solve for + ..."
        failed = solver.solve(FemPhi0p.dof,nodalSpeeds,FemTp.dof,zeroTol=1.0e-4,trialTol=1.0e-1,
                              initFlag=0,verbose=verbose)
        print "back. calling FSWEikonalSolver.solve for - ..."
        failed = solver.solve(FemPhi0m.dof,nodalSpeeds,FemTm.dof,zeroTol=1.0e-4,trialTol=1.0e-1,
                              initFlag=0,verbose=verbose)
        print "back. failed= %s" % failed
    else:
        solver = cfmmfsw.FMMEikonalSolver(nd,mesh.cmesh)
        print "calling FMMEikonalSolver.solve for + ..."
        failed = solver.solve(FemPhi0p.dof,nodalSpeeds,FemTp.dof,zeroTol=1.0e-4,trialTol=1.0e-1,
                              initFlag=0,verbose=verbose)
        print "back. calling FMMEikonalSolver.solve for - ..."
        failed = solver.solve(FemPhi0m.dof,nodalSpeeds,FemTm.dof,zeroTol=1.0e-4,trialTol=1.0e-1,
                              initFlag=0,verbose=verbose)
        print "back."
    #meth switch
    fout = open("T.dat",'w')
    phout= open("phi.dat",'w')
    for I in range(mesh.nNodes_global):
        x = mesh.nodeArray[I,0]; y = mesh.nodeArray[I,1]
        fout.write("%g %g %g \n" % (x,y,Tp[I]))
        phout.write("%g %g %g \n" % (x,y,Tp[I]-Tm[I]))
    icout.close()
    fout.close()
    phout.close()
def unstructuredEx3dinCpp(initFunc,Lx,Ly,Lz,nx,ny,nz,method='FMM',verbose=0):
    """
    Run a redistancing example in 3d driving the cfmmfsw C++ solver
    interface directly, rather than going through the
    FMMEikonalSolver/FSWEikonalSolver wrapper classes.

    Writes phi0.dat, T.dat and phi.dat as "x y z value" rows.
    """
    import numpy
    from proteus import cfmmfsw
    mesh = MeshTools.TetrahedralMesh()
    mesh.generateTetrahedralMeshFromRectangularGrid(nx,ny,nz,Lx,Ly,Lz)
    femSpace = FemTools.C0_AffineLinearOnSimplexWithNodalBasis(mesh)
    FemPhi0 = FemTools.FiniteElementFunction(femSpace,name="phi0")
    FemPhi0p = FemTools.FiniteElementFunction(femSpace,name="phi0p")
    FemPhi0m = FemTools.FiniteElementFunction(femSpace,name="phi0m")
    FemTp = FemTools.FiniteElementFunction(femSpace,name="Tp")
    FemTm = FemTools.FiniteElementFunction(femSpace,name="Tm")
    phi0 = FemPhi0.dof ; phi0p = FemPhi0p.dof ; phi0m = FemPhi0m.dof ;
    Tp = FemTp.dof; Tm = FemTm.dof
    icout = open("phi0.dat",'w')
    #construct initial level set, short cut assuming dofs <--> node numbers
    for I in range(mesh.nNodes_global):
        x = mesh.nodeArray[I,0]; y = mesh.nodeArray[I,1]; z=mesh.nodeArray[I,2]
        phi0[I] = initFunc(x,y,z)
        phi0p[I]= max(phi0[I],0.0)       # positive part of phi0
        phi0m[I]= abs(min(phi0[I],0.0))  # magnitude of the negative part
        icout.write("%g %g %g %g \n" % (x,y,z,phi0[I]))
    #
    failed = False
    nd = 3
    # unit speeds; the C++ interface requires an explicit speed array
    nodalSpeeds = numpy.ones((mesh.nNodes_global,),'d')
    if method == 'FSW':
        solver = cfmmfsw.FSWEikonalSolver(nd,mesh.cmesh,atol=1.0e-8,rtol=1.0e-8,maxIts=100,
                                          initFlag=0)
        print "calling FSWEikonalSolver.solve for + ..."
        failed = solver.solve(FemPhi0p.dof,nodalSpeeds,FemTp.dof,zeroTol=1.0e-4,trialTol=1.0e-1,
                              initFlag=0,verbose=verbose)
        print "back. failed= %s calling FSWEikonalSolver.solve for - ..." % failed
        failed = solver.solve(FemPhi0m.dof,nodalSpeeds,FemTm.dof,zeroTol=1.0e-4,trialTol=1.0e-1,
                              initFlag=0,verbose=verbose)
        print "back. failed= %s" % failed
    else:
        solver = cfmmfsw.FMMEikonalSolver(nd,mesh.cmesh)
        print "calling FMMEikonalSolver.solve for + ..."
        failed = solver.solve(FemPhi0p.dof,nodalSpeeds,FemTp.dof,zeroTol=1.0e-4,trialTol=1.0e-1,
                              initFlag=0,verbose=verbose)
        print "back. calling FMMEikonalSolver.solve for - ..."
        failed = solver.solve(FemPhi0m.dof,nodalSpeeds,FemTm.dof,zeroTol=1.0e-4,trialTol=1.0e-1,
                              initFlag=0,verbose=verbose)
        print "back."
    #method switch
    fout = open("T.dat",'w')
    phout= open("phi.dat",'w')
    for I in range(mesh.nNodes_global):
        x = mesh.nodeArray[I,0]; y = mesh.nodeArray[I,1]; z=mesh.nodeArray[I,2]
        fout.write("%g %g %g %g \n" % (x,y,z,Tp[I]))
        phout.write("%g %g %g %g \n" % (x,y,z,Tp[I]-Tm[I]))
    icout.close()
    fout.close()
    phout.close()
if __name__ == "__main__":
    # Driver: pick a method ('FMM'/'FSW'), dimension, and test interface,
    # then run the matching example above.
    import math
    #method = 'FMM'
    method = 'FSW'
    dim = 2
    # MPI must be initialized before meshes are built.
    # Bug fix: only `Comm` is imported from proteus, so the bare name
    # `proteus` was undefined here; call Comm.get() directly.
    from proteus import Comm
    comm = Comm.get()
    # level-set test functions: negative inside the interface, positive outside
    def circle1d(x):
        return (x-0.5)**2 - 0.2**2
    def twoCircle1d(x):
        return min((x-0.25)**2 - 0.1**2,(x-0.75)**2 - 0.1**2)
    #
    def circle2d(x,y):
        return (x-0.5)**2 + (y-0.5)**2 - 0.2**2
    def fourPetal(x,y):
        # four-petal "flower" interface centered at (0.5,0.5)
        r0 = 0.25; a = 40; b = 4;
        tx = x-0.5; ty = y-0.5
        r = math.sqrt(tx**2 + ty**2); th = math.atan2(tx,ty)
        pr = 0.5*(r0 + math.cos(b*th)/(a*r0))
        return r**2 - pr**2
    def twoCircle2d(x,y):
        r0 = 0.15; r1 = 0.15; c0 = (0.25,0.25); c1=(0.75,0.75)
        d20= (x-c0[0])**2 + (y-c0[1])**2 - r0**2; d21 = (x-c1[0])**2 + (y-c1[1])**2 - r1**2
        return min(d20,d21)
    #
    def sphere3d(x,y,z):
        return (x-0.5)**2 + (y-0.5)**2 + (z-0.5)**2 - 0.2**2
    def twoSphere3d(x,y,z):
        return min((x-0.25)**2 + (y-0.25)**2 + (z-0.25)**2 - 0.1**2,
                   (x-0.75)**2 + (y-0.75)**2 + (z-0.75)**2 - 0.1**2)
    Lx = 1.; Ly = 1.; Lz = 1.
    if dim == 2:
        nx=21; ny=21
        #testFunc= circle2d
        #testFunc= fourPetal
        testFunc= twoCircle2d
        unstructuredEx2d(testFunc,Lx,Ly,nx,ny,method=method,verbose=0)
        #unstructuredEx2dInCpp(testFunc,Lx,Ly,nx,ny,method=method,verbose=1)
    elif dim == 1:
        #nx=11
        #testFunc= circle1d
        nx=41
        testFunc= twoCircle1d
        unstructuredEx1d(testFunc,Lx,nx,method=method,verbose=9)
        #unstructuredEx1dInCpp(testFunc,Lx,nx,method=method,verbose=9)
    else:
        #test3dLocalSolver(verbose=10)
        nx=21; ny = 21; nz=21
        testFunc= sphere3d
        #testFunc= twoSphere3d
        #unstructuredEx3d(testFunc,Lx,Ly,Lz,nx,ny,nz,method=method,verbose=0)
        unstructuredEx3dinCpp(testFunc,Lx,Ly,Lz,nx,ny,nz,method=method,verbose=1)
| 37.95389 | 125 | 0.603872 | 3,340 | 26,340 | 4.743713 | 0.108683 | 0.004544 | 0.070058 | 0.077632 | 0.832302 | 0.824413 | 0.813747 | 0.807498 | 0.794181 | 0.77979 | 0 | 0.036487 | 0.253948 | 26,340 | 693 | 126 | 38.008658 | 0.769783 | 0.052278 | 0 | 0.754137 | 0 | 0.002364 | 0.097161 | 0.026162 | 0 | 0 | 0 | 0.004329 | 0.023641 | 0 | null | null | 0 | 0.056738 | null | null | 0.089835 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
122b5e3a8c4941ce316acd360633b5b3b5118008 | 260 | py | Python | caesarcipher/decrypt.py | mr-tolmatskiy/Cesarcipher | 78e9aa01e8db93d2cc7c62e12075853786c86ab1 | [
"Apache-2.0"
] | null | null | null | caesarcipher/decrypt.py | mr-tolmatskiy/Cesarcipher | 78e9aa01e8db93d2cc7c62e12075853786c86ab1 | [
"Apache-2.0"
] | null | null | null | caesarcipher/decrypt.py | mr-tolmatskiy/Cesarcipher | 78e9aa01e8db93d2cc7c62e12075853786c86ab1 | [
"Apache-2.0"
def next_letter(letter, step=1):
    """Shift a lowercase letter backwards by ``step`` positions, wrapping a->z."""
    shifted = (ord(letter) - 97 - step) % 26
    return chr(shifted + 97)
def decrypt(encrypted_text, step=1):
    """Decrypt a Caesar-ciphered lowercase string by shifting each letter back by ``step``."""
    return ''.join(next_letter(ch, step) for ch in encrypted_text)
| 26 | 51 | 0.676923 | 36 | 260 | 4.694444 | 0.444444 | 0.230769 | 0.189349 | 0.236686 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039024 | 0.211538 | 260 | 9 | 52 | 28.888889 | 0.785366 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.285714 | false | 0 | 0 | 0.142857 | 0.571429 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
d617323933845b60bbdbf2f81f932a5ad88b036b | 319 | py | Python | python-lib/feature_aggregations/__init__.py | dataiku/dss-plugin-events-aggregator | a0d0b3b9fbee6251160895b4809f4f942abf1cc2 | [
"MIT"
] | null | null | null | python-lib/feature_aggregations/__init__.py | dataiku/dss-plugin-events-aggregator | a0d0b3b9fbee6251160895b4809f4f942abf1cc2 | [
"MIT"
] | null | null | null | python-lib/feature_aggregations/__init__.py | dataiku/dss-plugin-events-aggregator | a0d0b3b9fbee6251160895b4809f4f942abf1cc2 | [
"MIT"
] | null | null | null | # coding: utf-8
from feature_aggregations.feature_aggregator import FeatureAggregator, AggregationParams, TransformParams, PopulationsDefinitionMode, WindowWidthUnit
from feature_aggregations.file_management import FileManager
from feature_aggregations.preprocessing import CardinalityLimiter, CardinalityLimiterParams
| 63.8 | 149 | 0.902821 | 28 | 319 | 10.107143 | 0.678571 | 0.116608 | 0.243816 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003344 | 0.062696 | 319 | 4 | 150 | 79.75 | 0.943144 | 0.040752 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
d61ec53687df41f011733d36abe2ca8d391f10b3 | 10,538 | py | Python | tests/test_shows.py | questionlp/api.wwdt.me_v2 | 9e3705bba2668221740f5d28e94eec90998c3d00 | [
"Apache-2.0"
] | null | null | null | tests/test_shows.py | questionlp/api.wwdt.me_v2 | 9e3705bba2668221740f5d28e94eec90998c3d00 | [
"Apache-2.0"
] | null | null | null | tests/test_shows.py | questionlp/api.wwdt.me_v2 | 9e3705bba2668221740f5d28e94eec90998c3d00 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# vim: set noai syntax=python ts=4 sw=4:
#
# Copyright (c) 2018-2022 Linh Pham
# api.wwdt.me is released under the terms of the Apache License 2.0
"""Testing /v2.0/shows routes
"""
from fastapi.testclient import TestClient
import pytest
from requests.models import Response
from app.main import app
from app.config import API_VERSION
client = TestClient(app)
def test_shows():
    """Test /v2.0/shows route"""
    response = client.get(f"/v{API_VERSION}/shows")
    payload = response.json()
    assert response.status_code == 200
    assert "shows" in payload
    first_show = payload["shows"][0]
    # every show entry carries the basic show fields
    for field in ("id", "date", "best_of", "repeat_show"):
        assert field in first_show
@pytest.mark.parametrize("show_id", [1083])
def test_shows_id(show_id: int):
    """Test /v2.0/shows/id/{show_id} route"""
    response = client.get(f"/v{API_VERSION}/shows/id/{show_id}")
    show = response.json()
    assert response.status_code == 200
    assert "id" in show
    assert show["id"] == show_id
    # basic show fields must all be present
    for field in ("date", "best_of", "repeat_show"):
        assert field in show
@pytest.mark.parametrize("show_date", ["2018-10-27"])
def test_shows_date_iso_show_date(show_date: str):
    """Test /v2.0/shows/date/iso/{show_id} route"""
    response = client.get(f"/v{API_VERSION}/shows/date/iso/{show_date}")
    show = response.json()
    assert response.status_code == 200
    assert "id" in show
    assert "date" in show
    assert show["date"] == show_date
    # remaining basic show fields
    for field in ("best_of", "repeat_show"):
        assert field in show
@pytest.mark.parametrize("year", [2006])
def test_shows_date_year(year: int):
    """Test /v2.0/shows/date/{year} route"""
    response = client.get(f"/v{API_VERSION}/shows/date/{year}")
    shows = response.json()
    formatted_year = f"{year:04}"
    assert response.status_code == 200
    assert "shows" in shows
    first_show = shows["shows"][0]
    for field in ("id", "date", "best_of", "repeat_show"):
        assert field in first_show
    # returned shows should fall in the requested year
    assert first_show["date"].startswith(formatted_year)
@pytest.mark.parametrize("year, month", [(2006, 6)])
def test_shows_date_year_month(year: int, month: int):
    """Test /v2.0/shows/date/{year}/{month} route"""
    response = client.get(f"/v{API_VERSION}/shows/date/{year}/{month}")
    shows = response.json()
    formatted_year_month = f"{year:04}-{month:02}"
    assert response.status_code == 200
    assert "shows" in shows
    first_show = shows["shows"][0]
    for field in ("id", "date", "best_of", "repeat_show"):
        assert field in first_show
    # returned shows should fall in the requested year and month
    assert first_show["date"].startswith(formatted_year_month)
@pytest.mark.parametrize("month, day", [(10, 27)])
def test_shows_date_month_day(month: int, day: int):
    """Test /v2.0/shows/date/month-day/{month}/{day} route"""
    response = client.get(f"/v{API_VERSION}/shows/date/month-day/{month}/{day}")
    shows = response.json()
    formatted_month_day = f"{month:02}-{day:02}"
    assert response.status_code == 200
    assert "shows" in shows
    assert "id" in shows["shows"][0]
    assert "date" in shows["shows"][0]
    # Bug fix: str.find() returns -1 (truthy) when absent, so the original
    # `assert ....find(...)` could never fail; check substring membership.
    assert formatted_month_day in shows["shows"][0]["date"]
    assert "best_of" in shows["shows"][0]
    assert "repeat_show" in shows["shows"][0]
@pytest.mark.parametrize("year, month, day", [(2018, 10, 27)])
def test_shows_date_year_month_day(year: int, month: int, day: int):
    """Test /v2.0/shows/date/{year}/{month}/{day} route"""
    response = client.get(f"/v{API_VERSION}/shows/date/{year}/{month}/{day}")
    show = response.json()
    formatted_date = f"{year:04}-{month:02}-{day:02}"
    assert response.status_code == 200
    for field in ("id", "date", "best_of", "repeat_show"):
        assert field in show
    # the single returned show must match the requested date exactly
    assert show["date"] == formatted_date
def test_show_dates():
    """Test /v2.0/shows/dates route"""
    response = client.get(f"/v{API_VERSION}/shows/dates")
    dates = response.json()
    assert response.status_code == 200
    assert "shows" in dates
    # the list of show dates must be non-empty
    assert dates["shows"]
def test_shows_details():
    """Test /v2.0/shows/details route"""
    response = client.get(f"/v{API_VERSION}/shows/details")
    payload = response.json()
    assert response.status_code == 200
    assert "shows" in payload
    first_show = payload["shows"][0]
    # detailed show entries carry the basic fields plus detail fields
    for field in ("id", "date", "best_of", "repeat_show", "location",
                  "description", "host", "scorekeeper", "panelists", "guests"):
        assert field in first_show
@pytest.mark.parametrize("show_date", ["2018-10-27"])
def test_shows_details_date_iso_show_date(show_date: str):
    """Test /v2.0/shows/details/date/iso/{show_id} route"""
    response = client.get(f"/v{API_VERSION}/shows/details/date/iso/{show_date}")
    show = response.json()
    assert response.status_code == 200
    assert "date" in show
    assert show["date"] == show_date
    # detailed show entries carry the basic fields plus detail fields
    for field in ("id", "best_of", "repeat_show", "location", "description",
                  "host", "scorekeeper", "panelists", "guests"):
        assert field in show
@pytest.mark.parametrize("year", [2006])
def test_shows_details_date_year(year: int):
    """Test /v2.0/shows/details/date/{year} route"""
    response = client.get(f"/v{API_VERSION}/shows/details/date/{year}")
    shows = response.json()
    formatted_year = f"{year:04}"
    assert response.status_code == 200
    assert "shows" in shows
    first_show = shows["shows"][0]
    for field in ("id", "date", "best_of", "repeat_show", "location",
                  "description", "host", "scorekeeper", "panelists", "guests"):
        assert field in first_show
    # returned shows should fall in the requested year
    assert first_show["date"].startswith(formatted_year)
@pytest.mark.parametrize("year, month", [(2006, 6)])
def test_shows_details_date_year_month(year: int, month: int):
    """Test /v2.0/shows/details/date/{year}/{month} route"""
    response = client.get(f"/v{API_VERSION}/shows/details/date/{year}/{month}")
    shows = response.json()
    formatted_year_month = f"{year:04}-{month:02}"
    assert response.status_code == 200
    assert "shows" in shows
    first_show = shows["shows"][0]
    for field in ("id", "date", "best_of", "repeat_show", "location",
                  "description", "host", "scorekeeper", "panelists", "guests"):
        assert field in first_show
    # returned shows should fall in the requested year and month
    assert first_show["date"].startswith(formatted_year_month)
@pytest.mark.parametrize("month, day", [(10, 27)])
def test_shows_details_date_month_day(month: int, day: int):
    """Test /v2.0/shows/details/date/month-day/{month}/{day} route"""
    response = client.get(f"/v{API_VERSION}/shows/details/date/month-day/{month}/{day}")
    shows = response.json()
    formatted_month_day = f"{month:02}-{day:02}"
    assert response.status_code == 200
    assert "shows" in shows
    assert "id" in shows["shows"][0]
    assert "date" in shows["shows"][0]
    # Bug fix: str.find() returns -1 (truthy) when absent, so the original
    # `assert ....find(...)` could never fail; check substring membership.
    assert formatted_month_day in shows["shows"][0]["date"]
    assert "best_of" in shows["shows"][0]
    assert "repeat_show" in shows["shows"][0]
    assert "location" in shows["shows"][0]
    assert "description" in shows["shows"][0]
    assert "host" in shows["shows"][0]
    assert "scorekeeper" in shows["shows"][0]
    assert "panelists" in shows["shows"][0]
    assert "guests" in shows["shows"][0]
@pytest.mark.parametrize("year, month, day", [(2018, 10, 27)])
def test_shows_details_date_year_month_day(year: int, month: int, day: int):
    """Test /v2.0/shows/details/date/{year}/{month}/{day} route"""
    response = client.get(
        f"/v{API_VERSION}/shows/details/date/{year}/{month}/{day}")
    show = response.json()
    expected_date = f"{year:04}-{month:02}-{day:02}"
    assert response.status_code == 200
    assert "id" in show
    assert "date" in show
    # A fully-specified date returns exactly one show for that date.
    assert show["date"] == expected_date
    for field in ("best_of", "repeat_show", "location", "description",
                  "host", "scorekeeper", "panelists", "guests"):
        assert field in show
@pytest.mark.parametrize("show_id", [1083])
def test_shows_details_id(show_id: int):
    """Test /v2.0/shows/details/id/{show_id} route"""
    response = client.get(f"/v{API_VERSION}/shows/details/id/{show_id}")
    show = response.json()
    assert response.status_code == 200
    assert "id" in show
    # The endpoint must echo back the requested show ID.
    assert show["id"] == show_id
    for field in ("date", "best_of", "repeat_show", "location",
                  "description", "host", "scorekeeper", "panelists",
                  "guests"):
        assert field in show
def test_shows_details_recent():
    """Test /v2.0/shows/details/recent route"""
    response = client.get(f"/v{API_VERSION}/shows/details/recent")
    payload = response.json()
    assert response.status_code == 200
    assert "shows" in payload
    first_show = payload["shows"][0]
    # Spot-check the detail fields on the first recent show.
    for field in ("id", "date", "best_of", "repeat_show", "location",
                  "description", "host", "scorekeeper", "panelists",
                  "guests"):
        assert field in first_show
def test_shows_recent():
"""Test /v2.0/shows/recent route"""
response = client.get(f"/v{API_VERSION}/shows/recent")
shows = response.json()
assert response.status_code == 200
assert "shows" in shows
assert "id" in shows["shows"][0]
assert "date" in shows["shows"][0]
assert "best_of" in shows["shows"][0]
assert "repeat_show" in shows["shows"][0]
| 32.424615 | 88 | 0.65743 | 1,559 | 10,538 | 4.332906 | 0.058371 | 0.082902 | 0.12376 | 0.134715 | 0.937232 | 0.927017 | 0.923908 | 0.921836 | 0.903923 | 0.884826 | 0 | 0.031541 | 0.17565 | 10,538 | 324 | 89 | 32.524691 | 0.746057 | 0.0855 | 0 | 0.813043 | 0 | 0 | 0.233833 | 0.077665 | 0 | 0 | 0 | 0 | 0.665217 | 1 | 0.073913 | false | 0 | 0.021739 | 0 | 0.095652 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
d64fd69e4f7c88b1170259fad3077a22128e06c6 | 26,752 | py | Python | tests/trans_sec/analytics/oinc_tests.py | termlen0/transparent-security | ea58da4c8de8300b24ba72a69f77b8ab39ada072 | [
"Apache-2.0"
] | 1 | 2021-05-12T17:55:52.000Z | 2021-05-12T17:55:52.000Z | tests/trans_sec/analytics/oinc_tests.py | termlen0/transparent-security | ea58da4c8de8300b24ba72a69f77b8ab39ada072 | [
"Apache-2.0"
] | null | null | null | tests/trans_sec/analytics/oinc_tests.py | termlen0/transparent-security | ea58da4c8de8300b24ba72a69f77b8ab39ada072 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2019 Cable Television Laboratories, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Unit tests for http_session.py
import logging
import sys
import unittest
from random import randrange, randint
import ipaddress
import mock
from scapy.all import get_if_hwaddr
from scapy.layers.inet import IP, UDP, TCP
from scapy.layers.inet6 import IPv6
from scapy.layers.l2 import Ether
import trans_sec.consts
from trans_sec import consts
from trans_sec.analytics import oinc
from trans_sec.analytics.oinc import SimpleAE
from trans_sec.packet.inspect_layer import (
IntShim, IntMeta2, IntHeader, SourceIntMeta, IntMeta1, UdpInt,
TelemetryReport)
from trans_sec.utils.http_session import HttpSession
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
logger = logging.getLogger('oinc_tests')
class SimpleAETests(unittest.TestCase):
    """
    Unit tests for the class SimpleAE.

    setUp() builds a matrix of test packets: INT-instrumented packets
    (IPv4/IPv6 outer x UDP/TCP inner) plus Telemetry-Report wrappers
    around each of them.  The HTTP session handed to SimpleAE is a mock,
    so no network calls are made by the analytics engine itself.
    """
    def setUp(self):
        # Engine under test.  An "attack" should be reported once
        # packet_count packets of the same flow have been processed.
        self.ae = SimpleAE(mock.Mock(HttpSession), packet_count=20,
                           sample_interval=2)
        # Random ports so repeated runs exercise different flow keys.
        self.sport = randrange(1000, 8000)
        self.dport = randrange(1000, 8000)
        self.dst_ipv4 = '10.1.0.1'
        # NOTE(review): unicode() exists only on Python 2; under Python 3
        # this raises NameError.  Confirm the target interpreter version.
        self.dst_ipv6 = ipaddress.ip_address(
            unicode('0000:0000:0000:0000:0000:0001:0000:0001'))
        self.src_ipv4 = '10.2.0.1'
        self.src_ipv6 = ipaddress.ip_address(
            unicode('0000:0000:0000:0000:0000:0002:0000:0001'))
        self.dst_mac = rand_mac()
        self.src_mac = rand_mac()
        # self.orig_mac = rand_mac()
        self.orig_mac = '00:00:00:02:02:00'
        logger.info('Test sport - [%s] dport - [%s]', self.sport, self.dport)
        # INT packet: IPv4 outer header, UDP inner protocol.
        self.int_pkt_ipv4_udp = (
            Ether(src=get_if_hwaddr('lo'), dst=self.dst_mac) /
            IP(dst=self.dst_ipv4, src=self.src_ipv4,
               proto=trans_sec.consts.UDP_PROTO) /
            UdpInt(dport=trans_sec.consts.UDP_INT_DST_PORT) /
            IntShim(length=9, next_proto=trans_sec.consts.UDP_PROTO) /
            IntHeader(meta_len=1) /
            IntMeta1(switch_id=3) /
            IntMeta2(switch_id=2) /
            SourceIntMeta(switch_id=1, orig_mac=self.orig_mac) /
            UDP(dport=self.dport, sport=self.sport) /
            'hello transparent-security'
        )
        # INT packet: IPv4 outer header, TCP inner protocol.
        self.int_pkt_ipv4_tcp = (
            Ether(src=get_if_hwaddr('lo'), dst=self.dst_mac) /
            IP(dst=self.dst_ipv4, src=self.src_ipv4,
               proto=trans_sec.consts.UDP_PROTO) /
            UdpInt(dport=trans_sec.consts.UDP_INT_DST_PORT) /
            IntShim(length=9, next_proto=trans_sec.consts.TCP_PROTO) /
            IntHeader(meta_len=1) /
            IntMeta1(switch_id=3) /
            IntMeta2(switch_id=2) /
            SourceIntMeta(switch_id=1, orig_mac=self.orig_mac) /
            TCP(dport=self.dport, sport=self.sport) /
            'hello transparent-security'
        )
        # INT packet: IPv6 outer header, UDP inner protocol.
        # NOTE(review): unlike the IPv4 variants this wraps the INT stack
        # in a plain UDP layer on UDP_TRPT_DST_PORT rather than UdpInt on
        # UDP_INT_DST_PORT -- confirm this asymmetry is intentional.
        self.int_pkt_ipv6_udp = (
            Ether(src=get_if_hwaddr('lo'), dst=self.dst_mac,
                  type=trans_sec.consts.IPV6_TYPE) /
            IPv6(dst=self.dst_ipv6,
                 src=self.src_ipv6,
                 nh=trans_sec.consts.UDP_PROTO) /
            UDP(dport=consts.UDP_TRPT_DST_PORT) /
            IntShim(length=9, next_proto=trans_sec.consts.UDP_PROTO) /
            IntHeader(meta_len=1) /
            IntMeta1(switch_id=3) /
            IntMeta2(switch_id=2) /
            SourceIntMeta(switch_id=1, orig_mac=self.orig_mac) /
            UDP(dport=self.dport, sport=self.sport) /
            'hello transparent-security'
        )
        # INT packet: IPv6 outer header, TCP inner protocol.
        self.int_pkt_ipv6_tcp = (
            Ether(src=get_if_hwaddr('lo'), dst=self.dst_mac,
                  type=trans_sec.consts.IPV6_TYPE) /
            IPv6(dst=self.dst_ipv6,
                 src=self.src_ipv6,
                 nh=trans_sec.consts.UDP_PROTO) /
            UDP(dport=consts.UDP_TRPT_DST_PORT) /
            IntShim(length=9, next_proto=trans_sec.consts.TCP_PROTO) /
            IntHeader(meta_len=1) /
            IntMeta1(switch_id=3) /
            IntMeta2(switch_id=2) /
            SourceIntMeta(switch_id=1, orig_mac=self.orig_mac) /
            TCP(dport=self.dport, sport=self.sport) /
            'hello transparent-security'
        )
        # Telemetry Report wrappers: each embeds one of the INT packets
        # above as its payload (str(pkt) serializes the scapy packet --
        # a Python 2 idiom; Python 3 would use bytes()).
        self.trpt_pkt_ipv4_out_ipv4_in_udp = (
            Ether(src=self.src_mac, dst=self.dst_mac) /
            IP(dst=self.dst_ipv4, src=self.src_ipv4,
               proto=trans_sec.consts.UDP_PROTO) /
            UDP(sport=0, dport=consts.UDP_TRPT_DST_PORT,
                # udp + telemetry header size
                len=len(self.int_pkt_ipv4_udp) + 20 + 20) /
            TelemetryReport(domain_id=consts.TRPT_DOMAIN_ID) /
            str(self.int_pkt_ipv4_udp)
        )
        self.trpt_pkt_ipv4_out_ipv6_in_udp = (
            Ether(src=self.src_mac, dst=self.dst_mac) /
            IP(dst=self.dst_ipv4, src=self.src_ipv4,
               proto=trans_sec.consts.UDP_PROTO) /
            UDP(sport=0, dport=consts.UDP_TRPT_DST_PORT,
                # udp + telemetry header size
                len=len(self.int_pkt_ipv4_udp) + 20 + 20) /
            TelemetryReport(domain_id=consts.TRPT_DOMAIN_ID) /
            str(self.int_pkt_ipv6_udp)
        )
        self.trpt_pkt_ipv4_out_ipv4_in_tcp = (
            Ether(src=self.src_mac, dst=self.dst_mac) /
            IP(dst=self.dst_ipv4, src=self.src_ipv4,
               proto=trans_sec.consts.UDP_PROTO) /
            UDP(sport=0, dport=consts.UDP_TRPT_DST_PORT,
                # udp + telemetry header size
                len=len(self.int_pkt_ipv4_udp) + 20 + 20) /
            TelemetryReport(domain_id=consts.TRPT_DOMAIN_ID) /
            str(self.int_pkt_ipv4_tcp)
        )
        self.trpt_pkt_ipv4_out_ipv6_in_tcp = (
            Ether(src=self.src_mac, dst=self.dst_mac) /
            IP(dst=self.dst_ipv4, src=self.src_ipv4,
               proto=trans_sec.consts.UDP_PROTO) /
            UDP(sport=0, dport=consts.UDP_TRPT_DST_PORT,
                # udp + telemetry header size
                len=len(self.int_pkt_ipv4_udp) + 20 + 20) /
            TelemetryReport(domain_id=consts.TRPT_DOMAIN_ID) /
            str(self.int_pkt_ipv6_tcp)
        )

    def test_extract_ipv4_udp_packet(self):
        """
        Tests to ensure that an IPv4 UDP single packet will be parsed properly
        """
        int_data = oinc.extract_int_data(self.int_pkt_ipv4_udp[Ether])
        self.assertEqual(self.orig_mac, int_data['devMac'])
        self.assertEqual(self.src_ipv4, int_data['devAddr'])
        self.assertEqual(self.dst_ipv4, int_data['dstAddr'])
        self.assertEqual(self.dport, int_data['dstPort'])
        self.assertEqual(trans_sec.consts.UDP_PROTO, int_data['protocol'])

    def test_extract_ipv4_udp_packet_trpt(self):
        """
        Tests to ensure that a Telemetry-Report-wrapped IPv4 UDP packet
        will be parsed properly
        """
        int_data = oinc.extract_trpt_data(
            self.trpt_pkt_ipv4_out_ipv4_in_udp[UDP])
        self.assertEqual(self.orig_mac, int_data['devMac'])
        self.assertEqual(self.src_ipv4, int_data['devAddr'])
        self.assertEqual(self.dst_ipv4, int_data['dstAddr'])
        self.assertEqual(self.dport, int_data['dstPort'])
        self.assertEqual(trans_sec.consts.UDP_PROTO, int_data['protocol'])

    def test_extract_ipv4_tcp_packet(self):
        """
        Tests to ensure that an IPv4 TCP single packet will be parsed properly
        """
        int_data = oinc.extract_int_data(self.int_pkt_ipv4_tcp[Ether])
        self.assertEqual(self.orig_mac, int_data['devMac'])
        self.assertEqual(self.src_ipv4, int_data['devAddr'])
        self.assertEqual(self.dst_ipv4, int_data['dstAddr'])
        self.assertEqual(self.dport, int_data['dstPort'])
        self.assertEqual(trans_sec.consts.TCP_PROTO, int_data['protocol'])

    def test_extract_ipv4_tcp_packet_trpt(self):
        """
        Tests to ensure that a Telemetry-Report-wrapped IPv4 TCP packet
        will be parsed properly
        """
        int_data = oinc.extract_trpt_data(
            self.trpt_pkt_ipv4_out_ipv4_in_tcp[UDP])
        self.assertEqual(self.orig_mac, int_data['devMac'])
        self.assertEqual(self.src_ipv4, int_data['devAddr'])
        self.assertEqual(self.dst_ipv4, int_data['dstAddr'])
        self.assertEqual(self.dport, int_data['dstPort'])
        self.assertEqual(trans_sec.consts.TCP_PROTO, int_data['protocol'])

    def test_extract_ipv6_udp_packet(self):
        """
        Tests to ensure that an IPv6 UDP single packet will be parsed properly
        """
        int_data = oinc.extract_int_data(self.int_pkt_ipv6_udp[Ether])
        self.assertEqual(self.orig_mac, int_data['devMac'])
        self.assertEqual(str(self.src_ipv6), int_data['devAddr'])
        self.assertEqual(str(self.dst_ipv6), int_data['dstAddr'])
        self.assertEqual(self.dport, int_data['dstPort'])
        self.assertEqual(trans_sec.consts.UDP_PROTO, int_data['protocol'])

    def test_extract_ipv6_udp_packet_trpt(self):
        """
        Tests to ensure that an IPv6 UDP single packet will be parsed properly
        """
        int_data = oinc.extract_trpt_data(
            self.trpt_pkt_ipv4_out_ipv6_in_udp[UDP])
        self.assertEqual(self.orig_mac, int_data['devMac'])
        self.assertEqual(str(self.src_ipv6), int_data['devAddr'])
        self.assertEqual(str(self.dst_ipv6), int_data['dstAddr'])
        self.assertEqual(self.dport, int_data['dstPort'])
        self.assertEqual(trans_sec.consts.UDP_PROTO, int_data['protocol'])

    def test_extract_ipv6_tcp_packet(self):
        """
        Tests to ensure that an IPv6 TCP single packet will be parsed properly
        """
        int_data = oinc.extract_int_data(self.int_pkt_ipv6_tcp[Ether])
        self.assertEqual(self.orig_mac, int_data['devMac'])
        self.assertEqual(str(self.src_ipv6), int_data['devAddr'])
        self.assertEqual(str(self.dst_ipv6), int_data['dstAddr'])
        self.assertEqual(self.dport, int_data['dstPort'])
        self.assertEqual(trans_sec.consts.TCP_PROTO, int_data['protocol'])

    def test_extract_ipv6_tcp_packet_trpt(self):
        """
        Tests to ensure that an IPv6 TCP single packet will be parsed properly
        """
        int_data = oinc.extract_trpt_data(
            self.trpt_pkt_ipv4_out_ipv6_in_tcp[UDP])
        self.assertEqual(self.orig_mac, int_data['devMac'])
        self.assertEqual(str(self.src_ipv6), int_data['devAddr'])
        self.assertEqual(str(self.dst_ipv6), int_data['dstAddr'])
        self.assertEqual(self.dport, int_data['dstPort'])
        self.assertEqual(trans_sec.consts.TCP_PROTO, int_data['protocol'])

    def test_process_single_ipv4_udp_packet(self):
        """
        Tests to ensure that an IPv4 UDP single packet is handled without Error
        note: only testing via the handle_packet() API which would be called by
        by the scapy sniffer thread
        :return:
        """
        # One packet is below the attack threshold, so no attack reported.
        self.assertFalse(self.ae.process_packet(self.int_pkt_ipv4_udp))

    def test_process_single_ipv4_udp_packet_trpt(self):
        """
        Tests to ensure that an IPv4 UDP single packet is handled without Error
        note: only testing via the handle_packet() API which would be called by
        by the scapy sniffer thread
        :return:
        """
        self.assertFalse(
            self.ae.process_packet(self.trpt_pkt_ipv4_out_ipv4_in_udp))

    def test_process_single_ipv6_udp_packet(self):
        """
        Tests to ensure that an IPv6 UDP single packet is handled without Error
        note: only testing via the handle_packet() API which would be called by
        by the scapy sniffer thread
        :return:
        """
        # NOTE(review): unlike the IPv4 variants the return value is not
        # asserted here -- smoke test only.
        self.ae.process_packet(self.int_pkt_ipv6_udp)

    def test_process_single_ipv6_udp_packet_trpt(self):
        """
        Tests to ensure that an IPv6 UDP single packet is handled without Error
        note: only testing via the handle_packet() API which would be called by
        by the scapy sniffer thread
        :return:
        """
        self.ae.process_packet(self.trpt_pkt_ipv4_out_ipv6_in_udp)

    def test_process_single_ipv4_tcp_packet(self):
        """
        Tests to ensure that a single IPv4 TCP packet is handled without Error
        note: only testing via the handle_packet() API which would be called by
        by the scapy sniffer thread
        :return:
        """
        self.ae.process_packet(self.int_pkt_ipv4_tcp)

    def test_process_single_ipv4_tcp_packet_trpt(self):
        """
        Tests to ensure that a single IPv4 TCP packet is handled without Error
        note: only testing via the handle_packet() API which would be called by
        by the scapy sniffer thread
        :return:
        """
        self.ae.process_packet(self.trpt_pkt_ipv4_out_ipv4_in_tcp)

    def test_process_single_ipv6_tcp_packet(self):
        """
        Tests to ensure that a single IPv6 TCP packet is handled without Error
        note: only testing via the handle_packet() API which would be called by
        by the scapy sniffer thread
        :return:
        """
        self.ae.process_packet(self.int_pkt_ipv6_tcp)

    def test_process_single_ipv6_tcp_packet_trpt(self):
        """
        Tests to ensure that a single IPv6 TCP packet is handled without Error
        note: only testing via the handle_packet() API which would be called by
        by the scapy sniffer thread
        :return:
        """
        self.ae.process_packet(self.trpt_pkt_ipv4_out_ipv6_in_tcp)

    def test_start_one_ipv4_udp_attack(self):
        """
        Tests to ensure that one IPv4 UDP attack has been triggered
        :return:
        """
        # Feed packet_count + 1 packets: the last one crosses the
        # threshold and must report an attack.
        for index in range(0, self.ae.packet_count + 1):
            logger.debug('Processing packet #%s', index)
            ret_val = self.ae.process_packet(self.int_pkt_ipv4_udp)
            if index < self.ae.packet_count:
                self.assertFalse(ret_val)
            else:
                self.assertTrue(ret_val)

    def test_start_one_ipv4_udp_attack_trpt(self):
        """
        Tests to ensure that one IPv4 UDP attack has been triggered
        :return:
        """
        for index in range(0, self.ae.packet_count + 1):
            logger.debug('Processing packet #%s', index)
            ret_val = self.ae.process_packet(
                self.trpt_pkt_ipv4_out_ipv4_in_udp)
            # NOTE(review): index never exceeds packet_count, so
            # `index < packet_count + 1` is always true and the
            # assertTrue branch below is unreachable -- likely an
            # off-by-one; confirm the intended trigger point.
            if index < self.ae.packet_count + 1:
                self.assertFalse(ret_val)
            else:
                self.assertTrue(ret_val)

    def test_start_one_ipv6_udp_attack(self):
        """
        Tests to ensure that one IPv6 UDP attack has been triggered
        :return:
        """
        for index in range(0, self.ae.packet_count + 1):
            logger.debug('Processing packet #%s', index)
            ret_val = self.ae.process_packet(self.int_pkt_ipv6_udp)
            # NOTE(review): same unreachable assertTrue branch as in
            # test_start_one_ipv4_udp_attack_trpt above.
            if index < self.ae.packet_count + 1:
                self.assertFalse(ret_val)
            else:
                self.assertTrue(ret_val)

    def test_start_one_ipv6_udp_attack_trpt(self):
        """
        Tests to ensure that one IPv6 UDP attack has been triggered
        :return:
        """
        for index in range(0, self.ae.packet_count + 1):
            logger.debug('Processing packet #%s', index)
            ret_val = self.ae.process_packet(
                self.trpt_pkt_ipv4_out_ipv6_in_udp)
            # NOTE(review): unreachable assertTrue branch (see above).
            if index < self.ae.packet_count + 1:
                self.assertFalse(ret_val)
            else:
                self.assertTrue(ret_val)

    def test_start_one_ipv4_tcp_attack(self):
        """
        Tests to ensure that one IPv4 TCP attack has been triggered
        :return:
        """
        for index in range(0, self.ae.packet_count + 1):
            logger.debug('Processing packet #%s', index)
            ret_val = self.ae.process_packet(self.int_pkt_ipv4_tcp)
            if index < self.ae.packet_count:
                self.assertFalse(ret_val)
            else:
                self.assertTrue(ret_val)

    def test_start_one_ipv4_tcp_attack_trpt(self):
        """
        Tests to ensure that one IPv4 TCP attack has been triggered
        :return:
        """
        for index in range(0, self.ae.packet_count + 1):
            logger.debug('Processing packet #%s', index)
            ret_val = self.ae.process_packet(
                self.trpt_pkt_ipv4_out_ipv4_in_tcp)
            # NOTE(review): unreachable assertTrue branch (see above).
            if index < self.ae.packet_count + 1:
                self.assertFalse(ret_val)
            else:
                self.assertTrue(ret_val)

    def test_start_one_ipv6_tcp_attack(self):
        """
        Tests to ensure that one IPv6 TCP attack has been triggered
        :return:
        """
        for index in range(0, self.ae.packet_count + 1):
            logger.debug('Processing packet #%s', index)
            ret_val = self.ae.process_packet(self.int_pkt_ipv6_tcp)
            # NOTE(review): unreachable assertTrue branch (see above).
            if index < self.ae.packet_count + 1:
                self.assertFalse(ret_val)
            else:
                self.assertTrue(ret_val)

    def test_start_one_ipv6_tcp_attack_trpt(self):
        """
        Tests to ensure that one IPv6 TCP attack has been triggered
        :return:
        """
        for index in range(0, self.ae.packet_count + 1):
            logger.debug('Processing packet #%s', index)
            ret_val = self.ae.process_packet(
                self.trpt_pkt_ipv4_out_ipv6_in_tcp)
            # NOTE(review): unreachable assertTrue branch (see above).
            if index < self.ae.packet_count + 1:
                self.assertFalse(ret_val)
            else:
                self.assertTrue(ret_val)

    def test_start_two_ipv4_udp_attacks(self):
        """
        Tests to ensure that two IPv4 UDP attacks have been triggered
        :return:
        """
        # Two distinct packets interleaved: each flow should trip the
        # threshold at the same iteration.
        pkt1 = (Ether(src=get_if_hwaddr('lo'), dst=self.dst_mac) /
                IP(dst=self.dst_ipv4, src=self.src_ipv4,
                   proto=trans_sec.consts.UDP_PROTO) /
                UdpInt() /
                IntShim(length=9, next_proto=trans_sec.consts.UDP_PROTO) /
                IntHeader(meta_len=1) /
                IntMeta1(switch_id=3) /
                IntMeta2(switch_id=2) /
                SourceIntMeta(switch_id=1, orig_mac=self.orig_mac) /
                UDP(dport=self.dport, sport=self.sport) /
                'hello transparent-security')
        pkt2 = (Ether(src=get_if_hwaddr('lo'), dst=self.dst_mac) /
                IP(dst=self.dst_ipv4, src=self.src_ipv4,
                   proto=trans_sec.consts.UDP_PROTO) /
                UdpInt() /
                IntShim(length=9, next_proto=trans_sec.consts.UDP_PROTO) /
                IntHeader(meta_len=1) /
                IntMeta1(switch_id=3) /
                IntMeta2(switch_id=2) /
                SourceIntMeta(switch_id=1, orig_mac=self.orig_mac) /
                UDP(dport=self.dport, sport=self.sport) /
                'hello transparent-security')
        for index in range(0, self.ae.packet_count):
            logger.info('Iteration #%s', index)
            ret_val1 = self.ae.process_packet(pkt1)
            ret_val2 = self.ae.process_packet(pkt2)
            logger.info('Checking index - [%s] - count - [%s]',
                        index, self.ae.packet_count)
            if index * 2 < self.ae.packet_count:
                logger.info('Expecting false - [%s]', ret_val1)
                self.assertFalse(ret_val1)
                self.assertFalse(ret_val2)
            else:
                logger.info('Expecting true - [%s]', ret_val1)
                self.assertTrue(ret_val1)
                self.assertTrue(ret_val2)

    def test_start_two_ipv6_udp_attacks(self):
        """
        Tests to ensure that two IPv6 UDP attacks have been triggered
        :return:
        """
        pkt1 = (Ether(src=get_if_hwaddr('lo'), dst=self.dst_mac,
                      type=trans_sec.consts.IPV6_TYPE) /
                IPv6(dst=self.dst_ipv6,
                     src=self.src_ipv6,
                     nh=trans_sec.consts.UDP_PROTO) /
                UdpInt() /
                IntShim(length=9, next_proto=trans_sec.consts.UDP_PROTO) /
                IntHeader(meta_len=1) /
                IntMeta1(switch_id=3) /
                IntMeta2(switch_id=2) /
                SourceIntMeta(switch_id=1, orig_mac=self.orig_mac) /
                UDP(dport=self.dport, sport=self.sport) /
                'hello transparent-security')
        pkt2 = (Ether(src=get_if_hwaddr('lo'), dst=self.dst_mac,
                      type=trans_sec.consts.IPV6_TYPE) /
                IPv6(dst=self.dst_ipv6,
                     src=self.src_ipv6,
                     nh=trans_sec.consts.UDP_PROTO) /
                UdpInt() /
                IntShim(length=9, next_proto=trans_sec.consts.UDP_PROTO) /
                IntHeader(meta_len=1) /
                IntMeta1(switch_id=3) /
                IntMeta2(switch_id=2) /
                SourceIntMeta(switch_id=1, orig_mac=self.orig_mac) /
                UDP(dport=self.dport, sport=self.sport) /
                'hello transparent-security')
        for index in range(0, self.ae.packet_count):
            logger.info('Iteration #%s', index)
            ret_val1 = self.ae.process_packet(pkt1)
            ret_val2 = self.ae.process_packet(pkt2)
            logger.info('Checking index - [%s] - count - [%s]',
                        index, self.ae.packet_count)
            if index * 2 < self.ae.packet_count:
                logger.info('Expecting false - [%s]', ret_val1)
                self.assertFalse(ret_val1)
                self.assertFalse(ret_val2)
            else:
                logger.info('Expecting true - [%s]', ret_val1)
                self.assertTrue(ret_val1)
                self.assertTrue(ret_val2)

    def test_start_two_ipv4_tcp_attacks(self):
        """
        Tests to ensure that two IPv4 TCP attacks have been triggered
        :return:
        """
        pkt1 = (Ether(src=get_if_hwaddr('lo'), dst=self.dst_mac) /
                IP(dst=self.dst_ipv4, src=self.src_ipv4,
                   proto=trans_sec.consts.UDP_PROTO) /
                UdpInt() /
                IntShim(length=9, next_proto=trans_sec.consts.TCP_PROTO) /
                IntHeader(meta_len=1) /
                IntMeta1(switch_id=3) /
                IntMeta2(switch_id=2) /
                SourceIntMeta(switch_id=1, orig_mac=self.orig_mac) /
                TCP(dport=self.dport, sport=self.sport) /
                'hello transparent-security')
        pkt2 = (Ether(src=get_if_hwaddr('lo'), dst=self.dst_mac) /
                IP(dst=self.dst_ipv4, src=self.src_ipv4,
                   proto=trans_sec.consts.UDP_PROTO) /
                UdpInt() /
                IntShim(length=9, next_proto=trans_sec.consts.TCP_PROTO) /
                IntHeader(meta_len=1) /
                IntMeta1(switch_id=3) /
                IntMeta2(switch_id=2) /
                SourceIntMeta(switch_id=1, orig_mac=self.orig_mac) /
                TCP(dport=self.dport, sport=self.sport) /
                'hello transparent-security')
        for index in range(0, self.ae.packet_count):
            logger.info('Iteration #%s', index)
            ret_val1 = self.ae.process_packet(pkt1)
            ret_val2 = self.ae.process_packet(pkt2)
            logger.info('Checking index - [%s] - count - [%s]',
                        index, self.ae.packet_count)
            if index * 2 < self.ae.packet_count:
                logger.info('Expecting false - [%s]', ret_val1)
                self.assertFalse(ret_val1)
                self.assertFalse(ret_val2)
            else:
                logger.info('Expecting true - [%s]', ret_val1)
                self.assertTrue(ret_val1)
                self.assertTrue(ret_val2)

    def test_start_two_ipv6_tcp_attacks(self):
        """
        Tests to ensure that two IPv6 TCP attacks have been triggered
        :return:
        """
        pkt1 = (Ether(src=get_if_hwaddr('lo'), dst=self.dst_mac,
                      type=trans_sec.consts.IPV6_TYPE) /
                IPv6(dst=self.dst_ipv6,
                     src=self.src_ipv6,
                     nh=trans_sec.consts.UDP_PROTO) /
                UdpInt() /
                IntShim(length=9, next_proto=trans_sec.consts.TCP_PROTO) /
                IntHeader(meta_len=1) /
                IntMeta1(switch_id=3) /
                IntMeta2(switch_id=2) /
                SourceIntMeta(switch_id=1, orig_mac=self.orig_mac) /
                TCP(dport=self.dport, sport=self.sport) /
                'hello transparent-security')
        pkt2 = (Ether(src=get_if_hwaddr('lo'), dst=self.dst_mac,
                      type=trans_sec.consts.IPV6_TYPE) /
                IPv6(dst=self.dst_ipv6,
                     src=self.src_ipv6,
                     nh=trans_sec.consts.UDP_PROTO) /
                UdpInt() /
                IntShim(length=9, next_proto=trans_sec.consts.TCP_PROTO) /
                IntHeader(meta_len=1) /
                IntMeta1(switch_id=3) /
                IntMeta2(switch_id=2) /
                SourceIntMeta(switch_id=1, orig_mac=self.orig_mac) /
                TCP(dport=self.dport, sport=self.sport) /
                'hello transparent-security')
        for index in range(0, self.ae.packet_count):
            logger.info('Iteration #%s', index)
            ret_val1 = self.ae.process_packet(pkt1)
            ret_val2 = self.ae.process_packet(pkt2)
            logger.info('Checking index - [%s] - count - [%s]',
                        index, self.ae.packet_count)
            if index * 2 < self.ae.packet_count:
                logger.info('Expecting false - [%s]', ret_val1)
                self.assertFalse(ret_val1)
                self.assertFalse(ret_val2)
            else:
                logger.info('Expecting true - [%s]', ret_val1)
                self.assertTrue(ret_val1)
                self.assertTrue(ret_val2)
def rand_mac():
return "%02x:%02x:%02x:%02x:%02x:%02x" % (
randint(0, 255),
randint(0, 255),
randint(0, 255),
randint(0, 255),
randint(0, 255),
randint(0, 255)
)
| 42.262243 | 79 | 0.593526 | 3,434 | 26,752 | 4.383227 | 0.068433 | 0.021127 | 0.041855 | 0.031624 | 0.907653 | 0.905528 | 0.900877 | 0.8941 | 0.881278 | 0.874635 | 0 | 0.027617 | 0.307005 | 26,752 | 632 | 80 | 42.329114 | 0.784293 | 0.13741 | 0 | 0.742009 | 0 | 0 | 0.060242 | 0.004839 | 0 | 0 | 0 | 0 | 0.16895 | 1 | 0.068493 | false | 0 | 0.03653 | 0.002283 | 0.109589 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
c38a95a649462495d5c6c5682d6884147bbc56ef | 110 | py | Python | URI 1144 SEQUENCIA LOGICA.py | castrolimoeiro/Uri-exercise | 7a9227c55a79f14fe8bde4aa0ebb4c268bbda4bb | [
"MIT"
] | null | null | null | URI 1144 SEQUENCIA LOGICA.py | castrolimoeiro/Uri-exercise | 7a9227c55a79f14fe8bde4aa0ebb4c268bbda4bb | [
"MIT"
] | null | null | null | URI 1144 SEQUENCIA LOGICA.py | castrolimoeiro/Uri-exercise | 7a9227c55a79f14fe8bde4aa0ebb4c268bbda4bb | [
"MIT"
] | null | null | null | n = int(input())
for c in range(1, n+1):
print(f'{c} {c*c} {c*c*c}')
print(f'{c} {c*c+1} {c*c*c+1}')
| 18.333333 | 35 | 0.445455 | 28 | 110 | 1.75 | 0.357143 | 0.367347 | 0.367347 | 0.244898 | 0.428571 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045977 | 0.209091 | 110 | 5 | 36 | 22 | 0.517241 | 0 | 0 | 0 | 0 | 0 | 0.345455 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 1 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 7 |
c399f9a6f61adf2416de28ec52e13d89bbfe05c6 | 44,859 | py | Python | django/electric_power_sale/models.py | zcjwin/hasura-django-auth | fd052bb05f051ee7fdaecf9433d5f6d7db580ca9 | [
"MIT"
] | null | null | null | django/electric_power_sale/models.py | zcjwin/hasura-django-auth | fd052bb05f051ee7fdaecf9433d5f6d7db580ca9 | [
"MIT"
] | 1 | 2022-03-21T03:04:31.000Z | 2022-03-21T03:04:31.000Z | django/electric_power_sale/models.py | zcjwin/hasura-django-auth | fd052bb05f051ee7fdaecf9433d5f6d7db580ca9 | [
"MIT"
] | null | null | null | from django.db import models
from api.models import HasuraUser, Organization
from datetime import date,datetime
def default_cur_date():
    """Default callable returning today's date (for Django DateField)."""
    today = date.today()
    return today
def default_cur_mth():
    """Default callable returning the current month as an int, e.g. 202403."""
    today = date.today()
    # Equivalent to int(today.strftime("%Y%m")) without string round-trips.
    return today.year * 100 + today.month
def default_cur_datetime():
    """Default callable returning the current local timestamp (naive)."""
    now = datetime.now()
    return now
def default_year_start_date():
    """Default callable returning January 1st of the current year."""
    return date.today().replace(month=1, day=1)
def default_year_end_date():
    """Default callable returning December 31st of the current year."""
    return date.today().replace(month=12, day=31)
def default_cur_year():
    """Default callable returning the current year as an int."""
    today = date.today()
    return today.year
class Agent(models.Model):
    """Agent (intermediary/broker) master data.

    Chinese verbose_name strings are user-facing labels rendered by the
    Django admin/forms and are intentionally left untranslated.
    """
    # Display name of the agent.
    name = models.CharField("居间名称",max_length=200)
    # Owning organization; SET_NULL preserves agents if the org is deleted.
    organization = models.ForeignKey(Organization, verbose_name="上级机构",null=True,on_delete=models.SET_NULL)
    address = models.CharField("地址",null=True,blank=True,max_length=200)
    agent_no = models.CharField("编号",null=True,blank=True,max_length=60)
    # Up to two contact persons with their phone numbers.
    toucher_1 = models.CharField("联系人1",null=True,blank=True,max_length=60)
    toucher_2 = models.CharField("联系人2",null=True,blank=True,max_length=60)
    toucher_mobile_1 = models.CharField("联系电话1",null=True,blank=True,max_length=60)
    toucher_mobile_2 = models.CharField("联系电话2",null=True,blank=True,max_length=60)
    # Default revenue-sharing ratio applied when a customer does not
    # override it.
    default_agent_rate = models.DecimalField("默认居间分成比例",max_digits=20,decimal_places=4,default=0)
    note = models.TextField("备注1",null=True,blank=True)
    is_active = models.BooleanField("是否有效", default=True)
    # Audit trail: creator/updater and timestamps.
    created_by = models.ForeignKey(HasuraUser, verbose_name="录入人",null=True,on_delete=models.SET_NULL)
    created_at = models.DateTimeField("录入时间", default=default_cur_datetime)
    updated_by = models.ForeignKey(HasuraUser, verbose_name="更新人",related_name="+",null=True,on_delete=models.SET_NULL)
    updated_at = models.DateTimeField("更新时间", default=default_cur_datetime)
# Create your models here.
class Customer(models.Model):
    """Customer master data.

    Holds contact/grid-account details, pricing/settlement configuration
    and the link to the selling agent.  Chinese verbose_name strings are
    user-facing labels and are intentionally left untranslated.
    """
    def get_customer_no():
        """Generate a customer number: 8-digit date, dash, 6-digit sequence.

        Used as the callable default for ``custom_no``; Django evaluates
        it lazily at row-creation time, so referencing ``Customer`` here
        is safe.

        Returns:
            string: "YYYYMMDD-NNNNNN"
        """
        count = Customer.objects.count()
        # BUG FIX: the original tested an undefined name ``no`` (NameError
        # at first call) -- the intent was to seed the sequence from the
        # current row count.
        if count is None:
            count = 1
        else:
            count += 1
        # BUG FIX: the original format used %M (minute, always "00" for a
        # date) instead of %m (month), producing "YYYY00DD" instead of the
        # documented 8-digit date.
        return "{:%Y%m%d}-{:06d}".format(date.today(), count)

    name = models.CharField("客户名称",max_length=200)
    organization = models.ForeignKey(Organization, verbose_name="所属机构",null=True,on_delete=models.SET_NULL)
    address = models.CharField("地址",null=True,blank=True,max_length=200)
    custom_no = models.CharField("客户编号",null=True,blank=True,max_length=60,default=get_customer_no)
    # Up to two contact persons with their phone numbers.
    toucher_1 = models.CharField("联系人1",null=True,blank=True,max_length=60)
    toucher_2 = models.CharField("联系人2",null=True,blank=True,max_length=60)
    toucher_mobile_1 = models.CharField("联系电话1",null=True,blank=True,max_length=60)
    toucher_mobile_2 = models.CharField("联系电话2",null=True,blank=True,max_length=60)
    # Credentials for the power-grid portal.
    # NOTE(review): grid_password is stored in plain text -- consider
    # encrypting at rest.
    grid_account = models.CharField("电网账号",null=True,blank=True,max_length=60)
    grid_password = models.CharField("电网密码",null=True,blank=True,max_length=60)
    elect_level = models.CharField("电压等级",null=True,blank=True,max_length=60)
    transformer_volume = models.CharField("变压器容量",null=True,blank=True,max_length=60)
    # Transformer capacity category (below / above 35 KVA).
    TRANSFORMER_TYPE_LT_35="transformer_type_lt_35"
    TRANSFORMER_TYPE_GT_35="transformer_type_gt_35"
    # BUG FIX: the original listed TRANSFORMER_TYPE_LT_35 for both
    # choices, making "35KVA以上" unselectable; the second entry must be
    # TRANSFORMER_TYPE_GT_35.
    TRANSFORMER_TYPE_CHOICES = [(TRANSFORMER_TYPE_LT_35,"35KVA以下"),(TRANSFORMER_TYPE_GT_35,"35KVA以上")]
    transformer_type = models.CharField("变压器容量类型",
    choices=TRANSFORMER_TYPE_CHOICES ,
    default=TRANSFORMER_TYPE_LT_35,
    null=True,blank=True,max_length=60)
    # Income settlement method: service-fee rate / fixed fee / revenue share.
    INCOME_TYPE_RATE="income_type_rate"
    INCOME_TYPE_FIXED="income_type_fixed"
    INCOME_TYPE_DIVIDE_RATE="income_type_divide_rate"
    INCOME_TYPE_CHOICES = [(INCOME_TYPE_RATE,"按服务费率"),(INCOME_TYPE_FIXED,"按固定金额"),(INCOME_TYPE_DIVIDE_RATE,"按分成比例")]
    income_type = models.CharField("收入结算方式",max_length=40,
    choices=INCOME_TYPE_CHOICES ,
    default=INCOME_TYPE_RATE)
    # Electricity-usage profile: regular / time-of-use / high-consumption
    # variants.
    USE_TYPE_COMMON="use_type_common"
    USE_TYPE_SEPRATE_TIME="use_type_seprate_time"
    USE_TYPE_COMMON_HIGH_POWER="use_type_common_high_power"
    USE_TYPE_HIGH_POWER_SEPRATE_TIME="use_type_high_power_seprate_time"
    USE_TYPE_CHOICES = [(USE_TYPE_COMMON,"常规"),(USE_TYPE_SEPRATE_TIME,"常规-分时段"),(USE_TYPE_COMMON_HIGH_POWER,"常规-高耗能"),(USE_TYPE_HIGH_POWER_SEPRATE_TIME,"高耗能-分时段")]
    use_type = models.CharField("客户用电性质",max_length=80,
    choices=USE_TYPE_CHOICES ,
    default=USE_TYPE_COMMON)
    # Pricing parameters; which one applies depends on income_type.
    rate = models.DecimalField("服务费率",max_digits=10,decimal_places=4,null=True,default=0)
    fix_fee = models.DecimalField("固定服务费",null=True,max_digits=20,decimal_places=4,default=0)
    divide_rate = models.DecimalField("分成比例",null=True,max_digits=20,decimal_places=4,default=0)
    # Selling agent and the share this customer grants the agent.
    agent = models.ForeignKey(Agent, verbose_name="所属居间",null=True, on_delete=models.SET_NULL)
    agent_rate = models.DecimalField("与居间分成比例",null=True,max_digits=20,decimal_places=4,default=0)
    tax_diff = models.DecimalField("税差",max_digits=20,null=True,decimal_places=4,default=0)
    note_1 = models.TextField("备注1",null=True,blank=True)
    note_2 = models.TextField("备注2",null=True,blank=True)
    note_3 = models.TextField("备注3",null=True,blank=True)
    is_active = models.BooleanField("是否有效", default=True)
    # Audit trail: creator/updater and timestamps.
    created_by = models.ForeignKey(HasuraUser, verbose_name="录入人",null=True,on_delete=models.SET_NULL)
    created_at = models.DateTimeField("录入时间", default=default_cur_datetime)
    updated_by = models.ForeignKey(HasuraUser, verbose_name="更新人",related_name="+",null=True,on_delete=models.SET_NULL)
    updated_at = models.DateTimeField("更新时间", default=default_cur_datetime)
class DeviceNo(models.Model):
    """Customer meter-account (户号) record.

    Each customer may own several electricity meters; one row per meter
    number.  Rows are deleted together with their customer (CASCADE).
    """
    customer = models.ForeignKey(Customer, verbose_name="关联客户",on_delete=models.CASCADE)
    # Meter number as assigned by the grid operator.
    device_no = models.CharField("电表号",max_length=40)
    is_active = models.BooleanField("是否有效", default=True)
    note = models.TextField("备注1",null=True,blank=True)
class Contract(models.Model):
    """Sales contract (销售合同).

    Holds the contract term, the energy-pricing scheme (flat vs. time-of-use)
    and the unit price for each tariff period.
    """

    name = models.CharField("合同名称", max_length=200)
    organization = models.ForeignKey(Organization, verbose_name="所属机构", null=True, on_delete=models.SET_NULL)
    customer = models.ForeignKey(Customer, verbose_name="关联客户", null=True, on_delete=models.SET_NULL)
    contract_no = models.CharField("合同编号", max_length=40, blank=True, null=True)
    contract_year = models.IntegerField("所属年度", default=default_cur_year)
    contract_start_date = models.DateField("合同生效日期", default=default_year_start_date)
    contract_end_date = models.DateField("合同结束日期", default=default_year_end_date)
    # Energy-pricing scheme (电量计费方式).
    # Flat: one price for all hours.
    PRICE_TYPE_COMMON = "price_type_common"
    # Time-of-use: separate peak/flat/valley prices.
    PRICE_TYPE_SEPRATE_TIME = "price_type_seprate_time"
    PRICE_TYPE_CHOICES = [(PRICE_TYPE_COMMON, "常规"), (PRICE_TYPE_SEPRATE_TIME, "分时段")]
    # Fix: verbose name used to read "电价价方式" (garbled, duplicated 价);
    # corrected to "电量计费方式", matching the concept named in the comment above.
    contract_price_type = models.CharField("电量计费方式", max_length=40,
                                           choices=PRICE_TYPE_CHOICES,
                                           default=PRICE_TYPE_COMMON)
    # Unit prices per tariff period.  The labels say 元/KWA as in the original
    # data model — presumably 元/KWh was intended; TODO confirm before changing.
    price_common = models.DecimalField("常规时段电价(元/KWA)", max_digits=20, decimal_places=4, default=0)
    price_peak = models.DecimalField("峰时段电价(元/KWA)", max_digits=20, decimal_places=4, default=0)
    price_flat = models.DecimalField("平时段电价(元/KWA)", max_digits=20, decimal_places=4, default=0)
    price_valley = models.DecimalField("谷时段电价(元/KWA)", max_digits=20, decimal_places=4, default=0)
    # Workflow state; records start as drafts.
    state = models.CharField("状态", max_length=40, default="draft")
    note = models.TextField("备注1", null=True, blank=True)
    # Audit trail.
    created_by = models.ForeignKey(HasuraUser, verbose_name="录入人", null=True, on_delete=models.SET_NULL)
    created_at = models.DateTimeField("录入时间", default=default_cur_datetime)
    updated_at = models.DateTimeField("更新时间", default=default_cur_datetime)
    updated_by = models.ForeignKey(HasuraUser, verbose_name="更新人", related_name="+", null=True, on_delete=models.SET_NULL)
    # Soft-delete / validity flag.
    is_active = models.BooleanField("是否有效", default=True)
class ContractLine(models.Model):
    """Contract detail: the monthly energy plan (合同明细/电量计划表).

    One row per contract, holding for each month 1-12 and each tariff period
    (common/flat/valley/peak): the planned energy, the plan adjustment, and
    the settled (actual) energy.

    NOTE(review): the 144 per-month columns below could be normalized into a
    child table keyed by (month, period) — left as-is to preserve the schema.
    """
# Parent contract; deleting the contract cascades to its plan rows.
contract = models.ForeignKey(Contract, verbose_name="合同",on_delete=models.CASCADE)
# Planned energy (计划电量) per month and tariff period.
plan_common_mth_1= models.DecimalField("计划电量-常规",max_digits=20,decimal_places=4,default=0)
plan_flat_mth_1= models.DecimalField("计划电量-平时段",max_digits=20,decimal_places=4,default=0)
plan_valley_mth_1= models.DecimalField("计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
plan_peak_mth_1= models.DecimalField("计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
plan_common_mth_2= models.DecimalField("计划电量-常规",max_digits=20,decimal_places=4,default=0)
plan_flat_mth_2= models.DecimalField("计划电量-平时段",max_digits=20,decimal_places=4,default=0)
plan_valley_mth_2= models.DecimalField("计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
plan_peak_mth_2= models.DecimalField("计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
plan_common_mth_3= models.DecimalField("计划电量-常规",max_digits=20,decimal_places=4,default=0)
plan_flat_mth_3= models.DecimalField("计划电量-平时段",max_digits=20,decimal_places=4,default=0)
plan_valley_mth_3= models.DecimalField("计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
plan_peak_mth_3= models.DecimalField("计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
plan_common_mth_4= models.DecimalField("计划电量-常规",max_digits=20,decimal_places=4,default=0)
plan_flat_mth_4= models.DecimalField("计划电量-平时段",max_digits=20,decimal_places=4,default=0)
plan_valley_mth_4= models.DecimalField("计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
plan_peak_mth_4= models.DecimalField("计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
plan_common_mth_5= models.DecimalField("计划电量-常规",max_digits=20,decimal_places=4,default=0)
plan_flat_mth_5= models.DecimalField("计划电量-平时段",max_digits=20,decimal_places=4,default=0)
plan_valley_mth_5= models.DecimalField("计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
plan_peak_mth_5= models.DecimalField("计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
plan_common_mth_6= models.DecimalField("计划电量-常规",max_digits=20,decimal_places=4,default=0)
plan_flat_mth_6= models.DecimalField("计划电量-平时段",max_digits=20,decimal_places=4,default=0)
plan_valley_mth_6= models.DecimalField("计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
plan_peak_mth_6= models.DecimalField("计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
plan_common_mth_7= models.DecimalField("计划电量-常规",max_digits=20,decimal_places=4,default=0)
plan_flat_mth_7= models.DecimalField("计划电量-平时段",max_digits=20,decimal_places=4,default=0)
plan_valley_mth_7= models.DecimalField("计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
plan_peak_mth_7= models.DecimalField("计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
plan_common_mth_8= models.DecimalField("计划电量-常规",max_digits=20,decimal_places=4,default=0)
plan_flat_mth_8= models.DecimalField("计划电量-平时段",max_digits=20,decimal_places=4,default=0)
plan_valley_mth_8= models.DecimalField("计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
plan_peak_mth_8= models.DecimalField("计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
plan_common_mth_9= models.DecimalField("计划电量-常规",max_digits=20,decimal_places=4,default=0)
plan_flat_mth_9= models.DecimalField("计划电量-平时段",max_digits=20,decimal_places=4,default=0)
plan_valley_mth_9= models.DecimalField("计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
plan_peak_mth_9= models.DecimalField("计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
plan_common_mth_10= models.DecimalField("计划电量-常规",max_digits=20,decimal_places=4,default=0)
plan_flat_mth_10= models.DecimalField("计划电量-平时段",max_digits=20,decimal_places=4,default=0)
plan_valley_mth_10= models.DecimalField("计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
plan_peak_mth_10= models.DecimalField("计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
plan_common_mth_11= models.DecimalField("计划电量-常规",max_digits=20,decimal_places=4,default=0)
plan_flat_mth_11= models.DecimalField("计划电量-平时段",max_digits=20,decimal_places=4,default=0)
plan_valley_mth_11= models.DecimalField("计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
plan_peak_mth_11= models.DecimalField("计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
plan_common_mth_12= models.DecimalField("计划电量-常规",max_digits=20,decimal_places=4,default=0)
plan_flat_mth_12= models.DecimalField("计划电量-平时段",max_digits=20,decimal_places=4,default=0)
plan_valley_mth_12= models.DecimalField("计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
plan_peak_mth_12= models.DecimalField("计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
# The fields below are synchronized from other business tables.
# Plan adjustments (计划电量调整) per month and tariff period.
adjust_plan_common_mth_1= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_1= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_1= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_1= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_2= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_2= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_2= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_2= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_3= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_3= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_3= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_3= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_4= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_4= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_4= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_4= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_5= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_5= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_5= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_5= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_6= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_6= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_6= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_6= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_7= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_7= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_7= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_7= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_8= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_8= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_8= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_8= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_9= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_9= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_9= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_9= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_10= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_10= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_10= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_10= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_11= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_11= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_11= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_11= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_12= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_12= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_12= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_12= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
# Settled (actual) energy (电量结算) per month and tariff period.
act_common_mth_1= models.DecimalField("电量结算-常规",max_digits=20,decimal_places=4,default=0)
act_flat_mth_1= models.DecimalField("电量结算-平时段",max_digits=20,decimal_places=4,default=0)
act_valley_mth_1= models.DecimalField("电量结算-谷时段",max_digits=20,decimal_places=4,default=0)
act_peak_mth_1= models.DecimalField("电量结算-峰时段",max_digits=20,decimal_places=4,default=0)
act_common_mth_2= models.DecimalField("电量结算-常规",max_digits=20,decimal_places=4,default=0)
act_flat_mth_2= models.DecimalField("电量结算-平时段",max_digits=20,decimal_places=4,default=0)
act_valley_mth_2= models.DecimalField("电量结算-谷时段",max_digits=20,decimal_places=4,default=0)
act_peak_mth_2= models.DecimalField("电量结算-峰时段",max_digits=20,decimal_places=4,default=0)
act_common_mth_3= models.DecimalField("电量结算-常规",max_digits=20,decimal_places=4,default=0)
act_flat_mth_3= models.DecimalField("电量结算-平时段",max_digits=20,decimal_places=4,default=0)
act_valley_mth_3= models.DecimalField("电量结算-谷时段",max_digits=20,decimal_places=4,default=0)
act_peak_mth_3= models.DecimalField("电量结算-峰时段",max_digits=20,decimal_places=4,default=0)
act_common_mth_4= models.DecimalField("电量结算-常规",max_digits=20,decimal_places=4,default=0)
act_flat_mth_4= models.DecimalField("电量结算-平时段",max_digits=20,decimal_places=4,default=0)
act_valley_mth_4= models.DecimalField("电量结算-谷时段",max_digits=20,decimal_places=4,default=0)
act_peak_mth_4= models.DecimalField("电量结算-峰时段",max_digits=20,decimal_places=4,default=0)
act_common_mth_5= models.DecimalField("电量结算-常规",max_digits=20,decimal_places=4,default=0)
act_flat_mth_5= models.DecimalField("电量结算-平时段",max_digits=20,decimal_places=4,default=0)
act_valley_mth_5= models.DecimalField("电量结算-谷时段",max_digits=20,decimal_places=4,default=0)
act_peak_mth_5= models.DecimalField("电量结算-峰时段",max_digits=20,decimal_places=4,default=0)
act_common_mth_6= models.DecimalField("电量结算-常规",max_digits=20,decimal_places=4,default=0)
act_flat_mth_6= models.DecimalField("电量结算-平时段",max_digits=20,decimal_places=4,default=0)
act_valley_mth_6= models.DecimalField("电量结算-谷时段",max_digits=20,decimal_places=4,default=0)
act_peak_mth_6= models.DecimalField("电量结算-峰时段",max_digits=20,decimal_places=4,default=0)
act_common_mth_7= models.DecimalField("电量结算-常规",max_digits=20,decimal_places=4,default=0)
act_flat_mth_7= models.DecimalField("电量结算-平时段",max_digits=20,decimal_places=4,default=0)
act_valley_mth_7= models.DecimalField("电量结算-谷时段",max_digits=20,decimal_places=4,default=0)
act_peak_mth_7= models.DecimalField("电量结算-峰时段",max_digits=20,decimal_places=4,default=0)
act_common_mth_8= models.DecimalField("电量结算-常规",max_digits=20,decimal_places=4,default=0)
act_flat_mth_8= models.DecimalField("电量结算-平时段",max_digits=20,decimal_places=4,default=0)
act_valley_mth_8= models.DecimalField("电量结算-谷时段",max_digits=20,decimal_places=4,default=0)
act_peak_mth_8= models.DecimalField("电量结算-峰时段",max_digits=20,decimal_places=4,default=0)
act_common_mth_9= models.DecimalField("电量结算-常规",max_digits=20,decimal_places=4,default=0)
act_flat_mth_9= models.DecimalField("电量结算-平时段",max_digits=20,decimal_places=4,default=0)
act_valley_mth_9= models.DecimalField("电量结算-谷时段",max_digits=20,decimal_places=4,default=0)
act_peak_mth_9= models.DecimalField("电量结算-峰时段",max_digits=20,decimal_places=4,default=0)
act_common_mth_10= models.DecimalField("电量结算-常规",max_digits=20,decimal_places=4,default=0)
act_flat_mth_10= models.DecimalField("电量结算-平时段",max_digits=20,decimal_places=4,default=0)
act_valley_mth_10= models.DecimalField("电量结算-谷时段",max_digits=20,decimal_places=4,default=0)
act_peak_mth_10= models.DecimalField("电量结算-峰时段",max_digits=20,decimal_places=4,default=0)
act_common_mth_11= models.DecimalField("电量结算-常规",max_digits=20,decimal_places=4,default=0)
act_flat_mth_11= models.DecimalField("电量结算-平时段",max_digits=20,decimal_places=4,default=0)
act_valley_mth_11= models.DecimalField("电量结算-谷时段",max_digits=20,decimal_places=4,default=0)
act_peak_mth_11= models.DecimalField("电量结算-峰时段",max_digits=20,decimal_places=4,default=0)
act_common_mth_12= models.DecimalField("电量结算-常规",max_digits=20,decimal_places=4,default=0)
act_flat_mth_12= models.DecimalField("电量结算-平时段",max_digits=20,decimal_places=4,default=0)
act_valley_mth_12= models.DecimalField("电量结算-谷时段",max_digits=20,decimal_places=4,default=0)
act_peak_mth_12= models.DecimalField("电量结算-峰时段",max_digits=20,decimal_places=4,default=0)
# Workflow state; records start as drafts.
state = models.CharField("状态",max_length=40,default="draft")
note = models.TextField("备注1",null=True,blank=True)
# Audit trail: who created/updated the record and when.
created_by = models.ForeignKey(HasuraUser, verbose_name="录入人",null=True,on_delete=models.SET_NULL)
created_at = models.DateTimeField("录入时间", default=default_cur_datetime)
updated_at = models.DateTimeField("更新时间", default=default_cur_datetime)
updated_by = models.ForeignKey(HasuraUser, verbose_name="更新人",related_name="+",null=True,on_delete=models.SET_NULL)
class MthAdjust(models.Model):
    """Monthly energy-plan adjustment, master record.

    One record per organization and month; the per-customer detail rows
    are stored in MthAdjustLine.
    """

    # Organization this adjustment belongs to.
    organization = models.ForeignKey(Organization, verbose_name="所属机构", null=True, on_delete=models.SET_NULL)
    # Month being adjusted.
    mth = models.IntegerField(verbose_name="月份", default=default_cur_mth)
    # Workflow state; records start as drafts.
    state = models.CharField(verbose_name="状态", max_length=40, default="draft")
    # Free-form remark.
    note = models.TextField(verbose_name="备注", null=True, blank=True)
    # Audit trail: who created/updated the record and when.
    created_by = models.ForeignKey(HasuraUser, verbose_name="录入人", null=True, on_delete=models.SET_NULL)
    created_at = models.DateTimeField(verbose_name="录入时间", default=default_cur_datetime)
    updated_by = models.ForeignKey(HasuraUser, verbose_name="更新人", related_name="+", null=True, on_delete=models.SET_NULL)
    updated_at = models.DateTimeField(verbose_name="更新时间", default=default_cur_datetime)
class MthAdjustLine(models.Model):
    """Monthly energy-plan adjustment, detail row (月度电量调整子表).

    One row per customer/contract under a MthAdjust master record, holding
    the planned energy before and after the adjustment for every month 1-12
    and tariff period (common/flat/valley/peak).
    """
# Parent master record; deleting it cascades to its detail rows.
mth_adjust= models.ForeignKey(MthAdjust, verbose_name="月度电量调整主表",on_delete=models.CASCADE)
customer = models.ForeignKey(Customer, verbose_name="关联客户",null=True,on_delete=models.SET_NULL)
contract = models.ForeignKey(Contract, verbose_name="关联合同",null=True,on_delete=models.SET_NULL)
# Planned energy BEFORE the adjustment (调整前).
previous_plan_common_mth_1= models.DecimalField("调整前计划电量-常规",max_digits=20,decimal_places=4,default=0)
previous_plan_flat_mth_1= models.DecimalField("调整前计划电量-平时段",max_digits=20,decimal_places=4,default=0)
previous_plan_valley_mth_1= models.DecimalField("调整前计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
previous_plan_peak_mth_1= models.DecimalField("调整前计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
previous_plan_common_mth_2= models.DecimalField("调整前计划电量-常规",max_digits=20,decimal_places=4,default=0)
previous_plan_flat_mth_2= models.DecimalField("调整前计划电量-平时段",max_digits=20,decimal_places=4,default=0)
previous_plan_valley_mth_2= models.DecimalField("调整前计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
previous_plan_peak_mth_2= models.DecimalField("调整前计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
previous_plan_common_mth_3= models.DecimalField("调整前计划电量-常规",max_digits=20,decimal_places=4,default=0)
previous_plan_flat_mth_3= models.DecimalField("调整前计划电量-平时段",max_digits=20,decimal_places=4,default=0)
previous_plan_valley_mth_3= models.DecimalField("调整前计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
previous_plan_peak_mth_3= models.DecimalField("调整前计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
previous_plan_common_mth_4= models.DecimalField("调整前计划电量-常规",max_digits=20,decimal_places=4,default=0)
previous_plan_flat_mth_4= models.DecimalField("调整前计划电量-平时段",max_digits=20,decimal_places=4,default=0)
previous_plan_valley_mth_4= models.DecimalField("调整前计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
previous_plan_peak_mth_4= models.DecimalField("调整前计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
previous_plan_common_mth_5= models.DecimalField("调整前计划电量-常规",max_digits=20,decimal_places=4,default=0)
previous_plan_flat_mth_5= models.DecimalField("调整前计划电量-平时段",max_digits=20,decimal_places=4,default=0)
previous_plan_valley_mth_5= models.DecimalField("调整前计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
previous_plan_peak_mth_5= models.DecimalField("调整前计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
previous_plan_common_mth_6= models.DecimalField("调整前计划电量-常规",max_digits=20,decimal_places=4,default=0)
previous_plan_flat_mth_6= models.DecimalField("调整前计划电量-平时段",max_digits=20,decimal_places=4,default=0)
previous_plan_valley_mth_6= models.DecimalField("调整前计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
previous_plan_peak_mth_6= models.DecimalField("调整前计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
previous_plan_common_mth_7= models.DecimalField("调整前计划电量-常规",max_digits=20,decimal_places=4,default=0)
previous_plan_flat_mth_7= models.DecimalField("调整前计划电量-平时段",max_digits=20,decimal_places=4,default=0)
previous_plan_valley_mth_7= models.DecimalField("调整前计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
previous_plan_peak_mth_7= models.DecimalField("调整前计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
previous_plan_common_mth_8= models.DecimalField("调整前计划电量-常规",max_digits=20,decimal_places=4,default=0)
previous_plan_flat_mth_8= models.DecimalField("调整前计划电量-平时段",max_digits=20,decimal_places=4,default=0)
previous_plan_valley_mth_8= models.DecimalField("调整前计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
previous_plan_peak_mth_8= models.DecimalField("调整前计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
previous_plan_common_mth_9= models.DecimalField("调整前计划电量-常规",max_digits=20,decimal_places=4,default=0)
previous_plan_flat_mth_9= models.DecimalField("调整前计划电量-平时段",max_digits=20,decimal_places=4,default=0)
previous_plan_valley_mth_9= models.DecimalField("调整前计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
previous_plan_peak_mth_9= models.DecimalField("调整前计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
previous_plan_common_mth_10= models.DecimalField("调整前计划电量-常规",max_digits=20,decimal_places=4,default=0)
previous_plan_flat_mth_10= models.DecimalField("调整前计划电量-平时段",max_digits=20,decimal_places=4,default=0)
previous_plan_valley_mth_10= models.DecimalField("调整前计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
previous_plan_peak_mth_10= models.DecimalField("调整前计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
previous_plan_common_mth_11= models.DecimalField("调整前计划电量-常规",max_digits=20,decimal_places=4,default=0)
previous_plan_flat_mth_11= models.DecimalField("调整前计划电量-平时段",max_digits=20,decimal_places=4,default=0)
previous_plan_valley_mth_11= models.DecimalField("调整前计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
previous_plan_peak_mth_11= models.DecimalField("调整前计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
previous_plan_common_mth_12= models.DecimalField("调整前计划电量-常规",max_digits=20,decimal_places=4,default=0)
previous_plan_flat_mth_12= models.DecimalField("调整前计划电量-平时段",max_digits=20,decimal_places=4,default=0)
previous_plan_valley_mth_12= models.DecimalField("调整前计划电量-谷时段",max_digits=20,decimal_places=4,default=0)
previous_plan_peak_mth_12= models.DecimalField("调整前计划电量-峰时段",max_digits=20,decimal_places=4,default=0)
# Planned energy AFTER the adjustment (调整后).
adjust_plan_common_mth_1= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_1= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_1= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_1= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_2= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_2= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_2= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_2= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_3= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_3= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_3= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_3= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_4= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_4= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_4= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_4= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_5= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_5= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_5= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_5= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_6= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_6= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_6= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_6= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_7= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_7= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_7= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_7= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_8= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_8= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_8= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_8= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_9= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_9= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_9= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_9= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_10= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_10= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_10= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_10= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_11= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_11= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_11= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_11= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_common_mth_12= models.DecimalField("计划电量调整-常规",max_digits=20,decimal_places=4,default=0)
adjust_plan_flat_mth_12= models.DecimalField("计划电量调整-平时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_valley_mth_12= models.DecimalField("计划电量调整-谷时段",max_digits=20,decimal_places=4,default=0)
adjust_plan_peak_mth_12= models.DecimalField("计划电量调整-峰时段",max_digits=20,decimal_places=4,default=0)
# Workflow state; records start as drafts.
state = models.CharField("状态",max_length=40,default="draft")
note = models.TextField("备注",null=True,blank=True)
class MthCustomerBill(models.Model):
    """Monthly customer settlement bill, master record.

    One record per organization and month; per-customer detail rows are
    stored in MthCustomerBillLine.
    """

    # Organization this bill belongs to.
    organization = models.ForeignKey(Organization, verbose_name="所属机构", null=True, on_delete=models.SET_NULL)
    # Month being settled.
    mth = models.IntegerField(verbose_name="月份", default=default_cur_mth)
    # Workflow state; records start as drafts.
    state = models.CharField(verbose_name="状态", max_length=40, default="draft")
    # Free-form remark.
    note = models.TextField(verbose_name="备注", null=True, blank=True)
    # Audit trail: who created/updated the record and when.
    created_by = models.ForeignKey(HasuraUser, verbose_name="录入人", null=True, on_delete=models.SET_NULL)
    created_at = models.DateTimeField(verbose_name="录入时间", default=default_cur_datetime)
    updated_by = models.ForeignKey(HasuraUser, verbose_name="更新人", related_name="+", null=True, on_delete=models.SET_NULL)
    updated_at = models.DateTimeField(verbose_name="更新时间", default=default_cur_datetime)
class MthCustomerBillLine(models.Model):
    """Monthly customer settlement bill, detail row.

    One row per customer/contract under a MthCustomerBill master record,
    carrying the settled energy, the applied prices and the service fee.
    """

    # Parent master record; deleting it cascades to its detail rows.
    mth_customer_bill = models.ForeignKey(MthCustomerBill, verbose_name="月度电量结算单主表", on_delete=models.CASCADE)
    customer = models.ForeignKey(Customer, verbose_name="关联客户", null=True, on_delete=models.SET_NULL)
    contract = models.ForeignKey(Contract, verbose_name="关联合同", null=True, on_delete=models.SET_NULL)
    # Settled energy for the month, per tariff period.
    act_common = models.DecimalField(verbose_name="月结算电量-常规", max_digits=20, decimal_places=4, default=0)
    act_flat = models.DecimalField(verbose_name="月结算电量-平时段", max_digits=20, decimal_places=4, default=0)
    act_valley = models.DecimalField(verbose_name="月结算电量-谷时段", max_digits=20, decimal_places=4, default=0)
    act_peak = models.DecimalField(verbose_name="月结算电量-峰时段", max_digits=20, decimal_places=4, default=0)
    # Settlement prices, per tariff period.
    price_common = models.DecimalField(verbose_name="常规时段电价(元/KWA)", max_digits=20, decimal_places=4, default=0)
    price_peak = models.DecimalField(verbose_name="峰时段电价(元/KWA)", max_digits=20, decimal_places=4, default=0)
    price_flat = models.DecimalField(verbose_name="平时段电价(元/KWA)", max_digits=20, decimal_places=4, default=0)
    price_valley = models.DecimalField(verbose_name="谷时段电价(元/KWA)", max_digits=20, decimal_places=4, default=0)
    # Agency service fee: rate and resulting amount.
    service_rate = models.DecimalField(verbose_name="代理服务费比例", max_digits=20, decimal_places=4, default=0)
    service_fee = models.DecimalField(verbose_name="代理服务费", max_digits=20, decimal_places=4, default=0)
    # Workflow state; records start as drafts.
    state = models.CharField(verbose_name="状态", max_length=40, default="draft")
    # Free-form remark.
    note = models.TextField(verbose_name="备注", null=True, blank=True)
    # Audit trail: who created/updated the record and when.
    created_by = models.ForeignKey(HasuraUser, verbose_name="录入人", null=True, on_delete=models.SET_NULL)
    created_at = models.DateTimeField(verbose_name="录入时间", default=default_cur_datetime)
    updated_by = models.ForeignKey(HasuraUser, verbose_name="更新人", related_name="+", null=True, on_delete=models.SET_NULL)
    updated_at = models.DateTimeField(verbose_name="更新时间", default=default_cur_datetime)
class MthAgentBill(models.Model):
    """Monthly agent (intermediary) settlement bill, master record.

    One record per organization and month; per-agent detail rows are stored
    in MthAgentBillLine.
    """

    # Organization this bill belongs to.
    organization = models.ForeignKey(Organization, verbose_name="所属机构", null=True, on_delete=models.SET_NULL)
    # Month being settled.
    mth = models.IntegerField(verbose_name="月份", default=default_cur_mth)
    # Workflow state; records start as drafts.
    state = models.CharField(verbose_name="状态", max_length=40, default="draft")
    # Free-form remark.
    note = models.TextField(verbose_name="备注", null=True, blank=True)
    # Audit trail: who created/updated the record and when.
    created_by = models.ForeignKey(HasuraUser, verbose_name="录入人", null=True, on_delete=models.SET_NULL)
    created_at = models.DateTimeField(verbose_name="录入时间", default=default_cur_datetime)
    updated_by = models.ForeignKey(HasuraUser, verbose_name="更新人", related_name="+", null=True, on_delete=models.SET_NULL)
    updated_at = models.DateTimeField(verbose_name="更新时间", default=default_cur_datetime)
class MthAgentBillLine(models.Model):
    """Monthly agent settlement bill, detail row.

    One row per agent under a MthAgentBill master record, holding the
    planned and settled energy, the prices applied, and the agent's
    revenue-share figures.
    """

    # Parent master record; deleting it cascades to its detail rows.
    mth_agent_bill = models.ForeignKey(MthAgentBill, verbose_name="月度电量结算单主表", on_delete=models.CASCADE)
    agent = models.ForeignKey(Agent, verbose_name="居间", null=True, on_delete=models.SET_NULL)
    # Planned energy (before adjustment), per tariff period.
    plan_common = models.DecimalField(verbose_name="调整前计划电量-常规", max_digits=20, decimal_places=4, default=0)
    plan_flat = models.DecimalField(verbose_name="调整前计划电量-平时段", max_digits=20, decimal_places=4, default=0)
    plan_valley = models.DecimalField(verbose_name="调整前计划电量-谷时段", max_digits=20, decimal_places=4, default=0)
    plan_peak = models.DecimalField(verbose_name="调整前计划电量-峰时段", max_digits=20, decimal_places=4, default=0)
    # Settled energy for the month, per tariff period.
    act_common = models.DecimalField(verbose_name="月结算电量-常规", max_digits=20, decimal_places=4, default=0)
    act_flat = models.DecimalField(verbose_name="月结算电量-平时段", max_digits=20, decimal_places=4, default=0)
    act_valley = models.DecimalField(verbose_name="月结算电量-谷时段", max_digits=20, decimal_places=4, default=0)
    act_peak = models.DecimalField(verbose_name="月结算电量-峰时段", max_digits=20, decimal_places=4, default=0)
    # Settlement prices, per tariff period.
    price_common = models.DecimalField(verbose_name="常规时段电价(元/KWA)", max_digits=20, decimal_places=4, default=0)
    price_peak = models.DecimalField(verbose_name="峰时段电价(元/KWA)", max_digits=20, decimal_places=4, default=0)
    price_flat = models.DecimalField(verbose_name="平时段电价(元/KWA)", max_digits=20, decimal_places=4, default=0)
    price_valley = models.DecimalField(verbose_name="谷时段电价(元/KWA)", max_digits=20, decimal_places=4, default=0)
    # Agency service fee: rate and amount.
    service_rate = models.DecimalField(verbose_name="代理服务费比例", max_digits=20, decimal_places=4, default=0)
    service_fee = models.DecimalField(verbose_name="代理服务费", max_digits=20, decimal_places=4, default=0)
    # Agent revenue share: rate, computed amount, VAT difference, and the
    # amount actually settled.
    agent_rate = models.DecimalField(verbose_name="居间分成比例", max_digits=20, decimal_places=4, default=0)
    agent_fee = models.DecimalField(verbose_name="居间分成金额", max_digits=20, decimal_places=4, default=0)
    tax_diff = models.DecimalField(verbose_name="增值税差额", max_digits=20, decimal_places=4, default=0)
    act_agent_fee = models.DecimalField(verbose_name="实际结算居间分成费", max_digits=20, decimal_places=4, default=0)
    # Date the agent confirmed the settlement.
    agent_confirm_date = models.DateField(verbose_name="居间确认时间", default=default_cur_date)
    # Workflow state; records start as drafts.
    state = models.CharField(verbose_name="状态", max_length=40, default="draft")
    # Free-form remark.
    note = models.TextField(verbose_name="备注", null=True, blank=True)
    # Audit trail: who created/updated the record and when.
    created_by = models.ForeignKey(HasuraUser, verbose_name="录入人", null=True, on_delete=models.SET_NULL)
    created_at = models.DateTimeField(verbose_name="录入时间", default=default_cur_datetime)
    updated_by = models.ForeignKey(HasuraUser, verbose_name="更新人", related_name="+", null=True, on_delete=models.SET_NULL)
    updated_at = models.DateTimeField(verbose_name="更新时间", default=default_cur_datetime)
class MthDraftCustomerBill(models.Model):
    """Monthly settled-energy confirmation bill, header table (月度电量确认单主表)."""

    # Owning organisation; FK is cleared (SET_NULL) if the organisation is deleted.
    organization = models.ForeignKey(
        Organization, verbose_name="所属机构", null=True, on_delete=models.SET_NULL)
    # Billing month; value produced by default_cur_mth().
    mth = models.IntegerField("月份", default=default_cur_mth)
    # Workflow state; new bills start in "draft".
    state = models.CharField("状态", max_length=40, default="draft")
    note = models.TextField("备注", null=True, blank=True)
    # Audit trail: creator / last editor and timestamps.
    created_by = models.ForeignKey(
        HasuraUser, verbose_name="录入人", null=True, on_delete=models.SET_NULL)
    created_at = models.DateTimeField("录入时间", default=default_cur_datetime)
    updated_by = models.ForeignKey(
        HasuraUser, verbose_name="更新人", related_name="+", null=True,
        on_delete=models.SET_NULL)
    updated_at = models.DateTimeField("更新时间", default=default_cur_datetime)
class MthDraftCustomerBillLine(models.Model):
    """Monthly settled-energy confirmation bill, detail line (月度电量确认单明细)."""

    # Parent header row; lines are deleted together with their bill (CASCADE).
    mth_draft_customer_bill = models.ForeignKey(
        MthDraftCustomerBill, verbose_name="月度电量确认单主表",
        on_delete=models.CASCADE)
    customer = models.ForeignKey(
        Customer, verbose_name="关联客户", null=True, on_delete=models.SET_NULL)
    # Customer meter/account number.
    customer_device_no = models.CharField("户号", max_length=40)
    # Settled energy volumes (结算电量) split by tariff period:
    # common / flat / valley / peak.
    act_common = models.DecimalField("月结算电量-常规", max_digits=20, decimal_places=4, default=0)
    act_flat = models.DecimalField("月结算电量-平时段", max_digits=20, decimal_places=4, default=0)
    act_valley = models.DecimalField("月结算电量-谷时段", max_digits=20, decimal_places=4, default=0)
    act_peak = models.DecimalField("月结算电量-峰时段", max_digits=20, decimal_places=4, default=0)
    # Workflow state; new lines start in "draft".
    state = models.CharField("状态", max_length=40, default="draft")
    note = models.TextField("备注", null=True, blank=True)
    # Audit trail: creator / last editor and timestamps.
    created_by = models.ForeignKey(
        HasuraUser, verbose_name="录入人", null=True, on_delete=models.SET_NULL)
    created_at = models.DateTimeField("录入时间", default=default_cur_datetime)
    updated_by = models.ForeignKey(
        HasuraUser, verbose_name="更新人", related_name="+", null=True,
        on_delete=models.SET_NULL)
    updated_at = models.DateTimeField("更新时间", default=default_cur_datetime)
class MthDiffCustomerBill(models.Model):
    """Monthly energy-volume deviation control, header table (月度电量偏差控制主表)."""

    # Owning organisation; FK is cleared (SET_NULL) if the organisation is deleted.
    organization = models.ForeignKey(
        Organization, verbose_name="所属机构", null=True, on_delete=models.SET_NULL)
    # Billing month; value produced by default_cur_mth().
    mth = models.IntegerField("月份", default=default_cur_mth)
    # Workflow state; new records start in "draft".
    state = models.CharField("状态", max_length=40, default="draft")
    note = models.TextField("备注", null=True, blank=True)
    # Audit trail: creator / last editor and timestamps.
    created_by = models.ForeignKey(
        HasuraUser, verbose_name="录入人", null=True, on_delete=models.SET_NULL)
    created_at = models.DateTimeField("录入时间", default=default_cur_datetime)
    updated_by = models.ForeignKey(
        HasuraUser, verbose_name="更新人", related_name="+", null=True,
        on_delete=models.SET_NULL)
    updated_at = models.DateTimeField("更新时间", default=default_cur_datetime)
class MthDiffCustomerBillLine(models.Model):
    """Monthly energy-volume deviation control, detail line (月度电量偏差控制表明细)."""

    # Parent header row; lines are deleted together with their bill (CASCADE).
    mth_diff_customer_bill = models.ForeignKey(
        MthDiffCustomerBill, verbose_name="月度电量偏差控制主表",
        on_delete=models.CASCADE)
    customer = models.ForeignKey(
        Customer, verbose_name="关联客户", null=True, on_delete=models.SET_NULL)
    # Customer meter/account number.
    customer_device_no = models.CharField("户号", max_length=40)
    # Planned volumes before adjustment (计划电量), by tariff period.
    plan_common = models.DecimalField("调整前计划电量-常规", max_digits=20, decimal_places=4, default=0)
    plan_flat = models.DecimalField("调整前计划电量-平时段", max_digits=20, decimal_places=4, default=0)
    plan_valley = models.DecimalField("调整前计划电量-谷时段", max_digits=20, decimal_places=4, default=0)
    plan_peak = models.DecimalField("调整前计划电量-峰时段", max_digits=20, decimal_places=4, default=0)
    # Settled volumes (结算电量), by tariff period.
    act_common = models.DecimalField("月结算电量-常规", max_digits=20, decimal_places=4, default=0)
    act_flat = models.DecimalField("月结算电量-平时段", max_digits=20, decimal_places=4, default=0)
    act_valley = models.DecimalField("月结算电量-谷时段", max_digits=20, decimal_places=4, default=0)
    act_peak = models.DecimalField("月结算电量-峰时段", max_digits=20, decimal_places=4, default=0)
    # Workflow state; new lines start in "draft".
    state = models.CharField("状态", max_length=40, default="draft")
    note = models.TextField("备注", null=True, blank=True)
    # Audit trail: creator / last editor and timestamps.
    created_by = models.ForeignKey(
        HasuraUser, verbose_name="录入人", null=True, on_delete=models.SET_NULL)
    created_at = models.DateTimeField("录入时间", default=default_cur_datetime)
    updated_by = models.ForeignKey(
        HasuraUser, verbose_name="更新人", related_name="+", null=True,
        on_delete=models.SET_NULL)
    updated_at = models.DateTimeField("更新时间", default=default_cur_datetime)
| 57.291188 | 163 | 0.781114 | 6,784 | 44,859 | 4.861291 | 0.037588 | 0.158283 | 0.123109 | 0.184026 | 0.910519 | 0.900361 | 0.885442 | 0.869978 | 0.85891 | 0.853301 | 0 | 0.039132 | 0.087965 | 44,859 | 782 | 164 | 57.36445 | 0.766944 | 0.018971 | 0 | 0.502 | 0 | 0 | 0.081245 | 0.003864 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014 | false | 0.002 | 0.006 | 0.008 | 0.964 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 8 |
c3a3585f3faa71ed222d70ab535ab09129349871 | 5,561 | py | Python | tests/bugs/core_3547_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2022-02-05T11:37:13.000Z | 2022-02-05T11:37:13.000Z | tests/bugs/core_3547_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-09-03T11:47:00.000Z | 2021-09-03T12:42:10.000Z | tests/bugs/core_3547_test.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-06-30T14:14:16.000Z | 2021-06-30T14:14:16.000Z | #coding:utf-8
#
# id: bugs.core_3547
# title: Floating-point negative zero doesn't match positive zero in the index
# decription:
# tracker_id: CORE-3547
# min_versions: ['2.5.1']
# versions: 2.5.1, 2.5.1
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5.1
# resources: None
# --- Version-1 (Windows) fixtures for CORE-3547: -0e0 must equal 0e0 in an index.
substitutions_1 = []
# Build one table without a PK (holds both +0 and -0) and one table whose
# double-precision column is the primary key, so inserting -0e0 after 0e0
# must raise a unique-key violation if the index treats them as equal.
init_script_1 = """
recreate table t_float_no_pk (col float);
commit;
insert into t_float_no_pk (col) values (0e0);
insert into t_float_no_pk (col) values (-0e0);
commit;
recreate table t1_double_as_pk (col double precision, constraint t1_double_pk primary key(col) using index t1_double_pk);
commit;
"""
db_1 = db_factory(page_size=4096, sql_dialect=3, init=init_script_1)
# Queries compare 0, 0e0, -0e0 and arithmetic zeros both in WHERE clauses and
# via count(distinct ...), then attempt the PK-violating insert of -0e0.
test_script_1 = """
set list on;
select count(*) "where id = 0" from rdb$relations where rdb$relation_id = 0;
select count(*) "where id = 0e0" from rdb$relations where rdb$relation_id = 0e0;
select count(*) "where id = (1e0 - 1e0)" from rdb$relations where rdb$relation_id = (1e0 - 1e0);
select count(*) "where id = -0e0" from rdb$relations where rdb$relation_id = -0e0;
select count(*) "where id = -(1e0 - 1e0)" from rdb$relations where rdb$relation_id = -(1e0 - 1e0);
select count(*) "where 0e0 = -0e0" from rdb$database where 0e0 = -0e0;
insert into t1_double_as_pk (col) values (0e0);
commit;
insert into t1_double_as_pk (col) values (-0e0);
commit;
select count(distinct col) "t_float_no_pk: count(dist col)" from t_float_no_pk;
select count(*) "t_double_pk: col, count(*)" from t1_double_as_pk group by col;
-- :: NB ::: Problematic key representaion for 0e0 differ in Windows vs Linux!
-- NIX: -Problematic key value is ("COL" = 0.000000000000000)
-- WIN: -Problematic key value is ("COL" = 0.0000000000000000)
-- ^
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
# Every zero spelling must match exactly one row, and both tables must see a
# single distinct zero value.
expected_stdout_1 = """
where id = 0 1
where id = 0e0 1
where id = (1e0 - 1e0) 1
where id = -0e0 1
where id = -(1e0 - 1e0) 1
where 0e0 = -0e0 1
t_float_no_pk: count(dist col) 1
t_double_pk: col, count(*) 1
"""
# Windows variant of the key text: 16 digits after the decimal point.
expected_stderr_1 = """
Statement failed, SQLSTATE = 23000
violation of PRIMARY or UNIQUE KEY constraint "T1_DOUBLE_PK" on table "T1_DOUBLE_AS_PK"
-Problematic key value is ("COL" = 0.0000000000000000)
"""
@pytest.mark.version('>=2.5.1')
@pytest.mark.platform('Windows')
def test_1(act_1: Action):
    """Windows run: ISQL output must match the Windows-specific expected text
    (the PK-violation key is printed with 16 fractional digits there)."""
    act_1.expected_stdout = expected_stdout_1
    act_1.expected_stderr = expected_stderr_1
    act_1.execute()
    stderr_matches = act_1.clean_stderr == act_1.clean_expected_stderr
    stdout_matches = act_1.clean_stdout == act_1.clean_expected_stdout
    assert stderr_matches and stdout_matches
# version: 2.5.1
# resources: None
# --- Version-2 (POSIX platforms) fixtures: identical scenario to version 1,
# only the expected stderr key text differs (15 fractional digits).
substitutions_2 = []
# Same schema as init_script_1: a PK-free float table plus a table whose
# double-precision column is the primary key.
init_script_2 = """
recreate table t_float_no_pk (col float);
commit;
insert into t_float_no_pk (col) values (0e0);
insert into t_float_no_pk (col) values (-0e0);
commit;
recreate table t1_double_as_pk (col double precision, constraint t1_double_pk primary key(col) using index t1_double_pk);
commit;
"""
db_2 = db_factory(page_size=4096, sql_dialect=3, init=init_script_2)
# Same probes as test_script_1 (see comments there).
test_script_2 = """
set list on;
select count(*) "where id = 0" from rdb$relations where rdb$relation_id = 0;
select count(*) "where id = 0e0" from rdb$relations where rdb$relation_id = 0e0;
select count(*) "where id = (1e0 - 1e0)" from rdb$relations where rdb$relation_id = (1e0 - 1e0);
select count(*) "where id = -0e0" from rdb$relations where rdb$relation_id = -0e0;
select count(*) "where id = -(1e0 - 1e0)" from rdb$relations where rdb$relation_id = -(1e0 - 1e0);
select count(*) "where 0e0 = -0e0" from rdb$database where 0e0 = -0e0;
insert into t1_double_as_pk (col) values (0e0);
commit;
insert into t1_double_as_pk (col) values (-0e0);
commit;
select count(distinct col) "t_float_no_pk: count(dist col)" from t_float_no_pk;
select count(*) "t_double_pk: col, count(*)" from t1_double_as_pk group by col;
-- :: NB ::: Problematic key representaion for 0e0 differ in Windows vs Linux!
-- NIX: -Problematic key value is ("COL" = 0.000000000000000)
-- WIN: -Problematic key value is ("COL" = 0.0000000000000000)
-- ^
"""
act_2 = isql_act('db_2', test_script_2, substitutions=substitutions_2)
expected_stdout_2 = """
where id = 0 1
where id = 0e0 1
where id = (1e0 - 1e0) 1
where id = -0e0 1
where id = -(1e0 - 1e0) 1
where 0e0 = -0e0 1
t_float_no_pk: count(dist col) 1
t_double_pk: col, count(*) 1
"""
# POSIX variant of the key text: 15 digits after the decimal point.
expected_stderr_2 = """
Statement failed, SQLSTATE = 23000
violation of PRIMARY or UNIQUE KEY constraint "T1_DOUBLE_PK" on table "T1_DOUBLE_AS_PK"
-Problematic key value is ("COL" = 0.000000000000000)
"""
@pytest.mark.version('>=2.5.1')
@pytest.mark.platform('Linux', 'MacOS', 'Solaris', 'FreeBSD', 'HP-UX')
def test_2(act_2: Action):
    """POSIX run: ISQL output must match the non-Windows expected text
    (the PK-violation key is printed with 15 fractional digits there)."""
    act_2.expected_stdout = expected_stdout_2
    act_2.expected_stderr = expected_stderr_2
    act_2.execute()
    stderr_matches = act_2.clean_stderr == act_2.clean_expected_stderr
    stdout_matches = act_2.clean_stdout == act_2.clean_expected_stdout
    assert stderr_matches and stdout_matches
| 38.089041 | 125 | 0.637475 | 809 | 5,561 | 4.153276 | 0.149567 | 0.041667 | 0.028571 | 0.035714 | 0.783036 | 0.766369 | 0.766369 | 0.735714 | 0.735714 | 0.713095 | 0 | 0.083134 | 0.249416 | 5,561 | 145 | 126 | 38.351724 | 0.72185 | 0.052688 | 0 | 0.709091 | 0 | 0.054545 | 0.761896 | 0 | 0 | 0 | 0 | 0 | 0.036364 | 1 | 0.018182 | false | 0 | 0.018182 | 0 | 0.036364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
c3f98a86a341ec6c6100d49db29f9ebb7eec0462 | 25,887 | py | Python | sdk/python/pulumi_consul/intention.py | pulumi/pulumi-consul | 5b66c5b97fda6b5433bfb4d4173c999e468c82e8 | [
"ECL-2.0",
"Apache-2.0"
] | 3 | 2019-11-12T12:21:18.000Z | 2021-07-31T08:17:22.000Z | sdk/python/pulumi_consul/intention.py | pulumi/pulumi-consul | 5b66c5b97fda6b5433bfb4d4173c999e468c82e8 | [
"ECL-2.0",
"Apache-2.0"
] | 38 | 2019-11-21T15:19:33.000Z | 2022-03-31T15:24:11.000Z | sdk/python/pulumi_consul/intention.py | pulumi/pulumi-consul | 5b66c5b97fda6b5433bfb4d4173c999e468c82e8 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-11-24T12:23:13.000Z | 2021-12-06T17:33:31.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['IntentionArgs', 'Intention']
@pulumi.input_type
class IntentionArgs:
    # NOTE: tfgen-generated input type (see file header); regenerate with the
    # Pulumi Terraform Bridge rather than editing by hand. Values are stored
    # through pulumi.set/pulumi.get, so each property mirrors one argument.
    def __init__(__self__, *,
                 action: pulumi.Input[str],
                 destination_name: pulumi.Input[str],
                 source_name: pulumi.Input[str],
                 datacenter: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 destination_namespace: Optional[pulumi.Input[str]] = None,
                 meta: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 source_namespace: Optional[pulumi.Input[str]] = None):
        """
        The set of arguments for constructing a Intention resource.
        :param pulumi.Input[str] action: The intention action. Must be one of `allow` or `deny`.
        :param pulumi.Input[str] destination_name: The name of the destination service for the intention. This
               service does not have to exist.
        :param pulumi.Input[str] source_name: The name of the source service for the intention. This
               service does not have to exist.
        :param pulumi.Input[str] datacenter: The datacenter to use. This overrides the
               agent's default datacenter and the datacenter in the provider setup.
        :param pulumi.Input[str] description: Optional description that can be used by Consul
               tooling, but is not used internally.
        :param pulumi.Input[str] destination_namespace: The destination
               namespace of the intention.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] meta: Key/value pairs that are opaque to Consul and are associated
               with the intention.
        :param pulumi.Input[str] source_namespace: The source namespace of the
               intention.
        """
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "destination_name", destination_name)
        pulumi.set(__self__, "source_name", source_name)
        # Optional arguments are recorded only when explicitly supplied.
        if datacenter is not None:
            pulumi.set(__self__, "datacenter", datacenter)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if destination_namespace is not None:
            pulumi.set(__self__, "destination_namespace", destination_namespace)
        if meta is not None:
            pulumi.set(__self__, "meta", meta)
        if source_namespace is not None:
            pulumi.set(__self__, "source_namespace", source_namespace)

    @property
    @pulumi.getter
    def action(self) -> pulumi.Input[str]:
        """
        The intention action. Must be one of `allow` or `deny`.
        """
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: pulumi.Input[str]):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter(name="destinationName")
    def destination_name(self) -> pulumi.Input[str]:
        """
        The name of the destination service for the intention. This
        service does not have to exist.
        """
        return pulumi.get(self, "destination_name")

    @destination_name.setter
    def destination_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "destination_name", value)

    @property
    @pulumi.getter(name="sourceName")
    def source_name(self) -> pulumi.Input[str]:
        """
        The name of the source service for the intention. This
        service does not have to exist.
        """
        return pulumi.get(self, "source_name")

    @source_name.setter
    def source_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "source_name", value)

    @property
    @pulumi.getter
    def datacenter(self) -> Optional[pulumi.Input[str]]:
        """
        The datacenter to use. This overrides the
        agent's default datacenter and the datacenter in the provider setup.
        """
        return pulumi.get(self, "datacenter")

    @datacenter.setter
    def datacenter(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datacenter", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Optional description that can be used by Consul
        tooling, but is not used internally.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="destinationNamespace")
    def destination_namespace(self) -> Optional[pulumi.Input[str]]:
        """
        The destination
        namespace of the intention.
        """
        return pulumi.get(self, "destination_namespace")

    @destination_namespace.setter
    def destination_namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "destination_namespace", value)

    @property
    @pulumi.getter
    def meta(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Key/value pairs that are opaque to Consul and are associated
        with the intention.
        """
        return pulumi.get(self, "meta")

    @meta.setter
    def meta(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "meta", value)

    @property
    @pulumi.getter(name="sourceNamespace")
    def source_namespace(self) -> Optional[pulumi.Input[str]]:
        """
        The source namespace of the
        intention.
        """
        return pulumi.get(self, "source_namespace")

    @source_namespace.setter
    def source_namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_namespace", value)
@pulumi.input_type
class _IntentionState:
    # NOTE: tfgen-generated state type; unlike IntentionArgs every field is
    # optional because it describes an existing resource being looked up.
    def __init__(__self__, *,
                 action: Optional[pulumi.Input[str]] = None,
                 datacenter: Optional[pulumi.Input[str]] = None,
                 description: Optional[pulumi.Input[str]] = None,
                 destination_name: Optional[pulumi.Input[str]] = None,
                 destination_namespace: Optional[pulumi.Input[str]] = None,
                 meta: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
                 source_name: Optional[pulumi.Input[str]] = None,
                 source_namespace: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering Intention resources.
        :param pulumi.Input[str] action: The intention action. Must be one of `allow` or `deny`.
        :param pulumi.Input[str] datacenter: The datacenter to use. This overrides the
               agent's default datacenter and the datacenter in the provider setup.
        :param pulumi.Input[str] description: Optional description that can be used by Consul
               tooling, but is not used internally.
        :param pulumi.Input[str] destination_name: The name of the destination service for the intention. This
               service does not have to exist.
        :param pulumi.Input[str] destination_namespace: The destination
               namespace of the intention.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] meta: Key/value pairs that are opaque to Consul and are associated
               with the intention.
        :param pulumi.Input[str] source_name: The name of the source service for the intention. This
               service does not have to exist.
        :param pulumi.Input[str] source_namespace: The source namespace of the
               intention.
        """
        # Only provided fields are stored, so absent state stays unset.
        if action is not None:
            pulumi.set(__self__, "action", action)
        if datacenter is not None:
            pulumi.set(__self__, "datacenter", datacenter)
        if description is not None:
            pulumi.set(__self__, "description", description)
        if destination_name is not None:
            pulumi.set(__self__, "destination_name", destination_name)
        if destination_namespace is not None:
            pulumi.set(__self__, "destination_namespace", destination_namespace)
        if meta is not None:
            pulumi.set(__self__, "meta", meta)
        if source_name is not None:
            pulumi.set(__self__, "source_name", source_name)
        if source_namespace is not None:
            pulumi.set(__self__, "source_namespace", source_namespace)

    @property
    @pulumi.getter
    def action(self) -> Optional[pulumi.Input[str]]:
        """
        The intention action. Must be one of `allow` or `deny`.
        """
        return pulumi.get(self, "action")

    @action.setter
    def action(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "action", value)

    @property
    @pulumi.getter
    def datacenter(self) -> Optional[pulumi.Input[str]]:
        """
        The datacenter to use. This overrides the
        agent's default datacenter and the datacenter in the provider setup.
        """
        return pulumi.get(self, "datacenter")

    @datacenter.setter
    def datacenter(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datacenter", value)

    @property
    @pulumi.getter
    def description(self) -> Optional[pulumi.Input[str]]:
        """
        Optional description that can be used by Consul
        tooling, but is not used internally.
        """
        return pulumi.get(self, "description")

    @description.setter
    def description(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "description", value)

    @property
    @pulumi.getter(name="destinationName")
    def destination_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the destination service for the intention. This
        service does not have to exist.
        """
        return pulumi.get(self, "destination_name")

    @destination_name.setter
    def destination_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "destination_name", value)

    @property
    @pulumi.getter(name="destinationNamespace")
    def destination_namespace(self) -> Optional[pulumi.Input[str]]:
        """
        The destination
        namespace of the intention.
        """
        return pulumi.get(self, "destination_namespace")

    @destination_namespace.setter
    def destination_namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "destination_namespace", value)

    @property
    @pulumi.getter
    def meta(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Key/value pairs that are opaque to Consul and are associated
        with the intention.
        """
        return pulumi.get(self, "meta")

    @meta.setter
    def meta(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "meta", value)

    @property
    @pulumi.getter(name="sourceName")
    def source_name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the source service for the intention. This
        service does not have to exist.
        """
        return pulumi.get(self, "source_name")

    @source_name.setter
    def source_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_name", value)

    @property
    @pulumi.getter(name="sourceNamespace")
    def source_namespace(self) -> Optional[pulumi.Input[str]]:
        """
        The source namespace of the
        intention.
        """
        return pulumi.get(self, "source_namespace")

    @source_namespace.setter
    def source_namespace(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "source_namespace", value)
class Intention(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[str]] = None,
datacenter: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
destination_name: Optional[pulumi.Input[str]] = None,
destination_namespace: Optional[pulumi.Input[str]] = None,
meta: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
source_name: Optional[pulumi.Input[str]] = None,
source_namespace: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
[Intentions](https://www.consul.io/docs/connect/intentions.html) are used to define
rules for which services may connect to one another when using [Consul Connect](https://www.consul.io/docs/connect/index.html).
> **NOTE:** This resource is appropriate for managing legacy intentions in
Consul version 1.8 and earlier. As of Consul 1.9, intentions should be managed
using the [`service-intentions`](https://www.consul.io/docs/connect/intentions)
configuration entry. It is recommended to migrate from the `Intention`
resource to `ConfigEntry` when running Consul 1.9 and later.
It is appropriate to either reference existing services, or specify non-existent services
that will be created in the future when creating intentions. This resource can be used
in conjunction with the `Service` datasource when referencing services
registered on nodes that have a running Consul agent.
## Example Usage
Create a simplest intention with static service names:
```python
import pulumi
import pulumi_consul as consul
database = consul.Intention("database",
action="allow",
destination_name="db",
source_name="api")
```
Referencing a known service via a datasource:
```python
import pulumi
import pulumi_consul as consul
database = consul.Intention("database",
action="allow",
destination_name=consul_service["pg"]["name"],
source_name="api")
pg = consul.get_service(name="postgresql")
```
## Import
`consul_intention` can be imported
```sh
$ pulumi import consul:index/intention:Intention database 657a57d6-0d56-57e2-31cb-e9f1ed3c18dd
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] action: The intention action. Must be one of `allow` or `deny`.
:param pulumi.Input[str] datacenter: The datacenter to use. This overrides the
agent's default datacenter and the datacenter in the provider setup.
:param pulumi.Input[str] description: Optional description that can be used by Consul
tooling, but is not used internally.
:param pulumi.Input[str] destination_name: The name of the destination service for the intention. This
service does not have to exist.
:param pulumi.Input[str] destination_namespace: The destination
namespace of the intention.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] meta: Key/value pairs that are opaque to Consul and are associated
with the intention.
:param pulumi.Input[str] source_name: The name of the source service for the intention. This
service does not have to exist.
:param pulumi.Input[str] source_namespace: The source namespace of the
intention.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: IntentionArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
[Intentions](https://www.consul.io/docs/connect/intentions.html) are used to define
rules for which services may connect to one another when using [Consul Connect](https://www.consul.io/docs/connect/index.html).
> **NOTE:** This resource is appropriate for managing legacy intentions in
Consul version 1.8 and earlier. As of Consul 1.9, intentions should be managed
using the [`service-intentions`](https://www.consul.io/docs/connect/intentions)
configuration entry. It is recommended to migrate from the `Intention`
resource to `ConfigEntry` when running Consul 1.9 and later.
It is appropriate to either reference existing services, or specify non-existent services
that will be created in the future when creating intentions. This resource can be used
in conjunction with the `Service` datasource when referencing services
registered on nodes that have a running Consul agent.
## Example Usage
Create a simplest intention with static service names:
```python
import pulumi
import pulumi_consul as consul
database = consul.Intention("database",
action="allow",
destination_name="db",
source_name="api")
```
Referencing a known service via a datasource:
```python
import pulumi
import pulumi_consul as consul
database = consul.Intention("database",
action="allow",
destination_name=consul_service["pg"]["name"],
source_name="api")
pg = consul.get_service(name="postgresql")
```
## Import
`consul_intention` can be imported
```sh
$ pulumi import consul:index/intention:Intention database 657a57d6-0d56-57e2-31cb-e9f1ed3c18dd
```
:param str resource_name: The name of the resource.
:param IntentionArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(IntentionArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[str]] = None,
datacenter: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
destination_name: Optional[pulumi.Input[str]] = None,
destination_namespace: Optional[pulumi.Input[str]] = None,
meta: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
source_name: Optional[pulumi.Input[str]] = None,
source_namespace: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = IntentionArgs.__new__(IntentionArgs)
if action is None and not opts.urn:
raise TypeError("Missing required property 'action'")
__props__.__dict__["action"] = action
__props__.__dict__["datacenter"] = datacenter
__props__.__dict__["description"] = description
if destination_name is None and not opts.urn:
raise TypeError("Missing required property 'destination_name'")
__props__.__dict__["destination_name"] = destination_name
__props__.__dict__["destination_namespace"] = destination_namespace
__props__.__dict__["meta"] = meta
if source_name is None and not opts.urn:
raise TypeError("Missing required property 'source_name'")
__props__.__dict__["source_name"] = source_name
__props__.__dict__["source_namespace"] = source_namespace
super(Intention, __self__).__init__(
'consul:index/intention:Intention',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
action: Optional[pulumi.Input[str]] = None,
datacenter: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
destination_name: Optional[pulumi.Input[str]] = None,
destination_namespace: Optional[pulumi.Input[str]] = None,
meta: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
source_name: Optional[pulumi.Input[str]] = None,
source_namespace: Optional[pulumi.Input[str]] = None) -> 'Intention':
"""
Get an existing Intention resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] action: The intention action. Must be one of `allow` or `deny`.
:param pulumi.Input[str] datacenter: The datacenter to use. This overrides the
agent's default datacenter and the datacenter in the provider setup.
:param pulumi.Input[str] description: Optional description that can be used by Consul
tooling, but is not used internally.
:param pulumi.Input[str] destination_name: The name of the destination service for the intention. This
service does not have to exist.
:param pulumi.Input[str] destination_namespace: The destination
namespace of the intention.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] meta: Key/value pairs that are opaque to Consul and are associated
with the intention.
:param pulumi.Input[str] source_name: The name of the source service for the intention. This
service does not have to exist.
:param pulumi.Input[str] source_namespace: The source namespace of the
intention.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _IntentionState.__new__(_IntentionState)
__props__.__dict__["action"] = action
__props__.__dict__["datacenter"] = datacenter
__props__.__dict__["description"] = description
__props__.__dict__["destination_name"] = destination_name
__props__.__dict__["destination_namespace"] = destination_namespace
__props__.__dict__["meta"] = meta
__props__.__dict__["source_name"] = source_name
__props__.__dict__["source_namespace"] = source_namespace
return Intention(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def action(self) -> pulumi.Output[str]:
"""
The intention action. Must be one of `allow` or `deny`.
"""
return pulumi.get(self, "action")
@property
@pulumi.getter
def datacenter(self) -> pulumi.Output[str]:
"""
The datacenter to use. This overrides the
agent's default datacenter and the datacenter in the provider setup.
"""
return pulumi.get(self, "datacenter")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Optional description that can be used by Consul
tooling, but is not used internally.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="destinationName")
def destination_name(self) -> pulumi.Output[str]:
"""
The name of the destination service for the intention. This
service does not have to exist.
"""
return pulumi.get(self, "destination_name")
@property
@pulumi.getter(name="destinationNamespace")
def destination_namespace(self) -> pulumi.Output[Optional[str]]:
"""
The destination
namespace of the intention.
"""
return pulumi.get(self, "destination_namespace")
    @property
    @pulumi.getter
    def meta(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
        """
        Key/value pairs that are opaque to Consul and are associated
        with the intention.
        """
        # Read-only output; optional mapping, may resolve to None.
        return pulumi.get(self, "meta")
    @property
    @pulumi.getter(name="sourceName")
    def source_name(self) -> pulumi.Output[str]:
        """
        The name of the source service for the intention. This
        service does not have to exist.
        """
        # Exposed to the engine under the camelCase name "sourceName".
        return pulumi.get(self, "source_name")
    @property
    @pulumi.getter(name="sourceNamespace")
    def source_namespace(self) -> pulumi.Output[Optional[str]]:
        """
        The source namespace of the
        intention.
        """
        # Exposed to the engine under the camelCase name "sourceNamespace";
        # optional, so may resolve to None.
        return pulumi.get(self, "source_namespace")
| 41.552167 | 135 | 0.63839 | 2,975 | 25,887 | 5.393277 | 0.07563 | 0.082954 | 0.09249 | 0.074042 | 0.900717 | 0.882518 | 0.872733 | 0.855905 | 0.848613 | 0.83914 | 0 | 0.002685 | 0.266234 | 25,887 | 622 | 136 | 41.618971 | 0.842011 | 0.379109 | 0 | 0.753425 | 1 | 0 | 0.097153 | 0.015649 | 0 | 0 | 0 | 0 | 0 | 1 | 0.160959 | false | 0.003425 | 0.017123 | 0 | 0.273973 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
7f13d40a25c91b265fd81505aada3799144f9c54 | 92 | py | Python | parameters_8000.py | kyomei/python-locadora | c461252387f77bd01465fd851d0b5bfa9ce53493 | [
"BSD-3-Clause"
] | null | null | null | parameters_8000.py | kyomei/python-locadora | c461252387f77bd01465fd851d0b5bfa9ce53493 | [
"BSD-3-Clause"
] | null | null | null | parameters_8000.py | kyomei/python-locadora | c461252387f77bd01465fd851d0b5bfa9ce53493 | [
"BSD-3-Clause"
] | null | null | null | password="pbkdf2(1000,20,sha512)$b8c83ceb51c7ef4b$ccf6abf1c01ef0d7c9e2b13d5c3b092608b8ef37"
| 46 | 91 | 0.891304 | 7 | 92 | 11.714286 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.417582 | 0.01087 | 92 | 1 | 92 | 92 | 0.483516 | 0 | 0 | 0 | 0 | 0 | 0.869565 | 0.869565 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
61ac9575be8d348b32ccc6c2b83b63c5528ae2af | 114 | py | Python | viewformer/__init__.py | jkulhanek/viewformer | 9ad2c5a2f7abe4b7ff490ced0132bf3d2f07e29c | [
"MIT"
] | 87 | 2022-03-22T02:03:17.000Z | 2022-03-31T01:45:52.000Z | viewformer/__init__.py | jkulhanek/viewformer | 9ad2c5a2f7abe4b7ff490ced0132bf3d2f07e29c | [
"MIT"
] | null | null | null | viewformer/__init__.py | jkulhanek/viewformer | 9ad2c5a2f7abe4b7ff490ced0132bf3d2f07e29c | [
"MIT"
] | 5 | 2022-03-22T10:39:34.000Z | 2022-03-28T02:05:28.000Z | import viewformer.models # noqa: F401
import viewformer.data # noqa: F401
import viewformer.utils # noqa: F401
| 28.5 | 38 | 0.763158 | 15 | 114 | 5.8 | 0.466667 | 0.551724 | 0.321839 | 0.551724 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.09375 | 0.157895 | 114 | 3 | 39 | 38 | 0.8125 | 0.280702 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
61b6e8fdc29e4759d35250128a22a186ad47a0bc | 554 | py | Python | scripts/idbupdateowner.py | ecx86/ida-scripts | 963d7a40330eed1d5e8ead8f4297e5a3555e2456 | [
"MIT"
] | 10 | 2018-08-01T15:53:04.000Z | 2020-02-13T22:03:55.000Z | scripts/idbupdateowner.py | rcx/ida-scripts | 963d7a40330eed1d5e8ead8f4297e5a3555e2456 | [
"MIT"
] | null | null | null | scripts/idbupdateowner.py | rcx/ida-scripts | 963d7a40330eed1d5e8ead8f4297e5a3555e2456 | [
"MIT"
] | 3 | 2020-07-01T01:43:17.000Z | 2022-02-14T10:23:51.000Z | import idaapi
import binascii
# Hex-encoded blob to install as the '$ original user' netnode value.
# NOTE(review): presumably a serialized IDB owner/user record dumped from
# another database -- confirm the provenance in IDA before relying on it.
dumped_netnode_value ='ca75b28848ea06bcae409699fa2510a03bbaf43bd167eecb17d52918187133a793ebf8d3270230c7164d7a79b53c2c3edd611ede975690784cf2c254abe8b587140d19a3f46b2be109bde1da1b7ed4d7c9f7b58135f2c296db4e86ad29b6f0b999b5599d40c3bae8b29d2cc06ecef63cba0e1b9a9505c1efe9019a7020127e100000000000000000000000000000000000000000000000000000000000000000'
# Remove the netnode that stores the user info in plain text.
idaapi.netnode('$ user1', 0, False).kill() # deleting netnode with plain text info
# Overwrite supval index 0 of the '$ original user' netnode with the decoded blob.
idaapi.netnode('$ original user', 0, False).supset(0, binascii.unhexlify(dumped_netnode_value))
| 92.333333 | 344 | 0.904332 | 33 | 554 | 15.060606 | 0.606061 | 0.052314 | 0.072435 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.419962 | 0.041516 | 554 | 5 | 345 | 110.8 | 0.516008 | 0.066787 | 0 | 0 | 0 | 0 | 0.664078 | 0.621359 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4 | 0 | 0.4 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 8 |
61b90b04a091f93626368bd7f8ab2a3099963928 | 563 | py | Python | tests/data/format/final_period/function_title_docstrings.py | DanielNoord/pydocstringformatter | a69302cee6bd32b9b5cc48912a47d0e8ad3f7abe | [
"MIT"
] | 4 | 2022-01-02T22:50:59.000Z | 2022-02-09T09:04:37.000Z | tests/data/format/final_period/function_title_docstrings.py | DanielNoord/pydocstringformatter | a69302cee6bd32b9b5cc48912a47d0e8ad3f7abe | [
"MIT"
] | 80 | 2022-01-02T09:02:50.000Z | 2022-03-30T13:34:10.000Z | tests/data/format/final_period/function_title_docstrings.py | DanielNoord/pydocstringformatter | a69302cee6bd32b9b5cc48912a47d0e8ad3f7abe | [
"MIT"
] | 2 | 2022-01-02T11:58:29.000Z | 2022-01-04T18:53:29.000Z | def func():
    # NOTE(review): this file is fixture input for docstring-formatter tests
    # (tests/data/format/final_period/) -- the summary underlines and repeated
    # redefinitions below are the data under test; do not "fix" or reformat them.
    def inner_func():
        """Summary
        ==========

        docstring
        """

    def inner_func():
        """Summary
        ----------

        docstring
        """

    def inner_func():
        """Summary
        ^^^^^^^^^^

        docstring
        """

    def inner_func():
        """Summary
        **********

        docstring
        """

    def inner_func():
        """Summary
        ^^^^^^^^^^

        docstring
        """

    def inner_func():
        """Summary
        aaaaaaaaaa

        docstring
        """
| 13.093023 | 21 | 0.339254 | 33 | 563 | 5.606061 | 0.181818 | 0.259459 | 0.389189 | 0.616216 | 0.859459 | 0.859459 | 0.859459 | 0.859459 | 0.859459 | 0.859459 | 0 | 0 | 0.470693 | 563 | 42 | 22 | 13.404762 | 0.620805 | 0.31794 | 0 | 0.857143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | true | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 13 |
4eebe65857bd2efd98886d2264bc43967c6d64f0 | 16,756 | py | Python | data_service/views/wallet.py | freelancing-solutions/GCP-Based-Database-as-a-Service | 7d6a12c33db238ca2f748bf4ddea6d2cf3c16da3 | [
"MIT"
] | 1 | 2021-04-15T19:45:04.000Z | 2021-04-15T19:45:04.000Z | data_service/views/wallet.py | freelancing-solutions/pinydesk | 7d6a12c33db238ca2f748bf4ddea6d2cf3c16da3 | [
"MIT"
] | 516 | 2021-05-02T11:46:36.000Z | 2022-03-29T06:09:49.000Z | data_service/views/wallet.py | freelancing-solutions/pinydesk | 7d6a12c33db238ca2f748bf4ddea6d2cf3c16da3 | [
"MIT"
] | 1 | 2021-09-04T22:40:14.000Z | 2021-09-04T22:40:14.000Z | import typing
from flask import jsonify, current_app
from data_service.config.exceptions import DataServiceError
from data_service.main import cache_stocks
from data_service.store.mixins import AmountMixin
from data_service.store.wallet import WalletModel, WalletValidator
from data_service.utils.utils import return_ttl, end_of_month
from data_service.config.exception_handlers import handle_view_errors
from data_service.config.use_context import use_context
class Validator(WalletValidator):
def __init__(self):
super(Validator, self).__init__()
self._max_retries = current_app.config.get('DATASTORE_RETRIES')
self._max_timeout = current_app.config.get('DATASTORE_TIMEOUT')
@staticmethod
def is_uid_none(uid: typing.Union[None, str]) -> bool:
if (uid is None) or (uid == ''):
return True
return False
@staticmethod
async def is_uid_none_async(uid: typing.Union[None, str]) -> bool:
if (uid is None) or (uid == ''):
return True
return False
def can_add_wallet(self, uid: typing.Union[None, str] = None) -> bool:
if not(self.is_uid_none(uid=uid)):
wallet_exist: typing.Union[bool, None] = self.wallet_exist(uid=uid)
if isinstance(wallet_exist, bool):
return not wallet_exist
raise DataServiceError(status=500, description='Unable to verify wallet data')
return False
async def can_add_wallet_async(self, uid: typing.Union[None, str] = None) -> bool:
if not(self.is_uid_none(uid=uid)):
wallet_exist: typing.Union[bool, None] = await self.wallet_exist_async(uid=uid)
if isinstance(wallet_exist, bool):
return not wallet_exist
raise DataServiceError(status=500, description='Unable to verify wallet data')
return False
def can_update_wallet(self, uid: typing.Union[None, str] = None) -> bool:
if not(self.is_uid_none(uid=uid)):
wallet_exist: typing.Union[bool, None] = self.wallet_exist(uid=uid)
if isinstance(wallet_exist, bool):
return wallet_exist
raise DataServiceError(status=500, description='Unable to verify wallet data')
return False
async def can_update_wallet_async(self, uid: typing.Union[None, str] = None) -> bool:
if not(self.is_uid_none(uid=uid)):
wallet_exist: typing.Union[bool, None] = await self.wallet_exist_async(uid=uid)
if isinstance(wallet_exist, bool):
return wallet_exist
raise DataServiceError(status=500, description='Unable to verify wallet data')
return False
def can_reset_wallet(self, uid: typing.Union[None, str]) -> bool:
if not(self.is_uid_none(uid=uid)):
wallet_exist: typing.Union[bool, None] = self.wallet_exist(uid=uid)
if isinstance(wallet_exist, bool):
return wallet_exist
raise DataServiceError(status=500, description='Unable to verify wallet data')
return False
async def can_reset_wallet_async(self, uid: typing.Union[None, str]) -> bool:
if not(self.is_uid_none(uid=uid)):
wallet_exist: typing.Union[bool, None] = await self.wallet_exist_async(uid=uid)
if isinstance(wallet_exist, bool):
return wallet_exist
raise DataServiceError(status=500, description='Unable to verify wallet data')
return False
# noinspection DuplicatedCode
class WalletView(Validator):
"""
view functions for the wallet
# TODO - Refactor Wallet View and improve functionality
"""
def __init__(self):
super(WalletView, self).__init__()
@use_context
@handle_view_errors
def create_wallet(self, uid: typing.Union[str, None], currency: typing.Union[str, None],
paypal_address: typing.Union[str, None]) -> tuple:
if self.can_add_wallet(uid=uid) is True:
wallet_instance: WalletModel = WalletModel()
amount_instance: AmountMixin = AmountMixin()
amount_instance.amount = 0
amount_instance.currency = currency
wallet_instance.uid = uid
wallet_instance.available_funds = amount_instance
wallet_instance.paypal_address = paypal_address
key = wallet_instance.put(retries=self._max_retries, timeout=self._max_timeout)
if key is None:
raise DataServiceError(status=500, description="An Error occurred creating Wallet")
return jsonify({'status': True, 'message': 'successfully created wallet',
'payload': wallet_instance.to_dict()}), 200
return jsonify({'status': False, 'message': 'Unable to create wallet'}), 500
@use_context
@handle_view_errors
async def create_wallet_async(self, uid: typing.Union[str, None], currency: typing.Union[str, None],
paypal_address: typing.Union[str, None]) -> tuple:
if await self.can_add_wallet_async(uid=uid) is True:
wallet_instance: WalletModel = WalletModel()
amount_instance: AmountMixin = AmountMixin()
amount_instance.amount = 0
amount_instance.currency = currency
wallet_instance.uid = uid
wallet_instance.available_funds = amount_instance
wallet_instance.paypal_address = paypal_address
key = wallet_instance.put_async(retries=self._max_retries, timeout=self._max_timeout).get_result()
if key is None:
raise DataServiceError(status=500, description="An Error occurred creating Wallet")
return jsonify({'status': True, 'message': 'successfully created wallet',
'payload': wallet_instance.to_dict()}), 200
return jsonify({'status': False, 'message': 'Unable to create wallet'}), 500
@cache_stocks.cached(timeout=return_ttl(name='medium'), unless=end_of_month)
@use_context
@handle_view_errors
def get_wallet(self, uid: typing.Union[str, None]) -> tuple:
if not(self.is_uid_none(uid=uid)):
wallet_instance: WalletModel = WalletModel.query(WalletModel.uid == uid).get()
return jsonify({'status': True, 'payload': wallet_instance.to_dict(), 'message': 'wallet found'}), 200
return jsonify({'status': False, 'message': 'uid cannot be None'}), 500
@cache_stocks.cached(timeout=return_ttl(name='medium'), unless=end_of_month)
@use_context
@handle_view_errors
async def get_wallet_async(self, uid: typing.Union[str, None]) -> tuple:
if not(self.is_uid_none(uid=uid)):
wallet_instance: WalletModel = WalletModel.query(WalletModel.uid == uid).get_async().get_result()
return jsonify({'status': True, 'payload': wallet_instance.to_dict(), 'message': 'wallet found'}), 200
return jsonify({'status': False, 'message': 'uid cannot be None'}), 500
@use_context
@handle_view_errors
def update_wallet(self, wallet_data: dict) -> tuple:
uid: typing.Union[str, None] = wallet_data.get("uid")
available_funds: typing.Union[int, None] = wallet_data.get("available_funds")
currency: typing.Union[str, None] = wallet_data.get('currency')
paypal_address: typing.Union[str, None] = wallet_data.get("paypal_address")
if self.can_update_wallet(uid=uid) is True:
wall_instance: WalletModel = WalletModel.query(WalletModel.uid == uid).get()
# No need to test for wallet availability as can update returned True
wall_instance.uid = uid
amount_instance: AmountMixin = AmountMixin(amount=available_funds, currency=currency)
wall_instance.available_funds = amount_instance
wall_instance.paypal_address = paypal_address
key = wall_instance.put(retries=self._max_retries, timeout=self._max_timeout)
if key is None:
message: str = "An Error occurred updating Wallet"
raise DataServiceError(status=500, description=message)
return jsonify({'status': True, 'payload': wall_instance.to_dict(),
'message': 'successfully updated wallet'}), 200
return jsonify({'status': False, 'message': 'Unable to update wallet'}), 500
@use_context
@handle_view_errors
async def update_wallet_async(self, wallet_data: dict) -> tuple:
uid: typing.Union[str, None] = wallet_data.get("uid")
available_funds: typing.Union[int, None] = wallet_data.get("available_funds")
currency: typing.Union[str, None] = wallet_data.get('currency')
paypal_address: typing.Union[str, None] = wallet_data.get("paypal_address")
if await self.can_update_wallet_async(uid=uid) is True:
wall_instance: WalletModel = WalletModel.query(WalletModel.uid == uid).get_async().get_result()
# No need to test for wallet availability as can update returned True
wall_instance.uid = uid
amount_instance: AmountMixin = AmountMixin(amount=available_funds, currency=currency)
wall_instance.available_funds = amount_instance
wall_instance.paypal_address = paypal_address
key = wall_instance.put_async(retries=self._max_retries, timeout=self._max_timeout).get_result()
if key is None:
message: str = "Database error while updating wallet"
raise DataServiceError(status=500, description=message)
return jsonify({'status': True, 'payload': wall_instance.to_dict(),
'message': 'successfully updated wallet'}), 200
return jsonify({'status': False, 'message': 'Unable to update wallet'}), 500
@use_context
@handle_view_errors
def reset_wallet(self, wallet_data: dict) -> tuple:
uid: typing.Union[str, None] = wallet_data.get('uid')
currency: typing.Union[str, None] = wallet_data.get('currency')
if self.can_reset_wallet(uid=uid) is True:
wallet_instance: WalletModel = WalletModel.query(WalletModel.uid == uid).get()
amount_instance: AmountMixin = AmountMixin(amount=0, currency=currency)
wallet_instance.available_funds = amount_instance
key = wallet_instance.put(retries=self._max_retries, timeout=self._max_timeout)
if key is None:
message: str = "Database error while updating wallet"
raise DataServiceError(status=500, description=message)
return jsonify({'status': True, 'payload': wallet_instance.to_dict(),
'message': 'wallet is rest'}), 200
return jsonify({'status': False, 'message': 'Unable to reset wallet'}), 500
@use_context
@handle_view_errors
async def reset_wallet_async(self, wallet_data: dict) -> tuple:
uid: typing.Union[str, None] = wallet_data.get('uid')
currency: typing.Union[str, None] = wallet_data.get('currency')
if await self.can_reset_wallet_async(uid=uid) is True:
wallet_instance: WalletModel = WalletModel.query(WalletModel.uid == uid).get_async().get_result()
amount_instance: AmountMixin = AmountMixin(amount=0, currency=currency)
wallet_instance.available_funds = amount_instance
key = wallet_instance.put_async(retries=self._max_retries, timeout=self._max_timeout).get_result()
if key is None:
message: str = "Database error while resetting wallet"
raise DataServiceError(status=500, description=message)
return jsonify({'status': True, 'payload': wallet_instance.to_dict(),
'message': 'wallet is rest'}), 200
return jsonify({'status': False, 'message': 'Unable to reset wallet'}), 500
@cache_stocks.cached(timeout=return_ttl(name='medium'), unless=end_of_month)
@use_context
@handle_view_errors
def return_all_wallets(self) -> tuple:
wallet_list: typing.List[WalletModel] = WalletModel.query().fetch()
payload: typing.List[dict] = [wallet.to_dict() for wallet in wallet_list]
return jsonify({'status': True,
'payload': payload,
'message': 'wallets returned'}), 200
@cache_stocks.cached(timeout=return_ttl(name='medium'), unless=end_of_month)
@use_context
@handle_view_errors
async def return_all_wallets_async(self) -> tuple:
wallet_list: typing.List[WalletModel] = WalletModel.query().fetch_async().get_result()
payload: typing.List[dict] = [wallet.to_dict() for wallet in wallet_list]
return jsonify({'status': True,
'payload': payload,
'message': 'wallets returned'}), 200
@use_context
@handle_view_errors
def return_wallets_by_balance(self, lower_bound: int, higher_bound: int) -> tuple:
# if either lower_bound and higher_bound are not int then exit
if not(isinstance(lower_bound, int) or isinstance(higher_bound, int)):
return jsonify({'status': False, 'message': "specify lower bound and higher bound"}), 500
wallet_list: typing.List[WalletModel] = WalletModel.query(WalletModel.available_funds > lower_bound,
WalletModel.available_funds < higher_bound).fetch()
payload: typing.List[dict] = [wallet.to_dict() for wallet in wallet_list]
return jsonify({'status': True, 'payload': payload, 'message': 'wallets returned'}), 200
@use_context
@handle_view_errors
async def return_wallets_by_balance_async(self, lower_bound: int, higher_bound: int) -> tuple:
# if either lower_bound and higher_bound are not int then exit
if not(isinstance(lower_bound, int) or isinstance(higher_bound, int)):
return jsonify({'status': False, 'message': "specify lower bound and higher bound"}), 500
wallet_list: typing.List[WalletModel] = WalletModel.query(WalletModel.available_funds > lower_bound,
WalletModel.available_funds < higher_bound).fetch_async().get_result()
payload: typing.List[dict] = [wallet.to_dict() for wallet in wallet_list]
return jsonify({'status': True, 'payload': payload, 'message': 'wallets returned'}), 200
@use_context
@handle_view_errors
def wallet_transact(self, uid: str, add: int = None, sub: int = None) -> tuple:
if self.can_update_wallet(uid=uid) is True:
wallet_instance: WalletModel = WalletModel.query(WalletModel.uid == uid).get()
if isinstance(wallet_instance, WalletModel):
if add is not None:
wallet_instance.available_funds.amount += add
if sub is not None:
wallet_instance.available_funds.amount -= sub
key = wallet_instance.put()
if key is None:
message: str = "General error updating database"
raise DataServiceError(status=500, description=message)
message: str = "Successfully created transaction"
return jsonify({'status': True, 'payload': wallet_instance.to_dict(),
'message': message}), 200
message: str = "Unable to find wallet"
return jsonify({'status': False, 'message': message}), 500
@use_context
@handle_view_errors
async def wallet_transact_async(self, uid: str, add: int = None, sub: int = None) -> tuple:
if await self.can_update_wallet_async(uid=uid) is True:
wallet_instance: WalletModel = WalletModel.query(WalletModel.uid == uid).get_async().get_result()
if isinstance(wallet_instance, WalletModel):
if isinstance(add, int):
wallet_instance.available_funds.amount += add
if isinstance(sub, int):
wallet_instance.available_funds.amount -= sub
key = wallet_instance.put_async().get_result()
if key is None:
message: str = "General error updating database"
raise DataServiceError(status=500, description=message)
message: str = "Successfully created transaction"
return jsonify({'status': True, 'payload': wallet_instance.to_dict(),
'message': message}), 200
message: str = "Unable to find wallet"
return jsonify({'status': False, 'message': message}), 500
# TODO add wallet_withdrawals | 53.705128 | 136 | 0.648424 | 1,975 | 16,756 | 5.295696 | 0.075443 | 0.048188 | 0.047232 | 0.030978 | 0.908309 | 0.898747 | 0.886796 | 0.876661 | 0.862511 | 0.851611 | 0 | 0.009869 | 0.250119 | 16,756 | 312 | 137 | 53.705128 | 0.822523 | 0.023932 | 0 | 0.786517 | 0 | 0 | 0.098511 | 0 | 0 | 0 | 0 | 0.00641 | 0 | 1 | 0.048689 | false | 0 | 0.033708 | 0 | 0.247191 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
4ef132b880125385f186366bbf1d4d1aa677e6cc | 46,925 | py | Python | tests/test_setup_onefs.py | willnx/vlab_onefs | df90738806a5a4800b91e62f79090a11a0b01088 | [
"Apache-2.0"
] | 1 | 2019-04-10T16:17:18.000Z | 2019-04-10T16:17:18.000Z | tests/test_setup_onefs.py | willnx/vlab_onefs | df90738806a5a4800b91e62f79090a11a0b01088 | [
"Apache-2.0"
] | 2 | 2018-11-16T19:35:14.000Z | 2019-05-22T19:00:38.000Z | tests/test_setup_onefs.py | willnx/vlab_onefs | df90738806a5a4800b91e62f79090a11a0b01088 | [
"Apache-2.0"
] | null | null | null | # -*- coding: UTF-8 -*-
"""
A suite of tests for the functions in setup_onefs.py
"""
import unittest
from unittest.mock import patch, MagicMock
from vlab_onefs_api.lib.worker import setup_onefs
class TestvSphereConsole(unittest.TestCase):
    """Exercises the ``vSphereConsole`` HTML-console wrapper."""

    # Every test drives the console against the same (mocked) URL.
    URL = 'https://someHTMLconsole.com'

    @patch.object(setup_onefs, 'webdriver')
    def test_init(self, fake_webdriver):
        """``__init__`` constructs a usable vSphereConsole object"""
        console = setup_onefs.vSphereConsole(url=self.URL)

        self.assertIsInstance(console, setup_onefs.vSphereConsole)

    @patch.object(setup_onefs.vSphereConsole, '_login')
    @patch.object(setup_onefs, 'webdriver')
    def test_auto_login(self, fake_webdriver, fake_login):
        """Creating the vSphereConsole object automatically logs a user into the HTML console"""
        setup_onefs.vSphereConsole(url=self.URL)

        self.assertEqual(fake_login.call_count, 1)

    @patch.object(setup_onefs.vSphereConsole, '_get_console')
    @patch.object(setup_onefs, 'webdriver')
    def test_finds_console(self, fake_webdriver, fake_get_console):
        """Creating the vSphereConsole object binds to the console HTML object"""
        setup_onefs.vSphereConsole(url=self.URL)

        self.assertEqual(fake_get_console.call_count, 1)

    @patch.object(setup_onefs, 'webdriver')
    def test_with(self, fake_webdriver):
        """vSphereConsole auto-closes the session upon exiting a ``with`` statement"""
        driver = MagicMock()
        fake_webdriver.Chrome.return_value = driver

        with setup_onefs.vSphereConsole(url=self.URL):
            pass

        self.assertEqual(driver.quit.call_count, 1)

    @patch.object(setup_onefs.vSphereConsole, '_get_console')
    @patch.object(setup_onefs.time, 'sleep')
    @patch.object(setup_onefs, 'webdriver')
    def test_send_keys(self, fake_webdriver, fake_sleep, fake_get_console):
        """``send_keys`` forwards the supplied input to the HTML console"""
        html_console = MagicMock()
        fake_get_console.return_value = html_console

        with setup_onefs.vSphereConsole(url=self.URL) as console:
            console.send_keys('woot', auto_enter=False)

        sent_args, _ = html_console.send_keys.call_args
        self.assertEqual(sent_args, ('woot',))

    @patch.object(setup_onefs.vSphereConsole, '_get_console')
    @patch.object(setup_onefs.time, 'sleep')
    @patch.object(setup_onefs, 'webdriver')
    def test_send_keys_pauses(self, fake_webdriver, fake_sleep, fake_get_console):
        """``send_keys`` pauses once so the HTML console can 'catch up'"""
        fake_get_console.return_value = MagicMock()

        with setup_onefs.vSphereConsole(url=self.URL) as console:
            console.send_keys('woot', auto_enter=False)

        self.assertEqual(fake_sleep.call_count, 1)

    @patch.object(setup_onefs.vSphereConsole, '_get_console')
    @patch.object(setup_onefs.time, 'sleep')
    @patch.object(setup_onefs, 'webdriver')
    def test_send_keys_auto_enters(self, fake_webdriver, fake_sleep, fake_get_console):
        """``send_keys`` automatically sends the ENTER key by default"""
        html_console = MagicMock()
        fake_get_console.return_value = html_console

        with setup_onefs.vSphereConsole(url=self.URL) as console:
            console.send_keys('woot')

        # call_args holds the *last* call, which must be the implicit ENTER.
        last_sent, _ = html_console.send_keys.call_args
        self.assertEqual(last_sent, (setup_onefs.Keys.ENTER,))
@patch.object(setup_onefs.time, 'sleep')
@patch.object(setup_onefs, 'vSphereConsole')
class TestSetupFunctions(unittest.TestCase):
"""A suite of test cases for the functions within setup_onefs.py"""
@patch.object(setup_onefs, 'enable_compliance_mode')
def test_join_existing_cluster_compliance(self, fake_enable_compliance_mode, fake_vSphereConsole, fake_sleep):
"""``join_existing_cluster`` sets the node into complinace mode"""
fake_logger = MagicMock()
setup_onefs.join_existing_cluster('https://someHTMLconsole.com', 'mycluster', True, fake_logger)
self.assertTrue(fake_enable_compliance_mode.called)
def test_join_existing_cluster(self, fake_vSphereConsole, fake_sleep):
"""``join_existing_cluster`` returns None"""
fake_logger = MagicMock()
output = setup_onefs.join_existing_cluster('https://someHTMLconsole.com', 'mycluster', False, fake_logger)
expected = None
self.assertEqual(output, expected)
def test_join_existing_cluster_with(self, fake_vSphereConsole, fake_sleep):
"""``join_existing_cluster`` uses context manager of vSphereConsole"""
fake_logger = MagicMock()
setup_onefs.join_existing_cluster('https://someHTMLconsole.com', 'mycluster', False, fake_logger)
call_count = fake_vSphereConsole.return_value.__enter__.call_count
expected = 1
self.assertEqual(call_count, expected)
def test_join_existing_cluster_pause(self, fake_vSphereConsole, fake_sleep):
"""``join_existing_cluster`` waits for the disks to format"""
fake_logger = MagicMock()
setup_onefs.join_existing_cluster('https://someHTMLconsole.com', 'mycluster', False, fake_logger)
waited_for_prompt = fake_vSphereConsole.return_value.__enter__.return_value.wait_for_prompt.called
self.assertTrue(waited_for_prompt)
@patch.object(setup_onefs, 'format_disks')
def test_join_existing_cluster_formats(self, fake_format_disks, fake_vSphereConsole, fake_sleep):
"""``join_existing_cluster`` formats the disks"""
fake_logger = MagicMock()
setup_onefs.join_existing_cluster('https://someHTMLconsole.com', 'mycluster', False, fake_logger)
formatted_disks = fake_format_disks.called
self.assertTrue(formatted_disks)
@patch.object(setup_onefs, 'get_compliance_license')
def test_configure_new_cluster_compliance(self, fake_get_compliance_license, fake_vSphereConsole, fake_sleep):
"""``configure_new_cluster`` Obtains a license when compliance is True"""
fake_logger = MagicMock()
output = setup_onefs.configure_new_cluster(version='8.1.1.1',
logger=fake_logger,
console_url='https://someHTMLconsole.com',
cluster_name='mycluster',
int_netmask='255.255.255.0',
int_ip_low='8.6.7.5',
int_ip_high='8.6.7.50',
ext_netmask='255.255.255.0',
ext_ip_low='3.0.9.2',
ext_ip_high='3.0.9.20',
gateway='3.0.9.1',
dns_servers='1.1.1.1',
encoding='utf-8',
sc_zonename='myzone.foo.org',
compliance=True,
smartconnect_ip='3.0.9.21')
self.assertTrue(fake_get_compliance_license.called)
def test_configure_new_cluster(self, fake_vSphereConsole, fake_sleep):
"""``configure_new_cluster`` returns None"""
fake_logger = MagicMock()
output = setup_onefs.configure_new_cluster(version='8.1.1.1',
logger=fake_logger,
console_url='https://someHTMLconsole.com',
cluster_name='mycluster',
int_netmask='255.255.255.0',
int_ip_low='8.6.7.5',
int_ip_high='8.6.7.50',
ext_netmask='255.255.255.0',
ext_ip_low='3.0.9.2',
ext_ip_high='3.0.9.20',
gateway='3.0.9.1',
dns_servers='1.1.1.1',
encoding='utf-8',
sc_zonename='myzone.foo.org',
compliance=False,
smartconnect_ip='3.0.9.21')
expected = None
self.assertEqual(output, expected)
@patch.object(setup_onefs, 'configure_new_7_2_cluster')
def test_configure_new_cluster_7_2(self, fake_configure_new_7_2_cluster, fake_vSphereConsole, fake_sleep):
"""``configure_new_cluster`` executes the correct function for OneFS 7.2.x"""
fake_logger = MagicMock()
setup_onefs.configure_new_cluster(version='7.2.1.6', compliance=False, logger=fake_logger)
called = fake_configure_new_7_2_cluster.call_count
expected = 1
self.assertEqual(called, expected)
@patch.object(setup_onefs, 'configure_new_8_0_cluster')
def test_configure_new_cluster_8_0(self, fake_configure_new_8_0_cluster, fake_vSphereConsole, fake_sleep):
"""``configure_new_cluster`` executes the correct function for OneFS 8.0.0.x"""
fake_logger = MagicMock()
setup_onefs.configure_new_cluster(version='8.0.0.1', compliance=False, logger=fake_logger)
called = fake_configure_new_8_0_cluster.call_count
expected = 1
self.assertEqual(called, expected)
@patch.object(setup_onefs, 'configure_new_8_1_cluster')
def test_configure_new_cluster_8_1_0(self, fake_configure_new_8_1_cluster, fake_vSphereConsole, fake_sleep):
"""``configure_new_cluster`` executes the correct function for OneFS 8.1.0.x"""
fake_logger = MagicMock()
setup_onefs.configure_new_cluster(version='8.1.0.3', compliance=False, logger=fake_logger)
called = fake_configure_new_8_1_cluster.call_count
expected = 1
self.assertEqual(called, expected)
@patch.object(setup_onefs, 'configure_new_8_1_cluster')
def test_configure_new_cluster_8_1_1(self, fake_configure_new_8_1_cluster, fake_vSphereConsole, fake_sleep):
"""``configure_new_cluster`` executes the correct function for OneFS 8.1.1.x"""
fake_logger = MagicMock()
setup_onefs.configure_new_cluster(version='8.1.1.2', compliance=False, logger=fake_logger)
called = fake_configure_new_8_1_cluster.call_count
expected = 1
self.assertEqual(called, expected)
@patch.object(setup_onefs, 'configure_new_8_1_2_cluster')
def test_configure_new_cluster_8_1_2(self, fake_configure_new_8_1_2_cluster, fake_vSphereConsole, fake_sleep):
"""``configure_new_cluster`` executes the correct function for OneFS 8.1.2.x"""
fake_logger = MagicMock()
setup_onefs.configure_new_cluster(version='8.1.2.0', compliance=False, logger=fake_logger)
called = fake_configure_new_8_1_2_cluster.call_count
expected = 1
self.assertEqual(called, expected)
@patch.object(setup_onefs, 'configure_new_8_2_0_cluster')
def test_configure_new_cluster_8_2_0(self, fake_configure_new_8_2_0_cluster, fake_vSphereConsole, fake_sleep):
"""``configure_new_cluster`` executes the correct function for OneFS 8.2.0.x"""
fake_logger = MagicMock()
setup_onefs.configure_new_cluster(version='8.2.0.0', compliance=False, logger=fake_logger)
called = fake_configure_new_8_2_0_cluster.call_count
expected = 1
self.assertEqual(called, expected)
@patch.object(setup_onefs, 'enable_compliance_mode')
def test_configure_new_8_0_cluster_compliance(self, fake_enable_compliance_mode, fake_vSphereConsole, fake_sleep):
"""``configure_new_8_0_cluster`` can configure a compliance mode cluster"""
fake_logger = MagicMock()
output = setup_onefs.configure_new_8_0_cluster(logger=fake_logger,
console_url='https://someHTMLconsole.com',
cluster_name='mycluster',
int_netmask='255.255.255.0',
int_ip_low='8.6.7.5',
int_ip_high='8.6.7.50',
ext_netmask='255.255.255.0',
ext_ip_low='3.0.9.2',
ext_ip_high='3.0.9.20',
gateway='3.0.9.1',
dns_servers='1.1.1.1',
encoding='utf-8',
sc_zonename='myzone.foo.org',
compliance_license='some-internal-license',
smartconnect_ip='3.0.9.21')
self.assertTrue(fake_enable_compliance_mode.called)
@patch.object(setup_onefs, 'make_new_and_accept_eual')
def test_configure_new_8_0_cluster_compliance_license(self, fake_make_new_and_accept_eual, fake_vSphereConsole, fake_sleep):
    """``configure_new_8_0_cluster`` passes the license as needed to configure a compliance mode cluster"""
    setup_onefs.configure_new_8_0_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        sc_zonename='myzone.foo.org', compliance_license='some-internal-license',
        smartconnect_ip='3.0.9.21')
    # The license is handed over as the 2nd positional argument.
    sent_license = fake_make_new_and_accept_eual.call_args[0][1]
    self.assertEqual(sent_license, 'some-internal-license')
def test_configure_new_8_0_cluster(self, fake_vSphereConsole, fake_sleep):
    """``configure_new_8_0_cluster`` returns None"""
    result = setup_onefs.configure_new_8_0_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        sc_zonename='myzone.foo.org', compliance_license=None,
        smartconnect_ip='3.0.9.21')
    self.assertIsNone(result)
@patch.object(setup_onefs, 'enable_compliance_mode')
def test_configure_new_7_2_cluster_compliance(self, fake_enable_compliance_mode, fake_vSphereConsole, fake_sleep):
    """``configure_new_7_2_cluster`` can configure a compliance mode cluster"""
    setup_onefs.configure_new_7_2_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        sc_zonename='myzone.foo.org', compliance_license='some-internal-license',
        smartconnect_ip='3.0.9.21')
    # Supplying a license must trigger the compliance-mode switch.
    self.assertTrue(fake_enable_compliance_mode.called)
def test_configure_new_7_2_cluster(self, fake_vSphereConsole, fake_sleep):
    """``configure_new_7_2_cluster`` returns None"""
    result = setup_onefs.configure_new_7_2_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        sc_zonename='myzone.foo.org', compliance_license=None,
        smartconnect_ip='3.0.9.21')
    self.assertIsNone(result)
@patch.object(setup_onefs, 'enable_compliance_mode')
def test_configure_new_8_1_cluster_compliance(self, fake_enable_compliance_mode, fake_vSphereConsole, fake_sleep):
    """``configure_new_8_1_cluster`` can configure a compliance mode cluster"""
    setup_onefs.configure_new_8_1_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        sc_zonename='myzone.foo.org', compliance_license='some-internal-license',
        smartconnect_ip='3.0.9.21')
    # Supplying a license must trigger the compliance-mode switch.
    self.assertTrue(fake_enable_compliance_mode.called)
@patch.object(setup_onefs, 'make_new_and_accept_eual')
def test_configure_new_8_1_cluster_compliance_license(self, fake_make_new_and_accept_eual, fake_vSphereConsole, fake_sleep):
    """``configure_new_8_1_cluster`` passes the license as needed to configure a compliance mode cluster"""
    setup_onefs.configure_new_8_1_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        sc_zonename='myzone.foo.org', compliance_license='some-internal-license',
        smartconnect_ip='3.0.9.21')
    # The license is handed over as the 2nd positional argument.
    sent_license = fake_make_new_and_accept_eual.call_args[0][1]
    self.assertEqual(sent_license, 'some-internal-license')
@patch.object(setup_onefs, 'enable_compliance_mode')
def test_configure_new_8_1_2_cluster_compliance(self, fake_enable_compliance_mode, fake_vSphereConsole, fake_sleep):
    """``configure_new_8_1_2_cluster`` can configure a compliance mode cluster"""
    setup_onefs.configure_new_8_1_2_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        version='8.1.2.0', sc_zonename='myzone.foo.org',
        compliance_license='some-internal-license', smartconnect_ip='3.0.9.21')
    # Supplying a license must trigger the compliance-mode switch.
    self.assertTrue(fake_enable_compliance_mode.called)
@patch.object(setup_onefs, 'make_new_and_accept_eual')
def test_configure_new_8_1_2_cluster_compliance_license(self, fake_make_new_and_accept_eual, fake_vSphereConsole, fake_sleep):
    """``configure_new_8_1_2_cluster`` passes the license as needed to configure a compliance mode cluster"""
    setup_onefs.configure_new_8_1_2_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        version='8.1.2.0', sc_zonename='myzone.foo.org',
        compliance_license='some-internal-license', smartconnect_ip='3.0.9.21')
    # The license is handed over as the 2nd positional argument.
    sent_license = fake_make_new_and_accept_eual.call_args[0][1]
    self.assertEqual(sent_license, 'some-internal-license')
def test_configure_new_8_1_cluster(self, fake_vSphereConsole, fake_sleep):
    """``configure_new_8_1_cluster`` returns None"""
    result = setup_onefs.configure_new_8_1_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        sc_zonename='myzone.foo.org', compliance_license=None,
        smartconnect_ip='3.0.9.21')
    self.assertIsNone(result)
@patch.object(setup_onefs, 'enable_compliance_mode')
def test_configure_new_8_2_0_cluster_compliance(self, fake_enable_compliance_mode, fake_vSphereConsole, fake_sleep):
    """``configure_new_8_2_0_cluster`` can configure a compliance mode cluster"""
    setup_onefs.configure_new_8_2_0_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        sc_zonename='myzone.foo.org', compliance_license='some-internal-license',
        smartconnect_ip='3.0.9.21')
    # Supplying a license must trigger the compliance-mode switch.
    self.assertTrue(fake_enable_compliance_mode.called)
@patch.object(setup_onefs, 'make_new_and_accept_eual')
def test_configure_new_8_2_0_cluster_compliance_license(self, fake_make_new_and_accept_eual, fake_vSphereConsole, fake_sleep):
    """``configure_new_8_2_0_cluster`` Needs no compliance mode license"""
    setup_onefs.configure_new_8_2_0_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        sc_zonename='myzone.foo.org', compliance_license='some-internal-license',
        smartconnect_ip='3.0.9.21')
    # On 8.2.0 the EULA helper must receive ``False`` even when a license
    # string was passed in by the caller.
    sent_license = fake_make_new_and_accept_eual.call_args[1]['compliance_license']
    self.assertEqual(sent_license, False)
def test_configure_new_8_2_0_cluster(self, fake_vSphereConsole, fake_sleep):
    """``configure_new_8_2_0_cluster`` returns None"""
    result = setup_onefs.configure_new_8_2_0_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        sc_zonename='myzone.foo.org', compliance_license=None,
        smartconnect_ip='3.0.9.21')
    self.assertIsNone(result)
@patch.object(setup_onefs, 'make_new_and_accept_eual')
def test_configure_new_8_2_0_cluster_eula(self, fake_make_new_and_accept_eual, fake_vSphereConsole, fake_sleep):
    """``configure_new_8_2_0_cluster`` presses enter before trying to accept the EULA"""
    setup_onefs.configure_new_8_2_0_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        sc_zonename='myzone.foo.org', compliance_license=None,
        smartconnect_ip='3.0.9.21')
    # The EULA helper must be told to send an initial <enter> keystroke.
    self.assertTrue(fake_make_new_and_accept_eual.call_args[1]['auto_enter'])
def test_configure_new_8_1_2_cluster(self, fake_vSphereConsole, fake_sleep):
    """``configure_new_8_1_2_cluster`` returns None"""
    result = setup_onefs.configure_new_8_1_2_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        version='8.1.2.0', sc_zonename='myzone.foo.org', compliance_license=None,
        smartconnect_ip='3.0.9.21')
    self.assertIsNone(result)
@patch.object(setup_onefs, 'set_esrs')
def test_configure_new_8_1_2_cluster_esrs(self, fake_set_esrs, fake_vSphereConsole, fake_sleep):
    """``configure_new_8_1_2_cluster`` does not config ESRS"""
    setup_onefs.configure_new_8_1_2_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        version='8.1.2.0', sc_zonename='myzone.foo.org', compliance_license=None,
        smartconnect_ip='3.0.9.21')
    # ESRS setup must be skipped entirely on this version.
    self.assertEqual(fake_set_esrs.call_count, 0)
@patch.object(setup_onefs, 'make_new_and_accept_eual')
def test_compliance_8_1_3(self, fake_make_new_and_accept_eual, fake_vSphereConsole, fake_sleep):
    """``configure_new_8_1_2_cluster`` Does not supply a license for 8.1.3 clusters"""
    setup_onefs.configure_new_8_1_2_cluster(
        logger=MagicMock(),
        console_url='https://someHTMLconsole.com', cluster_name='mycluster',
        int_netmask='255.255.255.0', int_ip_low='8.6.7.5', int_ip_high='8.6.7.50',
        ext_netmask='255.255.255.0', ext_ip_low='3.0.9.2', ext_ip_high='3.0.9.20',
        gateway='3.0.9.1', dns_servers='1.1.1.1', encoding='utf-8',
        version='8.1.3.0', sc_zonename='myzone.foo.org',
        compliance_license='some-license', smartconnect_ip='3.0.9.21')
    # Even though a license was supplied, 8.1.3 must pass ``None`` along.
    self.assertIsNone(fake_make_new_and_accept_eual.call_args[0][1])
class TestWizardRoutines(unittest.TestCase):
    """
    A set of test cases for the functions that handle a specific part of the
    OneFS configuration wizard.
    """
    def setUp(self):
        # Fixed: this was declared with ``@classmethod``; unittest invokes
        # setUp on each test instance, so a plain method gives every test
        # its own fresh fake console instead of a shared class attribute.
        self.fake_console = MagicMock()

    @patch.object(setup_onefs.time, 'sleep')
    def test_format_disks(self, fake_sleep):
        """``format_disks`` returns None"""
        output = setup_onefs.format_disks(self.fake_console)
        expected = None
        self.assertEqual(output, expected)

    @patch.object(setup_onefs.time, 'sleep')
    def test_format_disks_waits_for_prompt(self, fake_sleep):
        """``format_disks`` blocks while the disks format"""
        # Fixed: this test was also named ``test_format_disks`` and shadowed
        # the test above, so that one never ran. ``time.sleep`` is patched
        # here as well to keep the test fast.
        setup_onefs.format_disks(self.fake_console)
        waited_for_prompt = self.fake_console.wait_for_prompt.called
        self.assertTrue(waited_for_prompt)

    def test_make_new_and_accept_eual(self):
        """``make_new_and_accept_eual`` returns None"""
        output = setup_onefs.make_new_and_accept_eual(self.fake_console, None)
        expected = None
        self.assertEqual(output, expected)

    @patch.object(setup_onefs.time, 'sleep')
    def test_set_passwords(self, fake_sleep):
        """``set_passwords`` returns None"""
        output = setup_onefs.set_passwords(self.fake_console)
        expected = None
        self.assertEqual(output, expected)

    def test_set_esrs(self):
        """``set_esrs`` returns None"""
        output = setup_onefs.set_esrs(self.fake_console)
        expected = None
        self.assertEqual(output, expected)

    def test_set_name(self):
        """``set_name`` returns None"""
        # Fixed: the docstring previously referenced ``set_esrs``.
        output = setup_onefs.set_name(self.fake_console, 'mycluster')
        expected = None
        self.assertEqual(output, expected)

    @patch.object(setup_onefs.time, 'sleep')
    def test_set_encoding(self, fake_sleep):
        """``set_encoding`` returns None"""
        output = setup_onefs.set_encoding(self.fake_console, 'utf-8')
        expected = None
        self.assertEqual(output, expected)

    @patch.object(setup_onefs.time, 'sleep')
    def test_config_network(self, fake_sleep):
        """``config_network`` returns None"""
        output = setup_onefs.config_network(self.fake_console,
                                            netmask='255.255.255.0',
                                            ip_low='2.2.2.2',
                                            ip_high='2.2.2.20')
        expected = None
        self.assertEqual(output, expected)

    def test_set_default_gateway(self):
        """``set_default_gateway`` returns None"""
        output = setup_onefs.set_default_gateway(self.fake_console, '2.2.2.1')
        expected = None
        self.assertEqual(output, expected)

    def test_set_smartconnect(self):
        """``set_smartconnect`` returns None"""
        output = setup_onefs.set_smartconnect(self.fake_console,
                                              sc_zonename='myzone.foo.com',
                                              smartconnect_ip='3.3.3.3')
        expected = None
        self.assertEqual(output, expected)

    def test_set_smartconnect_zone_name_optional(self):
        """``set_smartconnect`` setting the SmartConnect zone name is optional"""
        output = setup_onefs.set_smartconnect(self.fake_console,
                                              smartconnect_ip='3.3.3.3')
        expected = None
        self.assertEqual(output, expected)

    def test_set_smartconnect_sip_optional(self):
        """``set_smartconnect`` setting the SmartConnect IP is optional"""
        output = setup_onefs.set_smartconnect(self.fake_console,
                                              sc_zonename='myzone.foo.com')
        expected = None
        self.assertEqual(output, expected)

    def test_set_smartconnect_all_optional(self):
        """``set_smartconnect`` All smartconnect settings are skipped if not supplied"""
        setup_onefs.set_smartconnect(self.fake_console)
        call_count = self.fake_console.send_keys.call_count
        expected = 1
        self.assertEqual(call_count, expected)

    def test_set_dns(self):
        """``set_dns`` returns None"""
        output = setup_onefs.set_dns(self.fake_console, dns_servers='1.1.1.1')
        expected = None
        self.assertEqual(output, expected)

    def test_set_timezone(self):
        """``set_timezone`` returns None"""
        output = setup_onefs.set_timezone(self.fake_console)
        expected = None
        self.assertEqual(output, expected)

    def test_set_join_mode(self):
        """``set_join_mode`` returns None"""
        output = setup_onefs.set_join_mode(self.fake_console)
        expected = None
        self.assertEqual(output, expected)

    def test_commit_config(self):
        """``commit_config`` returns None"""
        output = setup_onefs.commit_config(self.fake_console)
        expected = None
        self.assertEqual(output, expected)

    @patch.object(setup_onefs, 'requests')
    def test_get_compliance_license(self, fake_requests):
        """``get_compliance_license`` returns a license"""
        fake_resp = MagicMock()
        fake_resp.content = b'some-internal-license\n'
        fake_requests.get.return_value = fake_resp
        # Renamed local from ``license`` to avoid shadowing the builtin.
        the_license = setup_onefs.get_compliance_license()
        expected = 'some-internal-license'
        self.assertEqual(the_license, expected)

    def test_set_sysctls_logs_in(self):
        """``set_sysctls`` logins into the OneFS shell"""
        setup_onefs.set_sysctls(self.fake_console)
        user, password = self.fake_console.send_keys.call_args_list[:2]
        user = user[0][0]  # pull the 1st positional arg
        password = password[0][0]
        sent = (user, password)
        expected = ('root', setup_onefs.DEFAULT_ROOT_PW)
        self.assertEqual(sent, expected)

    def test_set_sysctls(self):
        """``set_sysctls`` sets the expected sysctls"""
        setup_onefs.set_sysctls(self.fake_console)
        sysctls = self.fake_console.send_keys.call_args_list[2:]  # chop off the login
        sysctls.pop()  # chop off the exit
        sysctls = [x[0][0] for x in sysctls]
        expected = ['isi_sysctl_cluster kern.cam.da.default_timeout=180',
                    'isi_sysctl_cluster debug.debugger_on_panic=0']
        # sorted() to avoid false positive due to ordering
        self.assertEqual(sorted(sysctls), sorted(expected))

    def test_set_sysctls_logs_out(self):
        """``set_sysctls`` exits the terminal once done"""
        setup_onefs.set_sysctls(self.fake_console)
        # Renamed local from ``exit`` to avoid shadowing the builtin.
        last_command = self.fake_console.send_keys.call_args_list[-1][0][0]
        command = 'exit'
        self.assertEqual(last_command, command)

    def test_set_sysctls_compadmin(self):
        """``set_sysctls`` logs in as compadmin in compliance mode"""
        setup_onefs.set_sysctls(self.fake_console, compliance_mode=True)
        user = self.fake_console.send_keys.call_args_list[:1]
        user = user[0][0][0]  # pull the 1st positional arg
        expected = 'compadmin'
        self.assertEqual(user, expected)

    def test_set_sysctls_sudo(self):
        """``set_sysctls`` uses 'sudo' to set the systctls"""
        setup_onefs.set_sysctls(self.fake_console, compliance_mode=True)
        sysctls = self.fake_console.send_keys.call_args_list[2:]  # chop off the login
        sysctls.pop()  # chop off the exit
        sysctls = [x[0][0] for x in sysctls]
        expected = ['sudo isi_sysctl_cluster kern.cam.da.default_timeout=180',
                    'sudo isi_sysctl_cluster debug.debugger_on_panic=0']
        # sorted() to avoid false positive due to ordering
        self.assertEqual(sorted(sysctls), sorted(expected))
# Allow running this test module directly (e.g. ``python <this file>``).
if __name__ == '__main__':
    unittest.main()
| 53.628571 | 130 | 0.487906 | 4,713 | 46,925 | 4.534691 | 0.054742 | 0.051001 | 0.010668 | 0.042252 | 0.856261 | 0.832257 | 0.812465 | 0.778448 | 0.752807 | 0.70452 | 0 | 0.04903 | 0.422355 | 46,925 | 874 | 131 | 53.689931 | 0.73943 | 0.081875 | 0 | 0.745342 | 0 | 0 | 0.10087 | 0.020123 | 0 | 0 | 0 | 0 | 0.093168 | 1 | 0.094721 | false | 0.009317 | 0.004658 | 0 | 0.104037 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
f6339073b652d3edc6e86998a110d1f8bbb7ac09 | 161 | py | Python | frameworks/constantpredictor/__init__.py | Ennosigaeon/automlbenchmark | bd3e529d641b64300a075d59408203d537311b7e | [
"MIT"
] | 282 | 2018-09-19T09:45:46.000Z | 2022-03-30T04:05:51.000Z | frameworks/constantpredictor/__init__.py | Ennosigaeon/automlbenchmark | bd3e529d641b64300a075d59408203d537311b7e | [
"MIT"
] | 267 | 2018-11-02T11:43:11.000Z | 2022-03-31T08:58:16.000Z | frameworks/constantpredictor/__init__.py | Ennosigaeon/automlbenchmark | bd3e529d641b64300a075d59408203d537311b7e | [
"MIT"
] | 104 | 2018-10-17T19:32:36.000Z | 2022-03-19T22:47:59.000Z |
def version():
    """Return the installed scikit-learn version string.

    Imported lazily so merely importing this package does not require
    scikit-learn to be present.
    """
    import sklearn
    return sklearn.__version__
def run(*args, **kwargs):
    """Delegate execution to this framework's ``exec`` module.

    The import happens lazily inside the call so the package can be
    imported without pulling in the executor's dependencies.
    """
    from .exec import run as _run
    return _run(*args, **kwargs)
| 16.1 | 35 | 0.664596 | 20 | 161 | 4.95 | 0.5 | 0.141414 | 0.262626 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.229814 | 161 | 9 | 36 | 17.888889 | 0.798387 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
1443fc3495df1182d9211561a21d2f5978cbf52b | 25,114 | py | Python | tensorhub/models/image/classifiers/classification_wrapper/transfer_learning.py | navalchand/tensorhub | cc3140653b43bb9126055f61b31200e0d2b6e3c6 | [
"MIT"
] | null | null | null | tensorhub/models/image/classifiers/classification_wrapper/transfer_learning.py | navalchand/tensorhub | cc3140653b43bb9126055f61b31200e0d2b6e3c6 | [
"MIT"
] | null | null | null | tensorhub/models/image/classifiers/classification_wrapper/transfer_learning.py | navalchand/tensorhub | cc3140653b43bb9126055f61b31200e0d2b6e3c6 | [
"MIT"
] | null | null | null | # Copyright 2019 The TensorHub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# Load packages
from tensorflow import keras
from tensorhub.models.image.classifiers.classification_wrapper.model_tail import ModelTail
class VGG16(ModelTail):
    """Image classifier built on the VGG16 backbone, with transfer learning
    from imagenet weights.

    Arguments:
        ModelTail {cls} -- Template class that stitches a dense classifier
            head onto a convolutional base.
    """

    def __init__(self, n_classes, img_height=224, img_width=224, weights="imagenet", num_nodes=None, dropouts=None, activation="relu"):
        """Record the input geometry and weight source, then let ``ModelTail``
        configure the classifier head.

        Arguments:
            n_classes {int} -- Number of target classes.

        Keyword Arguments:
            img_height {int} -- Input image height in pixels.
            img_width {int} -- Input image width in pixels.
            weights {str} -- "imagenet" downloads pre-trained imagenet weights;
                otherwise a path to custom trained weights.
            num_nodes {list} -- Node count for each dense layer of the head.
            dropouts {list} -- Dropout rate for each dense layer of the head.
            activation {str} -- Activation used by every dense layer.
        """
        self.img_height, self.img_width = img_height, img_width
        self.weights = weights
        # Delegate classifier-head configuration to the shared template.
        super(VGG16, self).__init__(n_classes, num_nodes, dropouts, activation)

    def model(self):
        """Assemble and return the full classification model.

        Returns:
            keras-model -- Backbone plus dense head for the requested classes.
        """
        input_shape = (self.img_height, self.img_width, 3)
        # Convolutional backbone without its original top classifier.
        self.base_model = keras.applications.vgg16.VGG16(include_top=False, weights=self.weights, input_shape=input_shape)
        # Dense head built from the configured nodes/dropouts/activation.
        head = self.create_model_tail(self.base_model)
        # Wire the backbone output into the head to form the final model.
        return keras.models.Model(inputs=self.base_model.input, outputs=head(self.base_model.output))
class VGG19(ModelTail):
    """Image classifier built on the VGG19 backbone, with transfer learning
    from imagenet weights.

    Arguments:
        ModelTail {cls} -- Template class that stitches a dense classifier
            head onto a convolutional base.
    """

    def __init__(self, n_classes, img_height=224, img_width=224, weights="imagenet", num_nodes=None, dropouts=None, activation="relu"):
        """Record the input geometry and weight source, then let ``ModelTail``
        configure the classifier head.

        Arguments:
            n_classes {int} -- Number of target classes.

        Keyword Arguments:
            img_height {int} -- Input image height in pixels.
            img_width {int} -- Input image width in pixels.
            weights {str} -- "imagenet" downloads pre-trained imagenet weights;
                otherwise a path to custom trained weights.
            num_nodes {list} -- Node count for each dense layer of the head.
            dropouts {list} -- Dropout rate for each dense layer of the head.
            activation {str} -- Activation used by every dense layer.
        """
        self.img_height, self.img_width = img_height, img_width
        self.weights = weights
        # Delegate classifier-head configuration to the shared template.
        super(VGG19, self).__init__(n_classes, num_nodes, dropouts, activation)

    def model(self):
        """Assemble and return the full classification model.

        Returns:
            keras-model -- Backbone plus dense head for the requested classes.
        """
        input_shape = (self.img_height, self.img_width, 3)
        # Convolutional backbone without its original top classifier.
        self.base_model = keras.applications.vgg19.VGG19(include_top=False, weights=self.weights, input_shape=input_shape)
        # Dense head built from the configured nodes/dropouts/activation.
        head = self.create_model_tail(self.base_model)
        # Wire the backbone output into the head to form the final model.
        return keras.models.Model(inputs=self.base_model.input, outputs=head(self.base_model.output))
class MobileNet(ModelTail):
    """Image classifier built on the MobileNet backbone, with transfer
    learning from imagenet weights.

    Arguments:
        ModelTail {cls} -- Template class that stitches a dense classifier
            head onto a convolutional base.
    """

    def __init__(self, n_classes, img_height=224, img_width=224, weights="imagenet", num_nodes=None, dropouts=None, activation="relu"):
        """Record the input geometry and weight source, then let ``ModelTail``
        configure the classifier head.

        Arguments:
            n_classes {int} -- Number of target classes.

        Keyword Arguments:
            img_height {int} -- Input image height in pixels.
            img_width {int} -- Input image width in pixels.
            weights {str} -- "imagenet" downloads pre-trained imagenet weights;
                otherwise a path to custom trained weights.
            num_nodes {list} -- Node count for each dense layer of the head.
            dropouts {list} -- Dropout rate for each dense layer of the head.
            activation {str} -- Activation used by every dense layer.
        """
        self.img_height, self.img_width = img_height, img_width
        self.weights = weights
        # Delegate classifier-head configuration to the shared template.
        super(MobileNet, self).__init__(n_classes, num_nodes, dropouts, activation)

    def model(self):
        """Assemble and return the full classification model.

        Returns:
            keras-model -- Backbone plus dense head for the requested classes.
        """
        input_shape = (self.img_height, self.img_width, 3)
        # Convolutional backbone without its original top classifier.
        self.base_model = keras.applications.mobilenet.MobileNet(include_top=False, weights=self.weights, input_shape=input_shape)
        # Dense head built from the configured nodes/dropouts/activation.
        head = self.create_model_tail(self.base_model)
        # Wire the backbone output into the head to form the final model.
        return keras.models.Model(inputs=self.base_model.input, outputs=head(self.base_model.output))
class ResNet50(ModelTail):
    """Image classifier built on the ResNet50 backbone, with transfer
    learning from imagenet weights.

    Arguments:
        ModelTail {cls} -- Template class that stitches a dense classifier
            head onto a convolutional base.
    """

    def __init__(self, n_classes, img_height=224, img_width=224, weights="imagenet", num_nodes=None, dropouts=None, activation="relu"):
        """Record the input geometry and weight source, then let ``ModelTail``
        configure the classifier head.

        Arguments:
            n_classes {int} -- Number of target classes.

        Keyword Arguments:
            img_height {int} -- Input image height in pixels.
            img_width {int} -- Input image width in pixels.
            weights {str} -- "imagenet" downloads pre-trained imagenet weights;
                otherwise a path to custom trained weights.
            num_nodes {list} -- Node count for each dense layer of the head.
            dropouts {list} -- Dropout rate for each dense layer of the head.
            activation {str} -- Activation used by every dense layer.
        """
        self.img_height, self.img_width = img_height, img_width
        self.weights = weights
        # Delegate classifier-head configuration to the shared template.
        super(ResNet50, self).__init__(n_classes, num_nodes, dropouts, activation)

    def model(self):
        """Assemble and return the full classification model.

        Returns:
            keras-model -- Backbone plus dense head for the requested classes.
        """
        input_shape = (self.img_height, self.img_width, 3)
        # Convolutional backbone without its original top classifier.
        self.base_model = keras.applications.resnet50.ResNet50(include_top=False, weights=self.weights, input_shape=input_shape)
        # Dense head built from the configured nodes/dropouts/activation.
        head = self.create_model_tail(self.base_model)
        # Wire the backbone output into the head to form the final model.
        return keras.models.Model(inputs=self.base_model.input, outputs=head(self.base_model.output))
class InceptionV3(ModelTail):
    """Image classifier built on the InceptionV3 backbone, with transfer
    learning from imagenet weights.

    Arguments:
        ModelTail {cls} -- Template class that stitches a dense classifier
            head onto a convolutional base.
    """

    def __init__(self, n_classes, img_height=299, img_width=299, weights="imagenet", num_nodes=None, dropouts=None, activation="relu"):
        """Record the input geometry and weight source, then let ``ModelTail``
        configure the classifier head.

        Arguments:
            n_classes {int} -- Number of target classes.

        Keyword Arguments:
            img_height {int} -- Input image height in pixels.
            img_width {int} -- Input image width in pixels.
            weights {str} -- "imagenet" downloads pre-trained imagenet weights;
                otherwise a path to custom trained weights.
            num_nodes {list} -- Node count for each dense layer of the head.
            dropouts {list} -- Dropout rate for each dense layer of the head.
            activation {str} -- Activation used by every dense layer.
        """
        self.img_height, self.img_width = img_height, img_width
        self.weights = weights
        # Delegate classifier-head configuration to the shared template.
        super(InceptionV3, self).__init__(n_classes, num_nodes, dropouts, activation)

    def model(self):
        """Assemble and return the full classification model.

        Returns:
            keras-model -- Backbone plus dense head for the requested classes.
        """
        input_shape = (self.img_height, self.img_width, 3)
        # Convolutional backbone without its original top classifier.
        self.base_model = keras.applications.inception_v3.InceptionV3(include_top=False, weights=self.weights, input_shape=input_shape)
        # Dense head built from the configured nodes/dropouts/activation.
        head = self.create_model_tail(self.base_model)
        # Wire the backbone output into the head to form the final model.
        return keras.models.Model(inputs=self.base_model.input, outputs=head(self.base_model.output))
class InceptionResNetV2(ModelTail):
    """Image classifier built on the InceptionResNetV2 backbone, with
    transfer learning from imagenet weights.

    Arguments:
        ModelTail {cls} -- Template class that stitches a dense classifier
            head onto a convolutional base.
    """

    def __init__(self, n_classes, img_height=299, img_width=299, weights="imagenet", num_nodes=None, dropouts=None, activation="relu"):
        """Record the input geometry and weight source, then let ``ModelTail``
        configure the classifier head.

        Arguments:
            n_classes {int} -- Number of target classes.

        Keyword Arguments:
            img_height {int} -- Input image height in pixels.
            img_width {int} -- Input image width in pixels.
            weights {str} -- "imagenet" downloads pre-trained imagenet weights;
                otherwise a path to custom trained weights.
            num_nodes {list} -- Node count for each dense layer of the head.
            dropouts {list} -- Dropout rate for each dense layer of the head.
            activation {str} -- Activation used by every dense layer.
        """
        self.img_height, self.img_width = img_height, img_width
        self.weights = weights
        # Delegate classifier-head configuration to the shared template.
        super(InceptionResNetV2, self).__init__(n_classes, num_nodes, dropouts, activation)

    def model(self):
        """Assemble and return the full classification model.

        Returns:
            keras-model -- Backbone plus dense head for the requested classes.
        """
        input_shape = (self.img_height, self.img_width, 3)
        # Convolutional backbone without its original top classifier.
        self.base_model = keras.applications.inception_resnet_v2.InceptionResNetV2(include_top=False, weights=self.weights, input_shape=input_shape)
        # Dense head built from the configured nodes/dropouts/activation.
        head = self.create_model_tail(self.base_model)
        # Wire the backbone output into the head to form the final model.
        return keras.models.Model(inputs=self.base_model.input, outputs=head(self.base_model.output))
class Xception(ModelTail):
    """XceptionNet based image classification model with transfer learning support on imagenet weights.

    Arguments:
        ModelTail {cls} -- Template class to convert base architetcure to classifier.
    """

    def __init__(self, n_classes, img_height=299, img_width=299, weights="imagenet", num_nodes=None, dropouts=None, activation="relu"):
        """Class constructor.

        Fix: ``img_height``/``img_width`` now default to 299, matching the
        native Xception input resolution and the defaults every sibling
        wrapper in this module provides. Backward compatible -- callers that
        pass explicit sizes are unaffected.

        Arguments:
            n_classes {int} -- Number of classes for classification.

        Keyword Arguments:
            img_height {int} -- Height of the input image. (default: {299})
            img_width {int} -- Width of the input image. (default: {299})
            weights {str} -- If "imagenet" pre-trained imagenet weights will be downloaded. Else path to custom trained weights must be specified.
            num_nodes {list} -- List of nodes for each dense layer.
            dropouts {list} -- List of dropout rate corresponding to each dense layer.
            activation {str} -- Activation to be used for each dense layer.
        """
        self.img_height = img_height
        self.img_width = img_width
        self.weights = weights
        # Initiate base model architecture
        super(Xception, self).__init__(n_classes, num_nodes, dropouts, activation)

    def model(self):
        """Create image classifier.

        Returns:
            keras-model -- Model for image classification with specified configuration.
        """
        # Load base model using keras application module
        self.base_model = keras.applications.xception.Xception(
            weights=self.weights,
            include_top=False,
            input_shape=(self.img_height, self.img_width, 3)
        )
        # Creating top sequential model as per specified parameters
        top_model = self.create_model_tail(self.base_model)
        # Stich to create classification model
        model = keras.models.Model(inputs=self.base_model.input, outputs=top_model(self.base_model.output))
        return model
class DenseNet121(ModelTail):
    """Image classifier built on a DenseNet121 backbone with transfer learning.

    Extends ModelTail, which supplies the dense classification head.
    """
    def __init__(self, n_classes, img_height=224, img_width=224, weights="imagenet", num_nodes=None, dropouts=None, activation="relu"):
        """Store input/weight configuration and initialise the classifier head.

        Arguments:
            n_classes {int} -- Number of target classes.
        Keyword Arguments:
            img_height {int} -- Input image height in pixels.
            img_width {int} -- Input image width in pixels.
            weights {str} -- "imagenet" to download pre-trained weights, otherwise a path to custom weights.
            num_nodes {list} -- Nodes per dense layer of the head.
            dropouts {list} -- Dropout rate per dense layer of the head.
            activation {str} -- Activation function for each dense layer.
        """
        self.img_height = img_height
        self.img_width = img_width
        self.weights = weights
        # Let ModelTail record the classification-head configuration.
        super(DenseNet121, self).__init__(n_classes, num_nodes, dropouts, activation)
    def model(self):
        """Build and return the complete keras classification model.

        Returns:
            keras-model -- Backbone plus dense head, ready for training.
        """
        # Headless DenseNet121 backbone from the keras applications module.
        self.base_model = keras.applications.densenet.DenseNet121(
            include_top=False,
            weights=self.weights,
            input_shape=(self.img_height, self.img_width, 3),
        )
        # Dense head configured via ModelTail.
        classifier_head = self.create_model_tail(self.base_model)
        # Stitch backbone and head into one end-to-end model.
        return keras.models.Model(
            inputs=self.base_model.input,
            outputs=classifier_head(self.base_model.output),
        )
class DenseNet169(ModelTail):
    """Image classifier built on a DenseNet169 backbone with transfer learning.

    Extends ModelTail, which supplies the dense classification head.
    """
    def __init__(self, n_classes, img_height=224, img_width=224, weights="imagenet", num_nodes=None, dropouts=None, activation="relu"):
        """Store input/weight configuration and initialise the classifier head.

        Arguments:
            n_classes {int} -- Number of target classes.
        Keyword Arguments:
            img_height {int} -- Input image height in pixels.
            img_width {int} -- Input image width in pixels.
            weights {str} -- "imagenet" to download pre-trained weights, otherwise a path to custom weights.
            num_nodes {list} -- Nodes per dense layer of the head.
            dropouts {list} -- Dropout rate per dense layer of the head.
            activation {str} -- Activation function for each dense layer.
        """
        self.img_height = img_height
        self.img_width = img_width
        self.weights = weights
        # Let ModelTail record the classification-head configuration.
        super(DenseNet169, self).__init__(n_classes, num_nodes, dropouts, activation)
    def model(self):
        """Build and return the complete keras classification model.

        Returns:
            keras-model -- Backbone plus dense head, ready for training.
        """
        # Headless DenseNet169 backbone from the keras applications module.
        self.base_model = keras.applications.densenet.DenseNet169(
            include_top=False,
            weights=self.weights,
            input_shape=(self.img_height, self.img_width, 3),
        )
        # Dense head configured via ModelTail.
        classifier_head = self.create_model_tail(self.base_model)
        # Stitch backbone and head into one end-to-end model.
        return keras.models.Model(
            inputs=self.base_model.input,
            outputs=classifier_head(self.base_model.output),
        )
class DenseNet201(ModelTail):
    """Image classifier built on a DenseNet201 backbone with transfer learning.

    Extends ModelTail, which supplies the dense classification head.
    """
    def __init__(self, n_classes, img_height=224, img_width=224, weights="imagenet", num_nodes=None, dropouts=None, activation="relu"):
        """Store input/weight configuration and initialise the classifier head.

        Arguments:
            n_classes {int} -- Number of target classes.
        Keyword Arguments:
            img_height {int} -- Input image height in pixels.
            img_width {int} -- Input image width in pixels.
            weights {str} -- "imagenet" to download pre-trained weights, otherwise a path to custom weights.
            num_nodes {list} -- Nodes per dense layer of the head.
            dropouts {list} -- Dropout rate per dense layer of the head.
            activation {str} -- Activation function for each dense layer.
        """
        self.img_height = img_height
        self.img_width = img_width
        self.weights = weights
        # Let ModelTail record the classification-head configuration.
        super(DenseNet201, self).__init__(n_classes, num_nodes, dropouts, activation)
    def model(self):
        """Build and return the complete keras classification model.

        Returns:
            keras-model -- Backbone plus dense head, ready for training.
        """
        # Headless DenseNet201 backbone from the keras applications module.
        self.base_model = keras.applications.densenet.DenseNet201(
            include_top=False,
            weights=self.weights,
            input_shape=(self.img_height, self.img_width, 3),
        )
        # Dense head configured via ModelTail.
        classifier_head = self.create_model_tail(self.base_model)
        # Stitch backbone and head into one end-to-end model.
        return keras.models.Model(
            inputs=self.base_model.input,
            outputs=classifier_head(self.base_model.output),
        )
class NASNetMobile(ModelTail):
    """Image classifier built on a NASNetMobile backbone with transfer learning.

    Extends ModelTail, which supplies the dense classification head.
    """
    def __init__(self, n_classes, img_height=224, img_width=224, weights="imagenet", num_nodes=None, dropouts=None, activation="relu"):
        """Store input/weight configuration and initialise the classifier head.

        Arguments:
            n_classes {int} -- Number of target classes.
        Keyword Arguments:
            img_height {int} -- Input image height in pixels.
            img_width {int} -- Input image width in pixels.
            weights {str} -- "imagenet" to download pre-trained weights, otherwise a path to custom weights.
            num_nodes {list} -- Nodes per dense layer of the head.
            dropouts {list} -- Dropout rate per dense layer of the head.
            activation {str} -- Activation function for each dense layer.
        """
        self.img_height = img_height
        self.img_width = img_width
        self.weights = weights
        # Let ModelTail record the classification-head configuration.
        super(NASNetMobile, self).__init__(n_classes, num_nodes, dropouts, activation)
    def model(self):
        """Build and return the complete keras classification model.

        Returns:
            keras-model -- Backbone plus dense head, ready for training.
        """
        # Headless NASNetMobile backbone from the keras applications module.
        self.base_model = keras.applications.nasnet.NASNetMobile(
            include_top=False,
            weights=self.weights,
            input_shape=(self.img_height, self.img_width, 3),
        )
        # Dense head configured via ModelTail.
        classifier_head = self.create_model_tail(self.base_model)
        # Stitch backbone and head into one end-to-end model.
        return keras.models.Model(
            inputs=self.base_model.input,
            outputs=classifier_head(self.base_model.output),
        )
class NASNetLarge(ModelTail):
    """Image classifier built on a NASNetLarge backbone with transfer learning.

    Extends ModelTail, which supplies the dense classification head.
    """
    def __init__(self, n_classes, img_height=331, img_width=331, weights="imagenet", num_nodes=None, dropouts=None, activation="relu"):
        """Store input/weight configuration and initialise the classifier head.

        Arguments:
            n_classes {int} -- Number of target classes.
        Keyword Arguments:
            img_height {int} -- Input image height in pixels.
            img_width {int} -- Input image width in pixels.
            weights {str} -- "imagenet" to download pre-trained weights, otherwise a path to custom weights.
            num_nodes {list} -- Nodes per dense layer of the head.
            dropouts {list} -- Dropout rate per dense layer of the head.
            activation {str} -- Activation function for each dense layer.
        """
        self.img_height = img_height
        self.img_width = img_width
        self.weights = weights
        # Let ModelTail record the classification-head configuration.
        super(NASNetLarge, self).__init__(n_classes, num_nodes, dropouts, activation)
    def model(self):
        """Build and return the complete keras classification model.

        Returns:
            keras-model -- Backbone plus dense head, ready for training.
        """
        # Headless NASNetLarge backbone from the keras applications module.
        self.base_model = keras.applications.nasnet.NASNetLarge(
            include_top=False,
            weights=self.weights,
            input_shape=(self.img_height, self.img_width, 3),
        )
        # Dense head configured via ModelTail.
        classifier_head = self.create_model_tail(self.base_model)
        # Stitch backbone and head into one end-to-end model.
        return keras.models.Model(
            inputs=self.base_model.input,
            outputs=classifier_head(self.base_model.output),
        )
145023c004921eb84408d1b460fe25f063a2e3a3 | 162 | py | Python | push2_python/exceptions.py | AvneeshSarwate/push2-python | 884a3f06fdf4d78d0e99afc9a7cf6623bb7622d1 | [
"MIT"
] | 46 | 2018-10-08T18:12:49.000Z | 2022-03-18T08:51:16.000Z | push2_python/exceptions.py | AvneeshSarwate/push2-python | 884a3f06fdf4d78d0e99afc9a7cf6623bb7622d1 | [
"MIT"
] | 1 | 2019-07-25T08:40:18.000Z | 2019-07-25T08:40:18.000Z | push2_python/exceptions.py | AvneeshSarwate/push2-python | 884a3f06fdf4d78d0e99afc9a7cf6623bb7622d1 | [
"MIT"
class Push2USBDeviceNotFound(Exception):
    """Raised when the Push2 USB device cannot be found."""
    pass
class Push2USBDeviceConfigurationError(Exception):
    """Raised when configuring the Push2 USB device fails."""
    pass
class Push2MIDIeviceNotFound(Exception):
    """Raised when the Push2 MIDI device cannot be found.

    NOTE(review): the class name appears to be missing a "D" ("MIDIDevice");
    renaming would break callers that catch it, so it is left as is.
    """
    pass
| 18 | 50 | 0.802469 | 12 | 162 | 10.833333 | 0.5 | 0.3 | 0.276923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021583 | 0.141975 | 162 | 8 | 51 | 20.25 | 0.913669 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.5 | 0 | 0 | 0.5 | 0 | 1 | 0 | 1 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 7 |
1469e49d931aa92ec31958305475353b5f9290d2 | 317 | py | Python | src/xbrief/padder/__init__.py | pydget/xbrief | 9e91927a98754b0fca1fa55eae9a785b15e963f9 | [
"MIT"
] | null | null | null | src/xbrief/padder/__init__.py | pydget/xbrief | 9e91927a98754b0fca1fa55eae9a785b15e963f9 | [
"MIT"
] | null | null | null | src/xbrief/padder/__init__.py | pydget/xbrief | 9e91927a98754b0fca1fa55eae9a785b15e963f9 | [
"MIT"
def pad_start(text: str, width: int, fill_char: str = ' '):
    """Left-pad *text* with *fill_char* up to *width* characters.

    Only the first character of *fill_char* is used; text already at least
    *width* characters long is returned unchanged.
    """
    return f'{text:{fill_char[0]}>{width}}'
def pad_end(text: str, width: int, fill_char: str = ' '):
    """Right-pad text with the first character of fill_char up to width."""
    fill = fill_char[0]
    return format(text, f'{fill}<{width}')
def pad_centered(text: str, width: int, fill_char: str = ' '):
    """Center text within width, padding both sides with fill_char[0]."""
    spec = ''.join((fill_char[0], '^', str(width)))
    return format(text, spec)
| 28.818182 | 62 | 0.618297 | 51 | 317 | 3.666667 | 0.27451 | 0.256684 | 0.192513 | 0.240642 | 0.882353 | 0.882353 | 0.882353 | 0.882353 | 0.882353 | 0.882353 | 0 | 0.011321 | 0.164038 | 317 | 10 | 63 | 31.7 | 0.69434 | 0 | 0 | 0 | 0 | 0 | 0.283912 | 0.274448 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 12 |
1483d425da8d85fe43ef19b7fc96577e31e70620 | 141 | py | Python | app/main/tests/test_helpers.py | spetrovic450/ksvotes.org | 1fa25a4098657b5f2f89e345332a26b92b993ecd | [
"MIT"
] | null | null | null | app/main/tests/test_helpers.py | spetrovic450/ksvotes.org | 1fa25a4098657b5f2f89e345332a26b92b993ecd | [
"MIT"
] | 1 | 2021-12-13T20:14:18.000Z | 2021-12-13T20:14:18.000Z | app/main/tests/test_helpers.py | lukecivantos/flvotes | ace6fbee9d6cfaa9e4e69e266e321d041ad65da4 | [
"MIT"
] | null | null | null | from app.main.helpers import is_even_year
def test_is_even_year():
    """2018 should be classified as even, 2019 as odd."""
    result_even = is_even_year(year=2018)
    result_odd = is_even_year(year=2019)
    assert result_even
    assert not result_odd
14bfcd4a8d35e0ffa68646dbb2631bdb4c71dc60 | 12,868 | py | Python | tasks-deploy/restore-usb/check.py | irdkwmnsb/lkshl-ctf | e5c0200ddc8ba73df5f321b87b9763fb1bbaba57 | [
"MIT"
] | 3 | 2021-03-30T06:27:58.000Z | 2021-04-03T17:56:35.000Z | tasks-deploy/restore-usb/check.py | irdkwmnsb/lkshl-ctf | e5c0200ddc8ba73df5f321b87b9763fb1bbaba57 | [
"MIT"
] | null | null | null | tasks-deploy/restore-usb/check.py | irdkwmnsb/lkshl-ctf | e5c0200ddc8ba73df5f321b87b9763fb1bbaba57 | [
"MIT"
def check(attempt, context):
    """Validate a submitted CTF flag for this participant.

    Each participant is assigned one flag (keyed by participant id modulo the
    number of flags). A matching submission is accepted; a submission that
    matches some OTHER participant's flag is rejected as plagiarism, reporting
    the index of the flag's true owner; anything else is simply wrong.
    """
    expected_flag = flags[attempt.participant.id % len(flags)]
    if attempt.answer == expected_flag:
        return Checked(True)
    if attempt.answer in flags:
        # Valid flag, wrong participant: report whose flag it actually is.
        return CheckedPlagiarist(False, flags.index(attempt.answer))
    return Checked(False)
flags = ['LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_FmIGDD0Yg0}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_3mb9NokuaD}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_ZLcP4Spthb}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_iYJJtOguEy}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_7Ov3fKoEt3}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_TosvhyWlyk}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Ktfz1EnjoN}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_6lgGazpUqv}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_10SgPN2UWm}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_EFLI2z1sPt}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_1yg7j6rNsz}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_49ocfuSFxc}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_kA5DRHhfsz}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_1Qj633DgEJ}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_tfuyDJCqaK}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_sPnhIwreAg}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_4LBZwkbCbp}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_JHezgP4IGU}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Lq9nWTMEwD}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_uzFbv5Wbp3}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_eZBJdOJgT9}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_jQoJFJjrCS}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_p8aT00PBQe}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_SpjdASGyo5}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Pn11Kq6hJZ}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_m3TCWL1wLj}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_5bScEK89io}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Ce74aCNNXA}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_F8ZXP3xrVL}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_B9a7ldF8LA}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_saW9Bnup4Y}', 
'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_sxdl7ozHy2}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_v1FkDGOCfM}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_tTIE6VZgse}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_XOtC7vskoX}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_8JAlgB5yJ6}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_WIeCHh151K}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_0zNYpKp9xw}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Ae4gsn2S4n}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_RmAz14wRZZ}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_UC3a5HckAN}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_OmnfG7NfWM}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Nm5PalbyqS}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_JMlYK1U0mt}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_TZ2AFkMLF5}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_aZmDYEfwel}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_HTjFs1BDqI}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_0qOrel2Dn4}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_DP1cyv0PLM}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_q5TECloG9z}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_yx2apK8cBI}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_13Y4sdQvUC}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Xg3xWRs9oI}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_HbEGH7OU96}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_D1x2gMaep1}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_z5TFqa4xEv}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_VHL4EEUZIo}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Nt4VgxjfuG}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_RMdVI5sFVX}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_3wU8vyovpl}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_mVEbEUc8gr}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_yAzynsHRT4}', 
'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_lay8soJvOM}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_PlLM0Tij4N}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_kilKR9oAt9}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_OgLNgEOjD0}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_pNfb0BzAk1}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_a19fEY7gh2}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_sg8TYIR0Ph}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_mnajxus5P5}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_fZjDrS022o}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_sIpS80gQaV}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_O7euljoKtu}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_7BOssoJMEv}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_IcUf81XaBB}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_dYIk7bV7nk}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_BL5WxUOUyw}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_tTczeHq1ko}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_JyvvOdfK82}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_mHxKsNvArI}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_de9OYojYA1}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_X5FLJ0Lay4}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Idkhit0TeB}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_DjNr6OUrwu}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_ldxGLbQ2WI}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_A2gTOFkhxL}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Jrj8CW1DOU}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_LQJH04FU1v}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_OICzCGbvCV}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_w9RRrNG7VP}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_5s8o4wdItJ}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_guZ6BHCxUR}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_yRW0XBzEKY}', 
'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_iYoeqI1fAh}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Wm5VeNrXwt}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_hSRIuEAIqT}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_GXf099jFwY}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_yOzn9M6wQS}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_dFi7paPmv5}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_1Okr0AoJtv}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_DfG96GPbpP}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_XxSanRnLBb}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_5Pl0fgNckI}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_P2gLiCsHb4}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_0qY9nyHaJu}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_OSPqW8lt0V}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_noubbIFBPU}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_92rn8cReXh}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_eLjQHX55AF}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_mmHVeZUcyf}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_RrxQcxx7zW}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_3lxRimEOgJ}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_6qo4Wdbc7z}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_qVbQoZ2zqd}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_bo3PHnnDXP}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_TJutBxLqIG}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_iV5CIYXKH8}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Zc8YqgxHpJ}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_OPN456zEox}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Go6gK2TJQz}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_VuNpLhabBM}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_2KMTzl2n58}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_xqnKdNweBE}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_YmirxQlRES}', 
'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_b0K1ea7vqr}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_cXqmUnbzPl}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_V1swZVP76M}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_LsZCGkcICC}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_VYCVqxtXXA}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_hyshxvGioS}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_hO9sV5txNe}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_fjhGo36aWA}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_q2MZ6qJpZC}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_EC19PJ2HER}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_YWIbzR40m6}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Rf8iSvFTyg}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_hH2KdO28Wh}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Yp3hcybqw8}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_WVJJVsDLTS}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_BW5lXxZV1h}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_CWbcKIFQKe}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_aqegqAKdFZ}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_oNJtWbRH6d}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_sMVb3u8Vyn}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Al2Vs3S35x}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_lJxM31lFXI}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_yPbCAh1Y61}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_09wzmENQWs}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_xmtGXHYFAv}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_iHUi5Wq7kU}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_80ABRRVGaH}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_H8dxxsLxCD}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_csD5YSZsQs}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_cWISOu6zAF}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_btFkhLLHC0}', 
'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_AxGuJSPuVh}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_guep0SAwvZ}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_ZOn80r7lXc}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_RDLpaHNzQL}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_dl5mDoN9Lt}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_avX9AG6F8v}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_FzH8IQp2dZ}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_bYlsqarf1a}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_uW5F3C0CFD}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_iqgEhlfjTx}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_L78iZDFIU6}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_jstxOmBex2}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_469eEeY0QU}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_EXp1zq7RA2}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_fDxkXkYWME}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_AgbSJ15daw}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Q42d2on0Rq}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_rHxeW7M8Z9}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_x2ZLwr26fO}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_zWoZeMfKv5}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_yPzb9HdQw9}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Csyx4L7mR2}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_OIBnhTYfHi}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_RkiEeJl16F}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_uZln5Aa92n}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_OiNZNfC6Sn}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_pI72Z0t11e}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_pABelpYjVn}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_bokZXT5YTK}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_s0ULIj2UTT}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_X06Akp009I}', 
'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_BzQezm7wvH}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_c5stU3R1jt}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_LOmp1NcADD}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_T6U7tuJtdw}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_CSQBO7l06t}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_S11m2E5smz}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_plK4Fu8wGW}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_ijEudH3vgt}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_Cf2LXXCxfx}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_M41yZWqDbc}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_36vnJ6H1q6}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_SPEzCPLrBI}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_1vpDlGAzcq}', 'LKL{B1nW4aLk_15_mUccH_c0OoLeRR_Th4N_yoU_th0uGht_RNU46h4O3R}'] | 1,838.285714 | 12,608 | 0.90014 | 1,832 | 12,868 | 5.558406 | 0.122817 | 0.216046 | 0.255328 | 0.35353 | 0.785623 | 0.785623 | 0.785623 | 0.785623 | 0 | 0 | 0 | 0.138429 | 0.019817 | 12,868 | 7 | 12,608 | 1,838.285714 | 0.668913 | 0 | 0 | 0 | 0 | 0 | 0.91736 | 0.91736 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.571429 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 12 |
14c0367610f5f0a6c7964b74ee23fc57a40f4da0 | 14,517 | py | Python | discovery-provider/src/tasks/social_features.py | elopio/audius-protocol | 3b774e9dad09131735a9729fab816b4eac412c61 | [
"Apache-2.0"
] | 1 | 2020-11-10T04:04:47.000Z | 2020-11-10T04:04:47.000Z | discovery-provider/src/tasks/social_features.py | elopio/audius-protocol | 3b774e9dad09131735a9729fab816b4eac412c61 | [
"Apache-2.0"
] | null | null | null | discovery-provider/src/tasks/social_features.py | elopio/audius-protocol | 3b774e9dad09131735a9729fab816b4eac412c61 | [
"Apache-2.0"
] | null | null | null | import logging
from datetime import datetime
from src import contract_addresses
from src.models import Repost, RepostType, Follow, Playlist
logger = logging.getLogger(__name__)
def social_feature_state_update(
    self, update_task, session, social_feature_factory_txs, block_number, block_timestamp
):
    """Process repost/follow events from SocialFeatureFactory transactions.

    Scans every transaction receipt for track-repost, playlist-repost and
    follow add/delete events, reduces them to the net state change per
    (user, item) pair within this block, then bulk-applies those changes to
    the session (invalidating any previously-current rows).

    Returns:
        int -- number of social feature state changes applied.
    """
    num_total_changes = 0
    if not social_feature_factory_txs:
        return num_total_changes

    social_feature_factory_abi = update_task.abi_values["SocialFeatureFactory"]["abi"]
    social_feature_factory_contract = update_task.web3.eth.contract(
        address=contract_addresses["social_feature_factory"],
        abi=social_feature_factory_abi,
    )
    block_datetime = datetime.utcfromtimestamp(block_timestamp)

    # Net state changes of all reposts and follows in the current block:
    # track_repost_state_changes = { user_id: { track_id: Repost } }
    # playlist_repost_state_changes = { user_id: { playlist_id: Repost } }
    # follow_state_changes = { follower_user_id: { followee_user_id: Follow } }
    track_repost_state_changes = {}
    playlist_repost_state_changes = {}
    follow_state_changes = {}

    # Each handler extracts one event type from a tx receipt and folds it
    # into its state-change dict. Invocation order matters (adds before
    # deletes for each entity) and matches the original explicit sequence.
    event_handlers = (
        (add_track_repost, track_repost_state_changes),
        (delete_track_repost, track_repost_state_changes),
        (add_playlist_repost, playlist_repost_state_changes),
        (delete_playlist_repost, playlist_repost_state_changes),
        (add_follow, follow_state_changes),
        (delete_follow, follow_state_changes),
    )
    for tx_receipt in social_feature_factory_txs:
        for handler, state_changes in event_handlers:
            handler(
                self,
                social_feature_factory_contract,
                update_task,
                session,
                tx_receipt,
                block_number,
                block_datetime,
                state_changes,
            )

    # Bulk process all net repost and follow changes: mark the previously
    # current row (if any) stale, then stage the fresh row.
    for repost_user_id, track_reposts in track_repost_state_changes.items():
        for repost_track_id, repost in track_reposts.items():
            invalidate_old_repost(session, repost_user_id, repost_track_id, RepostType.track)
            session.add(repost)
        num_total_changes += len(track_reposts)

    for repost_user_id, playlist_reposts in playlist_repost_state_changes.items():
        for repost_playlist_id, repost in playlist_reposts.items():
            # Playlist reposts carry their own repost_type (playlist vs album).
            invalidate_old_repost(
                session,
                repost_user_id,
                repost_playlist_id,
                repost.repost_type
            )
            session.add(repost)
        num_total_changes += len(playlist_reposts)

    for follower_user_id, followees in follow_state_changes.items():
        for followee_user_id, follow in followees.items():
            invalidate_old_follow(session, follower_user_id, followee_user_id)
            session.add(follow)
        num_total_changes += len(followees)

    return num_total_changes
######## HELPERS ########
def invalidate_old_repost(session, repost_user_id, repost_item_id, repost_type):
    """Flip any still-current matching repost row to is_current = False.

    Ensures the freshly staged row becomes the single current record for this
    (user, item, type) combination. Returns the number of rows updated.
    """
    matching_reposts = session.query(Repost).filter(
        Repost.user_id == repost_user_id,
        Repost.repost_item_id == repost_item_id,
        Repost.repost_type == repost_type,
        Repost.is_current == True,
    )
    num_invalidated_repost_entries = matching_reposts.update({"is_current": False})
    # TODO - after on-chain storage is implemented, assert num_invalidated_repost_entries > 0
    return num_invalidated_repost_entries
def invalidate_old_follow(session, follower_user_id, followee_user_id):
# update existing db entry to is_current = False
num_invalidated_follow_entries = (
session.query(Follow)
.filter(
Follow.follower_user_id == follower_user_id,
Follow.followee_user_id == followee_user_id,
Follow.is_current == True
)
.update({"is_current": False})
)
# TODO - after on-chain storage is implemented, assert num_invalidated_follow_entries > 0
return num_invalidated_follow_entries
def add_track_repost(
self,
social_feature_factory_contract,
update_task,
session,
tx_receipt,
block_number,
block_datetime,
track_repost_state_changes,
):
new_track_repost_events = social_feature_factory_contract.events.TrackRepostAdded().processReceipt(
tx_receipt
)
for event in new_track_repost_events:
event_args = event["args"]
repost_user_id = event_args._userId
repost_track_id = event_args._trackId
if (repost_user_id in track_repost_state_changes) \
and (repost_track_id in track_repost_state_changes[repost_user_id]):
track_repost_state_changes[repost_user_id][repost_track_id].is_delete = False
else:
repost = Repost(
blockhash=update_task.web3.toHex(event.blockHash),
blocknumber=block_number,
user_id=repost_user_id,
repost_item_id=repost_track_id,
repost_type=RepostType.track,
is_current=True,
is_delete=False,
created_at=block_datetime,
)
if repost_user_id in track_repost_state_changes:
track_repost_state_changes[repost_user_id][repost_track_id] = repost
else:
track_repost_state_changes[repost_user_id] = {repost_track_id: repost}
def delete_track_repost(
self,
social_feature_factory_contract,
update_task,
session,
tx_receipt,
block_number,
block_datetime,
track_repost_state_changes
):
new_repost_events = social_feature_factory_contract.events.TrackRepostDeleted().processReceipt(
tx_receipt
)
for event in new_repost_events:
event_args = event["args"]
repost_user_id = event_args._userId
repost_track_id = event_args._trackId
if (repost_user_id in track_repost_state_changes) \
and (repost_track_id in track_repost_state_changes[repost_user_id]):
track_repost_state_changes[repost_user_id][repost_track_id].is_delete = True
else:
repost = Repost(
blockhash=update_task.web3.toHex(event.blockHash),
blocknumber=block_number,
user_id=repost_user_id,
repost_item_id=repost_track_id,
repost_type=RepostType.track,
is_current=True,
is_delete=True,
created_at=block_datetime,
)
if repost_user_id in track_repost_state_changes:
track_repost_state_changes[repost_user_id][repost_track_id] = repost
else:
track_repost_state_changes[repost_user_id] = {repost_track_id: repost}
def add_playlist_repost(
self,
social_feature_factory_contract,
update_task,
session,
tx_receipt,
block_number,
block_datetime,
playlist_repost_state_changes,
):
new_playlist_repost_events = social_feature_factory_contract.events.PlaylistRepostAdded().processReceipt(
tx_receipt
)
for event in new_playlist_repost_events:
event_args = event["args"]
repost_user_id = event_args._userId
repost_playlist_id = event_args._playlistId
repost_type = RepostType.playlist
playlist_entries = session.query(Playlist).filter(
Playlist.is_current == True,
Playlist.playlist_id == repost_playlist_id
).all()
if playlist_entries and playlist_entries[0].is_album:
repost_type = RepostType.album
if (repost_user_id in playlist_repost_state_changes) \
and (repost_playlist_id in playlist_repost_state_changes[repost_user_id]):
playlist_repost_state_changes[repost_user_id][repost_playlist_id].is_delete = False
else:
repost = Repost(
blockhash=update_task.web3.toHex(event.blockHash),
blocknumber=block_number,
user_id=repost_user_id,
repost_item_id=repost_playlist_id,
repost_type=repost_type,
is_current=True,
is_delete=False,
created_at=block_datetime,
)
if repost_user_id in playlist_repost_state_changes:
playlist_repost_state_changes[repost_user_id][repost_playlist_id] = repost
else:
playlist_repost_state_changes[repost_user_id] = {repost_playlist_id: repost}
def delete_playlist_repost(
self,
social_feature_factory_contract,
update_task,
session,
tx_receipt,
block_number,
block_datetime,
playlist_repost_state_changes,
):
new_playlist_repost_events = social_feature_factory_contract.events.PlaylistRepostDeleted().processReceipt(
tx_receipt
)
for event in new_playlist_repost_events:
event_args = event["args"]
repost_user_id = event_args._userId
repost_playlist_id = event_args._playlistId
repost_type = RepostType.playlist
playlist_entries = session.query(Playlist).filter(
Playlist.is_current == True,
Playlist.playlist_id == repost_playlist_id
).all()
if playlist_entries and playlist_entries[0].is_album:
repost_type = RepostType.album
if (repost_user_id in playlist_repost_state_changes) \
and (repost_playlist_id in playlist_repost_state_changes[repost_user_id]):
playlist_repost_state_changes[repost_user_id][repost_playlist_id].is_delete = True
else:
repost = Repost(
blockhash=update_task.web3.toHex(event.blockHash),
blocknumber=block_number,
user_id=repost_user_id,
repost_item_id=repost_playlist_id,
repost_type=repost_type,
is_current=True,
is_delete=True,
created_at=block_datetime,
)
if repost_user_id in playlist_repost_state_changes:
playlist_repost_state_changes[repost_user_id][repost_playlist_id] = repost
else:
playlist_repost_state_changes[repost_user_id] = {repost_playlist_id: repost}
def add_follow(
self,
social_feature_factory_contract,
update_task,
session,
tx_receipt,
block_number,
block_datetime,
follow_state_changes
):
new_follow_events = social_feature_factory_contract.events.UserFollowAdded().processReceipt(tx_receipt)
for entry in new_follow_events:
event_args = entry["args"]
follower_user_id = event_args._followerUserId
followee_user_id = event_args._followeeUserId
if (follower_user_id in follow_state_changes) and (followee_user_id in follow_state_changes[follower_user_id]):
follow_state_changes[follower_user_id][followee_user_id].is_delete = False
else:
follow = Follow(
blockhash=update_task.web3.toHex(entry.blockHash),
blocknumber=block_number,
follower_user_id=follower_user_id,
followee_user_id=followee_user_id,
is_current=True,
is_delete=False,
created_at=block_datetime,
)
if follower_user_id in follow_state_changes:
follow_state_changes[follower_user_id][followee_user_id] = follow
else:
follow_state_changes[follower_user_id] = {followee_user_id: follow}
def delete_follow(
self,
social_feature_factory_contract,
update_task,
session,
tx_receipt,
block_number,
block_datetime,
follow_state_changes
):
new_follow_events = social_feature_factory_contract.events.UserFollowDeleted().processReceipt(tx_receipt)
for entry in new_follow_events:
event_args = entry["args"]
follower_user_id = event_args._followerUserId
followee_user_id = event_args._followeeUserId
if (follower_user_id in follow_state_changes) and (followee_user_id in follow_state_changes[follower_user_id]):
follow_state_changes[follower_user_id][followee_user_id].is_delete = True
else:
follow = Follow(
blockhash=update_task.web3.toHex(entry.blockHash),
blocknumber=block_number,
follower_user_id=follower_user_id,
followee_user_id=followee_user_id,
is_current=True,
is_delete=True,
created_at=block_datetime,
)
if follower_user_id in follow_state_changes:
follow_state_changes[follower_user_id][followee_user_id] = follow
else:
follow_state_changes[follower_user_id] = {followee_user_id: follow}
| 36.751899 | 119 | 0.651719 | 1,623 | 14,517 | 5.332717 | 0.071473 | 0.069324 | 0.063778 | 0.049913 | 0.86632 | 0.841594 | 0.821375 | 0.7829 | 0.76892 | 0.763605 | 0 | 0.001162 | 0.288558 | 14,517 | 394 | 120 | 36.845178 | 0.836851 | 0.051595 | 0 | 0.706745 | 0 | 0 | 0.006478 | 0.001601 | 0 | 0 | 0 | 0.002538 | 0 | 1 | 0.026393 | false | 0 | 0.01173 | 0 | 0.049853 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
2129e6e0db9d402423374a86fad9b66a0660afa6 | 353 | py | Python | radionets/dl_framework/architecture.py | Kevin2/radionets | 44e10a85a096f5cea8e9d83f96db65bdd4df9517 | [
"MIT"
] | 9 | 2021-06-17T10:12:28.000Z | 2022-03-23T23:04:19.000Z | radionets/dl_framework/architecture.py | radionets-project/radionets | 9b87ddbf704e78db55944e70071a7002f6213399 | [
"MIT"
] | 24 | 2021-02-12T13:57:11.000Z | 2022-03-03T08:00:31.000Z | radionets/dl_framework/architecture.py | Kevin2/radionets | 44e10a85a096f5cea8e9d83f96db65bdd4df9517 | [
"MIT"
] | 3 | 2020-01-08T09:01:09.000Z | 2020-10-19T18:53:13.000Z | from radionets.dl_framework.architectures.basics import *
from radionets.dl_framework.architectures.unet import *
from radionets.dl_framework.architectures.filter_deep import *
from radionets.dl_framework.architectures.superRes import *
from radionets.dl_framework.architectures.res_exp import *
from radionets.dl_framework.architectures.lists import *
| 50.428571 | 62 | 0.864023 | 44 | 353 | 6.75 | 0.318182 | 0.262626 | 0.30303 | 0.484848 | 0.848485 | 0.723906 | 0 | 0 | 0 | 0 | 0 | 0 | 0.067989 | 353 | 6 | 63 | 58.833333 | 0.902736 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 9 |
213447aba344242dafe6d3f02eaa592bc2bd2f63 | 5,134 | py | Python | tests/panoramic/cli/husky/service/model_retriever/model_augments_test.py | kubamahnert/panoramic-cli | 036f45a05d39f5762088ce23dbe367b938192f79 | [
"MIT"
] | 5 | 2020-11-13T17:26:59.000Z | 2021-03-19T15:11:26.000Z | tests/panoramic/cli/husky/service/model_retriever/model_augments_test.py | kubamahnert/panoramic-cli | 036f45a05d39f5762088ce23dbe367b938192f79 | [
"MIT"
] | 5 | 2020-10-28T10:22:35.000Z | 2021-01-27T17:33:58.000Z | tests/panoramic/cli/husky/service/model_retriever/model_augments_test.py | kubamahnert/panoramic-cli | 036f45a05d39f5762088ce23dbe367b938192f79 | [
"MIT"
] | 3 | 2021-01-26T07:58:03.000Z | 2021-03-11T13:28:34.000Z | import pytest
from panoramic.cli.husky.core.model.enums import ModelVisibility
from panoramic.cli.husky.service.constants import TaxonSlugs
from panoramic.cli.husky.service.model_retriever.model_augments import ModelAugments
from tests.panoramic.cli.husky.test.mocks.husky_model import generate_husky_mock_model
@pytest.mark.parametrize(
'inp_model,expected_model',
[
(
generate_husky_mock_model(
visibility=ModelVisibility.available,
project_id='project_2',
attributes={'ad_id': {'tel_transformation': '"ad_id"', 'taxon': 'ad_id', 'identifier': True}},
),
generate_husky_mock_model(
visibility=ModelVisibility.available,
attributes={
'ad_id': {'tel_transformation': '"ad_id"', 'taxon': 'ad_id', 'identifier': True},
TaxonSlugs.COMPANY_ID: {
'taxon': TaxonSlugs.COMPANY_ID,
'identifier': False,
'tel_transformation': "'company_id'",
},
TaxonSlugs.PROJECT_ID: {
'taxon': TaxonSlugs.PROJECT_ID,
'identifier': False,
'tel_transformation': "'project_2'",
},
},
project_id='project_2',
),
),
(
generate_husky_mock_model(
visibility=ModelVisibility.available,
company_id='cid_1',
attributes={'ad_id': {'tel_transformation': '"ad_id"', 'taxon': 'ad_id', 'identifier': True}},
),
generate_husky_mock_model(
visibility=ModelVisibility.available,
company_id='cid_1',
attributes={
'ad_id': {'tel_transformation': '"ad_id"', 'taxon': 'ad_id', 'identifier': True},
TaxonSlugs.COMPANY_ID: {
'taxon': TaxonSlugs.COMPANY_ID,
'identifier': False,
'tel_transformation': "'cid_1'",
},
},
),
),
(
generate_husky_mock_model(
visibility=ModelVisibility.available,
project_id='project_2',
company_id='cid_1',
attributes={'ad_id': {'tel_transformation': '"ad_id"', 'taxon': 'ad_id', 'identifier': True}},
),
generate_husky_mock_model(
visibility=ModelVisibility.available,
project_id='project_2',
company_id='cid_1',
attributes={
'ad_id': {'tel_transformation': '"ad_id"', 'taxon': 'ad_id', 'identifier': True},
TaxonSlugs.PROJECT_ID: {
'taxon': TaxonSlugs.PROJECT_ID,
'identifier': False,
'tel_transformation': "'project_2'",
},
TaxonSlugs.COMPANY_ID: {
'taxon': TaxonSlugs.COMPANY_ID,
'identifier': False,
'tel_transformation': "'cid_1'",
},
},
),
),
(
generate_husky_mock_model(
visibility=ModelVisibility.available,
project_id='project_2',
company_id='cid_1',
attributes={
'ad_id': {'tel_transformation': '"ad_id"', 'taxon': 'ad_id', 'identifier': True},
TaxonSlugs.PROJECT_ID: {
'tel_transformation': '"column_a"',
'taxon': TaxonSlugs.PROJECT_ID,
'identifier': False,
},
TaxonSlugs.COMPANY_ID: {
'tel_transformation': '"column_b"',
'taxon': TaxonSlugs.COMPANY_ID,
'identifier': False,
},
},
),
generate_husky_mock_model(
visibility=ModelVisibility.available,
project_id='project_2',
company_id='cid_1',
attributes={
'ad_id': {'tel_transformation': '"ad_id"', 'taxon': 'ad_id', 'identifier': True},
TaxonSlugs.PROJECT_ID: {
'tel_transformation': '"column_a"',
'taxon': TaxonSlugs.PROJECT_ID,
'identifier': False,
},
TaxonSlugs.COMPANY_ID: {
'tel_transformation': '"column_b"',
'taxon': TaxonSlugs.COMPANY_ID,
'identifier': False,
},
},
),
),
],
)
def test_model_add_model_info_attributes(inp_model, expected_model):
ModelAugments._model_add_model_info_attributes(inp_model)
assert inp_model.to_primitive() == expected_model.to_primitive()
| 41.403226 | 110 | 0.47351 | 396 | 5,134 | 5.782828 | 0.126263 | 0.041921 | 0.099563 | 0.086463 | 0.841921 | 0.817467 | 0.817467 | 0.7869 | 0.7869 | 0.7869 | 0 | 0.005339 | 0.416245 | 5,134 | 123 | 111 | 41.739837 | 0.758759 | 0 | 0 | 0.733333 | 0 | 0 | 0.173938 | 0.004675 | 0 | 0 | 0 | 0 | 0.008333 | 1 | 0.008333 | false | 0 | 0.041667 | 0 | 0.05 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
dccf212d58c527e82d6128a3f62469ecd06bfa67 | 1,729 | py | Python | src/graph_transpiler/webdnn/frontend/tensorflow/ops/gen_parsing_ops.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | 1 | 2021-04-09T15:55:35.000Z | 2021-04-09T15:55:35.000Z | src/graph_transpiler/webdnn/frontend/tensorflow/ops/gen_parsing_ops.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | src/graph_transpiler/webdnn/frontend/tensorflow/ops/gen_parsing_ops.py | steerapi/webdnn | 1df51cc094e5a528cfd3452c264905708eadb491 | [
"MIT"
] | null | null | null | import tensorflow as tf
from webdnn.frontend.tensorflow.converter import TensorFlowConverter
@TensorFlowConverter.register_handler("DecodeCSV")
def decode_csv_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("DecodeJSONExample")
def decode_json_example_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("DecodeRaw")
def decode_raw_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("ParseExample")
def parse_example_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("ParseSingleSequenceExample")
def parse_single_sequence_example_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("ParseTensor")
def parse_tensor_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
@TensorFlowConverter.register_handler("StringToNumber")
def string_to_number_handler(converter: TensorFlowConverter, tf_op: "tf.Operation"):
raise NotImplementedError(f"[TensorFlowConverter] {tf_op.type} is not supported yet.")
| 44.333333 | 97 | 0.814922 | 190 | 1,729 | 7.210526 | 0.215789 | 0.214599 | 0.235037 | 0.189051 | 0.772263 | 0.772263 | 0.772263 | 0.772263 | 0.772263 | 0.772263 | 0 | 0 | 0.082128 | 1,729 | 38 | 98 | 45.5 | 0.863264 | 0 | 0 | 0.304348 | 0 | 0 | 0.331984 | 0.100058 | 0 | 0 | 0 | 0 | 0 | 1 | 0.304348 | false | 0 | 0.086957 | 0 | 0.391304 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
dcf4011fe7e4c90edb33d6e5fd287e5d9e7b52fc | 3,221 | py | Python | tests/dhcpv6/relay_agent/test_v6_relay_encapsulation.py | shawnmullaney/forge | aaaef0a0645f73d24666aab6a400f3604e753aac | [
"0BSD"
] | null | null | null | tests/dhcpv6/relay_agent/test_v6_relay_encapsulation.py | shawnmullaney/forge | aaaef0a0645f73d24666aab6a400f3604e753aac | [
"0BSD"
] | null | null | null | tests/dhcpv6/relay_agent/test_v6_relay_encapsulation.py | shawnmullaney/forge | aaaef0a0645f73d24666aab6a400f3604e753aac | [
"0BSD"
] | null | null | null | """DHCPv6 Relay Agent encapsulation and Interface ID"""
# pylint: disable=invalid-name,line-too-long
import pytest
import srv_control
import srv_msg
import references
import misc
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
def test_v6_relay_message_interfaceid():
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
srv_control.config_srv('interface-id', '0', '15')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
srv_msg.client_does_include('RelayAgent', None, 'interface-id')
srv_msg.create_relay_forward()
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
srv_msg.response_check_include_option('Response', None, '18')
srv_msg.response_check_include_option('Response', None, '9')
# Response MUST include ADVERTISE message.
references.references_check('RFC3315')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
@pytest.mark.disabled
def test_v6_relay_encapsulate_12lvl():
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_requests_option('7')
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
srv_msg.client_does_include('RelayAgent', None, 'interface-id')
srv_msg.create_relay_forward(12)
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
srv_msg.response_check_include_option('Response', None, '18')
srv_msg.response_check_include_option('Response', None, '9')
# Response MUST include ADVERTISE message.
# TODO: we should check these 12 levels in RELAYREPLY
# kea probably should rejected this msg as RFC says 8 levels are allowed
references.references_check('RFC3315')
@pytest.mark.v6
@pytest.mark.dhcp6
@pytest.mark.relay
def test_v6_relay_encapsulate_8lvl():
misc.test_setup()
srv_control.config_srv_subnet('3000::/64', '3000::1-3000::ff')
srv_control.build_and_send_config_files('SSH', 'config-file')
srv_control.start_srv('DHCP', 'started')
misc.test_procedure()
srv_msg.client_does_include('Client', None, 'client-id')
srv_msg.client_does_include('Client', None, 'IA-NA')
srv_msg.client_send_msg('SOLICIT')
srv_msg.client_does_include('RelayAgent', None, 'interface-id')
srv_msg.create_relay_forward(8)
misc.pass_criteria()
srv_msg.send_wait_for_message('MUST', None, 'RELAYREPLY')
srv_msg.response_check_include_option('Response', None, '18')
srv_msg.response_check_include_option('Response', None, '9')
# Response MUST include ADVERTISE message.
# TODO: we should check these 8 levels in RELAYREPLY
# RFC allows up to 8 levels of nesting
references.references_check('RFC3315')
| 31.891089 | 76 | 0.736107 | 459 | 3,221 | 4.856209 | 0.211329 | 0.069987 | 0.069987 | 0.064603 | 0.825931 | 0.80978 | 0.80978 | 0.80978 | 0.80978 | 0.80978 | 0 | 0.03264 | 0.13443 | 3,221 | 100 | 77 | 32.21 | 0.766858 | 0.132568 | 0 | 0.784615 | 0 | 0 | 0.162172 | 0 | 0 | 0 | 0 | 0.01 | 0 | 1 | 0.046154 | true | 0.046154 | 0.076923 | 0 | 0.123077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
0d45f4bd4e8248b608e719be9ba61a767084976e | 75 | py | Python | devel/apps/ik/admin/__init__.py | riscoscloverleaf/chatcube | a7184ef76108f90a74a88d3183a3d21c1249a0f5 | [
"MIT"
] | null | null | null | devel/apps/ik/admin/__init__.py | riscoscloverleaf/chatcube | a7184ef76108f90a74a88d3183a3d21c1249a0f5 | [
"MIT"
] | null | null | null | devel/apps/ik/admin/__init__.py | riscoscloverleaf/chatcube | a7184ef76108f90a74a88d3183a3d21c1249a0f5 | [
"MIT"
] | null | null | null | import ik.admin.member
import ik.admin.messages
#import ik.admin.feedback
| 15 | 25 | 0.813333 | 12 | 75 | 5.083333 | 0.5 | 0.393443 | 0.639344 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093333 | 75 | 4 | 26 | 18.75 | 0.897059 | 0.32 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
b4b860ed62f4e551cd987a1df245ca33add42b63 | 15,444 | py | Python | test.py | konsh/crypto_compare | 48511ca22c217a30a7aa3945550853bd3e91a0c7 | [
"MIT"
] | 1 | 2019-04-18T15:26:07.000Z | 2019-04-18T15:26:07.000Z | test.py | konsh/crypto_compare | 48511ca22c217a30a7aa3945550853bd3e91a0c7 | [
"MIT"
] | null | null | null | test.py | konsh/crypto_compare | 48511ca22c217a30a7aa3945550853bd3e91a0c7 | [
"MIT"
] | 1 | 2021-01-23T16:44:33.000Z | 2021-01-23T16:44:33.000Z | import mock
import pytest
from pytest_mock import mocker
from crypto_compare.client import Client
import urllib2
from urlparse import urlparse
import os.path
import unittest
from mock import patch
def describe_coin():
    """Tests for the coin endpoints: coin_list, coin_snapshot_full_by_id,
    coin_snapshot."""

    def describe_list():
        @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
        def with_success():
            # A successful HTTP response yields a Success payload.
            _assert_success(Client().coin_list())

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_error():
            # API-level errors are surfaced to the caller as ValueError.
            # (Unused `as excinfo` bindings removed.)
            with pytest.raises(ValueError):
                Client().coin_list()

    def describe_snapshot_full_by_id():
        @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
        def with_success():
            _assert_success(Client().coin_snapshot_full_by_id(1182))

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def without_coin_id():
            # An empty coin id is rejected with ValueError.
            with pytest.raises(ValueError):
                Client().coin_snapshot_full_by_id('')

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_invalid_coin_id():
            # An unknown coin id produces an API error -> ValueError.
            with pytest.raises(ValueError):
                Client().coin_snapshot_full_by_id(123456)

    def describe_snapshot():
        @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
        def with_success():
            _assert_success(Client().coin_snapshot('BTC', 'ETH'))

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_empty_args():
            # Either symbol missing (or both) should raise.
            with pytest.raises(ValueError):
                Client().coin_snapshot('', '')
            with pytest.raises(ValueError):
                Client().coin_snapshot('BTC', '')
            with pytest.raises(ValueError):
                Client().coin_snapshot('', 'ETH')

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_invalid_args():
            with pytest.raises(ValueError):
                Client().coin_snapshot('123', '456')
def describe_price():
    """Tests for the price endpoints: price, price_multi, price_multifull,
    price_historical."""

    @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
    def with_success():
        response = Client().price(fsym='BTC', tsyms='ETH')
        # The mocked payload quotes a USD price.
        assert response['USD'] is not None

    @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
    def with_empty_args():
        # Missing or empty symbols are rejected with ValueError.
        with pytest.raises(ValueError):
            Client().price()
        with pytest.raises(ValueError):
            Client().price(fsym='')
        with pytest.raises(ValueError):
            Client().price(tsyms='')

    def describe_multi():
        @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
        def with_success():
            response = Client().price_multi(fsyms='BTC,ETH', tsyms='USD,EUR')
            # One entry per requested "from" symbol.
            assert response['BTC'] is not None
            assert response['ETH'] is not None

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_empty_args():
            with pytest.raises(ValueError):
                Client().price_multi()
            with pytest.raises(ValueError):
                Client().price_multi(fsyms='')
            with pytest.raises(ValueError):
                Client().price_multi(tsyms='')

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_invalid_args():
            # Unknown target symbol triggers an API error -> ValueError.
            with pytest.raises(ValueError):
                Client().price_multi(fsyms='BTC,ETH', tsyms='PPH')

    def describe_multifull():
        @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
        def with_success():
            response = Client().price_multifull(fsyms='BTC,ETH', tsyms='USD,EUR')
            assert response['RAW'] is not None

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_empty_args():
            with pytest.raises(ValueError):
                Client().price_multifull()
            with pytest.raises(ValueError):
                Client().price_multifull(fsyms='')
            with pytest.raises(ValueError):
                Client().price_multifull(tsyms='')

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_invalid_args():
            with pytest.raises(ValueError):
                Client().price_multifull(fsyms='BTC,ETH', tsyms='PPH')

    def describe_historical():
        @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
        def with_success():
            response = Client().price_historical(fsym='BTC', tsyms='USD,EUR')
            assert response['BTC'] is not None

        @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
        def with_empty_args():
            # Argument validation fails before any request is made, so the
            # success mock is never reached here.
            with pytest.raises(ValueError):
                Client().price_historical()
            with pytest.raises(ValueError):
                Client().price_historical(fsym='')
            with pytest.raises(ValueError):
                Client().price_historical(tsyms='')

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_invalid_args():
            with pytest.raises(ValueError):
                Client().price_historical(fsym='BTC', tsyms='USD,EUR')
def describe_generate_avg():
    """Tests for the generateAvg endpoint (custom average over markets)."""

    @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
    def with_success():
        response = Client().generate_avg(fsym='BTC', tsym='USD', markets='Coinbase')
        assert response['RAW'] is not None

    @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
    def with_empty_args():
        # All three of fsym/tsym/markets are required.
        with pytest.raises(ValueError):
            Client().generate_avg()
        with pytest.raises(ValueError):
            Client().generate_avg(fsym='BTC')
        with pytest.raises(ValueError):
            Client().generate_avg(markets='Coinbase', tsym='ETH')

    @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
    def with_invalid_args():
        # An unknown market name produces an API error -> ValueError.
        with pytest.raises(ValueError):
            Client().generate_avg(markets='TestMarket', tsym='ETH', fsym='BTC')
def describe_day_avg():
    """Tests for the dayAvg endpoint."""

    @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
    def with_success():
        response = Client().day_avg(fsym='BTC', tsym='USD')
        assert response['USD'] is not None

    @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
    def with_empty_args():
        # Both fsym and tsym are required.
        with pytest.raises(ValueError):
            Client().day_avg()
        with pytest.raises(ValueError):
            Client().day_avg(fsym='BTC')
        with pytest.raises(ValueError):
            Client().day_avg(tsym='ETH')

    @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
    def with_invalid_args():
        # An unknown target symbol produces an API error -> ValueError.
        with pytest.raises(ValueError):
            Client().day_avg(tsym='DFG', fsym='BTC')
def describe_subs():
    """Tests for the subs endpoint."""

    @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
    def with_success():
        response = Client().subs(fsym='BTC')
        assert response['USD'] is not None

    @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
    def with_empty_args():
        # fsym is required.
        with pytest.raises(ValueError):
            Client().subs()

    @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
    def with_invalid_args():
        # An unknown symbol produces an API error -> ValueError.
        with pytest.raises(ValueError):
            Client().subs(fsym='DFG')
def describe_subs_watchlist():
    """Tests for the subsWatchlist endpoint."""

    @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
    def with_success():
        response = Client().subs_watchlist(fsyms='BTC', tsym='ETH')
        assert response['BTC'] is not None

    @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
    def with_empty_args():
        # Both fsyms and tsym are required.
        with pytest.raises(ValueError):
            Client().subs_watchlist()
        with pytest.raises(ValueError):
            Client().subs_watchlist(fsyms='BTC')
        with pytest.raises(ValueError):
            Client().subs_watchlist(tsym='ETH')

    @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
    def with_invalid_args():
        # An unknown "from" symbol produces an API error -> ValueError.
        with pytest.raises(ValueError):
            Client().subs_watchlist(fsyms='DFG', tsym='BTC')
def describe_top():
    """Tests for the top-* endpoints: top_exchanges, top_volumes, top_pairs."""

    def describe_exchanges():
        @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
        def with_success():
            response = Client().top_exchanges(fsym='BTC', tsym='ETH')
            assert response['Response'] == "Success"
            assert response['Data'] is not None

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_empty_args():
            # Both fsym and tsym are required.
            with pytest.raises(ValueError):
                Client().top_exchanges()
            with pytest.raises(ValueError):
                Client().top_exchanges(fsym='BTC')
            with pytest.raises(ValueError):
                Client().top_exchanges(tsym='ETH')

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_invalid_args():
            with pytest.raises(ValueError):
                Client().top_exchanges(fsym='DFG', tsym='PPH')

    def describe_volumes():
        @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
        def with_success():
            response = Client().top_volumes(tsym='BTC')
            assert response['Response'] == "Success"
            assert response['Data'] is not None

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_empty_args():
            with pytest.raises(ValueError):
                Client().top_volumes()

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_invalid_args():
            with pytest.raises(ValueError):
                Client().top_volumes(tsym='PPH')

    def describe_pairs():
        @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
        def with_success():
            response = Client().top_pairs(fsym='BTC')
            assert response['Response'] == "Success"
            assert response['Data'] is not None

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_empty_args():
            with pytest.raises(ValueError):
                Client().top_pairs()

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_invalid_args():
            with pytest.raises(ValueError):
                Client().top_pairs(fsym='DFG')
def describe_histo():
    """Tests for the OHLCV history endpoints: histo_day, histo_hour,
    histo_minute. All three share the same argument contract."""

    def describe_day():
        @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
        def with_success():
            response = Client().histo_day(fsym='BTC', tsym='ETH')
            assert response['Response'] == "Success"
            assert response['Data'] is not None

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_empty_args():
            # Both fsym and tsym are required.
            with pytest.raises(ValueError):
                Client().histo_day()
            with pytest.raises(ValueError):
                Client().histo_day(fsym='BTC')
            with pytest.raises(ValueError):
                Client().histo_day(tsym='ETH')

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_invalid_args():
            with pytest.raises(ValueError):
                Client().histo_day(fsym='DFG', tsym='PPH')

    def describe_hour():
        @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
        def with_success():
            response = Client().histo_hour(fsym='BTC', tsym='ETH')
            assert response['Response'] == "Success"
            assert response['Data'] is not None

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_empty_args():
            with pytest.raises(ValueError):
                Client().histo_hour()
            with pytest.raises(ValueError):
                Client().histo_hour(fsym='BTC')
            with pytest.raises(ValueError):
                Client().histo_hour(tsym='ETH')

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_invalid_args():
            with pytest.raises(ValueError):
                Client().histo_hour(fsym='DFG', tsym='PPH')

    def describe_minute():
        @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
        def with_success():
            response = Client().histo_minute(fsym='BTC', tsym='ETH')
            assert response['Response'] == "Success"
            assert response['Data'] is not None

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_empty_args():
            with pytest.raises(ValueError):
                Client().histo_minute()
            with pytest.raises(ValueError):
                Client().histo_minute(fsym='BTC')
            with pytest.raises(ValueError):
                Client().histo_minute(tsym='ETH')

        @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
        def with_invalid_args():
            with pytest.raises(ValueError):
                Client().histo_minute(fsym='DFG', tsym='PPH')
def describe_mining():
    """Specs for the mining endpoints (pytest-describe group)."""
    def describe_contracts():
        @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
        def with_success():
            contracts = Client().mining_contracts()
            _assert_success(contracts)

    def describe_equipments():
        @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
        def with_success():
            equipment = Client().mining_equipment()
            _assert_success(equipment)
def describe_all_exchanges():
    """Specs for Client.all_exchanges() (pytest-describe group)."""
    @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
    def with_success():
        response = Client().all_exchanges()
        # Fix: the original line was a bare comparison whose result was
        # discarded, so this check could never fail. Assert it instead,
        # using an identity test against None (PEP 8 / E711).
        assert response["Cryptsy"] is not None
def describe_social_stats():
    """Specs for Client.social_stats() (pytest-describe group)."""
    @mock.patch('urllib2.urlopen', _fake_url_open_with_success)
    def with_success():
        response = Client().social_stats(1182)
        assert response['Response'] == "Success"
        # PEP 8 / E711: compare to None by identity, not with '!='.
        assert response['Data'] is not None

    @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
    def with_empty_args():
        # 'as excinfo' dropped: the captured exception info was never used.
        with pytest.raises(ValueError):
            Client().social_stats('')

    @mock.patch('urllib2.urlopen', _fake_url_open_with_error)
    def with_invalid_args():
        # Canned fixture answers "Success" with an empty profile for an
        # unknown id, so no exception is expected here.
        response = Client().social_stats("abcdefg")
        assert response['Response'] == "Success"
        assert response['Data']['General']['Name'] == ''
def __url_resource_filepath(url, sub_folder):
    """Map an API URL onto the local fixture file that fakes its response.

    The path components after the 'data' segment of the URL select a file
    under tests/resources/<sub_folder>/.

    :param url: full request URL whose path contains a 'data' segment
    :param sub_folder: fixture variant to serve ('success' or 'error')
    :return: normalized filesystem path of the fixture file
    """
    parsed_url = urlparse(url)
    # list(...) keeps this working on Python 3, where filter() returns a
    # lazy iterator that supports neither .index() nor slicing.
    url_parts = list(filter(None, parsed_url.path.split('/')))
    data_parts = url_parts[url_parts.index("data")+1: len(url_parts)]
    resource_name = '/' + "/".join(data_parts)
    resource_file = os.path.normpath('tests/resources/' + sub_folder + "/" + resource_name)
    return resource_file
def _fake_url_open_with_success(url):
    """Stand-in for urllib2.urlopen that serves a canned 'success' fixture."""
    resource_path = __url_resource_filepath(url, 'success')
    return open(resource_path, mode='rb')
def _fake_url_open_with_error(url):
    """Stand-in for urllib2.urlopen that serves a canned 'error' fixture."""
    resource_path = __url_resource_filepath(url, 'error')
    return open(resource_path, mode='rb')
def _assert_success(response):
    """Assert that an API response reports success and carries a message.

    :param response: parsed JSON response dict from the fake urlopen
    """
    assert response['Response'] == "Success"
    # PEP 8 / E711: compare to None by identity, not with '!='.
    assert response['Message'] is not None
2e971fa787e9e55d33494114a2f4c42d46651cef | 925 | py | Python | object_pool/tests/cls.py | dduraipandian/object_pool | 03426d51e510eed1fc8ad33e4f232f57d8b6a70d | [
"MIT"
] | 3 | 2020-07-08T14:14:58.000Z | 2022-01-25T08:05:31.000Z | object_pool/tests/cls.py | dduraipandian/object_pool | 03426d51e510eed1fc8ad33e4f232f57d8b6a70d | [
"MIT"
] | null | null | null | object_pool/tests/cls.py | dduraipandian/object_pool | 03426d51e510eed1fc8ad33e4f232f57d8b6a70d | [
"MIT"
class Browser:
    """Fake pooled resource used by the object-pool tests.

    Acquires a (fake) connection at construction time and exposes the hooks
    the pool invokes: do_work(), clean_up() and check_invalid().
    """

    def __init__(self):
        # Grab the fake connection once, up front.
        self.browser = self.__create_connection()

    @staticmethod
    def __create_connection():
        # Placeholder for a real connection handle.
        return "connection_object"

    def do_work(self):
        # This variant always reports success.
        return True

    def clean_up(self, **stats):
        print("connection object is closed")

    def check_invalid(self, **stats):
        """Report whether the resource is invalid; this fake never is."""
        return False
class Browser1:
    """Second fake pooled resource: like Browser, but do_work() fails.

    Also echoes the stats it receives in check_invalid(), which the tests
    can observe on stdout.
    """

    def __init__(self):
        # Grab the fake connection once, up front.
        self.browser = self.__create_connection()

    @staticmethod
    def __create_connection():
        # Placeholder for a real connection handle.
        return "connection_object"

    def do_work(self):
        # This variant always reports failure.
        return False

    def clean_up(self, **stats):
        print("connection object is closed")

    def check_invalid(self, **stats):
        """Print the received stats and report the resource as not invalid."""
        print(stats)
        return False
| 23.125 | 64 | 0.627027 | 104 | 925 | 5.230769 | 0.278846 | 0.117647 | 0.040441 | 0.055147 | 0.878676 | 0.878676 | 0.878676 | 0.878676 | 0.878676 | 0.878676 | 0 | 0.001497 | 0.277838 | 925 | 39 | 65 | 23.717949 | 0.812874 | 0.109189 | 0 | 0.851852 | 0 | 0 | 0.108241 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.37037 | false | 0 | 0 | 0.074074 | 0.666667 | 0.111111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
2ebadd83f3fb409060aaec1b7df984fdf892a513 | 159 | py | Python | src/media_list/viewsets/__init__.py | mincem/media_list | ed255c37feaf94da82851627466719a2af95635e | [
"MIT"
] | null | null | null | src/media_list/viewsets/__init__.py | mincem/media_list | ed255c37feaf94da82851627466719a2af95635e | [
"MIT"
] | 2 | 2020-08-02T17:25:09.000Z | 2022-03-12T00:12:46.000Z | src/media_list/viewsets/__init__.py | mincem/media_list | ed255c37feaf94da82851627466719a2af95635e | [
"MIT"
] | null | null | null | from .viewset import Viewset
from .manga_viewset import manga_viewset
from .movie_viewset import movie_viewset
from .manga_api_viewset import MangaApiViewSet
| 26.5 | 46 | 0.867925 | 22 | 159 | 6 | 0.318182 | 0.393939 | 0.242424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106918 | 159 | 5 | 47 | 31.8 | 0.929577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
2c2b78377292688b73968ec2037a99b7c75690fa | 30 | py | Python | src/lib/_thread.py | blockpy-edu/skulpt | dc70288aedcd7670605ef28f8525546440b39f93 | [
"MIT"
] | 4 | 2020-01-19T01:42:06.000Z | 2021-05-13T09:51:38.000Z | src/lib/_thread.py | blockpy-edu/skulpt | dc70288aedcd7670605ef28f8525546440b39f93 | [
"MIT"
] | null | null | null | src/lib/_thread.py | blockpy-edu/skulpt | dc70288aedcd7670605ef28f8525546440b39f93 | [
"MIT"
] | 4 | 2019-10-16T21:50:53.000Z | 2021-01-11T06:25:57.000Z | def get_ident():
return 1
| 10 | 16 | 0.633333 | 5 | 30 | 3.6 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 0.266667 | 30 | 2 | 17 | 15 | 0.772727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0.5 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
25882445f9e369e0e3337f8ac2173bbc54bfae16 | 272 | py | Python | shardingpy/routing/router/sharding/factory.py | hongfuli/sharding-py | a26a64aa9d9196c830e7e2fa4095a58bef608a40 | [
"Apache-2.0"
] | 1 | 2021-01-29T13:29:29.000Z | 2021-01-29T13:29:29.000Z | shardingpy/routing/router/sharding/factory.py | hongfuli/sharding-py | a26a64aa9d9196c830e7e2fa4095a58bef608a40 | [
"Apache-2.0"
] | null | null | null | shardingpy/routing/router/sharding/factory.py | hongfuli/sharding-py | a26a64aa9d9196c830e7e2fa4095a58bef608a40 | [
"Apache-2.0"
] | null | null | null | from shardingpy.routing.router.sharding.impl import ParsingSQLRouter
def create_sql_router(sharding_rule, sharding_meta_data, database_type, show_sql):
    """Build the SQL router used for sharding-aware statement routing.

    Currently always produces a ParsingSQLRouter; hint-based routing is not
    implemented yet.

    :param sharding_rule: sharding rule configuration
    :param sharding_meta_data: sharding metadata
    :param database_type: target database type
    :param show_sql: whether routed SQL should be logged/shown
    :return: a ParsingSQLRouter instance
    """
    # TODO HintManagerHolder
    router = ParsingSQLRouter(sharding_rule, sharding_meta_data, database_type, show_sql)
    return router
| 34 | 87 | 0.838235 | 34 | 272 | 6.352941 | 0.588235 | 0.12963 | 0.185185 | 0.222222 | 0.435185 | 0.435185 | 0.435185 | 0.435185 | 0.435185 | 0 | 0 | 0 | 0.102941 | 272 | 7 | 88 | 38.857143 | 0.885246 | 0.080882 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 8 |
2590c27561a04158bd9465346a21eddd722f4dd1 | 26,269 | py | Python | easy_vk/bot/api/groups.py | UmbrellaMalware/easy_vk | 2a84b6bbf7fa9f65633a3fc1cbbe3235a6ee1651 | [
"MIT"
] | 5 | 2020-05-03T12:23:06.000Z | 2020-08-07T16:55:53.000Z | easy_vk/bot/api/groups.py | UmbrellaMalware/easy_vk | 2a84b6bbf7fa9f65633a3fc1cbbe3235a6ee1651 | [
"MIT"
] | 4 | 2020-05-03T12:28:58.000Z | 2021-09-07T22:39:02.000Z | easy_vk/bot/api/groups.py | UmbrellaMalware/easy_vk | 2a84b6bbf7fa9f65633a3fc1cbbe3235a6ee1651 | [
"MIT"
] | 3 | 2021-09-04T22:46:11.000Z | 2021-09-07T22:20:19.000Z | # This file was autogenerated from vk-api json schema
from typing import List, Union, Optional, overload
from easy_vk.types import objects
from easy_vk.types import responses
from easy_vk.api_category import BaseCategory
try:
from typing import Literal
except Exception:
from typing_extensions import Literal
class Groups(BaseCategory):
    """Wrappers for the VK API 'groups.*' method family.

    Autogenerated from the vk-api JSON schema: every method collects its own
    keyword arguments via locals(), maps aliased parameter names onto the
    wire-level names, and delegates the HTTP call to BaseCategory._call().
    """

    def add_address(self, group_id: int, title: str, address: str, country_id: int, city_id: int, latitude: float, longitude: float, additional_address: Optional[str] = None, metro_id: Optional[int] = None, phone: Optional[str] = None, work_info_status: Optional[str] = None, timetable: Optional[str] = None, is_main_address: Optional[bool] = None) -> responses.GroupsAddAddress:
        """
        :param group_id:
        :param title:
        :param address:
        :param country_id:
        :param city_id:
        :param latitude:
        :param longitude:
        :param additional_address:
        :param metro_id:
        :param phone:
        :param work_info_status:
        :param timetable:
        :param is_main_address:
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.addAddress'
        response_type = responses.GroupsAddAddress
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def add_callback_server(self, group_id: int, url: str, title: str, secret_key: Optional[str] = None) -> responses.GroupsAddCallbackServer:
        """
        :param group_id:
        :param url:
        :param title:
        :param secret_key:
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.addCallbackServer'
        response_type = responses.GroupsAddCallbackServer
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def delete_callback_server(self, group_id: int, server_id: int) -> responses.BaseOk:
        """
        :param group_id:
        :param server_id:
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.deleteCallbackServer'
        response_type = responses.BaseOk
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def disable_online(self, group_id: int) -> responses.BaseOk:
        """
        :param group_id:
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.disableOnline'
        response_type = responses.BaseOk
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def edit_address(self, group_id: int, address_id: int, title: Optional[str] = None, address: Optional[str] = None, additional_address: Optional[str] = None, country_id: Optional[int] = None, city_id: Optional[int] = None, metro_id: Optional[int] = None, latitude: Optional[float] = None, longitude: Optional[float] = None, phone: Optional[str] = None, work_info_status: Optional[str] = None, timetable: Optional[str] = None, is_main_address: Optional[bool] = None) -> responses.GroupsEditAddress:
        """
        :param group_id:
        :param address_id:
        :param title:
        :param address:
        :param additional_address:
        :param country_id:
        :param city_id:
        :param metro_id:
        :param latitude:
        :param longitude:
        :param phone:
        :param work_info_status:
        :param timetable:
        :param is_main_address:
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.editAddress'
        response_type = responses.GroupsEditAddress
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def edit_callback_server(self, group_id: int, server_id: int, url: str, title: str, secret_key: Optional[str] = None) -> responses.BaseOk:
        """
        :param group_id:
        :param server_id:
        :param url:
        :param title:
        :param secret_key:
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.editCallbackServer'
        response_type = responses.BaseOk
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def enable_online(self, group_id: int) -> responses.BaseOk:
        """
        :param group_id:
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.enableOnline'
        response_type = responses.BaseOk
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def get_banned(self, group_id: int, offset: Optional[int] = None, count: Optional[int] = None, fields: Optional[List[Union[objects.BaseUserGroupFields, str]]] = None, owner_id: Optional[int] = None) -> responses.GroupsGetBanned:
        """
        Returns a list of users on a community blacklist.

        :param group_id: Community ID.
        :param offset: Offset needed to return a specific subset of users.
        :param count: Number of users to return.
        :param fields:
        :param owner_id:
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.getBanned'
        response_type = responses.GroupsGetBanned
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def get_by_id(self, group_ids: Optional[List[str]] = None, group_id: Optional[str] = None, fields: Optional[List[Union[objects.GroupsFields, str]]] = None) -> responses.GroupsGetById:
        """
        Returns information about communities by their IDs.

        :param group_ids: IDs or screen names of communities.
        :param group_id: ID or screen name of the community.
        :param fields: Group fields to return.
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.getById'
        response_type = responses.GroupsGetById
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def get_callback_confirmation_code(self, group_id: int) -> responses.GroupsGetCallbackConfirmationCode:
        """
        Returns Callback API confirmation code for the community.

        :param group_id: Community ID.
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.getCallbackConfirmationCode'
        response_type = responses.GroupsGetCallbackConfirmationCode
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def get_callback_servers(self, group_id: int, server_ids: Optional[List[int]] = None) -> responses.GroupsGetCallbackServers:
        """
        :param group_id:
        :param server_ids:
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.getCallbackServers'
        response_type = responses.GroupsGetCallbackServers
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def get_callback_settings(self, group_id: int, server_id: Optional[int] = None) -> responses.GroupsGetCallbackSettings:
        """
        Returns [vk.com/dev/callback_api|Callback API] notifications settings.

        :param group_id: Community ID.
        :param server_id: Server ID.
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.getCallbackSettings'
        response_type = responses.GroupsGetCallbackSettings
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def get_long_poll_server(self, group_id: int) -> responses.GroupsGetLongPollServer:
        """
        Returns the data needed to query a Long Poll server for events

        :param group_id: Community ID
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.getLongPollServer'
        response_type = responses.GroupsGetLongPollServer
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def get_long_poll_settings(self, group_id: int) -> responses.GroupsGetLongPollSettings:
        """
        Returns Long Poll notification settings

        :param group_id: Community ID.
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.getLongPollSettings'
        response_type = responses.GroupsGetLongPollSettings
        return self._call(method_name, method_parameters, param_aliases, response_type)

    @overload
    def get_members(self, group_id: Optional[str] = None, sort: Optional[str] = None, offset: Optional[int] = None, count: Optional[int] = None, fields: None = None, filter_: None = None) -> responses.GroupsGetMembers: ...

    @overload
    def get_members(self, fields: List[Union[objects.UsersFields, str]], group_id: Optional[str] = None, sort: Optional[str] = None, offset: Optional[int] = None, count: Optional[int] = None, filter_: None = None) -> responses.GroupsGetMembersFields: ...

    @overload
    def get_members(self, filter_: str, group_id: Optional[str] = None, sort: Optional[str] = None, offset: Optional[int] = None, count: Optional[int] = None, fields: None = None) -> responses.GroupsGetMembersFilter: ...

    def get_members(self, group_id: Optional[str] = None, sort: Optional[str] = None, offset: Optional[int] = None, count: Optional[int] = None, fields: Optional[List[Union[objects.UsersFields, str]]] = None, filter_: Optional[str] = None):
        """
        Returns a list of community members.

        :param group_id: ID or screen name of the community.
        :param sort: Sort order. Available values: 'id_asc', 'id_desc', 'time_asc', 'time_desc'. 'time_asc' and 'time_desc' are available only if the method is called by the group's 'moderator'.
        :param offset: Offset needed to return a specific subset of community members.
        :param count: Number of community members to return.
        :param fields: List of additional fields to be returned. Available values: 'sex, bdate, city, country, photo_50, photo_100, photo_200_orig, photo_200, photo_400_orig, photo_max, photo_max_orig, online, online_mobile, lists, domain, has_mobile, contacts, connections, site, education, universities, schools, can_post, can_see_all_posts, can_see_audio, can_write_private_message, status, last_seen, common_count, relation, relatives, counters'.
        :param filter_: *'friends' – only friends in this community will be returned,, *'unsure' – only those who pressed 'I may attend' will be returned (if it's an event).
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = [('filter_', 'filter')]
        method_name = 'groups.getMembers'
        if not fields and not filter_:
            response_type = responses.GroupsGetMembers
        elif fields and not filter_:
            response_type = responses.GroupsGetMembersFields
        elif not fields and filter_:
            response_type = responses.GroupsGetMembersFilter
        else:
            # Bug fix: the original if-chain left response_type unbound when
            # both 'fields' and 'filter_' were supplied, raising
            # UnboundLocalError at the _call below. Handle the combination
            # exhaustively (mirroring is_member); with 'fields' present the
            # richer per-user response model applies.
            response_type = responses.GroupsGetMembersFields
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def get_token_permissions(self) -> responses.GroupsGetTokenPermissions:
        """Returns the permissions of the current community access token."""
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.getTokenPermissions'
        response_type = responses.GroupsGetTokenPermissions
        return self._call(method_name, method_parameters, param_aliases, response_type)

    @overload
    def is_member(self, group_id: str, user_id: Optional[int] = None, user_ids: None = None, extended: None = None) -> responses.GroupsIsMember: ...

    @overload
    def is_member(self, group_id: str, user_ids: List[int], user_id: Optional[int] = None, extended: None = None) -> responses.GroupsIsMemberUserIds: ...

    @overload
    def is_member(self, group_id: str, extended: bool, user_id: Optional[int] = None, user_ids: None = None) -> responses.GroupsIsMemberExtended: ...

    @overload
    def is_member(self, group_id: str, user_ids: List[int], extended: bool, user_id: Optional[int] = None) -> responses.GroupsIsMemberUserIdsExtended: ...

    def is_member(self, group_id: str, user_id: Optional[int] = None, user_ids: Optional[List[int]] = None, extended: Optional[bool] = None):
        """
        Returns information specifying whether a user is a member of a community.

        :param group_id: ID or screen name of the community.
        :param user_id: User ID.
        :param user_ids: User IDs.
        :param extended: '1' — to return an extended response with additional fields. By default: '0'.
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.isMember'
        # The four truthiness combinations of (user_ids, extended) are
        # mutually exclusive and exhaustive, so an elif chain suffices.
        if not user_ids and not extended:
            response_type = responses.GroupsIsMember
        elif user_ids and not extended:
            response_type = responses.GroupsIsMemberUserIds
        elif not user_ids and extended:
            response_type = responses.GroupsIsMemberExtended
        else:
            response_type = responses.GroupsIsMemberUserIdsExtended
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def set_callback_settings(self, group_id: int, server_id: Optional[int] = None, api_version: Optional[str] = None, message_new: Optional[bool] = None, message_reply: Optional[bool] = None, message_allow: Optional[bool] = None, message_edit: Optional[bool] = None, message_deny: Optional[bool] = None, message_typing_state: Optional[bool] = None, photo_new: Optional[bool] = None, audio_new: Optional[bool] = None, video_new: Optional[bool] = None, wall_reply_new: Optional[bool] = None, wall_reply_edit: Optional[bool] = None, wall_reply_delete: Optional[bool] = None, wall_reply_restore: Optional[bool] = None, wall_post_new: Optional[bool] = None, wall_repost: Optional[bool] = None, board_post_new: Optional[bool] = None, board_post_edit: Optional[bool] = None, board_post_restore: Optional[bool] = None, board_post_delete: Optional[bool] = None, photo_comment_new: Optional[bool] = None, photo_comment_edit: Optional[bool] = None, photo_comment_delete: Optional[bool] = None, photo_comment_restore: Optional[bool] = None, video_comment_new: Optional[bool] = None, video_comment_edit: Optional[bool] = None, video_comment_delete: Optional[bool] = None, video_comment_restore: Optional[bool] = None, market_comment_new: Optional[bool] = None, market_comment_edit: Optional[bool] = None, market_comment_delete: Optional[bool] = None, market_comment_restore: Optional[bool] = None, poll_vote_new: Optional[bool] = None, group_join: Optional[bool] = None, group_leave: Optional[bool] = None, group_change_settings: Optional[bool] = None, group_change_photo: Optional[bool] = None, group_officers_edit: Optional[bool] = None, user_block: Optional[bool] = None, user_unblock: Optional[bool] = None, lead_forms_new: Optional[bool] = None, like_add: Optional[bool] = None, like_remove: Optional[bool] = None, message_event: Optional[bool] = None) -> responses.BaseOk:
        """
        Allow to set notifications settings for group.

        :param group_id: Community ID.
        :param server_id: Server ID.
        :param api_version:
        :param message_new: A new incoming message has been received ('0' — disabled, '1' — enabled).
        :param message_reply: A new outcoming message has been received ('0' — disabled, '1' — enabled).
        :param message_allow: Allowed messages notifications ('0' — disabled, '1' — enabled).
        :param message_edit:
        :param message_deny: Denied messages notifications ('0' — disabled, '1' — enabled).
        :param message_typing_state:
        :param photo_new: New photos notifications ('0' — disabled, '1' — enabled).
        :param audio_new: New audios notifications ('0' — disabled, '1' — enabled).
        :param video_new: New videos notifications ('0' — disabled, '1' — enabled).
        :param wall_reply_new: New wall replies notifications ('0' — disabled, '1' — enabled).
        :param wall_reply_edit: Wall replies edited notifications ('0' — disabled, '1' — enabled).
        :param wall_reply_delete: A wall comment has been deleted ('0' — disabled, '1' — enabled).
        :param wall_reply_restore: A wall comment has been restored ('0' — disabled, '1' — enabled).
        :param wall_post_new: New wall posts notifications ('0' — disabled, '1' — enabled).
        :param wall_repost: New wall posts notifications ('0' — disabled, '1' — enabled).
        :param board_post_new: New board posts notifications ('0' — disabled, '1' — enabled).
        :param board_post_edit: Board posts edited notifications ('0' — disabled, '1' — enabled).
        :param board_post_restore: Board posts restored notifications ('0' — disabled, '1' — enabled).
        :param board_post_delete: Board posts deleted notifications ('0' — disabled, '1' — enabled).
        :param photo_comment_new: New comment to photo notifications ('0' — disabled, '1' — enabled).
        :param photo_comment_edit: A photo comment has been edited ('0' — disabled, '1' — enabled).
        :param photo_comment_delete: A photo comment has been deleted ('0' — disabled, '1' — enabled).
        :param photo_comment_restore: A photo comment has been restored ('0' — disabled, '1' — enabled).
        :param video_comment_new: New comment to video notifications ('0' — disabled, '1' — enabled).
        :param video_comment_edit: A video comment has been edited ('0' — disabled, '1' — enabled).
        :param video_comment_delete: A video comment has been deleted ('0' — disabled, '1' — enabled).
        :param video_comment_restore: A video comment has been restored ('0' — disabled, '1' — enabled).
        :param market_comment_new: New comment to market item notifications ('0' — disabled, '1' — enabled).
        :param market_comment_edit: A market comment has been edited ('0' — disabled, '1' — enabled).
        :param market_comment_delete: A market comment has been deleted ('0' — disabled, '1' — enabled).
        :param market_comment_restore: A market comment has been restored ('0' — disabled, '1' — enabled).
        :param poll_vote_new: A vote in a public poll has been added ('0' — disabled, '1' — enabled).
        :param group_join: Joined community notifications ('0' — disabled, '1' — enabled).
        :param group_leave: Left community notifications ('0' — disabled, '1' — enabled).
        :param group_change_settings:
        :param group_change_photo:
        :param group_officers_edit:
        :param user_block: User added to community blacklist
        :param user_unblock: User removed from community blacklist
        :param lead_forms_new: New form in lead forms
        :param like_add:
        :param like_remove:
        :param message_event:
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.setCallbackSettings'
        response_type = responses.BaseOk
        return self._call(method_name, method_parameters, param_aliases, response_type)

    def set_long_poll_settings(self, group_id: int, enabled: Optional[bool] = None, api_version: Optional[str] = None, message_new: Optional[bool] = None, message_reply: Optional[bool] = None, message_allow: Optional[bool] = None, message_deny: Optional[bool] = None, message_edit: Optional[bool] = None, message_typing_state: Optional[bool] = None, photo_new: Optional[bool] = None, audio_new: Optional[bool] = None, video_new: Optional[bool] = None, wall_reply_new: Optional[bool] = None, wall_reply_edit: Optional[bool] = None, wall_reply_delete: Optional[bool] = None, wall_reply_restore: Optional[bool] = None, wall_post_new: Optional[bool] = None, wall_repost: Optional[bool] = None, board_post_new: Optional[bool] = None, board_post_edit: Optional[bool] = None, board_post_restore: Optional[bool] = None, board_post_delete: Optional[bool] = None, photo_comment_new: Optional[bool] = None, photo_comment_edit: Optional[bool] = None, photo_comment_delete: Optional[bool] = None, photo_comment_restore: Optional[bool] = None, video_comment_new: Optional[bool] = None, video_comment_edit: Optional[bool] = None, video_comment_delete: Optional[bool] = None, video_comment_restore: Optional[bool] = None, market_comment_new: Optional[bool] = None, market_comment_edit: Optional[bool] = None, market_comment_delete: Optional[bool] = None, market_comment_restore: Optional[bool] = None, poll_vote_new: Optional[bool] = None, group_join: Optional[bool] = None, group_leave: Optional[bool] = None, group_change_settings: Optional[bool] = None, group_change_photo: Optional[bool] = None, group_officers_edit: Optional[bool] = None, user_block: Optional[bool] = None, user_unblock: Optional[bool] = None, like_add: Optional[bool] = None, like_remove: Optional[bool] = None, message_event: Optional[bool] = None) -> responses.BaseOk:
        """
        Sets Long Poll notification settings

        :param group_id: Community ID.
        :param enabled: Sets whether Long Poll is enabled ('0' — disabled, '1' — enabled).
        :param api_version:
        :param message_new: A new incoming message has been received ('0' — disabled, '1' — enabled).
        :param message_reply: A new outcoming message has been received ('0' — disabled, '1' — enabled).
        :param message_allow: Allowed messages notifications ('0' — disabled, '1' — enabled).
        :param message_deny: Denied messages notifications ('0' — disabled, '1' — enabled).
        :param message_edit: A message has been edited ('0' — disabled, '1' — enabled).
        :param message_typing_state:
        :param photo_new: New photos notifications ('0' — disabled, '1' — enabled).
        :param audio_new: New audios notifications ('0' — disabled, '1' — enabled).
        :param video_new: New videos notifications ('0' — disabled, '1' — enabled).
        :param wall_reply_new: New wall replies notifications ('0' — disabled, '1' — enabled).
        :param wall_reply_edit: Wall replies edited notifications ('0' — disabled, '1' — enabled).
        :param wall_reply_delete: A wall comment has been deleted ('0' — disabled, '1' — enabled).
        :param wall_reply_restore: A wall comment has been restored ('0' — disabled, '1' — enabled).
        :param wall_post_new: New wall posts notifications ('0' — disabled, '1' — enabled).
        :param wall_repost: New wall posts notifications ('0' — disabled, '1' — enabled).
        :param board_post_new: New board posts notifications ('0' — disabled, '1' — enabled).
        :param board_post_edit: Board posts edited notifications ('0' — disabled, '1' — enabled).
        :param board_post_restore: Board posts restored notifications ('0' — disabled, '1' — enabled).
        :param board_post_delete: Board posts deleted notifications ('0' — disabled, '1' — enabled).
        :param photo_comment_new: New comment to photo notifications ('0' — disabled, '1' — enabled).
        :param photo_comment_edit: A photo comment has been edited ('0' — disabled, '1' — enabled).
        :param photo_comment_delete: A photo comment has been deleted ('0' — disabled, '1' — enabled).
        :param photo_comment_restore: A photo comment has been restored ('0' — disabled, '1' — enabled).
        :param video_comment_new: New comment to video notifications ('0' — disabled, '1' — enabled).
        :param video_comment_edit: A video comment has been edited ('0' — disabled, '1' — enabled).
        :param video_comment_delete: A video comment has been deleted ('0' — disabled, '1' — enabled).
        :param video_comment_restore: A video comment has been restored ('0' — disabled, '1' — enabled).
        :param market_comment_new: New comment to market item notifications ('0' — disabled, '1' — enabled).
        :param market_comment_edit: A market comment has been edited ('0' — disabled, '1' — enabled).
        :param market_comment_delete: A market comment has been deleted ('0' — disabled, '1' — enabled).
        :param market_comment_restore: A market comment has been restored ('0' — disabled, '1' — enabled).
        :param poll_vote_new: A vote in a public poll has been added ('0' — disabled, '1' — enabled).
        :param group_join: Joined community notifications ('0' — disabled, '1' — enabled).
        :param group_leave: Left community notifications ('0' — disabled, '1' — enabled).
        :param group_change_settings:
        :param group_change_photo:
        :param group_officers_edit:
        :param user_block: User added to community blacklist
        :param user_unblock: User removed from community blacklist
        :param like_add:
        :param like_remove:
        :param message_event:
        """
        method_parameters = {k: v for k, v in locals().items() if k not in {'self', 'raw_response'}}
        param_aliases = []
        method_name = 'groups.setLongPollSettings'
        response_type = responses.BaseOk
        return self._call(method_name, method_parameters, param_aliases, response_type)
| 65.345771 | 1,866 | 0.681792 | 3,449 | 26,269 | 5.037112 | 0.083792 | 0.061475 | 0.081966 | 0.041789 | 0.796523 | 0.763771 | 0.750532 | 0.734128 | 0.719622 | 0.704772 | 0 | 0.007123 | 0.20903 | 26,269 | 401 | 1,867 | 65.508728 | 0.822505 | 0.369104 | 0 | 0.463087 | 1 | 0 | 0.049553 | 0.0193 | 0 | 0 | 0 | 0 | 0 | 1 | 0.174497 | false | 0 | 0.040268 | 0 | 0.348993 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
25bb0626184ab34c90d6000efa15e1791332c147 | 31,353 | py | Python | genomics_data_index/test/integration/api/query/features/test_MLSTFeaturesComparator.py | apetkau/thesis-index | 6c96e9ed75d8e661437effe62a939727a0b473fc | [
"Apache-2.0"
] | 1 | 2021-04-21T00:19:49.000Z | 2021-04-21T00:19:49.000Z | genomics_data_index/test/integration/api/query/features/test_MLSTFeaturesComparator.py | apetkau/thesis-index | 6c96e9ed75d8e661437effe62a939727a0b473fc | [
"Apache-2.0"
] | null | null | null | genomics_data_index/test/integration/api/query/features/test_MLSTFeaturesComparator.py | apetkau/thesis-index | 6c96e9ed75d8e661437effe62a939727a0b473fc | [
"Apache-2.0"
] | null | null | null | from genomics_data_index.api.query.GenomicsDataIndex import GenomicsDataIndex
from genomics_data_index.api.query.features.MLSTFeaturesComparator import MLSTFeaturesComparator
from genomics_data_index.storage.SampleSet import SampleSet
from genomics_data_index.storage.model.db import Sample
def test_summary_all(loaded_database_genomic_data_store: GenomicsDataIndex):
    """Summarizing MLST features over all 9 samples yields one row per feature
    with correct Count/Total/Percent values across all three schemes."""
    db = loaded_database_genomic_data_store.connection.database
    all_sample_ids = {s.id for s in db.get_session().query(Sample).all()}
    assert 9 == len(all_sample_ids)

    # Renamed from the misspelled "mlst_summarizier" for consistency with the
    # other tests in this file.
    mlst_summarizer = MLSTFeaturesComparator(connection=loaded_database_genomic_data_store.connection)

    present_set = SampleSet(all_sample_ids)
    summary_df = mlst_summarizer.summary(present_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison

    assert 24 == len(summary_df)
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'abcZ', '1', 5, 9, 55] == summary_df.loc['mlst:lmonocytogenes:abcZ:1'].tolist()
    assert ['lmonocytogenes', 'bglA', '51', 3, 9, 33] == summary_df.loc['mlst:lmonocytogenes:bglA:51'].tolist()
    assert ['lmonocytogenes', 'bglA', '52', 2, 9, 22] == summary_df.loc['mlst:lmonocytogenes:bglA:52'].tolist()
    assert ['ecoli', 'adk', '100', 2, 9, 22] == summary_df.loc['mlst:ecoli:adk:100'].tolist()
    assert ['ecoli', 'recA', '7', 2, 9, 22] == summary_df.loc['mlst:ecoli:recA:7'].tolist()
    assert ['campylobacter', 'uncA', '6', 1, 9, 11] == summary_df.loc['mlst:campylobacter:uncA:6'].tolist()
def test_unique_summary(loaded_database_genomic_data_store: GenomicsDataIndex):
    """unique_summary() reports only features present in the selected samples and
    absent from the complement set, across several selections and schemes.

    Also covers include_present=False/include_unknown=True (unknown-only alleles,
    reported with allele '?') and restriction to a single scheme.
    """
    db = loaded_database_genomic_data_store.connection.database
    all_sample_ids = {s.id for s in db.get_session().query(Sample).all()}
    sample_CFSAN002349 = db.get_session().query(Sample).filter(Sample.name == 'CFSAN002349').one()
    sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
    sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
    sample_2014D_0068 = db.get_session().query(Sample).filter(Sample.name == '2014D-0068').one()
    sample_2014D_0067 = db.get_session().query(Sample).filter(Sample.name == '2014D-0067').one()
    sample_2014C_3598 = db.get_session().query(Sample).filter(Sample.name == '2014C-3598').one()
    sample_2014C_3599 = db.get_session().query(Sample).filter(Sample.name == '2014C-3599').one()
    assert 9 == len(all_sample_ids)

    # Renamed from the misspelled "mlst_summarizier" for consistency with the
    # other tests in this file.
    mlst_summarizer = MLSTFeaturesComparator(connection=loaded_database_genomic_data_store.connection)

    # Test unique on all (should give me identical results to all since nothing is absent from the selection)
    present_set = SampleSet(all_sample_ids)
    complement_set = SampleSet.create_empty()
    summary_df = mlst_summarizer.unique_summary(present_set, other_set=complement_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 24 == len(summary_df)
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'abcZ', '1', 5, 9, 55] == summary_df.loc['mlst:lmonocytogenes:abcZ:1'].tolist()

    # Test unique on single sample (only a single feature)
    present_set = SampleSet({sample_CFSAN002349.id})
    complement_set = SampleSet(all_sample_ids - {sample_CFSAN002349.id})
    summary_df = mlst_summarizer.unique_summary(present_set, other_set=complement_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 1 == len(summary_df)
    # Removed leftover debug print(summary_df).
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'lhkA', '4', 1, 1, 100] == summary_df.loc['mlst:lmonocytogenes:lhkA:4'].tolist()

    # Test unique on two samples
    present_set = SampleSet({sample_CFSAN002349.id, sampleC.id})
    complement_set = SampleSet(all_sample_ids - {sample_CFSAN002349.id, sampleC.id})
    summary_df = mlst_summarizer.unique_summary(present_set, other_set=complement_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 2 == len(summary_df)
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'lhkA', '4', 1, 2, 50] == summary_df.loc['mlst:lmonocytogenes:lhkA:4'].tolist()
    assert ['lmonocytogenes', 'cat', '12', 1, 2, 50] == summary_df.loc['mlst:lmonocytogenes:cat:12'].tolist()

    # Test unique within a scheme
    present_set = SampleSet({sample_2014C_3598.id, sample_2014C_3599.id})
    complement_set = SampleSet(all_sample_ids - {sample_2014C_3598.id, sample_2014C_3599.id})
    summary_df = mlst_summarizer.unique_summary(present_set, other_set=complement_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 7 == len(summary_df)
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['ecoli', 'adk', '100', 2, 2, 100] == summary_df.loc['mlst:ecoli:adk:100'].tolist()
    assert ['ecoli', 'fumC', '23', 2, 2, 100] == summary_df.loc['mlst:ecoli:fumC:23'].tolist()
    assert ['ecoli', 'gyrB', '68', 2, 2, 100] == summary_df.loc['mlst:ecoli:gyrB:68'].tolist()
    assert ['ecoli', 'icd', '45', 2, 2, 100] == summary_df.loc['mlst:ecoli:icd:45'].tolist()
    assert ['ecoli', 'mdh', '1', 2, 2, 100] == summary_df.loc['mlst:ecoli:mdh:1'].tolist()
    assert ['ecoli', 'purA', '35', 2, 2, 100] == summary_df.loc['mlst:ecoli:purA:35'].tolist()
    assert ['ecoli', 'recA', '7', 2, 2, 100] == summary_df.loc['mlst:ecoli:recA:7'].tolist()

    # Test unique across schemes
    present_set = SampleSet({sample_CFSAN002349.id, sample_2014D_0068.id})
    complement_set = SampleSet(all_sample_ids - {sample_CFSAN002349.id, sample_2014D_0068.id})
    summary_df = mlst_summarizer.unique_summary(present_set, other_set=complement_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 2 == len(summary_df)
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'lhkA', '4', 1, 2, 50] == summary_df.loc['mlst:lmonocytogenes:lhkA:4'].tolist()
    assert ['campylobacter', 'uncA', '6', 1, 2, 50] == summary_df.loc['mlst:campylobacter:uncA:6'].tolist()

    # Test unique only unknown
    mlst_summarizer = MLSTFeaturesComparator(connection=loaded_database_genomic_data_store.connection,
                                             include_present=False, include_unknown=True)
    present_set = SampleSet({sampleB.id, sample_2014D_0067.id})
    complement_set = SampleSet(all_sample_ids - {sampleB.id, sample_2014D_0067.id})
    summary_df = mlst_summarizer.unique_summary(present_set, other_set=complement_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 2 == len(summary_df)
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'ldh', '?', 1, 2, 50] == summary_df.loc['mlst:lmonocytogenes:ldh:?'].tolist()
    assert ['campylobacter', 'uncA', '?', 1, 2, 50] == summary_df.loc['mlst:campylobacter:uncA:?'].tolist()

    # Test unique only unknown, restricted to specific scheme
    mlst_summarizer = MLSTFeaturesComparator(connection=loaded_database_genomic_data_store.connection,
                                             include_present=False, include_unknown=True,
                                             scheme='lmonocytogenes')
    present_set = SampleSet({sampleB.id, sample_2014D_0067.id})
    complement_set = SampleSet(all_sample_ids - {sampleB.id, sample_2014D_0067.id})
    summary_df = mlst_summarizer.unique_summary(present_set, other_set=complement_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 1 == len(summary_df)
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'ldh', '?', 1, 2, 50] == summary_df.loc['mlst:lmonocytogenes:ldh:?'].tolist()
def test_summary_selections(loaded_database_genomic_data_store: GenomicsDataIndex):
    """summary() respects the selected SampleSet: Count/Total/Percent are computed
    over only the selected samples, and the comparator's scheme/locus and
    include_present/include_unknown options filter which feature rows appear.
    """
    db = loaded_database_genomic_data_store.connection.database
    all_sample_ids = {s.id for s in db.get_session().query(Sample).all()}
    assert 9 == len(all_sample_ids)
    sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
    sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
    sample_CFSAN002349 = db.get_session().query(Sample).filter(Sample.name == 'CFSAN002349').one()
    sample_2014D_0067 = db.get_session().query(Sample).filter(Sample.name == '2014D-0067').one()
    mlst_summarizer = MLSTFeaturesComparator(connection=loaded_database_genomic_data_store.connection)
    # Test only single sample features
    present_set = SampleSet([sampleA.id])
    summary_df = mlst_summarizer.summary(present_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 7 == len(summary_df)
    assert {'lmonocytogenes'} == set(summary_df['Scheme'].tolist())
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'abcZ', '1', 1, 1, 100] == summary_df.loc['mlst:lmonocytogenes:abcZ:1'].tolist()
    assert ['lmonocytogenes', 'bglA', '51', 1, 1, 100] == summary_df.loc['mlst:lmonocytogenes:bglA:51'].tolist()
    # Test two samples
    present_set = SampleSet([sampleA.id, sampleB.id])
    summary_df = mlst_summarizer.summary(present_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 8 == len(summary_df)
    assert {'lmonocytogenes'} == set(summary_df['Scheme'].tolist())
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'abcZ', '1', 2, 2, 100] == summary_df.loc['mlst:lmonocytogenes:abcZ:1'].tolist()
    assert ['lmonocytogenes', 'bglA', '51', 1, 2, 50] == summary_df.loc['mlst:lmonocytogenes:bglA:51'].tolist()
    assert ['lmonocytogenes', 'bglA', '52', 1, 2, 50] == summary_df.loc['mlst:lmonocytogenes:bglA:52'].tolist()
    assert ['lmonocytogenes', 'ldh', '5', 1, 2, 50] == summary_df.loc['mlst:lmonocytogenes:ldh:5'].tolist()
    # Test three samples
    present_set = SampleSet([sampleA.id, sampleB.id, sample_CFSAN002349.id])
    summary_df = mlst_summarizer.summary(present_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 9 == len(summary_df)
    assert {'lmonocytogenes'} == set(summary_df['Scheme'].tolist())
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'abcZ', '1', 3, 3, 100] == summary_df.loc['mlst:lmonocytogenes:abcZ:1'].tolist()
    assert ['lmonocytogenes', 'bglA', '51', 2, 3, 66] == summary_df.loc['mlst:lmonocytogenes:bglA:51'].tolist()
    assert ['lmonocytogenes', 'bglA', '52', 1, 3, 33] == summary_df.loc['mlst:lmonocytogenes:bglA:52'].tolist()
    assert ['lmonocytogenes', 'ldh', '5', 2, 3, 66] == summary_df.loc['mlst:lmonocytogenes:ldh:5'].tolist()
    assert ['lmonocytogenes', 'lhkA', '5', 2, 3, 66] == summary_df.loc['mlst:lmonocytogenes:lhkA:5'].tolist()
    assert ['lmonocytogenes', 'lhkA', '4', 1, 3, 33] == summary_df.loc['mlst:lmonocytogenes:lhkA:4'].tolist()
    # Test multiple schemes
    present_set = SampleSet([sampleA.id, sampleB.id, sample_CFSAN002349.id, sample_2014D_0067.id])
    summary_df = mlst_summarizer.summary(present_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 15 == len(summary_df)
    assert {'lmonocytogenes', 'campylobacter'} == set(summary_df['Scheme'].tolist())
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'abcZ', '1', 3, 4, 75] == summary_df.loc['mlst:lmonocytogenes:abcZ:1'].tolist()
    assert ['lmonocytogenes', 'bglA', '51', 2, 4, 50] == summary_df.loc['mlst:lmonocytogenes:bglA:51'].tolist()
    assert ['lmonocytogenes', 'bglA', '52', 1, 4, 25] == summary_df.loc['mlst:lmonocytogenes:bglA:52'].tolist()
    assert ['lmonocytogenes', 'ldh', '5', 2, 4, 50] == summary_df.loc['mlst:lmonocytogenes:ldh:5'].tolist()
    assert ['lmonocytogenes', 'lhkA', '5', 2, 4, 50] == summary_df.loc['mlst:lmonocytogenes:lhkA:5'].tolist()
    assert ['lmonocytogenes', 'lhkA', '4', 1, 4, 25] == summary_df.loc['mlst:lmonocytogenes:lhkA:4'].tolist()
    assert ['campylobacter', 'aspA', '2', 1, 4, 25] == summary_df.loc['mlst:campylobacter:aspA:2'].tolist()
    assert ['campylobacter', 'glyA', '3', 1, 4, 25] == summary_df.loc['mlst:campylobacter:glyA:3'].tolist()
    assert 6 == len(summary_df[summary_df['Scheme'] == 'campylobacter'])  # Missing one feature since it's unknown
    # Test multiple schemes sample set but summarize for only a particular scheme
    present_set = SampleSet([sampleA.id, sampleB.id, sample_CFSAN002349.id, sample_2014D_0067.id])
    mlst_summarizer = MLSTFeaturesComparator(connection=loaded_database_genomic_data_store.connection,
                                             scheme='lmonocytogenes')
    summary_df = mlst_summarizer.summary(present_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 9 == len(summary_df)
    assert {'lmonocytogenes'} == set(summary_df['Scheme'].tolist())  # Only results for one scheme
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'abcZ', '1', 3, 4, 75] == summary_df.loc['mlst:lmonocytogenes:abcZ:1'].tolist()
    assert ['lmonocytogenes', 'bglA', '51', 2, 4, 50] == summary_df.loc['mlst:lmonocytogenes:bglA:51'].tolist()
    assert ['lmonocytogenes', 'bglA', '52', 1, 4, 25] == summary_df.loc['mlst:lmonocytogenes:bglA:52'].tolist()
    assert ['lmonocytogenes', 'ldh', '5', 2, 4, 50] == summary_df.loc['mlst:lmonocytogenes:ldh:5'].tolist()
    assert ['lmonocytogenes', 'lhkA', '5', 2, 4, 50] == summary_df.loc['mlst:lmonocytogenes:lhkA:5'].tolist()
    assert ['lmonocytogenes', 'lhkA', '4', 1, 4, 25] == summary_df.loc['mlst:lmonocytogenes:lhkA:4'].tolist()
    # Test multiple schemes sample set but summarize for only a particular scheme/locus
    present_set = SampleSet([sampleA.id, sampleB.id, sample_CFSAN002349.id, sample_2014D_0067.id])
    mlst_summarizer = MLSTFeaturesComparator(connection=loaded_database_genomic_data_store.connection,
                                             scheme='lmonocytogenes', locus='bglA')
    summary_df = mlst_summarizer.summary(present_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 2 == len(summary_df)
    assert {'lmonocytogenes'} == set(summary_df['Scheme'].tolist())  # Only results for one scheme
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'bglA', '51', 2, 4, 50] == summary_df.loc['mlst:lmonocytogenes:bglA:51'].tolist()
    assert ['lmonocytogenes', 'bglA', '52', 1, 4, 25] == summary_df.loc['mlst:lmonocytogenes:bglA:52'].tolist()
    # Test multiple schemes, include unknown
    present_set = SampleSet([sampleA.id, sampleB.id, sample_CFSAN002349.id, sample_2014D_0067.id])
    mlst_summarizer = MLSTFeaturesComparator(connection=loaded_database_genomic_data_store.connection,
                                             include_unknown=True)
    summary_df = mlst_summarizer.summary(present_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 17 == len(summary_df)
    assert {'lmonocytogenes', 'campylobacter'} == set(summary_df['Scheme'].tolist())
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'abcZ', '1', 3, 4, 75] == summary_df.loc['mlst:lmonocytogenes:abcZ:1'].tolist()
    assert ['lmonocytogenes', 'bglA', '51', 2, 4, 50] == summary_df.loc['mlst:lmonocytogenes:bglA:51'].tolist()
    assert ['lmonocytogenes', 'bglA', '52', 1, 4, 25] == summary_df.loc['mlst:lmonocytogenes:bglA:52'].tolist()
    assert ['lmonocytogenes', 'ldh', '5', 2, 4, 50] == summary_df.loc['mlst:lmonocytogenes:ldh:5'].tolist()
    assert ['lmonocytogenes', 'ldh', '?', 1, 4, 25] == summary_df.loc['mlst:lmonocytogenes:ldh:?'].tolist()
    assert ['lmonocytogenes', 'lhkA', '5', 2, 4, 50] == summary_df.loc['mlst:lmonocytogenes:lhkA:5'].tolist()
    assert ['lmonocytogenes', 'lhkA', '4', 1, 4, 25] == summary_df.loc['mlst:lmonocytogenes:lhkA:4'].tolist()
    assert ['campylobacter', 'aspA', '2', 1, 4, 25] == summary_df.loc['mlst:campylobacter:aspA:2'].tolist()
    assert ['campylobacter', 'glyA', '3', 1, 4, 25] == summary_df.loc['mlst:campylobacter:glyA:3'].tolist()
    assert ['campylobacter', 'uncA', '?', 1, 4, 25] == summary_df.loc['mlst:campylobacter:uncA:?'].tolist()
    # Test multiple schemes, only unknown
    present_set = SampleSet([sampleA.id, sampleB.id, sample_CFSAN002349.id, sample_2014D_0067.id])
    mlst_summarizer = MLSTFeaturesComparator(connection=loaded_database_genomic_data_store.connection,
                                             include_present=False, include_unknown=True)
    summary_df = mlst_summarizer.summary(present_set)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 2 == len(summary_df)
    assert {'lmonocytogenes', 'campylobacter'} == set(summary_df['Scheme'].tolist())
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'ldh', '?', 1, 4, 25] == summary_df.loc['mlst:lmonocytogenes:ldh:?'].tolist()
    assert ['campylobacter', 'uncA', '?', 1, 4, 25] == summary_df.loc['mlst:campylobacter:uncA:?'].tolist()
def test_features_comparison(loaded_database_genomic_data_store: GenomicsDataIndex):
    """features_comparison() produces per-category columns (<prefix>_<unit> and
    <prefix>_total) for each sample category, supporting unit='count',
    'percent', and 'proportion', and drops categories smaller than
    category_samples_threshold.
    """
    db = loaded_database_genomic_data_store.connection.database
    all_sample_ids = {s.id for s in db.get_session().query(Sample).all()}
    sampleA = db.get_session().query(Sample).filter(Sample.name == 'SampleA').one()
    sampleB = db.get_session().query(Sample).filter(Sample.name == 'SampleB').one()
    sampleC = db.get_session().query(Sample).filter(Sample.name == 'SampleC').one()
    sample_CFSAN002349 = db.get_session().query(Sample).filter(Sample.name == 'CFSAN002349').one()
    sample_CFSAN023463 = db.get_session().query(Sample).filter(Sample.name == 'CFSAN023463').one()
    lmonocytogenes = {sampleA.id, sampleB.id, sampleC.id, sample_CFSAN002349.id, sample_CFSAN023463.id}
    assert 9 == len(all_sample_ids)
    present_set = SampleSet(all_sample_ids)
    mlst_summarizer = MLSTFeaturesComparator(
        connection=loaded_database_genomic_data_store.connection)
    # Test single category of all
    sample_categories = [present_set]
    comparison_df = mlst_summarizer.features_comparison(selected_samples=present_set,
                                                        sample_categories=sample_categories,
                                                        category_prefixes=['All'],
                                                        unit='count')
    assert 24 == len(comparison_df)
    assert 'MLST Feature' == comparison_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Total', 'All_count', 'All_total'] == list(comparison_df.columns)
    assert {9} == set(comparison_df['Total'].tolist())
    assert {9} == set(comparison_df['All_total'].tolist())
    assert 5 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'All_count']
    assert 3 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'All_count']
    assert 2 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'All_count']
    assert 2 == comparison_df.loc['mlst:ecoli:adk:100', 'All_count']
    assert 2 == comparison_df.loc['mlst:ecoli:recA:7', 'All_count']
    assert 1 == comparison_df.loc['mlst:campylobacter:uncA:6', 'All_count']
    # Test two categories: one of lmonocytogenes and one of the rest
    sample_categories = [SampleSet(lmonocytogenes), SampleSet(all_sample_ids - lmonocytogenes)]
    comparison_df = mlst_summarizer.features_comparison(selected_samples=present_set,
                                                        sample_categories=sample_categories,
                                                        category_prefixes=['lmonocytogenes', 'other'],
                                                        unit='count')
    assert 24 == len(comparison_df)
    assert 'MLST Feature' == comparison_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Total',
            'lmonocytogenes_count', 'other_count',
            'lmonocytogenes_total', 'other_total'] == list(comparison_df.columns)
    assert {9} == set(comparison_df['Total'].tolist())
    assert {5} == set(comparison_df['lmonocytogenes_total'].tolist())
    assert {4} == set(comparison_df['other_total'].tolist())
    assert 5 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'lmonocytogenes_count']
    assert 0 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'other_count']
    assert 3 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'lmonocytogenes_count']
    assert 0 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'other_count']
    assert 2 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'lmonocytogenes_count']
    assert 0 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'other_count']
    assert 0 == comparison_df.loc['mlst:ecoli:adk:100', 'lmonocytogenes_count']
    assert 2 == comparison_df.loc['mlst:ecoli:adk:100', 'other_count']
    assert 0 == comparison_df.loc['mlst:ecoli:recA:7', 'lmonocytogenes_count']
    assert 2 == comparison_df.loc['mlst:ecoli:recA:7', 'other_count']
    assert 0 == comparison_df.loc['mlst:campylobacter:uncA:6', 'lmonocytogenes_count']
    assert 1 == comparison_df.loc['mlst:campylobacter:uncA:6', 'other_count']
    # Test two categories percent: one of lmonocytogenes and one of the rest
    sample_categories = [SampleSet(lmonocytogenes), SampleSet(all_sample_ids - lmonocytogenes)]
    comparison_df = mlst_summarizer.features_comparison(selected_samples=present_set,
                                                        sample_categories=sample_categories,
                                                        category_prefixes=['lmonocytogenes', 'other'],
                                                        unit='percent')
    assert 24 == len(comparison_df)
    assert 'MLST Feature' == comparison_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Total',
            'lmonocytogenes_percent', 'other_percent',
            'lmonocytogenes_total', 'other_total'] == list(comparison_df.columns)
    comparison_df['lmonocytogenes_percent'] = comparison_df['lmonocytogenes_percent'].astype(
        int)  # Convert to int for easier comparison
    comparison_df['other_percent'] = comparison_df['other_percent'].astype(int)  # Convert to int for easier comparison
    assert {9} == set(comparison_df['Total'].tolist())
    assert {5} == set(comparison_df['lmonocytogenes_total'].tolist())
    assert {4} == set(comparison_df['other_total'].tolist())
    assert 100 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'lmonocytogenes_percent']
    assert 0 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'other_percent']
    assert 60 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'lmonocytogenes_percent']
    assert 0 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'other_percent']
    assert 40 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'lmonocytogenes_percent']
    assert 0 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'other_percent']
    assert 0 == comparison_df.loc['mlst:ecoli:adk:100', 'lmonocytogenes_percent']
    assert 50 == comparison_df.loc['mlst:ecoli:adk:100', 'other_percent']
    assert 0 == comparison_df.loc['mlst:ecoli:recA:7', 'lmonocytogenes_percent']
    assert 50 == comparison_df.loc['mlst:ecoli:recA:7', 'other_percent']
    assert 0 == comparison_df.loc['mlst:campylobacter:uncA:6', 'lmonocytogenes_percent']
    assert 25 == comparison_df.loc['mlst:campylobacter:uncA:6', 'other_percent']
    # Test two categories proportion: one of lmonocytogenes and one of the rest
    sample_categories = [SampleSet(lmonocytogenes), SampleSet(all_sample_ids - lmonocytogenes)]
    comparison_df = mlst_summarizer.features_comparison(selected_samples=present_set,
                                                        sample_categories=sample_categories,
                                                        category_prefixes=['lmonocytogenes', 'other'],
                                                        unit='proportion')
    assert 24 == len(comparison_df)
    assert 'MLST Feature' == comparison_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Total',
            'lmonocytogenes_proportion', 'other_proportion',
            'lmonocytogenes_total', 'other_total'] == list(comparison_df.columns)
    comparison_df['lmonocytogenes_proportion'] = (comparison_df['lmonocytogenes_proportion'] * 100).astype(
        int)  # Convert to percent as int for easier comparison
    comparison_df['other_proportion'] = (comparison_df['other_proportion'] * 100).astype(int)
    assert {9} == set(comparison_df['Total'].tolist())
    assert {5} == set(comparison_df['lmonocytogenes_total'].tolist())
    assert {4} == set(comparison_df['other_total'].tolist())
    assert 100 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'lmonocytogenes_proportion']
    assert 0 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'other_proportion']
    assert 60 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'lmonocytogenes_proportion']
    assert 0 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'other_proportion']
    assert 40 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'lmonocytogenes_proportion']
    assert 0 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'other_proportion']
    assert 0 == comparison_df.loc['mlst:ecoli:adk:100', 'lmonocytogenes_proportion']
    assert 50 == comparison_df.loc['mlst:ecoli:adk:100', 'other_proportion']
    assert 0 == comparison_df.loc['mlst:ecoli:recA:7', 'lmonocytogenes_proportion']
    assert 50 == comparison_df.loc['mlst:ecoli:recA:7', 'other_proportion']
    assert 0 == comparison_df.loc['mlst:campylobacter:uncA:6', 'lmonocytogenes_proportion']
    assert 25 == comparison_df.loc['mlst:campylobacter:uncA:6', 'other_proportion']
    # Test two categories: one of lmonocytogenes and one of the rest threshold below
    sample_categories = [SampleSet(lmonocytogenes), SampleSet(all_sample_ids - lmonocytogenes)]
    comparison_df = mlst_summarizer.features_comparison(selected_samples=present_set,
                                                        sample_categories=sample_categories,
                                                        category_prefixes=['lmonocytogenes', 'other'],
                                                        category_samples_threshold=4,
                                                        unit='count')
    assert 24 == len(comparison_df)
    assert 'MLST Feature' == comparison_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Total',
            'lmonocytogenes_count', 'other_count',
            'lmonocytogenes_total', 'other_total'] == list(comparison_df.columns)
    assert {9} == set(comparison_df['Total'].tolist())
    assert {5} == set(comparison_df['lmonocytogenes_total'].tolist())
    assert {4} == set(comparison_df['other_total'].tolist())
    assert 5 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'lmonocytogenes_count']
    assert 0 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'other_count']
    assert 3 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'lmonocytogenes_count']
    assert 0 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'other_count']
    assert 2 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'lmonocytogenes_count']
    assert 0 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'other_count']
    assert 0 == comparison_df.loc['mlst:ecoli:adk:100', 'lmonocytogenes_count']
    assert 2 == comparison_df.loc['mlst:ecoli:adk:100', 'other_count']
    assert 0 == comparison_df.loc['mlst:ecoli:recA:7', 'lmonocytogenes_count']
    assert 2 == comparison_df.loc['mlst:ecoli:recA:7', 'other_count']
    assert 0 == comparison_df.loc['mlst:campylobacter:uncA:6', 'lmonocytogenes_count']
    assert 1 == comparison_df.loc['mlst:campylobacter:uncA:6', 'other_count']
    # Test two categories: one of lmonocytogenes and one of the rest threshold above
    sample_categories = [SampleSet(lmonocytogenes), SampleSet(all_sample_ids - lmonocytogenes)]
    comparison_df = mlst_summarizer.features_comparison(selected_samples=present_set,
                                                        sample_categories=sample_categories,
                                                        category_prefixes=['lmonocytogenes', 'other'],
                                                        category_samples_threshold=5,
                                                        unit='count')
    assert 24 == len(comparison_df)
    assert 'MLST Feature' == comparison_df.index.name
    # The 'other' category has only 4 samples (< threshold of 5), so its columns are dropped.
    assert ['Scheme', 'Locus', 'Allele', 'Total',
            'lmonocytogenes_count',
            'lmonocytogenes_total'] == list(comparison_df.columns)
    assert {9} == set(comparison_df['Total'].tolist())
    assert {5} == set(comparison_df['lmonocytogenes_total'].tolist())
    assert 5 == comparison_df.loc['mlst:lmonocytogenes:abcZ:1', 'lmonocytogenes_count']
    assert 3 == comparison_df.loc['mlst:lmonocytogenes:bglA:51', 'lmonocytogenes_count']
    assert 2 == comparison_df.loc['mlst:lmonocytogenes:bglA:52', 'lmonocytogenes_count']
    assert 0 == comparison_df.loc['mlst:ecoli:adk:100', 'lmonocytogenes_count']
    assert 0 == comparison_df.loc['mlst:ecoli:recA:7', 'lmonocytogenes_count']
    assert 0 == comparison_df.loc['mlst:campylobacter:uncA:6', 'lmonocytogenes_count']
| 69.984375 | 119 | 0.679712 | 3,861 | 31,353 | 5.329707 | 0.044807 | 0.073914 | 0.053358 | 0.08271 | 0.930217 | 0.919914 | 0.904218 | 0.898824 | 0.870687 | 0.846681 | 0 | 0.037768 | 0.168182 | 31,353 | 447 | 120 | 70.14094 | 0.751265 | 0.058846 | 0 | 0.702413 | 0 | 0 | 0.250916 | 0.09666 | 0 | 0 | 0 | 0 | 0.581769 | 1 | 0.010724 | false | 0 | 0.010724 | 0 | 0.021448 | 0.002681 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
25e66851024003f5eb71351c6257ff81351699b8 | 5,101 | py | Python | tests/test_provider_F5Networks_bigip.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 507 | 2017-07-26T02:58:38.000Z | 2022-01-21T12:35:13.000Z | tests/test_provider_F5Networks_bigip.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 135 | 2017-07-20T12:01:59.000Z | 2021-10-04T22:25:40.000Z | tests/test_provider_F5Networks_bigip.py | mjuenema/python-terrascript | 6d8bb0273a14bfeb8ff8e950fe36f97f7c6e7b1d | [
"BSD-2-Clause"
] | 81 | 2018-02-20T17:55:28.000Z | 2022-01-31T07:08:40.000Z | # tests/test_provider_F5Networks_bigip.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:13:25 UTC)
def test_provider_import():
    """The F5Networks bigip provider module is importable."""
    import terrascript.provider.F5Networks.bigip
def test_resource_import():
    """Every generated bigip resource class is importable.

    The import list is consolidated into a single parenthesized statement;
    importing any missing name raises ImportError and fails the test.
    """
    from terrascript.resource.F5Networks.bigip import (
        bigip_as3,
        bigip_bigiq_as3,
        bigip_cm_device,
        bigip_cm_devicegroup,
        bigip_command,
        bigip_common_license_manage_bigiq,
        bigip_do,
        bigip_event_service_discovery,
        bigip_fast_application,
        bigip_fast_template,
        bigip_ipsec_policy,
        bigip_ltm_datagroup,
        bigip_ltm_irule,
        bigip_ltm_monitor,
        bigip_ltm_node,
        bigip_ltm_persistence_profile_cookie,
        bigip_ltm_persistence_profile_dstaddr,
        bigip_ltm_persistence_profile_srcaddr,
        bigip_ltm_persistence_profile_ssl,
        bigip_ltm_policy,
        bigip_ltm_pool,
        bigip_ltm_pool_attachment,
        bigip_ltm_profile_client_ssl,
        bigip_ltm_profile_fasthttp,
        bigip_ltm_profile_fastl4,
        bigip_ltm_profile_ftp,
        bigip_ltm_profile_http,
        bigip_ltm_profile_http2,
        bigip_ltm_profile_httpcompress,
        bigip_ltm_profile_oneconnect,
        bigip_ltm_profile_server_ssl,
        bigip_ltm_profile_tcp,
        bigip_ltm_snat,
        bigip_ltm_snatpool,
        bigip_ltm_virtual_address,
        bigip_ltm_virtual_server,
        bigip_net_ike_peer,
        bigip_net_route,
        bigip_net_selfip,
        bigip_net_tunnel,
        bigip_net_vlan,
        bigip_ssl_certificate,
        bigip_ssl_key,
        bigip_sys_bigiplicense,
        bigip_sys_dns,
        bigip_sys_iapp,
        bigip_sys_ntp,
        bigip_sys_provision,
        bigip_sys_snmp,
        bigip_sys_snmp_traps,
        bigip_traffic_selector,
    )
def test_datasource_import():
    """Smoke-test that every bigip data-source class is importable."""
    # One grouped import keeps the data-source list easy to scan and extend.
    from terrascript.data.F5Networks.bigip import (
        bigip_ltm_datagroup,
        bigip_ltm_irule,
        bigip_ltm_monitor,
        bigip_ltm_node,
        bigip_ltm_pool,
        bigip_ssl_certificate,
        bigip_vwan_config,
    )
# TODO: Shortcut imports without namespace for official and supported providers.
# TODO: This has to be moved into a required_providers block.
# def test_version_source():
#
# import terrascript.provider.F5Networks.bigip
#
# t = terrascript.provider.F5Networks.bigip.bigip()
# s = str(t)
#
# assert 'https://github.com/F5Networks/terraform-provider-bigip' in s
# assert '1.11.1' in s
| 34.70068 | 87 | 0.819839 | 638 | 5,101 | 6.315047 | 0.19279 | 0.230827 | 0.302308 | 0.374286 | 0.823033 | 0.803177 | 0.803177 | 0.709605 | 0.331844 | 0.100273 | 0 | 0.018719 | 0.130759 | 5,101 | 146 | 88 | 34.938356 | 0.889941 | 0.097628 | 0 | 0.044118 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006849 | 0 | 1 | 0.044118 | true | 0 | 0.911765 | 0 | 0.955882 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 11 |
d39e32a53c3d31ffa90efd229c92831ee3e41033 | 3,520 | py | Python | python/data_quarters.py | NEU-DS-4200-S20/s-l-project-mothers-out-front-2 | 82610215549b72958c2a63b68410a8b430dde76f | [
"BSD-3-Clause"
] | null | null | null | python/data_quarters.py | NEU-DS-4200-S20/s-l-project-mothers-out-front-2 | 82610215549b72958c2a63b68410a8b430dde76f | [
"BSD-3-Clause"
] | null | null | null | python/data_quarters.py | NEU-DS-4200-S20/s-l-project-mothers-out-front-2 | 82610215549b72958c2a63b68410a8b430dde76f | [
"BSD-3-Clause"
] | 1 | 2020-06-01T20:16:53.000Z | 2020-06-01T20:16:53.000Z | import numpy
import pandas as pd
# Arrays copy-and-pasted from the output of data processing.js: one entry per
# weekly snapshot, newest first.  NOTE: the "dates" literal below was re-joined
# onto a single line; it had been broken mid-string by a line wrap, which is a
# SyntaxError in Python.
dates = ["2020-03-31T04:00:00.000Z","2020-03-24T04:00:00.000Z","2020-03-17T04:00:00.000Z","2020-03-12T04:00:00.000Z","2020-03-03T05:00:00.000Z","2020-02-25T05:00:00.000Z","2020-02-18T05:00:00.000Z","2020-02-11T05:00:00.000Z","2020-02-04T05:00:00.000Z","2020-01-28T05:00:00.000Z","2020-01-14T05:00:00.000Z","2020-01-07T05:00:00.000Z","2019-12-14T05:00:00.000Z","2019-12-04T05:00:00.000Z","2019-11-26T05:00:00.000Z","2019-11-20T05:00:00.000Z","2019-11-13T05:00:00.000Z","2019-11-06T05:00:00.000Z","2019-10-30T04:00:00.000Z","2019-10-23T04:00:00.000Z","2019-10-16T04:00:00.000Z","2019-10-09T04:00:00.000Z","2019-10-02T04:00:00.000Z","2019-09-25T04:00:00.000Z","2019-09-19T04:00:00.000Z","2019-09-10T04:00:00.000Z","2019-09-03T04:00:00.000Z","2019-08-28T04:00:00.000Z","2019-08-21T04:00:00.000Z","2019-08-14T04:00:00.000Z","2019-08-08T04:00:00.000Z","2019-07-31T04:00:00.000Z","2019-07-24T04:00:00.000Z","2019-07-17T04:00:00.000Z","2019-07-10T04:00:00.000Z","2019-07-03T04:00:00.000Z","2019-06-26T04:00:00.000Z","2019-06-18T04:00:00.000Z","2019-06-12T04:00:00.000Z","2019-06-05T04:00:00.000Z","2019-05-29T04:00:00.000Z","2019-05-23T04:00:00.000Z","2019-05-16T04:00:00.000Z","2019-05-08T04:00:00.000Z","2019-05-01T04:00:00.000Z","2019-04-22T04:00:00.000Z","2019-04-16T04:00:00.000Z","2019-04-08T04:00:00.000Z","2019-04-01T04:00:00.000Z","2019-03-26T04:00:00.000Z","2019-03-19T04:00:00.000Z","2019-03-12T04:00:00.000Z","2019-03-05T05:00:00.000Z","2019-02-26T05:00:00.000Z","2019-02-19T05:00:00.000Z","2019-02-11T05:00:00.000Z","2019-02-06T05:00:00.000Z","2019-01-29T05:00:00.000Z","2019-01-24T05:00:00.000Z","2019-01-15T05:00:00.000Z","2019-01-08T05:00:00.000Z","2018-12-12T05:00:00.000Z","2018-12-04T05:00:00.000Z","2018-11-28T05:00:00.000Z","2018-11-15T05:00:00.000Z","2018-11-06T05:00:00.000Z","2018-10-31T04:00:00.000Z","2018-10-24T04:00:00.000Z","2018-10-17T04:00:00.000Z","2018-10-11T04:00:00.000Z","2018-10-01T04:00:00.000Z","2018-09-26T04:00:00.000Z","2018-09-18T04:00:00.000Z","2018-09-10T04:00:00.000Z","2018-09-03T04:00:00.000Z","2018-08-27T04:00:00.000Z"]
supporters = [210,210,210,210,210,210,209,208,208,207,206,206,205,203,203,202,201,200,199,200,198,198,198,192,191,191,190,190,189,188,187,187,186,186,185,182,182,182,182,182,178,178,178,173,172,172,172,173,171,170,170,169,169,168,167,164,164,164,164,163,162,153,153,154,151,150,149,149,147,147,139,138,138,138,138,134]
volunteers = [6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,5,5,5,5,5,5,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
leaders = [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
total = [212,212,212,212,212,212,212,212,212,212,212,212,211,209,209,208,207,206,205,205,203,203,203,197,196,194,193,193,192,191,190,190,189,189,188,185,185,185,185,185,181,181,181,175,174,174,174,173,171,170,170,169,169,168,167,164,164,164,164,163,162,153,153,154,151,150,149,149,147,147,139,138,138,138,138,134]
# Assemble the weekly snapshot arrays into a single table.
frame = pd.DataFrame(
    {"dates": dates, "supporters": supporters, "leaders": leaders,
     "volunteers": volunteers, "total": total})
frame["dates"] = pd.to_datetime(frame["dates"]).dt.date
# Index by (date-only) timestamps so the table can be resampled by time.
frame = frame.set_index("dates")
frame.index = pd.to_datetime(frame.index)
# Keep the last weekly snapshot of each calendar quarter and write it out.
frame.resample(rule="Q").last().to_csv("PA_quarters.csv")
9f12a1c8962316fe8fbc0369ba5f7107af1d2cdc | 119 | py | Python | app/http/middleware/generators.py | israel-fl/PersonalPage | 8292de0fd7ad11fdc2b89521658c72a2d3135440 | [
"Unlicense"
] | null | null | null | app/http/middleware/generators.py | israel-fl/PersonalPage | 8292de0fd7ad11fdc2b89521658c72a2d3135440 | [
"Unlicense"
] | null | null | null | app/http/middleware/generators.py | israel-fl/PersonalPage | 8292de0fd7ad11fdc2b89521658c72a2d3135440 | [
"Unlicense"
] | null | null | null | import os, binascii
# Generate random token
def generate_hash(size=15):
    """Return a random token: ``size`` random bytes, hex-encoded.

    The result is a ``bytes`` object of length ``2 * size`` containing
    ASCII hex digits.
    """
    random_bytes = os.urandom(size)
    # hexlify is the documented alias of b2a_hex.
    return binascii.hexlify(random_bytes)
| 19.833333 | 45 | 0.764706 | 18 | 119 | 4.944444 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029126 | 0.134454 | 119 | 5 | 46 | 23.8 | 0.834951 | 0.176471 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 7 |
9f2ef9cdc368600d3c89be37c04b73110d71e131 | 5,178 | py | Python | PointClouds/classifier.py | haoruilee/DeepSets | b405dd6b51a34fb1ef622e25e6685b417b7b7cbb | [
"Apache-2.0"
] | 213 | 2018-04-14T19:24:29.000Z | 2022-03-27T07:58:48.000Z | PointClouds/classifier.py | haoruilee/DeepSets | b405dd6b51a34fb1ef622e25e6685b417b7b7cbb | [
"Apache-2.0"
] | 2 | 2019-07-30T14:39:01.000Z | 2019-07-30T15:48:06.000Z | PointClouds/classifier.py | haoruilee/DeepSets | b405dd6b51a34fb1ef622e25e6685b417b7b7cbb | [
"Apache-2.0"
] | 60 | 2018-04-16T20:12:55.000Z | 2022-03-25T04:47:48.000Z | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
import torch.autograd as autograd
import h5py
import pdb
from tqdm import tqdm, trange
class PermEqui1_max(nn.Module):
  """Permutation-equivariant layer: Gamma(x - max over the set dimension)."""

  def __init__(self, in_dim, out_dim):
    super(PermEqui1_max, self).__init__()
    self.Gamma = nn.Linear(in_dim, out_dim)

  def forward(self, x):
    # Per-set max (dim 1), kept for broadcasting, subtracted before the linear map.
    set_max = x.max(1, keepdim=True)[0]
    return self.Gamma(x - set_max)
class PermEqui1_mean(nn.Module):
  """Permutation-equivariant layer: Gamma(x - mean over the set dimension)."""

  def __init__(self, in_dim, out_dim):
    super(PermEqui1_mean, self).__init__()
    self.Gamma = nn.Linear(in_dim, out_dim)

  def forward(self, x):
    centered = x - x.mean(1, keepdim=True)
    return self.Gamma(centered)
class PermEqui2_max(nn.Module):
  """Permutation-equivariant layer: Gamma(x) - Lambda(max over the set)."""

  def __init__(self, in_dim, out_dim):
    super(PermEqui2_max, self).__init__()
    self.Gamma = nn.Linear(in_dim, out_dim)
    # No bias on Lambda: it is a pure correction term added to Gamma's output.
    self.Lambda = nn.Linear(in_dim, out_dim, bias=False)

  def forward(self, x):
    set_max = x.max(1, keepdim=True)[0]
    return self.Gamma(x) - self.Lambda(set_max)
class PermEqui2_mean(nn.Module):
  """Permutation-equivariant layer: Gamma(x) - Lambda(mean over the set)."""

  def __init__(self, in_dim, out_dim):
    super(PermEqui2_mean, self).__init__()
    self.Gamma = nn.Linear(in_dim, out_dim)
    # No bias on Lambda: it is a pure correction term added to Gamma's output.
    self.Lambda = nn.Linear(in_dim, out_dim, bias=False)

  def forward(self, x):
    set_mean = x.mean(1, keepdim=True)
    return self.Gamma(x) - self.Lambda(set_mean)
class D(nn.Module):
  """Permutation-invariant DeepSets classifier (ELU activations).

  phi: three permutation-equivariant layers applied per set element;
  ro:  a dropout-regularized MLP applied to the mean over the set
       dimension, producing 40 class scores.
  """

  def __init__(self, d_dim, x_dim=3, pool = 'mean'):
    super(D, self).__init__()
    self.d_dim = d_dim
    self.x_dim = x_dim

    # All four pool variants share the same three-layer structure; only the
    # equivariant layer type differs, so build phi from a single template.
    pool_layers = {
        'max': PermEqui2_max,
        'max1': PermEqui1_max,
        'mean': PermEqui2_mean,
        'mean1': PermEqui1_mean,
    }
    if pool not in pool_layers:
      # Previously an unknown pool silently left self.phi unset, failing much
      # later with an opaque AttributeError; fail fast instead.
      raise ValueError("unknown pool %r; expected one of %s" % (pool, sorted(pool_layers)))
    layer = pool_layers[pool]
    self.phi = nn.Sequential(
        layer(self.x_dim, self.d_dim),
        nn.ELU(inplace=True),
        layer(self.d_dim, self.d_dim),
        nn.ELU(inplace=True),
        layer(self.d_dim, self.d_dim),
        nn.ELU(inplace=True),
    )

    self.ro = nn.Sequential(
        nn.Dropout(p=0.5),
        nn.Linear(self.d_dim, self.d_dim),
        nn.ELU(inplace=True),
        nn.Dropout(p=0.5),
        nn.Linear(self.d_dim, 40),
    )
    print(self)  # keep the original construction-time architecture dump

  def forward(self, x):
    phi_output = self.phi(x)         # equivariant per-element features
    sum_output = phi_output.mean(1)  # invariant mean-pool over the set dim
    ro_output = self.ro(sum_output)
    return ro_output
class DTanh(nn.Module):
  """Permutation-invariant DeepSets classifier (Tanh activations).

  Same structure as D, but with Tanh non-linearities in phi/ro and
  max-pooling (instead of mean) over the set dimension in forward.
  """

  def __init__(self, d_dim, x_dim=3, pool = 'mean'):
    super(DTanh, self).__init__()
    self.d_dim = d_dim
    self.x_dim = x_dim

    # The four pool branches differed only in the equivariant layer type;
    # build phi from a single template to remove the duplication.
    pool_layers = {
        'max': PermEqui2_max,
        'max1': PermEqui1_max,
        'mean': PermEqui2_mean,
        'mean1': PermEqui1_mean,
    }
    if pool not in pool_layers:
      # Previously an unknown pool silently left self.phi unset, failing much
      # later with an opaque AttributeError; fail fast instead.
      raise ValueError("unknown pool %r; expected one of %s" % (pool, sorted(pool_layers)))
    layer = pool_layers[pool]
    self.phi = nn.Sequential(
        layer(self.x_dim, self.d_dim),
        nn.Tanh(),
        layer(self.d_dim, self.d_dim),
        nn.Tanh(),
        layer(self.d_dim, self.d_dim),
        nn.Tanh(),
    )

    self.ro = nn.Sequential(
        nn.Dropout(p=0.5),
        nn.Linear(self.d_dim, self.d_dim),
        nn.Tanh(),
        nn.Dropout(p=0.5),
        nn.Linear(self.d_dim, 40),
    )
    print(self)  # keep the original construction-time architecture dump

  def forward(self, x):
    phi_output = self.phi(x)
    sum_output, _ = phi_output.max(1)  # invariant max-pool over the set dim
    ro_output = self.ro(sum_output)
    return ro_output
def clip_grad(model, max_norm):
  """Clip the gradients of *model* in place so that their global L2 norm
  does not exceed *max_norm*.

  Returns the total gradient norm measured before clipping.
  """
  total_norm = 0
  for p in model.parameters():
    if p.grad is None:
      # Frozen or unused parameters have no gradient; the original
      # crashed on them with an AttributeError.
      continue
    param_norm = p.grad.data.norm(2)
    total_norm += param_norm ** 2
  total_norm = total_norm ** (0.5)
  clip_coef = max_norm / (total_norm + 1e-6)
  if clip_coef < 1:
    for p in model.parameters():
      if p.grad is not None:
        p.grad.data.mul_(clip_coef)
  return total_norm
| 27.396825 | 56 | 0.589417 | 783 | 5,178 | 3.64751 | 0.095785 | 0.072829 | 0.140056 | 0.10014 | 0.865546 | 0.85084 | 0.844538 | 0.844538 | 0.843137 | 0.843137 | 0 | 0.017301 | 0.27443 | 5,178 | 188 | 57 | 27.542553 | 0.74288 | 0 | 0 | 0.746988 | 0 | 0 | 0.007725 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078313 | false | 0 | 0.054217 | 0 | 0.210843 | 0.012048 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
9f4538e696b3118b119f2e728aadfb95a06423ad | 51,773 | py | Python | tests/test_0115-generic-reducer-operation.py | martindurant/awkward-1.0 | a3221ee1bab6551dd01d5dd07a1d2dc24fd02c38 | [
"BSD-3-Clause"
] | null | null | null | tests/test_0115-generic-reducer-operation.py | martindurant/awkward-1.0 | a3221ee1bab6551dd01d5dd07a1d2dc24fd02c38 | [
"BSD-3-Clause"
] | null | null | null | tests/test_0115-generic-reducer-operation.py | martindurant/awkward-1.0 | a3221ee1bab6551dd01d5dd07a1d2dc24fd02c38 | [
"BSD-3-Clause"
] | null | null | null | # BSD 3-Clause License; see https://github.com/jpivarski/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import sys
import pytest
import numpy
import awkward1
# Primes below 1000 by trial division; testing divisors only up to sqrt(x)
# suffices and avoids the quadratic cost of testing every n < x.
primes = [x for x in range(2, 1000) if all(x % n != 0 for n in range(2, int(x ** 0.5) + 1))]
def test_reproduce_numpy():
    """Reducers over a regular 2x3x5 array of primes must match NumPy semantics."""
    content1 = awkward1.layout.NumpyArray(numpy.array(primes[:2 * 3 * 5], dtype=numpy.int64))
    offsets1 = awkward1.layout.Index64(numpy.array([0, 5, 10, 15, 20, 25, 30], dtype=numpy.int64))
    offsets2 = awkward1.layout.Index64(numpy.array([0, 3, 6], dtype=numpy.int64))
    depth2 = awkward1.layout.ListOffsetArray64(
        offsets2, awkward1.layout.ListOffsetArray64(offsets1, content1))

    assert awkward1.tolist(depth2) == [
        [[2, 3, 5, 7, 11], [13, 17, 19, 23, 29], [31, 37, 41, 43, 47]],
        [[53, 59, 61, 67, 71], [73, 79, 83, 89, 97], [101, 103, 107, 109, 113]],
    ]

    # innermost axis: one product per list of five primes
    innermost = [
        [2 * 3 * 5 * 7 * 11, 13 * 17 * 19 * 23 * 29, 31 * 37 * 41 * 43 * 47],
        [53 * 59 * 61 * 67 * 71, 73 * 79 * 83 * 89 * 97, 101 * 103 * 107 * 109 * 113],
    ]
    assert awkward1.tolist(depth2.prod(axis=-1)) == innermost
    assert awkward1.tolist(depth2.prod(axis=2)) == innermost

    # middle axis: elements matched position-by-position across the three sublists
    middle = [
        [2 * 13 * 31, 3 * 17 * 37, 5 * 19 * 41, 7 * 23 * 43, 11 * 29 * 47],
        [53 * 73 * 101, 59 * 79 * 103, 61 * 83 * 107, 67 * 89 * 109, 71 * 97 * 113],
    ]
    assert awkward1.tolist(depth2.prod(axis=-2)) == middle
    assert awkward1.tolist(depth2.prod(axis=1)) == middle

    # outermost axis: the two groups multiplied elementwise
    outermost = [
        [2 * 53, 3 * 59, 5 * 61, 7 * 67, 11 * 71],
        [13 * 73, 17 * 79, 19 * 83, 23 * 89, 29 * 97],
        [31 * 101, 37 * 103, 41 * 107, 43 * 109, 47 * 113],
    ]
    assert awkward1.tolist(depth2.prod(axis=-3)) == outermost
    assert awkward1.tolist(depth2.prod(axis=0)) == outermost

    # one-level-deep version: three lists of four primes
    content2 = awkward1.layout.NumpyArray(numpy.array(primes[:12], dtype=numpy.int64))
    offsets3 = awkward1.layout.Index64(numpy.array([0, 4, 8, 12], dtype=numpy.int64))
    depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)

    rows = [2 * 3 * 5 * 7, 11 * 13 * 17 * 19, 23 * 29 * 31 * 37]
    assert awkward1.tolist(depth1.prod(-1)) == rows
    assert awkward1.tolist(depth1.prod(1)) == rows

    columns = [2 * 11 * 23, 3 * 13 * 29, 5 * 17 * 31, 7 * 19 * 37]
    assert awkward1.tolist(depth1.prod(-2)) == columns
    assert awkward1.tolist(depth1.prod(0)) == columns
def test_gaps():
    """prod along axis -3 must align ragged sublists position-by-position,
    no matter which elements or sublists are missing."""

    def nested(values, inner_offsets, outer_offsets):
        # Build a depth-2 jagged array: outer lists -> inner lists -> int64 values.
        content = awkward1.layout.NumpyArray(numpy.array(values, dtype=numpy.int64))
        inner = awkward1.layout.ListOffsetArray64(
            awkward1.layout.Index64(numpy.array(inner_offsets, dtype=numpy.int64)), content)
        return awkward1.layout.ListOffsetArray64(
            awkward1.layout.Index64(numpy.array(outer_offsets, dtype=numpy.int64)), inner)

    full = [
        [[2, 3, 5, 7, 11], [13, 17, 19, 23, 29], [31, 37, 41, 43, 47]],
        [[53, 59, 61, 67, 71], [73, 79, 83, 89, 97], [101, 103, 107, 109, 113]],
    ]

    # leading garbage value skipped by a non-zero starting outer offset
    array = nested([123] + primes[:2 * 3 * 5], [0, 1, 6, 11, 16, 21, 26, 31], [1, 4, 7])
    assert awkward1.tolist(array) == full
    assert awkward1.tolist(array.prod(-3)) == [
        [106, 177, 305, 469, 781],
        [949, 1343, 1577, 2047, 2813],
        [3131, 3811, 4387, 4687, 5311],
    ]

    # last inner list one element short
    array = nested(primes[:2 * 3 * 5 - 1], [0, 5, 10, 15, 20, 25, 29], [0, 3, 6])
    assert awkward1.tolist(array) == [
        full[0],
        [[53, 59, 61, 67, 71], [73, 79, 83, 89, 97], [101, 103, 107, 109]],
    ]
    assert awkward1.tolist(array.prod(-3)) == [
        [106, 177, 305, 469, 781],
        [949, 1343, 1577, 2047, 2813],
        [3131, 3811, 4387, 4687, 47],
    ]

    # last inner list two elements short
    array = nested(primes[:2 * 3 * 5 - 2], [0, 5, 10, 15, 20, 25, 28], [0, 3, 6])
    assert awkward1.tolist(array) == [
        full[0],
        [[53, 59, 61, 67, 71], [73, 79, 83, 89, 97], [101, 103, 107]],
    ]
    assert awkward1.tolist(array.prod(-3)) == [
        [106, 177, 305, 469, 781],
        [949, 1343, 1577, 2047, 2813],
        [3131, 3811, 4387, 43, 47],
    ]

    # middle inner list of the second group is short
    array = nested([2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47,
                    53, 59, 61, 67, 71, 73, 79, 83, 89, 101, 103, 107, 109],
                   [0, 5, 10, 15, 20, 24, 28], [0, 3, 6])
    assert awkward1.tolist(array) == [
        full[0],
        [[53, 59, 61, 67, 71], [73, 79, 83, 89], [101, 103, 107, 109]],
    ]
    assert awkward1.tolist(array.prod(-3)) == [
        [106, 177, 305, 469, 781],
        [949, 1343, 1577, 2047, 29],
        [3131, 3811, 4387, 4687, 47],
    ]

    # first inner list of the first group is short
    array = nested(primes[1:2 * 3 * 5], [0, 4, 9, 14, 19, 24, 29], [0, 3, 6])
    assert awkward1.tolist(array) == [
        [[3, 5, 7, 11], [13, 17, 19, 23, 29], [31, 37, 41, 43, 47]],
        full[1],
    ]
    assert awkward1.tolist(array.prod(-3)) == [
        [159, 295, 427, 737, 71],
        [949, 1343, 1577, 2047, 2813],
        [3131, 3811, 4387, 4687, 5311],
    ]

    # first inner list two elements short
    array = nested(primes[2:2 * 3 * 5], [0, 3, 8, 13, 18, 23, 28], [0, 3, 6])
    assert awkward1.tolist(array) == [
        [[5, 7, 11], [13, 17, 19, 23, 29], [31, 37, 41, 43, 47]],
        full[1],
    ]
    assert awkward1.tolist(array.prod(-3)) == [
        [265, 413, 671, 67, 71],
        [949, 1343, 1577, 2047, 2813],
        [3131, 3811, 4387, 4687, 5311],
    ]

    # first inner list short at the end rather than the beginning
    array = nested([3, 5, 7, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47,
                    53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113],
                   [0, 3, 8, 13, 18, 23, 28], [0, 3, 6])
    assert awkward1.tolist(array) == [
        [[3, 5, 7], [13, 17, 19, 23, 29], [31, 37, 41, 43, 47]],
        full[1],
    ]
    assert awkward1.tolist(array.prod(-3)) == [
        [159, 295, 427, 67, 71],
        [949, 1343, 1577, 2047, 2813],
        [3131, 3811, 4387, 4687, 5311],
    ]

    # two different short inner lists in the first group
    array = nested([3, 5, 7, 11, 13, 17, 19, 23, 31, 37, 41, 43, 47,
                    53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113],
                   [0, 4, 8, 13, 18, 23, 28], [0, 3, 6])
    assert awkward1.tolist(array) == [
        [[3, 5, 7, 11], [13, 17, 19, 23], [31, 37, 41, 43, 47]],
        full[1],
    ]
    assert awkward1.tolist(array.prod(-3)) == [
        [159, 295, 427, 737, 71],
        [949, 1343, 1577, 2047, 97],
        [3131, 3811, 4387, 4687, 5311],
    ]

    # matching short third inner lists in both groups
    array = nested([2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43,
                    53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109],
                   [0, 5, 10, 14, 19, 24, 28], [0, 3, 6])
    assert awkward1.tolist(array) == [
        [[2, 3, 5, 7, 11], [13, 17, 19, 23, 29], [31, 37, 41, 43]],
        [[53, 59, 61, 67, 71], [73, 79, 83, 89, 97], [101, 103, 107, 109]],
    ]
    assert awkward1.tolist(array.prod(-3)) == [
        [106, 177, 305, 469, 781],
        [949, 1343, 1577, 2047, 2813],
        [3131, 3811, 4387, 4687],
    ]

    # matching short second inner lists in both groups
    array = nested([2, 3, 5, 7, 11, 13, 17, 19, 23, 31, 37, 41, 43, 47,
                    53, 59, 61, 67, 71, 73, 79, 83, 89, 101, 103, 107, 109, 113],
                   [0, 5, 9, 14, 19, 23, 28], [0, 3, 6])
    assert awkward1.tolist(array) == [
        [[2, 3, 5, 7, 11], [13, 17, 19, 23], [31, 37, 41, 43, 47]],
        [[53, 59, 61, 67, 71], [73, 79, 83, 89], [101, 103, 107, 109, 113]],
    ]
    assert awkward1.tolist(array.prod(-3)) == [
        [106, 177, 305, 469, 781],
        [949, 1343, 1577, 2047],
        [3131, 3811, 4387, 4687, 5311],
    ]

    # ragged numbers of inner lists per group, including an empty inner list
    array = nested(primes[:9], [0, 3, 4, 6, 6, 7, 9], [0, 2, 4, 6])
    assert awkward1.tolist(array) == [
        [[2, 3, 5], [7]], [[11, 13], []], [[17], [19, 23]]]
    assert awkward1.tolist(array.prod(-3)) == [
        [2 * 11 * 17, 3 * 13, 5], [7 * 19, 23]]

    # a group with only one inner list
    array = nested(primes[:9], [0, 3, 4, 6, 7, 9], [0, 2, 3, 5])
    assert awkward1.tolist(array) == [
        [[2, 3, 5], [7]], [[11, 13]], [[17], [19, 23]]]
    assert awkward1.tolist(array.prod(-3)) == [
        [2 * 11 * 17, 3 * 13, 5], [7 * 19, 23]]

    # progressively shorter inner lists in each group
    array = nested(primes[:10], [0, 3, 5, 6, 8, 9, 10], [0, 3, 6])
    assert awkward1.tolist(array) == [
        [[2, 3, 5], [7, 11], [13]], [[17, 19], [23], [29]]]
    assert awkward1.tolist(array.prod(-3)) == [
        [34, 57, 5], [161, 11], [377]]

    # an empty inner list in the middle of a group
    array = nested(primes[:9], [0, 3, 3, 5, 6, 8, 9], [0, 4, 6])
    assert awkward1.tolist(array) == [
        [[2, 3, 5], [], [7, 11], [13]], [[17, 19], [23]]]
    assert awkward1.tolist(array.prod(-3)) == [
        [34, 57, 5], [23], [7, 11], [13]]

    # additionally an entirely empty group
    array = nested(primes[:9], [0, 3, 3, 5, 6, 8, 9], [0, 4, 4, 6])
    assert awkward1.tolist(array) == [
        [[2, 3, 5], [], [7, 11], [13]], [], [[17, 19], [23]]]
    assert awkward1.tolist(array.prod(-3)) == [
        [34, 57, 5], [23], [7, 11], [13]]

    # the regular case: inner axes on the full 2x3x5 array
    array = nested(primes[:2 * 3 * 5], [0, 5, 10, 15, 20, 25, 30], [0, 3, 6])
    assert awkward1.tolist(array) == full
    assert awkward1.tolist(array.prod(-1)) == [
        [2 * 3 * 5 * 7 * 11, 13 * 17 * 19 * 23 * 29, 31 * 37 * 41 * 43 * 47],
        [53 * 59 * 61 * 67 * 71, 73 * 79 * 83 * 89 * 97, 101 * 103 * 107 * 109 * 113],
    ]
    assert awkward1.tolist(array.prod(-2)) == [
        [2 * 13 * 31, 3 * 17 * 37, 5 * 19 * 41, 7 * 23 * 43, 11 * 29 * 47],
        [53 * 73 * 101, 59 * 79 * 103, 61 * 83 * 107, 67 * 89 * 109, 71 * 97 * 113],
    ]

    # inner axes on the ragged array with empties
    array = nested(primes[:9], [0, 3, 3, 5, 6, 8, 9], [0, 4, 4, 6])
    assert awkward1.tolist(array) == [
        [[2, 3, 5], [], [7, 11], [13]], [], [[17, 19], [23]]]
    assert awkward1.tolist(array.prod(-1)) == [
        [2 * 3 * 5, 1, 7 * 11, 13], [], [17 * 19, 23]]
    assert awkward1.tolist(array.prod(-2)) == [
        [2 * 7 * 13, 3 * 11, 5], [], [17 * 23, 19]]
def test_complicated():
    """Reducers must also work through records of differently nested list fields."""
    xs_offsets = awkward1.layout.Index64(numpy.array([0, 3, 3, 5], dtype=numpy.int64))
    content1 = awkward1.layout.ListOffsetArray64(
        xs_offsets, awkward1.layout.NumpyArray(numpy.array(primes[:5], dtype=numpy.int64)))
    inner_offsets = awkward1.layout.Index64(numpy.array([0, 3, 3, 5, 6, 8, 9], dtype=numpy.int64))
    outer_offsets = awkward1.layout.Index64(numpy.array([0, 4, 4, 6], dtype=numpy.int64))
    content2 = awkward1.layout.ListOffsetArray64(
        outer_offsets,
        awkward1.layout.ListOffsetArray64(
            inner_offsets, awkward1.layout.NumpyArray(numpy.array(primes[:9], dtype=numpy.int64))))
    record_offsets = awkward1.layout.Index64(numpy.array([0, 1, 1, 3], dtype=numpy.int64))
    complicated = awkward1.layout.ListOffsetArray64(
        record_offsets, awkward1.layout.RecordArray([content1, content2], ["x", "y"]))

    assert awkward1.tolist(complicated) == [
        [{"x": [2, 3, 5], "y": [[2, 3, 5], [], [7, 11], [13]]}],
        [],
        [{"x": [], "y": []}, {"x": [7, 11], "y": [[17, 19], [23]]}],
    ]
    assert awkward1.tolist(complicated["x"]) == [[[2, 3, 5]], [], [[], [7, 11]]]
    assert awkward1.tolist(complicated["y"]) == [
        [[[2, 3, 5], [], [7, 11], [13]]],
        [],
        [[], [[17, 19], [23]]],
    ]

    assert awkward1.tolist(complicated.prod(-1)) == [
        {"x": [30], "y": [[30, 1, 77, 13]]},
        {"x": [], "y": []},
        {"x": [1, 77], "y": [[], [323, 23]]},
    ]
    assert awkward1.tolist(complicated["x"].prod(-1)) == [[30], [], [1, 77]]
    assert awkward1.tolist(complicated["y"].prod(-1)) == [[[30, 1, 77, 13]], [], [[], [323, 23]]]
    assert awkward1.tolist(complicated.prod(-2)) == [
        {"x": [2, 3, 5], "y": [[182, 33, 5]]},
        {"x": [], "y": []},
        {"x": [7, 11], "y": [[], [391, 19]]},
    ]
    assert awkward1.tolist(complicated["x"].prod(-2)) == [[2, 3, 5], [], [7, 11]]
    assert awkward1.tolist(complicated["y"].prod(-2)) == [[[182, 33, 5]], [], [[], [391, 19]]]

    assert awkward1.tolist(complicated[0]) == [{"x": [2, 3, 5], "y": [[2, 3, 5], [], [7, 11], [13]]}]
    assert awkward1.tolist(complicated[0].prod(-1)) == {"x": [30], "y": [[30, 1, 77, 13]]}
def test_EmptyArray():
    """prod over empty inner lists yields the multiplicative identity (1)."""
    offsets = awkward1.layout.Index64(numpy.array([0, 0, 0, 0], dtype=numpy.int64))
    untyped = awkward1.layout.ListOffsetArray64(offsets, awkward1.layout.EmptyArray())
    assert awkward1.tolist(untyped) == [[], [], []]
    assert awkward1.tolist(untyped.prod(-1)) == [1, 1, 1]

    offsets = awkward1.layout.Index64(numpy.array([0, 0, 0, 0], dtype=numpy.int64))
    typed = awkward1.layout.ListOffsetArray64(
        offsets, awkward1.layout.NumpyArray(numpy.array([], dtype=numpy.int64)))
    assert awkward1.tolist(typed) == [[], [], []]
    assert awkward1.tolist(typed.prod(-1)) == [1, 1, 1]
def test_IndexedOptionArray():
content = awkward1.layout.NumpyArray(numpy.array(primes[:2*3*5], dtype=numpy.int64))
offsets1 = awkward1.layout.Index64(numpy.array([0, 5, 10, 15, 20, 25, 30], dtype=numpy.int64))
listoffsetarray = awkward1.layout.ListOffsetArray64(offsets1, content)
index = awkward1.layout.Index64(numpy.array([5, 4, 3, 2, 1, 0], dtype=numpy.int64))
indexedarray = awkward1.layout.IndexedArray64(index, listoffsetarray)
offsets2 = awkward1.layout.Index64(numpy.array([0, 3, 6], dtype=numpy.int64))
depth2 = awkward1.layout.ListOffsetArray64(offsets2, indexedarray)
assert awkward1.tolist(depth2) == [
[[101, 103, 107, 109, 113],
[ 73, 79, 83, 89, 97],
[ 53, 59, 61, 67, 71]],
[[ 31, 37, 41, 43, 47],
[ 13, 17, 19, 23, 29],
[ 2, 3, 5, 7, 11]]]
assert awkward1.tolist(depth2.prod(-1)) == [
[101 * 103 * 107 * 109 * 113,
73 * 79 * 83 * 89 * 97,
53 * 59 * 61 * 67 * 71],
[ 31 * 37 * 41 * 43 * 47,
13 * 17 * 19 * 23 * 29,
2 * 3 * 5 * 7 * 11]]
assert awkward1.tolist(depth2.prod(-2)) == [
[101*73*53, 103*79*59, 107*83*61, 109*89*67, 113*97*71],
[ 31*13*2, 37*17*3, 41*19*5, 43*23*7, 47*29*11]]
assert awkward1.tolist(depth2.prod(-3)) == [
[101*31, 103*37, 107*41, 109*43, 113*47],
[ 73*13, 79*17, 83*19, 89*23, 97*29],
[ 53*2, 59*3, 61*5, 67*7, 71*11]]
content = awkward1.layout.NumpyArray(numpy.array([2, 3, 5, 7, 11, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 101, 103, 107, 109, 113], dtype=numpy.int64))
offsets1 = awkward1.layout.Index64(numpy.array([0, 5, 10, 15, 20], dtype=numpy.int64))
listoffsetarray = awkward1.layout.ListOffsetArray64(offsets1, content)
index = awkward1.layout.Index64(numpy.array([3, -1, 2, 1, -1, 0], dtype=numpy.int64))
indexedoptionarray = awkward1.layout.IndexedOptionArray64(index, listoffsetarray)
offsets2 = awkward1.layout.Index64(numpy.array([0, 3, 6], dtype=numpy.int64))
depth2 = awkward1.layout.ListOffsetArray64(offsets2, indexedoptionarray)
assert awkward1.tolist(depth2) == [
[[101, 103, 107, 109, 113],
None,
[ 53, 59, 61, 67, 71]],
[[ 31, 37, 41, 43, 47],
None,
[ 2, 3, 5, 7, 11]]]
assert awkward1.tolist(depth2.prod(-1)) == [
[101 * 103 * 107 * 109 * 113,
53 * 59 * 61 * 67 * 71],
[ 31 * 37 * 41 * 43 * 47,
2 * 3 * 5 * 7 * 11]]
assert awkward1.tolist(depth2.prod(-2)) == [
[101*53, 103*59, 107*61, 109*67, 113*71],
[ 31*2, 37*3, 41*5, 43*7, 47*11]]
assert awkward1.tolist(depth2.prod(-3)) == [
[101*31, 103*37, 107*41, 109*43, 113*47],
[],
[ 53*2, 59*3, 61*5, 67*7, 71*11]]
content = awkward1.layout.NumpyArray(numpy.array([2, 3, 5, 7, 11, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 101, 103, 107, 109, 113], dtype=numpy.int64))
index = awkward1.layout.Index64(numpy.array([15, 16, 17, 18, 19, -1, -1, -1, -1, -1, 10, 11, 12, 13, 14, 5, 6, 7, 8, 9, -1, -1, -1, -1, -1, 0, 1, 2, 3, 4], dtype=numpy.int64))
indexedoptionarray = awkward1.layout.IndexedOptionArray64(index, content)
offsets1 = awkward1.layout.Index64(numpy.array([0, 5, 10, 15, 20, 25, 30], dtype=numpy.int64))
listoffsetarray = awkward1.layout.ListOffsetArray64(offsets1, indexedoptionarray)
offsets2 = awkward1.layout.Index64(numpy.array([0, 3, 6], dtype=numpy.int64))
depth2 = awkward1.layout.ListOffsetArray64(offsets2, listoffsetarray)
assert awkward1.tolist(depth2) == [
[[ 101, 103, 107, 109, 113],
[None, None, None, None, None],
[ 53, 59, 61, 67, 71]],
[[ 31, 37, 41, 43, 47],
[None, None, None, None, None],
[ 2, 3, 5, 7, 11]]]
assert awkward1.tolist(depth2.prod(-1)) == [
[101 * 103 * 107 * 109 * 113,
1 * 1 * 1 * 1 * 1,
53 * 59 * 61 * 67 * 71],
[ 31 * 37 * 41 * 43 * 47,
1 * 1 * 1 * 1 * 1,
2 * 3 * 5 * 7 * 11]]
assert awkward1.tolist(depth2.prod(-2)) == [
[101*53, 103*59, 107*61, 109*67, 113*71],
[ 31*2, 37*3, 41*5, 43*7, 47*11]]
assert awkward1.tolist(depth2.prod(-3)) == [
[101*31, 103*37, 107*41, 109*43, 113*47],
[ 1, 1, 1, 1, 1],
[ 53*2, 59*3, 61*5, 67*7, 71*11]]
content = awkward1.layout.NumpyArray(numpy.array([2, 3, 5, 7, 11, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 101, 103, 107, 109, 113], dtype=numpy.int64))
index = awkward1.layout.Index64(numpy.array([15, 16, 17, 18, 19, -1, 10, 11, 12, 13, 14, 5, 6, 7, 8, 9, -1, 0, 1, 2, 3, 4], dtype=numpy.int64))
indexedoptionarray = awkward1.layout.IndexedOptionArray64(index, content)
offsets1 = awkward1.layout.Index64(numpy.array([0, 5, 6, 11, 16, 17, 22], dtype=numpy.int64))
listoffsetarray = awkward1.layout.ListOffsetArray64(offsets1, indexedoptionarray)
offsets2 = awkward1.layout.Index64(numpy.array([0, 3, 6], dtype=numpy.int64))
depth2 = awkward1.layout.ListOffsetArray64(offsets2, listoffsetarray)
assert awkward1.tolist(depth2) == [
[[ 101, 103, 107, 109, 113],
[None],
[ 53, 59, 61, 67, 71]],
[[ 31, 37, 41, 43, 47],
[None],
[ 2, 3, 5, 7, 11]]]
assert awkward1.tolist(depth2.prod(-1)) == [
[101 * 103 * 107 * 109 * 113,
1,
53 * 59 * 61 * 67 * 71],
[ 31 * 37 * 41 * 43 * 47,
1,
2 * 3 * 5 * 7 * 11]]
assert awkward1.tolist(depth2.prod(-2)) == [
[101*53, 103*59, 107*61, 109*67, 113*71],
[ 31*2, 37*3, 41*5, 43*7, 47*11]]
assert awkward1.tolist(depth2.prod(-3)) == [
[101*31, 103*37, 107*41, 109*43, 113*47],
[ 1],
[ 53*2, 59*3, 61*5, 67*7, 71*11]]
def test_UnionArray():
content1 = awkward1.Array([
[[ 2, 3, 5, 7, 11],
[ 13, 17, 19, 23, 29],
[ 31, 37, 41, 43, 47]]], checkvalid=True).layout
content2 = awkward1.Array([
[[ 53, 59, 61, 67, 71],
[ 73, 79, 83, 89, 97],
[101, 103, 107, 109, 113]]], checkvalid=True).layout
tags = awkward1.layout.Index8(numpy.array([0, 1], dtype=numpy.int8))
index = awkward1.layout.Index64(numpy.array([0, 0], dtype=numpy.int64))
depth2 = awkward1.layout.UnionArray8_64(tags, index, [content1, content2])
assert awkward1.tolist(depth2) == [
[[ 2, 3, 5, 7, 11],
[ 13, 17, 19, 23, 29],
[ 31, 37, 41, 43, 47]],
[[ 53, 59, 61, 67, 71],
[ 73, 79, 83, 89, 97],
[101, 103, 107, 109, 113]]]
assert awkward1.tolist(depth2.prod(axis=-1)) == [
[ 2 * 3 * 5 * 7 * 11,
13 * 17 * 19 * 23 * 29,
31 * 37 * 41 * 43 * 47],
[ 53 * 59 * 61 * 67 * 71,
73 * 79 * 83 * 89 * 97,
101 * 103 * 107 * 109 * 113]]
assert awkward1.tolist(depth2.prod(axis=2)) == [
[ 2 * 3 * 5 * 7 * 11,
13 * 17 * 19 * 23 * 29,
31 * 37 * 41 * 43 * 47],
[ 53 * 59 * 61 * 67 * 71,
73 * 79 * 83 * 89 * 97,
101 * 103 * 107 * 109 * 113]]
assert awkward1.tolist(depth2.prod(axis=-2)) == [
[2*13*31, 3*17*37, 5*19*41, 7*23*43, 11*29*47],
[53*73*101, 59*79*103, 61*83*107, 67*89*109, 71*97*113]]
assert awkward1.tolist(depth2.prod(axis=1)) == [
[2*13*31, 3*17*37, 5*19*41, 7*23*43, 11*29*47],
[53*73*101, 59*79*103, 61*83*107, 67*89*109, 71*97*113]]
assert awkward1.tolist(depth2.prod(axis=-3)) == [
[2*53, 3*59, 5*61, 7*67, 11*71],
[13*73, 17*79, 19*83, 23*89, 29*97],
[31*101, 37*103, 41*107, 43*109, 47*113]]
assert awkward1.tolist(depth2.prod(axis=0)) == [
[2*53, 3*59, 5*61, 7*67, 11*71],
[13*73, 17*79, 19*83, 23*89, 29*97],
[31*101, 37*103, 41*107, 43*109, 47*113]]
content1 = awkward1.layout.NumpyArray(numpy.array(primes[:2*3*5], dtype=numpy.int64))
offsets1a = awkward1.layout.Index64(numpy.array([0, 5, 10, 15], dtype=numpy.int64))
offsets1b = awkward1.layout.Index64(numpy.array([15, 20, 25, 30], dtype=numpy.int64))
tags = awkward1.layout.Index8(numpy.array([0, 0, 0, 1, 1, 1], dtype=numpy.int8))
index = awkward1.layout.Index64(numpy.array([0, 1, 2, 0, 1, 2], dtype=numpy.int64))
unionarray = awkward1.layout.UnionArray8_64(tags, index, [awkward1.layout.ListOffsetArray64(offsets1a, content1), awkward1.layout.ListOffsetArray64(offsets1b, content1)])
offsets2 = awkward1.layout.Index64(numpy.array([0, 3, 6], dtype=numpy.int64))
depth2 = awkward1.layout.ListOffsetArray64(offsets2, unionarray)
assert awkward1.tolist(depth2) == [
[[ 2, 3, 5, 7, 11],
[ 13, 17, 19, 23, 29],
[ 31, 37, 41, 43, 47]],
[[ 53, 59, 61, 67, 71],
[ 73, 79, 83, 89, 97],
[101, 103, 107, 109, 113]]]
assert awkward1.tolist(depth2.prod(axis=-1)) == [
[ 2 * 3 * 5 * 7 * 11,
13 * 17 * 19 * 23 * 29,
31 * 37 * 41 * 43 * 47],
[ 53 * 59 * 61 * 67 * 71,
73 * 79 * 83 * 89 * 97,
101 * 103 * 107 * 109 * 113]]
assert awkward1.tolist(depth2.prod(axis=2)) == [
[ 2 * 3 * 5 * 7 * 11,
13 * 17 * 19 * 23 * 29,
31 * 37 * 41 * 43 * 47],
[ 53 * 59 * 61 * 67 * 71,
73 * 79 * 83 * 89 * 97,
101 * 103 * 107 * 109 * 113]]
assert awkward1.tolist(depth2.prod(axis=-2)) == [
[2*13*31, 3*17*37, 5*19*41, 7*23*43, 11*29*47],
[53*73*101, 59*79*103, 61*83*107, 67*89*109, 71*97*113]]
assert awkward1.tolist(depth2.prod(axis=1)) == [
[2*13*31, 3*17*37, 5*19*41, 7*23*43, 11*29*47],
[53*73*101, 59*79*103, 61*83*107, 67*89*109, 71*97*113]]
assert awkward1.tolist(depth2.prod(axis=-3)) == [
[2*53, 3*59, 5*61, 7*67, 11*71],
[13*73, 17*79, 19*83, 23*89, 29*97],
[31*101, 37*103, 41*107, 43*109, 47*113]]
assert awkward1.tolist(depth2.prod(axis=0)) == [
[2*53, 3*59, 5*61, 7*67, 11*71],
[13*73, 17*79, 19*83, 23*89, 29*97],
[31*101, 37*103, 41*107, 43*109, 47*113]]
def test_sum():
content2 = awkward1.layout.NumpyArray(numpy.array([1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048], dtype=numpy.int64))
offsets3 = awkward1.layout.Index64(numpy.array([0, 4, 8, 12], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert awkward1.tolist(depth1.sum(-1)) == [
1 + 2 + 4 + 8,
16 + 32 + 64 + 128,
256 + 512 + 1024 + 2048]
assert awkward1.tolist(depth1.sum(1)) == [
1 + 2 + 4 + 8,
16 + 32 + 64 + 128,
256 + 512 + 1024 + 2048]
assert awkward1.tolist(depth1.sum(-2)) == [
1 + 16 + 256,
2 + 32 + 512,
4 + 64 + 1024,
8 + 128 + 2048]
assert awkward1.tolist(depth1.sum(0)) == [
1 + 16 + 256,
2 + 32 + 512,
4 + 64 + 1024,
8 + 128 + 2048]
def test_sumprod_types():
def prod(xs):
out = 1
for x in xs:
out *= x
return out
array = numpy.array([[True, False, False], [True, False, False]])
content2 = awkward1.layout.NumpyArray(array.reshape(-1))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 3, 5, 6], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert numpy.sum(array, axis=-1).dtype == numpy.asarray(depth1.sum(axis=-1)).dtype
assert numpy.prod(array, axis=-1).dtype == numpy.asarray(depth1.prod(axis=-1)).dtype
assert sum(awkward1.tolist(numpy.sum(array, axis=-1))) == sum(awkward1.tolist(depth1.sum(axis=-1)))
assert prod(awkward1.tolist(numpy.prod(array, axis=-1))) == prod(awkward1.tolist(depth1.prod(axis=-1)))
array = numpy.array([[0, 1, 2], [3, 4, 5]], dtype=numpy.int8)
content2 = awkward1.layout.NumpyArray(array.reshape(-1))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 3, 5, 6], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert numpy.sum(array, axis=-1).dtype == numpy.asarray(depth1.sum(axis=-1)).dtype
assert numpy.prod(array, axis=-1).dtype == numpy.asarray(depth1.prod(axis=-1)).dtype
assert sum(awkward1.tolist(numpy.sum(array, axis=-1))) == sum(awkward1.tolist(depth1.sum(axis=-1)))
assert prod(awkward1.tolist(numpy.prod(array, axis=-1))) == prod(awkward1.tolist(depth1.prod(axis=-1)))
array = numpy.array([[0, 1, 2], [3, 4, 5]], dtype=numpy.uint8)
content2 = awkward1.layout.NumpyArray(array.reshape(-1))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 3, 5, 6], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert numpy.sum(array, axis=-1).dtype == numpy.asarray(depth1.sum(axis=-1)).dtype
assert numpy.prod(array, axis=-1).dtype == numpy.asarray(depth1.prod(axis=-1)).dtype
assert sum(awkward1.tolist(numpy.sum(array, axis=-1))) == sum(awkward1.tolist(depth1.sum(axis=-1)))
assert prod(awkward1.tolist(numpy.prod(array, axis=-1))) == prod(awkward1.tolist(depth1.prod(axis=-1)))
array = numpy.array([[0, 1, 2], [3, 4, 5]], dtype=numpy.int16)
content2 = awkward1.layout.NumpyArray(array.reshape(-1))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 3, 5, 6], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert numpy.sum(array, axis=-1).dtype == numpy.asarray(depth1.sum(axis=-1)).dtype
assert numpy.prod(array, axis=-1).dtype == numpy.asarray(depth1.prod(axis=-1)).dtype
assert sum(awkward1.tolist(numpy.sum(array, axis=-1))) == sum(awkward1.tolist(depth1.sum(axis=-1)))
assert prod(awkward1.tolist(numpy.prod(array, axis=-1))) == prod(awkward1.tolist(depth1.prod(axis=-1)))
array = numpy.array([[0, 1, 2], [3, 4, 5]], dtype=numpy.uint16)
content2 = awkward1.layout.NumpyArray(array.reshape(-1))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 3, 5, 6], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert numpy.sum(array, axis=-1).dtype == numpy.asarray(depth1.sum(axis=-1)).dtype
assert numpy.prod(array, axis=-1).dtype == numpy.asarray(depth1.prod(axis=-1)).dtype
assert sum(awkward1.tolist(numpy.sum(array, axis=-1))) == sum(awkward1.tolist(depth1.sum(axis=-1)))
assert prod(awkward1.tolist(numpy.prod(array, axis=-1))) == prod(awkward1.tolist(depth1.prod(axis=-1)))
array = numpy.array([[0, 1, 2], [3, 4, 5]], dtype=numpy.int32)
content2 = awkward1.layout.NumpyArray(array.reshape(-1))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 3, 5, 6], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert numpy.sum(array, axis=-1).dtype == numpy.asarray(depth1.sum(axis=-1)).dtype
assert numpy.prod(array, axis=-1).dtype == numpy.asarray(depth1.prod(axis=-1)).dtype
assert sum(awkward1.tolist(numpy.sum(array, axis=-1))) == sum(awkward1.tolist(depth1.sum(axis=-1)))
assert prod(awkward1.tolist(numpy.prod(array, axis=-1))) == prod(awkward1.tolist(depth1.prod(axis=-1)))
array = numpy.array([[0, 1, 2], [3, 4, 5]], dtype=numpy.uint32)
content2 = awkward1.layout.NumpyArray(array.reshape(-1))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 3, 5, 6], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert numpy.sum(array, axis=-1).dtype == numpy.asarray(depth1.sum(axis=-1)).dtype
assert numpy.prod(array, axis=-1).dtype == numpy.asarray(depth1.prod(axis=-1)).dtype
assert sum(awkward1.tolist(numpy.sum(array, axis=-1))) == sum(awkward1.tolist(depth1.sum(axis=-1)))
assert prod(awkward1.tolist(numpy.prod(array, axis=-1))) == prod(awkward1.tolist(depth1.prod(axis=-1)))
array = numpy.array([[0, 1, 2], [3, 4, 5]], dtype=numpy.int64)
content2 = awkward1.layout.NumpyArray(array.reshape(-1))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 3, 5, 6], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert numpy.sum(array, axis=-1).dtype == numpy.asarray(depth1.sum(axis=-1)).dtype
assert numpy.prod(array, axis=-1).dtype == numpy.asarray(depth1.prod(axis=-1)).dtype
assert sum(awkward1.tolist(numpy.sum(array, axis=-1))) == sum(awkward1.tolist(depth1.sum(axis=-1)))
assert prod(awkward1.tolist(numpy.prod(array, axis=-1))) == prod(awkward1.tolist(depth1.prod(axis=-1)))
array = numpy.array([[0, 1, 2], [3, 4, 5]], dtype=numpy.uint64)
content2 = awkward1.layout.NumpyArray(array.reshape(-1))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 3, 5, 6], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert numpy.sum(array, axis=-1).dtype == numpy.asarray(depth1.sum(axis=-1)).dtype
assert numpy.prod(array, axis=-1).dtype == numpy.asarray(depth1.prod(axis=-1)).dtype
assert sum(awkward1.tolist(numpy.sum(array, axis=-1))) == sum(awkward1.tolist(depth1.sum(axis=-1)))
assert prod(awkward1.tolist(numpy.prod(array, axis=-1))) == prod(awkward1.tolist(depth1.prod(axis=-1)))
def test_any():
content2 = awkward1.layout.NumpyArray(numpy.array([1.1, 2.2, 3.3, 0.0, 2.2, 0.0, 0.0, 0.0, 0.0, 0.0]))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 6, 10], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert awkward1.tolist(depth1) == [
[1.1, 2.2, 3.3],
[0.0, 2.2, 0.0],
[0.0, 0.0, 0.0, 0.0]]
assert awkward1.tolist(depth1.any(-1)) == [
True,
True,
False]
assert awkward1.tolist(depth1.any(1)) == [
True,
True,
False]
assert awkward1.tolist(depth1.any(-2)) == [
True,
True,
True,
False]
assert awkward1.tolist(depth1.any(0)) == [
True,
True,
True,
False]
def test_all():
content2 = awkward1.layout.NumpyArray(numpy.array([1.1, 2.2, 3.3, 0.0, 2.2, 0.0, 0.0, 2.2, 0.0, 4.4]))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 6, 10], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert awkward1.tolist(depth1) == [
[1.1, 2.2, 3.3],
[0.0, 2.2, 0.0],
[0.0, 2.2, 0.0, 4.4]]
assert awkward1.tolist(depth1.all(-1)) == [
True,
False,
False]
assert awkward1.tolist(depth1.all(1)) == [
True,
False,
False]
assert awkward1.tolist(depth1.all(-2)) == [
False,
True,
False,
True]
assert awkward1.tolist(depth1.all(0)) == [
False,
True,
False,
True]
def test_count():
content2 = awkward1.layout.NumpyArray(numpy.array([1.1, 2.2, 3.3, 0.0, 2.2, 0.0, 0.0, 2.2, 0.0, 4.4]))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 6, 10], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert awkward1.tolist(depth1) == [
[1.1, 2.2, 3.3],
[0.0, 2.2, 0.0],
[0.0, 2.2, 0.0, 4.4]]
assert awkward1.tolist(depth1.count(-1)) == [
3,
3,
4]
assert awkward1.tolist(depth1.count(1)) == [
3,
3,
4]
assert awkward1.tolist(depth1.count(-2)) == [
3,
3,
3,
1]
assert awkward1.tolist(depth1.count(0)) == [
3,
3,
3,
1]
def test_count_nonzero():
content2 = awkward1.layout.NumpyArray(numpy.array([1.1, 2.2, 3.3, 0.0, 2.2, 0.0, 0.0, 2.2, 0.0, 4.4]))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 6, 10], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert awkward1.tolist(depth1) == [
[1.1, 2.2, 3.3],
[0.0, 2.2, 0.0],
[0.0, 2.2, 0.0, 4.4]]
assert awkward1.tolist(depth1.count_nonzero(-1)) == [
3,
1,
2]
assert awkward1.tolist(depth1.count_nonzero(1)) == [
3,
1,
2]
assert awkward1.tolist(depth1.count_nonzero(-2)) == [
1,
3,
1,
1]
assert awkward1.tolist(depth1.count_nonzero(0)) == [
1,
3,
1,
1]
def test_count_min():
content2 = awkward1.layout.NumpyArray(numpy.array([1.1, 2.2, 3.3, 0.0, 2.2, 0.0, 0.0, 2.2, 0.0, 4.4]))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 6, 10], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert awkward1.tolist(depth1) == [
[1.1, 2.2, 3.3],
[0.0, 2.2, 0.0],
[0.0, 2.2, 0.0, 4.4]]
assert awkward1.tolist(depth1.min(-1)) == [
1.1,
0.0,
0.0]
assert awkward1.tolist(depth1.min(1)) == [
1.1,
0.0,
0.0]
assert awkward1.tolist(depth1.min(-2)) == [
0.0,
2.2,
0.0,
4.4]
assert awkward1.tolist(depth1.min(0)) == [
0.0,
2.2,
0.0,
4.4]
content2 = awkward1.layout.NumpyArray(numpy.array([True, True, True, False, True, False, False, True, False, True]))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 6, 10], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert awkward1.tolist(depth1) == [
[ True, True, True],
[False, True, False],
[False, True, False, True]]
assert awkward1.tolist(depth1.min(-1)) == [
True,
False,
False]
assert awkward1.tolist(depth1.min(1)) == [
True,
False,
False]
assert awkward1.tolist(depth1.min(-2)) == [
False,
True,
False,
True]
assert awkward1.tolist(depth1.min(0)) == [
False,
True,
False,
True]
def test_count_max():
content2 = awkward1.layout.NumpyArray(numpy.array([1.1, 2.2, 3.3, 0.0, 2.2, 0.0, 0.0, 2.2, 0.0, 4.4]))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 6, 10], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert awkward1.tolist(depth1) == [
[1.1, 2.2, 3.3],
[0.0, 2.2, 0.0],
[0.0, 2.2, 0.0, 4.4]]
assert awkward1.tolist(depth1.max(-1)) == [
3.3,
2.2,
4.4]
assert awkward1.tolist(depth1.max(1)) == [
3.3,
2.2,
4.4]
assert awkward1.tolist(depth1.max(-2)) == [
1.1,
2.2,
3.3,
4.4]
assert awkward1.tolist(depth1.max(0)) == [
1.1,
2.2,
3.3,
4.4]
content2 = awkward1.layout.NumpyArray(numpy.array([False, True, True, False, True, False, False, False, False, False]))
offsets3 = awkward1.layout.Index64(numpy.array([0, 3, 6, 10], dtype=numpy.int64))
depth1 = awkward1.layout.ListOffsetArray64(offsets3, content2)
assert awkward1.tolist(depth1) == [
[False, True, True],
[False, True, False],
[False, False, False, False]]
assert awkward1.tolist(depth1.max(-1)) == [
True,
True,
False]
assert awkward1.tolist(depth1.max(1)) == [
True,
True,
False]
assert awkward1.tolist(depth1.max(-2)) == [
False,
True,
True,
False]
assert awkward1.tolist(depth1.max(0)) == [
False,
True,
True,
False]
def test_mask():
content = awkward1.layout.NumpyArray(numpy.array([1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]))
offsets = awkward1.layout.Index64(numpy.array([0, 3, 3, 5, 6, 6, 6, 9], dtype=numpy.int64))
array = awkward1.layout.ListOffsetArray64(offsets, content)
assert awkward1.tolist(array.min(axis=-1, mask=False)) == [1.1, numpy.inf, 4.4, 6.6, numpy.inf, numpy.inf, 7.7]
assert awkward1.tolist(array.min(axis=-1, mask=True)) == [1.1, None, 4.4, 6.6, None, None, 7.7]
def test_keepdims():
nparray = numpy.array(primes[:2*3*5], dtype=numpy.int64).reshape(2, 3, 5)
content1 = awkward1.layout.NumpyArray(numpy.array(primes[:2*3*5], dtype=numpy.int64))
offsets1 = awkward1.layout.Index64(numpy.array([0, 5, 10, 15, 20, 25, 30], dtype=numpy.int64))
offsets2 = awkward1.layout.Index64(numpy.array([0, 3, 6], dtype=numpy.int64))
depth2 = awkward1.layout.ListOffsetArray64(offsets2, awkward1.layout.ListOffsetArray64(offsets1, content1))
assert awkward1.tolist(depth2) == [
[[ 2, 3, 5, 7, 11],
[ 13, 17, 19, 23, 29],
[ 31, 37, 41, 43, 47]],
[[ 53, 59, 61, 67, 71],
[ 73, 79, 83, 89, 97],
[101, 103, 107, 109, 113]]]
assert awkward1.tolist(depth2.prod(axis=-1, keepdims=False)) == awkward1.tolist(nparray.prod(axis=-1, keepdims=False))
assert awkward1.tolist(depth2.prod(axis=-2, keepdims=False)) == awkward1.tolist(nparray.prod(axis=-2, keepdims=False))
assert awkward1.tolist(depth2.prod(axis=-3, keepdims=False)) == awkward1.tolist(nparray.prod(axis=-3, keepdims=False))
assert awkward1.tolist(depth2.prod(axis=-1, keepdims=True)) == awkward1.tolist(nparray.prod(axis=-1, keepdims=True))
assert awkward1.tolist(depth2.prod(axis=-2, keepdims=True)) == awkward1.tolist(nparray.prod(axis=-2, keepdims=True))
assert awkward1.tolist(depth2.prod(axis=-3, keepdims=True)) == awkward1.tolist(nparray.prod(axis=-3, keepdims=True))
def test_highlevel():
array = awkward1.Array([
[[ 2, 3, 5],
[ ],
[ 7, 11 ],
[13 ]],
[],
[[17, 19 ],
[23 ]]], checkvalid=True)
assert awkward1.count(array) == 9
assert awkward1.tolist(awkward1.count(array, axis=-1)) == [
[3, 0, 2, 1],
[],
[2, 1]]
assert awkward1.tolist(awkward1.count(array, axis=2)) == [
[3, 0, 2, 1],
[],
[2, 1]]
assert awkward1.tolist(awkward1.count(array, axis=-1, keepdims=True)) == [
[[3], [0], [2], [1]],
[],
[[2], [1]]]
assert awkward1.tolist(awkward1.count(array, axis=-2)) == [
[3, 2, 1],
[],
[2, 1]]
assert awkward1.tolist(awkward1.count(array, axis=1)) == [
[3, 2, 1],
[],
[2, 1]]
assert awkward1.tolist(awkward1.count(array, axis=-2, keepdims=True)) == [
[[3, 2, 1]],
[[]],
[[2, 1]]]
assert awkward1.count_nonzero(array) == 9
assert awkward1.tolist(awkward1.count_nonzero(array, axis=-1)) == [
[3, 0, 2, 1],
[],
[2, 1]]
assert awkward1.tolist(awkward1.count_nonzero(array, axis=-2)) == [
[3, 2, 1],
[],
[2, 1]]
assert awkward1.sum(array) == 2 + 3 + 5 + 7 + 11 + 13 + 17 + 19 + 23
assert awkward1.tolist(awkward1.sum(array, axis=-1)) == [
[2 + 3 + 5, 0, 7 + 11, 13],
[],
[17 + 19, 23]]
assert awkward1.tolist(awkward1.sum(array, axis=-2)) == [
[2 + 7 + 13, 3 + 11, 5],
[],
[17 + 23, 19]]
assert awkward1.prod(array) == 2*3*5*7*11*13*17*19*23
assert awkward1.tolist(awkward1.prod(array, axis=-1)) == [
[2*3*5, 1, 7*11, 13],
[],
[17*19, 23]]
assert awkward1.tolist(awkward1.prod(array, axis=-2)) == [
[2*7*13, 3*11, 5],
[],
[17*23, 19]]
assert awkward1.min(array) == 2
assert awkward1.tolist(awkward1.min(array, axis=-1)) == [
[2, None, 7, 13],
[],
[17, 23]]
assert awkward1.tolist(awkward1.min(array, axis=-2)) == [
[2, 3, 5],
[],
[17, 19]]
assert awkward1.max(array) == 23
assert awkward1.tolist(awkward1.max(array, axis=-1)) == [
[5, None, 11, 13],
[],
[19, 23]]
assert awkward1.tolist(awkward1.max(array, axis=-2)) == [
[13, 11, 5],
[],
[23, 19]]
array = awkward1.Array([
[[ True, False, True],
[ ],
[False, False ],
[ True ]],
[],
[[False, True ],
[ True ]]], checkvalid=True)
assert awkward1.any(array) == True
assert awkward1.tolist(awkward1.any(array, axis=-1)) == [
[True, False, False, True],
[],
[True, True]]
assert awkward1.tolist(awkward1.any(array, axis=-2)) == [
[True, False, True],
[],
[True, True]]
assert awkward1.all(array) == False
assert awkward1.tolist(awkward1.all(array, axis=-1)) == [
[False, True, False, True],
[],
[False, True]]
assert awkward1.tolist(awkward1.all(array, axis=-2)) == [
[False, False, True],
[],
[False, True]]
def test_nonreducers():
x = awkward1.Array([[1, 2, 3, 4, 5], [1, 2, 3, 4, 5]], checkvalid=True)
y = awkward1.Array([[1.1, 2.2, 2.9, 4.0, 5.1], [0.9, 2.1, 3.2, 4.1, 4.9]], checkvalid=True)
assert awkward1.mean(y) == numpy.mean(awkward1.tonumpy(y))
assert awkward1.var(y) == numpy.var(awkward1.tonumpy(y))
assert awkward1.var(y, ddof=1) == numpy.var(awkward1.tonumpy(y), ddof=1)
assert awkward1.std(y) == numpy.std(awkward1.tonumpy(y))
assert awkward1.std(y, ddof=1) == numpy.std(awkward1.tonumpy(y), ddof=1)
assert awkward1.moment(y, 1) == numpy.mean(awkward1.tonumpy(y))
assert awkward1.moment(y - awkward1.mean(y), 2) == numpy.var(awkward1.tonumpy(y))
assert awkward1.covar(y, y) == numpy.var(awkward1.tonumpy(y))
assert awkward1.corr(y, y) == 1.0
assert awkward1.corr(x, y) == pytest.approx(0.9968772535047296)
fit = awkward1.linearfit(x, y)
assert awkward1.tolist(fit) == pytest.approx({"intercept": 0.07999999999999773, "slope": 0.99, "intercept_error": 0.7416198487095663, "slope_error": 0.22360679774997896})
assert awkward1.tolist(awkward1.mean(y, axis=-1)) == awkward1.tolist(numpy.mean(awkward1.tonumpy(y), axis=-1))
assert awkward1.tolist(awkward1.var(y, axis=-1)) == awkward1.tolist(numpy.var(awkward1.tonumpy(y), axis=-1))
assert awkward1.tolist(awkward1.var(y, axis=-1, ddof=1)) == awkward1.tolist(numpy.var(awkward1.tonumpy(y), axis=-1, ddof=1))
assert awkward1.tolist(awkward1.std(y, axis=-1)) == awkward1.tolist(numpy.std(awkward1.tonumpy(y), axis=-1))
assert awkward1.tolist(awkward1.std(y, axis=-1, ddof=1)) == awkward1.tolist(numpy.std(awkward1.tonumpy(y), axis=-1, ddof=1))
assert awkward1.tolist(awkward1.moment(y, 1, axis=-1)) == awkward1.tolist(numpy.mean(awkward1.tonumpy(y), axis=-1))
assert awkward1.tolist(awkward1.moment(y - awkward1.mean(y, axis=-1), 2, axis=-1)) == awkward1.tolist(numpy.var(awkward1.tonumpy(y), axis=-1))
assert awkward1.tolist(awkward1.covar(y, y, axis=-1)) == awkward1.tolist(numpy.var(awkward1.tonumpy(y), axis=-1))
assert awkward1.tolist(awkward1.corr(y, y, axis=-1)) == [1.0, 1.0]
assert awkward1.tolist(awkward1.corr(x, y, axis=-1)) == pytest.approx([0.9975103695813371, 0.9964193240901015])
fit = awkward1.linearfit(x, y, axis=-1)
assert awkward1.tolist(fit[0]) == pytest.approx({"intercept": 0.11999999999999772, "slope": 0.9800000000000005, "intercept_error": 1.0488088481701516, "slope_error": 0.31622776601683794})
assert awkward1.tolist(fit[1]) == pytest.approx({"intercept": 0.04000000000000228, "slope": 0.9999999999999994, "intercept_error": 1.0488088481701516, "slope_error": 0.31622776601683794})
def test_softmax():
array = awkward1.Array([[numpy.log(2), numpy.log(2), numpy.log(4)], [], [numpy.log(5), numpy.log(5)]], checkvalid=True)
assert awkward1.tolist(awkward1.softmax(array, axis=-1)) == [pytest.approx([0.25, 0.25, 0.5]), [], pytest.approx([0.5, 0.5])]
| 43.616681 | 191 | 0.554922 | 7,178 | 51,773 | 3.996378 | 0.030231 | 0.111762 | 0.1248 | 0.073416 | 0.922506 | 0.904832 | 0.875967 | 0.846092 | 0.802343 | 0.788433 | 0 | 0.198893 | 0.256794 | 51,773 | 1,186 | 192 | 43.653457 | 0.546624 | 0.001661 | 0 | 0.741843 | 0 | 0 | 0.002902 | 0 | 0 | 0 | 0 | 0 | 0.223608 | 1 | 0.019194 | false | 0 | 0.004798 | 0 | 0.024952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
9f883d17678d2ebe4a0944892ef6b64ae78f5a11 | 2,430 | py | Python | exercises/practice/binary-search/binary_search_test.py | Stigjb/python | cfb620d1603eb9b08511f96f00f872c67cac0d05 | [
"MIT"
] | 1,177 | 2017-06-21T20:24:06.000Z | 2022-03-29T02:30:55.000Z | exercises/practice/binary-search/binary_search_test.py | Stigjb/python | cfb620d1603eb9b08511f96f00f872c67cac0d05 | [
"MIT"
] | 1,890 | 2017-06-18T20:06:10.000Z | 2022-03-31T18:35:51.000Z | exercises/practice/binary-search/binary_search_test.py | Stigjb/python | cfb620d1603eb9b08511f96f00f872c67cac0d05 | [
"MIT"
] | 1,095 | 2017-06-26T23:06:19.000Z | 2022-03-29T03:25:38.000Z | import unittest
from binary_search import (
find,
)
# Tests adapted from `problem-specifications//canonical-data.json`
class BinarySearchTest(unittest.TestCase):
def test_finds_a_value_in_an_array_with_one_element(self):
self.assertEqual(find([6], 6), 0)
def test_finds_a_value_in_the_middle_of_an_array(self):
self.assertEqual(find([1, 3, 4, 6, 8, 9, 11], 6), 3)
def test_finds_a_value_at_the_beginning_of_an_array(self):
self.assertEqual(find([1, 3, 4, 6, 8, 9, 11], 1), 0)
def test_finds_a_value_at_the_end_of_an_array(self):
self.assertEqual(find([1, 3, 4, 6, 8, 9, 11], 11), 6)
def test_finds_a_value_in_an_array_of_odd_length(self):
self.assertEqual(
find([1, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377, 634], 144), 9
)
def test_finds_a_value_in_an_array_of_even_length(self):
self.assertEqual(find([1, 3, 5, 8, 13, 21, 34, 55, 89, 144, 233, 377], 21), 5)
def test_identifies_that_a_value_is_not_included_in_the_array(self):
with self.assertRaises(ValueError) as err:
find([1, 3, 4, 6, 8, 9, 11], 7)
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "value not in array")
def test_a_value_smaller_than_the_array_s_smallest_value_is_not_found(self):
with self.assertRaises(ValueError) as err:
find([1, 3, 4, 6, 8, 9, 11], 0)
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "value not in array")
def test_a_value_larger_than_the_array_s_largest_value_is_not_found(self):
with self.assertRaises(ValueError) as err:
find([1, 3, 4, 6, 8, 9, 11], 13)
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "value not in array")
def test_nothing_is_found_in_an_empty_array(self):
with self.assertRaises(ValueError) as err:
find([], 1)
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "value not in array")
def test_nothing_is_found_when_the_left_and_right_bounds_cross(self):
with self.assertRaises(ValueError) as err:
find([1, 2], 0)
self.assertEqual(type(err.exception), ValueError)
self.assertEqual(err.exception.args[0], "value not in array")
| 31.973684 | 86 | 0.670782 | 377 | 2,430 | 4.04244 | 0.225464 | 0.15748 | 0.031496 | 0.051181 | 0.766404 | 0.766404 | 0.751969 | 0.721785 | 0.704068 | 0.637139 | 0 | 0.066077 | 0.209054 | 2,430 | 75 | 87 | 32.4 | 0.726847 | 0.026337 | 0 | 0.340909 | 0 | 0 | 0.038071 | 0 | 0 | 0 | 0 | 0 | 0.477273 | 1 | 0.25 | false | 0 | 0.045455 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
e23eaf40df00aef545557ba9bd7a456ed4d6b5e4 | 30,730 | py | Python | accountsplus/tests/test_signals.py | GhalebKhaled/django-users-plus | 467f6cb528672a1eafc336640d2c7d0f06c378c6 | [
"MIT"
] | 3 | 2016-05-26T13:25:19.000Z | 2020-12-30T07:40:02.000Z | accountsplus/tests/test_signals.py | GhalebKhaled/django-users-plus | 467f6cb528672a1eafc336640d2c7d0f06c378c6 | [
"MIT"
] | 31 | 2016-05-26T13:20:48.000Z | 2021-06-10T19:57:19.000Z | accountsplus/tests/test_signals.py | GhalebKhaled/django-users-plus | 467f6cb528672a1eafc336640d2c7d0f06c378c6 | [
"MIT"
] | 1 | 2018-05-24T13:01:40.000Z | 2018-05-24T13:01:40.000Z | from __future__ import unicode_literals
import django.test
import django.test.utils
import logging
import mock
import accountsplus.models
import accountsplus.signals
from .. import signals, models
from test_models import (UnitTestCompany, UnitTestUser, UnitTestAuditLogEvent)
logging.disable(logging.CRITICAL)
@django.test.utils.override_settings(
    AUTH_USER_MODEL='accountsplus.UnitTestUser',
    ACCOUNTS_AUDIT_LOG_EVENT_MODEL='accountsplus.UnitTestAuditLogEvent',
)
class SignalTestCase(django.test.TestCase):
    """Shared fixture base for the signal/audit-log tests.

    ``setUpTestData`` creates two companies and three users (superuser,
    staff, regular), all attached to company 1.  ``setUp`` builds two
    mocked requests: a plain one (``self.request``, user 1) and a
    masquerading one (``self.request_masquerade``, user 2, whose session
    marks user 1 as the masquerading superuser).
    """

    @classmethod
    def setUpTestData(cls):
        # NOTE(review): the test methods assume these rows get pks 1..3 in
        # creation order — confirm the test DB always starts empty.
        company_1 = UnitTestCompany.objects.create(name='Example')
        company_2 = UnitTestCompany.objects.create(name='Other Company')
        superuser = UnitTestUser.objects.create_superuser(
            email='superuser@example.com', password='password', first_name='Super', last_name='User')
        superuser.company = company_1
        superuser.save()
        staffuser = UnitTestUser.objects.create_user(
            email='staffuser@example.com', password='password', first_name='Staff', last_name='User')
        staffuser.is_staff = True
        staffuser.company = company_1
        staffuser.save()
        regular_user = UnitTestUser.objects.create_user(
            email='regularuser@example.com', password='password', first_name='Regular', last_name='User')
        regular_user.company = company_1
        regular_user.save()

    def setUp(self):
        # Session contents for a normal (non-masquerading) request.
        self.session_dict = {
            'is_masquerading': False,
        }
        # Session contents while user 1 masquerades as another user.
        self.session_dict_masquerade = {
            'is_masquerading': True,
            'masquerade_user_id': 1,
            'masquerade_is_superuser': True,
        }

        def get_item_generator(session_dict):
            # Build a dict.get()-like callable bound to session_dict; used as
            # the side_effect for the mocked session's __getitem__ and .get.
            def get_item(k, default=None):
                if k in session_dict:
                    return session_dict[k]
                else:
                    return default
            return get_item

        self.user_1 = UnitTestUser.objects.get(pk=1)
        self.user_2 = UnitTestUser.objects.get(pk=2)
        self.user_3 = UnitTestUser.objects.get(pk=3)
        self.company_1 = UnitTestCompany.objects.get(pk=1)
        self.company_2 = UnitTestCompany.objects.get(pk=2)
        # create a mock request
        self.request = mock.MagicMock()
        self.request.session = mock.MagicMock(spec_set=dict)
        self.request.session.__getitem__.side_effect = get_item_generator(self.session_dict)
        self.request.session.get.side_effect = get_item_generator(self.session_dict)
        self.request.user = self.user_1
        # create a mock request for masquerading
        self.request_masquerade = mock.MagicMock()
        self.request_masquerade.session = mock.MagicMock(spec_set=dict)
        self.request_masquerade.session.__getitem__.side_effect = get_item_generator(self.session_dict_masquerade)
        self.request_masquerade.session.get.side_effect = get_item_generator(self.session_dict_masquerade)
        self.request_masquerade.user = self.user_2
@django.test.utils.override_settings(
    AUTH_USER_MODEL='accountsplus.UnitTestUser',
    ACCOUNTS_AUDIT_LOG_EVENT_MODEL='accountsplus.UnitTestAuditLogEvent',
)
class AuditLogEventHelperCase(SignalTestCase):
    """Tests for signals.is_audit_log_enabled() and signals.log_audit_event()."""

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_is_audit_log_enabled_true(self):
        self.assertTrue(signals.is_audit_log_enabled())

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_is_audit_log_enabled_false(self):
        self.assertFalse(signals.is_audit_log_enabled())

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_log_audit_event(self):
        signals.log_audit_event(message='Test', request=self.request, user=self.user_1)
        # Exactly one event must exist; .get() raises otherwise.
        event = UnitTestAuditLogEvent.objects.get()
        expected = {
            'user_id': 1,
            'user_email': 'superuser@example.com',
            'company_id': 1,
            'masquerading_user_id': None,
            'masquerading_user_email': '',
            'message': 'Test',
        }
        for field, value in expected.items():
            self.assertEqual(getattr(event, field), value)

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_log_audit_event_masquerade(self):
        signals.log_audit_event(message='Test', request=self.request_masquerade, user=self.user_2)
        event = UnitTestAuditLogEvent.objects.get()
        expected = {
            'user_id': 2,
            'user_email': 'staffuser@example.com',
            'company_id': 1,
            'masquerading_user_id': 1,
            'masquerading_user_email': 'superuser@example.com',
            'message': 'Test',
        }
        for field, value in expected.items():
            self.assertEqual(getattr(event, field), value)

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_log_audit_event_no_audit_log(self):
        signals.log_audit_event(message='Test', request=self.request, user=self.user_1)
        self.assertEqual(UnitTestAuditLogEvent.objects.count(), 0)

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_log_audit_event_masquerade_no_audit_log(self):
        signals.log_audit_event(message='Test', request=self.request_masquerade, user=self.user_2)
        self.assertEqual(UnitTestAuditLogEvent.objects.count(), 0)
@django.test.utils.override_settings(
    AUTH_USER_MODEL='accountsplus.UnitTestUser',
    ACCOUNTS_AUDIT_LOG_EVENT_MODEL='accountsplus.UnitTestAuditLogEvent',
)
class LoginCallbackTestCase(SignalTestCase):
    """signals.login_callback: records a 'Sign in' audit event when
    ACCOUNTS_ENABLE_AUDIT_LOG is on (capturing the masquerading user from
    the session), and is registered on django's user_logged_in signal."""

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_login_callback(self):
        signals.login_callback(sender=self, request=self.request, user=self.user_1)
        # Exactly one event must exist; .get() raises otherwise.
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 1)
        self.assertEqual(audit_log_event.user_email, 'superuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, None)
        self.assertEqual(audit_log_event.masquerading_user_email, '')
        self.assertEqual(audit_log_event.message, 'Sign in')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_login_callback_masquerade(self):
        signals.login_callback(sender=self, request=self.request_masquerade, user=self.user_2)
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 2)
        self.assertEqual(audit_log_event.user_email, 'staffuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_email, 'superuser@example.com')
        self.assertEqual(audit_log_event.message, 'Sign in')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_login_callback_no_audit_log(self):
        # With the audit log disabled, nothing must be written.
        signals.login_callback(sender=self, request=self.request, user=self.user_1)
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_login_callback_masquerade_no_audit_log(self):
        signals.login_callback(sender=self, request=self.request_masquerade, user=self.user_2)
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    def test_signal_registration(self):
        # The callback must be a live receiver of user_logged_in.
        import django.contrib.auth.signals
        receivers = django.contrib.auth.signals.user_logged_in._live_receivers(self)
        self.assertIn(signals.login_callback, receivers)
@django.test.utils.override_settings(
    AUTH_USER_MODEL='accountsplus.UnitTestUser',
    ACCOUNTS_AUDIT_LOG_EVENT_MODEL='accountsplus.UnitTestAuditLogEvent',
)
class LogoutCallbackTestCase(SignalTestCase):
    """signals.logout_callback: records a 'Sign out' audit event when
    ACCOUNTS_ENABLE_AUDIT_LOG is on, and is registered on django's
    user_logged_out signal."""

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_logout_callback(self):
        signals.logout_callback(sender=self, request=self.request, user=self.user_1)
        # Exactly one event must exist; .get() raises otherwise.
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 1)
        self.assertEqual(audit_log_event.user_email, 'superuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, None)
        self.assertEqual(audit_log_event.masquerading_user_email, '')
        self.assertEqual(audit_log_event.message, 'Sign out')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_logout_callback_masquerade(self):
        signals.logout_callback(sender=self, request=self.request_masquerade, user=self.user_2)
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 2)
        self.assertEqual(audit_log_event.user_email, 'staffuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_email, 'superuser@example.com')
        self.assertEqual(audit_log_event.message, 'Sign out')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_logout_callback_no_audit_log(self):
        # With the audit log disabled, nothing must be written.
        signals.logout_callback(sender=self, request=self.request, user=self.user_1)
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_logout_callback_masquerade_no_audit_log(self):
        signals.logout_callback(sender=self, request=self.request_masquerade, user=self.user_2)
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    def test_signal_registration(self):
        # The callback must be a live receiver of user_logged_out.
        import django.contrib.auth.signals
        receivers = django.contrib.auth.signals.user_logged_out._live_receivers(self)
        self.assertIn(signals.logout_callback, receivers)
@django.test.utils.override_settings(
    AUTH_USER_MODEL='accountsplus.UnitTestUser',
    ACCOUNTS_AUDIT_LOG_EVENT_MODEL='accountsplus.UnitTestAuditLogEvent',
)
class MasqueradeStartCallbackTestCase(SignalTestCase):
    """Tests for signals.masquerade_start_callback."""

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_masquerade_start_callback(self):
        signals.masquerade_start_callback(
            sender=self, request=self.request, user=self.user_1, masquerade_as=self.user_2)
        event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(
            (event.user_id, event.user_email, event.company_id),
            (1, 'superuser@example.com', 1))
        self.assertIsNone(event.masquerading_user_id)
        self.assertEqual(event.masquerading_user_email, '')
        self.assertEqual(event.message, 'Masquerade start as staffuser@example.com (2)')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_masquerade_start_callback_no_audit_log(self):
        signals.masquerade_start_callback(
            sender=self, request=self.request, user=self.user_1, masquerade_as=self.user_2)
        self.assertEqual(UnitTestAuditLogEvent.objects.count(), 0)

    def test_signal_registration(self):
        live = accountsplus.signals.masquerade_start._live_receivers(self)
        self.assertIn(signals.masquerade_start_callback, live)
@django.test.utils.override_settings(
    AUTH_USER_MODEL='accountsplus.UnitTestUser',
    ACCOUNTS_AUDIT_LOG_EVENT_MODEL='accountsplus.UnitTestAuditLogEvent',
)
class MasqueradeEndCallbackTestCase(SignalTestCase):
    """Tests for signals.masquerade_end_callback."""

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_masquerade_end_callback(self):
        signals.masquerade_end_callback(
            sender=self, request=self.request, user=self.user_1, masquerade_as=self.user_2)
        event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(
            (event.user_id, event.user_email, event.company_id),
            (1, 'superuser@example.com', 1))
        self.assertIsNone(event.masquerading_user_id)
        self.assertEqual(event.masquerading_user_email, '')
        self.assertEqual(event.message, 'Masquerade end as staffuser@example.com (2)')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_masquerade_end_callback_no_audit_log(self):
        signals.masquerade_end_callback(
            sender=self, request=self.request, user=self.user_1, masquerade_as=self.user_2)
        self.assertEqual(UnitTestAuditLogEvent.objects.count(), 0)

    def test_signal_registration(self):
        live = accountsplus.signals.masquerade_end._live_receivers(self)
        self.assertIn(signals.masquerade_end_callback, live)
@django.test.utils.override_settings(
    AUTH_USER_MODEL='accountsplus.UnitTestUser',
    ACCOUNTS_AUDIT_LOG_EVENT_MODEL='accountsplus.UnitTestAuditLogEvent',
)
class PasswordResetCallbackTestCase(SignalTestCase):
    """signals.password_reset_request_callback: records a 'Request password
    reset' audit event when enabled; registered on the project's
    user_password_reset_request signal."""

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_password_reset_callback(self):
        signals.password_reset_request_callback(sender=self, request=self.request, user=self.user_1)
        # Exactly one event must exist; .get() raises otherwise.
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 1)
        self.assertEqual(audit_log_event.user_email, 'superuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, None)
        self.assertEqual(audit_log_event.masquerading_user_email, '')
        self.assertEqual(audit_log_event.message, 'Request password reset')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_password_reset_callback_masquerade(self):
        signals.password_reset_request_callback(sender=self, request=self.request_masquerade, user=self.user_2)
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 2)
        self.assertEqual(audit_log_event.user_email, 'staffuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_email, 'superuser@example.com')
        self.assertEqual(audit_log_event.message, 'Request password reset')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_password_reset_callback_no_audit_log(self):
        # With the audit log disabled, nothing must be written.
        signals.password_reset_request_callback(sender=self, request=self.request, user=self.user_1)
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_password_reset_callback_masquerade_no_audit_log(self):
        signals.password_reset_request_callback(sender=self, request=self.request_masquerade, user=self.user_2)
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    def test_signal_registration(self):
        receivers = accountsplus.signals.user_password_reset_request._live_receivers(self)
        self.assertIn(signals.password_reset_request_callback, receivers)
@django.test.utils.override_settings(
    AUTH_USER_MODEL='accountsplus.UnitTestUser',
    ACCOUNTS_AUDIT_LOG_EVENT_MODEL='accountsplus.UnitTestAuditLogEvent',
)
class PasswordChangeCallbackTestCase(SignalTestCase):
    """signals.password_change_callback: records a 'Change password' audit
    event when enabled; registered on the project's user_password_change
    signal."""

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_password_change_callback(self):
        signals.password_change_callback(sender=self, request=self.request, user=self.user_1)
        # Exactly one event must exist; .get() raises otherwise.
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 1)
        self.assertEqual(audit_log_event.user_email, 'superuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, None)
        self.assertEqual(audit_log_event.masquerading_user_email, '')
        self.assertEqual(audit_log_event.message, 'Change password')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_password_change_callback_masquerade(self):
        signals.password_change_callback(sender=self, request=self.request_masquerade, user=self.user_2)
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 2)
        self.assertEqual(audit_log_event.user_email, 'staffuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_email, 'superuser@example.com')
        self.assertEqual(audit_log_event.message, 'Change password')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_password_change_callback_no_audit_log(self):
        # With the audit log disabled, nothing must be written.
        signals.password_change_callback(sender=self, request=self.request, user=self.user_1)
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_password_change_callback_masquerade_no_audit_log(self):
        signals.password_change_callback(sender=self, request=self.request_masquerade, user=self.user_2)
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    def test_signal_registration(self):
        receivers = accountsplus.signals.user_password_change._live_receivers(self)
        self.assertIn(signals.password_change_callback, receivers)
@django.test.utils.override_settings(
    AUTH_USER_MODEL='accountsplus.UnitTestUser',
    ACCOUNTS_AUDIT_LOG_EVENT_MODEL='accountsplus.UnitTestAuditLogEvent',
)
class CreateCallbackTestCase(SignalTestCase):
    """signals.create_callback: records a 'Create by: <request.user> (<id>)'
    audit event against the created user when enabled; registered on the
    project's user_create signal."""

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_create_callback(self):
        signals.create_callback(sender=self, request=self.request, user=self.user_3)
        # Exactly one event must exist; .get() raises otherwise.
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 3)
        self.assertEqual(audit_log_event.user_email, 'regularuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, None)
        self.assertEqual(audit_log_event.masquerading_user_email, '')
        # The message names the acting user from the request, not user_3.
        self.assertEqual(audit_log_event.message, 'Create by: superuser@example.com (1)')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_create_callback_masquerade(self):
        signals.create_callback(sender=self, request=self.request_masquerade, user=self.user_3)
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 3)
        self.assertEqual(audit_log_event.user_email, 'regularuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_email, 'superuser@example.com')
        self.assertEqual(audit_log_event.message, 'Create by: staffuser@example.com (2)')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_create_callback_no_audit_log(self):
        # With the audit log disabled, nothing must be written.
        signals.create_callback(sender=self, request=self.request, user=self.user_3)
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_create_callback_masquerade_no_audit_log(self):
        signals.create_callback(sender=self, request=self.request_masquerade, user=self.user_3)
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    def test_signal_registration(self):
        receivers = accountsplus.signals.user_create._live_receivers(self)
        self.assertIn(signals.create_callback, receivers)
@django.test.utils.override_settings(
    AUTH_USER_MODEL='accountsplus.UnitTestUser',
    ACCOUNTS_AUDIT_LOG_EVENT_MODEL='accountsplus.UnitTestAuditLogEvent',
)
class EmailChangeCallbackTestCase(SignalTestCase):
    """signals.email_change_callback: records an 'Email change from: X to: Y'
    audit event when enabled; registered on the project's user_email_change
    signal."""

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_email_change_callback(self):
        signals.email_change_callback(
            sender=self, request=self.request, user=self.user_3, old_email='regularuser@example.com',
            new_email='change@example.com')
        # Exactly one event must exist; .get() raises otherwise.
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 3)
        self.assertEqual(audit_log_event.user_email, 'regularuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, None)
        self.assertEqual(audit_log_event.masquerading_user_email, '')
        self.assertEqual(audit_log_event.message, 'Email change from: regularuser@example.com to: change@example.com')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_email_change_callback_masquerade(self):
        signals.email_change_callback(
            sender=self, request=self.request_masquerade, user=self.user_3, old_email='regularuser@example.com',
            new_email='change@example.com')
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 3)
        self.assertEqual(audit_log_event.user_email, 'regularuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_email, 'superuser@example.com')
        self.assertEqual(audit_log_event.message, 'Email change from: regularuser@example.com to: change@example.com')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_email_change_callback_no_audit_log(self):
        # With the audit log disabled, nothing must be written.
        signals.email_change_callback(
            sender=self, request=self.request, user=self.user_3, old_email='regularuser@example.com',
            new_email='change@example.com')
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_email_change_callback_masquerade_no_audit_log(self):
        signals.email_change_callback(
            sender=self, request=self.request_masquerade, user=self.user_3, old_email='regularuser@example.com',
            new_email='change@example.com')
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    def test_signal_registration(self):
        receivers = accountsplus.signals.user_email_change._live_receivers(self)
        self.assertIn(signals.email_change_callback, receivers)
@django.test.utils.override_settings(
    AUTH_USER_MODEL='accountsplus.UnitTestUser',
    ACCOUNTS_AUDIT_LOG_EVENT_MODEL='accountsplus.UnitTestAuditLogEvent',
)
class DeactivateCallbackTestCase(SignalTestCase):
    """signals.deactivate_callback: records a 'Deactivate by: <actor> (<id>)'
    audit event against the deactivated user when enabled; registered on the
    project's user_deactivate signal."""

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_deactivate_callback(self):
        signals.deactivate_callback(sender=self, request=self.request, user=self.user_3)
        # Exactly one event must exist; .get() raises otherwise.
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 3)
        self.assertEqual(audit_log_event.user_email, 'regularuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, None)
        self.assertEqual(audit_log_event.masquerading_user_email, '')
        # The message names the acting user from the request, not user_3.
        self.assertEqual(audit_log_event.message, 'Deactivate by: superuser@example.com (1)')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_deactivate_callback_masquerade(self):
        signals.deactivate_callback(sender=self, request=self.request_masquerade, user=self.user_3)
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 3)
        self.assertEqual(audit_log_event.user_email, 'regularuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_email, 'superuser@example.com')
        self.assertEqual(audit_log_event.message, 'Deactivate by: staffuser@example.com (2)')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_deactivate_callback_no_audit_log(self):
        # With the audit log disabled, nothing must be written.
        signals.deactivate_callback(sender=self, request=self.request, user=self.user_3)
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_deactivate_callback_masquerade_no_audit_log(self):
        signals.deactivate_callback(sender=self, request=self.request_masquerade, user=self.user_3)
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    def test_signal_registration(self):
        receivers = accountsplus.signals.user_deactivate._live_receivers(self)
        self.assertIn(signals.deactivate_callback, receivers)
@django.test.utils.override_settings(
    AUTH_USER_MODEL='accountsplus.UnitTestUser',
    ACCOUNTS_AUDIT_LOG_EVENT_MODEL='accountsplus.UnitTestAuditLogEvent',
)
class ActivateCallbackTestCase(SignalTestCase):
    """signals.activate_callback: records an 'Activate by: <actor> (<id>)'
    audit event against the activated user when enabled; registered on the
    project's user_activate signal."""

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_activate_callback(self):
        signals.activate_callback(sender=self, request=self.request, user=self.user_3)
        # Exactly one event must exist; .get() raises otherwise.
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 3)
        self.assertEqual(audit_log_event.user_email, 'regularuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, None)
        self.assertEqual(audit_log_event.masquerading_user_email, '')
        # The message names the acting user from the request, not user_3.
        self.assertEqual(audit_log_event.message, 'Activate by: superuser@example.com (1)')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_activate_callback_masquerade(self):
        signals.activate_callback(sender=self, request=self.request_masquerade, user=self.user_3)
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 3)
        self.assertEqual(audit_log_event.user_email, 'regularuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_email, 'superuser@example.com')
        self.assertEqual(audit_log_event.message, 'Activate by: staffuser@example.com (2)')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_activate_callback_no_audit_log(self):
        # With the audit log disabled, nothing must be written.
        signals.activate_callback(sender=self, request=self.request, user=self.user_3)
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_activate_callback_masquerade_no_audit_log(self):
        signals.activate_callback(sender=self, request=self.request_masquerade, user=self.user_3)
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    def test_signal_registration(self):
        receivers = accountsplus.signals.user_activate._live_receivers(self)
        self.assertIn(signals.activate_callback, receivers)
@django.test.utils.override_settings(
    AUTH_USER_MODEL='accountsplus.UnitTestUser',
    ACCOUNTS_AUDIT_LOG_EVENT_MODEL='accountsplus.UnitTestAuditLogEvent',
)
class CompanyNameChangeCallbackTestCase(SignalTestCase):
    """signals.company_name_change_callback: records a 'Company id: N name
    change from: X to: Y' audit event when enabled; registered on the
    project's company_name_change signal."""

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_company_name_change_callback(self):
        signals.company_name_change_callback(
            sender=self, request=self.request, user=self.user_3, company=self.company_2, old_name='Old Name', new_name='New Name')
        # Exactly one event must exist; .get() raises otherwise.
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 3)
        self.assertEqual(audit_log_event.user_email, 'regularuser@example.com')
        # company_id is the acting user's company (1), while the message
        # references the renamed company (2).
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, None)
        self.assertEqual(audit_log_event.masquerading_user_email, '')
        self.assertEqual(audit_log_event.message, 'Company id: 2 name change from: Old Name to: New Name')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=True)
    def test_company_name_change_callback_masquerade(self):
        signals.company_name_change_callback(
            sender=self, request=self.request_masquerade, user=self.user_3, company=self.company_2, old_name='Old Name',
            new_name='New Name')
        audit_log_event = UnitTestAuditLogEvent.objects.get()
        self.assertEqual(audit_log_event.user_id, 3)
        self.assertEqual(audit_log_event.user_email, 'regularuser@example.com')
        self.assertEqual(audit_log_event.company_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_id, 1)
        self.assertEqual(audit_log_event.masquerading_user_email, 'superuser@example.com')
        self.assertEqual(audit_log_event.message, 'Company id: 2 name change from: Old Name to: New Name')

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_company_name_change_callback_no_audit_log(self):
        # With the audit log disabled, nothing must be written.
        signals.company_name_change_callback(
            sender=self, request=self.request, user=self.user_3, company=self.company_2, old_name='Old Name', new_name='New Name')
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    @django.test.utils.override_settings(ACCOUNTS_ENABLE_AUDIT_LOG=False)
    def test_company_name_change_callback_masquerade_no_audit_log(self):
        signals.company_name_change_callback(
            sender=self, request=self.request_masquerade, user=self.user_3, company=self.company_2, old_name='Old Name',
            new_name='New Name')
        self.assertEqual(0, UnitTestAuditLogEvent.objects.count())

    def test_signal_registration(self):
        receivers = accountsplus.signals.company_name_change._live_receivers(self)
        self.assertIn(signals.company_name_change_callback, receivers)
| 53.350694 | 130 | 0.76671 | 3,818 | 30,730 | 5.83473 | 0.034311 | 0.085828 | 0.097455 | 0.134219 | 0.922566 | 0.911748 | 0.8946 | 0.877587 | 0.869866 | 0.869866 | 0 | 0.005897 | 0.13918 | 30,730 | 575 | 131 | 53.443478 | 0.836238 | 0.001952 | 0 | 0.666667 | 0 | 0 | 0.084002 | 0.060425 | 0 | 0 | 0 | 0 | 0.343621 | 1 | 0.125514 | false | 0.059671 | 0.022634 | 0 | 0.18107 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 7 |
e2f028b45c62b52a3fee95fac5248817b7535301 | 183 | py | Python | learner/__init__.py | WhitneyOnTheWeb/BetaFlapZero | 40c41721257434accb8c0263f0c121067129ddf5 | [
"MIT"
] | null | null | null | learner/__init__.py | WhitneyOnTheWeb/BetaFlapZero | 40c41721257434accb8c0263f0c121067129ddf5 | [
"MIT"
] | null | null | null | learner/__init__.py | WhitneyOnTheWeb/BetaFlapZero | 40c41721257434accb8c0263f0c121067129ddf5 | [
"MIT"
] | null | null | null | from learner.flappy_util import Utility
from learner.flappy_inputs import Inputs
from learner.flappy_processor import FlappyProcessor
from learner.flappy_callback import FlappySession | 45.75 | 52 | 0.896175 | 24 | 183 | 6.666667 | 0.458333 | 0.275 | 0.425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.081967 | 183 | 4 | 53 | 45.75 | 0.952381 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
39138081dd7fadacc3b6a3ae75da50bcfee43483 | 22,426 | py | Python | mmtbx/utils/tst_switch_rotamers.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 155 | 2016-11-23T12:52:16.000Z | 2022-03-31T15:35:44.000Z | mmtbx/utils/tst_switch_rotamers.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 590 | 2016-12-10T11:31:18.000Z | 2022-03-30T23:10:09.000Z | mmtbx/utils/tst_switch_rotamers.py | dperl-sol/cctbx_project | b9e390221a2bc4fd00b9122e97c3b79c632c6664 | [
"BSD-3-Clause-LBNL"
] | 115 | 2016-11-15T08:17:28.000Z | 2022-02-09T15:30:14.000Z | from __future__ import absolute_import, division, print_function
import iotbx.pdb
from libtbx.test_utils import approx_equal
from scitbx.array_family import flex
import mmtbx.utils
pdb_str = """\
ATOM 1 N TYR A 58 8.659 20.073 11.185 1.00 7.73 N
ATOM 2 CA TYR A 58 9.250 19.144 10.233 1.00 8.65 C
ATOM 3 C TYR A 58 9.039 17.721 10.706 1.00 9.84 C
ATOM 4 O TYR A 58 9.023 17.464 11.919 1.00 8.58 O
ATOM 5 CB TYR A 58 10.740 19.429 10.045 1.00 20.00 C
ATOM 6 CG TYR A 58 11.155 20.825 10.457 1.00 20.00 C
ATOM 7 CD1 TYR A 58 10.204 21.809 10.700 1.00 20.00 C
ATOM 8 CD2 TYR A 58 12.494 21.155 10.603 1.00 20.00 C
ATOM 9 CE1 TYR A 58 10.579 23.084 11.076 1.00 20.00 C
ATOM 10 CE2 TYR A 58 12.879 22.429 10.980 1.00 20.00 C
ATOM 11 CZ TYR A 58 11.918 23.388 11.215 1.00 20.00 C
ATOM 12 OH TYR A 58 12.294 24.659 11.588 1.00 20.00 O
ATOM 21 N SER A 59 8.887 16.797 9.756 1.00 9.65 N
ATOM 22 CA SER A 59 8.720 15.382 10.080 1.00 5.80 C
ATOM 23 C SER A 59 9.606 14.539 9.169 1.00 10.35 C
ATOM 24 O SER A 59 9.972 14.972 8.075 1.00 10.56 O
ATOM 25 CB SER A 59 7.257 14.957 9.943 1.00 20.00 C
ATOM 26 OG SER A 59 6.811 15.089 8.604 1.00 20.00 O
ATOM 32 N ILE A 60 9.964 13.344 9.625 1.00 9.39 N
ATOM 33 CA ILE A 60 11.067 12.594 9.017 1.00 11.89 C
ATOM 34 C ILE A 60 10.635 11.189 8.608 1.00 9.81 C
ATOM 35 O ILE A 60 10.120 10.434 9.430 1.00 8.97 O
ATOM 36 CB ILE A 60 12.262 12.515 9.987 1.00 20.00 C
ATOM 37 CG1 ILE A 60 12.852 13.907 10.220 1.00 20.00 C
ATOM 38 CG2 ILE A 60 13.321 11.565 9.451 1.00 20.00 C
ATOM 39 CD1 ILE A 60 13.852 13.963 11.353 1.00 20.00 C
ATOM 51 N VAL A 61 10.858 10.832 7.348 1.00 7.72 N
ATOM 52 CA VAL A 61 10.510 9.497 6.870 1.00 9.11 C
ATOM 53 C VAL A 61 11.692 8.812 6.178 1.00 10.61 C
ATOM 54 O VAL A 61 11.963 9.081 5.006 1.00 11.05 O
ATOM 55 CB VAL A 61 9.349 9.563 5.856 1.00 10.49 C
ATOM 56 CG1 VAL A 61 9.023 8.166 5.330 1.00 8.40 C
ATOM 57 CG2 VAL A 61 8.127 10.228 6.485 1.00 9.13 C
ATOM 67 N VAL A 62 12.408 7.927 6.895 1.00 10.62 N
ATOM 68 CA VAL A 62 13.520 7.240 6.220 1.00 7.26 C
ATOM 69 C VAL A 62 13.019 6.486 5.000 1.00 10.75 C
ATOM 70 O VAL A 62 11.946 5.890 5.048 1.00 11.44 O
ATOM 71 CB VAL A 62 14.239 6.259 7.164 1.00 20.00 C
ATOM 72 CG1 VAL A 62 15.297 5.470 6.407 1.00 20.00 C
ATOM 73 CG2 VAL A 62 14.857 7.004 8.337 1.00 20.00 C
"""
max_distant = """\
ATOM 1 N TYR A 58 8.659 20.073 11.185 1.00 7.73 N
ATOM 2 CA TYR A 58 9.250 19.144 10.233 1.00 8.65 C
ATOM 3 C TYR A 58 9.039 17.721 10.706 1.00 9.84 C
ATOM 4 O TYR A 58 9.023 17.464 11.919 1.00 8.58 O
ATOM 5 CB TYR A 58 10.740 19.429 10.045 1.00 20.00 C
ATOM 6 CG TYR A 58 11.415 18.526 9.036 1.00 20.00 C
ATOM 7 CD1 TYR A 58 11.229 18.716 7.671 1.00 20.00 C
ATOM 8 CD2 TYR A 58 12.237 17.488 9.448 1.00 20.00 C
ATOM 9 CE1 TYR A 58 11.844 17.894 6.747 1.00 20.00 C
ATOM 10 CE2 TYR A 58 12.857 16.660 8.530 1.00 20.00 C
ATOM 11 CZ TYR A 58 12.657 16.868 7.183 1.00 20.00 C
ATOM 12 OH TYR A 58 13.270 16.046 6.263 1.00 20.00 O
ATOM 21 N SER A 59 8.887 16.797 9.756 1.00 9.65 N
ATOM 22 CA SER A 59 8.720 15.382 10.080 1.00 5.80 C
ATOM 23 C SER A 59 9.606 14.539 9.169 1.00 10.35 C
ATOM 24 O SER A 59 9.972 14.972 8.075 1.00 10.56 O
ATOM 25 CB SER A 59 7.257 14.957 9.943 1.00 20.00 C
ATOM 26 OG SER A 59 6.438 15.638 10.879 1.00 20.00 O
ATOM 32 N ILE A 60 9.964 13.344 9.625 1.00 9.39 N
ATOM 33 CA ILE A 60 11.067 12.594 9.017 1.00 11.89 C
ATOM 34 C ILE A 60 10.635 11.189 8.608 1.00 9.81 C
ATOM 35 O ILE A 60 10.120 10.434 9.430 1.00 8.97 O
ATOM 36 CB ILE A 60 12.262 12.515 9.987 1.00 20.00 C
ATOM 37 CG1 ILE A 60 11.838 11.859 11.302 1.00 20.00 C
ATOM 38 CG2 ILE A 60 12.836 13.900 10.238 1.00 20.00 C
ATOM 39 CD1 ILE A 60 11.874 10.347 11.268 1.00 20.00 C
ATOM 51 N VAL A 61 10.858 10.832 7.348 1.00 7.72 N
ATOM 52 CA VAL A 61 10.510 9.497 6.870 1.00 9.11 C
ATOM 53 C VAL A 61 11.692 8.812 6.178 1.00 10.61 C
ATOM 54 O VAL A 61 11.963 9.081 5.006 1.00 11.05 O
ATOM 55 CB VAL A 61 9.349 9.563 5.856 1.00 10.49 C
ATOM 56 CG1 VAL A 61 8.097 10.135 6.519 1.00 8.40 C
ATOM 57 CG2 VAL A 61 9.757 10.373 4.628 1.00 9.13 C
ATOM 67 N VAL A 62 12.408 7.927 6.895 1.00 10.62 N
ATOM 68 CA VAL A 62 13.520 7.240 6.220 1.00 7.26 C
ATOM 69 C VAL A 62 13.019 6.486 5.000 1.00 10.75 C
ATOM 70 O VAL A 62 11.946 5.890 5.048 1.00 11.44 O
ATOM 71 CB VAL A 62 14.239 6.259 7.164 1.00 20.00 C
ATOM 72 CG1 VAL A 62 14.849 7.004 8.342 1.00 20.00 C
ATOM 73 CG2 VAL A 62 13.279 5.181 7.645 1.00 20.00 C
"""
min_distant="""\
ATOM 1 N TYR A 58 8.659 20.073 11.185 1.00 7.73 N
ATOM 2 CA TYR A 58 9.250 19.144 10.233 1.00 8.65 C
ATOM 3 C TYR A 58 9.039 17.721 10.706 1.00 9.84 C
ATOM 4 O TYR A 58 9.023 17.464 11.919 1.00 8.58 O
ATOM 5 CB TYR A 58 10.740 19.429 10.045 1.00 20.00 C
ATOM 6 CG TYR A 58 11.032 20.755 9.375 1.00 20.00 C
ATOM 7 CD1 TYR A 58 10.946 20.890 7.995 1.00 20.00 C
ATOM 8 CD2 TYR A 58 11.391 21.866 10.124 1.00 20.00 C
ATOM 9 CE1 TYR A 58 11.212 22.098 7.381 1.00 20.00 C
ATOM 10 CE2 TYR A 58 11.658 23.080 9.517 1.00 20.00 C
ATOM 11 CZ TYR A 58 11.567 23.190 8.147 1.00 20.00 C
ATOM 12 OH TYR A 58 11.834 24.395 7.536 1.00 20.00 O
ATOM 21 N SER A 59 8.887 16.797 9.756 1.00 9.65 N
ATOM 22 CA SER A 59 8.720 15.382 10.080 1.00 5.80 C
ATOM 23 C SER A 59 9.606 14.539 9.169 1.00 10.35 C
ATOM 24 O SER A 59 9.972 14.972 8.075 1.00 10.56 O
ATOM 25 CB SER A 59 7.257 14.957 9.943 1.00 20.00 C
ATOM 26 OG SER A 59 7.098 13.577 10.226 1.00 20.00 O
ATOM 32 N ILE A 60 9.964 13.344 9.625 1.00 9.39 N
ATOM 33 CA ILE A 60 11.067 12.594 9.017 1.00 11.89 C
ATOM 34 C ILE A 60 10.635 11.189 8.608 1.00 9.81 C
ATOM 35 O ILE A 60 10.120 10.434 9.430 1.00 8.97 O
ATOM 36 CB ILE A 60 12.262 12.515 9.987 1.00 20.00 C
ATOM 37 CG1 ILE A 60 12.803 13.915 10.280 1.00 20.00 C
ATOM 38 CG2 ILE A 60 13.354 11.625 9.415 1.00 20.00 C
ATOM 39 CD1 ILE A 60 12.132 14.595 11.453 1.00 20.00 C
ATOM 51 N VAL A 61 10.858 10.832 7.348 1.00 7.72 N
ATOM 52 CA VAL A 61 10.510 9.497 6.870 1.00 9.11 C
ATOM 53 C VAL A 61 11.692 8.812 6.178 1.00 10.61 C
ATOM 54 O VAL A 61 11.963 9.081 5.006 1.00 11.05 O
ATOM 55 CB VAL A 61 9.349 9.563 5.856 1.00 10.49 C
ATOM 56 CG1 VAL A 61 9.784 10.315 4.599 1.00 8.40 C
ATOM 57 CG2 VAL A 61 8.845 8.160 5.528 1.00 9.13 C
ATOM 67 N VAL A 62 12.408 7.927 6.895 1.00 10.62 N
ATOM 68 CA VAL A 62 13.520 7.240 6.220 1.00 7.26 C
ATOM 69 C VAL A 62 13.019 6.486 5.000 1.00 10.75 C
ATOM 70 O VAL A 62 11.946 5.890 5.048 1.00 11.44 O
ATOM 71 CB VAL A 62 14.239 6.259 7.164 1.00 20.00 C
ATOM 72 CG1 VAL A 62 13.305 5.129 7.570 1.00 20.00 C
ATOM 73 CG2 VAL A 62 15.496 5.710 6.506 1.00 20.00 C
"""
exact_match="""\
ATOM 1 N TYR A 58 8.659 20.073 11.185 1.00 7.73 N
ATOM 2 CA TYR A 58 9.250 19.144 10.233 1.00 8.65 C
ATOM 3 C TYR A 58 9.039 17.721 10.706 1.00 9.84 C
ATOM 4 O TYR A 58 9.023 17.464 11.919 1.00 8.58 O
ATOM 5 CB TYR A 58 10.740 19.429 10.045 1.00 20.00 C
ATOM 6 CG TYR A 58 11.032 20.755 9.375 1.00 20.00 C
ATOM 7 CD1 TYR A 58 10.946 20.890 7.995 1.00 20.00 C
ATOM 8 CD2 TYR A 58 11.391 21.866 10.124 1.00 20.00 C
ATOM 9 CE1 TYR A 58 11.212 22.098 7.381 1.00 20.00 C
ATOM 10 CE2 TYR A 58 11.658 23.080 9.517 1.00 20.00 C
ATOM 11 CZ TYR A 58 11.567 23.190 8.147 1.00 20.00 C
ATOM 12 OH TYR A 58 11.834 24.395 7.536 1.00 20.00 O
ATOM 21 N SER A 59 8.887 16.797 9.756 1.00 9.65 N
ATOM 22 CA SER A 59 8.720 15.382 10.080 1.00 5.80 C
ATOM 23 C SER A 59 9.606 14.539 9.169 1.00 10.35 C
ATOM 24 O SER A 59 9.972 14.972 8.075 1.00 10.56 O
ATOM 25 CB SER A 59 7.257 14.957 9.943 1.00 20.00 C
ATOM 26 OG SER A 59 6.829 15.030 8.594 1.00 20.00 O
ATOM 32 N ILE A 60 9.964 13.344 9.625 1.00 9.39 N
ATOM 33 CA ILE A 60 11.067 12.594 9.017 1.00 11.89 C
ATOM 34 C ILE A 60 10.635 11.189 8.608 1.00 9.81 C
ATOM 35 O ILE A 60 10.120 10.434 9.430 1.00 8.97 O
ATOM 36 CB ILE A 60 12.262 12.515 9.987 1.00 20.00 C
ATOM 37 CG1 ILE A 60 12.808 13.915 10.275 1.00 20.00 C
ATOM 38 CG2 ILE A 60 13.351 11.619 9.419 1.00 20.00 C
ATOM 39 CD1 ILE A 60 13.808 13.957 11.408 1.00 20.00 C
ATOM 51 N VAL A 61 10.858 10.832 7.348 1.00 7.72 N
ATOM 52 CA VAL A 61 10.510 9.497 6.870 1.00 9.11 C
ATOM 53 C VAL A 61 11.692 8.812 6.178 1.00 10.61 C
ATOM 54 O VAL A 61 11.963 9.081 5.006 1.00 11.05 O
ATOM 55 CB VAL A 61 9.349 9.563 5.856 1.00 10.49 C
ATOM 56 CG1 VAL A 61 9.064 8.175 5.283 1.00 8.40 C
ATOM 57 CG2 VAL A 61 8.107 10.171 6.504 1.00 9.13 C
ATOM 67 N VAL A 62 12.408 7.927 6.895 1.00 10.62 N
ATOM 68 CA VAL A 62 13.520 7.240 6.220 1.00 7.26 C
ATOM 69 C VAL A 62 13.019 6.486 5.000 1.00 10.75 C
ATOM 70 O VAL A 62 11.946 5.890 5.048 1.00 11.44 O
ATOM 71 CB VAL A 62 14.239 6.259 7.164 1.00 20.00 C
ATOM 72 CG1 VAL A 62 15.313 5.487 6.412 1.00 20.00 C
ATOM 73 CG2 VAL A 62 14.836 7.001 8.350 1.00 20.00 C
"""
fix_outliers="""\
ATOM 1 N TYR A 58 8.659 20.073 11.185 1.00 7.73 N
ATOM 2 CA TYR A 58 9.250 19.144 10.233 1.00 8.65 C
ATOM 3 C TYR A 58 9.039 17.721 10.706 1.00 9.84 C
ATOM 4 O TYR A 58 9.023 17.464 11.919 1.00 8.58 O
ATOM 5 CB TYR A 58 10.740 19.429 10.045 1.00 20.00 C
ATOM 6 CG TYR A 58 11.032 20.755 9.375 1.00 20.00 C
ATOM 7 CD1 TYR A 58 10.946 20.890 7.995 1.00 20.00 C
ATOM 8 CD2 TYR A 58 11.391 21.866 10.124 1.00 20.00 C
ATOM 9 CE1 TYR A 58 11.212 22.098 7.381 1.00 20.00 C
ATOM 10 CE2 TYR A 58 11.658 23.080 9.517 1.00 20.00 C
ATOM 11 CZ TYR A 58 11.567 23.190 8.147 1.00 20.00 C
ATOM 12 OH TYR A 58 11.834 24.395 7.536 1.00 20.00 O
ATOM 21 N SER A 59 8.887 16.797 9.756 1.00 9.65 N
ATOM 22 CA SER A 59 8.720 15.382 10.080 1.00 5.80 C
ATOM 23 C SER A 59 9.606 14.539 9.169 1.00 10.35 C
ATOM 24 O SER A 59 9.972 14.972 8.075 1.00 10.56 O
ATOM 25 CB SER A 59 7.257 14.957 9.943 1.00 20.00 C
ATOM 26 OG SER A 59 6.811 15.089 8.604 1.00 20.00 O
ATOM 32 N ILE A 60 9.964 13.344 9.625 1.00 9.39 N
ATOM 33 CA ILE A 60 11.067 12.594 9.017 1.00 11.89 C
ATOM 34 C ILE A 60 10.635 11.189 8.608 1.00 9.81 C
ATOM 35 O ILE A 60 10.120 10.434 9.430 1.00 8.97 O
ATOM 36 CB ILE A 60 12.262 12.515 9.987 1.00 20.00 C
ATOM 37 CG1 ILE A 60 12.852 13.907 10.220 1.00 20.00 C
ATOM 38 CG2 ILE A 60 13.321 11.565 9.451 1.00 20.00 C
ATOM 39 CD1 ILE A 60 13.852 13.963 11.353 1.00 20.00 C
ATOM 51 N VAL A 61 10.858 10.832 7.348 1.00 7.72 N
ATOM 52 CA VAL A 61 10.510 9.497 6.870 1.00 9.11 C
ATOM 53 C VAL A 61 11.692 8.812 6.178 1.00 10.61 C
ATOM 54 O VAL A 61 11.963 9.081 5.006 1.00 11.05 O
ATOM 55 CB VAL A 61 9.349 9.563 5.856 1.00 10.49 C
ATOM 56 CG1 VAL A 61 9.023 8.166 5.330 1.00 8.40 C
ATOM 57 CG2 VAL A 61 8.127 10.228 6.485 1.00 9.13 C
ATOM 67 N VAL A 62 12.408 7.927 6.895 1.00 10.62 N
ATOM 68 CA VAL A 62 13.520 7.240 6.220 1.00 7.26 C
ATOM 69 C VAL A 62 13.019 6.486 5.000 1.00 10.75 C
ATOM 70 O VAL A 62 11.946 5.890 5.048 1.00 11.44 O
ATOM 71 CB VAL A 62 14.239 6.259 7.164 1.00 20.00 C
ATOM 72 CG1 VAL A 62 15.297 5.470 6.407 1.00 20.00 C
ATOM 73 CG2 VAL A 62 14.857 7.004 8.337 1.00 20.00 C
"""
selection="""\
ATOM 1 N TYR A 58 8.659 20.073 11.185 1.00 7.73 N
ATOM 2 CA TYR A 58 9.250 19.144 10.233 1.00 8.65 C
ATOM 3 C TYR A 58 9.039 17.721 10.706 1.00 9.84 C
ATOM 4 O TYR A 58 9.023 17.464 11.919 1.00 8.58 O
ATOM 5 CB TYR A 58 10.740 19.429 10.045 1.00 20.00 C
ATOM 6 CG TYR A 58 11.155 20.825 10.457 1.00 20.00 C
ATOM 7 CD1 TYR A 58 10.204 21.809 10.700 1.00 20.00 C
ATOM 8 CD2 TYR A 58 12.494 21.155 10.603 1.00 20.00 C
ATOM 9 CE1 TYR A 58 10.579 23.084 11.076 1.00 20.00 C
ATOM 10 CE2 TYR A 58 12.879 22.429 10.980 1.00 20.00 C
ATOM 11 CZ TYR A 58 11.918 23.388 11.215 1.00 20.00 C
ATOM 12 OH TYR A 58 12.294 24.659 11.588 1.00 20.00 O
ATOM 21 N SER A 59 8.887 16.797 9.756 1.00 9.65 N
ATOM 22 CA SER A 59 8.720 15.382 10.080 1.00 5.80 C
ATOM 23 C SER A 59 9.606 14.539 9.169 1.00 10.35 C
ATOM 24 O SER A 59 9.972 14.972 8.075 1.00 10.56 O
ATOM 25 CB SER A 59 7.257 14.957 9.943 1.00 20.00 C
ATOM 26 OG SER A 59 6.811 15.089 8.604 1.00 20.00 O
ATOM 32 N ILE A 60 9.964 13.344 9.625 1.00 9.39 N
ATOM 33 CA ILE A 60 11.067 12.594 9.017 1.00 11.89 C
ATOM 34 C ILE A 60 10.635 11.189 8.608 1.00 9.81 C
ATOM 35 O ILE A 60 10.120 10.434 9.430 1.00 8.97 O
ATOM 36 CB ILE A 60 12.262 12.515 9.987 1.00 20.00 C
ATOM 37 CG1 ILE A 60 12.852 13.907 10.220 1.00 20.00 C
ATOM 38 CG2 ILE A 60 13.321 11.565 9.451 1.00 20.00 C
ATOM 39 CD1 ILE A 60 13.852 13.963 11.353 1.00 20.00 C
ATOM 51 N VAL A 61 10.858 10.832 7.348 1.00 7.72 N
ATOM 52 CA VAL A 61 10.510 9.497 6.870 1.00 9.11 C
ATOM 53 C VAL A 61 11.692 8.812 6.178 1.00 10.61 C
ATOM 54 O VAL A 61 11.963 9.081 5.006 1.00 11.05 O
ATOM 55 CB VAL A 61 9.349 9.563 5.856 1.00 10.49 C
ATOM 56 CG1 VAL A 61 9.023 8.166 5.330 1.00 8.40 C
ATOM 57 CG2 VAL A 61 8.127 10.228 6.485 1.00 9.13 C
ATOM 67 N VAL A 62 12.408 7.927 6.895 1.00 10.62 N
ATOM 68 CA VAL A 62 13.520 7.240 6.220 1.00 7.26 C
ATOM 69 C VAL A 62 13.019 6.486 5.000 1.00 10.75 C
ATOM 70 O VAL A 62 11.946 5.890 5.048 1.00 11.44 O
ATOM 71 CB VAL A 62 14.239 6.259 7.164 1.00 20.00 C
ATOM 72 CG1 VAL A 62 15.297 5.470 6.407 1.00 20.00 C
ATOM 73 CG2 VAL A 62 14.857 7.004 8.337 1.00 20.00 C
"""
def core(mode, result, t1, t2):
  """Run switch_rotamers in the given mode and compare against a reference.

  Writes <prefix>_in.pdb / <prefix>_out.pdb, then checks that
  (a) the switched coordinates match the expected `result` PDB (min/max/mean
      distance within t1), and
  (b) the displacement from the starting model matches t2.
  """
  prefix = "exercise_%s" % mode
  hierarchy = iotbx.pdb.input(
    source_info=None, lines=pdb_str).construct_hierarchy()
  sites_start = hierarchy.atoms().extract_xyz()
  hierarchy.write_pdb_file(file_name="%s_in.pdb" % prefix)
  hierarchy = mmtbx.utils.switch_rotamers(pdb_hierarchy=hierarchy, mode=mode)
  hierarchy.write_pdb_file(file_name="%s_out.pdb" % prefix)
  sites_expected = iotbx.pdb.input(
    source_info=None, lines=result).atoms().extract_xyz()
  sites_final = hierarchy.atoms().extract_xyz()
  dist_stats = flex.sqrt(
    (sites_expected - sites_final).dot()).min_max_mean().as_tuple()
  assert approx_equal(dist_stats, t1, 1.e-3)
  dist_stats = flex.sqrt(
    (sites_final - sites_start).dot()).min_max_mean().as_tuple()
  assert approx_equal(dist_stats, t2, 0.1)
def exercise_fix_outliers(prefix="exercise_fix_outliers"):
  """Exercise mode="fix_outliers": only the TYR outlier side chain moves.

  Atoms outside the TYR side-chain selection must be left untouched; the
  selected atoms must move and match the reference answer `fix_outliers`.
  """
  hierarchy = iotbx.pdb.input(
    source_info=None, lines=pdb_str).construct_hierarchy()
  sel = hierarchy.atom_selection_cache().selection(string =
    "resname TYR and not (name O or name CA or name N or name C or name CB)")
  sites_start = hierarchy.atoms().extract_xyz()
  hierarchy.write_pdb_file(file_name="%s_in.pdb" % prefix)
  hierarchy = mmtbx.utils.switch_rotamers(
    pdb_hierarchy=hierarchy, mode="fix_outliers")
  hierarchy.write_pdb_file(file_name="%s_out.pdb" % prefix)
  sites_expected = iotbx.pdb.input(
    source_info=None, lines=fix_outliers).atoms().extract_xyz()
  sites_final = hierarchy.atoms().extract_xyz()
  # Atoms outside the selection (the "fixed" atoms) must not move at all,
  # in any pairwise comparison of expected/final/start coordinates.
  fixed = ~sel
  for a, b in ((sites_expected, sites_final),
               (sites_expected, sites_start),
               (sites_final, sites_start)):
    d = flex.sqrt(
      (a.select(fixed) - b.select(fixed)).dot()).min_max_mean().as_tuple()
    assert approx_equal(d, [0,0,0])
  # The selected atoms must match the reference answer exactly (to 1e-3) ...
  d = flex.sqrt(
    (sites_expected.select(sel) - sites_final.select(sel)).dot()
    ).min_max_mean().as_tuple()
  assert approx_equal(d, [0,0,0], 1.e-3)
  # ... and must have moved from the start by the known min/max/mean amounts.
  for moved in (sites_expected, sites_final):
    d = flex.sqrt(
      (moved.select(sel) - sites_start.select(sel)).dot()
      ).min_max_mean().as_tuple()
    assert approx_equal(d, [1.1, 4.0, 2.6], 0.1)
def exercise_selection(prefix="exercise_selection"):
  """Exercise the `selection` argument of switch_rotamers.

  With every non-TYR atom selected, fix_outliers must change nothing: TYR
  (the only rotamer outlier in pdb_str) is excluded from the selection.
  """
  hierarchy = iotbx.pdb.input(
    source_info=None, lines=pdb_str).construct_hierarchy()
  sel = hierarchy.atom_selection_cache().selection(string = "not resname TYR")
  sites_start = hierarchy.atoms().extract_xyz()
  hierarchy.write_pdb_file(file_name="%s_in.pdb" % prefix)
  hierarchy = mmtbx.utils.switch_rotamers(pdb_hierarchy=hierarchy,
    mode="fix_outliers", selection = sel)
  hierarchy.write_pdb_file(file_name="%s_out.pdb" % prefix)
  sites_expected = iotbx.pdb.input(
    source_info=None, lines=selection).atoms().extract_xyz()
  sites_final = hierarchy.atoms().extract_xyz()
  # Nothing may move: every pairwise distance distribution is exactly zero.
  for a, b in ((sites_expected, sites_final),
               (sites_expected, sites_start),
               (sites_final, sites_start)):
    d = flex.sqrt((a - b).dot()).min_max_mean().as_tuple()
    assert approx_equal(d, [0,0,0])
if (__name__ == "__main__"):
  # Each call compares against a hard-coded reference model. t1 is the
  # expected (min, max, mean) distance to the reference answer; t2 is the
  # expected (min, max, mean) displacement from the starting model.
  # Reference values were presumably derived from known-good runs of
  # mmtbx.utils.switch_rotamers -- regenerate them if the rotamer library
  # changes.
  core(mode="max_distant", result=max_distant, t1=[0,0,0], t2=[0, 10.2, 1.5])
  core(mode="min_distant", result=min_distant, t1=[0,0,0], t2=[0, 4.1, 0.8])
  core(mode="exact_match", result=exact_match, t1=[0,0,0], t2=[0, 4.1, 0.4])
  exercise_fix_outliers()
  exercise_selection()
| 68.371951 | 84 | 0.48096 | 4,679 | 22,426 | 2.275272 | 0.080573 | 0.067631 | 0.047905 | 0.067067 | 0.908792 | 0.906538 | 0.905974 | 0.903908 | 0.900714 | 0.898647 | 0 | 0.469687 | 0.43512 | 22,426 | 327 | 85 | 68.58104 | 0.370698 | 0.002185 | 0 | 0.770701 | 0 | 0.764331 | 0.859473 | 0.000939 | 0 | 0 | 0 | 0 | 0.035032 | 1 | 0.009554 | false | 0 | 0.015924 | 0 | 0.025478 | 0.003185 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 |
39321c43fbcb5ce0a14c22eb51a53a8ad5b6eced | 20,133 | py | Python | prediction_utils/prediction_utils/pytorch_utils/datasets.py | som-shahlab/sepsis_transfer_learning_public | e41b3d1f43f0e59726e04215ea0da9c9919c0f68 | [
"MIT"
] | null | null | null | prediction_utils/prediction_utils/pytorch_utils/datasets.py | som-shahlab/sepsis_transfer_learning_public | e41b3d1f43f0e59726e04215ea0da9c9919c0f68 | [
"MIT"
] | null | null | null | prediction_utils/prediction_utils/pytorch_utils/datasets.py | som-shahlab/sepsis_transfer_learning_public | e41b3d1f43f0e59726e04215ea0da9c9919c0f68 | [
"MIT"
] | 1 | 2021-04-08T01:23:32.000Z | 2021-04-08T01:23:32.000Z | import pandas as pd
import torch
import scipy as sp
import numpy as np
import dask
import dask.dataframe as dd
import random
import math
from torch.utils.data import Dataset, DataLoader, RandomSampler, BatchSampler
from torch.utils.data.dataloader import default_collate
from dask.distributed import Client
class LoaderGenerator:
    """
    Abstract base class for objects that construct data loaders.

    The constructor builds ``self.config_dict`` by taking the subclass
    defaults (``get_default_config``) and overlaying any keyword arguments
    (``override_config``). Subclasses must implement all three abstract
    methods below.
    """

    def __init__(self, *args, **kwargs):
        # Defaults first, then per-instance keyword overrides.
        self.config_dict = self.get_default_config()
        self.config_dict = self.override_config(**kwargs)

    def init_loaders(self):
        """
        Returns a dictionary of dataloaders with keys indicating phases
        """
        raise NotImplementedError

    def get_default_config(self):
        """
        Defines the default config_dict
        """
        raise NotImplementedError

    def override_config(self, **kwargs):
        """
        Overrides the config dict with provided kwargs.

        Bug fix: the signature now accepts **kwargs, matching how
        ``__init__`` invokes it -- the previous zero-argument signature
        raised TypeError (instead of NotImplementedError) whenever kwargs
        were supplied to a subclass that forgot to override this method.
        """
        raise NotImplementedError
class ArrayLoaderGenerator(LoaderGenerator):
    """
    LoaderGenerator corresponding to ArrayDataset.

    Partitions a cohort dataframe into train/eval/test phases using its
    "fold_id" column, pairs each partition with the matching rows of a
    feature array, and exposes the result as torch DataLoaders.
    """

    def __init__(
        self,
        *args,
        features=None,
        cohort=None,
        fold_id_test="test",
        train_key="train",
        eval_key="val",
        test_key="test",
        row_id_col="row_id",
        **kwargs
    ):
        # Bug fix: do not forward `self` explicitly through super() --
        # super().__init__(self, ...) passed a duplicate reference in *args.
        super().__init__(*args, **kwargs)
        self.num_workers = kwargs.get("num_workers", 0)
        self.data_dict = self.get_data_dict(
            features=features,
            cohort=cohort,
            fold_id_test=fold_id_test,
            train_key=train_key,
            eval_key=eval_key,
            test_key=test_key,
            row_id_col=row_id_col,
            **kwargs
        )

    def init_datasets(self):
        """
        Builds one ArrayDataset per phase from self.data_dict.

        Returns:
            dict mapping phase key -> ArrayDataset
        """
        convert_sparse = self.config_dict.get("sparse_mode") == "convert"
        phases = self.data_dict["row_id"].keys()
        tensor_dict_dict = {
            key: {
                "features": self.data_dict["features"][key],
                "labels": torch.as_tensor(
                    self.data_dict["labels"][key], dtype=torch.long
                ),
                "row_id": torch.LongTensor(self.data_dict["row_id"][key]),
            }
            for key in phases
        }
        if self.config_dict.get("include_group_in_dataset"):
            for key in phases:
                # np.copy: presumably guards against torch warning on
                # non-writable arrays from pd.Categorical(...).codes --
                # TODO confirm.
                tensor_dict_dict[key]["group"] = torch.as_tensor(
                    np.copy(self.data_dict["group"][key]), dtype=torch.long
                )
        return {
            key: ArrayDataset(
                tensor_dict=tensor_dict_dict[key], convert_sparse=convert_sparse,
            )
            for key in phases
        }

    @staticmethod
    def _resolve_collate_fn(dataset_dict):
        # Use the train dataset's collate_fn when it defines a callable one;
        # otherwise fall back to torch's default_collate.
        if hasattr(dataset_dict["train"], "collate_fn") and callable(
            getattr(dataset_dict["train"], "collate_fn")
        ):
            return dataset_dict["train"].collate_fn
        return default_collate

    def init_loaders(self, sample_keys=None):
        """
        Method that converts data and labels to instances of
        torch.utils.data.DataLoader.

        Args:
            sample_keys: phase keys whose loaders draw a fixed number of
                random batches per epoch. Defaults to ["train"] when config
                "iters_per_epoch" is set, otherwise to no phases.

        Returns:
            a dictionary with the same keys as data_dict and label_dict.
            Each element of the dictionary is an instance of
            torch.utils.data.DataLoader that yields paired elements of data
            and labels.
        """
        dataset_dict = self.init_datasets()
        collate_fn = self._resolve_collate_fn(dataset_dict)
        # If 'iters_per_epoch' is defined, then a fixed number of random
        # sample batches from the sampled phases are drawn per epoch.
        # Otherwise, an epoch is a full run through the dataloader.
        num_samples = None
        if self.config_dict.get("iters_per_epoch") is not None:
            num_samples = (
                self.config_dict["iters_per_epoch"] * self.config_dict["batch_size"]
            )
            if sample_keys is None:
                sample_keys = ["train"]
        elif sample_keys is None:
            sample_keys = []
        loaders_dict = {}
        for key in dataset_dict.keys():
            # Robustness fix: previously, passing sample_keys explicitly
            # while "iters_per_epoch" was None raised NameError on
            # num_samples; such phases now get a plain sequential loader.
            if key in sample_keys and num_samples is not None:
                loaders_dict[key] = DataLoader(
                    dataset_dict[key],
                    batch_sampler=BatchSampler(
                        RandomSampler(
                            dataset_dict[key], replacement=True, num_samples=num_samples
                        ),
                        batch_size=self.config_dict["batch_size"],
                        drop_last=False,
                    ),
                    collate_fn=collate_fn,
                    num_workers=self.num_workers,
                    pin_memory=True,
                )
            else:
                loaders_dict[key] = DataLoader(
                    dataset_dict[key],
                    batch_size=self.config_dict["batch_size"],
                    collate_fn=collate_fn,
                    num_workers=self.num_workers,
                    pin_memory=True,
                )
        return loaders_dict

    def init_loaders_predict(self, *args):
        """
        Creates data loaders from inputs - for use at prediction time.
        Every phase gets a plain sequential (non-sampled) DataLoader.
        """
        dataset_dict = self.init_datasets()
        collate_fn = self._resolve_collate_fn(dataset_dict)
        return {
            key: DataLoader(
                dataset_dict[key],
                batch_size=self.config_dict["batch_size"],
                collate_fn=collate_fn,
                num_workers=self.num_workers,
                pin_memory=True,
            )
            for key in dataset_dict.keys()
        }

    def get_data_dict(
        self,
        features=None,
        cohort=None,
        fold_id_test="test",
        train_key="train",
        eval_key="val",
        test_key="test",
        row_id_col="row_id",
        label_col="outcome",
        sensitive_attribute=None,
        load_features=True,
        **kwargs
    ):
        """
        Generates a data_dict from a features array and a cohort dataframe.
        Args:
            features: The input feature matrix
            cohort: A dataframe with a column called "fold_id" that maps to fold_id
            fold_id_test: The fold_id corresponding to the test set
            train_key: A string that will be used to refer to the training set in the result
            eval_key: A string that will be used to refer to the validation set in the result
            test_key: A string that will be used to refer to the test set in the result
            row_id_col: column holding the unique row id for each prediction
            label_col: NOTE(review): unused -- labels are read from config
                "label_col"; parameter kept for interface compatibility.
            sensitive_attribute: optional column defining group membership
            load_features: whether to slice `features` per phase
        The validation fold is read from config "fold_id".
        """
        fold_id = self.config_dict.get("fold_id")
        if fold_id is None:
            # No validation fold configured; "" matches no fold_id, so the
            # eval partition comes out empty and is dropped below.
            fold_id = ""
        fold_id = str(fold_id)
        train_eval_df = cohort.query("fold_id != @fold_id_test")
        # Partition the cohort data into the training phases
        cohort_dict = {
            train_key: train_eval_df.query("fold_id != @fold_id"),
            eval_key: train_eval_df.query("fold_id == @fold_id"),
            test_key: cohort.query("fold_id == @fold_id_test"),
        }
        # Ensure that each partition is sorted; drop empty partitions
        cohort_dict = {
            key: value.sort_values(row_id_col)
            for key, value in cohort_dict.items()
            if value.shape[0] > 0
        }
        data_dict = {}
        # Save the row_id corresponding to unique predictions
        data_dict["row_id"] = {
            key: value[row_id_col].values for key, value in cohort_dict.items()
        }
        # Encode the sensitive_attribute as integer group codes
        if sensitive_attribute is not None:
            # Categories come from the whole cohort so codes agree across
            # phases.
            categories = cohort[sensitive_attribute].sort_values().unique()
            print(categories)
            data_dict["group"] = {
                key: pd.Categorical(
                    value[sensitive_attribute], categories=categories
                ).codes
                for key, value in cohort_dict.items()
            }
            self.config_dict["num_groups"] = len(categories)
        # If features should be loaded, slice the feature matrix per phase
        if load_features:
            data_dict["features"] = {}
            for key in cohort_dict.keys():
                data_dict["features"][key] = features[data_dict["row_id"][key], :]
        # Binarize labels: positive iff the configured label column is > 0
        data_dict["labels"] = {
            key: np.int64((value[self.config_dict["label_col"]] > 0).values)
            for key, value in cohort_dict.items()
        }
        return data_dict

    def get_default_config(self):
        """Defaults; callers typically also supply "label_col" via kwargs."""
        return {"batch_size": 256, "iters_per_epoch": 100}

    def override_config(self, **override_dict):
        """Overlay keyword overrides onto the current config_dict."""
        return {**self.config_dict, **override_dict}
class ArrayLoaderGenerator_Alt(LoaderGenerator):
    """
    Variant of ArrayLoaderGenerator that supports multiple held-out folds.

    Instead of a single `fold_id_test`, a list `fold_id_test_list` is given;
    each listed fold becomes its own phase in the resulting loaders, keyed
    by its fold_id string.
    """

    def __init__(
        self,
        *args,
        features=None,
        cohort=None,
        fold_id_test_list=None,
        train_key="train",
        eval_key="val",
        row_id_col="row_id",
        **kwargs
    ):
        # Bug fix: avoid a mutable default argument; None stands in for
        # the previous default ["test"].
        if fold_id_test_list is None:
            fold_id_test_list = ["test"]
        # Bug fix: do not forward `self` explicitly through super() --
        # super().__init__(self, ...) passed a duplicate reference in *args.
        super().__init__(*args, **kwargs)
        self.num_workers = kwargs.get("num_workers", 0)
        self.data_dict = self.get_data_dict(
            features=features,
            cohort=cohort,
            fold_id_test_list=fold_id_test_list,
            train_key=train_key,
            eval_key=eval_key,
            row_id_col=row_id_col,
            **kwargs
        )

    def init_datasets(self):
        """
        Builds one ArrayDataset per phase from self.data_dict.

        Returns:
            dict mapping phase key -> ArrayDataset
        """
        convert_sparse = self.config_dict.get("sparse_mode") == "convert"
        phases = self.data_dict["row_id"].keys()
        tensor_dict_dict = {
            key: {
                "features": self.data_dict["features"][key],
                "labels": torch.as_tensor(
                    self.data_dict["labels"][key], dtype=torch.long
                ),
                "row_id": torch.LongTensor(self.data_dict["row_id"][key]),
            }
            for key in phases
        }
        if self.config_dict.get("include_group_in_dataset"):
            for key in phases:
                # np.copy: presumably guards against torch warning on
                # non-writable arrays from pd.Categorical(...).codes --
                # TODO confirm.
                tensor_dict_dict[key]["group"] = torch.as_tensor(
                    np.copy(self.data_dict["group"][key]), dtype=torch.long
                )
        return {
            key: ArrayDataset(
                tensor_dict=tensor_dict_dict[key], convert_sparse=convert_sparse,
            )
            for key in phases
        }

    @staticmethod
    def _resolve_collate_fn(dataset_dict):
        # Use the train dataset's collate_fn when it defines a callable one;
        # otherwise fall back to torch's default_collate.
        if hasattr(dataset_dict["train"], "collate_fn") and callable(
            getattr(dataset_dict["train"], "collate_fn")
        ):
            return dataset_dict["train"].collate_fn
        return default_collate

    def init_loaders(self, sample_keys=None):
        """
        Method that converts data and labels to instances of
        torch.utils.data.DataLoader.

        Args:
            sample_keys: phase keys whose loaders draw a fixed number of
                random batches per epoch. Defaults to ["train"] when config
                "iters_per_epoch" is set, otherwise to no phases.

        Returns:
            a dictionary with the same keys as data_dict and label_dict.
            Each element of the dictionary is an instance of
            torch.utils.data.DataLoader that yields paired elements of data
            and labels.
        """
        dataset_dict = self.init_datasets()
        collate_fn = self._resolve_collate_fn(dataset_dict)
        # If 'iters_per_epoch' is defined, then a fixed number of random
        # sample batches from the sampled phases are drawn per epoch.
        # Otherwise, an epoch is a full run through the dataloader.
        num_samples = None
        if self.config_dict.get("iters_per_epoch") is not None:
            num_samples = (
                self.config_dict["iters_per_epoch"] * self.config_dict["batch_size"]
            )
            if sample_keys is None:
                sample_keys = ["train"]
        elif sample_keys is None:
            sample_keys = []
        loaders_dict = {}
        for key in dataset_dict.keys():
            # Robustness fix: previously, passing sample_keys explicitly
            # while "iters_per_epoch" was None raised NameError on
            # num_samples; such phases now get a plain sequential loader.
            if key in sample_keys and num_samples is not None:
                loaders_dict[key] = DataLoader(
                    dataset_dict[key],
                    batch_sampler=BatchSampler(
                        RandomSampler(
                            dataset_dict[key], replacement=True, num_samples=num_samples
                        ),
                        batch_size=self.config_dict["batch_size"],
                        drop_last=False,
                    ),
                    collate_fn=collate_fn,
                    num_workers=self.num_workers,
                    pin_memory=True,
                )
            else:
                loaders_dict[key] = DataLoader(
                    dataset_dict[key],
                    batch_size=self.config_dict["batch_size"],
                    collate_fn=collate_fn,
                    num_workers=self.num_workers,
                    pin_memory=True,
                )
        return loaders_dict

    def init_loaders_predict(self, *args):
        """
        Creates data loaders from inputs - for use at prediction time.
        Every phase gets a plain sequential (non-sampled) DataLoader.
        """
        dataset_dict = self.init_datasets()
        collate_fn = self._resolve_collate_fn(dataset_dict)
        return {
            key: DataLoader(
                dataset_dict[key],
                batch_size=self.config_dict["batch_size"],
                collate_fn=collate_fn,
                num_workers=self.num_workers,
                pin_memory=True,
            )
            for key in dataset_dict.keys()
        }

    def get_data_dict(
        self,
        features=None,
        cohort=None,
        fold_id_test_list=None,
        train_key="train",
        eval_key="val",
        row_id_col="row_id",
        label_col="outcome",
        sensitive_attribute=None,
        load_features=True,
        **kwargs
    ):
        """
        Generates a data_dict from a features array and a cohort dataframe.
        Args:
            features: The input feature matrix
            cohort: A dataframe with a column called "fold_id" that maps to fold_id
            fold_id_test_list: fold_ids held out as separate phases; each
                becomes a key in the result (default ["test"])
            train_key: A string that will be used to refer to the training set in the result
            eval_key: A string that will be used to refer to the validation set in the result
            row_id_col: column holding the unique row id for each prediction
            label_col: NOTE(review): unused -- labels are read from config
                "label_col"; parameter kept for interface compatibility.
            sensitive_attribute: optional column defining group membership
            load_features: whether to slice `features` per phase
        The validation fold is read from config "fold_id".
        """
        # Bug fix: mutable default argument replaced with None sentinel.
        if fold_id_test_list is None:
            fold_id_test_list = ["test"]
        fold_id = self.config_dict.get("fold_id")
        if fold_id is None:
            # No validation fold configured; "" matches no fold_id, so the
            # eval partition comes out empty and is dropped below.
            fold_id = ""
        fold_id = str(fold_id)
        # Each held-out fold becomes its own phase, keyed by its fold_id.
        heldout_dict = {
            key: cohort.query('fold_id == "{}"'.format(key))
            for key in fold_id_test_list
        }
        train_eval_fold_ids = list(set(cohort.fold_id) - set(fold_id_test_list))
        train_eval_df = cohort.query("fold_id in @train_eval_fold_ids")
        # Partition the cohort data into the training phases
        cohort_dict = {
            train_key: train_eval_df.query("fold_id != @fold_id"),
            eval_key: train_eval_df.query("fold_id == @fold_id"),
        }
        cohort_dict = {**cohort_dict, **heldout_dict}
        # Ensure that each partition is sorted; drop empty partitions
        cohort_dict = {
            key: value.sort_values(row_id_col)
            for key, value in cohort_dict.items()
            if value.shape[0] > 0
        }
        data_dict = {}
        # Save the row_id corresponding to unique predictions
        data_dict["row_id"] = {
            key: value[row_id_col].values for key, value in cohort_dict.items()
        }
        # Encode the sensitive_attribute as integer group codes
        if sensitive_attribute is not None:
            # Categories come from the whole cohort so codes agree across
            # phases.
            categories = cohort[sensitive_attribute].sort_values().unique()
            print(categories)
            data_dict["group"] = {
                key: pd.Categorical(
                    value[sensitive_attribute], categories=categories
                ).codes
                for key, value in cohort_dict.items()
            }
            self.config_dict["num_groups"] = len(categories)
        # If features should be loaded, slice the feature matrix per phase
        if load_features:
            data_dict["features"] = {}
            for key in cohort_dict.keys():
                data_dict["features"][key] = features[data_dict["row_id"][key], :]
        # Binarize labels: positive iff the configured label column is > 0
        data_dict["labels"] = {
            key: np.int64((value[self.config_dict["label_col"]] > 0).values)
            for key, value in cohort_dict.items()
        }
        return data_dict

    def get_default_config(self):
        """Defaults; callers typically also supply "label_col" via kwargs."""
        return {"batch_size": 256, "iters_per_epoch": 100}

    def override_config(self, **override_dict):
        """Overlay keyword overrides onto the current config_dict."""
        return {**self.config_dict, **override_dict}
class ArrayDataset(Dataset):
    """Dataset wrapping arrays (tensor, numpy, or scipy CSR sparse).

    Samples are retrieved by indexing every wrapped array along its first
    dimension, so all arrays must agree on that dimension's size.

    Arguments:
        tensor_dict: a dictionary of array inputs that have the same size
            in the first dimension
        convert_sparse: whether CSR inputs should be converted to
            torch.SparseTensor when collated
    """

    def __init__(self, tensor_dict, convert_sparse=False):
        self.convert_sparse = convert_sparse
        self.the_len = list(tensor_dict.values())[0].shape[0]
        # Every wrapped array must share the same first-dimension length.
        for array in tensor_dict.values():
            assert array.shape[0] == self.the_len
        self.tensor_dict = tensor_dict

    def __getitem__(self, index):
        sample = {}
        for key, array in self.tensor_dict.items():
            sample[key] = array[index]
        return sample

    def __len__(self):
        return self.the_len

    def collate_fn(self, batch):
        """
        Called by Dataloader to aggregate elements into a batch.
        Delegates to collate_helper for typed aggregation.

        Arguments:
            batch: a list of dictionaries with same keys as self.tensor_dict
        """
        return {
            key: self.collate_helper(tuple(sample[key] for sample in batch))
            for key in batch[0].keys()
        }

    def collate_helper(self, batch):
        """
        Aggregates a tuple of elements of the same type.

        CSR elements are vstacked (and optionally converted to a torch
        sparse tensor); everything else goes through default_collate.
        """
        if not isinstance(batch[0], sp.sparse.csr_matrix):
            return default_collate(batch)
        stacked = sp.sparse.vstack(batch)
        if self.convert_sparse:
            return self.csr_to_tensor(stacked)
        return stacked

    def csr_to_tensor(self, x):
        """
        Converts a scipy CSR matrix to a torch.sparse.FloatTensor.
        """
        coo = x.tocoo()
        indices = torch.LongTensor([coo.row, coo.col])
        values = torch.FloatTensor(coo.data)
        return torch.sparse.FloatTensor(indices, values, torch.Size(coo.shape))
| 34.892548 | 109 | 0.570109 | 2,365 | 20,133 | 4.608879 | 0.098943 | 0.031376 | 0.033395 | 0.025321 | 0.811376 | 0.803119 | 0.799358 | 0.792661 | 0.792661 | 0.792661 | 0 | 0.002196 | 0.344112 | 20,133 | 576 | 110 | 34.953125 | 0.823249 | 0.220682 | 0 | 0.760925 | 0 | 0 | 0.063235 | 0.003188 | 0 | 0 | 0 | 0 | 0.002571 | 1 | 0.061697 | false | 0 | 0.028278 | 0.015424 | 0.1491 | 0.005141 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
1a4fa03587578779ec4cb8f06f740418bb97d87e | 82 | py | Python | utils/__init__.py | thehappy1/Contrastive-Clustering | 9b2f577076f9df00c65a99bb5411f0a94f03d786 | [
"MIT"
] | 164 | 2020-12-09T08:38:12.000Z | 2022-03-17T16:32:20.000Z | utils/__init__.py | TomGoh/Contrastive-Clustering | ea6ecd9281bf67aefe3721003e7390b44c4ca281 | [
"MIT"
] | 32 | 2021-01-12T07:02:53.000Z | 2022-03-16T08:50:05.000Z | utils/__init__.py | TomGoh/Contrastive-Clustering | ea6ecd9281bf67aefe3721003e7390b44c4ca281 | [
"MIT"
] | 47 | 2020-12-10T13:10:32.000Z | 2022-03-19T07:44:14.000Z | from .yaml_config_hook import yaml_config_hook
from .save_model import save_model
| 27.333333 | 46 | 0.878049 | 14 | 82 | 4.714286 | 0.5 | 0.30303 | 0.424242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.097561 | 82 | 2 | 47 | 41 | 0.891892 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
1a5b84883910b659d2da987d642095ff8d96d585 | 9,240 | py | Python | tests/ethereumetl/service/test_token_transfer_v2_extractor.py | BitskiCo/ethereum-etl | cee9004444396ec64f4363e12e5d74508c190d26 | [
"MIT"
] | null | null | null | tests/ethereumetl/service/test_token_transfer_v2_extractor.py | BitskiCo/ethereum-etl | cee9004444396ec64f4363e12e5d74508c190d26 | [
"MIT"
] | null | null | null | tests/ethereumetl/service/test_token_transfer_v2_extractor.py | BitskiCo/ethereum-etl | cee9004444396ec64f4363e12e5d74508c190d26 | [
"MIT"
] | null | null | null | from ethereumetl.domain.receipt_log import EthReceiptLog
from ethereumetl.service.token_transfer_v2_extractor import EthTokenTransferV2Extractor, word_to_address
from ethereumetl.service.token_transfer_v2_extractor import TRANSFER_EVENT_TOPICS, ERC1155_TRANSFER_SINGLE_TOPIC, ERC721_ERC_20_TRANSFER_TOPIC, ERC1155_TRANSFER_BATCH_TOPIC
from ethereumetl.utils import to_normalized_address
token_transfer_extractor = EthTokenTransferV2Extractor()
# https://etherscan.io/tx/0x5ec4c69bcff7ec3f9fbe33b93573c0e81357e36689e606fc070a52831e3586b8#eventlog
def test_extract_transfer_from_receipt_log_erc20():
    """An ERC-20 Transfer log yields one transfer whose amount is the data word."""
    receipt_log = EthReceiptLog()
    receipt_log.address = '0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48'
    receipt_log.block_number = 14051054
    receipt_log.log_index = 0
    receipt_log.transaction_hash = '0x5ec4c69bcff7ec3f9fbe33b93573c0e81357e36689e606fc070a52831e3586b8'
    receipt_log.topics = [
        ERC721_ERC_20_TRANSFER_TOPIC,
        '0x0000000000000000000000007a686933fc67023aabd424f35ad0b883332e2222',
        '0x00000000000000000000000016011b51e022766c352b29b0c1ed423489f4d3ca',
    ]
    receipt_log.data = '0x0000000000000000000000000000000000000000000000000000000002faf080'

    transfers = token_transfer_extractor.extract_transfer_from_log(receipt_log)
    assert len(transfers) == 1
    transfer = transfers[0]
    assert transfer.token_type == "ERC20"
    # ERC-20 has no token id; the extractor reports a constant id of 1.
    assert transfer.token_id == '0x0000000000000000000000000000000000000000000000000000000000000001'
    assert transfer.amount == '0x0000000000000000000000000000000000000000000000000000000002faf080'
    assert transfer.block_number == 14051054
    assert transfer.from_address == word_to_address('0x0000000000000000000000007a686933fc67023aabd424f35ad0b883332e2222')
    assert transfer.to_address == word_to_address('0x00000000000000000000000016011b51e022766c352b29b0c1ed423489f4d3ca')
    assert transfer.contract_address == to_normalized_address('0xa0b86991c6218b36c1d19d4a2e9eb0ce3606eb48')
    assert transfer.transaction_hash == '0x5ec4c69bcff7ec3f9fbe33b93573c0e81357e36689e606fc070a52831e3586b8'
    assert transfer.log_index == 0
# https://etherscan.io/tx/0x9fb4dd639dd74a24c8b1253a6199da294d08ce7587ada810c72fe89bc2225510#eventlog
def test_extract_transfer_from_receipt_log_erc721():
    """An ERC-721 Transfer log (token id in topic 3, empty data) yields one transfer of amount 1."""
    receipt_log = EthReceiptLog()
    receipt_log.address = '0x716039ab9ce2780e35450b86dc6420f22460c380'
    receipt_log.block_number = 14051620
    receipt_log.log_index = 0
    receipt_log.transaction_hash = '0xd62a74c7b04e8e0539398f6ba6a5eb11ad8aa862e77f0af718f0fad19b0b0480'
    receipt_log.topics = [
        ERC721_ERC_20_TRANSFER_TOPIC,
        '0x000000000000000000000000b5fdfbbddc872d08d0203cd6d69d5ce67eb4c761',
        '0x00000000000000000000000040b060a0ac95db3d5211b687511632b46c5d3bb7',
        '0x0000000000000000000000000000000000000000000000000000000000000735',
    ]
    receipt_log.data = '0x'

    transfers = token_transfer_extractor.extract_transfer_from_log(receipt_log)
    assert len(transfers) == 1
    transfer = transfers[0]
    assert transfer.token_type == "ERC721"
    assert transfer.token_id == '0x0000000000000000000000000000000000000000000000000000000000000735'
    # NFTs always move exactly one unit.
    assert transfer.amount == '0x0000000000000000000000000000000000000000000000000000000000000001'
    assert transfer.block_number == 14051620
    assert transfer.from_address == word_to_address('0x000000000000000000000000b5fdfbbddc872d08d0203cd6d69d5ce67eb4c761')
    assert transfer.to_address == word_to_address('0x00000000000000000000000040b060a0ac95db3d5211b687511632b46c5d3bb7')
    assert transfer.contract_address == to_normalized_address('0x716039ab9ce2780e35450b86dc6420f22460c380')
    assert transfer.transaction_hash == '0xd62a74c7b04e8e0539398f6ba6a5eb11ad8aa862e77f0af718f0fad19b0b0480'
    assert transfer.log_index == 0
# https://etherscan.io/tx/0xd72e66497d1614eff8136898043c22ad1d7c88e2831c57866fa5683430ef37c1#eventlog
def test_extract_transfer_from_receipt_log_erc1155_single():
    """An ERC-1155 TransferSingle log (operator/from/to topics, id+value in data) yields one transfer."""
    receipt_log = EthReceiptLog()
    receipt_log.address = '0x25c6413359059694A7FCa8e599Ae39Ce1C944Da2'
    receipt_log.block_number = 1061946
    receipt_log.log_index = 0
    receipt_log.transaction_hash = '0xd62a74c7b04e8e0539398f6ba6a5eb11ad8aa862e77f0af718f0fad19b0b0480'
    receipt_log.topics = [
        ERC1155_TRANSFER_SINGLE_TOPIC,
        '0x0000000000000000000000004fee7b061c97c9c496b01dbce9cdb10c02f0a0be',
        '0x000000000000000000000000ab3e5a900663ea8c573b8f893d540d331fbab9f5',
        '0x0000000000000000000000006a36f56e0a1bc32e187408f1651195d58cf688bd',
    ]
    # Data carries two 32-byte words: token id (2) then value (4).
    receipt_log.data = '0x00000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000004'

    transfers = token_transfer_extractor.extract_transfer_from_log(receipt_log)
    assert len(transfers) == 1
    transfer = transfers[0]
    assert transfer.token_type == "ERC1155"
    assert transfer.token_id == '0x0000000000000000000000000000000000000000000000000000000000000002'
    assert transfer.amount == '0x0000000000000000000000000000000000000000000000000000000000000004'
    assert transfer.block_number == 1061946
    # Topic 1 is the operator; from/to are topics 2 and 3.
    assert transfer.from_address == word_to_address('0x000000000000000000000000ab3e5a900663ea8c573b8f893d540d331fbab9f5')
    assert transfer.to_address == word_to_address('0x0000000000000000000000006a36f56e0a1bc32e187408f1651195d58cf688bd')
    assert transfer.contract_address == to_normalized_address('0x25c6413359059694A7FCa8e599Ae39Ce1C944Da2')
    assert transfer.transaction_hash == '0xd62a74c7b04e8e0539398f6ba6a5eb11ad8aa862e77f0af718f0fad19b0b0480'
    assert transfer.log_index == 0
# https://etherscan.io/tx/0xca0a113c842a1305a49107ed7b9ebef69ccca9bee2a06d5c8230cedf72284498#eventlog
def test_extract_transfer_from_receipt_log_erc1155_batch():
    """An ERC-1155 TransferBatch log (ids 1..10, each with value 1) yields ten transfers."""
    receipt_log = EthReceiptLog()
    receipt_log.address = '0x6cad6e1abc83068ea98924aef37e996ed02abf1c'
    receipt_log.block_number = 1061946
    receipt_log.log_index = 0
    receipt_log.transaction_hash = '0xd62a74c7b04e8e0539398f6ba6a5eb11ad8aa862e77f0af718f0fad19b0b0480'
    receipt_log.topics = [
        ERC1155_TRANSFER_BATCH_TOPIC,
        '0x0000000000000000000000005bd25d2f4f26bc82a34de016d34612a28a0cd492',
        '0x0000000000000000000000000000000000000000000000000000000000000000',
        '0x000000000000000000000000991f3775c81d6f8331b9a812eda34ea48a7ea76d',
    ]
    # ABI-encoded pair of dynamic arrays: ids [1..10] and values [1]*10.
    receipt_log.data = '0x000000000000000000000000000000000000000000000000000000000000004000000000000000000000000000000000000000000000000000000000000001a0000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000003000000000000000000000000000000000000000000000000000000000000000400000000000000000000000000000000000000000000000000000000000000050000000000000000000000000000000000000000000000000000000000000006000000000000000000000000000000000000000000000000000000000000000700000000000000000000000000000000000000000000000000000000000000080000000000000000000000000000000000000000000000000000000000000009000000000000000000000000000000000000000000000000000000000000000a000000000000000000000000000000000000000000000000000000000000000a0000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001000000000000000000000000000000000000000000000000000000000000000100000000000000000000000000000000000000000000000000000000000000010000000000000000000000000000000000000000000000000000000000000001'

    transfers = token_transfer_extractor.extract_transfer_from_log(receipt_log)
    assert len(transfers) == 10
    for idx, transfer in enumerate(transfers):
        assert transfer.token_type == "ERC1155"
        assert transfer.token_id == '0x%064x' % (idx + 1)
        assert transfer.amount == '0x0000000000000000000000000000000000000000000000000000000000000001'
        assert transfer.block_number == 1061946
        # Mint: the zero address is the sender.
        assert transfer.from_address == word_to_address('0x0000000000000000000000000000000000000000000000000000000000000000')
        assert transfer.to_address == word_to_address('0x000000000000000000000000991f3775c81d6f8331b9a812eda34ea48a7ea76d')
        assert transfer.contract_address == to_normalized_address('0x6cad6e1abc83068ea98924aef37e996ed02abf1c')
        assert transfer.transaction_hash == '0xd62a74c7b04e8e0539398f6ba6a5eb11ad8aa862e77f0af718f0fad19b0b0480'
        assert transfer.log_index == 0
def word_to_address(param):
    """Extract the address from a 32-byte log word: the last 40 hex chars, normalized.

    Short inputs are normalized as-is; ``None`` passes through.
    """
    # NOTE(review): this local definition shadows the word_to_address imported
    # at the top of the file — confirm which one is intended and drop the other.
    if param is None:
        return None
    if len(param) < 40:
        return to_normalized_address(param)
    return to_normalized_address('0x' + param[-40:])
| 78.305085 | 1,555 | 0.846104 | 611 | 9,240 | 12.461538 | 0.155483 | 0.082742 | 0.094563 | 0.074468 | 0.376412 | 0.310087 | 0.256107 | 0.256107 | 0.23089 | 0.161151 | 0 | 0.486185 | 0.099134 | 9,240 | 117 | 1,556 | 78.974359 | 0.42852 | 0.042424 | 0 | 0.27 | 0 | 0 | 0.492081 | 0.488009 | 0 | 1 | 0.488009 | 0 | 0.4 | 1 | 0.05 | false | 0 | 0.04 | 0 | 0.12 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
1abd70b6a21fa73e515aebb804867ab07d1b7c06 | 6,066 | py | Python | christoffel.py | ronekko/differential_geometry | bee5f8d0c13a3900835fdd6fda251e6022f3cac6 | [
"MIT"
] | null | null | null | christoffel.py | ronekko/differential_geometry | bee5f8d0c13a3900835fdd6fda251e6022f3cac6 | [
"MIT"
] | null | null | null | christoffel.py | ronekko/differential_geometry | bee5f8d0c13a3900835fdd6fda251e6022f3cac6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 17 14:59:08 2021
@author: ryuhei
"""
import matplotlib.pyplot as plt # type: ignore
import numpy as np # type: ignore
import torch
def compute_christoffel(f, u):
    r"""Compute the Christoffel symbols (second kind) of the surface f at u.

    Parameters
    ----------
    f : function: torch.tensor of shape (2,) -> torch.tensor of shape (m,)
        Coordinate transform from the 2-D parameter domain U into ambient
        space X (m = 3 for a surface in R^3; any m >= 2 works).
    u : An array-like of length 2.
        Point u at which the symbols are evaluated.

    Returns
    -------
    numpy.ndarray of shape (2, 2, 2)
        ``christoffel[i, j, k]`` is $\Gamma_{ij}^k$ at u.
    """
    # Parameter coordinates as independent leaf variables so both first
    # derivatives (tangent basis) and second derivatives (metric gradients)
    # can be taken.
    u_vars = [torch.tensor([float(c)], requires_grad=True) for c in (u[0], u[1])]
    x = f(torch.cat(u_vars)).reshape(-1)

    # Tangent basis e_i = dx/du^i, kept on the graph (create_graph=True)
    # so that the metric below is itself differentiable.
    partials = [
        torch.autograd.grad(x[a], u_vars, retain_graph=True, create_graph=True)
        for a in range(x.numel())
    ]
    e = [torch.cat([p[i] for p in partials]) for i in range(2)]

    # Induced metric g_ij = <e_i, e_j> and its inverse g^{kl}.
    g = [[e[i].dot(e[j]) for j in range(2)] for i in range(2)]
    g_inv = torch.stack([torch.stack(row) for row in g]).inverse()

    # dg[i][j][l] = dg_ij/du^l.  A metric component that does not depend on
    # some coordinate comes back as None (allow_unused); treat it as zero
    # instead of crashing on it.
    dg = []
    for i in range(2):
        row = []
        for j in range(2):
            grads = torch.autograd.grad(g[i][j], u_vars,
                                        retain_graph=True, allow_unused=True)
            row.append([d if d is not None else torch.zeros(1) for d in grads])
        dg.append(row)

    # Gamma_ij^k = 1/2 g^{kl} (d_i g_lj + d_j g_il - d_l g_ij)
    christoffel = np.empty((2, 2, 2), dtype=np.float32)
    for i in range(2):
        for j in range(2):
            diff = torch.cat([dg[l][j][i] + dg[i][l][j] - dg[i][j][l]
                              for l in range(2)])
            christoffel[i, j] = (0.5 * g_inv.matmul(diff)).detach().numpy()
    return christoffel
def compute_christoffel_2d_to_2d(f, u):
    r"""Compute the Christoffel symbols (second kind) of a planar transform f at u.

    Parameters
    ----------
    f : function: torch.tensor of shape (2,) -> torch.tensor of shape (2,)
        Coordinate transform from U to X.  (The implementation actually
        accepts any output dimension; 2 is the documented use case.)
    u : An array-like of length 2.
        Point u at which the symbols are evaluated.

    Returns
    -------
    numpy.ndarray of shape (2, 2, 2)
        ``christoffel[i, j, k]`` is $\Gamma_{ij}^k$ at u.
    """
    # Parameter coordinates as independent leaf variables.
    u_vars = [torch.tensor([float(c)], requires_grad=True) for c in (u[0], u[1])]
    x = f(torch.cat(u_vars)).reshape(-1)

    # Basis vectors e_i = dx/du^i, kept differentiable (create_graph=True)
    # so the metric gradients below can be taken.
    partials = [
        torch.autograd.grad(x[a], u_vars, retain_graph=True, create_graph=True)
        for a in range(x.numel())
    ]
    e = [torch.cat([p[i] for p in partials]) for i in range(2)]

    # Metric g_ij = <e_i, e_j> and its inverse g^{kl}.
    g = [[e[i].dot(e[j]) for j in range(2)] for i in range(2)]
    g_inv = torch.stack([torch.stack(row) for row in g]).inverse()

    # dg[i][j][l] = dg_ij/du^l.  allow_unused can yield None when g_ij does
    # not depend on u^l (e.g. polar coordinates); substitute zero.
    dg = []
    for i in range(2):
        row = []
        for j in range(2):
            grads = torch.autograd.grad(g[i][j], u_vars,
                                        retain_graph=True, allow_unused=True)
            row.append([d if d is not None else torch.zeros(1) for d in grads])
        dg.append(row)

    # Gamma_ij^k = 1/2 g^{kl} (d_i g_lj + d_j g_il - d_l g_ij)
    christoffel = np.empty((2, 2, 2), dtype=np.float32)
    for i in range(2):
        for j in range(2):
            diff = torch.cat([dg[l][j][i] + dg[i][l][j] - dg[i][j][l]
                              for l in range(2)])
            christoffel[i, j] = (0.5 * g_inv.matmul(diff)).detach().numpy()
    return christoffel
def spherical_to_cartesian(u, radius=1.0):
    """Map spherical coordinates to Cartesian points on a sphere.

    The last axis of ``u`` holds (theta, phi) pairs — theta is the polar
    angle from +z, phi the azimuth.  Returns a tensor with (x, y, z)
    along the last axis, scaled by ``radius``.
    """
    if not isinstance(u, torch.Tensor):
        u = torch.tensor(u)
    transposed = u.T
    theta, phi = transposed[0], transposed[1]
    sin_theta = torch.sin(theta)
    components = (
        radius * sin_theta * torch.cos(phi),
        radius * sin_theta * torch.sin(phi),
        radius * torch.cos(theta),
    )
    return torch.vstack(components).T
if __name__ == '__main__':
    # Demo: Christoffel symbols of the unit sphere at (theta, phi) = (0.4, 0).
    point = np.array([0.4, 0], dtype=np.float32)
    symbols = compute_christoffel(spherical_to_cartesian, point)
    print(symbols)
| 33.147541 | 76 | 0.572535 | 941 | 6,066 | 3.467588 | 0.128587 | 0.091021 | 0.055777 | 0.071713 | 0.844009 | 0.837879 | 0.837879 | 0.837879 | 0.837879 | 0.837879 | 0 | 0.12463 | 0.276459 | 6,066 | 182 | 77 | 33.32967 | 0.61882 | 0.108968 | 0 | 0.762295 | 0 | 0 | 0.00151 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02459 | false | 0 | 0.02459 | 0 | 0.07377 | 0.008197 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
46dcee79a748d14cfe1170364932ba67b0d0fb79 | 5,521 | py | Python | tests/test_columns.py | eeroel/sqllineage | 568b76eee83c390639a017167b2ec1a24414277e | [
"MIT"
] | null | null | null | tests/test_columns.py | eeroel/sqllineage | 568b76eee83c390639a017167b2ec1a24414277e | [
"MIT"
] | null | null | null | tests/test_columns.py | eeroel/sqllineage | 568b76eee83c390639a017167b2ec1a24414277e | [
"MIT"
] | null | null | null | from .helpers import assert_column_lineage_equal
def test_select_column():
    """Plain and aliased single-column SELECTs map source column to target column."""
    for query, expected in [
        (
            """INSERT OVERWRITE TABLE tab1
SELECT col1
FROM tab2""",
            [("tab2.col1", "tab1.col1")],
        ),
        (
            """INSERT OVERWRITE TABLE tab1
SELECT col1 AS col2
FROM tab2""",
            [("tab2.col1", "tab1.col2")],
        ),
        (
            """INSERT OVERWRITE TABLE tab1
SELECT tab2.col1 AS col2
FROM tab2""",
            [("tab2.col1", "tab1.col2")],
        ),
    ]:
        assert_column_lineage_equal(query, expected)
def test_select_column_wildcard():
    """SELECT * propagates a wildcard lineage from every source table."""
    for query, expected in [
        (
            """INSERT OVERWRITE TABLE tab1
SELECT *
FROM tab2""",
            [("tab2.*", "tab1.*")],
        ),
        (
            """INSERT OVERWRITE TABLE tab1
SELECT *
FROM tab2 a
INNER JOIN tab3 b
ON a.id = b.id""",
            [("tab2.*", "tab1.*"), ("tab3.*", "tab1.*")],
        ),
    ]:
        assert_column_lineage_equal(query, expected)
def test_select_column_using_function():
    """Function calls keep lineage to their argument column, aliased or not."""
    for query, expected in [
        (
            """INSERT OVERWRITE TABLE tab1
SELECT max(col1)
FROM tab2""",
            [("tab2.col1", "tab1.max(col1)")],
        ),
        (
            """INSERT OVERWRITE TABLE tab1
SELECT max(col1) AS col2
FROM tab2""",
            [("tab2.col1", "tab1.col2")],
        ),
        (
            """INSERT OVERWRITE TABLE tab1
SELECT cast(col1 as timestamp)
FROM tab2""",
            [("tab2.col1", "tab1.cast(col1 as timestamp)")],
        ),
        (
            """INSERT OVERWRITE TABLE tab1
SELECT cast(col1 as timestamp) as col2
FROM tab2""",
            [("tab2.col1", "tab1.col2")],
        ),
    ]:
        assert_column_lineage_equal(query, expected)
def test_select_column_using_window_function():
    """Window functions feed both partition and order columns into the alias."""
    query = """INSERT OVERWRITE TABLE tab1
SELECT row_number() OVER (PARTITION BY col1 ORDER BY col2 DESC) AS rnum
FROM tab2"""
    expected = [("tab2.col1", "tab1.rnum"), ("tab2.col2", "tab1.rnum")]
    assert_column_lineage_equal(query, expected)
def test_select_column_using_expression():
    """Arithmetic expressions link every referenced column to the result column."""
    for query, expected in [
        (
            """INSERT OVERWRITE TABLE tab1
SELECT col1 + col2
FROM tab2""",
            [("tab2.col1", "tab1.col1 + col2"), ("tab2.col2", "tab1.col1 + col2")],
        ),
        (
            """INSERT OVERWRITE TABLE tab1
SELECT col1 + col2 AS col3
FROM tab2""",
            [("tab2.col1", "tab1.col3"), ("tab2.col2", "tab1.col3")],
        ),
    ]:
        assert_column_lineage_equal(query, expected)
def test_select_column_using_case_when():
    """CASE WHEN expressions keep lineage to the tested column, aliased or not."""
    for query, expected in [
        (
            """INSERT OVERWRITE TABLE tab1
SELECT CASE WHEN col1 = 1 THEN "V1" WHEN col1 = 2 THEN "V2" END
FROM tab2""",
            [
                (
                    "tab2.col1",
                    'tab1.CASE WHEN col1 = 1 THEN "V1" WHEN col1 = 2 THEN "V2" END',
                )
            ],
        ),
        (
            """INSERT OVERWRITE TABLE tab1
SELECT CASE WHEN col1 = 1 THEN "V1" WHEN col1 = 2 THEN "V2" END AS col2
FROM tab2""",
            [("tab2.col1", "tab1.col2")],
        ),
    ]:
        assert_column_lineage_equal(query, expected)
def test_select_column_with_table_prefix():
    """Table- or alias-qualified columns resolve to the underlying source table."""
    for query, expected in [
        (
            """INSERT OVERWRITE TABLE tab1
SELECT tab2.col1
FROM tab2""",
            [("tab2.col1", "tab1.col1")],
        ),
        (
            """INSERT OVERWRITE TABLE tab1
SELECT t.col1
FROM tab2 AS t""",
            [("tab2.col1", "tab1.col1")],
        ),
    ]:
        assert_column_lineage_equal(query, expected)
def test_select_columns():
    """Multiple select-list items each get their own lineage edge."""
    for query, expected in [
        (
            """INSERT OVERWRITE TABLE tab1
SELECT col1,
col2
FROM tab2""",
            [("tab2.col1", "tab1.col1"), ("tab2.col2", "tab1.col2")],
        ),
        (
            """INSERT OVERWRITE TABLE tab1
SELECT max(col1),
max(col2)
FROM tab2""",
            [("tab2.col1", "tab1.max(col1)"), ("tab2.col2", "tab1.max(col2)")],
        ),
    ]:
        assert_column_lineage_equal(query, expected)
def test_select_column_in_subquery():
    """Columns selected through a subquery (with or without alias) trace to the inner table."""
    for query in [
        """INSERT OVERWRITE TABLE tab1
SELECT col1
FROM (SELECT col1 FROM tab2) dt""",
        """INSERT OVERWRITE TABLE tab1
SELECT col1
FROM (SELECT col1, col2 FROM tab2) dt""",
        """INSERT OVERWRITE TABLE tab1
SELECT col1
FROM (SELECT col1 FROM tab2)""",
    ]:
        assert_column_lineage_equal(query, [("tab2.col1", "tab1.col1")])
def test_select_column_from_table_join():
    """Qualified columns from joined tables resolve per table, including aliases."""
    for query, expected in [
        (
            """INSERT OVERWRITE TABLE tab1
SELECT tab2.col1,
tab3.col2
FROM tab2
INNER JOIN tab3
ON tab2.id = tab3.id""",
            [("tab2.col1", "tab1.col1"), ("tab3.col2", "tab1.col2")],
        ),
        (
            """INSERT OVERWRITE TABLE tab1
SELECT tab2.col1 AS col3,
tab3.col2 AS col4
FROM tab2
INNER JOIN tab3
ON tab2.id = tab3.id""",
            [("tab2.col1", "tab1.col3"), ("tab3.col2", "tab1.col4")],
        ),
        (
            """INSERT OVERWRITE TABLE tab1
SELECT a.col1 AS col3,
b.col2 AS col4
FROM tab2 a
INNER JOIN tab3 b
ON a.id = b.id""",
            [("tab2.col1", "tab1.col3"), ("tab3.col2", "tab1.col4")],
        ),
    ]:
        assert_column_lineage_equal(query, expected)
def test_select_column_without_table_prefix_from_table_join():
    """An unqualified column in a join is ambiguous, so its source table stays unresolved."""
    query = """INSERT OVERWRITE TABLE tab1
SELECT col1
FROM tab2 a
INNER JOIN tab3 b
ON a.id = b.id"""
    assert_column_lineage_equal(query, [("col1", "tab1.col1")])
def test_select_column_from_same_table_multiple_time_using_different_alias():
    """A self-join yields one lineage edge per alias, both pointing at the same table."""
    query = """INSERT OVERWRITE TABLE tab1
SELECT a.col1 AS col2,
b.col1 AS col3
FROM tab2 a
JOIN tab2 b
ON a.parent_id = b.id"""
    expected = [("tab2.col1", "tab1.col2"), ("tab2.col1", "tab1.col3")]
    assert_column_lineage_equal(query, expected)
| 29.524064 | 85 | 0.643724 | 762 | 5,521 | 4.480315 | 0.086614 | 0.065612 | 0.150264 | 0.189807 | 0.865261 | 0.82894 | 0.82894 | 0.811072 | 0.719977 | 0.642648 | 0 | 0.062067 | 0.214997 | 5,521 | 186 | 86 | 29.682796 | 0.725658 | 0 | 0 | 0.546584 | 0 | 0.006211 | 0.522731 | 0 | 0 | 0 | 0 | 0 | 0.167702 | 1 | 0.074534 | false | 0 | 0.006211 | 0 | 0.080745 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.