| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | int64 |
| qsc_code_cate_encoded_data | null |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
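As a minimal illustration of how this schema might be used, the sketch below filters rows on a few of the quality signals. It assumes the rows have been materialized as a pandas DataFrame; the parquet filename and the thresholds are placeholders, not anything defined by the dataset itself.

```python
import pandas as pd

# Hypothetical: load a slice of the dataset into a DataFrame.
# "code_rows.parquet" is a placeholder path, not an official artifact.
df = pd.read_parquet("code_rows.parquet")

# Keep Python files that look like hand-written code rather than
# auto-generated or highly repetitive blobs. Thresholds are illustrative only.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_frac_chars_dupe_10grams_quality_signal"] < 0.9)
    & (df["qsc_code_frac_chars_alphabet_quality_signal"] > 0.5)
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
)
clean = df.loc[mask, ["hexsha", "max_stars_repo_name", "content"]]
print(len(clean), "rows kept")
```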
Record 1: python/torx/module/layer.py
- hexsha: e0217748a0139f68548c7994754958fd143434d5
- size: 14,926 bytes; ext: py; lang: Python
- max_stars / max_issues / max_forks repo_path: python/torx/module/layer.py
- repo_name: ASU-ESIC-FAN-Lab/pytorx
- repo_head_hexsha: 6926895e75e8b383c2eba73c2a409da163f62ab9
- repo_licenses: ["Apache-2.0"]
- max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
- content:
import math
import torch
import torch.nn.functional as F
import torch.nn as nn
class crxb_Conv2d(nn.Conv2d):
def __init__(self, in_channels, out_channels, kernel_size, stride=1,
padding=0, dilation=1, groups=1, bias=True, crxb_size=64,
quantize=8, enable_ec_SAF=False):
super(crxb_Conv2d, self).__init__(in_channels, out_channels, kernel_size,
stride, padding, dilation, groups, bias)
assert self.groups == 1, "grouped convolution is currently not supported for this custom conv"
################## Crossbar conversion #############################
self.crxb_size = crxb_size
self.enable_ec_SAF = enable_ec_SAF
self.nchout_index = nn.Parameter(torch.arange(
self.out_channels), requires_grad=False)
weight_flatten = self.weight.view(self.out_channels, -1)
self.crxb_row, self.crxb_row_pads = self.num_pad(
weight_flatten.shape[1], self.crxb_size)
self.crxb_col, self.crxb_col_pads = self.num_pad(
weight_flatten.shape[0], self.crxb_size)
self.h_out = None
self.w_out = None
self.w_pad = (0, self.crxb_row_pads, 0, self.crxb_col_pads)
self.input_pad = (0, 0, 0, self.crxb_row_pads)
weight_padded = F.pad(weight_flatten, self.w_pad,
mode='constant', value=0)
weight_crxb = weight_padded.view(self.crxb_col, self.crxb_size,
self.crxb_row, self.crxb_size).transpose(1, 2)
################# Hardware conversion ##############################
# weight and input levels
self.n_lvl = 2**8
self.h_lvl = (self.n_lvl-2)/2
# ReRAM cells
self.Gmax = 1/3000 # max conductance
self.Gmin = 1/3e6 # min conductance
self.delta_g = (self.Gmax-self.Gmin)/(2**7) # conductance step
self.w2g = w2g(self.delta_g, Gmin=self.Gmin, G_SA0=self.Gmax,
G_SA1=self.Gmin, weight_shape=weight_crxb.shape)
# DAC
self.Vdd = 3.3 # unit: volt
self.delta_v = self.Vdd/(self.n_lvl-1)
self.delta_in_sum = nn.Parameter(torch.Tensor(1), requires_grad=False)
self.delta_out_sum = nn.Parameter(torch.Tensor(1), requires_grad=False)
self.counter = nn.Parameter(torch.Tensor(1), requires_grad=False)
# self.max_i_LSB = ((self.Vdd/2)*self.Gmax*self.crxb_size)/self.h_lvl
def num_pad(self, source, target):
crxb_index = math.ceil(source/target)
num_padding = crxb_index * target - source
return crxb_index, num_padding
def forward(self, input):
# 1. input data and weight quantization
with torch.no_grad():
self.delta_w = self.weight.abs().max()/self.h_lvl
if self.training:
self.counter.data += 1
self.delta_x = input.abs().max()/self.h_lvl
self.delta_in_sum.data += self.delta_x
else:
self.delta_x = self.delta_in_sum.data/self.counter.data
input_clip = F.hardtanh(input, min_val=-self.h_lvl*self.delta_x.item(),
max_val=self.h_lvl*self.delta_x.item())
input_quan = quantize_input(
input_clip, self.delta_x)*self.delta_v # convert to voltage
weight_quan = quantize_weight(self.weight, self.delta_w)
# 2. Perform the computation between input voltage and weight conductance
if self.h_out is None and self.w_out is None:
self.h_out = int(
(input.shape[2]-self.kernel_size[0]+2*self.padding[0])/self.stride[0] + 1)
self.w_out = int(
(input.shape[3]-self.kernel_size[0]+2*self.padding[0])/self.stride[0] + 1)
# 2.1 flatten and unfold the weight and input
input_unfold = F.unfold(input_quan, kernel_size=self.kernel_size[0],
dilation=self.dilation, padding=self.padding,
stride=self.stride)
weight_flatten = weight_quan.view(self.out_channels, -1)
# 2.2. add paddings
weight_padded = F.pad(weight_flatten, self.w_pad,
mode='constant', value=0)
input_padded = F.pad(input_unfold, self.input_pad,
mode='constant', value=0)
# 2.3. reshape to crxb size
input_crxb = input_padded.view(input.shape[0], 1, self.crxb_row,
self.crxb_size, input_padded.shape[2])
weight_crxb = weight_padded.view(self.crxb_col, self.crxb_size,
self.crxb_row, self.crxb_size).transpose(1, 2)
# convert the floating point weight into conductance pair values
G_crxb = self.w2g(weight_crxb)
# 2.4. compute matrix multiplication followed by reshapes
if ir_drop:
from IR_solver import IrSolver
crxb_pos = IrSolver(Rsize=self.crxb_size,
Csize=self.crxb_size,
Gwire=self.Gwire,
Gload=self.Gload,
input_x=input_crxb.permute(3, 0, 1, 2, 4),
Gmat=G_crxb[0].permute(3, 2, 0, 1),
device=device)
crxb_pos.resetcoo()
crxb_neg = IrSolver(Rsize=self.crxb_size,
Csize=self.crxb_size,
Gwire=self.Gwire,
Gload=self.Gload,
input_x=input_crxb.permute(3, 0, 1, 2, 4),
Gmat=G_crxb[1].permute(3, 2, 0, 1),
device=device)
crxb_neg.resetcoo()
output_crxb = (crxb_pos.caliout() - crxb_neg.caliout())
output_crxb = output_crxb.contiguous().view(self.crxb_col, self.crxb_row, self.crxb_size,
input.shape[0],
input_padded.shape[2])
output_crxb = output_crxb.permute(3, 0, 1, 2, 4)
else:
output_crxb = torch.matmul(G_crxb[0], input_crxb) - \
torch.matmul(G_crxb[1], input_crxb)
# perform ADC operation (i.e., current to digital conversion)
with torch.no_grad():
if self.training:
self.delta_i = output_crxb.abs().max()/(self.h_lvl)
self.delta_out_sum.data += self.delta_i
else:
self.delta_i = self.delta_out_sum.data/self.counter.data
self.delta_y = self.delta_w*self.delta_x * \
self.delta_i/(self.delta_v*self.delta_g)
# print('adc LSB ration:', self.delta_i/self.max_i_LSB)
output_clip = F.hardtanh(output_crxb, min_val=-self.h_lvl*self.delta_i.item(),
max_val=self.h_lvl*self.delta_i.item())
output_adc = adc(output_clip, self.delta_i, self.delta_y)
if self.w2g.enable_SAF:
if self.enable_ec_SAF:
G_pos_diff, G_neg_diff = self.w2g.error_compensation()
ec_scale = self.delta_y/self.delta_i
output_adc += (torch.matmul(G_pos_diff, input_crxb)
- torch.matmul(G_neg_diff, input_crxb))*ec_scale
output_sum = torch.sum(output_adc, dim=2)
output = output_sum.view(output_sum.shape[0],
output_sum.shape[1]*output_sum.shape[2],
self.h_out,
self.w_out).index_select(dim=1, index=self.nchout_index)
if self.bias is not None:
output += self.bias.unsqueeze(1).unsqueeze(1)
return output
def _reset_delta(self):
self.delta_in_sum.data[0] = 0
self.delta_out_sum.data[0] = 0
self.counter.data[0] = 0
class crxb_Linear(nn.Linear):
def __init__(self, in_features, out_features, bias=True, crxb_size=64,
quantize=8, enable_ec_SAF=False):
super(crxb_Linear, self).__init__(in_features, out_features, bias)
################## Crossbar conversion #############################
self.crxb_size = crxb_size
self.enable_ec_SAF = enable_ec_SAF
self.out_index = nn.Parameter(
torch.arange(out_features), requires_grad=False)
self.crxb_row, self.crxb_row_pads = self.num_pad(
self.weight.shape[1], self.crxb_size)
self.crxb_col, self.crxb_col_pads = self.num_pad(
self.weight.shape[0], self.crxb_size)
self.w_pad = (0, self.crxb_row_pads, 0, self.crxb_col_pads)
self.input_pad = (0, self.crxb_row_pads)
weight_padded = F.pad(self.weight, self.w_pad,
mode='constant', value=0)
weight_crxb = weight_padded.view(self.crxb_col, self.crxb_size,
self.crxb_row, self.crxb_size).transpose(1, 2)
################# Hardware conversion ##############################
# weight and input levels
self.n_lvl = 2**8
self.h_lvl = (self.n_lvl-2)/2
# ReRAM cells
self.Gmax = 1/3000 # max conductance
self.Gmin = 1/3e6 # min conductance
self.delta_g = (self.Gmax-self.Gmin)/(2**7) # conductance step
self.w2g = w2g(self.delta_g, Gmin=self.Gmin, G_SA0=self.Gmax,
G_SA1=self.Gmin, weight_shape=weight_crxb.shape)
# DAC
self.Vdd = 3.3 # unit: volt
self.delta_v = self.Vdd/(self.n_lvl-1)
self.delta_in_sum = nn.Parameter(torch.Tensor(1), requires_grad=False)
self.delta_out_sum = nn.Parameter(torch.Tensor(1), requires_grad=False)
self.counter = nn.Parameter(torch.Tensor(1), requires_grad=False)
# self.max_i_LSB = ((self.Vdd/2)*self.Gmax*self.crxb_size)/self.h_lvl
def num_pad(self, source, target):
crxb_index = math.ceil(source/target)
num_padding = crxb_index * target - source
return crxb_index, num_padding
def forward(self, input):
# 1. input data and weight quantization
with torch.no_grad():
self.delta_w = self.weight.abs().max()/self.h_lvl
if self.training:
self.counter.data += 1
self.delta_x = input.abs().max()/self.h_lvl
self.delta_in_sum.data += self.delta_x
else:
self.delta_x = self.delta_in_sum.data/self.counter.data
input_clip = F.hardtanh(input, min_val=-self.h_lvl*self.delta_x.item(),
max_val=self.h_lvl*self.delta_x.item())
input_quan = quantize_input(
input_clip, self.delta_x)*self.delta_v # convert to voltage
weight_quan = quantize_weight(self.weight, self.delta_w)
# 2. Perform the computation between input voltage and weight conductance
# 2.1. skip the input unfold and weight flatten for fully-connected layers
# 2.2. add padding
weight_padded = F.pad(weight_quan, self.w_pad,
mode='constant', value=0)
input_padded = F.pad(input_quan, self.input_pad,
mode='constant', value=0)
# 2.3. reshape
input_crxb = input_padded.view(
input.shape[0], 1, self.crxb_row, self.crxb_size, 1)
weight_crxb = weight_padded.view(self.crxb_col, self.crxb_size,
self.crxb_row, self.crxb_size).transpose(1, 2)
# convert the floating point weight into conductance pair values
G_crxb = self.w2g(weight_crxb)
# 2.4. compute matrix multiplication
if ir_drop:
from IR_solver import IrSolver
crxb_pos = IrSolver(Rsize=self.crxb_size,
Csize=self.crxb_size,
Gwire=self.Gwire,
Gload=self.Gload,
input_x=input_crxb.permute(3, 0, 1, 2, 4),
Gmat=G_crxb[0].permute(3, 2, 0, 1),
device=device)
crxb_pos.resetcoo()
crxb_neg = IrSolver(Rsize=self.crxb_size,
Csize=self.crxb_size,
Gwire=self.Gwire,
Gload=self.Gload,
input_x=input_crxb.permute(3, 0, 1, 2, 4),
Gmat=G_crxb[1].permute(3, 2, 0, 1),
device=device)
crxb_neg.resetcoo()
output_crxb = (crxb_pos.caliout() - crxb_neg.caliout())
output_crxb = output_crxb.contiguous().view(self.crxb_col,
self.crxb_row,
self.crxb_size,
input.shape[0],
1)
output_crxb = output_crxb.permute(3, 0, 1, 2, 4)
else:
output_crxb = torch.matmul(G_crxb[0], input_crxb) \
- torch.matmul(G_crxb[1], input_crxb)
# perform ADC operation (i.e., current to digital conversion)
with torch.no_grad():
if self.training:
self.delta_i = output_crxb.abs().max()/(self.h_lvl)
self.delta_out_sum.data += self.delta_i
else:
self.delta_i = self.delta_out_sum.data/self.counter.data
self.delta_y = self.delta_w*self.delta_x * \
self.delta_i/(self.delta_v*self.delta_g)
# print('adc LSB ration:', self.delta_i/self.max_i_LSB)
output_clip = F.hardtanh(output_crxb, min_val=-self.h_lvl*self.delta_i.item(),
max_val=self.h_lvl*self.delta_i.item())
output_adc = adc(output_clip, self.delta_i, self.delta_y)
if self.w2g.enable_SAF:
if self.enable_ec_SAF:
G_pos_diff, G_neg_diff = self.w2g.error_compensation()
ec_scale = self.delta_y/self.delta_i
output_adc += (torch.matmul(G_pos_diff, input_crxb)
- torch.matmul(G_neg_diff, input_crxb))*ec_scale
output_sum = torch.sum(output_adc, dim=2).squeeze(dim=3)
output = output_sum.view(input.shape[0],
output_sum.shape[1]*output_sum.shape[2]).index_select(dim=1, index=self.out_index)
if self.bias is not None:
output += self.bias
return output
def _reset_delta(self):
self.delta_in_sum.data[0] = 0
self.delta_out_sum.data[0] = 0
self.counter.data[0] = 0
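To make the crossbar tiling in crxb_Conv2d concrete, here is a self-contained sketch (shapes and values are arbitrary, not taken from the pytorx repo) of how num_pad, F.pad and the view/transpose above turn a flattened weight into crxb_size x crxb_size tiles:

```python
# Standalone sketch of the crossbar tiling used above: pad the flattened
# weight so both dimensions are multiples of crxb_size, then reshape into
# (crxb_col, crxb_row, crxb_size, crxb_size) tiles.
import math
import torch
import torch.nn.functional as F

def num_pad(source, target):
    crxb_index = math.ceil(source / target)
    return crxb_index, crxb_index * target - source

out_channels, in_channels, k, crxb_size = 32, 16, 3, 64
weight = torch.randn(out_channels, in_channels, k, k)
weight_flatten = weight.view(out_channels, -1)                     # (32, 144)

crxb_row, row_pads = num_pad(weight_flatten.shape[1], crxb_size)   # 3 tiles, pad 48
crxb_col, col_pads = num_pad(weight_flatten.shape[0], crxb_size)   # 1 tile, pad 32

weight_padded = F.pad(weight_flatten, (0, row_pads, 0, col_pads))  # (64, 192)
weight_crxb = weight_padded.view(crxb_col, crxb_size,
                                 crxb_row, crxb_size).transpose(1, 2)
print(weight_crxb.shape)  # torch.Size([1, 3, 64, 64])
```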
avg_line_length: 45.785276 | max_line_length: 115 | alphanum_fraction: 0.549176
quality-signal columns (in schema order): 1,934 | 14,926 | 3.998966 | 0.097208 | 0.083786 | 0.043445 | 0.021722 | 0.883243 | 0.862296 | 0.852082 | 0.834626 | 0.834626 | 0.828549 | 0 | 0.022463 | 0.337867 | 14,926 | 325 | 116 | 45.926154 | 0.760093 | 0.090111 | 0 | 0.740741 | 0 | 0 | 0.007862 | 0 | 0 | 0 | 0 | 0 | 0.004115 | 1 | 0.032922 | false | 0 | 0.024691 | 0 | 0.082305 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 7
Record 2: forums/store.py
- hexsha: e05dba1d7d818a21a63e58551944f7f8ca98df61
- size: 1,212 bytes; ext: py; lang: Python
- max_stars / max_issues / max_forks repo_path: forums/store.py
- repo_name: zoheers/fourmworkshop
- repo_head_hexsha: 370b1e2759fcfa2a6a9b18eb339538edaa589e88
- repo_licenses: ["MIT"]
- max_stars_count, max_issues_count, max_forks_count and all event datetimes: null
- content:
class MemberStore:
Members=[]
last_id= 1
def add(self,s):
s.id=MemberStore.last_id
MemberStore.Members.append(s)
MemberStore.last_id += 1
def get_all(self):
for p in self.Members:
print(p)
def get_by_id(self,id):
for x in MemberStore.Members:
if x.id==id:
return x
return "not found"
def delete(self,id):
x= self.get_by_id(id)
if x!="not found":
MemberStore.Members.remove(x)
print("member deleted")
else:
print("member does not exist")
def entity_exists(self,id):
x=self.get_by_id(id)
if x == "not found":
return False
else:
return True
class PostStore:
Posts=[]
last_id=1
def add(self,s):
s.id=PostStore.last_id
PostStore.Posts.append(s)
PostStore.last_id+=1
def get_all(self):
for p in self.Posts:
print(p)
def get_by_id (self,id):
for x in PostStore.Posts:
if x.id==id:
return x
return "not found"
def delete(self,id):
x= self.get_by_id(id)
if x!="not found":
PostStore.Posts.remove(x)
print("post deleted")
else:
print("post does not exist")
def entity_exists(self,id):
x=self.get_by_id(id)
if x == "not found":
return False
else:
return True
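A short, hypothetical usage sketch of MemberStore follows; the real Member model class is defined elsewhere in the repo, so a minimal stand-in with an id attribute is used here:

```python
# Hypothetical usage of MemberStore; Member below is a stand-in, not the
# project's real model class.
class Member:
    def __init__(self, name):
        self.name = name
        self.id = None

    def __repr__(self):
        return "Member(id={}, name={!r})".format(self.id, self.name)

store = MemberStore()
store.add(Member("alice"))
store.add(Member("bob"))

print(store.get_by_id(1))        # Member(id=1, name='alice')
print(store.entity_exists(99))   # False
store.delete(2)                  # prints "member deleted"
```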
avg_line_length: 18.089552 | max_line_length: 33 | alphanum_fraction: 0.623762
quality-signal columns (in schema order): 200 | 1,212 | 3.67 | 0.175 | 0.049046 | 0.057221 | 0.054496 | 0.705722 | 0.705722 | 0.705722 | 0.705722 | 0.705722 | 0.648501 | 0 | 0.004444 | 0.257426 | 1,212 | 66 | 34 | 18.363636 | 0.811111 | 0 | 0 | 0.714286 | 0 | 0 | 0.08726 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.107143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 7
Record 3: config_training7.py
- hexsha: 0eda2185672630167fadee5840fcee9266cf1c8c
- size: 1,830 bytes; ext: py; lang: Python
- max_stars / max_issues / max_forks repo_path: config_training7.py
- repo_name: CONTINUE12/DeepLung
- repo_head_hexsha: c6dc4debc55677a48be762b4c36d0725e7f93af1
- repo_licenses: ["Apache-2.0"]
- max_stars_count: 16; stars events: 2020-08-25T08:11:04.000Z to 2022-03-25T01:32:46.000Z
- max_issues_count: null; issues event datetimes: null
- max_forks_count: 7; forks events: 2020-06-12T04:28:29.000Z to 2021-09-20T12:06:24.000Z
- content:
config = {'train_data_path':['/home/zhaojie/zhaojie/Lung/data/luna16/subset_data/subset0/',
'/home/zhaojie/zhaojie/Lung/data/luna16/subset_data/subset1/',
'/home/zhaojie/zhaojie/Lung/data/luna16/subset_data/subset3/',
'/home/zhaojie/zhaojie/Lung/data/luna16/subset_data/subset4/',
'/home/zhaojie/zhaojie/Lung/data/luna16/subset_data/subset5/',
'/home/zhaojie/zhaojie/Lung/data/luna16/subset_data/subset6/',
'/home/zhaojie/zhaojie/Lung/data/luna16/subset_data/subset7/',
'/home/zhaojie/zhaojie/Lung/data/luna16/subset_data/subset8/',
'/home/zhaojie/zhaojie/Lung/data/luna16/subset_data/subset9/'],
'val_data_path':['/home/zhaojie/zhaojie/Lung/data/luna16/subset_data/subset2/'],
'test_data_path':['/home/zhaojie/zhaojie/Lung/data/luna16/subset_data/subset2/'],
'train_preprocess_result_path':'/home/zhaojie/zhaojie/Lung/DeepLung-Minerva/Data/LUNA16PROPOCESSPATH/', # contains numpy for the data and label, which is generated by prepare.py
'val_preprocess_result_path':'/home/zhaojie/zhaojie/Lung/DeepLung-Minerva/Data/LUNA16PROPOCESSPATH/',
'test_preprocess_result_path':'/home/zhaojie/zhaojie/Lung/DeepLung-Minerva/Data/LUNA16PROPOCESSPATH/',
'train_annos_path':'/home/zhaojie/zhaojie/Lung/data/luna16/CSVFILES/annotations.csv',
'val_annos_path':'/home/zhaojie/zhaojie/Lung/data/luna16/CSVFILES/annotations.csv',
'test_annos_path':'/home/zhaojie/zhaojie/Lung/data/luna16/CSVFILES/annotations.csv',
'black_list':[],
'preprocessing_backend':'python',
}
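As an illustrative sanity check (not part of DeepLung), a config dict like the one above can be scanned for paths that do not exist on disk before training starts:

```python
# Illustrative check of a DeepLung-style config dict: report every
# absolute path listed in the config that is missing on this machine.
import os

def check_config_paths(config):
    missing = []
    for key, value in config.items():
        paths = value if isinstance(value, list) else [value]
        for p in paths:
            if isinstance(p, str) and p.startswith('/') and not os.path.exists(p):
                missing.append((key, p))
    return missing

for key, path in check_config_paths(config):
    print('missing path for {}: {}'.format(key, path))
```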
avg_line_length: 73.2 | max_line_length: 187 | alphanum_fraction: 0.634426
quality-signal columns (in schema order): 199 | 1,830 | 5.663317 | 0.236181 | 0.165927 | 0.271517 | 0.331854 | 0.824312 | 0.824312 | 0.824312 | 0.824312 | 0.526176 | 0.526176 | 0 | 0.03178 | 0.22623 | 1,830 | 24 | 188 | 76.25 | 0.764124 | 0.038798 | 0 | 0 | 0 | 0 | 0.71144 | 0.652817 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 8
Record 4: correction_utility_functions.py
- hexsha: 0ef3d0c45104f15dd4b985ea7a32b9ecbd7b769d
- size: 40,754 bytes; ext: py; lang: Python
- max_stars / max_issues / max_forks repo_path: correction_utility_functions.py
- repo_name: ctderoo/axroOptimization
- repo_head_hexsha: ba69323f3e3762c08c1918895e16e0b46554c5f7
- repo_licenses: ["MIT"]
- max_stars_count: null; stars event datetimes: null
- max_issues_count: 1; issues events: 2017-05-31T17:50:30.000Z to 2017-05-31T17:50:30.000Z
- max_forks_count: 2; forks events: 2017-10-24T20:22:17.000Z to 2018-12-28T13:37:07.000Z
- content:
from numpy import *
import matplotlib.pyplot as plt
import os
import glob
import pickle
import astropy.io.fits as pyfits
from astropy.modeling import models
from matplotlib import gridspec
import matplotlib.patches as patches
from mpl_toolkits.axes_grid1 import make_axes_locatable
import pdb
import utilities.imaging.man as man
import utilities.imaging.stitch as stitch
import utilities.metrology as met
import utilities.fourier as fourier
import utilities.imaging.fitting as fit
import axroOptimization.evaluateMirrors as eva
home_directory = os.getcwd()
##########################################################################################################
# Utility functions.
def pv(img):
return nanmax(img) - nanmin(img)
def convertToAxialSlopes(img,dx):
return gradient(img,dx)[0]*3600*180/pi
def stripWithShade(dist,shade):
output = copy(dist)
output = man.newGridSize(dist,shape(shade))
output[shade == 0] = NaN
return man.stripnans(output)
##########################################################################################################
# CTF specific functions
def generateAxialSineModel(amp,period,ylen,xlen,dy,phase = 0.0):
'''
Constructs a 1D sine model, oriented in the axial direction, from the
format of an example image.
'''
x,y = meshgrid(linspace(0,1,xlen),linspace(0,1,ylen))
g = models.Sine1D(amp,ylen*dy/period,phase)
return g(y)
def generate2DLegendreModel(xo,yo,xlen,ylen,coeffs = None):
'''
'''
x,y = meshgrid(linspace(-1,1,xlen),linspace(-1,1,ylen))
g = models.Legendre2D(xo,yo)
if (coeffs is not None) and (len(g.__dict__['_parameters']) == len(coeffs)):
try:
g.__dict__['_parameters'] = coeffs
except:
pdb.set_trace()
return g(x,y)
##########################################################################################################
# Iteration assessment functions.
def readCylWFSRaw(fn):
"""
Load in data from WFS measurement of cylindrical mirror.
Assumes that data was processed using processHAS, and loaded into
a .fits file.
Scale to microns, remove misalignments,
strip NaNs.
If rotate is set to an array of angles, the rotation angle
which minimizes the number of NaNs in the image after
stripping perimeter nans is selected.
Distortion is bump positive looking at concave surface.
Imshow will present distortion in proper orientation as if
viewing the concave surface.
"""
#Remove NaNs and rescale
d = pyfits.getdata(fn)
d = man.stripnans(d)
# Negate to make bump positive.
d = -d
return d
def reshapeMeasToCorrection(raw_correction,shape_match,mask_fraction):
# Loading the as-measured correction and processing it appropriately to be stripped of
# exterior NaNs, bump positive, and have best fit cylinder removed (like dist_map and the ifs).
# This raw correction has its own distinct shape of order 120 by 100.
# Creating a perimeter shademask consistent with the size of the measured change.
meas_shade = eva.slv.createShadePerimeter(shape(raw_correction),axialFraction = mask_fraction,azFraction = mask_fraction)
# Now making the measured relative change directly comparable to the area of the
# distortion map we are trying to correct by putting the shade mask in place, and
# then interpolating to the size of dist_map.
rel_change = copy(raw_correction)
rel_change[meas_shade == 0] = NaN
rel_change = man.newGridSize(rel_change,shape_match)
return rel_change
def getIterMeasResults(directory,desired_shape,mask_fraction = 30./101.6,name_search = 'DistortionToCorrect'):
os.chdir(directory)
fig_files = glob.glob('*' + name_search + '*')
figs = [reshapeMeasToCorrection(readCylWFSRaw(fn),desired_shape,mask_fraction) for fn in fig_files]
os.chdir(home_directory)
return figs
def getIterTheoResults(directory,desired_shape,mask_fraction = 30./101.6,name_search = 'DistortionToCorrect'):
os.chdir(directory)
fig_files = glob.glob('*' + name_search + '*')
figs = [reshapeMeasToCorrection(pyfits.getdata(fn),desired_shape,mask_fraction) for fn in fig_files]
os.chdir(home_directory)
return figs
def getIterVoltages(directory,name_search = 'OptVolts'):
os.chdir(directory)
volt_files = glob.glob('*' + name_search + '*')
volts = [loadtxt(fn) for fn in volt_files]
os.chdir(home_directory)
return volts
def evalAxSlopes(img,dx):
return gradient(img,dx)[0]*3600*180/pi
def evalSlopeImprovement(dist,cor,fig,dx_eff):
dist_std = nanstd(evalAxSlopes(dist*10**-4,dx_eff))
theo_cor_std = nanstd(evalAxSlopes((dist + cor)*10**-3,dx_eff))
meas_cor_std = nanstd(evalAxSlopes((dist + fig)*10**-3,dx_eff))
res_std = nanstd(evalAxSlopes((cor - fig)*10**-3,dx_eff))
return dist_std,theo_cor_std,meas_cor_std,res_std
##########################################################################################################
# Plotting functions.
def mirror_subplot(data_img,ax,title,cbar_label,extent = None,vmin = None,vmax = None,draw_cbar = True,merit = None,
merit1_label = 'PSF E68', merit2_label = 'PSF HPD',merit1_unit = 'asec.',merit2_unit = 'asec.'):
'''
The default figure plot style I want to use. Needs a specified input
data set, plotting axis and title. Options include an extent, vmin/vmax args,
and adding a merit function to the plot.
'''
im = ax.imshow(data_img,extent = extent,vmin = vmin,vmax = vmax)
ax.set_xlabel('Azimuthal Dimension (mm)',fontsize = 16)
ax.set_ylabel('Axial Dimension (mm)',fontsize = 16)
ax.set_title(title,fontsize = 16)
if draw_cbar is True:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.10)
cbar = plt.colorbar(im, cax = cax)
cbar.set_label(cbar_label,fontsize = 16)
if merit is not None:
ax.text(0.05,0.05,merit1_label + ': ' + "{:4.3f}".format(merit[0]) + ' ' + merit1_unit,ha = 'left',transform = ax.transAxes)
ax.text(0.05,0.10,merit2_label + ': ' + "{:3.3f}".format(merit[1]) + ' ' + merit2_unit,ha = 'left',transform = ax.transAxes)
def plot_correction_inline(input_dist,fc,cor,dx,first_title = '',second_title = '',third_title = '',
cbar_label = '',global_title = '',save_file = None,vbounds = None,dist_merit = None,\
fc_merit = None,cor_merit = None,
merit1_label = 'PSF E68', merit2_label = 'PSF HPD',merit1_unit = 'asec.',merit2_unit = 'asec.'):
'''
'''
fig = plt.figure(figsize = (18,5))
gs = gridspec.GridSpec(1,3)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
ax3 = fig.add_subplot(gs[2])
plot_dist = man.stripnans(input_dist - nanmean(input_dist))
plot_fc = man.newGridSize(man.stripnans(fc - nanmean(fc)),shape(plot_dist))
plot_cor = man.newGridSize(man.stripnans(cor - nanmean(cor)),shape(plot_dist))
extent = [-shape(plot_dist)[0]/2*dx,shape(plot_dist)[0]/2*dx,-shape(plot_dist)[0]/2*dx,shape(plot_dist)[0]/2*dx]
if vbounds is None:
vmin,vmax = nanmin([plot_dist,plot_fc,plot_cor]),nanmax([plot_dist,plot_fc,plot_cor])
else:
[vmin,vmax] = vbounds
mirror_subplot(plot_dist,ax1,first_title,cbar_label,extent = extent,vmin = vmin,vmax = vmax, merit = dist_merit,merit1_label = merit1_label, merit2_label = merit2_label,merit1_unit = merit1_unit,merit2_unit = merit2_unit)
mirror_subplot(plot_fc,ax2,second_title,cbar_label,extent = extent,vmin = vmin,vmax = vmax, merit = fc_merit,merit1_label = merit1_label, merit2_label = merit2_label,merit1_unit = merit1_unit,merit2_unit = merit2_unit)
mirror_subplot(plot_cor,ax3,third_title,cbar_label,extent = extent,vmin = vmin,vmax = vmax, merit = cor_merit,merit1_label = merit1_label, merit2_label = merit2_label,merit1_unit = merit1_unit,merit2_unit = merit2_unit)
fig.subplots_adjust(top = 0.74,hspace = 0.4,wspace = 0.4)
plt.suptitle(global_title,fontsize = 20)
if save_file != None:
plt.savefig(save_file)
plt.close()
return plot_dist,plot_fc,plot_cor
def plot_measured_correction_sixfig(input_dist,theo_corr,meas_corr0,meas_corr1,dx,first_title = '',second_title = '',third_title = '',
fourth_title = '',fifth_title = '',sixth_title = '', cbar_label = '',global_title = '',save_file = None,
dist_merit = None, meas_corr_merit0 = None,meas_corr_merit1 = None,vbounds = None):
'''
'''
fig = plt.figure(figsize = (12,16))
gs = gridspec.GridSpec(3,2)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
ax3 = fig.add_subplot(gs[2])
ax4 = fig.add_subplot(gs[3])
ax5 = fig.add_subplot(gs[4])
ax6 = fig.add_subplot(gs[5])
plot_dist = man.stripnans(input_dist - nanmean(input_dist))
plot_theo_corr = man.newGridSize(man.stripnans(theo_corr - nanmean(theo_corr)),shape(plot_dist))
plot_meas_corr0 = man.newGridSize(man.stripnans(meas_corr0 - nanmean(meas_corr0)),shape(plot_dist))
plot_meas_corr1 = man.newGridSize(man.stripnans(meas_corr1 - nanmean(meas_corr1)),shape(plot_dist))
extent = [-shape(plot_theo_corr)[0]/2*dx,shape(plot_theo_corr)[0]/2*dx,-shape(plot_theo_corr)[0]/2*dx,shape(plot_theo_corr)[0]/2*dx]
if vbounds == None:
vmin = nanmin([plot_dist,plot_theo_corr,plot_meas_corr0,plot_dist + plot_meas_corr0,plot_meas_corr1,plot_dist + plot_meas_corr1])
vmax = nanmax([plot_dist,plot_theo_corr,plot_meas_corr0,plot_dist + plot_meas_corr0,plot_meas_corr1,plot_dist + plot_meas_corr1])
else:
[vmin,vmax] = vbounds
mirror_subplot(plot_dist,ax1,first_title,cbar_label,extent = extent,vmin = vmin,vmax = vmax, merit = dist_merit)
mirror_subplot(plot_theo_corr,ax2,second_title,cbar_label,extent = extent,vmin = vmin,vmax = vmax, merit = None)
mirror_subplot(plot_meas_corr0,ax3,third_title,cbar_label,extent = extent,vmin = vmin,vmax = vmax, merit = None)
mirror_subplot(plot_meas_corr0 + plot_dist,ax4,fourth_title,cbar_label,extent = extent,vmin = vmin,vmax = vmax, merit = meas_corr_merit0)
mirror_subplot(plot_meas_corr1,ax5,fifth_title,cbar_label,extent = extent,vmin = vmin,vmax = vmax, merit = None)
mirror_subplot(plot_meas_corr1 + plot_dist,ax6,sixth_title,cbar_label,extent = extent,vmin = vmin,vmax = vmax, merit = meas_corr_merit1)
fig.subplots_adjust(hspace = 0.4,wspace = 0.3)
plt.suptitle(global_title,fontsize = 20)
if save_file != None:
plt.savefig(save_file)
plt.close()
return fig,(ax1,ax2,ax3,ax4,ax5,ax6)
def plot_compare_theo_meas_corr(plot_dist,plot_theo_corr,plot_meas_corr,plot_compare_theo_meas,dx,first_title = '',second_title = '',third_title = '',fourth_title = '',
cbar_label = '',global_title = '',save_file = None,vbounds = [-1.,1.],dist_merit = None,\
theo_corr_merit = None,meas_corr_merit = None,slope = False):
'''
'''
fig = plt.figure(figsize = (12,12))
gs = gridspec.GridSpec(2,2)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
ax3 = fig.add_subplot(gs[2])
ax4 = fig.add_subplot(gs[3])
extent = [-shape(plot_theo_corr)[0]/2*dx,shape(plot_theo_corr)[0]/2*dx,-shape(plot_theo_corr)[0]/2*dx,shape(plot_theo_corr)[0]/2*dx]
if vbounds == None:
vmin,vmax = nanmin([plot_dist,plot_theo_corr + plot_dist,plot_meas_corr + plot_dist]),nanmax([plot_dist,plot_theo_corr + plot_dist,plot_meas_corr + plot_dist])
else:
[vmin,vmax] = vbounds
mirror_subplot(plot_dist,ax1,first_title,cbar_label,extent = extent,vmin = vmin,vmax = vmax,merit = dist_merit)
mirror_subplot(plot_theo_corr + plot_dist,ax2,second_title,cbar_label,extent = extent,vmin = vmin,vmax = vmax,merit = theo_corr_merit)
mirror_subplot(plot_meas_corr + plot_dist,ax3,third_title,cbar_label,extent = extent,vmin = vmin,vmax = vmax,merit = meas_corr_merit)
if slope is False:
mirror_subplot(plot_compare_theo_meas,ax4,fourth_title,cbar_label,extent = extent,vmin = -0.100,vmax = 0.100,merit = [nanstd(plot_compare_theo_meas*10**3),pv(plot_compare_theo_meas)*10**3],
merit1_label = 'RMS', merit2_label = 'PV',merit1_unit = 'nm',merit2_unit = 'nm')
else:
mirror_subplot(plot_compare_theo_meas,ax4,fourth_title,cbar_label,extent = extent,vmin = -2,vmax = 2,merit = [nanstd(plot_compare_theo_meas),pv(plot_compare_theo_meas)],
merit1_label = 'RMS', merit2_label = 'PV',merit1_unit = 'asec.',merit2_unit = 'asec.')
fig.subplots_adjust(top = 0.85,hspace = 0.4,wspace = 0.4)
plt.suptitle(global_title,fontsize = 20)
if save_file != None:
plt.savefig(save_file)
return plot_dist,plot_theo_corr,plot_meas_corr
def plot_fig_slope_sidebyside(input_data,input_slopes,dx,individual_title = '',global_title = '',
save_file = None,vbounds_fig = None,vbounds_slope = None,
fig_merit = None,slope_merit = None,slope_unit = 'asec.',plot_to_use = None,draw_cbar = True):
'''
'''
if plot_to_use is None:
fig = plt.figure(figsize = (12,5))
gs = gridspec.GridSpec(1,2)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
else:
fig,(ax1,ax2) = plot_to_use
plot_figdata,plot_slopedata = input_data,input_slopes
extent = [-shape(plot_figdata)[0]/2*dx,shape(plot_figdata)[0]/2*dx,-shape(plot_figdata)[0]/2*dx,shape(plot_figdata)[0]/2*dx]
if vbounds_fig is None:
vbounds_fig = [nanmin([plot_figdata]),nanmax([plot_figdata])]
if vbounds_slope is None:
vbounds_slope = [nanmin([plot_slopedata]),nanmax([plot_slopedata])]
mirror_subplot(plot_figdata,ax1,individual_title + 'Figure Space',cbar_label = 'Figure (microns)',extent = extent,vmin = vbounds_fig[0],vmax = vbounds_fig[1], merit = fig_merit,
merit1_label = 'RMS', merit2_label = 'PV',merit1_unit = 'um',merit2_unit = 'um',draw_cbar = draw_cbar)
mirror_subplot(plot_slopedata,ax2,individual_title + 'Axial Slope Space',cbar_label = 'Slope (arcseconds)',extent = extent,vmin = vbounds_slope[0],vmax = vbounds_slope[1], merit = slope_merit,
merit1_label = 'RMS', merit2_label = 'PV',merit1_unit = slope_unit,merit2_unit = slope_unit,draw_cbar = draw_cbar)
fig.subplots_adjust(top = 0.9,hspace = 0.4,wspace = 0.4)
plt.suptitle(global_title,fontsize = 20)
if save_file != None:
plt.savefig(save_file)
plt.close()
return fig,(ax1,ax2)
def mirror_subplot_vlad(data_img,ax,title,cbar_label,extent = None,vmin = None,vmax = None,draw_cbar = True,merit = None,
merit1_label = 'PSF E68', merit2_label = 'PSF HPD',merit1_unit = 'asec.',merit2_unit = 'asec.'):
'''
The default figure plot style I want to use. Needs a specified input
data set, plotting axis and title. Options include an extent, vmin/vmax args,
and adding a merit function to the plot.
'''
im = ax.imshow(data_img,extent = extent,vmin = vmin,vmax = vmax)
ax.set_xlabel('Azimuthal Dimension (mm)',fontsize = 12)
ax.set_ylabel('Axial Dimension (mm)',fontsize = 12)
ax.set_title(title,fontsize = 16)
if draw_cbar is True:
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.10)
cbar = plt.colorbar(im, cax = cax,format='%.0e')
cbar.set_label(cbar_label,fontsize = 12)
if merit is not None:
ax.text(0.05,0.05,merit1_label + ': ' + "{:3.2e}".format(merit[0]) + ' ' + merit1_unit,ha = 'left',transform = ax.transAxes)
ax.text(0.05,0.10,merit2_label + ': ' + "{:3.2e}".format(merit[1]) + ' ' + merit2_unit,ha = 'left',transform = ax.transAxes)
def plot_correction_inline_vlad(input_dist,fc,cor,dx,first_title = '',second_title = '',third_title = '',
cbar_label = '',global_title = '',save_file = None,dist_merit = None,vbounds = None,\
fc_merit = None,cor_merit = None,
merit1_label = 'PSF E68', merit2_label = 'PSF HPD',merit1_unit = 'asec.',merit2_unit = 'asec.'):
'''
'''
fig = plt.figure(figsize = (18,5))
gs = gridspec.GridSpec(1,3)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
ax3 = fig.add_subplot(gs[2])
plot_dist = man.stripnans(input_dist - nanmean(input_dist))
plot_fc = man.newGridSize(man.stripnans(fc - nanmean(fc)),shape(plot_dist))
plot_cor = man.newGridSize(man.stripnans(cor - nanmean(cor)),shape(plot_dist))
extent = [-shape(plot_dist)[0]/2*dx,shape(plot_dist)[0]/2*dx,-shape(plot_dist)[0]/2*dx,shape(plot_dist)[0]/2*dx]
#if vbounds is None:
# vmin,vmax = nanmin([plot_dist,plot_fc,plot_cor]),nanmax([plot_dist,plot_fc,plot_cor])
#else:
# [vmin,vmax] = vbounds
mirror_subplot_vlad(plot_dist,ax1,first_title,cbar_label,extent = extent, merit = dist_merit,merit1_label = merit1_label, merit2_label = merit2_label,merit1_unit = merit1_unit,merit2_unit = merit2_unit)
mirror_subplot_vlad(plot_fc,ax2,second_title,cbar_label,extent = extent, merit = fc_merit,merit1_label = merit1_label, merit2_label = merit2_label,merit1_unit = merit1_unit,merit2_unit = merit2_unit)
mirror_subplot_vlad(plot_cor,ax3,third_title,cbar_label,extent = extent, merit = cor_merit,merit1_label = merit1_label, merit2_label = merit2_label,merit1_unit = merit1_unit,merit2_unit = merit2_unit)
fig.subplots_adjust(top = 0.74,hspace = 0.3,wspace = 0.5)
plt.suptitle(global_title,fontsize = 20)
if save_file != None:
plt.savefig(save_file)
plt.close()
return plot_dist,plot_fc,plot_cor
#####################################################################################
#####################################################################################
def plot_slumped_data_map(slump_data,shademask):
fig = plt.figure(figsize = (10,10))
ax1 = fig.add_subplot(111)
im = ax1.imshow(slump_data,extent = [-50,50,-50,50])
ax1.set_xlabel('Azimuthal Dimension (mm)',fontsize = 16)
ax1.set_ylabel('Axial Dimension (mm)',fontsize = 16)
ax1.set_title('Dimple Removed, 10th Order\nLegendre Fit To Slumped Data',fontsize = 16)
inner_region = stripWithShade(slump_data,shademask)
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.10)
cbar = plt.colorbar(im, cax = cax1)
cbar.set_label('Figure (microns)')
ax1.add_patch(patches.Rectangle((-35.05,-35.05),70.1,70.1,fill = False))
ax1.text(0.05,0.05,'PV: ' + "{:3.1f}".format(pv(slump_data)) + ' um',ha = 'left',transform = ax1.transAxes,fontsize = 16)
ax1.text(0.05,0.08,'RMS: ' + "{:3.1f}".format(nanstd(slump_data)) + ' um',ha = 'left',transform = ax1.transAxes,fontsize = 16)
ax1.text(-32,-32,'PV: ' + "{:3.1f}".format(pv(inner_region)) + ' um',ha = 'left',fontsize = 16)
ax1.text(-32,-29,'RMS: ' + "{:3.1f}".format(nanstd(inner_region)) + ' um',ha = 'left',fontsize = 16)
def plot_bffc_map(bffc):
fig = plt.figure(figsize = (10,10))
ax1 = fig.add_subplot(111)
im = ax1.imshow(bffc,extent = [-30.1,30.1,-30.1,30.1])
ax1.set_xlabel('Azimuthal Dimension (mm)',fontsize = 16)
ax1.set_ylabel('Axial Dimension (mm)',fontsize = 16)
ax1.set_title('Theoretical Best Fit Figure Change\nTo Correct Slumped Mirror Data',fontsize = 16)
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.10)
cbar = plt.colorbar(im, cax = cax1)
cbar.set_label('Figure (microns)')
ax1.add_patch(patches.Rectangle((-35.05,-35.05),70.1,70.1,fill = False))
ax1.text(0.05,0.05,'PV: ' + "{:3.1f}".format(pv(bffc)) + ' um',ha = 'left',transform = ax1.transAxes,fontsize = 16)
ax1.text(0.05,0.08,'RMS: ' + "{:3.1f}".format(nanstd(bffc)) + ' um',ha = 'left',transform = ax1.transAxes,fontsize = 16)
def plot_computed_correction(input_dist,comp_corr,dx,shade,first_title = '',second_title = '',sum_title = '', \
cbar_label = '',global_title = '',save_file = None,est_perf = False, \
dist_merit = None,corr_merit = None,vbounds = None):
'''
'''
fig = plt.figure(figsize = (12,10))
gs = gridspec.GridSpec(4,4)
ax1 = fig.add_subplot(gs[0:2,0:2])
ax2 = fig.add_subplot(gs[0:2,2:4])
ax3 = fig.add_subplot(gs[2:4,1:3])
fig.subplots_adjust(top = 0.9,hspace = 1.0,wspace = 1.0)
plot_corr = man.stripnans(comp_corr)
corr_shade = ~isnan(comp_corr)
plot_dist = stripWithShade(input_dist,corr_shade)
extent = [-shape(plot_corr)[0]/2*dx,shape(plot_corr)[0]/2*dx,-shape(plot_corr)[0]/2*dx,shape(plot_corr)[0]/2*dx]
if shape(plot_dist) != shape(plot_corr):
print("shape mismatch between plot_dist and plot_corr")
pdb.set_trace()
if vbounds == None:
vmin,vmax = nanmin([plot_dist,plot_corr,plot_dist + plot_corr]),nanmax([plot_dist,plot_corr,plot_dist + plot_corr])
else:
[vmin,vmax] = vbounds
im = ax1.imshow(plot_dist,extent = extent,vmin = vmin,vmax = vmax)
ax1.set_xlabel('Azimuthal Dimension (mm)')
ax1.set_ylabel('Axial Dimension (mm)')
ax1.set_title(first_title)
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.10)
cbar1 = plt.colorbar(im, cax = cax1)
cbar1.set_label(cbar_label)
ax2.imshow(plot_corr,extent = extent,vmin = vmin,vmax = vmax)
ax2.set_xlabel('Azimuthal Dimension (mm)')
ax2.set_ylabel('Axial Dimension (mm)')
ax2.set_title(second_title)
divider = make_axes_locatable(ax2)
cax2 = divider.append_axes("right", size="5%", pad=0.10)
cbar2 = plt.colorbar(im, cax = cax2)
cbar2.set_label(cbar_label)
ax3.imshow(plot_dist + plot_corr,extent = extent,vmin = vmin,vmax = vmax)
ax3.set_xlabel('Azimuthal Dimension (mm)')
ax3.set_ylabel('Axial Dimension (mm)')
ax3.set_title(sum_title)
divider = make_axes_locatable(ax3)
cax3 = divider.append_axes("right", size="5%", pad=0.10)
cbar3 = plt.colorbar(im, cax = cax3)
cbar3.set_label(cbar_label)
fig.subplots_adjust(top = 0.83,hspace = 0.5,wspace = 1.5)
plt.suptitle(global_title,fontsize = 20)
if est_perf == True:
print('Computing performance for plotting... Be patient!')
dist_merit = eva.computeMeritFunctions(plot_dist,[dx])
corr_merit = eva.computeMeritFunctions(plot_dist + plot_corr,[dx])
ax1.text(0.05,0.05,'PSF RMS: ' + "{:4.1f}".format(dist_merit[0]) + ' asec.',ha = 'left',transform = ax1.transAxes)
ax1.text(0.05,0.10,'PSF HPD: ' + "{:3.1f}".format(dist_merit[1]) + ' asec.',ha = 'left',transform = ax1.transAxes)
ax3.text(0.05,0.05,'PSF RMS: ' + "{:4.1f}".format(corr_merit[0]) + ' asec.',ha = 'left',transform = ax3.transAxes)
ax3.text(0.05,0.10,'PSF HPD: ' + "{:3.1f}".format(corr_merit[1]) + ' asec.',ha = 'left',transform = ax3.transAxes)
elif logical_and(dist_merit is not None,corr_merit is not None):
ax1.text(0.05,0.05,'PSF RMS: ' + "{:4.1f}".format(dist_merit[0]) + ' asec.',ha = 'left',transform = ax1.transAxes)
ax1.text(0.05,0.10,'PSF HPD: ' + "{:3.1f}".format(dist_merit[1]) + ' asec.',ha = 'left',transform = ax1.transAxes)
ax3.text(0.05,0.05,'PSF RMS: ' + "{:4.1f}".format(corr_merit[0]) + ' asec.',ha = 'left',transform = ax3.transAxes)
ax3.text(0.05,0.10,'PSF HPD: ' + "{:3.1f}".format(corr_merit[1]) + ' asec.',ha = 'left',transform = ax3.transAxes)
if save_file != None:
plt.savefig(save_file)
return fig,(ax1,ax2,ax3)
def plot_computed_correction_inline(input_dist,fc,cor,dx,shade,first_title = '',second_title = '',sum_title = '', \
cbar_label = '',global_title = '',save_file = None,est_perf = False, \
dist_merit = None,corr_merit = None,vbounds = None):
'''
'''
fig = plt.figure(figsize = (18,5))
gs = gridspec.GridSpec(1,3)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
ax3 = fig.add_subplot(gs[2])
#fig.subplots_adjust(top = 0.9,hspace = 1.0,wspace = 1.0)
plot_corr = man.stripnans(cor)
corr_shade = ~isnan(cor)
plot_dist = stripWithShade(input_dist,corr_shade)
extent = [-shape(plot_corr)[0]/2*dx,shape(plot_corr)[0]/2*dx,-shape(plot_corr)[0]/2*dx,shape(plot_corr)[0]/2*dx]
if shape(plot_dist) != shape(plot_corr):
print("shape mismatch between plot_dist and plot_corr")
pdb.set_trace()
if vbounds == None:
vmin,vmax = nanmin([plot_dist,plot_corr,plot_dist + plot_corr]),nanmax([plot_dist,plot_corr,plot_dist + plot_corr])
else:
[vmin,vmax] = vbounds
im = ax1.imshow(plot_dist,extent = extent,vmin = vmin,vmax = vmax)
ax1.set_xlabel('Azimuthal Dimension (mm)')
ax1.set_ylabel('Axial Dimension (mm)')
ax1.set_title(first_title)
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.10)
cbar1 = plt.colorbar(im, cax = cax1)
cbar1.set_label(cbar_label)
ax2.imshow(plot_corr,extent = extent,vmin = vmin,vmax = vmax)
ax2.set_xlabel('Azimuthal Dimension (mm)')
ax2.set_ylabel('Axial Dimension (mm)')
ax2.set_title(second_title)
divider = make_axes_locatable(ax2)
cax2 = divider.append_axes("right", size="5%", pad=0.10)
cbar2 = plt.colorbar(im, cax = cax2)
cbar2.set_label(cbar_label)
ax3.imshow(plot_dist + plot_corr,extent = extent,vmin = vmin,vmax = vmax)
ax3.set_xlabel('Azimuthal Dimension (mm)')
ax3.set_ylabel('Axial Dimension (mm)')
ax3.set_title(sum_title)
divider = make_axes_locatable(ax3)
cax3 = divider.append_axes("right", size="5%", pad=0.10)
cbar3 = plt.colorbar(im, cax = cax3)
cbar3.set_label(cbar_label)
fig.subplots_adjust(top = 0.7,hspace = 0.05,wspace = 0.6)
plt.suptitle(global_title,fontsize = 20)
if est_perf == True:
print('Computing performance for plotting... Be patient!')
dist_merit = eva.computeMeritFunctions(plot_dist,[dx])
corr_merit = eva.computeMeritFunctions(plot_dist + plot_corr,[dx])
ax1.text(0.05,0.05,'PSF RMS: ' + "{:4.1f}".format(dist_merit[0]) + ' asec.',ha = 'left',transform = ax1.transAxes)
ax1.text(0.05,0.10,'PSF HPD: ' + "{:3.1f}".format(dist_merit[1]) + ' asec.',ha = 'left',transform = ax1.transAxes)
ax3.text(0.05,0.05,'PSF RMS: ' + "{:4.1f}".format(corr_merit[0]) + ' asec.',ha = 'left',transform = ax3.transAxes)
ax3.text(0.05,0.10,'PSF HPD: ' + "{:3.1f}".format(corr_merit[1]) + ' asec.',ha = 'left',transform = ax3.transAxes)
elif logical_and(dist_merit is not None,corr_merit is not None):
ax1.text(0.05,0.05,'PSF RMS: ' + "{:4.1f}".format(dist_merit[0]) + ' asec.',ha = 'left',transform = ax1.transAxes)
ax1.text(0.05,0.10,'PSF HPD: ' + "{:3.1f}".format(dist_merit[1]) + ' asec.',ha = 'left',transform = ax1.transAxes)
ax3.text(0.05,0.05,'PSF RMS: ' + "{:4.1f}".format(corr_merit[0]) + ' asec.',ha = 'left',transform = ax3.transAxes)
ax3.text(0.05,0.10,'PSF HPD: ' + "{:3.1f}".format(corr_merit[1]) + ' asec.',ha = 'left',transform = ax3.transAxes)
if save_file != None:
plt.savefig(save_file)
return fig,(ax1,ax2,ax3)
def plot_measured_correction(input_dist,theo_corr,meas_corr,dx,first_title = '',second_title = '',third_title = '',sum_title = '',
cbar_label = '',global_title = '',save_file = None,est_perf = False,dist_merit = None, meas_corr_merit = None,vbounds = None):
'''
'''
fig = plt.figure(figsize = (12,10))
gs = gridspec.GridSpec(2,2)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
ax3 = fig.add_subplot(gs[2])
ax4 = fig.add_subplot(gs[3])
#fig.subplots_adjust(top = 0.9,hspace = 0.1,wspace = 0.1)
plot_dist = man.stripnans(input_dist - nanmean(input_dist))
plot_theo_corr = man.newGridSize(man.stripnans(theo_corr - nanmean(theo_corr)),shape(plot_dist))
plot_meas_corr = man.newGridSize(man.stripnans(meas_corr - nanmean(meas_corr)),shape(plot_dist))
extent = [-shape(plot_theo_corr)[0]/2*dx,shape(plot_theo_corr)[0]/2*dx,-shape(plot_theo_corr)[0]/2*dx,shape(plot_theo_corr)[0]/2*dx]
if vbounds == None:
vmin,vmax = nanmin([plot_dist,plot_theo_corr,plot_meas_corr,plot_dist + plot_meas_corr]),nanmax([plot_dist,plot_theo_corr,plot_meas_corr,plot_dist + plot_meas_corr])
else:
[vmin,vmax] = vbounds
im = ax1.imshow(plot_dist,extent = extent,vmin = vmin,vmax = vmax)
ax1.set_xlabel('Azimuthal Dimension (mm)')
ax1.set_ylabel('Axial Dimension (mm)')
ax1.set_title(first_title)
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.10)
cbar1 = plt.colorbar(im, cax = cax1)
cbar1.set_label(cbar_label)
ax2.imshow(plot_theo_corr,extent = extent,vmin = vmin,vmax = vmax)
ax2.set_xlabel('Azimuthal Dimension (mm)')
ax2.set_ylabel('Axial Dimension (mm)')
ax2.set_title(second_title)
divider = make_axes_locatable(ax2)
cax2 = divider.append_axes("right", size="5%", pad=0.10)
cbar2 = plt.colorbar(im, cax = cax2)
cbar2.set_label(cbar_label)
ax3.imshow(plot_meas_corr,extent = extent,vmin = vmin,vmax = vmax)
ax3.set_xlabel('Azimuthal Dimension (mm)')
ax3.set_ylabel('Axial Dimension (mm)')
ax3.set_title(third_title)
divider = make_axes_locatable(ax3)
cax3 = divider.append_axes("right", size="5%", pad=0.10)
cbar3 = plt.colorbar(im, cax = cax3)
cbar3.set_label(cbar_label)
ax4.imshow(plot_meas_corr + plot_dist,extent = extent,vmin = vmin,vmax = vmax)
ax4.set_xlabel('Azimuthal Dimension (mm)')
ax4.set_ylabel('Axial Dimension (mm)')
ax4.set_title(sum_title)
divider = make_axes_locatable(ax4)
cax4 = divider.append_axes("right", size="5%", pad=0.10)
cbar4 = plt.colorbar(im, cax = cax4)
cbar4.set_label(cbar_label)
fig.subplots_adjust(top = 0.85,hspace = 0.4,wspace = 0.4)
plt.suptitle(global_title,fontsize = 20)
if est_perf == True:
print('Computing performance for plotting... Be patient!')
dist_merit = eva.computeMeritFunctions(plot_dist,[dx])
corr_merit = eva.computeMeritFunctions(plot_dist + plot_meas_corr,[dx])
ax1.text(0.05,0.05,'PSF RMS: ' + "{:4.1f}".format(dist_merit[0]) + ' asec.',ha = 'left',transform = ax1.transAxes)
ax1.text(0.05,0.10,'PSF HPD: ' + "{:3.1f}".format(dist_merit[1]) + ' asec.',ha = 'left',transform = ax1.transAxes)
ax4.text(0.05,0.05,'PSF RMS: ' + "{:4.1f}".format(meas_corr_merit[0]) + ' asec.',ha = 'left',transform = ax4.transAxes)
ax4.text(0.05,0.10,'PSF HPD: ' + "{:3.1f}".format(meas_corr_merit[1]) + ' asec.',ha = 'left',transform = ax4.transAxes)
if save_file != None:
plt.savefig(save_file)
return fig,(ax1,ax2,ax3,ax4)
def plot_measured_correction_for_iteration(fig,input_dist,theo_corr,meas_corr,dx,first_title = '',second_title = '',third_title = '',sum_title = '',
cbar_label = '',global_title = '',save_file = None,est_perf = False,dist_merit = None, meas_corr_merit = None, vbounds = [-1.,1.]):
'''
'''
gs = gridspec.GridSpec(2,2)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
ax3 = fig.add_subplot(gs[2])
ax4 = fig.add_subplot(gs[3])
plot_dist = man.stripnans(input_dist - nanmean(input_dist))
plot_theo_corr = man.newGridSize(man.stripnans(theo_corr - nanmean(theo_corr)),shape(plot_dist))
plot_meas_corr = man.newGridSize(man.stripnans(meas_corr - nanmean(meas_corr)),shape(plot_dist))
extent = [-shape(plot_theo_corr)[0]/2*dx,shape(plot_theo_corr)[0]/2*dx,-shape(plot_theo_corr)[0]/2*dx,shape(plot_theo_corr)[0]/2*dx]
if vbounds == None:
vmin,vmax = nanmin([plot_dist,plot_meas_corr,plot_dist + plot_meas_corr]),nanmax([plot_dist,plot_meas_corr,plot_dist + plot_meas_corr])
else:
[vmin,vmax] = vbounds
im = ax1.imshow(plot_dist,extent = extent,vmin = vmin,vmax = vmax)
ax1.set_xlabel('Azimuthal Dimension (mm)')
ax1.set_ylabel('Axial Dimension (mm)')
ax1.set_title(first_title)
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.10)
cbar1 = plt.colorbar(im, cax = cax1)
cbar1.set_label(cbar_label)
ax2.imshow(plot_theo_corr,extent = extent,vmin = vmin,vmax = vmax)
ax2.set_xlabel('Azimuthal Dimension (mm)')
ax2.set_ylabel('Axial Dimension (mm)')
ax2.set_title(second_title)
divider = make_axes_locatable(ax2)
cax2 = divider.append_axes("right", size="5%", pad=0.10)
cbar2 = plt.colorbar(im, cax = cax2)
cbar2.set_label(cbar_label)
ax3.imshow(plot_meas_corr,extent = extent,vmin = vmin,vmax = vmax)
ax3.set_xlabel('Azimuthal Dimension (mm)')
ax3.set_ylabel('Axial Dimension (mm)')
ax3.set_title(third_title)
divider = make_axes_locatable(ax3)
cax3 = divider.append_axes("right", size="5%", pad=0.10)
cbar3 = plt.colorbar(im, cax = cax3)
cbar3.set_label(cbar_label)
ax4.imshow(plot_meas_corr + plot_dist,extent = extent,vmin = vmin,vmax = vmax)
ax4.set_xlabel('Azimuthal Dimension (mm)')
ax4.set_ylabel('Axial Dimension (mm)')
ax4.set_title(sum_title)
divider = make_axes_locatable(ax4)
cax4 = divider.append_axes("right", size="5%", pad=0.10)
cbar4 = plt.colorbar(im, cax = cax4)
cbar4.set_label(cbar_label)
fig.subplots_adjust(top = 0.9,hspace = 0.4,wspace = 0.4)
plt.suptitle(global_title,fontsize = 20)
if est_perf == True:
ax1.text(0.05,0.05,'PSF RMS: ' + "{:4.1f}".format(dist_merit[0]) + ' asec.',ha = 'left',transform = ax1.transAxes)
ax1.text(0.05,0.10,'PSF HPD: ' + "{:3.1f}".format(dist_merit[1]) + ' asec.',ha = 'left',transform = ax1.transAxes)
ax4.text(0.05,0.05,'PSF RMS: ' + "{:4.1f}".format(meas_corr_merit[0]) + ' asec.',ha = 'left',transform = ax4.transAxes)
ax4.text(0.05,0.10,'PSF HPD: ' + "{:3.1f}".format(meas_corr_merit[1]) + ' asec.',ha = 'left',transform = ax4.transAxes)
if save_file != None:
plt.savefig(save_file)
return plot_dist,plot_theo_corr,plot_meas_corr
def plot_computed_correction_trifig_inline(input_dist,fc,cor,dx,first_title = '',second_title = '',third_title = '', \
cbar_label = '',global_title = '',save_file = None,est_perf = False, \
dist_merit = None,fc_merit = None,cor_merit = None,vbounds = None):
'''
'''
fig = plt.figure(figsize = (18,5))
gs = gridspec.GridSpec(1,3)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
ax3 = fig.add_subplot(gs[2])
plot_dist,plot_fc,plot_cor = input_dist,fc,cor
extent = [-shape(plot_dist)[1]/2*dx,shape(plot_dist)[1]/2*dx,-shape(plot_dist)[0]/2*dx,shape(plot_dist)[0]/2*dx]
if (shape(plot_dist) != shape(plot_cor)) | (shape(plot_dist) != shape(plot_fc)):
print("shape mismatch among plot_dist, plot_fc and plot_cor")
pdb.set_trace()
if vbounds == None:
vmin,vmax = nanmin([plot_dist,plot_fc,plot_cor]),nanmax([plot_dist,plot_fc,plot_cor])
else:
[vmin,vmax] = vbounds
im = ax1.imshow(plot_dist,extent = extent,vmin = vmin,vmax = vmax)
ax1.set_xlabel('Azimuthal Dimension (mm)')
ax1.set_ylabel('Axial Dimension (mm)')
ax1.set_title(first_title)
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.10)
cbar1 = plt.colorbar(im, cax = cax1)
cbar1.set_label(cbar_label)
ax2.imshow(plot_fc,extent = extent,vmin = vmin,vmax = vmax)
ax2.set_xlabel('Azimuthal Dimension (mm)')
ax2.set_ylabel('Axial Dimension (mm)')
ax2.set_title(second_title)
divider = make_axes_locatable(ax2)
cax2 = divider.append_axes("right", size="5%", pad=0.10)
cbar2 = plt.colorbar(im, cax = cax2)
cbar2.set_label(cbar_label)
ax3.imshow(plot_cor,extent = extent,vmin = vmin,vmax = vmax)
ax3.set_xlabel('Azimuthal Dimension (mm)')
ax3.set_ylabel('Axial Dimension (mm)')
ax3.set_title(third_title)
divider = make_axes_locatable(ax3)
cax3 = divider.append_axes("right", size="5%", pad=0.10)
cbar3 = plt.colorbar(im, cax = cax3)
cbar3.set_label(cbar_label)
fig.subplots_adjust(top = 0.7,hspace = 0.05,wspace = 0.6)
plt.suptitle(global_title,fontsize = 20)
#if est_perf == True:
# if logical_and(dist_merit is None,cor_merit is None):
# dist_merit = eva.computeMeritFunctions(plot_dist,[dx])
# cor_merit = eva.computeMeritFunctions(plot_cor,[dx])
if dist_merit is not None:
ax1.text(0.05,0.05,'PSF RMS: ' + "{:5.2f}".format(dist_merit[0]) + ' asec.',ha = 'left',transform = ax1.transAxes)
ax1.text(0.05,0.10,'PSF HPD: ' + "{:4.2f}".format(dist_merit[1]) + ' asec.',ha = 'left',transform = ax1.transAxes)
if fc_merit is not None:
ax2.text(0.05,0.05,'PSF RMS: ' + "{:5.2f}".format(fc_merit[0]) + ' asec.',ha = 'left',transform = ax2.transAxes)
ax2.text(0.05,0.10,'PSF HPD: ' + "{:4.2f}".format(fc_merit[1]) + ' asec.',ha = 'left',transform = ax2.transAxes)
if cor_merit is not None:
ax3.text(0.05,0.05,'PSF RMS: ' + "{:5.2f}".format(cor_merit[0]) + ' asec.',ha = 'left',transform = ax3.transAxes)
ax3.text(0.05,0.10,'PSF HPD: ' + "{:4.2f}".format(cor_merit[1]) + ' asec.',ha = 'left',transform = ax3.transAxes)
if save_file != None:
plt.savefig(save_file)
return fig,(ax1,ax2,ax3)
def plot_computed_corrections_trifig_inline_slopes(input_dist,fc,cor,dx,first_title = '',second_title = '',third_title = '', \
cbar_label = '',global_title = '',save_file = None,est_perf = False,vbounds = None):
'''
'''
fig = plt.figure(figsize = (18,5))
gs = gridspec.GridSpec(1,3)
ax1 = fig.add_subplot(gs[0])
ax2 = fig.add_subplot(gs[1])
ax3 = fig.add_subplot(gs[2])
plot_dist,plot_fc,plot_cor = input_dist,fc,cor
extent = [-shape(plot_dist)[0]/2*dx,shape(plot_dist)[0]/2*dx,-shape(plot_dist)[0]/2*dx,shape(plot_dist)[0]/2*dx]
if (shape(plot_dist) != shape(plot_cor)) | (shape(plot_dist) != shape(plot_fc)):
print("shape mismatch among plot_dist, plot_fc and plot_cor")
pdb.set_trace()
if vbounds == None:
vmin,vmax = nanmin([plot_dist,plot_fc,plot_cor]),nanmax([plot_dist,plot_fc,plot_cor])
else:
[vmin,vmax] = vbounds
im = ax1.imshow(plot_dist,extent = extent,vmin = vmin,vmax = vmax)
ax1.set_xlabel('Azimuthal Dimension (mm)')
ax1.set_ylabel('Axial Dimension (mm)')
ax1.set_title(first_title)
divider = make_axes_locatable(ax1)
cax1 = divider.append_axes("right", size="5%", pad=0.10)
cbar1 = plt.colorbar(im, cax = cax1)
cbar1.set_label(cbar_label)
ax2.imshow(plot_fc,extent = extent,vmin = vmin,vmax = vmax)
ax2.set_xlabel('Azimuthal Dimension (mm)')
ax2.set_ylabel('Axial Dimension (mm)')
ax2.set_title(second_title)
divider = make_axes_locatable(ax2)
cax2 = divider.append_axes("right", size="5%", pad=0.10)
cbar2 = plt.colorbar(im, cax = cax2)
cbar2.set_label(cbar_label)
ax3.imshow(plot_cor,extent = extent,vmin = vmin,vmax = vmax)
ax3.set_xlabel('Azimuthal Dimension (mm)')
ax3.set_ylabel('Axial Dimension (mm)')
ax3.set_title(third_title)
divider = make_axes_locatable(ax3)
cax3 = divider.append_axes("right", size="5%", pad=0.10)
cbar3 = plt.colorbar(im, cax = cax3)
cbar3.set_label(cbar_label)
fig.subplots_adjust(top = 0.7,hspace = 0.05,wspace = 0.6)
plt.suptitle(global_title,fontsize = 20)
if est_perf:
ax1.text(0.05,0.05,'RMS Ax. Slope: ' + "{:5.2f}".format(nanstd(plot_dist)) + ' asec.',ha = 'left',transform = ax1.transAxes)
ax2.text(0.05,0.05,'RMS Ax. Slope: ' + "{:5.2f}".format(nanstd(plot_fc)) + ' asec.',ha = 'left',transform = ax2.transAxes)
ax3.text(0.05,0.05,'RMS Ax. Slope: ' + "{:5.2f}".format(nanstd(plot_cor)) + ' asec.',ha = 'left',transform = ax3.transAxes)
if save_file is not None:
plt.savefig(save_file)
return fig,(ax1,ax2,ax3)
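# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical call to the slope-map helper above, assuming the
# module-level NumPy namespace (shape, nanstd, random, etc.) is available and
# a 0.5 mm pixel pitch; the array names and values below are placeholders.
# example_dist = random.normal(size=(128, 128))
# example_fc = 0.5*example_dist
# example_cor = example_dist - example_fc
# fig, axes = plot_computed_corrections_trifig_inline_slopes(
#     example_dist, example_fc, example_cor, 0.5,
#     first_title='Input Distortion', second_title='Figure Change',
#     third_title='Correction', cbar_label='Axial Slope (asec)',
#     global_title='Correction Summary', est_perf=True)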
| 47.945882
| 225
| 0.647544
| 5,947
| 40,754
| 4.238271
| 0.070624
| 0.033962
| 0.024757
| 0.013013
| 0.826701
| 0.8073
| 0.788574
| 0.77925
| 0.769093
| 0.750724
| 0
| 0.043593
| 0.189454
| 40,754
| 850
| 226
| 47.945882
| 0.719432
| 0.027703
| 0
| 0.702922
| 0
| 0
| 0.078427
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.027597
| null | null | 0.011364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
160487b49cb5af0ca442cc31664efe1074c4630b
| 4,519
|
py
|
Python
|
legacy/MarketRisk.py
|
LaoKpa/reinforcement_trader
|
1465731269e6d58900a28a040346bf45ffb5cf97
|
[
"MIT"
] | 7
|
2020-09-28T23:36:40.000Z
|
2022-02-22T02:00:32.000Z
|
legacy/MarketRisk.py
|
LaoKpa/reinforcement_trader
|
1465731269e6d58900a28a040346bf45ffb5cf97
|
[
"MIT"
] | 4
|
2020-11-13T18:48:52.000Z
|
2022-02-10T01:29:47.000Z
|
legacy/MarketRisk.py
|
lzcaisg/reinforcement_trader
|
1465731269e6d58900a28a040346bf45ffb5cf97
|
[
"MIT"
] | 3
|
2020-11-23T17:31:59.000Z
|
2021-04-08T10:55:03.000Z
|
from Environment import *
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
MAX_WINDOW = 180
LARGE_WINDOW = 90
MID_WINDOW = 60
SMALL_WINDOW = 30
MIN_WINDOW = 7
env = Environment()
currentDate = EARLIEST_DATE+datetime.timedelta(days=SMALL_WINDOW-1) # SMALL_WINDOW: 30 days
etfList = ["S&P 500", "Shanghai Composite", "Bovespa", "DAX", "Nifty 50"]
marketName = "MSCI World"
def getBeta(etfName, marketName):
smallestEtfDate = env.db[etfName].find_one(sort=[("Date", 1)])["Date"]
smallestMarketDate = env.db[marketName].find_one(sort=[("Date", 1)])["Date"]
earliestDate = max(smallestEtfDate,smallestMarketDate,EARLIEST_DATE)
currentDate = earliestDate + datetime.timedelta(days=SMALL_WINDOW - 1) # SMALL_WINDOW: 30 days
dbResult = env.getRecordFromEndLengthByETFList(
todayDate=datetime.datetime.now(),
endDate=currentDate,
length=SMALL_WINDOW-1,
etfList=[etfName, marketName])
if len(dbResult[etfName]) != len(dbResult[marketName]):
delta = len(dbResult[etfName]) - len(dbResult[marketName])
if delta > 0: # etf got more data:
for i in range(delta):
dbResult[marketName].insert(0, dbResult[marketName][0])
else: # market got more data:
for i in range(abs(delta)):
dbResult[etfName].insert(0, dbResult[etfName][0])
currentDate += datetime.timedelta(days=1)
betaList = []
counter = 0
while currentDate < LATEST_DATE:
# 1. Push the record of currentDate as the first item of dbResult
newResult = env.getRecordFromStartLengthByETFList(datetime.datetime.now(), currentDate, 1, [etfName, marketName])
if newResult[etfName]: # If there IS a new record
dbResult[etfName].insert(0, newResult[etfName][0])
if newResult[marketName]: # The dimension must match
dbResult[marketName].insert(0, newResult[marketName][0])
else:
dbResult[marketName].insert(0, dbResult[marketName][0])
marketChange = np.array([d['Change'] for d in dbResult[marketName]])
etfChange = np.array([d['Change'] for d in dbResult[etfName]])
beta = (np.cov(etfChange,marketChange)[0][1])/np.var(marketChange)
betaList.append({"date": currentDate, "beta": beta})
currentDate += datetime.timedelta(days=1)
if counter%200 == 0:
print(counter, beta)
counter += 1
return betaList
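# Hedged worked example (illustrative, not from the original source): beta is
# the covariance of the ETF's daily changes with the market's daily changes
# divided by the market variance, exactly as computed in the loop above.
#   etf = np.array([0.010, -0.020, 0.015, 0.005])   # hypothetical 'Change' values
#   mkt = np.array([0.008, -0.010, 0.012, 0.004])
#   beta = np.cov(etf, mkt)[0][1] / np.var(mkt)      # covariance / market variance
# Note that np.cov uses the sample normalization (N-1) while np.var defaults
# to the population normalization (N), so the ratio mixes the two conventions
# just as getBeta() does.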
def getStd(etfName, marketName):  # NOTE: duplicates getBeta() and still returns rolling beta values; the name suggests a rolling standard deviation was intended but is not implemented
currentDate = EARLIEST_DATE + datetime.timedelta(days=SMALL_WINDOW - 1) # SMALL_WINDOW: 30 days
dbResult = env.getRecordFromEndLengthByETFList(
todayDate=datetime.datetime.now(),
endDate=currentDate,
length=SMALL_WINDOW-1,
etfList=[etfName, marketName])
if len(dbResult[etfName]) != len(dbResult[marketName]):
delta = len(dbResult[etfName]) - len(dbResult[marketName])
if delta > 0: # etf got more data:
for i in range(delta):
dbResult[marketName].insert(0, dbResult[marketName][0])
else: # market got more data:
for i in range(abs(delta)):
dbResult[etfName].insert(0, dbResult[etfName][0])
currentDate += datetime.timedelta(days=1)
betaList = []
counter = 0
while currentDate < LATEST_DATE:
# 1. Push the record of currentDate as the first item of dbResult
newResult = env.getRecordFromStartLengthByETFList(datetime.datetime.now(), currentDate, 1, [etfName, marketName])
if newResult[etfName]: # If there IS a new record
dbResult[etfName].insert(0, newResult[etfName][0])
if newResult[marketName]: # The dimension must match
dbResult[marketName].insert(0, newResult[marketName][0])
else:
dbResult[marketName].insert(0, dbResult[marketName][0])
marketChange = np.array([d['Change'] for d in dbResult[marketName]])
etfChange = np.array([d['Change'] for d in dbResult[etfName]])
beta = (np.cov(etfChange,marketChange)[0][1])/np.var(marketChange)
betaList.append({"date": currentDate, "beta": beta})
currentDate += datetime.timedelta(days=1)
if counter%200 == 0:
print(counter, beta)
counter += 1
return betaList
for etfName in etfList:
betaList = getBeta(etfName, marketName)
df = pd.DataFrame(betaList)
plt.plot(df['date'], df['beta'], label=etfName)
plt.legend()
plt.show()
| 39.295652
| 121
| 0.648374
| 525
| 4,519
| 5.542857
| 0.215238
| 0.098969
| 0.050515
| 0.051546
| 0.815808
| 0.815808
| 0.802062
| 0.802062
| 0.802062
| 0.802062
| 0
| 0.021246
| 0.229254
| 4,519
| 114
| 122
| 39.640351
| 0.814241
| 0.082983
| 0
| 0.73913
| 0
| 0
| 0.028329
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021739
| false
| 0
| 0.043478
| 0
| 0.086957
| 0.021739
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
16072a495990d06254ffd32074716cfcc91b50e0
| 146
|
py
|
Python
|
base_constants.py
|
focusonecc/common
|
d61631d5b1c068422dcf40be199972ed36fa26be
|
[
"MIT"
] | null | null | null |
base_constants.py
|
focusonecc/common
|
d61631d5b1c068422dcf40be199972ed36fa26be
|
[
"MIT"
] | 4
|
2017-12-25T12:32:42.000Z
|
2018-01-02T13:17:40.000Z
|
base_constants.py
|
focusonecc/common
|
d61631d5b1c068422dcf40be199972ed36fa26be
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Author: theo-l
# @Date: 2017-06-26 18:51:24
# @Last Modified by: theo-l
# @Last Modified time: 2017-06-26 18:51:24
| 24.333333
| 42
| 0.60274
| 27
| 146
| 3.259259
| 0.62963
| 0.113636
| 0.181818
| 0.227273
| 0.318182
| 0.318182
| 0
| 0
| 0
| 0
| 0
| 0.243697
| 0.184932
| 146
| 5
| 43
| 29.2
| 0.495798
| 0.924658
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
162411cabff670198e84a712e67a355a0b66c86d
| 69,458
|
py
|
Python
|
test/scenarios/synapse/output/ext_default_folder/src/synapse/azext_synapse/generated/custom.py
|
kairu-ms/autorest.az
|
c3370f3d4d394e580615d8d97df05515533b035e
|
[
"MIT"
] | null | null | null |
test/scenarios/synapse/output/ext_default_folder/src/synapse/azext_synapse/generated/custom.py
|
kairu-ms/autorest.az
|
c3370f3d4d394e580615d8d97df05515533b035e
|
[
"MIT"
] | null | null | null |
test/scenarios/synapse/output/ext_default_folder/src/synapse/azext_synapse/generated/custom.py
|
kairu-ms/autorest.az
|
c3370f3d4d394e580615d8d97df05515533b035e
|
[
"MIT"
] | 1
|
2021-03-21T03:59:29.000Z
|
2021-03-21T03:59:29.000Z
|
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=too-many-lines
# pylint: disable=unused-argument
from azure.cli.core.util import sdk_no_wait
def synapse_big_data_pool_list(client,
resource_group_name,
workspace_name):
return client.list_by_workspace(resource_group_name=resource_group_name,
workspace_name=workspace_name)
def synapse_big_data_pool_show(client,
resource_group_name,
workspace_name,
big_data_pool_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
big_data_pool_name=big_data_pool_name)
def synapse_big_data_pool_create(client,
resource_group_name,
workspace_name,
big_data_pool_name,
location,
force=None,
tags=None,
provisioning_state=None,
auto_scale=None,
creation_date=None,
auto_pause=None,
spark_events_folder=None,
node_count=None,
library_requirements=None,
spark_version=None,
default_spark_log_folder=None,
node_size=None,
node_size_family=None,
no_wait=False):
if force is None:
force = False
big_data_pool_info = {}
big_data_pool_info['tags'] = tags
big_data_pool_info['location'] = location
big_data_pool_info['provisioning_state'] = provisioning_state
big_data_pool_info['auto_scale'] = auto_scale
big_data_pool_info['creation_date'] = creation_date
big_data_pool_info['auto_pause'] = auto_pause
big_data_pool_info['spark_events_folder'] = spark_events_folder
big_data_pool_info['node_count'] = node_count
big_data_pool_info['library_requirements'] = library_requirements
big_data_pool_info['spark_version'] = spark_version
big_data_pool_info['default_spark_log_folder'] = default_spark_log_folder
big_data_pool_info['node_size'] = node_size
big_data_pool_info['node_size_family'] = node_size_family
return sdk_no_wait(no_wait,
client.begin_create_or_update,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
big_data_pool_name=big_data_pool_name,
force=force,
big_data_pool_info=big_data_pool_info)
def synapse_big_data_pool_update(client,
resource_group_name,
workspace_name,
big_data_pool_name,
tags=None):
big_data_pool_patch_info = {}
big_data_pool_patch_info['tags'] = tags
return client.update(resource_group_name=resource_group_name,
workspace_name=workspace_name,
big_data_pool_name=big_data_pool_name,
big_data_pool_patch_info=big_data_pool_patch_info)
def synapse_big_data_pool_delete(client,
resource_group_name,
workspace_name,
big_data_pool_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_delete,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
big_data_pool_name=big_data_pool_name)
def synapse_operation_show_azure_async_header_result(client,
resource_group_name,
workspace_name,
operation_id):
return client.get_azure_async_header_result(resource_group_name=resource_group_name,
workspace_name=workspace_name,
operation_id=operation_id)
def synapse_operation_show_location_header_result(client,
resource_group_name,
workspace_name,
operation_id):
return client.get_location_header_result(resource_group_name=resource_group_name,
workspace_name=workspace_name,
operation_id=operation_id)
def synapse_ip_firewall_rule_list(client,
resource_group_name,
workspace_name):
return client.list_by_workspace(resource_group_name=resource_group_name,
workspace_name=workspace_name)
def synapse_ip_firewall_rule_show(client,
resource_group_name,
workspace_name,
rule_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
rule_name=rule_name)
def synapse_ip_firewall_rule_create(client,
resource_group_name,
workspace_name,
rule_name,
end_ip_address=None,
start_ip_address=None,
no_wait=False):
ip_firewall_rule_info = {}
ip_firewall_rule_info['end_ip_address'] = end_ip_address
ip_firewall_rule_info['start_ip_address'] = start_ip_address
return sdk_no_wait(no_wait,
client.begin_create_or_update,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
rule_name=rule_name,
ip_firewall_rule_info=ip_firewall_rule_info)
def synapse_ip_firewall_rule_update(instance,
resource_group_name,
workspace_name,
rule_name,
end_ip_address=None,
start_ip_address=None,
no_wait=False):
if end_ip_address is not None:
instance.end_ip_address = end_ip_address
if start_ip_address is not None:
instance.start_ip_address = start_ip_address
return instance
def synapse_ip_firewall_rule_delete(client,
resource_group_name,
workspace_name,
rule_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_delete,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
rule_name=rule_name)
def synapse_ip_firewall_rule_replace_all(client,
resource_group_name,
workspace_name,
ip_firewall_rules=None,
no_wait=False):
request = {}
request['ip_firewall_rules'] = ip_firewall_rules
return sdk_no_wait(no_wait,
client.begin_replace_all,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
request=request)
def synapse_sql_pool_list(client,
resource_group_name,
workspace_name):
return client.list_by_workspace(resource_group_name=resource_group_name,
workspace_name=workspace_name)
def synapse_sql_pool_show(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name)
def synapse_sql_pool_create(client,
resource_group_name,
workspace_name,
sql_pool_name,
location,
tags=None,
sku=None,
max_size_bytes=None,
collation=None,
source_database_id=None,
recoverable_database_id=None,
provisioning_state=None,
status=None,
restore_point_in_time=None,
create_mode=None,
creation_date=None,
no_wait=False):
sql_pool_info = {}
sql_pool_info['tags'] = tags
sql_pool_info['location'] = location
sql_pool_info['sku'] = sku
sql_pool_info['max_size_bytes'] = max_size_bytes
sql_pool_info['collation'] = collation
sql_pool_info['source_database_id'] = source_database_id
sql_pool_info['recoverable_database_id'] = recoverable_database_id
sql_pool_info['provisioning_state'] = provisioning_state
sql_pool_info['status'] = status
sql_pool_info['restore_point_in_time'] = restore_point_in_time
sql_pool_info['create_mode'] = create_mode
sql_pool_info['creation_date'] = creation_date
return sdk_no_wait(no_wait,
client.begin_create,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
sql_pool_info=sql_pool_info)
def synapse_sql_pool_update(client,
resource_group_name,
workspace_name,
sql_pool_name,
tags=None,
location=None,
sku=None,
max_size_bytes=None,
collation=None,
source_database_id=None,
recoverable_database_id=None,
provisioning_state=None,
status=None,
restore_point_in_time=None,
create_mode=None,
creation_date=None):
sql_pool_info = {}
sql_pool_info['tags'] = tags
sql_pool_info['location'] = location
sql_pool_info['sku'] = sku
sql_pool_info['max_size_bytes'] = max_size_bytes
sql_pool_info['collation'] = collation
sql_pool_info['source_database_id'] = source_database_id
sql_pool_info['recoverable_database_id'] = recoverable_database_id
sql_pool_info['provisioning_state'] = provisioning_state
sql_pool_info['status'] = status
sql_pool_info['restore_point_in_time'] = restore_point_in_time
sql_pool_info['create_mode'] = create_mode
sql_pool_info['creation_date'] = creation_date
return client.update(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
sql_pool_info=sql_pool_info)
def synapse_sql_pool_delete(client,
resource_group_name,
workspace_name,
sql_pool_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_delete,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name)
def synapse_sql_pool_pause(client,
resource_group_name,
workspace_name,
sql_pool_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_pause,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name)
def synapse_sql_pool_rename(client,
resource_group_name,
workspace_name,
sql_pool_name,
id_):
parameters = {}
parameters['id'] = id_
return client.rename(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
parameters=parameters)
def synapse_sql_pool_resume(client,
resource_group_name,
workspace_name,
sql_pool_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_resume,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name)
def synapse_sql_pool_metadata_sync_config_show(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name)
def synapse_sql_pool_metadata_sync_config_create(client,
resource_group_name,
workspace_name,
sql_pool_name,
enabled=None):
metadata_sync_configuration = {}
metadata_sync_configuration['enabled'] = enabled
return client.create(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
metadata_sync_configuration=metadata_sync_configuration)
def synapse_sql_pool_operation_result_show_location_header_result(client,
resource_group_name,
workspace_name,
sql_pool_name,
operation_id):
return client.get_location_header_result(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
operation_id=operation_id)
def synapse_sql_pool_geo_backup_policy_show(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
geo_backup_policy_name="Default")
def synapse_sql_pool_data_warehouse_user_activity_show(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
data_warehouse_user_activity_name="current")
def synapse_sql_pool_restore_point_list(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.list(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name)
def synapse_sql_pool_restore_point_create(client,
resource_group_name,
workspace_name,
sql_pool_name,
restore_point_label):
parameters = {}
parameters['restore_point_label'] = restore_point_label
return client.begin_create(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
parameters=parameters)
def synapse_sql_pool_replication_link_list(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.list(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name)
def synapse_sql_pool_transparent_data_encryption_show(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
transparent_data_encryption_name="current")
def synapse_sql_pool_transparent_data_encryption_create(client,
resource_group_name,
workspace_name,
sql_pool_name,
status=None):
parameters = {}
parameters['status'] = status
return client.create_or_update(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
transparent_data_encryption_name="current",
parameters=parameters)
def synapse_sql_pool_transparent_data_encryption_update(instance,
resource_group_name,
workspace_name,
sql_pool_name,
status=None):
if status is not None:
instance.status = status
return instance
def synapse_sql_pool_blob_auditing_policy_show(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name)
def synapse_sql_pool_blob_auditing_policy_create(client,
resource_group_name,
workspace_name,
sql_pool_name,
state=None,
storage_endpoint=None,
storage_account_access_key=None,
retention_days=None,
audit_actions_and_groups=None,
storage_account_subscription_id=None,
is_storage_secondary_key_in_use=None,
is_azure_monitor_target_enabled=None):
parameters = {}
parameters['state'] = state
parameters['storage_endpoint'] = storage_endpoint
parameters['storage_account_access_key'] = storage_account_access_key
parameters['retention_days'] = retention_days
parameters['audit_actions_and_groups'] = audit_actions_and_groups
parameters['storage_account_subscription_id'] = storage_account_subscription_id
parameters['is_storage_secondary_key_in_use'] = is_storage_secondary_key_in_use
parameters['is_azure_monitor_target_enabled'] = is_azure_monitor_target_enabled
return client.create_or_update(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
parameters=parameters)
def synapse_sql_pool_blob_auditing_policy_update(instance,
resource_group_name,
workspace_name,
sql_pool_name,
state=None,
storage_endpoint=None,
storage_account_access_key=None,
retention_days=None,
audit_actions_and_groups=None,
storage_account_subscription_id=None,
is_storage_secondary_key_in_use=None,
is_azure_monitor_target_enabled=None):
if state is not None:
instance.state = state
if storage_endpoint is not None:
instance.storage_endpoint = storage_endpoint
if storage_account_access_key is not None:
instance.storage_account_access_key = storage_account_access_key
if retention_days is not None:
instance.retention_days = retention_days
if audit_actions_and_groups is not None:
instance.audit_actions_and_groups = audit_actions_and_groups
if storage_account_subscription_id is not None:
instance.storage_account_subscription_id = storage_account_subscription_id
if is_storage_secondary_key_in_use is not None:
instance.is_storage_secondary_key_in_use = is_storage_secondary_key_in_use
if is_azure_monitor_target_enabled is not None:
instance.is_azure_monitor_target_enabled = is_azure_monitor_target_enabled
return instance
def synapse_sql_pool_operation_list(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.list(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name)
def synapse_sql_pool_usage_list(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.list(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name)
def synapse_sql_pool_sensitivity_label_create(client,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
table_name,
column_name,
label_name=None,
label_id=None,
information_type=None,
information_type_id=None):
parameters = {}
parameters['label_name'] = label_name
parameters['label_id'] = label_id
parameters['information_type'] = information_type
parameters['information_type_id'] = information_type_id
return client.create_or_update(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
schema_name=schema_name,
table_name=table_name,
column_name=column_name,
parameters=parameters)
def synapse_sql_pool_sensitivity_label_update(client,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
table_name,
column_name,
label_name=None,
label_id=None,
information_type=None,
information_type_id=None):
parameters = {}
parameters['label_name'] = label_name
parameters['label_id'] = label_id
parameters['information_type'] = information_type
parameters['information_type_id'] = information_type_id
return client.create_or_update(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
schema_name=schema_name,
table_name=table_name,
column_name=column_name,
parameters=parameters)
def synapse_sql_pool_sensitivity_label_delete(client,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
table_name,
column_name):
return client.delete(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
schema_name=schema_name,
table_name=table_name,
column_name=column_name)
def synapse_sql_pool_sensitivity_label_disable_recommendation(client,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
table_name,
column_name):
return client.disable_recommendation(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
schema_name=schema_name,
table_name=table_name,
column_name=column_name)
def synapse_sql_pool_sensitivity_label_enable_recommendation(client,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
table_name,
column_name):
return client.enable_recommendation(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
schema_name=schema_name,
table_name=table_name,
column_name=column_name)
def synapse_sql_pool_sensitivity_label_list_current(client,
resource_group_name,
workspace_name,
sql_pool_name,
filter_=None):
return client.list_current(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
filter=filter_)
def synapse_sql_pool_sensitivity_label_list_recommended(client,
resource_group_name,
workspace_name,
sql_pool_name,
include_disabled_recommendations=None,
skip_token=None,
filter_=None):
return client.list_recommended(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
include_disabled_recommendations=include_disabled_recommendations,
skip_token=skip_token,
filter=filter_)
def synapse_sql_pool_schema_list(client,
resource_group_name,
workspace_name,
sql_pool_name,
filter_=None):
return client.list(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
filter=filter_)
def synapse_sql_pool_table_list(client,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
filter_=None):
return client.list_by_schema(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
schema_name=schema_name,
filter=filter_)
def synapse_sql_pool_table_column_list(client,
resource_group_name,
workspace_name,
sql_pool_name,
schema_name,
table_name,
filter_=None):
return client.list_by_table_name(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
schema_name=schema_name,
table_name=table_name,
filter=filter_)
def synapse_sql_pool_connection_policy_show(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
connection_policy_name="default")
def synapse_sql_pool_vulnerability_assessment_list(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.list(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name)
def synapse_sql_pool_vulnerability_assessment_show(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
vulnerability_assessment_name="default")
def synapse_sql_pool_vulnerability_assessment_create(client,
resource_group_name,
workspace_name,
sql_pool_name,
storage_container_path=None,
storage_container_sas_key=None,
storage_account_access_key=None,
recurring_scans=None):
parameters = {}
parameters['storage_container_path'] = storage_container_path
parameters['storage_container_sas_key'] = storage_container_sas_key
parameters['storage_account_access_key'] = storage_account_access_key
parameters['recurring_scans'] = recurring_scans
return client.create_or_update(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
vulnerability_assessment_name="default",
parameters=parameters)
def synapse_sql_pool_vulnerability_assessment_update(instance,
resource_group_name,
workspace_name,
sql_pool_name,
storage_container_path=None,
storage_container_sas_key=None,
storage_account_access_key=None,
recurring_scans=None):
if storage_container_path is not None:
instance.storage_container_path = storage_container_path
if storage_container_sas_key is not None:
instance.storage_container_sas_key = storage_container_sas_key
if storage_account_access_key is not None:
instance.storage_account_access_key = storage_account_access_key
if recurring_scans is not None:
instance.recurring_scans = recurring_scans
return instance
def synapse_sql_pool_vulnerability_assessment_delete(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.delete(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
vulnerability_assessment_name="default")
def synapse_sql_pool_vulnerability_assessment_scan_list(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.list(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
vulnerability_assessment_name="default")
def synapse_sql_pool_vulnerability_assessment_scan_export(client,
resource_group_name,
workspace_name,
sql_pool_name,
scan_id):
return client.export(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
vulnerability_assessment_name="default",
scan_id=scan_id)
def synapse_sql_pool_vulnerability_assessment_scan_initiate_scan(client,
resource_group_name,
workspace_name,
sql_pool_name,
scan_id):
return client.begin_initiate_scan(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
vulnerability_assessment_name="default",
scan_id=scan_id)
def synapse_sql_pool_security_alert_policy_show(client,
resource_group_name,
workspace_name,
sql_pool_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
security_alert_policy_name="default")
def synapse_sql_pool_security_alert_policy_create(client,
resource_group_name,
workspace_name,
sql_pool_name,
state=None,
disabled_alerts=None,
email_addresses=None,
email_account_admins=None,
storage_endpoint=None,
storage_account_access_key=None,
retention_days=None):
parameters = {}
parameters['state'] = state
parameters['disabled_alerts'] = disabled_alerts
parameters['email_addresses'] = email_addresses
parameters['email_account_admins'] = email_account_admins
parameters['storage_endpoint'] = storage_endpoint
parameters['storage_account_access_key'] = storage_account_access_key
parameters['retention_days'] = retention_days
return client.create_or_update(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
security_alert_policy_name="default",
parameters=parameters)
def synapse_sql_pool_security_alert_policy_update(instance,
resource_group_name,
workspace_name,
sql_pool_name,
state=None,
disabled_alerts=None,
email_addresses=None,
email_account_admins=None,
storage_endpoint=None,
storage_account_access_key=None,
retention_days=None):
if state is not None:
instance.state = state
if disabled_alerts is not None:
instance.disabled_alerts = disabled_alerts
if email_addresses is not None:
instance.email_addresses = email_addresses
if email_account_admins is not None:
instance.email_account_admins = email_account_admins
if storage_endpoint is not None:
instance.storage_endpoint = storage_endpoint
if storage_account_access_key is not None:
instance.storage_account_access_key = storage_account_access_key
if retention_days is not None:
instance.retention_days = retention_days
return instance
def synapse_sql_pool_vulnerability_assessment_rule_baseline_create(client,
resource_group_name,
workspace_name,
sql_pool_name,
rule_id,
baseline_name,
baseline_results=None):
parameters = {}
parameters['baseline_results'] = baseline_results
return client.create_or_update(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
vulnerability_assessment_name="default",
rule_id=rule_id,
baseline_name=baseline_name,
parameters=parameters)
def synapse_sql_pool_vulnerability_assessment_rule_baseline_update(client,
resource_group_name,
workspace_name,
sql_pool_name,
rule_id,
baseline_name,
baseline_results=None):
parameters = {}
parameters['baseline_results'] = baseline_results
return client.create_or_update(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
vulnerability_assessment_name="default",
rule_id=rule_id,
baseline_name=baseline_name,
parameters=parameters)
def synapse_sql_pool_vulnerability_assessment_rule_baseline_delete(client,
resource_group_name,
workspace_name,
sql_pool_name,
rule_id,
baseline_name):
return client.delete(resource_group_name=resource_group_name,
workspace_name=workspace_name,
sql_pool_name=sql_pool_name,
vulnerability_assessment_name="default",
rule_id=rule_id,
baseline_name=baseline_name)
def synapse_workspace_list(client,
resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
def synapse_workspace_show(client,
resource_group_name,
workspace_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name)
def synapse_workspace_create(client,
resource_group_name,
workspace_name,
location,
tags=None,
default_data_lake_storage=None,
sql_administrator_login_password=None,
managed_resource_group_name=None,
sql_administrator_login=None,
connectivity_endpoints=None,
managed_virtual_network=None,
private_endpoint_connections=None,
compute_subnet_id=None,
type_=None,
no_wait=False):
workspace_info = {}
workspace_info['tags'] = tags
workspace_info['location'] = location
workspace_info['default_data_lake_storage'] = default_data_lake_storage
workspace_info['sql_administrator_login_password'] = sql_administrator_login_password
workspace_info['managed_resource_group_name'] = managed_resource_group_name
workspace_info['sql_administrator_login'] = sql_administrator_login
workspace_info['connectivity_endpoints'] = connectivity_endpoints
workspace_info['managed_virtual_network'] = managed_virtual_network
workspace_info['private_endpoint_connections'] = private_endpoint_connections
workspace_info['virtual_network_profile'] = {}
workspace_info['virtual_network_profile']['compute_subnet_id'] = compute_subnet_id
workspace_info['identity'] = {}
workspace_info['identity']['type'] = type_
return sdk_no_wait(no_wait,
client.begin_create_or_update,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
workspace_info=workspace_info)
def synapse_workspace_update(client,
resource_group_name,
workspace_name,
tags=None,
sql_administrator_login_password=None,
type_=None,
no_wait=False):
workspace_patch_info = {}
workspace_patch_info['tags'] = tags
workspace_patch_info['sql_administrator_login_password'] = sql_administrator_login_password
workspace_patch_info['identity'] = {}
workspace_patch_info['identity']['type'] = type_
return sdk_no_wait(no_wait,
client.begin_update,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
workspace_patch_info=workspace_patch_info)
def synapse_workspace_delete(client,
resource_group_name,
workspace_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_delete,
resource_group_name=resource_group_name,
workspace_name=workspace_name)
def synapse_workspace_aad_admin_show(client,
resource_group_name,
workspace_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name)
def synapse_workspace_aad_admin_create(client,
resource_group_name,
workspace_name,
tenant_id=None,
login=None,
administrator_type=None,
sid=None,
no_wait=False):
aad_admin_info = {}
aad_admin_info['tenant_id'] = tenant_id
aad_admin_info['login'] = login
aad_admin_info['administrator_type'] = administrator_type
aad_admin_info['sid'] = sid
return sdk_no_wait(no_wait,
client.begin_create_or_update,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
aad_admin_info=aad_admin_info)
def synapse_workspace_aad_admin_update(instance,
resource_group_name,
workspace_name,
tenant_id=None,
login=None,
administrator_type=None,
sid=None,
no_wait=False):
if tenant_id is not None:
instance.tenant_id = tenant_id
if login is not None:
instance.login = login
if administrator_type is not None:
instance.administrator_type = administrator_type
if sid is not None:
instance.sid = sid
return instance
def synapse_workspace_aad_admin_delete(client,
resource_group_name,
workspace_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_delete,
resource_group_name=resource_group_name,
workspace_name=workspace_name)
def synapse_workspace_managed_identity_sql_control_setting_show(client,
resource_group_name,
workspace_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name)
def synapse_workspace_managed_identity_sql_control_setting_create(client,
resource_group_name,
workspace_name,
desired_state=None):
managed_identity_sql_control_settings = {}
managed_identity_sql_control_settings['grant_sql_control_to_managed_identity'] = {}
managed_identity_sql_control_settings['grant_sql_control_to_managed_identity']['desired_state'] = desired_state
return client.create_or_update(resource_group_name=resource_group_name,
workspace_name=workspace_name,
managed_identity_sql_control_settings=managed_identity_sql_control_settings)
def synapse_workspace_managed_identity_sql_control_setting_update(instance,
resource_group_name,
workspace_name,
desired_state=None):
if desired_state is not None:
instance.grant_sql_control_to_managed_identity.desired_state = desired_state
return instance
def synapse_integration_runtime_list(client,
resource_group_name,
workspace_name):
return client.list_by_workspace(resource_group_name=resource_group_name,
workspace_name=workspace_name)
def synapse_integration_runtime_show(client,
resource_group_name,
workspace_name,
integration_runtime_name,
if_none_match=None):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name,
if_none_match=if_none_match)
def synapse_integration_runtime_create(client,
resource_group_name,
workspace_name,
integration_runtime_name,
properties,
if_match=None,
no_wait=False):
integration_runtime = {}
integration_runtime['properties'] = properties
return sdk_no_wait(no_wait,
client.begin_create,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name,
if_match=if_match,
integration_runtime=integration_runtime)
def synapse_integration_runtime_update(client,
resource_group_name,
workspace_name,
integration_runtime_name,
auto_update=None,
update_delay_offset=None):
update_integration_runtime_request = {}
update_integration_runtime_request['auto_update'] = auto_update
update_integration_runtime_request['update_delay_offset'] = update_delay_offset
return client.update(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name,
update_integration_runtime_request=update_integration_runtime_request)
def synapse_integration_runtime_delete(client,
resource_group_name,
workspace_name,
integration_runtime_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_delete,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name)
def synapse_integration_runtime_start(client,
resource_group_name,
workspace_name,
integration_runtime_name):
return client.start(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name)
def synapse_integration_runtime_stop(client,
resource_group_name,
workspace_name,
integration_runtime_name):
return client.stop(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name)
def synapse_integration_runtime_upgrade(client,
resource_group_name,
workspace_name,
integration_runtime_name):
return client.upgrade(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name)
def synapse_integration_runtime_node_ip_address_get(client,
resource_group_name,
workspace_name,
integration_runtime_name,
node_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name,
node_name=node_name)
def synapse_integration_runtime_object_metadata_get(client,
resource_group_name,
workspace_name,
integration_runtime_name,
metadata_path=None):
get_metadata_request = {}
get_metadata_request['metadata_path'] = metadata_path
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name,
get_metadata_request=get_metadata_request)
def synapse_integration_runtime_object_metadata_refresh(client,
resource_group_name,
workspace_name,
integration_runtime_name):
return client.refresh(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name)
def synapse_integration_runtime_node_show(client,
resource_group_name,
workspace_name,
integration_runtime_name,
node_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name,
node_name=node_name)
def synapse_integration_runtime_node_update(client,
resource_group_name,
workspace_name,
integration_runtime_name,
node_name,
concurrent_jobs_limit=None):
update_integration_runtime_node_request = {}
update_integration_runtime_node_request['concurrent_jobs_limit'] = concurrent_jobs_limit
return client.update(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name,
node_name=node_name,
update_integration_runtime_node_request=update_integration_runtime_node_request)
def synapse_integration_runtime_node_delete(client,
resource_group_name,
workspace_name,
integration_runtime_name,
node_name):
return client.delete(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name,
node_name=node_name)
def synapse_integration_runtime_credentials_sync(client,
resource_group_name,
workspace_name,
integration_runtime_name):
return client.sync(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name)
def synapse_integration_runtime_connection_info_get(client,
resource_group_name,
workspace_name,
integration_runtime_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name)
def synapse_integration_runtime_auth_key_list(client,
resource_group_name,
workspace_name,
integration_runtime_name):
return client.list(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name)
def synapse_integration_runtime_auth_key_regenerate(client,
resource_group_name,
workspace_name,
integration_runtime_name,
key_name=None):
regenerate_key_parameters = {}
regenerate_key_parameters['key_name'] = key_name
return client.regenerate(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name,
regenerate_key_parameters=regenerate_key_parameters)
def synapse_integration_runtime_monitoring_data_get(client,
resource_group_name,
workspace_name,
integration_runtime_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name)
def synapse_integration_runtime_status_get(client,
resource_group_name,
workspace_name,
integration_runtime_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
integration_runtime_name=integration_runtime_name)
def synapse_private_link_resource_list(client,
resource_group_name,
workspace_name):
return client.list(resource_group_name=resource_group_name,
workspace_name=workspace_name)
def synapse_private_link_resource_show(client,
resource_group_name,
workspace_name,
private_link_resource_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
private_link_resource_name=private_link_resource_name)
def synapse_private_endpoint_connection_list(client,
resource_group_name,
workspace_name):
return client.list(resource_group_name=resource_group_name,
workspace_name=workspace_name)
def synapse_private_endpoint_connection_show(client,
resource_group_name,
workspace_name,
private_endpoint_connection_name):
return client.get(resource_group_name=resource_group_name,
workspace_name=workspace_name,
private_endpoint_connection_name=private_endpoint_connection_name)
def synapse_private_endpoint_connection_create(client,
resource_group_name,
workspace_name,
private_endpoint_connection_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_create,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
private_endpoint_connection_name=private_endpoint_connection_name)
def synapse_private_endpoint_connection_delete(client,
resource_group_name,
workspace_name,
private_endpoint_connection_name,
no_wait=False):
return sdk_no_wait(no_wait,
client.begin_delete,
resource_group_name=resource_group_name,
workspace_name=workspace_name,
private_endpoint_connection_name=private_endpoint_connection_name)
def synapse_private_link_hub_list(client,
resource_group_name=None):
if resource_group_name:
return client.list_by_resource_group(resource_group_name=resource_group_name)
return client.list()
def synapse_private_link_hub_show(client,
resource_group_name,
private_link_hub_name):
return client.get(resource_group_name=resource_group_name,
private_link_hub_name=private_link_hub_name)
def synapse_private_link_hub_create(client,
resource_group_name,
private_link_hub_name,
location,
tags=None):
private_link_hub_info = {}
private_link_hub_info['tags'] = tags
private_link_hub_info['location'] = location
return client.create_or_update(resource_group_name=resource_group_name,
private_link_hub_name=private_link_hub_name,
private_link_hub_info=private_link_hub_info)
def synapse_private_link_hub_update(client,
resource_group_name,
private_link_hub_name,
tags=None):
private_link_hub_patch_info = {}
private_link_hub_patch_info['tags'] = tags
return client.update(resource_group_name=resource_group_name,
private_link_hub_name=private_link_hub_name,
private_link_hub_patch_info=private_link_hub_patch_info)
def synapse_private_link_hub_delete(client,
resource_group_name,
private_link_hub_name):
return client.delete(resource_group_name=resource_group_name,
private_link_hub_name=private_link_hub_name)
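# Hedged note (not part of the generated module): every long-running operation
# above is routed through sdk_no_wait(no_wait, client.begin_*, ...). When
# no_wait is True, sdk_no_wait forwards the call with polling disabled so the
# CLI command returns immediately instead of blocking on the operation;
# otherwise it is equivalent to calling the SDK method directly. A minimal,
# hypothetical invocation:
#   synapse_sql_pool_delete(client, 'my-rg', 'my-workspace', 'my-pool', no_wait=True)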
| 49.612857
| 115
| 0.496055
| 5,708
| 69,458
| 5.496146
| 0.042221
| 0.12763
| 0.165817
| 0.159123
| 0.880499
| 0.856018
| 0.808842
| 0.780409
| 0.737664
| 0.730747
| 0
| 0
| 0.463172
| 69,458
| 1,399
| 116
| 49.64832
| 0.841366
| 0.007227
| 0
| 0.731521
| 0
| 0
| 0.025136
| 0.010255
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08921
| false
| 0.003398
| 0.00085
| 0.057774
| 0.180969
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
166149b044e633e9b1f2c34277defb93528f52cc
| 1,163
|
py
|
Python
|
src/models/model_trainers/archive.py
|
saArbabi/sim
|
cc6fd2d71c3621f47616e830db83244e51d28a28
|
[
"MIT"
] | 1
|
2021-03-26T15:28:31.000Z
|
2021-03-26T15:28:31.000Z
|
src/models/model_trainers/archive.py
|
saArbabi/DriverActionEstimators
|
a9519a685da6f96f689a09d928fa6bc4a09c29e0
|
[
"MIT"
] | null | null | null |
src/models/model_trainers/archive.py
|
saArbabi/DriverActionEstimators
|
a9519a685da6f96f689a09d928fa6bc4a09c29e0
|
[
"MIT"
] | null | null | null |
"""Some code that is most probably useless
"""
"""
Driver model - lstm
"""
history_future_seqs = data_gen.sequence(features, 20, 1)
history_future_seqs_scaled = data_gen.sequence(features_scaled, 20, 1)
data_list = data_gen.split_data(history_future_seqs, history_future_seqs_scaled)
# data_list = [data_array[:5000, :, :] for data_array in data_list]
history_future_usc, history_sca, future_sca, future_idm_s, \
future_m_veh_c, future_e_veh_a = data_list
future_e_veh_a.shape
# %%
"""
Driver model - mlp
"""
history_future_seqs = data_gen.sequence(features, 1, 1)
history_future_seqs_scaled = data_gen.sequence(features_scaled, 1, 1)
data_list = data_gen.split_data(history_future_seqs, history_future_seqs_scaled)
# data_list = [data_array[:5000, :, :] for data_array in data_list]
history_future_usc, history_sca, future_sca, future_idm_s, \
future_m_veh_c, future_e_veh_a = data_list
future_e_veh_a.shape
history_sca.flatten().shape
future_e_veh_a[0]
history_future_usc[0]
#########################################################
#########################################################
| 34.205882
| 81
| 0.671539
| 164
| 1,163
| 4.286585
| 0.237805
| 0.203414
| 0.193457
| 0.078236
| 0.830725
| 0.830725
| 0.830725
| 0.716927
| 0.716927
| 0.716927
| 0
| 0.02004
| 0.141874
| 1,163
| 33
| 82
| 35.242424
| 0.684369
| 0.150473
| 0
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
166da2386bc8209e9f89891ad94d66818b751dc6
| 154
|
py
|
Python
|
src/binary_structs/__init__.py
|
Idomass/binary_structs
|
36650ae127760427dd33081326e4fb6fef2592cd
|
[
"Xnet",
"X11"
] | 2
|
2021-12-12T13:34:20.000Z
|
2022-03-22T13:26:59.000Z
|
src/binary_structs/__init__.py
|
Idomass/binary_structs
|
36650ae127760427dd33081326e4fb6fef2592cd
|
[
"Xnet",
"X11"
] | null | null | null |
src/binary_structs/__init__.py
|
Idomass/binary_structs
|
36650ae127760427dd33081326e4fb6fef2592cd
|
[
"Xnet",
"X11"
] | null | null | null |
from binary_structs.utils import *
from binary_structs.binary_struct import binary_struct
from binary_structs.endianness import big_endian, little_endian
| 38.5
| 63
| 0.883117
| 22
| 154
| 5.863636
| 0.454545
| 0.232558
| 0.395349
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084416
| 154
| 3
| 64
| 51.333333
| 0.914894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
167657ad126e6698dc8928a2309ae8ba6c13e778
| 7,599
|
py
|
Python
|
tests/scanner/test_data/fake_blacklist_scanner_data.py
|
daniel-infosec/forseti-security
|
59c2262db4c6ace1289a2ebdbcc6131aca0f0d65
|
[
"Apache-2.0"
] | 1
|
2018-10-06T23:16:59.000Z
|
2018-10-06T23:16:59.000Z
|
tests/scanner/test_data/fake_blacklist_scanner_data.py
|
daniel-infosec/forseti-security
|
59c2262db4c6ace1289a2ebdbcc6131aca0f0d65
|
[
"Apache-2.0"
] | null | null | null |
tests/scanner/test_data/fake_blacklist_scanner_data.py
|
daniel-infosec/forseti-security
|
59c2262db4c6ace1289a2ebdbcc6131aca0f0d65
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake instance data."""
from google.cloud.forseti.scanner.audit import blacklist_rules_engine
FAKE_BLACKLIST_SOURCE_1 = '\n'.join(['#sdfsdf',
'1.2.3.4',
'#127.0.0.1',
'5.6.7.0/24',
'#104.199.142.52',
'#104.199.142.52/0'])
FAKE_BLACKLIST_SOURCE_2 = '\n'.join(['5.5.5.5'])
EXPECTED_BLACKLIST_1 = [['1.2.3.4'], ['5.6.7.0/24']]
INSTANCE_DATA = [
{
'full_name': 'fake_full_name111',
'network_interfaces': [
{"kind": "compute#networkInterface", "name": "nic0", "network": "https://www.googleapis.com/compute/beta/projects/dev-project/global/networks/default", "networkIP": "1.2.0.2", "subnetwork": "https://www.googleapis.com/compute/beta/projects/dev-project/regions/us-central1/subnetworks/default", "fingerprint": "x=", "accessConfigs": [{"kind": "compute#accessConfig", "name": "External NAT", "type": "ONE_TO_ONE_NAT"}]}],
},
{
'full_name': 'fake_full_name222',
'network_interfaces': [
{"kind": "compute#networkInterface", "name": "nic0", "network": "https://www.googleapis.com/compute/beta/projects/dev-project/global/networks/default", "networkIP": "1.2.0.2", "subnetwork": "https://www.googleapis.com/compute/beta/projects/dev-project/regions/asia-east1/subnetworks/default", "fingerprint": "y=", "accessConfigs": [{"kind": "compute#accessConfig", "name": "External NAT", "type": "ONE_TO_ONE_NAT", "natIP": "1.2.3.4"}]},
{"kind": "compute#networkInterface", "name": "nic1", "network": "https://www.googleapis.com/compute/beta/projects/dev-project/global/networks/testnetwork", "networkIP": "1.1.0.2", "subnetwork": "https://www.googleapis.com/compute/beta/projects/dev-project/regions/asia-east1/subnetworks/sadadasd", "fingerprint": "z=", "accessConfigs": [{"kind": "compute#accessConfig", "name": "External NAT", "type": "ONE_TO_ONE_NAT", "natIP": "5.6.7.8"}]}]
},
{
'full_name': 'fake_full_name333',
'network_interfaces': [
{"kind": "compute#networkInterface", "name": "nic0", "network": "https://www.googleapis.com/compute/beta/projects/dev-project/global/networks/default", "networkIP": "1.2.0.3", "subnetwork": "https://www.googleapis.com/compute/beta/projects/dev-project/regions/asia-east1/subnetworks/default", "fingerprint": "d=", "accessConfigs": [{"kind": "compute#accessConfig", "name": "External NAT", "type": "ONE_TO_ONE_NAT", "natIP": "9.10.11.12"}]},
{"kind": "compute#networkInterface", "name": "nic1", "network": "https://www.googleapis.com/compute/beta/projects/dev-project/global/networks/testnetwork", "networkIP": "1.1.0.3", "subnetwork": "https://www.googleapis.com/compute/beta/projects/dev-project/regions/asia-east1/subnetworks/sadadasd", "fingerprint": "c=", "accessConfigs": [{"kind": "compute#accessConfig", "name": "External NAT", "type": "ONE_TO_ONE_NAT", "natIP": "5.5.5.5"}]}]
},
{
'full_name': 'fake_full_name444',
'network_interfaces': [
{"kind": "compute#networkInterface", "name": "nic0", "network": "https://www.googleapis.com/compute/beta/projects/dev-project/global/networks/default", "networkIP": "1.2.0.2", "subnetwork": "https://www.googleapis.com/compute/beta/projects/dev-project/regions/us-east4/subnetworks/default", "fingerprint": "v=", "accessConfigs": [{"kind": "compute#accessConfig", "name": "External NAT", "type": "ONE_TO_ONE_NAT", "natIP": "5.6.7.254"}]}]
}
]
RuleViolation = blacklist_rules_engine.Rule.RuleViolation
EXPECTED_VIOLATIONS = [
[],
[RuleViolation(resource_type='instance', resource_name='dev-project', full_name='fake_full_name222', rule_blacklist='ET', rule_name='ET', rule_index=0, violation_type='BLACKLIST_VIOLATION', project='dev-project', network='default', ip='1.2.3.4', resource_data='{\n "accessConfigs": [\n {\n "kind": "compute#accessConfig", \n "name": "External NAT", \n "natIP": "1.2.3.4", \n "type": "ONE_TO_ONE_NAT"\n }\n ], \n "fingerprint": "y=", \n "full_name": "fake_full_name222", \n "kind": "compute#networkInterface", \n "name": "nic0", \n "network": "https://www.googleapis.com/compute/beta/projects/dev-project/global/networks/default", \n "networkIP": "1.2.0.2", \n "subnetwork": "https://www.googleapis.com/compute/beta/projects/dev-project/regions/asia-east1/subnetworks/default"\n}'),
RuleViolation(resource_type='instance', resource_name='dev-project', full_name='fake_full_name222', rule_blacklist='ET', rule_name='ET', rule_index=0, violation_type='BLACKLIST_VIOLATION', project='dev-project', network='testnetwork', ip='5.6.7.8', resource_data='{\n "accessConfigs": [\n {\n "kind": "compute#accessConfig", \n "name": "External NAT", \n "natIP": "5.6.7.8", \n "type": "ONE_TO_ONE_NAT"\n }\n ], \n "fingerprint": "z=", \n "full_name": "fake_full_name222", \n "kind": "compute#networkInterface", \n "name": "nic1", \n "network": "https://www.googleapis.com/compute/beta/projects/dev-project/global/networks/testnetwork", \n "networkIP": "1.1.0.2", \n "subnetwork": "https://www.googleapis.com/compute/beta/projects/dev-project/regions/asia-east1/subnetworks/sadadasd"\n}')],
[RuleViolation(resource_type='instance', resource_name='dev-project', full_name='fake_full_name333', rule_blacklist='Spam', rule_name='Spam', rule_index=1, violation_type='BLACKLIST_VIOLATION', project='dev-project', network='testnetwork', ip='5.5.5.5', resource_data='{\n "accessConfigs": [\n {\n "kind": "compute#accessConfig", \n "name": "External NAT", \n "natIP": "5.5.5.5", \n "type": "ONE_TO_ONE_NAT"\n }\n ], \n "fingerprint": "c=", \n "full_name": "fake_full_name333", \n "kind": "compute#networkInterface", \n "name": "nic1", \n "network": "https://www.googleapis.com/compute/beta/projects/dev-project/global/networks/testnetwork", \n "networkIP": "1.1.0.3", \n "subnetwork": "https://www.googleapis.com/compute/beta/projects/dev-project/regions/asia-east1/subnetworks/sadadasd"\n}')],
[RuleViolation(resource_type='instance', resource_name='dev-project', full_name='fake_full_name444', rule_blacklist='ET', rule_name='ET', rule_index=0, violation_type='BLACKLIST_VIOLATION', project='dev-project', network='default', ip='5.6.7.254', resource_data='{\n "accessConfigs": [\n {\n "kind": "compute#accessConfig", \n "name": "External NAT", \n "natIP": "5.6.7.254", \n "type": "ONE_TO_ONE_NAT"\n }\n ], \n "fingerprint": "v=", \n "full_name": "fake_full_name444", \n "kind": "compute#networkInterface", \n "name": "nic0", \n "network": "https://www.googleapis.com/compute/beta/projects/dev-project/global/networks/default", \n "networkIP": "1.2.0.2", \n "subnetwork": "https://www.googleapis.com/compute/beta/projects/dev-project/regions/us-east4/subnetworks/default"\n}')]
]
| 115.136364
| 834
| 0.665482
| 998
| 7,599
| 4.950902
| 0.158317
| 0.056669
| 0.07286
| 0.085003
| 0.820684
| 0.787088
| 0.787088
| 0.787088
| 0.787088
| 0.787088
| 0
| 0.034158
| 0.133175
| 7,599
| 65
| 835
| 116.907692
| 0.715956
| 0.079879
| 0
| 0.097561
| 0
| 0.390244
| 0.706625
| 0.049326
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02439
| 0
| 0.02439
| 0.243902
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
167f0879ed28f3561270b0578751ed87eea98a3f
| 65
|
py
|
Python
|
mliyweb/api/v2/__init__.py
|
FINRAOS/MLiy
|
a6fd56ad8a0de97d9862569d02e5d0f65c181acf
|
[
"Apache-2.0"
] | 13
|
2018-06-19T18:28:38.000Z
|
2021-12-02T13:08:52.000Z
|
mliyweb/api/v2/__init__.py
|
FINRAOS/MLiy
|
a6fd56ad8a0de97d9862569d02e5d0f65c181acf
|
[
"Apache-2.0"
] | 14
|
2018-08-09T14:49:08.000Z
|
2022-02-10T10:54:55.000Z
|
mliyweb/api/v2/__init__.py
|
FINRAOS/MLiy
|
a6fd56ad8a0de97d9862569d02e5d0f65c181acf
|
[
"Apache-2.0"
] | 2
|
2018-07-11T14:13:23.000Z
|
2019-02-08T14:17:26.000Z
|
import mliyweb.api.v2.instances
import mliyweb.api.v2.serializers
| 32.5
| 33
| 0.861538
| 10
| 65
| 5.6
| 0.6
| 0.464286
| 0.571429
| 0.642857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032258
| 0.046154
| 65
| 2
| 33
| 32.5
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
16b4c7406cf0ccd42e95aa56c3a9e776e4df8d67
| 9,196
|
py
|
Python
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/system_importer/test_system_importer_file_csv_check_content_file_type.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 1
|
2021-07-24T17:22:50.000Z
|
2021-07-24T17:22:50.000Z
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/system_importer/test_system_importer_file_csv_check_content_file_type.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-28T03:40:31.000Z
|
2022-02-28T03:40:52.000Z
|
Incident-Response/Tools/dfirtrack/dfirtrack_main/tests/system_importer/test_system_importer_file_csv_check_content_file_type.py
|
sn0b4ll/Incident-Playbook
|
cf519f58fcd4255674662b3620ea97c1091c1efb
|
[
"MIT"
] | 2
|
2022-02-25T08:34:51.000Z
|
2022-03-16T17:29:44.000Z
|
from django.contrib.auth.models import User
from django.contrib.messages import get_messages
from django.test import TestCase
from dfirtrack.settings import BASE_DIR
from dfirtrack_main.importer.file.csv import system_cron
from dfirtrack_main.tests.system_importer.config_functions import set_csv_import_username, set_csv_import_filename, set_csv_import_path
import os
import urllib.parse
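# The tests below cover the three CSV import paths (scheduled cron run, instant import view, and manual file upload)
# and check the error messages reported for a wrong file type and for a corrupted CSV file.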
class SystemImporterFileCsvCheckContentFileTypeViewTestCase(TestCase):
""" system importer file CSV view tests """
@classmethod
def setUpTestData(cls):
# create users
test_user = User.objects.create_user(username='testuser_system_importer_file_csv_check_content_file_type', password='3oKsgNPVdlmNPneLhdr9')
User.objects.create_user(username='message_user', password='a3ZEI74fr0lmA3pSh96b')
# change config
set_csv_import_username(test_user)
def test_system_importer_file_csv_upload_post_no_file_submitted(self):
""" test importer view """
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_type', password='3oKsgNPVdlmNPneLhdr9')
# create post data
data_dict = {}
# get response
response = self.client.post('/system/importer/file/csv/upload/', data_dict)
# compare
self.assertEqual(response.status_code, 200)
self.assertTemplateUsed(response, 'dfirtrack_main/system/system_importer_file_csv.html')
def test_system_importer_file_csv_cron_wrong_type(self):
""" test importer view """
# change config
set_csv_import_path(os.path.join(BASE_DIR, 'dfirtrack_main/tests/system_importer/system_importer_file_csv_files'))
# change config
set_csv_import_filename('system_importer_file_csv_testfile_04_wrong_type.png')
# execute cron job / scheduled task
system_cron()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_type', password='3oKsgNPVdlmNPneLhdr9')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(response.context['user']), 'testuser_system_importer_file_csv_check_content_file_type')
self.assertEqual(messages[0].message, '[Scheduled task CSV system importer] Wrong file type for CSV import. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
# switch user context
self.client.logout()
self.client.login(username='message_user', password='a3ZEI74fr0lmA3pSh96b')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(response.context['user']), 'message_user')
self.assertEqual(messages[0].message, '[Scheduled task CSV system importer] Wrong file type for CSV import. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
def test_system_importer_file_csv_instant_wrong_type(self):
""" test importer view """
# change config
set_csv_import_path(os.path.join(BASE_DIR, 'dfirtrack_main/tests/system_importer/system_importer_file_csv_files'))
# change config
set_csv_import_filename('system_importer_file_csv_testfile_04_wrong_type.png')
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_type', password='3oKsgNPVdlmNPneLhdr9')
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/instant/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
self.assertEqual(messages[0].message, 'Wrong file type for CSV import. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
def test_system_importer_file_csv_upload_post_wrong_type(self):
""" test importer view """
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_type', password='3oKsgNPVdlmNPneLhdr9')
# open upload file
systemcsv = open(os.path.join(BASE_DIR, 'dfirtrack_main/tests/system_importer/system_importer_file_csv_files/system_importer_file_csv_testfile_04_wrong_type.png'), 'rb')
# create post data
data_dict = {
'systemcsv': systemcsv,
}
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.post('/system/importer/file/csv/upload/', data_dict)
# get messages
messages = list(get_messages(response.wsgi_request))
# close file
systemcsv.close()
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
self.assertEqual(messages[0].message, 'Wrong file type for CSV import. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
def test_system_importer_file_csv_cron_corrupted(self):
""" test importer view """
# change config
set_csv_import_path(os.path.join(BASE_DIR, 'dfirtrack_main/tests/system_importer/system_importer_file_csv_files'))
# change config
set_csv_import_filename('system_importer_file_csv_testfile_05_corrupted.csv')
# execute cron job / scheduled task
system_cron()
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_type', password='3oKsgNPVdlmNPneLhdr9')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(response.context['user']), 'testuser_system_importer_file_csv_check_content_file_type')
self.assertEqual(messages[0].message, '[Scheduled task CSV system importer] File is corrupted. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
# switch user context
self.client.logout()
self.client.login(username='message_user', password='a3ZEI74fr0lmA3pSh96b')
# get response
response = self.client.get('/system/')
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertEqual(str(response.context['user']), 'message_user')
self.assertEqual(messages[0].message, '[Scheduled task CSV system importer] File is corrupted. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
def test_system_importer_file_csv_instant_corrupted(self):
""" test importer view """
# change config
set_csv_import_path(os.path.join(BASE_DIR, 'dfirtrack_main/tests/system_importer/system_importer_file_csv_files'))
# change config
set_csv_import_filename('system_importer_file_csv_testfile_05_corrupted.csv')
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_type', password='3oKsgNPVdlmNPneLhdr9')
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.get('/system/importer/file/csv/instant/', follow=True)
# get messages
messages = list(get_messages(response.wsgi_request))
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
self.assertEqual(messages[0].message, 'File is corrupted. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
def test_system_importer_file_csv_upload_post_corrupted(self):
""" test importer view """
# login testuser
self.client.login(username='testuser_system_importer_file_csv_check_content_file_type', password='3oKsgNPVdlmNPneLhdr9')
# open upload file
systemcsv = open(os.path.join(BASE_DIR, 'dfirtrack_main/tests/system_importer/system_importer_file_csv_files/system_importer_file_csv_testfile_05_corrupted.csv'), 'r')
# create post data
data_dict = {
'systemcsv': systemcsv,
}
# create url
destination = urllib.parse.quote('/system/', safe='/')
# get response
response = self.client.post('/system/importer/file/csv/upload/', data_dict)
# get messages
messages = list(get_messages(response.wsgi_request))
# close file
systemcsv.close()
# compare
self.assertRedirects(response, destination, status_code=302, target_status_code=200)
self.assertEqual(messages[0].message, 'File is corrupted. Check config or file system!')
self.assertEqual(messages[0].level_tag, 'error')
| 48.4
| 177
| 0.706394
| 1,096
| 9,196
| 5.642336
| 0.09854
| 0.106404
| 0.110608
| 0.122251
| 0.904431
| 0.87694
| 0.875485
| 0.869664
| 0.861255
| 0.858991
| 0
| 0.012451
| 0.196498
| 9,196
| 189
| 178
| 48.656085
| 0.824469
| 0.105807
| 0
| 0.72
| 0
| 0
| 0.307503
| 0.184181
| 0
| 0
| 0
| 0
| 0.26
| 1
| 0.08
| false
| 0.11
| 0.49
| 0
| 0.58
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
|
0
| 9
|
bc782d896dbba37797c0a7082b71efd18104cd76
| 1,814
|
py
|
Python
|
tests/test_regex_character_class.py
|
mxdzi/hackerrank
|
4455f73e4479a4204b2e1167253f6a02351aa5b7
|
[
"MIT"
] | null | null | null |
tests/test_regex_character_class.py
|
mxdzi/hackerrank
|
4455f73e4479a4204b2e1167253f6a02351aa5b7
|
[
"MIT"
] | null | null | null |
tests/test_regex_character_class.py
|
mxdzi/hackerrank
|
4455f73e4479a4204b2e1167253f6a02351aa5b7
|
[
"MIT"
] | null | null | null |
from regex.character_class import *
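# Each test below stubs builtins.input via monkeypatch so the solution's main() reads a predetermined line,
# then captures stdout with capsys and compares it against the expected "true"/"false" output.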
def test_q1_matching_specific_characters(capsys, monkeypatch):
inputs = ["1203x."]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q1_matching_specific_characters.main()
captured = capsys.readouterr()
output = "true\n"
assert captured.out == output
inputs = ["3000s.."]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q1_matching_specific_characters.main()
captured = capsys.readouterr()
output = "false\n"
assert captured.out == output
inputs = ["13000u."]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q1_matching_specific_characters.main()
captured = capsys.readouterr()
output = "false\n"
assert captured.out == output
def test_q2_excluding_specific_characters(capsys, monkeypatch):
inputs = ["think?"]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q2_excluding_specific_characters.main()
captured = capsys.readouterr()
output = "true\n"
assert captured.out == output
def test_q3_matching_range_of_characters(capsys, monkeypatch):
inputs = ["h4CkR"]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q3_matching_range_of_characters.main()
captured = capsys.readouterr()
output = "true\n"
assert captured.out == output
inputs = ["h4CkRank"]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q3_matching_range_of_characters.main()
captured = capsys.readouterr()
output = "true\n"
assert captured.out == output
inputs = ["hh4CkRank"]
monkeypatch.setattr('builtins.input', lambda: inputs.pop(0))
q3_matching_range_of_characters.main()
captured = capsys.readouterr()
output = "false\n"
assert captured.out == output
| 28.34375
| 64
| 0.699008
| 208
| 1,814
| 5.913462
| 0.197115
| 0.102439
| 0.147967
| 0.176423
| 0.891057
| 0.802439
| 0.797561
| 0.786179
| 0.747967
| 0.747967
| 0
| 0.022044
| 0.174752
| 1,814
| 63
| 65
| 28.793651
| 0.799599
| 0
| 0
| 0.73913
| 0
| 0
| 0.105292
| 0
| 0
| 0
| 0
| 0
| 0.152174
| 1
| 0.065217
| false
| 0
| 0.021739
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
bc928216dd510919ffce92671c65957c9b1ac70a
| 60,037
|
py
|
Python
|
testSuite/operations_inputgen.py
|
ProjetEtudeMLFI/TensorFI
|
961a0205ec90935a238c58112e8119c34a70ba7c
|
[
"MIT"
] | 35
|
2018-10-28T22:41:31.000Z
|
2022-03-27T21:47:40.000Z
|
testSuite/operations_inputgen.py
|
bigmpc/TensorFI
|
3d75830ff202fdf98cf28acd842209ad7f6e6e2a
|
[
"MIT"
] | 28
|
2019-08-26T17:52:37.000Z
|
2021-06-13T01:04:00.000Z
|
testSuite/operations_inputgen.py
|
bigmpc/TensorFI
|
3d75830ff202fdf98cf28acd842209ad7f6e6e2a
|
[
"MIT"
] | 30
|
2018-11-08T02:52:06.000Z
|
2022-03-27T21:57:47.000Z
|
#!/usr/bin/python
# This file holds all the functions that generate test inputs for the operations in operations_runTests.py
# The inputgenMap table at the end of this file maps each operation supported by TensorFI to one of the functions defined here
# To add support for an operation to this test script, add the operation to inputgenMap
# If a function already exists that supports the input requirements of the new operation, you can map to that function; otherwise you must write a new function that specifies the input test cases
# Each inputgen function must return a list containing each set of inputs for the test (i.e., a list of lists)
# The returned inputs list should contain all the sets of inputs you wish to test for a specific operation
# NOTE: for reproducibility, random number generation should call random.seed() so the same numbers are produced on each run
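# Illustrative sketch only (an assumption, since the actual inputgenMap at the end of this file is not reproduced here):
# supporting a new op usually means either reusing an existing generator, roughly
#   inputgenMap["Add"] = inputgen_Add
# or writing a new inputgen_<Op>() that returns a list of input lists and mapping the new op to it.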
import tensorflow as tf
import random
import string
def inputgen_simple():
# basic inputgen function for example
# generates list of input tensor pairs of various shapes
# datatype: int
inputs = []
rand_ints = []
for x in range(0,100):
random.seed(x)
rand_ints.append(random.randint(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
random.seed(j)
input_y = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
inputs.append([input_x,input_y])
return inputs
def inputgen_Add():
# operations supported:
# tf.math.add( x, y, name=None )
# x: A Tensor. Must be one of the following types: bfloat16, half, float32, float64, uint8, int8, int16, int32, int64, complex64, complex128, string.
# y: A Tensor. Must have the same type as x.
# name: A name for the operation (optional).
# general approach: create tensors of varying shapes (both input shapes must match) filled with random constant numbers
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,100):
random.seed(x)
rand_ints.append(random.randint(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
random.seed(j)
input_y = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
inputs.append([input_x,input_y])
# datatype: float
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j)
input_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
inputs.append([input_x,input_y])
# datatype: complex
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
real_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(i*2)
imag_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_x = tf.complex(real_x,imag_x)
random.seed(j)
real_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j*2)
imag_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_y = tf.complex(real_y,imag_y)
inputs.append([input_x,input_y])
# datatype: string
rand_strings = []
for x in range(0,100):
random.seed(x)
N = 8 # size of random string
rand_strings.append(''.join(random.choice(string.ascii_letters + string.punctuation) for x in range(N)))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_strings, num_elements), shape=(i,j))
random.seed(j)
input_y = tf.constant(random.sample(rand_strings, num_elements), shape=(i,j))
inputs.append([input_x,input_y])
return inputs
def inputgen_Sub():
# operations supported:
# tf.math.subtract( x, y, name=None )
# tf.math.multiply( x, y, name=None )
# x: A Tensor. Must be one of the following types: bfloat16, half, float32, float64, uint8, int8, uint16, int16, int32, int64, complex64, complex128.
# y: A Tensor. Must have the same type as x.
# name: A name for the operation (optional).
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,100):
random.seed(x)
rand_ints.append(random.randint(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
random.seed(j)
input_y = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
inputs.append([input_x,input_y])
# datatype: float
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j)
input_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
inputs.append([input_x,input_y])
# datatype: complex
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
real_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(i*2)
imag_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_x = tf.complex(real_x,imag_x)
random.seed(j)
real_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j*2)
imag_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_y = tf.complex(real_y,imag_y)
inputs.append([input_x,input_y])
return inputs
def inputgen_Square():
# operations supported:
# tf.math.square( x, name=None )
# tf.shape( input, name=None, out_type=tf.dtypes.int32)
# tf.math.negative( x, name=None )
# x: A Tensor. Must be one of the following types: bfloat16, half, float32, float64, int32, int64, complex64, complex128.
# name: A name for the operation (optional).
# input: A Tensor or SparseTensor.
# out_type: (Optional) The specified output type of the operation (int32 or int64). Defaults to tf.int32.
# general approach: create single-input tensors of varying shapes filled with random constant numbers
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,100):
random.seed(x)
rand_ints.append(random.randint(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
inputs.append([input_x])
# datatype: float
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
inputs.append([input_x])
# datatype: complex
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
real_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j)
imag_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_x = tf.complex(real_x,imag_x)
inputs.append([input_x])
return inputs
def inputgen_Identity():
# operations supported:
# tf.identity( input, name=None )
# tf.size( input, name=None, out_type=tf.dtypes.int32 )
# tf.rank( input, name=None )
# input: A Tensor. (any type)
# name: A name for the operation (optional).
# out_type: (Optional) The specified non-quantized numeric output type of the operation. Defaults to tf.int32.
# general approach: create single-input tensors of varying shapes filled with random constant numbers
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,100):
random.seed(x)
rand_ints.append(random.randint(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
inputs.append([input_x])
# datatype: float
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
inputs.append([input_x])
# datatype: complex
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
real_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j)
imag_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_x = tf.complex(real_x,imag_x)
inputs.append([input_x])
# datatype: string
rand_strings = []
for x in range(0,100):
random.seed(x)
N = 8 # size of random string
rand_strings.append(''.join(random.choice(string.ascii_letters + string.punctuation) for x in range(N)))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_strings, num_elements), shape=(i,j))
inputs.append([input_x])
return inputs
def inputgen_Fill():
# operations supported:
# tf.fill( dims, value, name=None )
# dims: A Tensor. Must be one of the following types: int32, int64. 1-D. Represents the shape of the output tensor.
# value: A Tensor. 0-D (scalar). Value to fill the returned tensor.
# name: A name for the operation (optional).
# general approach: create 1-D dims tensors describing varying output shapes, each paired with a random scalar fill value
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,100):
random.seed(x)
rand_ints.append(random.randint(-100,100))
# create inputs
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
random.seed(i+j)
dims = tf.constant([i,j], dtype=tf.int32)
value = tf.constant(random.sample(rand_ints, 1), shape=[], dtype=tf.int32)
inputs.append([dims,value])
# datatype: float
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
random.seed(i+j)
dims = tf.constant([i,j], dtype=tf.int32)
value = tf.constant(random.sample(rand_floats, 1), shape=[], dtype=tf.float32)
inputs.append([dims,value])
# datatype: complex
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
dims = tf.constant([i,j], dtype=tf.int32)
random.seed(i)
val_real = tf.constant(random.sample(rand_floats, 1), shape=[], dtype=tf.float32)
random.seed(j)
val_imag = tf.constant(random.sample(rand_floats, 1), shape=[], dtype=tf.float32)
value = tf.complex(val_real,val_imag)
inputs.append([dims,value])
return inputs
def inputgen_FloorMod():
# operations supported:
# tf.math.floormod( x, y, name=None )
# x: A Tensor. Must be one of the following types: int32, int64, bfloat16, half, float32, float64.
# y: A Tensor. Must have the same type as x.
# name: A name for the operation (optional).
# general approach: create tensors of varying shapes (both input shapes must match) filled with random constant numbers
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,100):
random.seed(x)
rand_int = random.randint(-100,100)
if rand_int == 0:
rand_int = 1 # to avoid division by zero
rand_ints.append(rand_int)
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
random.seed(j)
input_y = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
inputs.append([input_x,input_y])
# datatype: float
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_float = random.uniform(-100,100)
if rand_float == 0.0:
rand_float = 1.0 # avoid division by zero
rand_floats.append(rand_float)
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j)
input_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
inputs.append([input_x,input_y])
return inputs
def inputgen_Range():
# operations supported:
# tf.range( start, limit, delta=1, dtype=None, name='range' )
# start: A 0-D Tensor (scalar). Acts as first entry in the range if limit is not None; otherwise, acts as range limit and first entry defaults to 0.
# limit: A 0-D Tensor (scalar). Upper limit of sequence, exclusive. If None, defaults to the value of start while the first entry of the range defaults to 0.
# delta: A 0-D Tensor (scalar). Number that increments start. Defaults to 1.
# dtype: The type of the elements of the resulting tensor.
# name: A name for the operation. Defaults to "range".
# general approach: create scalar start/limit pairs spanning a range of values, with a random positive delta for each pair
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
# create inputs
for i in range(-100,100,5):
for j in range(-100,100,5):
if i > j:
start = tf.constant(j, dtype=tf.int32)
limit = tf.constant(i, dtype=tf.int32)
elif j > i:
start = tf.constant(i, dtype=tf.int32)
limit = tf.constant(j, dtype=tf.int32)
else:
continue # i cannot equal j
random.seed(i+j)
delta = tf.constant( random.randint(1,10), dtype=tf.int32)
inputs.append([start,limit,delta])
# datatype: float
# create inputs
for i in range(-100,100,5):
for j in range(-100,100,5):
if i > j:
start = tf.constant(j, dtype=tf.float32)
limit = tf.constant(i, dtype=tf.float32)
elif j > i:
start = tf.constant(i, dtype=tf.float32)
limit = tf.constant(j, dtype=tf.float32)
else:
continue # i cannot equal j
random.seed(i+j)
delta = tf.constant( random.uniform(1,10), dtype=tf.float32)
inputs.append([start,limit,delta])
return inputs
def inputgen_MatMul():
# operations supported:
# tf.linalg.matmul( a, b, transpose_a=False, transpose_b=False, adjoint_a=False, adjoint_b=False, a_is_sparse=False, b_is_sparse=False, name=None )
# a: Tensor of type float16, float32, float64, int32, complex64, complex128 and rank > 1.
# b: Tensor with same type and rank as a.
# transpose_a: If True, a is transposed before multiplication.
# transpose_b: If True, b is transposed before multiplication.
# adjoint_a: If True, a is conjugated and transposed before multiplication.
# adjoint_b: If True, b is conjugated and transposed before multiplication.
# a_is_sparse: If True, a is treated as a sparse matrix.
# b_is_sparse: If True, b is treated as a sparse matrix.
# name: Name for the operation (optional).
# shape of input a is (i,j)
# shape of input b is (j,k)
# result is a*b with shape (i,k)
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,1000):
random.seed(x)
rand_ints.append(random.randint(-1000,1000))
for i in range(1,15):
for j in range(1,15):
for k in range(1,15):
# generate input a
num_elements_a = i * j
random.seed(num_elements_a)
input_a = tf.constant(random.sample(rand_ints, num_elements_a), shape=(i,j), dtype=tf.int32)
# generate input b
num_elements_b = j * k
random.seed(num_elements_b)
input_b = tf.constant(random.sample(rand_ints, num_elements_b), shape=(j,k), dtype=tf.int32)
inputs.append([input_a,input_b])
# datatype: float
rand_floats = []
for x in range(0,1000):
random.seed(x)
rand_floats.append(random.uniform(-1000,1000))
for i in range(1,15):
for j in range(1,15):
for k in range(1,15):
# generate input a
num_elements_a = i * j
random.seed(num_elements_a)
input_a = tf.constant(random.sample(rand_floats, num_elements_a), shape=(i,j), dtype=tf.float32)
# generate input b
num_elements_b = j * k
random.seed(num_elements_b)
input_b = tf.constant(random.sample(rand_floats, num_elements_b), shape=(j,k), dtype=tf.float32)
inputs.append([input_a,input_b])
# datatype: complex
rand_floats = []
for x in range(0,1000):
random.seed(x)
rand_floats.append(random.uniform(-1000,1000))
for i in range(1,15):
for j in range(1,15):
for k in range(1,15):
# generate input a
num_elements_a = i * j
random.seed(num_elements_a)
real_a = tf.constant(random.sample(rand_floats, num_elements_a), shape=(i,j), dtype=tf.float32)
random.seed(num_elements_a + i)
imag_a = tf.constant(random.sample(rand_floats, num_elements_a), shape=(i,j), dtype=tf.float32)
input_a = tf.complex(real_a,imag_a)
# generate input b
num_elements_b = j * k
random.seed(num_elements_b)
real_b = tf.constant(random.sample(rand_floats, num_elements_b), shape=(j,k), dtype=tf.float32)
random.seed(num_elements_b + k)
imag_b = tf.constant(random.sample(rand_floats, num_elements_b), shape=(j,k), dtype=tf.float32)
input_b = tf.complex(real_b,imag_b)
inputs.append([input_a,input_b])
return inputs
def inputgen_ArgMax():
# operations supported:
# tf.math.argmax( input, axis=None, name=None, dimension=None, output_type=tf.dtypes.int64 )
# tf.math.argmin( input, axis=None, name=None, dimension=None, output_type=tf.dtypes.int64 )
# input: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, complex64, int64, qint8, quint8, qint32, bfloat16, uint16, complex128, half, uint32, uint64.
# axis: A Tensor. Must be one of the following types: int32, int64. int32 or int64, must be in the range [-rank(input), rank(input)). Describes which axis of the input Tensor to reduce across. For vectors, use axis = 0.
# output_type: An optional tf.DType from: tf.int32, tf.int64. Defaults to tf.int64.
# name: A name for the operation (optional).
# general approach: create single-input tensors of varying shapes filled with random constant numbers, together with a random reduction axis
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,1000):
random.seed(x)
rand_ints.append(random.randint(-1000,1000))
# create inputs of different tensor shapes
for i in range(2,15):
for j in range(2,15):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
axis = tf.constant(random.randint(0,1), dtype=tf.int32)
inputs.append([input_x, axis])
# datatype: float
rand_floats = []
for x in range(0,1000):
random.seed(x)
rand_floats.append(random.uniform(-1000,1000))
# create inputs of different tensor shapes
for i in range(2,15):
for j in range(2,15):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
axis = tf.constant(random.randint(0,1), dtype=tf.int32)
inputs.append([input_x, axis])
# datatype: complex
if not tf.test.is_gpu_available():
# NOTE: must have a GPU to test the remaining inputs
return inputs
rand_floats = []
for x in range(0,1000):
random.seed(x)
rand_floats.append(random.uniform(-1000,1000))
# create inputs of different tensor shapes
for i in range(2,10):
for j in range(2,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
real_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j)
imag_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_x = tf.complex(real_x,imag_x)
axis = tf.constant(random.randint(0,1))
inputs.append([input_x, axis])
return inputs
def inputgen_Equal():
# operations supported:
# tf.math.equal( x, y, name=None )
# tf.math.not_equal( x, y, name=None )
# x: A Tensor. Must be one of the following types: bfloat16, half, float32, float64, uint8, int8, int16, int32, int64, complex64, quint8, qint8, qint32, string, bool, complex128.
# y: A Tensor. Must have the same type as x.
# name: A name for the operation (optional).
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,100):
random.seed(x)
rand_ints.append(random.randint(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
if random.choice([True,False]):
random.seed(j)
input_y = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
else:
input_y = input_x # reuse x so the equal case is exercised and input_y is always defined
inputs.append([input_x,input_y])
# datatype: float
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
if random.choice([True,False]):
random.seed(j)
input_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
else:
input_y = input_x # reuse x so the equal case is exercised and input_y is always defined
inputs.append([input_x,input_y])
# datatype: complex
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
real_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(i*2)
imag_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_x = tf.complex(real_x,imag_x)
if random.choice([True,False]):
random.seed(i)
real_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(i*2)
imag_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
else:
random.seed(j)
real_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j*2)
imag_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_y = tf.complex(real_y,imag_y)
inputs.append([input_x,input_y])
# datatype: string
rand_strings = []
for x in range(0,100):
random.seed(x)
N = 8 # size of random string
rand_strings.append(''.join(random.choice(string.ascii_letters + string.punctuation) for x in range(N)))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_strings, num_elements), shape=(i,j))
if random.choice([True,False]):
random.seed(j)
input_y = tf.constant(random.sample(rand_strings, num_elements), shape=(i,j))
else:
input_y = input_x # reuse x so the equal case is exercised and input_y is always defined
inputs.append([input_x,input_y])
# datatype: bool
rand_bools = []
for x in range(0,100):
random.seed(x)
rand_bools.append(random.choice([True,False]))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_bools, num_elements), shape=(i,j), dtype=tf.bool)
if random.choice([True,False]):
random.seed(j)
input_y = tf.constant(random.sample(rand_bools, num_elements), shape=(i,j), dtype=tf.bool)
else:
input_y = input_x # reuse x so the equal case is exercised and input_y is always defined
inputs.append([input_x,input_y])
return inputs
def inputgen_LessEqual():
# operations supported:
# tf.math.less_equal( x, y, name=None )
# x: A Tensor. Must be one of the following types: float32, float64, int32, uint8, int16, int8, int64, bfloat16, uint16, half, uint32, uint64.
# y: A Tensor. Must have the same type as x.
# name: A name for the operation (optional).
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,100):
random.seed(x)
rand_ints.append(random.randint(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
if random.choice([True,False]):
input_y = tf.constant(100, shape=(i,j), dtype=tf.int32)
else:
random.seed(j)
input_y = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
inputs.append([input_x,input_y])
# datatype: float
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
if random.choice([True,False]):
input_y = tf.constant(100.0, shape=(i,j), dtype=tf.float32)
else:
random.seed(j)
input_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
inputs.append([input_x,input_y])
return inputs
def inputgen_Cast():
# operations supported:
# tf.dtypes.cast( x, dtype, name=None )
# x: A Tensor or SparseTensor or IndexedSlices of numeric type. It could be uint8, uint16, uint32, uint64, int8, int16, int32, int64, float16, float32, float64, complex64, complex128, bfloat16.
# dtype: The destination type. The list of supported dtypes is the same as x.
# name: A name for the operation (optional).
# general approach: create input tensors of varying shapes filled with random constant numbers, each paired with a randomly chosen destination dtype
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
dtypes = [tf.uint8, tf.uint16, tf.uint32, tf.uint64, tf.int8, tf.int16, tf.int32, tf.int64, tf.float16, tf.float32, tf.float64, tf.complex64, tf.complex128, tf.bfloat16]
# datatype: int
rand_ints = []
for x in range(0,100):
random.seed(x)
rand_ints.append(random.randint(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
dtype = random.choice(dtypes)
inputs.append([input_x, dtype])
# datatype: float
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
dtype = random.choice(dtypes)
inputs.append([input_x, dtype])
# datatype: complex
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
real_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j)
imag_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_x = tf.complex(real_x,imag_x)
dtype = random.choice(dtypes)
inputs.append([input_x, dtype])
return inputs
def inputgen_Mean():
# operations supported:
# tf.math.reduce_mean( input_tensor, axis=None, keepdims=None, name=None, reduction_indices=None, keep_dims=None )
# input_tensor: The tensor to reduce. Should have numeric type.
# axis: The dimensions to reduce. If None (the default), reduces all dimensions. Must be in the range [-rank(input_tensor), rank(input_tensor)).
# keepdims: If true, retains reduced dimensions with length 1.
# name: A name for the operation (optional).
# reduction_indices: The old (deprecated) name for axis.
# keep_dims: Deprecated alias for keepdims.
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,1000):
random.seed(x)
rand_ints.append(random.randint(-1000,1000))
# create inputs of different tensor shapes
for i in range(2,15):
for j in range(2,15):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
axis = tf.constant(random.randint(0,1), dtype=tf.int32)
inputs.append([input_x, axis])
# datatype: float
rand_floats = []
for x in range(0,1000):
random.seed(x)
rand_floats.append(random.uniform(-1000,1000))
# create inputs of different tensor shapes
for i in range(2,15):
for j in range(2,15):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
axis = tf.constant(random.randint(0,1), dtype=tf.int32)
inputs.append([input_x, axis])
return inputs
def inputgen_NonZero():
# operations supported:
# tf.math.count_nonzero( input_tensor=None, axis=None, keepdims=None, dtype=tf.dtypes.int64, name=None, reduction_indices=None, keep_dims=None, input=None )
# input_tensor: The tensor to reduce. Should be of numeric type, bool, or string.
# axis: The dimensions to reduce. If None (the default), reduces all dimensions. Must be in the range [-rank(input_tensor), rank(input_tensor)).
# keepdims: If true, retains reduced dimensions with length 1.
# dtype: The output dtype; defaults to tf.int64.
# name: A name for the operation (optional).
# reduction_indices: The old (deprecated) name for axis.
# keep_dims: Deprecated alias for keepdims.
# input: Overrides input_tensor. For compatibility.
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,100):
random.seed(x)
if random.choice([True,False]):
rand_ints.append(int(0))
else:
rand_ints.append(random.randint(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
axis = tf.constant(random.randint(0,1), dtype=tf.int32)
inputs.append([input_x, axis])
# datatype: float
rand_floats = []
for x in range(0,100):
random.seed(x)
if random.choice([True,False]):
rand_floats.append(float(0.0))
else:
rand_floats.append(random.uniform(-100.0,100.0))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
axis = tf.constant(random.randint(0,1), dtype=tf.int32)
inputs.append([input_x, axis])
# datatype: complex
rand_floats = []
for x in range(0,100):
random.seed(x)
if random.choice([True,False]):
rand_floats.append(float(0.0))
else:
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
real_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j)
imag_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_x = tf.complex(real_x,imag_x)
axis = tf.constant(random.randint(0,1), dtype=tf.int32)
inputs.append([input_x, axis])
# datatype: string
rand_strings = []
N = 8 # size of random string
for x in range(0,100):
random.seed(x)
if random.choice([True,False]):
rand_strings.append('')
else:
rand_strings.append(''.join(random.choice(string.ascii_letters + string.punctuation) for x in range(N)))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_strings, num_elements), shape=(i,j))
axis = tf.constant(random.randint(0,1), dtype=tf.int32)
inputs.append([input_x,axis])
return inputs
def inputgen_Reshape():
# operations supported:
# tf.reshape( tensor, shape, name=None )
# tensor: A Tensor.
# shape: A Tensor. Must be one of the following types: int32, int64. Defines the shape of the output tensor.
# name: A name for the operation (optional).
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,1000):
random.seed(x)
rand_ints.append(random.randint(-100,100))
# create inputs of different tensor shapes
for i in range(2,15):
for j in range(2,15):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
for k in reversed(range(1,20)):
if num_elements % k == 0:
shape = tf.constant([k,int(num_elements / k)],dtype=tf.int32)
if (k,int(num_elements/k)) != (i,j):
break
inputs.append([input_x,shape])
# datatype: float
rand_floats = []
for x in range(0,1000):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(2,15):
for j in range(2,15):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
for k in reversed(range(1,20)):
if num_elements % k == 0:
shape = tf.constant([k,int(num_elements / k)],dtype=tf.int32)
if (k,int(num_elements/k)) != (i,j):
break
inputs.append([input_x,shape])
# datatype: string
rand_strings = []
for x in range(0,1000):
random.seed(x)
N = 8 # size of random string
rand_strings.append(''.join(random.choice(string.ascii_letters + string.punctuation) for x in range(N)))
# create inputs of different tensor shapes
for i in range(2,15):
for j in range(2,15):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_strings, num_elements), shape=(i,j))
for k in reversed(range(1,20)):
if num_elements % k == 0:
shape = tf.constant([k,int(num_elements / k)],dtype=tf.int32)
if (k,int(num_elements/k)) != (i,j):
break
inputs.append([input_x,shape])
return inputs
def inputgen_Max():
# operations supported:
# tf.math.maximum( x, y, name=None )
# tf.math.minimum( x, y, name=None )
# tf.math.greater( x, y, name=None )
# x: A Tensor. Must be one of the following types: bfloat16, half, float32, float64, int32, int64.
# y: A Tensor. Must have the same type as x.
# name: A name for the operation (optional).
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,100):
random.seed(x)
rand_ints.append(random.randint(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
random.seed(j)
input_y = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
inputs.append([input_x,input_y])
# datatype: float
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j)
input_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
inputs.append([input_x,input_y])
return inputs
def inputgen_Switch():
# operations supported:
# tf.keras.backend.switch( condition, then_expression, else_expression )
# condition: tensor (int or bool).
# then_expression: either a tensor, or a callable that returns a tensor.
# else_expression: either a tensor, or a callable that returns a tensor.
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,100):
random.seed(x)
rand_ints.append(random.randint(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
random.seed(j)
input_y = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
condition = tf.constant(random.choice([True,False]), dtype=tf.bool)
inputs.append([condition,input_x,input_y])
# datatype: float
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j)
input_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
condition = tf.constant(random.choice([True,False]), dtype=tf.bool)
inputs.append([condition,input_x,input_y])
# datatype: complex
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
real_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(i*2)
imag_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_x = tf.complex(real_x,imag_x)
random.seed(j)
real_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j*2)
imag_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_y = tf.complex(real_y,imag_y)
condition = tf.constant(random.choice([True,False]), dtype=tf.bool)
inputs.append([condition,input_x,input_y]) # include the condition tensor, matching the other datatype branches
# datatype: string
rand_strings = []
for x in range(0,100):
random.seed(x)
N = 8 # size of random string
rand_strings.append(''.join(random.choice(string.ascii_letters + string.punctuation) for x in range(N)))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_strings, num_elements), shape=(i,j))
random.seed(j)
input_y = tf.constant(random.sample(rand_strings, num_elements), shape=(i,j))
condition = tf.constant(random.choice([True,False]), dtype=tf.bool)
inputs.append([condition,input_x,input_y])
return inputs
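# Illustrative sketch (not part of the original script): a minimal way to exercise the Switch
# inputs generated above, assuming tf.keras.backend.switch is the targeted op and the same
# TF 1.x Session style used by the rest of this test harness.
def _demo_switch_inputs():
    # take the first generated (condition, then, else) triple and run it through switch
    condition, then_t, else_t = inputgen_Switch()[0]
    out = tf.keras.backend.switch(condition, then_t, else_t)
    with tf.Session() as sess:
        return sess.run(out)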
def inputgen_Pow():
# operations supported:
# tf.math.pow( x, y, name=None )
# x: A Tensor of type float16, float32, float64, int32, int64, complex64, or complex128.
# y: A Tensor of type float16, float32, float64, int32, int64, complex64, or complex128.
# name: A name for the operation (optional).
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
rand_ints_pos = []
for x in range(0,100):
random.seed(x)
rand_ints.append(random.randint(-100,100))
rand_ints_pos.append(random.randint(0,20))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
random.seed(j)
input_y = tf.constant(random.sample(rand_ints_pos, num_elements), shape=(i,j), dtype=tf.int32)
inputs.append([input_x,input_y])
return inputs # return before float inputs (instrumented output is different from original with float inputs)
# datatype: float
rand_floats = []
rand_floats_pos = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-50,50))
rand_floats_pos.append(random.uniform(0,20))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j)
input_y = tf.constant(random.sample(rand_floats_pos, num_elements), shape=(i,j), dtype=tf.float32)
inputs.append([input_x,input_y])
# datatype: complex
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
real_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(i*2)
imag_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_x = tf.complex(real_x,imag_x)
random.seed(j)
real_y = tf.constant(random.sample(rand_floats_pos, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j*2)
imag_y = tf.constant(random.sample(rand_floats_pos, num_elements), shape=(i,j), dtype=tf.float32)
input_y = tf.complex(real_y,imag_y)
inputs.append([input_x,input_y])
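# Illustrative sketch (not part of the original script): exercising tf.math.pow on the first
# generated pair (int32 base, non-negative int32 exponent), again assuming a TF 1.x Session.
def _demo_pow_inputs():
    base, exponent = inputgen_Pow()[0]
    out = tf.math.pow(base, exponent)
    with tf.Session() as sess:
        return sess.run(out)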
def inputgen_RealDiv():
# operations supported:
# tf.realdiv( x, y, name=None )
# x: A Tensor. Must be one of the following types: bfloat16, half, float32, float64, uint8, int8, uint16, int16, int32, int64, complex64, complex128.
# y: A Tensor. Must have the same type as x.
# name: A name for the operation (optional).
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: float
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_float = random.uniform(-100,100)
while rand_float == 0.0: # avoid divide by zero
rand_float = random.uniform(-100,100)
rand_floats.append(rand_float)
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j)
input_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
inputs.append([input_x,input_y])
# datatype: complex
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
real_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(i*2)
imag_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_x = tf.complex(real_x,imag_x)
random.seed(j)
real_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
random.seed(j*2)
imag_y = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
input_y = tf.complex(real_y,imag_y)
inputs.append([input_x,input_y])
return inputs # NOTE: skips the integer inputs because they throw an error
# datatype: int
rand_ints = []
for x in range(0,100):
random.seed(x)
rand_int = random.randint(-100,100)
while rand_int == 0: # avoid divide by zero
rand_int = random.randint(-100,100)
rand_ints.append(rand_int)
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
random.seed(j)
input_y = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
inputs.append([input_x,input_y])
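# Illustrative sketch (not part of the original script): tf.realdiv on the first generated
# float32 pair; the generator above already excludes zero divisors. A TF 1.x Session is assumed.
def _demo_realdiv_inputs():
    numerator, denominator = inputgen_RealDiv()[0]
    out = tf.realdiv(numerator, denominator)
    with tf.Session() as sess:
        return sess.run(out)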
def inputgen_Abs():
# operations supported:
# tf.math.abs( x, name=None )
# x: A Tensor or SparseTensor of type float16, float32, float64, int32, int64, complex64 or complex128.
# name: A name for the operation (optional).
inputs = [] # each item in this list is a set of inputs passed to a create_op() in the main script
# datatype: int
rand_ints = []
for x in range(0,100):
random.seed(x)
rand_ints.append(random.randint(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_ints, num_elements), shape=(i,j), dtype=tf.int32)
inputs.append([input_x])
# datatype: float
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
inputs.append([input_x])
return inputs
def inputgen_Tanh():
# operations supported:
# tf.math.tanh( x, name=None )
# x: A Tensor. Must be one of the following types: bfloat16, half, float32, float64, complex64, complex128.
# name: A name for the operation (optional).
inputs = []
# datatype: float
rand_floats = []
for x in range(0,100):
random.seed(x)
rand_floats.append(random.uniform(-100,100))
# create inputs of different tensor shapes
for i in range(1,10):
for j in range(1,10):
# shape of tensor is (i,j)
num_elements = i * j
random.seed(i+j)
input_x = tf.constant(random.sample(rand_floats, num_elements), shape=(i,j), dtype=tf.float32)
inputs.append([input_x])
return inputs
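# Illustrative sketch (not part of the original script): the unary generators (inputgen_Abs,
# inputgen_Tanh) each yield single-tensor input sets, so they can be checked the same way,
# assuming the usual TF 1.x Session.
def _demo_unary_inputs():
    (abs_x,) = inputgen_Abs()[0]
    (tanh_x,) = inputgen_Tanh()[0]
    with tf.Session() as sess:
        return sess.run([tf.math.abs(abs_x), tf.math.tanh(tanh_x)])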
# This table is used to store all of the operations that will be tested by operations_runTests.py
# By default this should contain all the operations currently supported by TensorFI
# When implementing a new operation in TensorFI, add an entry to the list below for that operation
# Each dictionary entry is in the following form
# op_type: inputgen_function
# op_type: (String) The op_type that can be passed to the tf.create_op(op_type, inputs) function, i.e., the "type" property of the Operation object (op.type); it should be the same as the opTable entry in injectFault.py
# inputgen_function: (Function) The function that is called to generate the set of test inputs. It depends on the types of inputs the operation supports (refer to the TensorFlow documentation for each operation). Try to re-use functions from other operations if they fit.
inputgenMap = {
"Identity": inputgen_Identity,
"Add": inputgen_Add,
"Sub": inputgen_Sub,
"Mul": inputgen_Sub,
"Square": inputgen_Square,
"Shape": inputgen_Square,
"Size": inputgen_Identity,
"Fill": inputgen_Fill,
"FloorMod": inputgen_FloorMod,
"Range": inputgen_Range,
"Rank": inputgen_Identity,
"MatMul": inputgen_MatMul,
"ArgMax": inputgen_ArgMax,
"ArgMin": inputgen_ArgMax,
"Equal": inputgen_Equal,
"NotEqual": inputgen_Equal,
"LessEqual": inputgen_LessEqual,
"Mean": inputgen_Mean,
"Reshape": inputgen_Reshape,
"Maximum": inputgen_Max,
"Minimum": inputgen_Max,
"Greater": inputgen_Max,
"Neg": inputgen_Square,
"RealDiv": inputgen_RealDiv,
"Abs": inputgen_Abs,
"Tanh": inputgen_Tanh,
#"Assign": ,
#"Rsqrt": ,
#"Log": ,
#"Conv2D": ,
#"Relu": ,
#"MaxPool": ,
#"Softmax": ,
#"ExpandDims": ,
#"BiasAdd": ,
#"Sigmoid": ,
#"Pack": ,
#"Sum": ,
#"Unpack": ,
#"Pow": inputgen_Pow, # NOTE: operation fails when using input tensors of type float (instrumented graph outputs are different)
#"Count_nonzero": inputgen_NonZero, # NOTE: this returns an error, seems like "Count_nonzero" is not a valid op name for tf.math.count_nonzero. Need to look into this
#"Switch": inputgen_Switch, # NOTE: returns an error. We assume "Switch" refers to tf.keras.backend.switch but that may be incorrect
#"Cast": inputgen_Cast, # NOTE: this raises an exception, apparently cannot pass the dtype parameter to create_op(), must figure out a way around this
"end_of_ops": None # placeholder for end of list
}
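# Illustrative sketch (assumed, not the actual operations_runTests.py driver): one way a test
# runner could walk inputgenMap, call each input generator, and report how many input sets
# were produced for each supported op.
def _demo_list_supported_ops():
    for op_type, inputgen in inputgenMap.items():
        if inputgen is None:  # skip the "end_of_ops" placeholder
            continue
        test_inputs = inputgen()
        print("%s: %d input sets generated" % (op_type, len(test_inputs)))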
bc9eebdb79b6328a66dbf47845f5b05ceb967331 | 9,209 | py | Python | tests/image/test_image_supervised.py | francotheengineer/autokeras | d873aa41226b958004c3ff1e5694912b9fad10e1 | ["MIT"] | 1 | 2019-01-25T02:20:37.000Z | 2019-01-25T02:20:37.000Z | tests/image/test_image_supervised.py | dspshin/autokeras | eac91bad8a90f78a68933992cc1ff4b7df4ee30f | ["MIT"] | 1 | 2018-12-09T16:46:30.000Z | 2018-12-09T16:46:30.000Z | tests/image/test_image_supervised.py | dspshin/autokeras | eac91bad8a90f78a68933992cc1ff4b7df4ee30f | ["MIT"] | 2 | 2018-11-12T19:43:31.000Z | 2018-11-26T08:14:32.000Z |
from unittest.mock import patch
import pytest
from autokeras.image.image_supervised import *
from tests.common import clean_dir, MockProcess, simple_transform, mock_train, TEST_TEMP_DIR
def test_train_x_array_exception():
clf = ImageClassifier()
with pytest.raises(Exception) as info:
clf.fit(15, [])
assert str(info.value) == 'x_train should at least has 2 dimensions.'
def test_xy_dim_exception():
clf = ImageClassifier()
with pytest.raises(Exception) as info:
clf.fit([[1, 2], [3, 4]], [6, 7, 8])
assert str(info.value) == 'x_train and y_train should have the same number of instances.'
def test_x_float_exception():
clf = ImageClassifier()
with pytest.raises(Exception) as info:
clf.fit([[1, 'abc'], [3, 4]], [7, 8])
assert str(info.value) == 'x_train should only contain numerical data.'
@patch('torch.multiprocessing.get_context', side_effect=MockProcess)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_fit_predict(_, _1):
Constant.MAX_ITER_NUM = 1
Constant.MAX_MODEL_NUM = 4
Constant.SEARCH_MAX_ITER = 1
Constant.T_MIN = 0.8
Constant.DATA_AUGMENTATION = False
clf = ImageClassifier(path=TEST_TEMP_DIR, verbose=True)
train_x = np.random.rand(100, 25, 25, 1)
train_y = np.random.randint(0, 5, 100)
clf.fit(train_x, train_y)
results = clf.predict(train_x)
assert all(map(lambda result: result in train_y, results))
clf = ImageClassifier1D(path=TEST_TEMP_DIR, verbose=True)
train_x = np.random.rand(100, 25, 1)
train_y = np.random.randint(0, 5, 100)
clf.fit(train_x, train_y)
results = clf.predict(train_x)
assert all(map(lambda result: result in train_y, results))
clf = ImageClassifier3D(path=TEST_TEMP_DIR, verbose=True)
train_x = np.random.rand(100, 25, 25, 25, 1)
train_y = np.random.randint(0, 5, 100)
clf.fit(train_x, train_y)
results = clf.predict(train_x)
assert all(map(lambda result: result in train_y, results))
clf = ImageRegressor1D(path=TEST_TEMP_DIR, verbose=True)
train_x = np.random.rand(100, 25, 1)
train_y = np.random.randint(0, 5, 100)
clf.fit(train_x, train_y)
results = clf.predict(train_x)
assert len(results) == len(train_y)
clf = ImageRegressor3D(path=TEST_TEMP_DIR, verbose=True)
train_x = np.random.rand(100, 25, 25, 25, 1)
train_y = np.random.randint(0, 5, 100)
clf.fit(train_x, train_y)
results = clf.predict(train_x)
assert len(results) == len(train_y)
clean_dir(TEST_TEMP_DIR)
@patch('torch.multiprocessing.get_context', side_effect=MockProcess)
def test_timeout(_):
# Constant.MAX_MODEL_NUM = 4
Constant.SEARCH_MAX_ITER = 1000
Constant.T_MIN = 0.0001
Constant.DATA_AUGMENTATION = False
clean_dir(TEST_TEMP_DIR)
clf = ImageClassifier(path=TEST_TEMP_DIR, verbose=False)
train_x = np.random.rand(100, 25, 25, 1)
train_y = np.random.randint(0, 5, 100)
with pytest.raises(TimeoutError):
clf.fit(train_x, train_y, time_limit=0)
clean_dir(TEST_TEMP_DIR)
@patch('torch.multiprocessing.get_context', side_effect=MockProcess)
# @patch('autokeras.bayesian.transform', side_effect=simple_transform)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_final_fit(_, _2):
Constant.LIMIT_MEMORY = True
clean_dir(TEST_TEMP_DIR)
clf = ImageClassifier(path=TEST_TEMP_DIR, verbose=False)
Constant.MAX_ITER_NUM = 1
Constant.MAX_MODEL_NUM = 1
Constant.SEARCH_MAX_ITER = 1
Constant.N_NEIGHBOURS = 1
Constant.T_MIN = 0.8
train_x = np.random.rand(100, 25, 25, 1)
train_y = np.random.randint(0, 5, 100)
test_x = np.random.rand(100, 25, 25, 1)
test_y = np.random.randint(0, 5, 100)
clf.fit(train_x, train_y)
clf.final_fit(train_x, train_y, test_x, test_y)
results = clf.predict(test_x)
assert len(results) == 100
clean_dir(TEST_TEMP_DIR)
@patch('torch.multiprocessing.get_context', side_effect=MockProcess)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_save_continue(_, _1):
Constant.MAX_ITER_NUM = 1
Constant.MAX_MODEL_NUM = 1
Constant.SEARCH_MAX_ITER = 1
Constant.T_MIN = 0.8
train_x = np.random.rand(100, 25, 25, 1)
train_y = np.random.randint(0, 5, 100)
test_x = np.random.rand(100, 25, 25, 1)
clean_dir(TEST_TEMP_DIR)
clf = ImageClassifier(path=TEST_TEMP_DIR, verbose=False, resume=False)
clf.n_epochs = 100
clf.fit(train_x, train_y)
assert len(clf.cnn.searcher.history) == 1
Constant.MAX_MODEL_NUM = 2
clf = ImageClassifier(verbose=False, path=TEST_TEMP_DIR, resume=True)
clf.fit(train_x, train_y)
results = clf.predict(test_x)
assert len(results) == 100
assert len(clf.cnn.searcher.history) == 2
Constant.MAX_MODEL_NUM = 1
clf = ImageClassifier(verbose=False, path=TEST_TEMP_DIR, resume=False)
clf.fit(train_x, train_y)
results = clf.predict(test_x)
assert len(results) == 100
assert len(clf.cnn.searcher.history) == 1
clean_dir(TEST_TEMP_DIR)
@patch('torch.multiprocessing.get_context', side_effect=MockProcess)
@patch('autokeras.bayesian.transform', side_effect=simple_transform)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_fit_csv_file(_, _1, _2):
Constant.MAX_ITER_NUM = 1
Constant.MAX_MODEL_NUM = 1
Constant.SEARCH_MAX_ITER = 1
path = 'tests/resources'
clf = ImageClassifier(verbose=False, path=os.path.join(path, "temp"), resume=False)
x_train, y_train = load_image_dataset(csv_file_path=os.path.join(path, "images_test/images_name.csv"),
images_path=os.path.join(path, "images_test/Color_images"))
clf.fit(x_train, y_train)
x_test, y_test = load_image_dataset(csv_file_path=os.path.join(path, "images_test/images_name.csv"),
images_path=os.path.join(path, "images_test/Color_images"))
results = clf.predict(x_test)
assert len(clf.cnn.searcher.history) == 1
assert len(results) == 5
clean_dir(os.path.join(path, "temp"))
@patch('torch.multiprocessing.get_context', side_effect=MockProcess)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_fit_predict_regression(_, _1):
Constant.MAX_ITER_NUM = 1
Constant.MAX_MODEL_NUM = 4
Constant.SEARCH_MAX_ITER = 1
Constant.T_MIN = 0.8
Constant.DATA_AUGMENTATION = False
clean_dir(TEST_TEMP_DIR)
clf = ImageRegressor(path=TEST_TEMP_DIR, verbose=False)
train_x = np.random.rand(100, 25, 25, 1)
train_y = np.random.randint(0, 5, 100)
clf.fit(train_x, train_y)
results = clf.predict(train_x)
assert len(results) == len(train_x)
clean_dir(TEST_TEMP_DIR)
@patch('torch.multiprocessing.get_context', side_effect=MockProcess)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_export_keras_model(_, _1):
Constant.MAX_ITER_NUM = 1
Constant.MAX_MODEL_NUM = 1
Constant.SEARCH_MAX_ITER = 1
Constant.T_MIN = 0.8
train_x = np.random.rand(100, 25, 25, 1)
train_y = np.random.randint(0, 5, 100)
test_x = np.random.rand(100, 25, 25, 1)
clean_dir(TEST_TEMP_DIR)
clf = ImageClassifier(path=TEST_TEMP_DIR, verbose=False, resume=False)
clf.n_epochs = 100
clf.fit(train_x, train_y)
score = clf.evaluate(train_x, train_y)
assert score <= 1.0
model_file_name = os.path.join(TEST_TEMP_DIR, 'test_keras_model.graph')
clf.export_keras_model(model_file_name)
from keras.models import load_model
model = load_model(model_file_name)
results = model.predict(test_x)
assert len(results) == len(test_x)
del model, results, model_file_name
model_file_name = os.path.join(TEST_TEMP_DIR, 'test_autokeras_model.pkl')
clf.export_autokeras_model(model_file_name)
from autokeras.utils import pickle_from_file
model = pickle_from_file(model_file_name)
results = model.predict(test_x)
assert len(results) == len(test_x)
score = model.evaluate(train_x, train_y)
assert score <= 1.0
before = model.graph
model.fit(train_x, train_y, train_x, train_y)
assert model.graph == before
clean_dir(TEST_TEMP_DIR)
clf = ImageRegressor(path=TEST_TEMP_DIR, verbose=False, resume=False)
clf.n_epochs = 100
clf.fit(train_x, train_y)
score = clf.evaluate(train_x, train_y)
assert score >= 0.0
model_file_name = os.path.join(TEST_TEMP_DIR, 'test_keras_model.graph')
clf.export_keras_model(model_file_name)
from keras.models import load_model
model = load_model(model_file_name)
results = model.predict(test_x)
assert len(results) == len(test_x)
del model, results, model_file_name
model_file_name = os.path.join(TEST_TEMP_DIR, 'test_autokeras_model.pkl')
clf.export_autokeras_model(model_file_name)
from autokeras.utils import pickle_from_file
model = pickle_from_file(model_file_name)
results = model.predict(test_x)
assert len(results) == len(test_x)
score = model.evaluate(train_x, train_y)
assert score >= 0.0
clean_dir(TEST_TEMP_DIR)
bcb666c2a3bd9965f2316a6d0b86f11c19f25636 | 98,188 | py | Python | scripts/exact_solution.py | srio/report_diaboloid_resources | 1c9013a397b8bb5a6d164affafb2670864f727e1 | ["MIT"] | null | null | null | scripts/exact_solution.py | srio/report_diaboloid_resources | 1c9013a397b8bb5a6d164affafb2670864f727e1 | ["MIT"] | null | null | null | scripts/exact_solution.py | srio/report_diaboloid_resources | 1c9013a397b8bb5a6d164affafb2670864f727e1 | ["MIT"] | null | null | null |
"""
Height == Re[-Sec[θ] (-tt Sin[θ]-r1 Sin[2 θ]+r2 Tan[θ]+r1 Cos[2 θ] Tan[θ])-1/2 \[Sqrt](2/3 Sec[θ]4 (-2 r1 r2 Cos[θ]2-2 r22 Cos[θ]2+2 r2 tt Cos[θ]3-2 r12 Cos[θ]2 Cos[2 θ]+2 r1 tt Cos[θ]3 Cos[2 θ]+2 r12 Cos[θ]2 Cos[2 θ]2-2 r12 Sin[θ]2-4 r1 r2 Sin[θ]2-4 r2 tt Cos[θ] Sin[θ]2+3 tt2 Cos[θ]2 Sin[θ]2+4 r1 r2 Cos[2 θ] Sin[θ]2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]2+2 r12 Cos[2 θ]2 Sin[θ]2-4 r12 Cos[θ]2 Sin[θ]4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]2 Sin[θ] Sin[2 θ]-4 r12 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r12 Cos[θ]2 Sin[2 θ]2)+(2 21/3 (12 r13 r2+59 r12 r22+2 r1 r23+3 r24-4 r12 tt2-8 r1 r2 tt2+4 r22 tt2-9 r12 x2-18 r1 r2 x2-9 r22 x2-12 r13 tt Cos[θ]-20 r12 r2 tt Cos[θ]+12 r1 r22 tt Cos[θ]-12 r23 tt Cos[θ]-60 r12 r22 Cos[2 θ]+8 r1 r23 Cos[2 θ]+4 r24 Cos[2 θ]+8 r1 r2 tt2 Cos[2 θ]+4 r22 tt2 Cos[2 θ]-12 r12 x2 Cos[2 θ]-24 r1 r2 x2 Cos[2 θ]-12 r22 x2 Cos[2 θ]+6 r13 tt Cos[3 θ]+16 r12 r2 tt Cos[3 θ]-42 r1 r22 tt Cos[3 θ]-4 r23 tt Cos[3 θ]-12 r13 r2 Cos[4 θ]+9 r12 r22 Cos[4 θ]+6 r1 r23 Cos[4 θ]+r24 Cos[4 θ]+8 r12 tt2 Cos[4 θ]+16 r1 r2 tt2 Cos[4 θ]-3 r12 x2 Cos[4 θ]-6 r1 r2 x2 Cos[4 θ]-3 r22 x2 Cos[4 θ]+6 r13 tt Cos[5 θ]-12 r12 r2 tt Cos[5 θ]-2 r1 r22 tt Cos[5 θ]+4 r12 tt2 Cos[6 θ]) Sec[θ]4)/(3 (16 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^44 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^3-288 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+432 Cos[θ]^4 (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 
Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)^2+432 Cos[θ]^4 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ])^2 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^28 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ]
Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)-144 Cos[θ]^4 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)+\[Sqrt](-4 (4 (-2 r1 r2 Cos[θ]^22 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^2-48 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt 
Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+12 Cos[θ]^4 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4))^3+(16 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^3-288 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ]
Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+432 Cos[θ]^4 (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)^2+432 Cos[θ]^4 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ])^2 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^28 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 
θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)-144 Cos[θ]^4 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4))^2))1/3)+1/(3 21/3) Sec[θ]4 (16 (2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ]
Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^3-288 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+432 Cos[θ]^4 (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)^2+432 Cos[θ]^4 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ])^2 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^28 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] 
Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)-144 Cos[θ]^4 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)+\[Sqrt](-4 (4 (-2 r1 r2 Cos[θ]^22 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2
Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^2-48 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+12 Cos[θ]^4 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4))^3+(16 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^3-288 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] 
Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+432 Cos[θ]^4 (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)^2+432 Cos[θ]^4 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ])^2 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^28 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt
Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)-144 Cos[θ]^4 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4))^2))1/3+4 Sec[θ]2 (-tt Sin[θ]-r1 Sin[2 θ]+r2 Tan[θ]+r1 Cos[2 θ] Tan[θ])2-2 Sec[θ] (2 r2 tt+2 r1 tt Cos[2 θ]-2 r1 r2 Sec[θ]-2 r22 Sec[θ]-2 r12 Cos[2 θ] Sec[θ]+2 r12 Cos[2 θ]2 Sec[θ]+3 r12 Sec[θ] Sin[2 θ]2+3 tt2 Sin[θ] Tan[θ]-4 r12 Sin[θ]3 Tan[θ]+6 r1 tt Sin[2 θ] Tan[θ]-4 r1 r2 Sec[θ] Sin[2 θ] Tan[θ]-4 r12 Cos[2 θ] Sec[θ] Sin[2 θ] Tan[θ]-4 r2 tt Tan[θ]2-4 r1 tt Cos[2 θ] Tan[θ]2-2 r12 Sec[θ] Tan[θ]2-4 r1 r2 Sec[θ] Tan[θ]2+4 r1 r2 Cos[2 θ] Sec[θ] Tan[θ]2+2 r12 Cos[2 θ]2 Sec[θ] Tan[θ]2))+1/2 \[Sqrt](-(2/3) Sec[θ]4 (-2 
r1 r2 Cos[θ]2-2 r22 Cos[θ]2+2 r2 tt Cos[θ]3-2 r12 Cos[θ]2 Cos[2 θ]+2 r1 tt Cos[θ]3 Cos[2 θ]+2 r12 Cos[θ]2 Cos[2 θ]2-2 r12 Sin[θ]2-4 r1 r2 Sin[θ]2-4 r2 tt Cos[θ] Sin[θ]2+3 tt2 Cos[θ]2 Sin[θ]2+4 r1 r2 Cos[2 θ] Sin[θ]2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]2+2 r12 Cos[2 θ]2 Sin[θ]24 r12 Cos[θ]2 Sin[θ]4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]2 Sin[θ] Sin[2 θ]-4 r12 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r12 Cos[θ]2 Sin[2 θ]2)-(2 21/3 (12 r13 r2+59 r12 r22+2 r1 r23+3 r24-4 r12 tt2-8 r1 r2 tt2+4 r22 tt2-9 r12 x2-18 r1 r2 x2-9 r22 x2-12 r13 tt Cos[θ]-20 r12 r2 tt Cos[θ]+12 r1 r22 tt Cos[θ]-12 r23 tt Cos[θ]-60 r12 r22 Cos[2 θ]+8 r1 r23 Cos[2 θ]+4 r24 Cos[2 θ]+8 r1 r2 tt2 Cos[2 θ]+4 r22 tt2 Cos[2 θ]-12 r12 x2 Cos[2 θ]-24 r1 r2 x2 Cos[2 θ]-12 r22 x2 Cos[2 θ]+6 r13 tt Cos[3 θ]+16 r12 r2 tt Cos[3 θ]-42 r1 r22 tt Cos[3 θ]-4 r23 tt Cos[3 θ]-12 r13 r2 Cos[4 θ]+9 r12 r22 Cos[4 θ]+6 r1 r23 Cos[4 θ]+r24 Cos[4 θ]+8 r12 tt2 Cos[4 θ]+16 r1 r2 tt2 Cos[4 θ]-3 r12 x2 Cos[4 θ]-6 r1 r2 x2 Cos[4 θ]-3 r22 x2 Cos[4 θ]+6 r13 tt Cos[5 θ]-12 r12 r2 tt Cos[5 θ]-2 r1 r22 tt Cos[5 θ]+4 r12 tt2 Cos[6 θ]) Sec[θ]4)/(3 (16 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^3-288 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^22 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^24 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2
Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+432 Cos[θ]^4 (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)^2+432 Cos[θ]^4 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ])^2 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)-144 Cos[θ]^4 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^24 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-4 r1^2 x^28 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 
r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)+\[Sqrt](-4 (4 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^2-48 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ]
Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+12 Cos[θ]^4 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4))^3+(16 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^3-288 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 
Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+432 Cos[θ]^4 (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)^2+432 Cos[θ]^4 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ])^2 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^28 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2
θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)-144 Cos[θ]^4 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4))^2))1/3)-1/(3 21/3) Sec[θ]4 (16 (2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^3-288 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 
Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+432 Cos[θ]^4 (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)^2+432 Cos[θ]^4 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ])^2 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^28 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2
Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)-144 Cos[θ]^4 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)+\[Sqrt](-4 (4 (-2 r1 r2 Cos[θ]^22 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^2-48 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 
Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+12 Cos[θ]^4 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4))^3+(16 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^3-288 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2
r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+432 Cos[θ]^4 (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)^2+432 Cos[θ]^4 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ])^2 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^28 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 
θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)-144 Cos[θ]^4 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4))^2))1/3+8 Sec[θ]2 (-tt Sin[θ]-r1 Sin[2 θ]+r2 Tan[θ]+r1 Cos[2 θ] Tan[θ])2-2 Sec[θ] (2 r2 tt+2 r1 tt Cos[2 θ]-2 r1 r2 Sec[θ]-2 r22 Sec[θ]-2 r12 Cos[2 θ] Sec[θ]+2 r12 Cos[2 θ]2 Sec[θ]+3 r12 Sec[θ] Sin[2 θ]2+3 tt2 Sin[θ] Tan[θ]-4 r12 Sin[θ]3 Tan[θ]+6 r1 tt Sin[2 θ] Tan[θ]-4 r1 r2 Sec[θ] Sin[2 θ] Tan[θ]-4 r12 Cos[2 θ] Sec[θ] Sin[2 θ] Tan[θ]-4 r2 tt Tan[θ]2-4 r1 tt
Cos[2 θ] Tan[θ]2-2 r12 Sec[θ] Tan[θ]2-4 r1 r2 Sec[θ] Tan[θ]2+4 r1 r2 Cos[2 θ] Sec[θ] Tan[θ]2+2 r12 Cos[2 θ]2 Sec[θ] Tan[θ]2)-(-64 Sec[θ]3 (-tt Sin[θ]-r1 Sin[2 θ]+r2 Tan[θ]+r1 Cos[2 θ] Tan[θ])3+32 Sec[θ]2 (-tt Sin[θ]r1 Sin[2 θ]+r2 Tan[θ]+r1 Cos[2 θ] Tan[θ]) (2 r2 tt+2 r1 tt Cos[2 θ]-2 r1 r2 Sec[θ]-2 r22 Sec[θ]-2 r12 Cos[2 θ] Sec[θ]+2 r12 Cos[2 θ]2 Sec[θ]+3 r12 Sec[θ] Sin[2 θ]2+3 tt2 Sin[θ] Tan[θ]-4 r12 Sin[θ]3 Tan[θ]+6 r1 tt Sin[2 θ] Tan[θ]-4 r1 r2 Sec[θ] Sin[2 θ] Tan[θ]-4 r12 Cos[2 θ] Sec[θ] Sin[2 θ] Tan[θ]-4 r2 tt Tan[θ]2-4 r1 tt Cos[2 θ] Tan[θ]2-2 r12 Sec[θ] Tan[θ]2-4 r1 r2 Sec[θ] Tan[θ]2+4 r1 r2 Cos[2 θ] Sec[θ] Tan[θ]2+2 r12 Cos[2 θ]2 Sec[θ] Tan[θ]2)+32 Sec[θ]2 (2 r2 tt2 Sin[θ]+2 r1 tt2 Cos[2 θ] Sin[θ]+2 r1 r2 tt Sin[2 θ]+2 r12 tt Cos[2 θ] Sin[2 θ]-2 r12 r2 Sec[θ] Sin[2 θ]-2 r1 r22 Sec[θ] Sin[2 θ]-2 r13 Cos[2 θ] Sec[θ] Sin[2 θ]+2 r13 Cos[2 θ]2 Sec[θ] Sin[2 θ]+r13 Sec[θ] Sin[2 θ]3+2 r12 tt Tan[θ]+2 r1 r2 tt Tan[θ]-2 r22 tt Tan[θ]-2 r12 tt Cos[2 θ] Tan[θ]-4 r1 r2 tt Cos[2 θ] Tan[θ]-2 r12 r2 Sec[θ] Tan[θ]-2 r1 r22 Sec[θ] Tan[θ]+4 r12 r2 Cos[2 θ] Sec[θ] Tan[θ]+2 r1 r22 Cos[2 θ] Sec[θ] Tan[θ]+2 r13 Cos[2 θ]2 Sec[θ] Tan[θ]-2 r12 r2 Cos[2 θ]2 Sec[θ] Tan[θ]-2 r13 Cos[2 θ]3 Sec[θ] Tan[θ]+tt3 Sin[θ]2 Tan[θ]-4 r12 tt Sin[θ]4 Tan[θ]+3 r1 tt2 Sin[θ] Sin[2 θ] Tan[θ]-4 r13 Sin[θ]3 Sin[2 θ] Tan[θ]+3 r12 tt Sin[2 θ]2 Tan[θ]-r12 r2 Sec[θ] Sin[2 θ]2 Tan[θ]-r13 Cos[2 θ] Sec[θ] Sin[2 θ]2 Tan[θ]-r2 tt2 Sin[θ] Tan[θ]2-r1 tt2 Cos[2 θ] Sin[θ] Tan[θ]2+4 r12 r2 Sin[θ]3 Tan[θ]2+4 r13 Cos[2 θ] Sin[θ]3 Tan[θ]2-2 r1 r2 tt Sin[2 θ] Tan[θ]2-2 r12 tt Cos[2 θ] Sin[2 θ] Tan[θ]2))/(4 \[Sqrt](2/3 Sec[θ]4 (-2 r1 r2 Cos[θ]2-2 r22 Cos[θ]2+2 r2 tt Cos[θ]3-2 r12 Cos[θ]2 Cos[2 θ]+2 r1 tt Cos[θ]3 Cos[2 θ]+2 r12 Cos[θ]2 Cos[2 θ]2-2 r12 Sin[θ]2-4 r1 r2 Sin[θ]2-4 r2 tt Cos[θ] Sin[θ]2+3 tt2 Cos[θ]2 Sin[θ]2+4 r1 r2 Cos[2 θ] Sin[θ]2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]2+2 r12 Cos[2 θ]2 Sin[θ]2-4 r12 Cos[θ]2 Sin[θ]4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]2 Sin[θ] Sin[2 θ]-4 r12 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r12 Cos[θ]2 Sin[2 θ]2)+(2 21/3 (12 r13 r2+59 r12 r22+2 r1 r23+3 r24-4 r12 tt28 r1 r2 tt2+4 r22 tt2-9 r12 x2-18 r1 r2 x2-9 r22 x2-12 r13 tt Cos[θ]-20 r12 r2 tt Cos[θ]+12 r1 r22 tt Cos[θ]-12 r23 tt Cos[θ]-60 r12 r22 Cos[2 θ]+8 r1 r23 Cos[2 θ]+4 r24 Cos[2 θ]+8 r1 r2 tt2 Cos[2 θ]+4 r22 tt2 Cos[2 θ]-12 r12 x2 Cos[2 θ]-24 r1 r2 x2 Cos[2 θ]-12 r22 x2 Cos[2 θ]+6 r13 tt Cos[3 θ]+16 r12 r2 tt Cos[3 θ]-42 r1 r22 tt Cos[3 θ]-4 r23 tt Cos[3 θ]-12 r13 r2 Cos[4 θ]+9 r12 r22 Cos[4 θ]+6 r1 r23 Cos[4 θ]+r24 Cos[4 θ]+8 r12 tt2 Cos[4 θ]+16 r1 r2 tt2 Cos[4 θ]-3 r12 x2 Cos[4 θ]-6 r1 r2 x2 Cos[4 θ]-3 r22 x2 Cos[4 θ]+6 r13 tt Cos[5 θ]-12 r12 r2 tt Cos[5 θ]-2 r1 r22 tt Cos[5 θ]+4 r12 tt2 Cos[6 θ]) Sec[θ]4)/(3 (16 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^24 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^3-288 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^22 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^24 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 
r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+432 Cos[θ]^4 (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)^2+432 Cos[θ]^4 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ])^2 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2
Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)-144 Cos[θ]^4 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^24 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-4 r1^2 x^28 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)+\[Sqrt](-4 (4 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^2-48 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 
θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+12 Cos[θ]^4 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4))^3+(16 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ]
Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^3-288 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+432 Cos[θ]^4 (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)^2+432 Cos[θ]^4 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ])^2 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^28 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] 
Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)-144 Cos[θ]^4 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2
θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4))^2))1/3)+1/(3 21/3) Sec[θ]4 (16 (2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^3-288 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+432 Cos[θ]^4 (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)^2+432 Cos[θ]^4 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ])^2 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^28 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 
Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)-144 Cos[θ]^4 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2
Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)+\[Sqrt](-4 (4 (-2 r1 r2 Cos[θ]^22 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^2-48 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+12 Cos[θ]^4 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4))^3+(16 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] 
Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2)^3-288 Cos[θ]^2 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ]) (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)+432 Cos[θ]^4 (-2 r1^2 r2 Sin[θ]-2 r1 r2^2 Sin[θ]+2 r1^2 tt Cos[θ] Sin[θ]+2 r1 r2 tt Cos[θ] Sin[θ]-2 r2^2 tt Cos[θ] Sin[θ]+2 r2 tt^2 Cos[θ]^2 Sin[θ]+4 r1^2 r2 Cos[2 θ] Sin[θ]+2 r1 r2^2 Cos[2 θ] Sin[θ]-2 r1^2 tt Cos[θ] Cos[2 θ] Sin[θ]-4 r1 r2 tt Cos[θ] Cos[2 θ] Sin[θ]+2 r1 tt^2 Cos[θ]^2 Cos[2 θ] Sin[θ]+2 r1^3 Cos[2 θ]^2 Sin[θ]-2 r1^2 r2 Cos[2 θ]^2 Sin[θ]-2 r1^3 Cos[2 θ]^3 Sin[θ]-r2 tt^2 Sin[θ]^3+tt^3 Cos[θ] Sin[θ]^3-r1 tt^2 Cos[2 θ] Sin[θ]^3+4 r1^2 r2 Sin[θ]^5-4 r1^2 tt Cos[θ] Sin[θ]^5+4 r1^3 Cos[2 θ] Sin[θ]^5-2 r1^2 r2 Cos[θ] Sin[2 θ]-2 r1 r2^2 Cos[θ] Sin[2 θ]+2 r1 r2 tt Cos[θ]^2 Sin[2 θ]-2 r1^3 Cos[θ] Cos[2 θ] Sin[2 θ]+2 r1^2 tt Cos[θ]^2 Cos[2 θ] Sin[2 θ]+2 r1^3 Cos[θ] Cos[2 θ]^2 Sin[2 θ]-2 r1 r2 tt
Sin[θ]^2 Sin[2 θ]+3 r1 tt^2 Cos[θ] Sin[θ]^2 Sin[2 θ]-2 r1^2 tt Cos[2 θ] Sin[θ]^2 Sin[2 θ]-4 r1^3 Cos[θ] Sin[θ]^4 Sin[2 θ]-r1^2 r2 Sin[θ] Sin[2 θ]^2+3 r1^2 tt Cos[θ] Sin[θ] Sin[2 θ]^2-r1^3 Cos[2 θ] Sin[θ] Sin[2 θ]^2+r1^3 Cos[θ] Sin[2 θ]^3)^2+432 Cos[θ]^4 (-r2 Sin[θ]+tt Cos[θ] Sin[θ]-r1 Cos[2 θ] Sin[θ]+r1 Cos[θ] Sin[2 θ])^2 (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^28 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 θ]^4)-144 Cos[θ]^4 (-2 r1 r2 Cos[θ]^2-2 r2^2 Cos[θ]^2+2 r2 tt Cos[θ]^3-2 r1^2 Cos[θ]^2 Cos[2 θ]+2 r1 tt Cos[θ]^3 Cos[2 θ]+2 r1^2 Cos[θ]^2 Cos[2 θ]^2-2 r1^2 Sin[θ]^2-4 r1 r2 Sin[θ]^2-4 r2 tt Cos[θ] Sin[θ]^2+3 tt^2 Cos[θ]^2 Sin[θ]^2+4 r1 r2 Cos[2 θ] Sin[θ]^2-4 r1 tt Cos[θ] Cos[2 θ] Sin[θ]^2+2 r1^2 Cos[2 θ]^2 Sin[θ]^2-4 r1^2 Cos[θ]^2 Sin[θ]^4-4 r1 r2 Cos[θ] Sin[θ] Sin[2 θ]+6 r1 tt Cos[θ]^2 Sin[θ] Sin[2 θ]-4 r1^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+3 r1^2 Cos[θ]^2 Sin[2 θ]^2) (-4 r1^2 x^2-8 r1 r2 x^2-4 r2^2 x^2+8 r1^2 r2 tt Cos[θ]+8 r1 r2^2 tt Cos[θ]-4 r1^2 tt^2 Cos[θ]^2-8 r1 r2 tt^2 Cos[θ]^2+8 r1^3 r2 Cos[2 θ]+8 r1^2 r2^2 Cos[2 θ]-16 r1^2 r2 tt Cos[θ] Cos[2 θ]-8 r1 r2^2 tt Cos[θ] Cos[2 θ]+8 r1 r2 tt^2 Cos[θ]^2 Cos[2 θ]+4 r1^4 Cos[2 θ]^2-8 r1^3 r2 Cos[2 θ]^2-8 r1^2 r2^2 Cos[2 θ]^2-8 r1^3 tt Cos[θ] Cos[2 θ]^2+8 r1^2 r2 tt Cos[θ] Cos[2 θ]^2+4 r1^2 tt^2 Cos[θ]^2 Cos[2 θ]^2-8 r1^4 Cos[2 θ]^3+8 r1^3 tt Cos[θ] Cos[2 θ]^3+4 r1^4 Cos[2 θ]^4-4 r1 r2 tt^2 Sin[θ]^2-4 r2^2 tt^2 Sin[θ]^2+4 r2 tt^3 Cos[θ] Sin[θ]^2-4 r1^2 tt^2 Cos[2 θ] Sin[θ]^2+4 r1 tt^3 Cos[θ] Cos[2 θ] Sin[θ]^2+4 r1^2 tt^2 Cos[2 θ]^2 Sin[θ]^2+16 r1^3 r2 Sin[θ]^4+16 r1^2 r2^2 Sin[θ]^4+tt^4 Sin[θ]^4-16 r1^2 r2 tt Cos[θ] Sin[θ]^4+16 r1^4 Cos[2 θ] Sin[θ]^4-16 r1^3 tt Cos[θ] Cos[2 θ] Sin[θ]^4-16 r1^4 Cos[2 θ]^2 Sin[θ]^4-8 r1^2 tt^2 Sin[θ]^6+16 r1^4 Sin[θ]^8-8 r1^2 r2 tt Sin[θ] Sin[2 θ]-8 r1 r2^2 tt Sin[θ] Sin[2 θ]+8 r1 r2 tt^2 Cos[θ] Sin[θ] Sin[2 θ]-8 r1^3 tt Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^2 tt^2 Cos[θ] Cos[2 θ] Sin[θ] Sin[2 θ]+8 r1^3 tt Cos[2 θ]^2 Sin[θ] Sin[2 θ]+4 r1 tt^3 Sin[θ]^3 Sin[2 θ]-16 r1^3 tt Sin[θ]^5 Sin[2 θ]-4 r1^3 r2 Sin[2 θ]^2-4 r1^2 r2^2 Sin[2 θ]^2+4 r1^2 r2 tt Cos[θ] Sin[2 θ]^2-4 r1^4 Cos[2 θ] Sin[2 θ]^2+4 r1^3 tt Cos[θ] Cos[2 θ] Sin[2 θ]^2+4 r1^4 Cos[2 θ]^2 Sin[2 θ]^2+6 r1^2 tt^2 Sin[θ]^2 Sin[2 θ]^2-8 r1^4 Sin[θ]^4 Sin[2 θ]^2+4 r1^3 tt Sin[θ] Sin[2 θ]^3+r1^4 Sin[2 
θ]^4))^2))^(1/3)+4 Sec[θ]^2 (-tt Sin[θ]-r1 Sin[2 θ]+r2 Tan[θ]+r1 Cos[2 θ] Tan[θ])^2-2 Sec[θ] (2 r2 tt+2 r1 tt Cos[2 θ]-2 r1 r2 Sec[θ]-2 r2^2 Sec[θ]-2 r1^2 Cos[2 θ] Sec[θ]+2 r1^2 Cos[2 θ]^2 Sec[θ]+3 r1^2 Sec[θ] Sin[2 θ]^2+3 tt^2 Sin[θ] Tan[θ]-4 r1^2 Sin[θ]^3 Tan[θ]+6 r1 tt Sin[2 θ] Tan[θ]-4 r1 r2 Sec[θ] Sin[2 θ] Tan[θ]-4 r1^2 Cos[2 θ] Sec[θ] Sin[2 θ] Tan[θ]-4 r2 tt Tan[θ]^2-4 r1 tt Cos[2 θ] Tan[θ]^2-2 r1^2 Sec[θ] Tan[θ]^2-4 r1 r2 Sec[θ] Tan[θ]^2+4 r1 r2 Cos[2 θ] Sec[θ] Tan[θ]^2+2 r1^2 Cos[2 θ]^2 Sec[θ] Tan[θ]^2))))]
"""
| 1,852.603774
| 6,032
| 0.57339
| 34,304
| 98,188
| 1.641208
| 0.001632
| 0.101314
| 0.146359
| 0.105009
| 0.997229
| 0.996217
| 0.994849
| 0.993286
| 0.992433
| 0.990036
| 0
| 0.229845
| 0.172058
| 98,188
| 53
| 6,033
| 1,852.603774
| 0.462703
| 0.999898
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 1
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
bcefca259fc5d9fd8747a34763abf63c99297d7c
| 6,965
|
py
|
Python
|
generator/templates/test_class.py
|
mcassini/pyepw
|
8da1b22b7834eff3350906d2295ede3a6bd13a53
|
[
"Apache-2.0"
] | 27
|
2015-05-18T20:16:12.000Z
|
2021-05-10T23:58:13.000Z
|
generator/templates/test_class.py
|
mcassini/pyepw
|
8da1b22b7834eff3350906d2295ede3a6bd13a53
|
[
"Apache-2.0"
] | null | null | null |
generator/templates/test_class.py
|
mcassini/pyepw
|
8da1b22b7834eff3350906d2295ede3a6bd13a53
|
[
"Apache-2.0"
] | 10
|
2015-03-11T18:03:18.000Z
|
2021-06-22T07:58:01.000Z
|
import os
import tempfile
import unittest
from pyepw.epw import {{ obj.class_name }},{% for field in obj.fields %}{% if field.is_list %}{{field.object_name}} ,{% endif %}{% endfor %}EPW
class Test{{ obj.class_name }}(unittest.TestCase):
def setUp(self):
self.fd, self.path = tempfile.mkstemp()
def tearDown(self):
os.remove(self.path)
def test_create_{{ obj.var_name }}(self):
obj = {{ obj.class_name }}()
{%- for field in obj.fields %}
{%- if not field.is_list %}
{%- if field.attributes.pytype == "str" %}
{%- if field.attributes.type == "choice" %}
var_{{field.field_name}} = "{{ field.attributes.key[0] }}"
{%- else %}
var_{{field.field_name}} = "{{field.field_name }}"
{%- endif %}
{%- elif field.attributes.pytype == "float" %}
{%- if (field.attributes['maximum<'] or field.attributes.maximum) and (field.attributes['minimum>'] or field.attributes.minimum) %}
var_{{field.field_name}} = ({%if field.attributes['maximum<'] %} ({{ field.attributes["maximum<"] }} - 1.0 ) {%- else %} {{ field.attributes.maximum }} {%- endif %} + {%if field.attributes['minimum>'] %} ({{ field.attributes["minimum>"] }} + 1.0 ) {%- else %} {{ field.attributes.minimum }} {%- endif %}) * 0.5
{%- elif (field.attributes['maximum<'] or field.attributes.maximum) and not (field.attributes['minimum>'] or field.attributes.minimum) %}
var_{{field.field_name}} = {%if field.attributes['maximum<'] %} ({{ field.attributes["maximum<"] }} - 1.0 ) {%- else %} {{ field.attributes.maximum }} {%- endif %}
{%- else %}
var_{{field.field_name}} = {{loop.index}}.{{loop.index}}
{%- endif %}
{%- elif field.attributes.pytype == "int" %}
{%- if (field.attributes['maximum<'] or field.attributes.maximum) and (field.attributes['minimum>'] or field.attributes.minimum) %}
var_{{field.field_name}} = int(({%if field.attributes['maximum<'] %} ({{ field.attributes["maximum<"] }} - 1) {%- else %} {{ field.attributes.maximum }} {%- endif %} + {%if field.attributes['minimum>'] %} ({{ field.attributes["minimum>"] }} + 1) {%- else %} {{ field.attributes.minimum }} {%- endif %}) * 0.5)
{%- elif (field.attributes['maximum<'] or field.attributes.maximum) and not (field.attributes['minimum>'] or field.attributes.minimum) %}
var_{{field.field_name}} = {%if field.attributes['maximum<'] %} ({{ field.attributes["maximum<"] }} - 1 ) {%- else %} {{ field.attributes.maximum }} {%- endif %}
{%- else %}
var_{{field.field_name}} = {{loop.index}}
{%- endif %}
{%- endif %}
obj.{{field.field_name}} = var_{{field.field_name}}
{%- else %}
{{field.field_name}}_obj = {{objs[field.object_name].class_name}}()
{%- for field2 in objs[field.object_name].fields %}
{%- if field2.attributes.pytype == "str" %}
{%- if field2.attributes.type == "choice" %}
var_{{field.field_name}}_{{field2.field_name}} = "{{ field2.attributes.key[0] }}"
{%- else %}
var_{{field.field_name}}_{{field2.field_name}} = "{{field2.field_name }}"
{%- endif %}
{%- elif field2.attributes.pytype == "float" %}
{%- if (field2.attributes['maximum<'] or field2.attributes.maximum) and (field2.attributes['minimum>'] or field2.attributes.minimum) %}
var_{{field.field_name}}_{{field2.field_name}} = ({%if field2.attributes['maximum<'] %} ({{ field2.attributes["maximum<"] }} - 1.0 ) {%- else %} {{ field2.attributes.maximum }} {%- endif %} + {%if field2.attributes['minimum>'] %} ({{ field2.attributes["minimum>"] }} + 1.0 ) {%- else %} {{ field2.attributes.minimum }} {%- endif %}) * 0.5
{%- elif (field.attributes['maximum<'] or field.attributes.maximum) and not (field.attributes['minimum>'] or field.attributes.minimum) %}
var_{{field.field_name}}_{{field2.field_name}} = {%if field2.attributes['maximum<'] %} ({{ field2.attributes["maximum<"] }} - 1.0 ) {%- else %} {{ field2.attributes.maximum }} {%- endif %}
{%- else %}
var_{{field.field_name}}_{{field2.field_name}} = {{loop.index}}.{{loop.index}}
{%- endif %}
{%- elif field2.attributes.pytype == "int" %}
{%- if (field2.attributes['maximum<'] or field2.attributes.maximum) and (field2.attributes['minimum>'] or field2.attributes.minimum) %}
var_{{field.field_name}}_{{field2.field_name}} = int(({%if field2.attributes['maximum<'] %} ({{ field2.attributes["maximum<"] }} - 1) {%- else %} {{ field2.attributes.maximum }} {%- endif %} + {%if field2.attributes['minimum>'] %} ({{ field2.attributes["minimum>"] }} + 1) {%- else %} {{ field2.attributes.minimum }} {%- endif %}) * 0.5)
{%- elif (field2.attributes['maximum<'] or field2.attributes.maximum) and not (field2.attributes['minimum>'] or field2.attributes.minimum) %}
var_{{field.field_name}}_{{field2.field_name}} = {%if field2.attributes['maximum<'] %} ({{ field2.attributes["maximum<"] }} - 1 ) {%- else %} {{ field2.attributes.maximum }} {%- endif %}
{%- else %}
var_{{field.field_name}}_{{field2.field_name}} = {{loop.index}}
{%- endif %}
{%- endif %}
{{field.field_name}}_obj.{{field2.field_name}} = var_{{field.field_name}}_{{field2.field_name}}
{%- endfor %}
obj.add_{{field.field_name}}({{field.field_name}}_obj)
{%- endif %}
{%- endfor %}
epw = EPW({{ obj.var_name }}=obj)
epw.save(self.path, check=False)
epw2 = EPW()
epw2.read(self.path)
{%- for field in obj.fields %}
{%- if not field.is_list %}
{%- if field.attributes.pytype == "str" %}
self.assertEqual(epw2.{{obj.var_name}}.{{field.field_name}}, var_{{field.field_name}})
{%- elif field.attributes.pytype == "int" %}
self.assertEqual(epw2.{{obj.var_name}}.{{field.field_name}}, var_{{field.field_name}})
{%- elif field.attributes.pytype == "float" %}
self.assertAlmostEqual(epw2.{{obj.var_name}}.{{field.field_name}}, var_{{field.field_name}})
{%- endif %}
{%- else %}
{%- for field2 in objs[field.object_name].fields %}
{%- if field2.attributes.pytype == "str" %}
self.assertEqual(epw2.{{obj.var_name}}.{{field.field_name}}s[0].{{field2.field_name}}, var_{{field.field_name}}_{{field2.field_name}})
{%- elif field2.attributes.pytype == "int" %}
self.assertEqual(epw2.{{obj.var_name}}.{{field.field_name}}s[0].{{field2.field_name}}, var_{{field.field_name}}_{{field2.field_name}})
{%- elif field2.attributes.pytype == "float" %}
self.assertAlmostEqual(epw2.{{obj.var_name}}.{{field.field_name}}s[0].{{field2.field_name}}, var_{{field.field_name}}_{{field2.field_name}})
{%- endif %}
{%- endfor %}
{%- endif %}
{%- endfor %}
| 68.284314
| 349
| 0.582627
| 771
| 6,965
| 5.115435
| 0.081712
| 0.120943
| 0.127789
| 0.103448
| 0.888692
| 0.856237
| 0.8357
| 0.813387
| 0.74645
| 0.735041
| 0
| 0.017152
| 0.196411
| 6,965
| 102
| 350
| 68.284314
| 0.687511
| 0
| 0
| 0.569892
| 0
| 0
| 0.068619
| 0.006747
| 0
| 0
| 0
| 0
| 0.064516
| 0
| null | null | 0
| 0.043011
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4c0b78c0f4d6686cb27000e69abc4bee940118bc
| 189
|
py
|
Python
|
src/karmabot/db/__all_models.py
|
pogross/karmabot
|
75f1ae60d5274d35c03f3e06e5c218dafc9ebe82
|
[
"MIT"
] | 40
|
2019-11-03T13:00:25.000Z
|
2022-03-29T20:14:25.000Z
|
src/karmabot/db/__all_models.py
|
pogross/karmabot
|
75f1ae60d5274d35c03f3e06e5c218dafc9ebe82
|
[
"MIT"
] | 67
|
2019-11-02T19:07:44.000Z
|
2021-12-01T18:17:34.000Z
|
src/karmabot/db/__all_models.py
|
pogross/karmabot
|
75f1ae60d5274d35c03f3e06e5c218dafc9ebe82
|
[
"MIT"
] | 27
|
2019-11-08T15:44:40.000Z
|
2022-03-14T22:16:43.000Z
|
# Add all models here. Ensures loading all models
import karmabot.db.karma_note # noqa: F401
import karmabot.db.karma_transaction # noqa: F401
import karmabot.db.karma_user # noqa: F401
| 37.8
| 50
| 0.783069
| 29
| 189
| 5
| 0.517241
| 0.289655
| 0.331034
| 0.434483
| 0.4
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0.055556
| 0.142857
| 189
| 4
| 51
| 47.25
| 0.839506
| 0.42328
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
4c38255576560b2ba9b68d6c3167e986f3c102e3
| 31,271
|
py
|
Python
|
thrift/compiler/test/fixtures/interactions/gen-py/test/fixtures/interactions/MyService.py
|
dgrnbrg-meta/fbthrift
|
1d5f0799ef53feeb83425b6c9c79f86aeac7d9ed
|
[
"Apache-2.0"
] | null | null | null |
thrift/compiler/test/fixtures/interactions/gen-py/test/fixtures/interactions/MyService.py
|
dgrnbrg-meta/fbthrift
|
1d5f0799ef53feeb83425b6c9c79f86aeac7d9ed
|
[
"Apache-2.0"
] | null | null | null |
thrift/compiler/test/fixtures/interactions/gen-py/test/fixtures/interactions/MyService.py
|
dgrnbrg-meta/fbthrift
|
1d5f0799ef53feeb83425b6c9c79f86aeac7d9ed
|
[
"Apache-2.0"
] | null | null | null |
#
# Autogenerated by Thrift
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
# @generated
#
from __future__ import absolute_import
import sys
from thrift.util.Recursive import fix_spec
from thrift.Thrift import TType, TMessageType, TPriority, TRequestContext, TProcessorEventHandler, TServerInterface, TProcessor, TException, TApplicationException, UnimplementedTypedef
from thrift.protocol.TProtocol import TProtocolException
from json import loads
import sys
if sys.version_info[0] >= 3:
long = int
from .ttypes import UTF8STRINGS, CustomException
from thrift.Thrift import TProcessor
import pprint
import warnings
from thrift import Thrift
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.protocol import TCompactProtocol
from thrift.protocol import THeaderProtocol
fastproto = None
try:
from thrift.protocol import fastproto
except ImportError:
pass
all_structs = []
UTF8STRINGS = bool(0) or sys.version_info.major >= 3
from thrift.util.Decorators import (
future_process_main,
future_process_method,
process_main as thrift_process_main,
process_method as thrift_process_method,
should_run_on_thread,
write_results_after_future,
)
class Iface:
def foo(self, ):
pass
def interact(self, arg=None):
"""
Parameters:
- arg
"""
pass
def interactFast(self, ):
pass
class ContextIface:
def foo(self, handler_ctx, ):
pass
def interact(self, handler_ctx, arg=None):
"""
Parameters:
- arg
"""
pass
def interactFast(self, handler_ctx, ):
pass
# HELPER FUNCTIONS AND STRUCTURES
class foo_args:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('foo_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
all_structs.append(foo_args)
foo_args.thrift_spec = (
)
foo_args.thrift_struct_annotations = {
}
foo_args.thrift_field_annotations = {
}
class foo_result:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('foo_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
all_structs.append(foo_result)
foo_result.thrift_spec = (
)
foo_result.thrift_struct_annotations = {
}
foo_result.thrift_field_annotations = {
}
class interact_args:
"""
Attributes:
- arg
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.arg = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('interact_args')
if self.arg != None:
oprot.writeFieldBegin('arg', TType.I32, 1)
oprot.writeI32(self.arg)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
if 'arg' in json_obj and json_obj['arg'] is not None:
self.arg = json_obj['arg']
if self.arg > 0x7fffffff or self.arg < -0x80000000:
raise TProtocolException(TProtocolException.INVALID_DATA, 'number exceeds limit in field')
def __repr__(self):
L = []
padding = ' ' * 4
if self.arg is not None:
value = pprint.pformat(self.arg, indent=0)
value = padding.join(value.splitlines(True))
L.append(' arg=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
all_structs.append(interact_args)
interact_args.thrift_spec = (
None, # 0
(1, TType.I32, 'arg', None, None, 2, ), # 1
)
interact_args.thrift_struct_annotations = {
}
interact_args.thrift_field_annotations = {
}
def interact_args__init__(self, arg=None,):
self.arg = arg
interact_args.__init__ = interact_args__init__
def interact_args__setstate__(self, state):
state.setdefault('arg', None)
self.__dict__ = state
interact_args.__getstate__ = lambda self: self.__dict__.copy()
interact_args.__setstate__ = interact_args__setstate__
class interact_result:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('interact_result')
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
all_structs.append(interact_result)
interact_result.thrift_spec = (
)
interact_result.thrift_struct_annotations = {
}
interact_result.thrift_field_annotations = {
}
class interactFast_args:
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('interactFast_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
def __repr__(self):
L = []
padding = ' ' * 4
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
all_structs.append(interactFast_args)
interactFast_args.thrift_spec = (
)
interactFast_args.thrift_struct_annotations = {
}
interactFast_args.thrift_field_annotations = {
}
class interactFast_result:
"""
Attributes:
- success
"""
thrift_spec = None
thrift_field_annotations = None
thrift_struct_annotations = None
__init__ = None
@staticmethod
def isUnion():
return False
def read(self, iprot):
if (isinstance(iprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0)
return
if (isinstance(iprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(iprot, THeaderProtocol.THeaderProtocolAccelerate) and iprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastproto is not None:
fastproto.decode(self, iprot.trans, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2)
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.I32:
self.success = iprot.readI32()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if (isinstance(oprot, TBinaryProtocol.TBinaryProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_BINARY_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=0))
return
if (isinstance(oprot, TCompactProtocol.TCompactProtocolAccelerated) or (isinstance(oprot, THeaderProtocol.THeaderProtocolAccelerate) and oprot.get_protocol_id() == THeaderProtocol.THeaderProtocol.T_COMPACT_PROTOCOL)) and self.thrift_spec is not None and fastproto is not None:
oprot.trans.write(fastproto.encode(self, [self.__class__, self.thrift_spec, False], utf8strings=UTF8STRINGS, protoid=2))
return
oprot.writeStructBegin('interactFast_result')
if self.success != None:
oprot.writeFieldBegin('success', TType.I32, 0)
oprot.writeI32(self.success)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def readFromJson(self, json, is_text=True, **kwargs):
relax_enum_validation = bool(kwargs.pop('relax_enum_validation', False))
set_cls = kwargs.pop('custom_set_cls', set)
dict_cls = kwargs.pop('custom_dict_cls', dict)
if kwargs:
extra_kwargs = ', '.join(kwargs.keys())
raise ValueError(
'Unexpected keyword arguments: ' + extra_kwargs
)
json_obj = json
if is_text:
json_obj = loads(json)
if 'success' in json_obj and json_obj['success'] is not None:
self.success = json_obj['success']
if self.success > 0x7fffffff or self.success < -0x80000000:
raise TProtocolException(TProtocolException.INVALID_DATA, 'number exceeds limit in field')
def __repr__(self):
L = []
padding = ' ' * 4
if self.success is not None:
value = pprint.pformat(self.success, indent=0)
value = padding.join(value.splitlines(True))
L.append(' success=%s' % (value))
return "%s(%s)" % (self.__class__.__name__, "\n" + ",\n".join(L) if L else '')
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
# Override the __hash__ function for Python3 - t10434117
__hash__ = object.__hash__
all_structs.append(interactFast_result)
interactFast_result.thrift_spec = (
(0, TType.I32, 'success', None, None, 2, ), # 0
)
interactFast_result.thrift_struct_annotations = {
}
interactFast_result.thrift_field_annotations = {
}
def interactFast_result__init__(self, success=None,):
self.success = success
interactFast_result.__init__ = interactFast_result__init__
def interactFast_result__setstate__(self, state):
state.setdefault('success', None)
self.__dict__ = state
interactFast_result.__getstate__ = lambda self: self.__dict__.copy()
interactFast_result.__setstate__ = interactFast_result__setstate__
class Client(Iface):
_fbthrift_force_cpp_transport = False
def __enter__(self):
return self
def __exit__(self, type, value, tb):
if self._iprot:
self._iprot.trans.close()
if self._oprot and self._iprot is not self._oprot:
self._oprot.trans.close()
def __init__(self, iprot=None, oprot=None, cpp_transport=None):
self._iprot = self._oprot = iprot
if oprot != None:
self._oprot = oprot
self._seqid = 0
self._fbthrift_cpp_transport = cpp_transport
def foo(self, ):
if (self._fbthrift_cpp_transport):
args = foo_args()
return self._fbthrift_cpp_transport._send_request("MyService", "foo", args, foo_result).success
self.send_foo()
self.recv_foo()
def send_foo(self, ):
self._oprot.writeMessageBegin('foo', TMessageType.CALL, self._seqid)
args = foo_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_foo(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = foo_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def interact(self, arg=None):
"""
Parameters:
- arg
"""
if (self._fbthrift_cpp_transport):
args = interact_args()
args.arg = arg
return self._fbthrift_cpp_transport._send_request("MyService", "interact", args, interact_result).success
self.send_interact(arg)
self.recv_interact()
def send_interact(self, arg=None):
self._oprot.writeMessageBegin('interact', TMessageType.CALL, self._seqid)
args = interact_args()
args.arg = arg
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_interact(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = interact_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
return
def interactFast(self, ):
if (self._fbthrift_cpp_transport):
args = interactFast_args()
return self._fbthrift_cpp_transport._send_request("MyService", "interactFast", args, interactFast_result).success
self.send_interactFast()
return self.recv_interactFast()
def send_interactFast(self, ):
self._oprot.writeMessageBegin('interactFast', TMessageType.CALL, self._seqid)
args = interactFast_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_interactFast(self, ):
(fname, mtype, rseqid) = self._iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(self._iprot)
self._iprot.readMessageEnd()
raise x
result = interactFast_result()
result.read(self._iprot)
self._iprot.readMessageEnd()
if result.success != None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "interactFast failed: unknown result");
class Processor(Iface, TProcessor):
_onewayMethods = ()
def __init__(self, handler):
TProcessor.__init__(self)
self._handler = handler
self._processMap = {}
self._priorityMap = {}
self._processMap["foo"] = Processor.process_foo
self._priorityMap["foo"] = TPriority.NORMAL
self._processMap["interact"] = Processor.process_interact
self._priorityMap["interact"] = TPriority.NORMAL
self._processMap["interactFast"] = Processor.process_interactFast
self._priorityMap["interactFast"] = TPriority.NORMAL
def onewayMethods(self):
l = []
l.extend(Processor._onewayMethods)
return tuple(l)
@thrift_process_main()
def process(self,): pass
@thrift_process_method(foo_args, oneway=False)
def process_foo(self, args, handler_ctx):
result = foo_result()
try:
self._handler.foo()
except:
ex = sys.exc_info()[1]
self._event_handler.handlerError(handler_ctx, 'foo', ex)
result = Thrift.TApplicationException(message=repr(ex))
return result
@thrift_process_method(interact_args, oneway=False)
def process_interact(self, args, handler_ctx):
result = interact_result()
try:
self._handler.interact(args.arg)
except:
ex = sys.exc_info()[1]
self._event_handler.handlerError(handler_ctx, 'interact', ex)
result = Thrift.TApplicationException(message=repr(ex))
return result
@thrift_process_method(interactFast_args, oneway=False)
def process_interactFast(self, args, handler_ctx):
result = interactFast_result()
try:
result.success = self._handler.interactFast()
except:
ex = sys.exc_info()[1]
self._event_handler.handlerError(handler_ctx, 'interactFast', ex)
result = Thrift.TApplicationException(message=repr(ex))
return result
Iface._processor_type = Processor
class ContextProcessor(ContextIface, TProcessor):
_onewayMethods = ()
def __init__(self, handler):
TProcessor.__init__(self)
self._handler = handler
self._processMap = {}
self._priorityMap = {}
self._processMap["foo"] = ContextProcessor.process_foo
self._priorityMap["foo"] = TPriority.NORMAL
self._processMap["interact"] = ContextProcessor.process_interact
self._priorityMap["interact"] = TPriority.NORMAL
self._processMap["interactFast"] = ContextProcessor.process_interactFast
self._priorityMap["interactFast"] = TPriority.NORMAL
def onewayMethods(self):
l = []
l.extend(ContextProcessor._onewayMethods)
return tuple(l)
@thrift_process_main()
def process(self,): pass
@thrift_process_method(foo_args, oneway=False)
def process_foo(self, args, handler_ctx):
result = foo_result()
try:
self._handler.foo(handler_ctx)
except:
ex = sys.exc_info()[1]
self._event_handler.handlerError(handler_ctx, 'foo', ex)
result = Thrift.TApplicationException(message=repr(ex))
return result
@thrift_process_method(interact_args, oneway=False)
def process_interact(self, args, handler_ctx):
result = interact_result()
try:
self._handler.interact(handler_ctx, args.arg)
except:
ex = sys.exc_info()[1]
self._event_handler.handlerError(handler_ctx, 'interact', ex)
result = Thrift.TApplicationException(message=repr(ex))
return result
@thrift_process_method(interactFast_args, oneway=False)
def process_interactFast(self, args, handler_ctx):
result = interactFast_result()
try:
result.success = self._handler.interactFast(handler_ctx)
except:
ex = sys.exc_info()[1]
self._event_handler.handlerError(handler_ctx, 'interactFast', ex)
result = Thrift.TApplicationException(message=repr(ex))
return result
ContextIface._processor_type = ContextProcessor
fix_spec(all_structs)
del all_structs
| 37.675904
| 339
| 0.728215
| 3,661
| 31,271
| 5.928981
| 0.067468
| 0.027642
| 0.021561
| 0.030959
| 0.834055
| 0.818529
| 0.810421
| 0.802267
| 0.79379
| 0.788814
| 0
| 0.007687
| 0.167983
| 31,271
| 829
| 340
| 37.721351
| 0.826582
| 0.018164
| 0
| 0.71791
| 1
| 0
| 0.033788
| 0.004117
| 0
| 0
| 0.001307
| 0
| 0
| 1
| 0.113433
| false
| 0.013433
| 0.028358
| 0.019403
| 0.30597
| 0.004478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4c38b70c6689862ec97af69b67ea88caa9572af9
| 924
|
py
|
Python
|
Population - Dictionary.py
|
fatih-iver-2016400264/Intro-to-Computer-Science-with-Python
|
7b8127681415dfd100a0e70fe8a672cec696bbb7
|
[
"MIT"
] | null | null | null |
Population - Dictionary.py
|
fatih-iver-2016400264/Intro-to-Computer-Science-with-Python
|
7b8127681415dfd100a0e70fe8a672cec696bbb7
|
[
"MIT"
] | null | null | null |
Population - Dictionary.py
|
fatih-iver-2016400264/Intro-to-Computer-Science-with-Python
|
7b8127681415dfd100a0e70fe8a672cec696bbb7
|
[
"MIT"
] | null | null | null |
# Define a Dictionary, population,
# that provides information
# on the world's largest cities.
# The key is the name of a city
# (a string), and the associated
# value is its population in
# millions.
# Key | Value
# Shanghai | 17.8
# Istanbul | 13.3
# Karachi | 13.0
# Mumbai | 12.5
population = {}
population["Shanghai"] = 17.8
population["Istanbul"] = 13.3
population["Karachi"] = 13.0
population["Mumbai"] = 12.5# Define a Dictionary, population,
# that provides information
# on the world's largest cities.
# The key is the name of a city
# (a string), and the associated
# value is its population in
# millions.
# Key | Value
# Shanghai | 17.8
# Istanbul | 13.3
# Karachi | 13.0
# Mumbai | 12.5
population = {}
population["Shanghai"] = 17.8
population["Istanbul"] = 13.3
population["Karachi"] = 13.0
population["Mumbai"] = 12.5
| 23.692308
| 62
| 0.625541
| 126
| 924
| 4.587302
| 0.285714
| 0.069204
| 0.076125
| 0.093426
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0.069565
| 0.253247
| 924
| 39
| 63
| 23.692308
| 0.768116
| 0.613636
| 0
| 1
| 0
| 0
| 0.19661
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4c605de86fa814563d93837b4ac7fc1136c6f6bf
| 16,082
|
py
|
Python
|
research/brain_coder/common/config_lib_test.py
|
jdavidagudelo/tensorflow-models
|
6f019beec73b01861363bf717706e27f4210b979
|
[
"Apache-2.0"
] | 1
|
2021-05-17T01:42:29.000Z
|
2021-05-17T01:42:29.000Z
|
research/brain_coder/common/config_lib_test.py
|
jdavidagudelo/tensorflow-models
|
6f019beec73b01861363bf717706e27f4210b979
|
[
"Apache-2.0"
] | null | null | null |
research/brain_coder/common/config_lib_test.py
|
jdavidagudelo/tensorflow-models
|
6f019beec73b01861363bf717706e27f4210b979
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
"""Tests for common.config_lib."""
import tensorflow as tf
from research.brain_coder.common import config_lib # brain coder
class ConfigLibTest(tf.test.TestCase):
def testConfig(self):
config = config_lib.Config(hello='world', foo='bar', num=123, f=56.7)
self.assertEqual('world', config.hello)
self.assertEqual('bar', config['foo'])
config.hello = 'everyone'
config['bar'] = 9000
self.assertEqual('everyone', config['hello'])
self.assertEqual(9000, config.bar)
self.assertEqual(5, len(config))
def testConfigUpdate(self):
config = config_lib.Config(a=1, b=2, c=3)
config.update({'b': 10, 'd': 4})
self.assertEqual({'a': 1, 'b': 10, 'c': 3, 'd': 4}, config)
config = config_lib.Config(a=1, b=2, c=3)
config.update(b=10, d=4)
self.assertEqual({'a': 1, 'b': 10, 'c': 3, 'd': 4}, config)
config = config_lib.Config(a=1, b=2, c=3)
config.update({'e': 5}, b=10, d=4)
self.assertEqual({'a': 1, 'b': 10, 'c': 3, 'd': 4, 'e': 5}, config)
config = config_lib.Config(
a=1,
b=2,
x=config_lib.Config(
l='a',
y=config_lib.Config(m=1, n=2),
z=config_lib.Config(
q=config_lib.Config(a=10, b=20),
r=config_lib.Config(s=1, t=2))))
config.update(x={'y': {'m': 10}, 'z': {'r': {'s': 5}}})
self.assertEqual(
config_lib.Config(
a=1, b=2,
x=config_lib.Config(
l='a',
y=config_lib.Config(m=10, n=2),
z=config_lib.Config(
q=config_lib.Config(a=10, b=20),
r=config_lib.Config(s=5, t=2)))),
config)
config = config_lib.Config(
foo='bar',
num=100,
x=config_lib.Config(a=1, b=2, c=config_lib.Config(h=10, i=20, j=30)),
y=config_lib.Config(qrs=5, tuv=10),
d={'a': 1, 'b': 2},
l=[1, 2, 3])
config.update(
config_lib.Config(
foo='hat',
num=50.5,
x={'a': 5, 'z': -10},
y=config_lib.Config(wxyz=-1)),
d={'a': 10, 'c': 20},
l=[3, 4, 5, 6])
self.assertEqual(
config_lib.Config(
foo='hat',
num=50.5,
x=config_lib.Config(a=5, b=2, z=-10,
c=config_lib.Config(h=10, i=20, j=30)),
y=config_lib.Config(qrs=5, tuv=10, wxyz=-1),
d={'a': 10, 'c': 20},
l=[3, 4, 5, 6]),
config)
self.assertTrue(isinstance(config.x, config_lib.Config))
self.assertTrue(isinstance(config.x.c, config_lib.Config))
self.assertTrue(isinstance(config.y, config_lib.Config))
config = config_lib.Config(
foo='bar',
num=100,
x=config_lib.Config(a=1, b=2, c=config_lib.Config(h=10, i=20, j=30)),
y=config_lib.Config(qrs=5, tuv=10),
d={'a': 1, 'b': 2},
l=[1, 2, 3])
config.update(
config_lib.Config(
foo=1234,
num='hello',
x={'a': 5, 'z': -10, 'c': {'h': -5, 'k': 40}},
y=[1, 2, 3, 4],
d='stuff',
l={'a': 1, 'b': 2}))
self.assertEqual(
config_lib.Config(
foo=1234,
num='hello',
x=config_lib.Config(a=5, b=2, z=-10,
c=config_lib.Config(h=-5, i=20, j=30, k=40)),
y=[1, 2, 3, 4],
d='stuff',
l={'a': 1, 'b': 2}),
config)
self.assertTrue(isinstance(config.x, config_lib.Config))
self.assertTrue(isinstance(config.x.c, config_lib.Config))
self.assertTrue(isinstance(config.y, list))
def testConfigStrictUpdate(self):
config = config_lib.Config(a=1, b=2, c=3)
config.strict_update({'b': 10, 'c': 20})
self.assertEqual({'a': 1, 'b': 10, 'c': 20}, config)
config = config_lib.Config(a=1, b=2, c=3)
config.strict_update(b=10, c=20)
self.assertEqual({'a': 1, 'b': 10, 'c': 20}, config)
config = config_lib.Config(a=1, b=2, c=3, d=4)
config.strict_update({'d': 100}, b=10, a=20)
self.assertEqual({'a': 20, 'b': 10, 'c': 3, 'd': 100}, config)
config = config_lib.Config(
a=1,
b=2,
x=config_lib.Config(
l='a',
y=config_lib.Config(m=1, n=2),
z=config_lib.Config(
q=config_lib.Config(a=10, b=20),
r=config_lib.Config(s=1, t=2))))
config.strict_update(x={'y': {'m': 10}, 'z': {'r': {'s': 5}}})
self.assertEqual(
config_lib.Config(
a=1, b=2,
x=config_lib.Config(
l='a',
y=config_lib.Config(m=10, n=2),
z=config_lib.Config(
q=config_lib.Config(a=10, b=20),
r=config_lib.Config(s=5, t=2)))),
config)
config = config_lib.Config(
foo='bar',
num=100,
x=config_lib.Config(a=1, b=2, c=config_lib.Config(h=10, i=20, j=30)),
y=config_lib.Config(qrs=5, tuv=10),
d={'a': 1, 'b': 2},
l=[1, 2, 3])
config.strict_update(
config_lib.Config(
foo='hat',
num=50,
x={'a': 5, 'c': {'h': 100}},
y=config_lib.Config(tuv=-1)),
d={'a': 10, 'c': 20},
l=[3, 4, 5, 6])
self.assertEqual(
config_lib.Config(
foo='hat',
num=50,
x=config_lib.Config(a=5, b=2,
c=config_lib.Config(h=100, i=20, j=30)),
y=config_lib.Config(qrs=5, tuv=-1),
d={'a': 10, 'c': 20},
l=[3, 4, 5, 6]),
config)
def testConfigStrictUpdateFail(self):
config = config_lib.Config(a=1, b=2, c=3, x=config_lib.Config(a=1, b=2))
with self.assertRaises(KeyError):
config.strict_update({'b': 10, 'c': 20, 'd': 50})
with self.assertRaises(KeyError):
config.strict_update(b=10, d=50)
with self.assertRaises(KeyError):
config.strict_update(x={'c': 3})
with self.assertRaises(TypeError):
config.strict_update(a='string')
with self.assertRaises(TypeError):
config.strict_update(x={'a': 'string'})
with self.assertRaises(TypeError):
config.strict_update(x=[1, 2, 3])
def testConfigFromStr(self):
config = config_lib.Config.from_str("{'c': {'d': 5}, 'b': 2, 'a': 1}")
self.assertEqual(
{'c': {'d': 5}, 'b': 2, 'a': 1}, config)
self.assertTrue(isinstance(config, config_lib.Config))
self.assertTrue(isinstance(config.c, config_lib.Config))
def testConfigParse(self):
config = config_lib.Config.parse(
'hello="world",num=1234.5,lst=[10,20.5,True,"hi",("a","b","c")],'
'dct={9:10,"stuff":"qwerty","subdict":{1:True,2:False}},'
'subconfig=c(a=1,b=[1,2,[3,4]],c=c(f="f",g="g"))')
self.assertEqual(
{'hello': 'world', 'num': 1234.5,
'lst': [10, 20.5, True, 'hi', ('a', 'b', 'c')],
'dct': {9: 10, 'stuff': 'qwerty', 'subdict': {1: True, 2: False}},
'subconfig': {'a': 1, 'b': [1, 2, [3, 4]], 'c': {'f': 'f', 'g': 'g'}}},
config)
self.assertTrue(isinstance(config, config_lib.Config))
self.assertTrue(isinstance(config.subconfig, config_lib.Config))
self.assertTrue(isinstance(config.subconfig.c, config_lib.Config))
self.assertFalse(isinstance(config.dct, config_lib.Config))
self.assertFalse(isinstance(config.dct['subdict'], config_lib.Config))
self.assertTrue(isinstance(config.lst[4], tuple))
def testConfigParseErrors(self):
with self.assertRaises(SyntaxError):
config_lib.Config.parse('a=[1,2,b="hello"')
with self.assertRaises(SyntaxError):
config_lib.Config.parse('a=1,b=c(x="a",y="b"')
with self.assertRaises(SyntaxError):
config_lib.Config.parse('a=1,b=c(x="a")y="b"')
with self.assertRaises(SyntaxError):
config_lib.Config.parse('a=1,b=c(x="a"),y="b",')
def testOneOf(self):
def make_config():
return config_lib.Config(
data=config_lib.OneOf(
[config_lib.Config(task=1, a='hello'),
config_lib.Config(task=2, a='world', b='stuff'),
config_lib.Config(task=3, c=1234)],
task=2),
model=config_lib.Config(stuff=1))
config = make_config()
config.update(config_lib.Config.parse(
'model=c(stuff=2),data=c(task=1,a="hi")'))
self.assertEqual(
config_lib.Config(
data=config_lib.Config(task=1, a='hi'),
model=config_lib.Config(stuff=2)),
config)
config = make_config()
config.update(config_lib.Config.parse(
'model=c(stuff=2),data=c(task=2,a="hi")'))
self.assertEqual(
config_lib.Config(
data=config_lib.Config(task=2, a='hi', b='stuff'),
model=config_lib.Config(stuff=2)),
config)
config = make_config()
config.update(config_lib.Config.parse(
'model=c(stuff=2),data=c(task=3)'))
self.assertEqual(
config_lib.Config(
data=config_lib.Config(task=3, c=1234),
model=config_lib.Config(stuff=2)),
config)
config = make_config()
config.update(config_lib.Config.parse(
'model=c(stuff=2)'))
self.assertEqual(
config_lib.Config(
data=config_lib.Config(task=2, a='world', b='stuff'),
model=config_lib.Config(stuff=2)),
config)
config = make_config()
config.update(config_lib.Config.parse(
'model=c(stuff=2),data=c(task=4,d=9999)'))
self.assertEqual(
config_lib.Config(
data=config_lib.Config(task=4, d=9999),
model=config_lib.Config(stuff=2)),
config)
config = make_config()
config.update(config_lib.Config.parse(
'model=c(stuff=2),data=5'))
self.assertEqual(
config_lib.Config(
data=5,
model=config_lib.Config(stuff=2)),
config)
def testOneOfStrict(self):
def make_config():
return config_lib.Config(
data=config_lib.OneOf(
[config_lib.Config(task=1, a='hello'),
config_lib.Config(task=2, a='world', b='stuff'),
config_lib.Config(task=3, c=1234)],
task=2),
model=config_lib.Config(stuff=1))
config = make_config()
config.strict_update(config_lib.Config.parse(
'model=c(stuff=2),data=c(task=1,a="hi")'))
self.assertEqual(
config_lib.Config(
data=config_lib.Config(task=1, a='hi'),
model=config_lib.Config(stuff=2)),
config)
config = make_config()
config.strict_update(config_lib.Config.parse(
'model=c(stuff=2),data=c(task=2,a="hi")'))
self.assertEqual(
config_lib.Config(
data=config_lib.Config(task=2, a='hi', b='stuff'),
model=config_lib.Config(stuff=2)),
config)
config = make_config()
config.strict_update(config_lib.Config.parse(
'model=c(stuff=2),data=c(task=3)'))
self.assertEqual(
config_lib.Config(
data=config_lib.Config(task=3, c=1234),
model=config_lib.Config(stuff=2)),
config)
config = make_config()
config.strict_update(config_lib.Config.parse(
'model=c(stuff=2)'))
self.assertEqual(
config_lib.Config(
data=config_lib.Config(task=2, a='world', b='stuff'),
model=config_lib.Config(stuff=2)),
config)
def testNestedOneOf(self):
def make_config():
return config_lib.Config(
data=config_lib.OneOf(
[config_lib.Config(task=1, a='hello'),
config_lib.Config(
task=2,
a=config_lib.OneOf(
[config_lib.Config(x=1, y=2),
config_lib.Config(x=-1, y=1000, z=4)],
x=1)),
config_lib.Config(task=3, c=1234)],
task=2),
model=config_lib.Config(stuff=1))
config = make_config()
config.update(config_lib.Config.parse(
'model=c(stuff=2),data=c(task=2,a=c(x=-1,z=8))'))
self.assertEqual(
config_lib.Config(
data=config_lib.Config(
task=2,
a=config_lib.Config(x=-1, y=1000, z=8)),
model=config_lib.Config(stuff=2)),
config)
config = make_config()
config.strict_update(config_lib.Config.parse(
'model=c(stuff=2),data=c(task=2,a=c(x=-1,z=8))'))
self.assertEqual(
config_lib.Config(
data=config_lib.Config(
task=2,
a=config_lib.Config(x=-1, y=1000, z=8)),
model=config_lib.Config(stuff=2)),
config)
config = make_config()
config.update(config_lib.Config.parse('model=c(stuff=2)'))
self.assertEqual(
config_lib.Config(
data=config_lib.Config(
task=2,
a=config_lib.Config(x=1, y=2)),
model=config_lib.Config(stuff=2)),
config)
config = make_config()
config.strict_update(config_lib.Config.parse('model=c(stuff=2)'))
self.assertEqual(
config_lib.Config(
data=config_lib.Config(
task=2,
a=config_lib.Config(x=1, y=2)),
model=config_lib.Config(stuff=2)),
config)
def testOneOfStrictErrors(self):
def make_config():
return config_lib.Config(
data=config_lib.OneOf(
[config_lib.Config(task=1, a='hello'),
config_lib.Config(task=2, a='world', b='stuff'),
config_lib.Config(task=3, c=1234)],
task=2),
model=config_lib.Config(stuff=1))
config = make_config()
with self.assertRaises(TypeError):
config.strict_update(config_lib.Config.parse(
'model=c(stuff=2),data=[1,2,3]'))
config = make_config()
with self.assertRaises(KeyError):
config.strict_update(config_lib.Config.parse(
'model=c(stuff=2),data=c(task=3,c=5678,d=9999)'))
config = make_config()
with self.assertRaises(ValueError):
config.strict_update(config_lib.Config.parse(
'model=c(stuff=2),data=c(task=4,d=9999)'))
config = make_config()
with self.assertRaises(TypeError):
config.strict_update(config_lib.Config.parse(
'model=c(stuff=2),data=5'))
if __name__ == '__main__':
tf.test.main()
| 37.751174
| 84
| 0.495958
| 2,044
| 16,082
| 3.785714
| 0.060665
| 0.198889
| 0.317912
| 0.061385
| 0.885888
| 0.871802
| 0.86198
| 0.839623
| 0.787025
| 0.766219
| 0
| 0.050963
| 0.344795
| 16,082
| 425
| 85
| 37.84
| 0.683401
| 0.000684
| 0
| 0.757256
| 0
| 0.013193
| 0.07309
| 0.042781
| 0
| 0
| 0
| 0
| 0.158311
| 1
| 0.039578
| false
| 0
| 0.013193
| 0.010554
| 0.065963
| 0.002639
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
4c6cea76643c562f1e28cf80347c636e77c2df73
| 86
|
py
|
Python
|
ketaway/__init__.py
|
ketaway/ketaway
|
cbfbcaa1a80f051a2e7dd40598f33ff749ecd2dd
|
[
"MIT"
] | null | null | null |
ketaway/__init__.py
|
ketaway/ketaway
|
cbfbcaa1a80f051a2e7dd40598f33ff749ecd2dd
|
[
"MIT"
] | null | null | null |
ketaway/__init__.py
|
ketaway/ketaway
|
cbfbcaa1a80f051a2e7dd40598f33ff749ecd2dd
|
[
"MIT"
] | null | null | null |
from ketaway.mongkolchai import Ketaway
from ketaway.mongkolchai import Test_Update
| 28.666667
| 44
| 0.860465
| 11
| 86
| 6.636364
| 0.545455
| 0.30137
| 0.60274
| 0.767123
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 86
| 2
| 45
| 43
| 0.960526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
d5f4a4854f6d55ce0ccf465fdf2041bd253f2f64
| 293
|
py
|
Python
|
demos/instance_occlsegm/instance_occlsegm_lib/contrib/synthetic2d/extensions/__init__.py
|
pazeshun/jsk_apc
|
0ff42000ad5992f8a31e719a5360a39cf4fa1fde
|
[
"BSD-3-Clause"
] | null | null | null |
demos/instance_occlsegm/instance_occlsegm_lib/contrib/synthetic2d/extensions/__init__.py
|
pazeshun/jsk_apc
|
0ff42000ad5992f8a31e719a5360a39cf4fa1fde
|
[
"BSD-3-Clause"
] | 2
|
2019-04-11T05:36:23.000Z
|
2019-08-19T12:58:10.000Z
|
demos/instance_occlsegm/instance_occlsegm_lib/contrib/synthetic2d/extensions/__init__.py
|
pazeshun/jsk_apc
|
0ff42000ad5992f8a31e719a5360a39cf4fa1fde
|
[
"BSD-3-Clause"
] | null | null | null |
# flake8: noqa
from .instance_segmentation_vis_report import InstanceSegmentationVisReport
from .instance_segmentation_voc_evaluator import InstanceSegmentationVOCEvaluator
from .params_report import ParamsReport
from .semantic_segmentation_vis_report import SemanticSegmentationVisReport
| 29.3
| 81
| 0.901024
| 28
| 293
| 9.071429
| 0.571429
| 0.141732
| 0.188976
| 0.212598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003704
| 0.078498
| 293
| 9
| 82
| 32.555556
| 0.937037
| 0.040956
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
d5f85fce7937fe992c8e579a91891108beeed5b2
| 47
|
py
|
Python
|
tensortrade/oms/services/__init__.py
|
zeeshanalipanhwar/tensortrade
|
7c294293cb65d0e31cae47402145dffe2e7bc75f
|
[
"Apache-2.0"
] | 3,081
|
2020-01-12T13:42:13.000Z
|
2022-03-27T18:09:31.000Z
|
tensortrade/oms/services/__init__.py
|
zeeshanalipanhwar/tensortrade
|
7c294293cb65d0e31cae47402145dffe2e7bc75f
|
[
"Apache-2.0"
] | 257
|
2020-01-15T03:14:29.000Z
|
2022-03-31T04:19:14.000Z
|
tensortrade/oms/services/__init__.py
|
zeeshanalipanhwar/tensortrade
|
7c294293cb65d0e31cae47402145dffe2e7bc75f
|
[
"Apache-2.0"
] | 804
|
2020-01-12T12:22:22.000Z
|
2022-03-28T13:41:59.000Z
|
from . import execution
from . import slippage
| 15.666667
| 23
| 0.787234
| 6
| 47
| 6.166667
| 0.666667
| 0.540541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.170213
| 47
| 2
| 24
| 23.5
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
910b764b42a528ef2f18a74aab31a62de880e4ca
| 2,794
|
py
|
Python
|
plenum/test/bls/test_update_bls_key.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 148
|
2017-07-11T19:05:25.000Z
|
2022-03-16T21:31:20.000Z
|
plenum/test/bls/test_update_bls_key.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 561
|
2017-06-29T17:59:56.000Z
|
2022-03-09T15:47:14.000Z
|
plenum/test/bls/test_update_bls_key.py
|
andkononykhin/plenum
|
28dc1719f4b7e80d31dafbadb38cfec4da949886
|
[
"Apache-2.0"
] | 378
|
2017-06-29T17:45:27.000Z
|
2022-03-26T07:27:59.000Z
|
from plenum.test.bls.helper import check_update_bls_key

nodeCount = 4
nodes_wth_bls = 4

# As we use tests with Module scope, results from previous tests are accumulated,
# so by rotating the BLS keys one by one we will eventually have all keys changed.

def test_update_bls_one_node(looper, txnPoolNodeSet,
                             sdk_wallet_stewards,
                             sdk_wallet_client,
                             sdk_pool_handle):
    '''
    Rotated BLS key for the 1st node;
    BLS multi-signatures must be calculated for all Nodes.
    '''
    check_update_bls_key(node_num=0, saved_multi_sigs_count=4,
                         looper=looper, txnPoolNodeSet=txnPoolNodeSet,
                         sdk_wallet_stewards=sdk_wallet_stewards,
                         sdk_wallet_client=sdk_wallet_client,
                         sdk_pool_handle=sdk_pool_handle)

def test_update_bls_two_nodes(looper, txnPoolNodeSet,
                              sdk_wallet_stewards,
                              sdk_wallet_client,
                              sdk_pool_handle):
    '''
    Rotated BLS keys for the 1st and 2nd nodes;
    BLS multi-signatures must be calculated for all Nodes.
    '''
    check_update_bls_key(node_num=1, saved_multi_sigs_count=4,
                         looper=looper, txnPoolNodeSet=txnPoolNodeSet,
                         sdk_wallet_stewards=sdk_wallet_stewards,
                         sdk_wallet_client=sdk_wallet_client,
                         sdk_pool_handle=sdk_pool_handle)

def test_update_bls_three_nodes(looper, txnPoolNodeSet,
                                sdk_wallet_stewards,
                                sdk_wallet_client,
                                sdk_pool_handle):
    '''
    Rotated BLS keys for Nodes 1-3;
    BLS multi-signatures must be calculated for all Nodes.
    '''
    check_update_bls_key(node_num=2, saved_multi_sigs_count=4,
                         looper=looper, txnPoolNodeSet=txnPoolNodeSet,
                         sdk_wallet_stewards=sdk_wallet_stewards,
                         sdk_wallet_client=sdk_wallet_client,
                         sdk_pool_handle=sdk_pool_handle)

def test_update_bls_all_nodes(looper, txnPoolNodeSet,
                              sdk_wallet_stewards,
                              sdk_wallet_client,
                              sdk_pool_handle):
    '''
    Rotated BLS keys for all Nodes;
    BLS multi-signatures must be calculated for all Nodes.
    '''
    check_update_bls_key(node_num=3, saved_multi_sigs_count=4,
                         looper=looper, txnPoolNodeSet=txnPoolNodeSet,
                         sdk_wallet_stewards=sdk_wallet_stewards,
                         sdk_wallet_client=sdk_wallet_client,
                         sdk_pool_handle=sdk_pool_handle)
| 41.088235
| 84
| 0.590193
| 310
| 2,794
| 4.929032
| 0.206452
| 0.141361
| 0.133508
| 0.157068
| 0.840314
| 0.840314
| 0.840314
| 0.840314
| 0.840314
| 0.840314
| 0
| 0.008408
| 0.361489
| 2,794
| 67
| 85
| 41.701493
| 0.848094
| 0.180387
| 0
| 0.717949
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0
| 0.025641
| 0
| 0.128205
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9117deb40405b337e3f577e57bf7bd9c97067e75
| 8,611
|
py
|
Python
|
tests/test_image1D_supervised.py
|
wernergkrebs/autokeras
|
b473fc1aa86e809229ea1af0fcab3b00ecf4676e
|
[
"MIT"
] | null | null | null |
tests/test_image1D_supervised.py
|
wernergkrebs/autokeras
|
b473fc1aa86e809229ea1af0fcab3b00ecf4676e
|
[
"MIT"
] | null | null | null |
tests/test_image1D_supervised.py
|
wernergkrebs/autokeras
|
b473fc1aa86e809229ea1af0fcab3b00ecf4676e
|
[
"MIT"
] | null | null | null |
from unittest.mock import patch
import pytest
from autokeras.image1D_supervised import *
from tests.common import clean_dir, MockProcess, simple_transform
from autokeras.constant import Constant
import os

def mock_train(**kwargs):
    str(kwargs)
    return 1, 0

def test_train_x_array_exception():
    clf = Image1DClassifier()
    with pytest.raises(Exception) as info:
        clf.fit(15, [])
    assert str(info.value) == 'x_train should have exactly 2 dimensions.'

def test_xy_dim_exception():
    clf = Image1DClassifier()
    with pytest.raises(Exception) as info:
        clf.fit([[1, 2], [3, 4]], [6, 7, 8])
    assert str(info.value) == 'x_train and y_train should have the same number of instances.'

def test_x_float_exception():
    clf = Image1DClassifier()
    with pytest.raises(Exception) as info:
        clf.fit([[1, 'abc'], [3, 4]], [7, 8])
    assert str(info.value) == 'x_train should only contain numerical data.'

@patch('torch.multiprocessing.Pool', new=MockProcess)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_fit_predict(_):
    Constant.MAX_ITER_NUM = 1
    Constant.MAX_MODEL_NUM = 4
    Constant.SEARCH_MAX_ITER = 1
    Constant.T_MIN = 0.8
    Constant.DATA_AUGMENTATION = False
    path = 'tests/resources/temp'
    clean_dir(path)
    clf = Image1DClassifier(path=path, verbose=True)
    train_x = np.random.rand(100, 25)
    train_y = np.random.randint(0, 5, 100)
    clf.fit(train_x, train_y)
    results = clf.predict(train_x)
    assert all(map(lambda result: result in train_y, results))
    clean_dir(path)

@patch('torch.multiprocessing.Pool', new=MockProcess)
def test_timeout():
    # Constant.MAX_MODEL_NUM = 4
    Constant.SEARCH_MAX_ITER = 1000
    Constant.T_MIN = 0.0001
    Constant.DATA_AUGMENTATION = False
    path = 'tests/resources/temp'
    clean_dir(path)
    clf = Image1DClassifier(path=path, verbose=False)
    train_x = np.random.rand(100, 25)
    train_y = np.random.randint(0, 5, 100)
    with pytest.raises(TimeoutError):
        clf.fit(train_x, train_y, time_limit=1)
    clean_dir(path)

@patch('torch.multiprocessing.Pool', new=MockProcess)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_timeout_resume(_):
    Constant.MAX_ITER_NUM = 1
    # make it impossible to complete within 10sec
    Constant.MAX_MODEL_NUM = 1000
    Constant.SEARCH_MAX_ITER = 1
    Constant.T_MIN = 0.8
    train_x = np.random.rand(100, 25)
    train_y = np.random.randint(0, 5, 100)
    test_x = np.random.rand(100, 25)
    path = 'tests/resources/temp'
    clean_dir(path)
    clf = Image1DClassifier(path=path, verbose=False, resume=False)
    clf.n_epochs = 100
    clf.fit(train_x, train_y, 15)
    history_len = len(clf.load_searcher().history)
    assert history_len != 0
    results = clf.predict(test_x)
    assert len(results) == 100
    clf = Image1DClassifier(verbose=False, path=path, resume=True)
    assert len(clf.load_searcher().history) == history_len
    Constant.MAX_MODEL_NUM = history_len + 1
    clf.fit(train_x, train_y)
    assert len(clf.load_searcher().history) == history_len + 1
    results = clf.predict(test_x)
    assert len(results) == 100
    clean_dir(path)

@patch('torch.multiprocessing.Pool', new=MockProcess)
@patch('autokeras.bayesian.transform', side_effect=simple_transform)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_final_fit(_, _1):
    Constant.LIMIT_MEMORY = True
    path = 'tests/resources/temp'
    clean_dir(path)
    clf = Image1DClassifier(path=path, verbose=False)
    Constant.MAX_ITER_NUM = 1
    Constant.MAX_MODEL_NUM = 1
    Constant.SEARCH_MAX_ITER = 1
    Constant.N_NEIGHBOURS = 1
    Constant.T_MIN = 0.8
    train_x = np.random.rand(100, 25)
    train_y = np.random.randint(0, 5, 100)
    test_x = np.random.rand(100, 25)
    test_y = np.random.randint(0, 5, 100)
    clf.fit(train_x, train_y)
    clf.final_fit(train_x, train_y, test_x, test_y)
    results = clf.predict(test_x)
    assert len(results) == 100
    clean_dir(path)

@patch('torch.multiprocessing.Pool', new=MockProcess)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_save_continue(_):
    Constant.MAX_ITER_NUM = 1
    Constant.MAX_MODEL_NUM = 1
    Constant.SEARCH_MAX_ITER = 1
    Constant.T_MIN = 0.8
    train_x = np.random.rand(100, 25)
    train_y = np.random.randint(0, 5, 100)
    test_x = np.random.rand(100, 25)
    path = 'tests/resources/temp'
    clean_dir(path)
    clf = Image1DClassifier(path=path, verbose=False, resume=False)
    clf.n_epochs = 100
    clf.fit(train_x, train_y)
    assert len(clf.load_searcher().history) == 1
    Constant.MAX_MODEL_NUM = 2
    clf = Image1DClassifier(verbose=False, path=path, resume=True)
    clf.fit(train_x, train_y)
    results = clf.predict(test_x)
    assert len(results) == 100
    assert len(clf.load_searcher().history) == 2
    Constant.MAX_MODEL_NUM = 1
    clf = Image1DClassifier(verbose=False, path=path, resume=False)
    clf.fit(train_x, train_y)
    results = clf.predict(test_x)
    assert len(results) == 100
    assert len(clf.load_searcher().history) == 1
    clean_dir(path)

@patch('torch.multiprocessing.Pool', new=MockProcess)
@patch('autokeras.bayesian.transform', side_effect=simple_transform)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_fit_csv_file(_, _1):
    pass

@patch('autokeras.image_supervised.temp_folder_generator', return_value='dummy_path/')
def test_init_image_classifier_with_none_path(_):
    clf = Image1DClassifier()
    assert clf.path == 'dummy_path/'

@patch('torch.multiprocessing.Pool', new=MockProcess)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_fit_predict_regression(_):
    Constant.MAX_ITER_NUM = 1
    Constant.MAX_MODEL_NUM = 4
    Constant.SEARCH_MAX_ITER = 1
    Constant.T_MIN = 0.8
    Constant.DATA_AUGMENTATION = False
    path = 'tests/resources/temp'
    clean_dir(path)
    clf = Image1DRegressor(path=path, verbose=False)
    train_x = np.random.rand(100, 25)
    train_y = np.random.randint(0, 5, 100)
    clf.fit(train_x, train_y)
    results = clf.predict(train_x)
    assert len(results) == len(train_x)
    clean_dir(path)

@patch('torch.multiprocessing.Pool', new=MockProcess)
@patch('autokeras.search.ModelTrainer.train_model', side_effect=mock_train)
def test_export_keras_model(_):
    Constant.MAX_ITER_NUM = 1
    Constant.MAX_MODEL_NUM = 1
    Constant.SEARCH_MAX_ITER = 1
    Constant.T_MIN = 0.8
    train_x = np.random.rand(100, 25)
    train_y = np.random.randint(0, 5, 100)
    test_x = np.random.rand(100, 25)
    path = 'tests/resources/temp'
    clean_dir(path)
    clf = Image1DClassifier(path=path, verbose=False, resume=False)
    clf.n_epochs = 100
    clf.fit(train_x, train_y)
    score = clf.evaluate(train_x, train_y)
    assert score <= 1.0
    test_x = clf.reshapeTo2D(test_x)  # for saved processing.
    train_x_2d = clf.reshapeTo2D(train_x)  # for saved processing.
    model_file_name = os.path.join(path, 'test_keras_model.h5')
    clf.export_keras_model(model_file_name)
    from keras.models import load_model
    model = load_model(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    del model, results, model_file_name
    model_file_name = os.path.join(path, 'test_autokeras_model.pkl')
    clf.export_autokeras_model(model_file_name)
    from autokeras.utils import pickle_from_file
    model = pickle_from_file(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    score = model.evaluate(train_x_2d, train_y)
    assert score <= 1.0
    clean_dir(path)
    clf = Image1DRegressor(path=path, verbose=False, resume=False)
    clf.n_epochs = 100
    clf.fit(train_x, train_y)
    score = clf.evaluate(train_x, train_y)
    assert score >= 0.0
    model_file_name = os.path.join(path, 'test_keras_model.h5')
    clf.export_keras_model(model_file_name)
    from keras.models import load_model
    model = load_model(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    del model, results, model_file_name
    model_file_name = os.path.join(path, 'test_autokeras_model.pkl')
    clf.export_autokeras_model(model_file_name)
    from autokeras.utils import pickle_from_file
    model = pickle_from_file(model_file_name)
    results = model.predict(test_x)
    assert len(results) == len(test_x)
    score = model.evaluate(train_x_2d, train_y)
    assert score >= 0.0
    clean_dir(path)
| 34.170635
| 93
| 0.71339
| 1,271
| 8,611
| 4.592447
| 0.121164
| 0.02981
| 0.030838
| 0.028782
| 0.833819
| 0.814802
| 0.810348
| 0.796128
| 0.76135
| 0.729827
| 0
| 0.032077
| 0.170944
| 8,611
| 251
| 94
| 34.306773
| 0.785544
| 0.013007
| 0
| 0.742991
| 0
| 0
| 0.117142
| 0.076171
| 0
| 0
| 0
| 0
| 0.116822
| 1
| 0.060748
| false
| 0.004673
| 0.046729
| 0
| 0.11215
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
91188bfee4fd93cdb2eb68021ee0854d66da1ef7
| 10,888
|
py
|
Python
|
cogs/mal.py
|
Sushiemaster/selfbot
|
d20a3ea4a056577440b99e507f31dc136da9e8cd
|
[
"MIT"
] | null | null | null |
cogs/mal.py
|
Sushiemaster/selfbot
|
d20a3ea4a056577440b99e507f31dc136da9e8cd
|
[
"MIT"
] | null | null | null |
cogs/mal.py
|
Sushiemaster/selfbot
|
d20a3ea4a056577440b99e507f31dc136da9e8cd
|
[
"MIT"
] | null | null | null |
import spice_api as spice
import requests
import re
import asyncio
import gc
from discord.ext import commands
from bs4 import BeautifulSoup
from appuselfbot import bot_prefix
from cogs.utils.checks import *
'''Module for MyAnimeList search of anime, manga, and light novels.'''
class Mal:
def __init__(self, bot):
self.bot = bot
# Mal search (chained with either anime or manga)
@commands.group(pass_context=True)
async def mal(self, ctx):
"""Search MyAnimeList for an anime/manga. Ex: >mal anime Steins;Gate
Optionally, put [link] after the anime/manga part to just get the link instead of the full info.
Ex: >mal anime [link] Steins;Gate"""
if ctx.invoked_subcommand is None:
await self.bot.send_message(ctx.message.channel,
bot_prefix + 'Invalid Syntax. Example use: ``>mal anime steins;gate`` or ``>mal manga boku no hero academia``')
# Anime search for Mal
@mal.command(pass_context=True)
async def anime(self, ctx, *, msg: str):
"""Search the anime database. Ex: >mal anime Steins;Gate"""
loop = asyncio.get_event_loop()
config = load_optional_config()
fetch = await self.bot.send_message(ctx.message.channel, bot_prefix + 'Searching...')
try:
link = False
try:
if msg.startswith('[link]'):
msg = msg[6:]
link = True
# Search google for the anime under site:myanimelist.net
searchUrl = "https://www.googleapis.com/customsearch/v1?q=site:myanimelist.net anime " + msg.strip() + "&start=" + '1' + "&key=" + \
config['google_api_key'] + "&cx=" + config[
'custom_search_engine']
r = requests.get(searchUrl)
response = r.content.decode('utf-8')
result = json.loads(response)
animeID = re.findall('/anime/(.*)/', str(result['items'][0]['link']))
results = await loop.run_in_executor(None, spice.search_id, int(animeID[0]), spice.get_medium('anime'),
spice.init_auth(config['mal_username'], config['mal_password']))
gc.collect()
# If no results found or daily api limit exceeded, use spice's search
if not results:
allresults = await loop.run_in_executor(None, spice.search, msg.strip(), spice.get_medium('anime'),
spice.init_auth(config['mal_username'], config['mal_password']))
gc.collect()
results = allresults[0]
# On any exception, search spice instead
except:
allresults = await loop.run_in_executor(None, spice.search, msg.strip(), spice.get_medium('anime'),
spice.init_auth(config['mal_username'], config['mal_password']))
gc.collect()
results = allresults[0]
# No results found for specified tags
if not results:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'No results.')
await self.bot.delete_message(fetch)
await self.bot.delete_message(ctx.message)
return
if not embed_perms(ctx.message) or link is True:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'https://myanimelist.net/anime/%s' % results.id)
await self.bot.delete_message(fetch)
await self.bot.delete_message(ctx.message)
return
# Formatting embed
selection = results
synopsis = BeautifulSoup(selection.synopsis, 'lxml')
em = discord.Embed(description='{}'.format('https://myanimelist.net/anime/%s' % selection.id),
colour=0x0066CC)
try:
english = selection.english
if english:
em.add_field(name='English Title', value=english, inline=False)
except:
pass
em.add_field(name='Type', value=selection.anime_type)
if selection.episodes == '0':
episodes = 'Unknown'
else:
episodes = selection.episodes
em.add_field(name='Episodes', value=episodes)
em.add_field(name='Score', value=selection.score + '/10')
em.add_field(name='Status', value=selection.status)
try:
synop = synopsis.get_text()[:400].split('.')
text = ''
for i in range(0, len(synop)-1):
text += synop[i] + '.'
except:
text = synopsis.get_text()
em.add_field(name='Synopsis',
value=text + ' [Read more »](https://myanimelist.net/anime/%s)' % selection.id)
if selection.status == "Publishing":
date = selection.raw_data.start_date.text + " -"
else:
date = selection.raw_data.start_date.text + " - " + selection.raw_data.end_date.text
if date:
em.add_field(name='Airing Time:', value=date)
em.set_thumbnail(url=selection.image_url)
em.set_author(name=selection.title,
icon_url='https://myanimelist.cdn-dena.com/img/sp/icon/apple-touch-icon-256.png')
await self.bot.send_message(ctx.message.channel, embed=em)
await self.bot.delete_message(fetch)
await self.bot.delete_message(ctx.message)
except:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'No results')
await self.bot.delete_message(fetch)
# Manga search for Mal
@mal.command(pass_context=True)
async def manga(self, ctx, *, msg: str):
"""Search the manga database. Ex: >mal manga Boku no Hero Academia"""
loop = asyncio.get_event_loop()
config = load_optional_config()
fetch = await self.bot.send_message(ctx.message.channel, bot_prefix + 'Searching...')
try:
link = False
try:
if msg.startswith('[link]'):
msg = msg[6:]
link = True
config = load_optional_config()
# Search google for the manga under site:myanimelist.net
searchUrl = "https://www.googleapis.com/customsearch/v1?q=site:myanimelist.net manga " + msg.strip() + "&start=" + '1' + "&key=" + \
config['google_api_key'] + "&cx=" + config[
'custom_search_engine']
r = requests.get(searchUrl)
response = r.content.decode('utf-8')
result = json.loads(response)
mangaID = re.findall('/manga/(.*)/', str(result['items'][0]['link']))
results = await loop.run_in_executor(None, spice.search_id, int(mangaID[0]), spice.get_medium('manga'), spice.init_auth(config['mal_username'], config['mal_password']))
gc.collect()
# If no results found or daily api limit exceeded, use spice's search
if not results:
allresults = await loop.run_in_executor(None, spice.search, msg.strip(), spice.get_medium('manga'),
spice.init_auth(config['mal_username'], config['mal_password']))
gc.collect()
results = allresults[0]
# On any exception, search spice instead
except:
allresults = await loop.run_in_executor(None, spice.search, msg.strip(), spice.get_medium('manga'),
spice.init_auth(config['mal_username'], config['mal_password']))
gc.collect()
results = allresults[0]
# No results found for specified tags
if not results:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'No results.')
await self.bot.delete_message(fetch)
await self.bot.delete_message(ctx.message)
return
if not embed_perms(ctx.message) or link is True:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'https://myanimelist.net/manga/%s' % results.id)
await self.bot.delete_message(fetch)
await self.bot.delete_message(ctx.message)
return
# Formatting
selection = results
synopsis = BeautifulSoup(selection.synopsis, 'lxml')
em = discord.Embed(description='{}'.format('https://myanimelist.net/manga/%s' % selection.id),
colour=0x0066CC)
em.add_field(name='Type', value=selection.manga_type)
if selection.chapters == '0':
chapters = 'Unknown'
else:
chapters = selection.chapters
em.add_field(name='Chapters', value=chapters)
em.add_field(name='Score', value=selection.score + '/10')
try:
english = selection.english
if english:
em.add_field(name='English Title', value=english, inline=False)
except:
pass
em.add_field(name='Status', value=selection.status)
try:
synop = synopsis.get_text()[:400].split('.')
text = ''
for i in range(0, len(synop) - 1):
text += synop[i] + '.'
except:
text = synopsis.get_text()
em.add_field(name='Synopsis',
value=text + ' [Read more »](https://myanimelist.net/manga/%s)' % selection.id)
if selection.status == "Publishing":
date = selection.raw_data.start_date.text + " -"
else:
date = selection.raw_data.start_date.text + " - " + selection.raw_data.end_date.text
if date:
em.add_field(name='Publishing Time:', value=date)
em.set_thumbnail(url=selection.image_url)
em.set_author(name=selection.title,
icon_url='https://myanimelist.cdn-dena.com/img/sp/icon/apple-touch-icon-256.png')
await self.bot.send_message(ctx.message.channel, embed=em)
await self.bot.delete_message(fetch)
await self.bot.delete_message(ctx.message)
except:
await self.bot.send_message(ctx.message.channel, bot_prefix + 'No results')
await self.bot.delete_message(fetch)
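# Extension entry point: discord.py calls setup() when this cog is loaded.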
def setup(bot):
bot.add_cog(Mal(bot))
| 46.529915
| 184
| 0.550147
| 1,212
| 10,888
| 4.825908
| 0.174092
| 0.032313
| 0.051291
| 0.043084
| 0.844589
| 0.817063
| 0.80065
| 0.784579
| 0.784579
| 0.770901
| 0
| 0.006777
| 0.335966
| 10,888
| 233
| 185
| 46.729614
| 0.801936
| 0.047116
| 0
| 0.756757
| 0
| 0.016216
| 0.12027
| 0
| 0.010811
| 0
| 0.00161
| 0
| 0
| 1
| 0.010811
| false
| 0.059459
| 0.048649
| 0
| 0.086486
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
e67e6e83245a2ad11db28787714d9410a5ae18e8
| 198
|
py
|
Python
|
tests/test_conda_store.py
|
peytondmurray/conda-store
|
40fe4a0cecbefaff7cac819244f9862bd188b045
|
[
"BSD-3-Clause"
] | 47
|
2020-05-23T10:02:57.000Z
|
2022-03-18T00:14:58.000Z
|
tests/test_conda_store.py
|
peytondmurray/conda-store
|
40fe4a0cecbefaff7cac819244f9862bd188b045
|
[
"BSD-3-Clause"
] | 192
|
2020-06-12T02:05:14.000Z
|
2022-03-26T13:16:33.000Z
|
tests/test_conda_store.py
|
peytondmurray/conda-store
|
40fe4a0cecbefaff7cac819244f9862bd188b045
|
[
"BSD-3-Clause"
] | 15
|
2020-06-12T12:38:23.000Z
|
2021-11-11T00:39:57.000Z
|
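# Smoke tests: each call below should simply run without raising against the conda_store fixture.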
def test_conda_store_update_storage_metrics(conda_store):
    conda_store.update_storage_metrics()

def test_conda_store_update_conda_channels(conda_store):
    conda_store.update_conda_channels()
| 28.285714
| 57
| 0.858586
| 28
| 198
| 5.428571
| 0.285714
| 0.394737
| 0.421053
| 0.223684
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.080808
| 198
| 6
| 58
| 33
| 0.835165
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e6878a617f679df32f0431e0d545b161b0d4ed0b
| 93,602
|
py
|
Python
|
Ruiplan.py
|
lynch829/Ruiplan2.0
|
0d5ce96de550cc913f022437c629066840da10cc
|
[
"MIT"
] | 1
|
2021-06-07T21:04:37.000Z
|
2021-06-07T21:04:37.000Z
|
Ruiplan.py
|
lynch829/Ruiplan2.0
|
0d5ce96de550cc913f022437c629066840da10cc
|
[
"MIT"
] | null | null | null |
Ruiplan.py
|
lynch829/Ruiplan2.0
|
0d5ce96de550cc913f022437c629066840da10cc
|
[
"MIT"
] | null | null | null |
# RayStation version: 4.7.5.4
import socket
import wpf
from System.Windows import *
from System.Windows.Controls import *
import collections
import sys
import os
import time
from connect import *
patient = get_current("Patient")
machine_db=get_current("MachineDB")
roi_names=[r.Name for r in patient.PatientModel.RegionsOfInterest]
ch_count=0
for ch in roi_names:
n=ch[0:5].upper()
if n=="COUCH":
ch_count+=1
if ch_count==0:
ch_count=2
lebcont=0
for d in roi_names:
if "CTRL." in d:
lebcont += 1
ctrleb=d
if lebcont != 1:
raise Exception ("No Section or Strategy Selected. Please check!!!")
os._exit()
stgleb=ctrleb.split('.')[1]
if stgleb=="IMRT" and ch_count==2:
ac_list=["600CD","VARIAN 23CX","TrueBeamSN1403","TB1403FFF","UN-SN2240"]
bm_list=["F1B4","F3B2","F1B4&2","BST4","BST7","AVG5","AVG7","AVG9"]
if stgleb=="IMRT" and ch_count==3:
ac_list=["4082", "4082_FFF"]
bm_list=["F1B4","F3B2","F1B4&2","BST4","BST7","AVG5","AVG7","AVG9"]
if stgleb=="VMAT" and ch_count==2:
ac_list=["TrueBeamSN1403","TB1403FFF","UN-SN2240"]
bm_list=["2 ARC","4 ARC","6 P-ARC","5ANG-10ARC","9ANG-9ARC"]
if stgleb=="VMAT" and ch_count==3:
ac_list=["4082","4082_FFF"]
bm_list=["2 ARC","4 ARC","6 P-ARC","5ANG-10ARC","9ANG-9ARC"]
class MyWindow(Window):
def __init__(self):
wpf.LoadComponent(self, 'RuiPlan.xaml')
#self.Topmost=True
self.WindowStartupLocation=WindowStartupLocation.CenterScreen
#ac_list = ["4082","UN-SN2240","TrueBeamSN1403"]
self.SelectAC.ItemsSource = ac_list
self.SelectBM.ItemsSource = bm_list
def ConfirmClicked(self, sender, event):
''' Gets the dose at the selected relative volume for the selected ROI '''
ac_name = self.SelectAC.SelectedItem
bm_name = self.SelectBM.SelectedItem
comleb = self.SelectBM.SelectedItem
fdose = self.fd.Text
for ck in self.opck.Children:
if ck.IsChecked:
opleb="Y"
else:
opleb="N"
if ac_name == "" or bm_name== "":
return
# pswd=self.pwd.Password
# if pswd.upper()==chr(65)+chr(67):
# pass
# else:
# raise Exception ("Wrong Password!!!")
# os._exit()
text = "New plan with {0} beams have been built."
self.Ptext.Text = text.format(bm_name)
self.RelVolPanel.Visibility = Visibility.Visible
with open("paratmp","wb") as fff:
fff.write(ac_name + '\r\n' + bm_name+ '\r\n'+opleb+ '\r\n'+fdose+ '\r\n')
def CloseClicked(self, sender, event):
self.DialogResult = True
window = MyWindow()
window.ShowDialog()
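# Host-based license check: proceed only if this machine's hostname is listed in the shared .lic file and the embedded expiry date has not passed.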
hm=socket.gethostname()
liclist=[]
if os.path.exists('\\\SQL\\Share\\Public\\rayx\\ruiplanme.lic'):
with open ('\\\SQL\\Share\\Public\\rayx\\ruiplanme.lic' ,'r') as lic:
licn=lic.readlines()
for user in licn:
if len(user)>15:
liclist.append(chr(int(user[0:3]))+chr(int(user[3:6]))+chr(int(user[6:9]))+chr(int(user[9:11])))
if len(licn[0])==22:
ex=licn[0][11:21]
exdate=str(int(ex,8))[0:4]+'-'+str(int(ex,8))[4:6]+'-'+str(int(ex,8))[6:8]
exstamp=time.mktime(time.strptime(exdate,"%Y-%m-%d"))
if exstamp-int(time.time())>0:
pass
else:
raise Exception ("No License or License Expired!!!")
os._exit()
else:
raise Exception ("No License or License Expired!!!")
os._exit()
if hm in liclist:
pass
else:
raise Exception ("No License or License Expired!!!")
os._exit()
else:
raise Exception ("No License or License Expired!!!")
os._exit()
with open ('paratmp' ,'r') as ptmp:
lines=ptmp.readlines()
machname=lines[0][0:-1]
nob=lines[1][0:-1]
optleb=lines[2][0:-1]
fdose=int(lines[3][0:-1])
dos=[]
nfra=[]
pdos=[]
tarls=[]
dosls=[]
ctrleb=""
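# Collect target ROIs: names encode the fraction number and dose as NAME_<fractions>_<dose>; the number of underscores controls how each ROI is recorded.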
for m in roi_names:
if "CTRL." in m:
ctrlleb=m
if patient.PatientModel.RegionsOfInterest[m].OrganData.OrganType == "Target":
con=collections.Counter(m)
if con['_']==2:
pname=m
nf=m.split('_')[1]
dy=m.split('_')[2]
tarls.append(m)
dos.append(int(dy))
nfra.append(int(nf))
dosls.append(int(dy))
if con['_']==3:
pname=m
nf=m.split('_')[1]
dy=m.split('_')[2]
#tarls.append(m)
dos.append(int(dy))
nfra.append(int(nf))
#dosls.append(int(dy))
if con['_']==1:
tarls.append(m)
dostmp=m.split('_')[1]
dosls.append(int(dostmp))
pdose=max(dos)
if (nfra[0]*fdose!=pdose):
raise Exception ("Fraction Number And Total Dose Are Not Match")
os._exit()
if len(ctrlleb)==0:
raise Exception ("No Strategy Selected. Please check!!!")
os._exit()
else:
if ctrlleb[-4:]=="IMRT":
stg="SMLC"
if ctrlleb[-4:]=="VMAT":
stg="VMAT"
if machname in ['4082','4082_FFF','TrueBeamSN1403','TB1403FFF']:
csblab='False'
else:
csblab='True'
if machname in ["600CD","VARIAN 23CX"]:
for ch in roi_names:
if "Couch" in ch:
patient.PatientModel.RegionsOfInterest[ch].DeleteRoi()
with CompositeAction('Add Treatment plan'):
retval_0 = patient.AddNewPlan(PlanName="plan", PlannedBy="Achao", Comment="AutoPlan", ExaminationName="CT 1", AllowDuplicateNames=False)
retval_0.SetDefaultDoseGrid(VoxelSize={ 'x': 0.3, 'y': 0.3, 'z': 0.3 })
retval_1 = retval_0.AddNewBeamSet(Name="AutoPlan", ExaminationName="CT 1", MachineName=machname, NominalEnergy=None, Modality="Photons", TreatmentTechnique=stg, PatientPosition="HeadFirstSupine", NumberOfFractions=nfra[0], CreateSetupBeams=csblab, UseLocalizationPointAsSetupIsocenter=False, Comment="")
retval_1.AddDosePrescriptionToRoi(RoiName=pname, DoseVolume=95, PrescriptionType="DoseAtVolume", DoseValue=pdose, RelativePrescriptionLevel=1, AutoScaleDose=False)
info=patient.QueryPlanInfo(Filter={'Name':'^{0}$'.format("plan")})
patient.LoadPlan(PlanInfo=info[0])
plan=patient.LoadPlan(PlanInfo=info[0])
#plan=get_current("plan")
structure_set=plan.GetStructureSet()
try:
ptv_center=structure_set.RoiGeometries[pname].GetCenterOfRoi()
except:
print '(Cannot access center of ROI{0}.Exiting script.'.format(pname)
sys.exit()
iso={'x':ptv_center.x,'y':ptv_center.y,'z':ptv_center.z}
isox=round(ptv_center.x,2)
isoy=round(ptv_center.y,2)
isoz=round(ptv_center.z,2)
if isox>0:
side='left'
else:
side='right'
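# Beam templates: each branch below creates the beam arrangement chosen in the dialog (arc or fixed-field layouts), centred on the PTV isocenter.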
beam_set = get_current("BeamSet")
if (nob=='2 ARC'):
with CompositeAction('Add beam (1, Beam Set: 2arc)'):
retval_0 = beam_set.CreateArcBeam(ArcStopGantryAngle=181, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="1", Description="1", GantryAngle=179, CouchAngle=0, CollimatorAngle=10, ApertureBlock=None)
retval_0.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (2, Beam Set: 2arc)'):
retval_1 = beam_set.CreateArcBeam(ArcStopGantryAngle=179, ArcRotationDirection="Clockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y':isoy, 'z': isoz }, Name="2", Description="2", GantryAngle=181, CouchAngle=0, CollimatorAngle=350, ApertureBlock=None)
retval_1.SetBolus(BolusName="")
if (nob=='4 ARC'):
with CompositeAction('Add beam (1, Beam Set: 2arc)'):
retval_0 = beam_set.CreateArcBeam(ArcStopGantryAngle=181, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="1", Description="1", GantryAngle=179, CouchAngle=0, CollimatorAngle=10, ApertureBlock=None)
retval_0.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (2, Beam Set: 2arc)'):
retval_1 = beam_set.CreateArcBeam(ArcStopGantryAngle=179, ArcRotationDirection="Clockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y':isoy, 'z': isoz }, Name="2", Description="2", GantryAngle=181, CouchAngle=0, CollimatorAngle=350, ApertureBlock=None)
retval_1.SetBolus(BolusName="")
with CompositeAction('Add beam (3, Beam Set: 2arc)'):
retval_2 = beam_set.CreateArcBeam(ArcStopGantryAngle=181, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="3", Description="3", GantryAngle=179, CouchAngle=0, CollimatorAngle=10, ApertureBlock=None)
retval_2.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (4, Beam Set: 2arc)'):
retval_3 = beam_set.CreateArcBeam(ArcStopGantryAngle=179, ArcRotationDirection="Clockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y':isoy, 'z': isoz }, Name="4", Description="4", GantryAngle=181, CouchAngle=0, CollimatorAngle=350, ApertureBlock=None)
retval_3.SetBolus(BolusName="")
if (nob=='6 P-ARC'):
with CompositeAction('Add beam (1, Beam Set: 6arc)'):
retval_0 = beam_set.CreateArcBeam(ArcStopGantryAngle=120, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="1", Description="1", GantryAngle=178, CouchAngle=0, CollimatorAngle=10, ApertureBlock=None)
retval_0.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (2, Beam Set: 6arc)'):
retval_1 = beam_set.CreateArcBeam(ArcStopGantryAngle=310, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="2", Description="2", GantryAngle=50, CouchAngle=0, CollimatorAngle=10, ApertureBlock=None)
retval_1.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (3, Beam Set: 6arc)'):
retval_2 = beam_set.CreateArcBeam(ArcStopGantryAngle=182, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="3", Description="3", GantryAngle=240, CouchAngle=0, CollimatorAngle=10, ApertureBlock=None)
retval_2.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (4, Beam Set: 6arc)'):
retval_3 = beam_set.CreateArcBeam(ArcStopGantryAngle=240, ArcRotationDirection="Clockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y':isoy, 'z': isoz }, Name="4", Description="4", GantryAngle=182, CouchAngle=0, CollimatorAngle=350, ApertureBlock=None)
retval_3.SetBolus(BolusName="")
with CompositeAction('Add beam (5, Beam Set: 6arc)'):
retval_4 = beam_set.CreateArcBeam(ArcStopGantryAngle=50, ArcRotationDirection="Clockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y':isoy, 'z': isoz }, Name="5", Description="5", GantryAngle=310, CouchAngle=0, CollimatorAngle=350, ApertureBlock=None)
retval_4.SetBolus(BolusName="")
with CompositeAction('Add beam (6, Beam Set: 6arc)'):
retval_5 = beam_set.CreateArcBeam(ArcStopGantryAngle=178, ArcRotationDirection="Clockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y':isoy, 'z': isoz }, Name="6", Description="6", GantryAngle=120, CouchAngle=0, CollimatorAngle=350, ApertureBlock=None)
retval_5.SetBolus(BolusName="")
if (nob=='5ANG-10ARC'):
with CompositeAction('Add beam (1, Beam Set: 5ANG-10ARC)'):
retval_0 = beam_set.CreateArcBeam(ArcStopGantryAngle=182, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="1", Description="1", GantryAngle=0, CouchAngle=80, CollimatorAngle=10, ApertureBlock=None)
retval_0.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (2, Beam Set: 5ANG-10ARC)'):
retval_1 = beam_set.CreateArcBeam(ArcStopGantryAngle=0, ArcRotationDirection="Clockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y':isoy, 'z': isoz }, Name="2", Description="2", GantryAngle=182, CouchAngle=80, CollimatorAngle=350, ApertureBlock=None)
retval_1.SetBolus(BolusName="")
with CompositeAction('Add beam (3, Beam Set: 5ANG-10ARC)'):
retval_2 = beam_set.CreateArcBeam(ArcStopGantryAngle=182, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="3", Description="3", GantryAngle=0, CouchAngle=40, CollimatorAngle=10, ApertureBlock=None)
retval_2.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (4, Beam Set: 5ANG-10ARC)'):
retval_3 = beam_set.CreateArcBeam(ArcStopGantryAngle=0, ArcRotationDirection="Clockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y':isoy, 'z': isoz }, Name="4", Description="4", GantryAngle=182, CouchAngle=40, CollimatorAngle=350, ApertureBlock=None)
retval_3.SetBolus(BolusName="")
with CompositeAction('Add beam (5, Beam Set: 5ANG-10ARC)'):
retval_4 = beam_set.CreateArcBeam(ArcStopGantryAngle=182, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="5", Description="5", GantryAngle=178, CouchAngle=0, CollimatorAngle=10, ApertureBlock=None)
retval_4.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (6, Beam Set: 5ANG-10ARC)'):
retval_5 = beam_set.CreateArcBeam(ArcStopGantryAngle=178, ArcRotationDirection="Clockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y':isoy, 'z': isoz }, Name="6", Description="6", GantryAngle=182, CouchAngle=0, CollimatorAngle=350, ApertureBlock=None)
retval_5.SetBolus(BolusName="")
with CompositeAction('Add beam (7, Beam Set: 5ANG-10ARC)'):
retval_6 = beam_set.CreateArcBeam(ArcStopGantryAngle=0, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="7", Description="7", GantryAngle=178, CouchAngle=320, CollimatorAngle=10, ApertureBlock=None)
retval_6.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (8, Beam Set: 5ANG-10ARC)'):
retval_7 = beam_set.CreateArcBeam(ArcStopGantryAngle=178, ArcRotationDirection="Clockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y':isoy, 'z': isoz }, Name="8", Description="8", GantryAngle=0, CouchAngle=320, CollimatorAngle=350, ApertureBlock=None)
retval_7.SetBolus(BolusName="")
with CompositeAction('Add beam (9, Beam Set: 5ANG-10ARC)'):
retval_8 = beam_set.CreateArcBeam(ArcStopGantryAngle=0, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="9", Description="9", GantryAngle=178, CouchAngle=280, CollimatorAngle=10, ApertureBlock=None)
retval_8.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (10, Beam Set: 5ANG-10ARC)'):
retval_9 = beam_set.CreateArcBeam(ArcStopGantryAngle=178, ArcRotationDirection="Clockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y':isoy, 'z': isoz }, Name="10", Description="10", GantryAngle=0, CouchAngle=280, CollimatorAngle=350, ApertureBlock=None)
retval_9.SetBolus(BolusName="")
if (nob=='9ANG-9ARC'):
with CompositeAction('Add beam (1, Beam Set: 9ANG-9ARC)'):
retval_0 = beam_set.CreateArcBeam(ArcStopGantryAngle=182, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="1", Description="1", GantryAngle=178, CouchAngle=0, CollimatorAngle=10, ApertureBlock=None)
retval_0.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (2, Beam Set: 9ANG-9ARC)'):
retval_1 = beam_set.CreateArcBeam(ArcStopGantryAngle=20, ArcRotationDirection="Clockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y':isoy, 'z': isoz }, Name="2", Description="2", GantryAngle=182, CouchAngle=20, CollimatorAngle=350, ApertureBlock=None)
retval_1.SetBolus(BolusName="")
with CompositeAction('Add beam (3, Beam Set: 9ANG-9ARC)'):
retval_2 = beam_set.CreateArcBeam(ArcStopGantryAngle=182, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="3", Description="3", GantryAngle=20, CouchAngle=40, CollimatorAngle=10, ApertureBlock=None)
retval_2.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (4, Beam Set: 9ANG-9ARC)'):
retval_3 = beam_set.CreateArcBeam(ArcStopGantryAngle=20, ArcRotationDirection="Clockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y':isoy, 'z': isoz }, Name="4", Description="4", GantryAngle=182, CouchAngle=60, CollimatorAngle=350, ApertureBlock=None)
retval_3.SetBolus(BolusName="")
with CompositeAction('Add beam (5, Beam Set: 9ANG-9ARC)'):
retval_4 = beam_set.CreateArcBeam(ArcStopGantryAngle=182, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="5", Description="5", GantryAngle=20, CouchAngle=80, CollimatorAngle=10, ApertureBlock=None)
retval_4.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (6, Beam Set: 9ANG-9ARC)'):
retval_5 = beam_set.CreateArcBeam(ArcStopGantryAngle=178, ArcRotationDirection="Clockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="6", Description="6", GantryAngle=340, CouchAngle=280, CollimatorAngle=10, ApertureBlock=None)
retval_5.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (7, Beam Set: 9ANG-9ARC)'):
retval_6 = beam_set.CreateArcBeam(ArcStopGantryAngle=340, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y':isoy, 'z': isoz }, Name="7", Description="7", GantryAngle=178, CouchAngle=300, CollimatorAngle=350, ApertureBlock=None)
retval_6.SetBolus(BolusName="")
with CompositeAction('Add beam (8, Beam Set: 9ANG-9ARC)'):
retval_7 = beam_set.CreateArcBeam(ArcStopGantryAngle=178, ArcRotationDirection="Clockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="8", Description="8", GantryAngle=340, CouchAngle=320, CollimatorAngle=10, ApertureBlock=None)
retval_7.SetBolus(BolusName="")
# CompositeAction ends
with CompositeAction('Add beam (9, Beam Set: 9ANG-9ARCc)'):
retval_8 = beam_set.CreateArcBeam(ArcStopGantryAngle=340, ArcRotationDirection="CounterClockwise", Energy=6, MachineCone=None, Isocenter={ 'x': isox, 'y':isoy, 'z': isoz }, Name="9", Description="9", GantryAngle=178, CouchAngle=340, CollimatorAngle=350, ApertureBlock=None)
retval_8.SetBolus(BolusName="")
if (nob=='F1B4'):
with CompositeAction('Add beam (1, Beam Set: 1)'):
retval_2 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="1", Description="1", GantryAngle=165, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_2.SetBolus(BolusName="")
beam_set.Beams['1'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (2, Beam Set: 1)'):
retval_3 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="2", Description="2", GantryAngle=135, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_3.SetBolus(BolusName="")
beam_set.Beams['2'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (3, Beam Set: 1)'):
retval_4 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="3", Description="3", GantryAngle=0, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_4.SetBolus(BolusName="")
beam_set.Beams['3'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (4, Beam Set: 1)'):
retval_5 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="4", Description="4", GantryAngle=225, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_5.SetBolus(BolusName="")
beam_set.Beams['4'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (5, Beam Set: 1)'):
retval_6 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="5", Description="5", GantryAngle=195, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_6.SetBolus(BolusName="")
beam_set.Beams['5'].BeamMU = 0
# CompositeAction ends
if (nob=='F1B4&2'):
with CompositeAction('Add beam (1, Beam Set: 1)'):
retval_2 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="1", Description="1", GantryAngle=165, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_2.SetBolus(BolusName="")
beam_set.Beams['1'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (2, Beam Set: 1)'):
retval_3 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="2", Description="2", GantryAngle=135, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_3.SetBolus(BolusName="")
beam_set.Beams['2'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (3, Beam Set: 1)'):
retval_4 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="3", Description="3", GantryAngle=40, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_4.SetBolus(BolusName="")
beam_set.Beams['3'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (4, Beam Set: 1)'):
retval_5 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="4", Description="4", GantryAngle=0, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_5.SetBolus(BolusName="")
beam_set.Beams['4'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (5, Beam Set: 1)'):
retval_6 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="5", Description="5", GantryAngle=320, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_6.SetBolus(BolusName="")
beam_set.Beams['5'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (6, Beam Set: 1)'):
retval_7 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="6", Description="6", GantryAngle=225, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_7.SetBolus(BolusName="")
beam_set.Beams['6'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (7, Beam Set: 1)'):
retval_8 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="7", Description="7", GantryAngle=195, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_8.SetBolus(BolusName="")
beam_set.Beams['7'].BeamMU = 0
if (nob=='F3B2'):
with CompositeAction('Add beam (1, Beam Set: 1)'):
retval_2 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="1", Description="1", GantryAngle=160, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_2.SetBolus(BolusName="")
beam_set.Beams['1'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (2, Beam Set: 1)'):
retval_3 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="2", Description="2", GantryAngle=40, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_3.SetBolus(BolusName="")
beam_set.Beams['2'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (3, Beam Set: 1)'):
retval_4 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="3", Description="3", GantryAngle=0, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_4.SetBolus(BolusName="")
beam_set.Beams['3'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (4, Beam Set: 1)'):
retval_5 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="4", Description="4", GantryAngle=320, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_5.SetBolus(BolusName="")
beam_set.Beams['4'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (5, Beam Set: 1)'):
retval_6 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="5", Description="5", GantryAngle=200, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_6.SetBolus(BolusName="")
beam_set.Beams['5'].BeamMU = 0
#ABCDEF
if (nob=='BST4' and side=='left'):
with CompositeAction('Add beam (1, Beam Set: 1)'):
retval_2 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="1", Description="1", GantryAngle=120, CouchAngle=0, CollimatorAngle=340, ApertureBlock=None)
retval_2.SetBolus(BolusName="")
beam_set.Beams['1'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (2, Beam Set: 1)'):
retval_3 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="2", Description="2", GantryAngle=106, CouchAngle=0, CollimatorAngle=340, ApertureBlock=None)
retval_3.SetBolus(BolusName="")
beam_set.Beams['2'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (3, Beam Set: 1)'):
retval_4 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="3", Description="3", GantryAngle=310, CouchAngle=0, CollimatorAngle=20, ApertureBlock=None)
retval_4.SetBolus(BolusName="")
beam_set.Beams['3'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (4, Beam Set: 1)'):
retval_5 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="4", Description="4", GantryAngle=295, CouchAngle=0, CollimatorAngle=20, ApertureBlock=None)
retval_5.SetBolus(BolusName="")
beam_set.Beams['4'].BeamMU = 0
# CompositeAction ends
if (nob=='BST7' and side=='left'):
with CompositeAction('Add beam (1, Beam Set: 1)'):
retval_2 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="1", Description="1", GantryAngle=120, CouchAngle=0, CollimatorAngle=340, ApertureBlock=None)
retval_2.SetBolus(BolusName="")
beam_set.Beams['1'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (2, Beam Set: 1)'):
retval_3 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="2", Description="2", GantryAngle=106, CouchAngle=0, CollimatorAngle=340, ApertureBlock=None)
retval_3.SetBolus(BolusName="")
beam_set.Beams['2'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (3, Beam Set: 1)'):
retval_4 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="3", Description="3", GantryAngle=40, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_4.SetBolus(BolusName="")
beam_set.Beams['3'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (4, Beam Set: 1)'):
retval_5 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="4", Description="4", GantryAngle=0, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_5.SetBolus(BolusName="")
beam_set.Beams['4'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (5, Beam Set: 1)'):
retval_6 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="5", Description="5", GantryAngle=320, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_6.SetBolus(BolusName="")
beam_set.Beams['5'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (6, Beam Set: 1)'):
retval_7 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="6", Description="6", GantryAngle=310, CouchAngle=0, CollimatorAngle=20, ApertureBlock=None)
retval_7.SetBolus(BolusName="")
beam_set.Beams['6'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (7, Beam Set: 1)'):
retval_8 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="7", Description="7", GantryAngle=295, CouchAngle=0, CollimatorAngle=20, ApertureBlock=None)
retval_8.SetBolus(BolusName="")
beam_set.Beams['7'].BeamMU = 0
if (nob=='BST4' and side=='right'):
with CompositeAction('Add beam (1, Beam Set: 1)'):
retval_2 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="1", Description="1", GantryAngle=62, CouchAngle=0, CollimatorAngle=340, ApertureBlock=None)
retval_2.SetBolus(BolusName="")
beam_set.Beams['1'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (2, Beam Set: 1)'):
retval_3 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="2", Description="2", GantryAngle=48, CouchAngle=0, CollimatorAngle=340, ApertureBlock=None)
retval_3.SetBolus(BolusName="")
beam_set.Beams['2'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (3, Beam Set: 1)'):
retval_7 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="3", Description="3", GantryAngle=251, CouchAngle=0, CollimatorAngle=20, ApertureBlock=None)
retval_7.SetBolus(BolusName="")
beam_set.Beams['3'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (4, Beam Set: 1)'):
retval_8 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="4", Description="4", GantryAngle=237, CouchAngle=0, CollimatorAngle=20, ApertureBlock=None)
retval_8.SetBolus(BolusName="")
beam_set.Beams['4'].BeamMU = 0
if (nob=='BST7' and side=='right'):
with CompositeAction('Add beam (1, Beam Set: 1)'):
retval_2 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="1", Description="1", GantryAngle=62, CouchAngle=0, CollimatorAngle=340, ApertureBlock=None)
retval_2.SetBolus(BolusName="")
beam_set.Beams['1'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (2, Beam Set: 1)'):
retval_3 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="2", Description="2", GantryAngle=48, CouchAngle=0, CollimatorAngle=340, ApertureBlock=None)
retval_3.SetBolus(BolusName="")
beam_set.Beams['2'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (3, Beam Set: 1)'):
retval_7 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="3", Description="3", GantryAngle=40, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_7.SetBolus(BolusName="")
beam_set.Beams['3'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (4, Beam Set: 1)'):
retval_7 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="4", Description="4", GantryAngle=0, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_7.SetBolus(BolusName="")
beam_set.Beams['4'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (5, Beam Set: 1)'):
retval_7 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="5", Description="5", GantryAngle=320, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_7.SetBolus(BolusName="")
beam_set.Beams['5'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (6, Beam Set: 1)'):
retval_7 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="6", Description="6", GantryAngle=251, CouchAngle=0, CollimatorAngle=20, ApertureBlock=None)
retval_7.SetBolus(BolusName="")
beam_set.Beams['6'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (7, Beam Set: 1)'):
retval_8 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="7", Description="7", GantryAngle=237, CouchAngle=0, CollimatorAngle=20, ApertureBlock=None)
retval_8.SetBolus(BolusName="")
beam_set.Beams['7'].BeamMU = 0
if (nob=='AVG5'):
with CompositeAction('Add beam (1, Beam Set: 1)'):
retval_2 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="1", Description="1", GantryAngle=140, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_2.SetBolus(BolusName="")
beam_set.Beams['1'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (2, Beam Set: 1)'):
retval_3 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="2", Description="2", GantryAngle=60, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_3.SetBolus(BolusName="")
beam_set.Beams['2'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (3, Beam Set: 1)'):
retval_4 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="3", Description="3", GantryAngle=0, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_4.SetBolus(BolusName="")
beam_set.Beams['3'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (4, Beam Set: 1)'):
retval_5 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="4", Description="4", GantryAngle=300, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_5.SetBolus(BolusName="")
beam_set.Beams['4'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (5, Beam Set: 1)'):
retval_6 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="5", Description="5", GantryAngle=230, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_6.SetBolus(BolusName="")
beam_set.Beams['5'].BeamMU = 0
# CompositeAction ends
if (nob=='AVG7'):
with CompositeAction('Add beam (1, Beam Set: 1)'):
retval_2 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="1", Description="1", GantryAngle=150, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_2.SetBolus(BolusName="")
beam_set.Beams['1'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (2, Beam Set: 1)'):
retval_3 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="2", Description="2", GantryAngle=100, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_3.SetBolus(BolusName="")
beam_set.Beams['2'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (3, Beam Set: 1)'):
retval_4 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="3", Description="3", GantryAngle=50, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_4.SetBolus(BolusName="")
beam_set.Beams['3'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (4, Beam Set: 1)'):
retval_5 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="4", Description="4", GantryAngle=0, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_5.SetBolus(BolusName="")
beam_set.Beams['4'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (5, Beam Set: 1)'):
retval_6 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="5", Description="5", GantryAngle=310, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_6.SetBolus(BolusName="")
beam_set.Beams['5'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (6, Beam Set: 1)'):
retval_7 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="6", Description="6", GantryAngle=260, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_7.SetBolus(BolusName="")
beam_set.Beams['6'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (7, Beam Set: 1)'):
retval_8 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="7", Description="7", GantryAngle=210, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_8.SetBolus(BolusName="")
beam_set.Beams['7'].BeamMU = 0
if (nob=='AVG9'):
with CompositeAction('Add beam (1, Beam Set: 1)'):
retval_2 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="1", Description="1", GantryAngle=160, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_2.SetBolus(BolusName="")
beam_set.Beams['1'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (2, Beam Set: 1)'):
retval_3 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="2", Description="2", GantryAngle=120, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_3.SetBolus(BolusName="")
beam_set.Beams['2'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (3, Beam Set: 1)'):
retval_4 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="3", Description="3", GantryAngle=80, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_4.SetBolus(BolusName="")
beam_set.Beams['3'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (4, Beam Set: 1)'):
retval_5 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="4", Description="4", GantryAngle=40, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_5.SetBolus(BolusName="")
beam_set.Beams['4'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (5, Beam Set: 1)'):
retval_6 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="5", Description="5", GantryAngle=0, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_6.SetBolus(BolusName="")
beam_set.Beams['5'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (6, Beam Set: 1)'):
retval_7 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="6", Description="6", GantryAngle=320, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_7.SetBolus(BolusName="")
beam_set.Beams['6'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (7, Beam Set: 1)'):
retval_8 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="7", Description="7", GantryAngle=280, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_8.SetBolus(BolusName="")
beam_set.Beams['7'].BeamMU = 0
with CompositeAction('Add beam (8, Beam Set: 1)'):
retval_9 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="8", Description="8", GantryAngle=240, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_9.SetBolus(BolusName="")
beam_set.Beams['8'].BeamMU = 0
# CompositeAction ends
with CompositeAction('Add beam (9, Beam Set: 1)'):
retval_0 = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None, Wedge=None, Isocenter={ 'x': isox, 'y': isoy, 'z': isoz }, Name="9", Description="9", GantryAngle=200, CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
retval_0.SetBolus(BolusName="")
beam_set.Beams['9'].BeamMU = 0
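# Note (sketch, not the original author's code): the nine 'Add beam' blocks above,
# and the AVG5/AVG7 variants, differ only in gantry angle (plus collimator angle for
# a few beams). Assuming the same RayStation calls, they could be driven from a list:
#   for idx, gantry in enumerate([160, 120, 80, 40, 0, 320, 280, 240, 200], start=1):
#       b = beam_set.CreatePhotonBeam(Energy=6, BlockTray=None, Cone=None, MachineCone=None,
#                                     Wedge=None, Isocenter={'x': isox, 'y': isoy, 'z': isoz},
#                                     Name=str(idx), Description=str(idx), GantryAngle=gantry,
#                                     CouchAngle=0, CollimatorAngle=0, ApertureBlock=None)
#       b.SetBolus(BolusName="")
#       beam_set.Beams[str(idx)].BeamMU = 0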
patient.Save()
info2=patient.QueryPlanInfo(Filter={'Name':'^{0}$'.format("plan")})
patient.LoadPlan(PlanInfo=info2[0])
#with open ('paratmp' ,'r') as ptmp:
# lines=ptmp.readlines()
# methname=lines[0:-1]
#with open ('xy.txt','w') as xy:
# if (lines[1]=='2'):
# xy.write("2")
# else:
# xy.write("6")
patient = get_current("Patient")
machine_db=get_current("MachineDB")
examination = get_current("Examination")
roi_names=[r.Name for r in patient.PatientModel.RegionsOfInterest]
dos=[]
nfra=[]
pdos=[]
tarls=[]
dosls=[]
oarls=[]
oardosls=[]
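# The loop below assumes underscore-delimited ROI names: targets such as a hypothetical
# "PTV_25_5000" carry name_fractions_dose (two underscores) or name_dose (one underscore),
# and OARs with one underscore, e.g. a hypothetical "Cord_4000", carry their dose limit.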
for m in roi_names:
if patient.PatientModel.RegionsOfInterest[m].OrganData.OrganType == "Target":
con=collections.Counter(m)
if con['_']==2:
pname=m
nf=m.split('_')[1]
dy=m.split('_')[2]
tarls.append(m)
dos.append(int(dy))
nfra.append(int(nf))
dosls.append(int(dy))
if con['_']==3:
pname=m
nf=m.split('_')[1]
dy=m.split('_')[2]
#tarls.append(m)
dos.append(int(dy))
nfra.append(int(nf))
#dosls.append(int(dy))
if con['_']==1:
tarls.append(m)
dostmp=m.split('_')[1]
dosls.append(int(dostmp))
if patient.PatientModel.RegionsOfInterest[m].OrganData.OrganType == "OrganAtRisk":
con2=collections.Counter(m)
if con2['_']==1:
oarls.append(m)
oardos=m.split('_')[1]
oardosls.append(int(oardos))
pdose=max(dos)
info=patient.QueryPlanInfo(Filter={'Name':'^{0}$'.format("plan")})
patient.LoadPlan(PlanInfo=info[0])
plan=patient.LoadPlan(PlanInfo=info[0])
shdic=['SEA-HORSE','SEAHORSE','SEA HORSE']
hipdic=['HIPPO']
lendic=['LEN-R','LENS-R','LEN-L','LENS-L','LENL','LENR','L LENS','R LENS','LENS R','LENS L']
ondic=['ON-L','ON-R','OPTIC NERVE-R','OPTIC NERVE-L','ONL','ONR']
bsdic=['BRAINSTEM','BRAIN-STEM','BS']
corddic=['CORD','SPINALCORD','SPINALCORD (THORAX)']
lungdic=['LUNG-L','LUNGL','LUNG-R','LUNGR','LUNG (LEFT)','LUNG (RIGHT)','LUNG RIGHT','LUNG LEFT']
chiasmdic=['CHIASM','DSSS','OPTIC CHIASM','OPTIC CHISMA','CHISMA','OPTIC CHIASMA','CHIASMA']
parotiddic=['PAROTID-R','PAROTID-L','PAROTID R','PAROTID L','PAROTIDGLAND (LEFT)','PAROTIDGLAND (RIGHT)']
femoraldic=['FEMORAL HEAD R','FEMORAL HEAD L','FEMORALHEAD (LEFT)','FEMORALHEAD (RIGHT)']
heartdic=['HEART']
liverdic=['LIVER']
kiddic=['KIDNEY-L','KIDNEY (LEFT)','KIDNEY-R','KIDNEY (RIGHT)','KIDNEY LEFT','KIDNEY RIGHT']
bladdic=['BLADDER']
rectumdic=['RECTUM']
redic=['1']
if ("B1" not in roi_names):
with CompositeAction('ROI Algebra (B1)'):
retval_1 = patient.PatientModel.CreateRoi(Name="B1", Color="Yellow", Type="Organ", TissueName=None, RoiMaterial=None)
retval_1.SetAlgebraExpression(ExpressionA={ 'Operation': "Union", 'SourceRoiNames': ["skin"], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ExpressionB={ 'Operation': "Union", 'SourceRoiNames': [pname], 'MarginSettings': { 'Type': "Expand", 'Superior': 1, 'Inferior': 1, 'Anterior': 1, 'Posterior': 1, 'Right': 1, 'Left': 1 } }, ResultOperation="Subtraction", ResultMarginSettings={ 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 })
retval_1.UpdateDerivedGeometry(Examination=examination, Algorithm="Auto")
# CompositeAction ends
if ("B1.5" not in roi_names):
with CompositeAction('ROI Algebra (B1.5)'):
retval_2 = patient.PatientModel.CreateRoi(Name="B1.5", Color="Pink", Type="Organ", TissueName=None, RoiMaterial=None)
retval_2.SetAlgebraExpression(ExpressionA={ 'Operation': "Union", 'SourceRoiNames': ["skin"], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ExpressionB={ 'Operation': "Union", 'SourceRoiNames': [pname], 'MarginSettings': { 'Type': "Expand", 'Superior': 1.5, 'Inferior': 1.5, 'Anterior': 1.5, 'Posterior': 1.5, 'Right': 1.5, 'Left': 1.5 } }, ResultOperation="Subtraction", ResultMarginSettings={ 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 })
retval_2.UpdateDerivedGeometry(Examination=examination, Algorithm="Auto")
# CompositeAction ends
if ("B2" not in roi_names):
with CompositeAction('ROI Algebra (B2)'):
retval_3 = patient.PatientModel.CreateRoi(Name="B2", Color="Cyan", Type="Organ", TissueName=None, RoiMaterial=None)
retval_3.SetAlgebraExpression(ExpressionA={ 'Operation': "Union", 'SourceRoiNames': ["skin"], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ExpressionB={ 'Operation': "Union", 'SourceRoiNames': [pname], 'MarginSettings': { 'Type': "Expand", 'Superior': 2, 'Inferior': 2, 'Anterior': 2, 'Posterior': 2, 'Right': 2, 'Left': 2 } }, ResultOperation="Subtraction", ResultMarginSettings={ 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 })
retval_3.UpdateDerivedGeometry(Examination=examination, Algorithm="Auto")
# CompositeAction ends
if "BODYCTRL.IMRT" in roi_names or "BODYCTRL.VMAT" in roi_names:
if "DSSX" not in roi_names:
with CompositeAction('ROI Algebra (DSSX)'):
retval_4 = patient.PatientModel.CreateRoi(Name="DSSX", Color="Cyan", Type="Organ", TissueName=None, RoiMaterial=None)
retval_4.SetAlgebraExpression(ExpressionA={ 'Operation': "Union", 'SourceRoiNames': ["B2"], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ExpressionB={ 'Operation': "Union", 'SourceRoiNames': [pname], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 5, 'Posterior': 5, 'Right': 0, 'Left': 0 } }, ResultOperation="Intersection", ResultMarginSettings={ 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 })
retval_4.UpdateDerivedGeometry(Examination=examination, Algorithm="Auto")
po=plan.PlanOptimizations[0]
opt_param = po.OptimizationParameters
opt_param.Algorithm.MaxNumberOfIterations=60
opt_param.DoseCalculation.IterationsInPreparationsPhase = 20
opt_param.DoseCalculation.ComputeFinalDose=True
opt_param.DoseCalculation.ComputeIntermediateDose=False
if ctrlleb[-4:]=="VMAT":
opt_param.SegmentConversion.ArcConversionProperties.UseMaxLeafTravelDistancePerDegree=True
opt_param.SegmentConversion.ArcConversionProperties.MaxLeafTravelDistancePerDegree=0.4
for bs in opt_param.TreatmentSetupSettings[0].BeamSettings:
bs.ArcConversionPropertiesPerBeam.MaxArcDeliveryTime=5.0
if ctrlleb[-4:]=="IMRT":
opt_param.SegmentConversion.MaxNumberOfSegments=50
opt_param.SegmentConversion.MinSegmentArea=4.0
opt_param.SegmentConversion.MinSegmentMUPerFraction=4.0
opt_param.SegmentConversion.MinNumberOfOpenLeafPairs=2
opt_param.SegmentConversion.MinLeafEndSeparation=0.0
for bs in opt_param.TreatmentSetupSettings[0].BeamSettings:
bs.AllowBeamSplit=False
lenls=[]
onls=[]
bsls=[]
chiasmls=[]
lungls=[]
parotidls=[]
cordls=[]
kidls=[]
femoralls=[]
heartls=[]
liverls=[]
bladls=[]
rectumls=[]
shls=[]
hipls=[]
rels=[]
#roi_names = [r.Name for r in patient.PatientModel.RegionsOfInterest]
#if patient.PatientModel.RegionsOfInterest[m].OrganData.OrganType == "OrganAtRisk":
for i in roi_names:
if patient.PatientModel.RegionsOfInterest[i].OrganData.OrganType == "OrganAtRisk":
conx=collections.Counter(i)
if conx['_']==0:
if i.upper() in lendic:
lenls.append(i)
if i.upper() in ondic:
onls.append(i)
if i.upper() in parotiddic:
parotidls.append(i)
if i.upper() in bsdic:
bsls.append(i)
if i.upper() in lungdic:
lungls.append(i)
if i.upper() in corddic:
cordls.append(i)
if i.upper() in heartdic:
heartls.append(i)
if i.upper() in liverdic:
liverls.append(i)
if i.upper() in kiddic:
kidls.append(i)
if i.upper() in femoraldic:
femoralls.append(i)
if i.upper() in shdic:
shls.append(i)
if i.upper() in hipdic:
hipls.append(i)
if i.upper() in bladdic:
bladls.append(i)
if i.upper() in rectumdic:
rectumls.append(i)
if i.upper() in redic:
rels.append(i)
if i.upper() in chiasmdic:
chiasmls.append(i)
else:
if i.split('_')[0].upper() in lendic:
lenls.append(i)
if i.split('_')[0].upper() in ondic:
onls.append(i)
if i.split('_')[0].upper() in parotiddic:
parotidls.append(i)
if i.split('_')[0].upper() in bsdic:
bsls.append(i)
if i.split('_')[0].upper() in lungdic:
lungls.append(i)
if i.split('_')[0].upper() in corddic:
cordls.append(i)
if i.split('_')[0].upper() in heartdic:
heartls.append(i)
if i.split('_')[0].upper() in liverdic:
liverls.append(i)
if i.split('_')[0].upper() in kiddic:
kidls.append(i)
if i.split('_')[0].upper() in femoraldic:
femoralls.append(i)
if i.split('_')[0].upper() in shdic:
shls.append(i)
if i.split('_')[0].upper() in hipdic:
hipls.append(i)
if i.split('_')[0].upper() in bladdic:
bladls.append(i)
if i.split('_')[0].upper() in rectumdic:
rectumls.append(i)
if i.split('_')[0].upper() in redic:
rels.append(i)
if i.split('_')[0].upper() in chiasmdic:
chiasmls.append(i)
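# Sketch (an assumption, a behaviour-preserving rewrite rather than the original code):
# the organ classification above repeats one membership test per synonym list and
# could be driven from a single mapping:
#   organ_map = [(lendic, lenls), (ondic, onls), (parotiddic, parotidls), (bsdic, bsls),
#                (lungdic, lungls), (corddic, cordls), (heartdic, heartls), (liverdic, liverls),
#                (kiddic, kidls), (femoraldic, femoralls), (shdic, shls), (hipdic, hipls),
#                (bladdic, bladls), (rectumdic, rectumls), (redic, rels), (chiasmdic, chiasmls)]
#   key = i.upper() if conx['_'] == 0 else i.split('_')[0].upper()
#   for synonyms, bucket in organ_map:
#       if key in synonyms:
#           bucket.append(i)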
cnt=0
for di in dosls:
dscale=int(di*1.03)
with CompositeAction('Add Optimization Function'):
retval_1 = po.AddOptimizationFunction(FunctionType="MinDVH", RoiName=tarls[cnt])
retval_1.DoseFunctionParameters.DoseLevel = dscale
retval_1.DoseFunctionParameters.PercentVolume = 100
retval_1.DoseFunctionParameters.Weight = 100
with CompositeAction('Add Optimization Function'):
retval_2 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=tarls[cnt])
retval_2.DoseFunctionParameters.DoseLevel = dscale
retval_2.DoseFunctionParameters.PercentVolume = 0
retval_2.DoseFunctionParameters.Weight = 100
cnt=cnt+1
if 'HEADCTRL.IMRT' in roi_names or 'HEADCTRL.VMAT' in roi_names:
if len(lenls) !=0: #['Lens-L','Lens-R_400']
for li in lenls:
conli=collections.Counter(li)
if conli['_']==0:
with CompositeAction('Add Optimization Function'):
retval_5 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=li)
retval_5.DoseFunctionParameters.DoseLevel = int(pdose*0.185)
retval_5.DoseFunctionParameters.Weight = 20
else:
if int(str(li.split('_')[1])) != 0:
with CompositeAction('Add Optimization Function'):
retval_5 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=li)
retval_5.DoseFunctionParameters.DoseLevel = int(str(li.split('_')[1]))
retval_5.DoseFunctionParameters.Weight = 50
with CompositeAction('Add Optimization Function'):
retval_15 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=li)
retval_15.DoseFunctionParameters.DoseLevel = int(str(li.split('_')[1]))
retval_15.DoseFunctionParameters.EudParameterA = 150
retval_15.DoseFunctionParameters.Weight = 30
if len(onls) !=0:
for oi in onls:
conoi=collections.Counter(oi)
if conoi['_']==0:
with CompositeAction('Add Optimization Function'):
retval_6 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=oi)
retval_6.DoseFunctionParameters.DoseLevel = int(pdose*0.83)
retval_6.DoseFunctionParameters.Weight = 20
else:
if int(str(oi.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_6 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=oi)
retval_6.DoseFunctionParameters.DoseLevel = int(str(oi.split('_')[1]))
retval_6.DoseFunctionParameters.Weight = 50
with CompositeAction('Add Optimization Function'):
retval_16 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=oi)
retval_16.DoseFunctionParameters.DoseLevel = int(str(oi.split('_')[1]))
retval_16.DoseFunctionParameters.EudParameterA = 150
retval_16.DoseFunctionParameters.Weight = 30
if len(cordls) !=0:
for ci in cordls:
if 'cm' not in roi_names:
with CompositeAction('ROI Algebra (cm)'):
retval_0 = patient.PatientModel.CreateRoi(Name="cm", Color="Cyan", Type="Organ", TissueName=None, RoiMaterial=None)
retval_0.SetAlgebraExpression(ExpressionA={ 'Operation': "Union", 'SourceRoiNames': [ci], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ExpressionB={ 'Operation': "Union", 'SourceRoiNames': [], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ResultOperation="None", ResultMarginSettings={ 'Type': "Expand", 'Superior': 0.3, 'Inferior': 0.3, 'Anterior': 0.3, 'Posterior': 0.3, 'Right': 0.3, 'Left': 0.3 })
retval_0.UpdateDerivedGeometry(Examination=examination, Algorithm="Auto")
else:
pass
conci=collections.Counter(ci)
if conci['_']==0:
with CompositeAction('Add Optimization Function'):
retval_7 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=ci)
retval_7.DoseFunctionParameters.DoseLevel = int(pdose*0.64)
retval_7.DoseFunctionParameters.Weight = 20
else:
if int(str(ci.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_7 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=ci)
retval_7.DoseFunctionParameters.DoseLevel = int(str(ci.split('_')[1]))
retval_7.DoseFunctionParameters.Weight = 50
with CompositeAction('Add Optimization Function'):
retval_15 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=ci)
retval_15.DoseFunctionParameters.DoseLevel = int(str(ci.split('_')[1]))
retval_15.DoseFunctionParameters.EudParameterA = 150
retval_15.DoseFunctionParameters.Weight = 30
if len(bsls) !=0:
for bsi in bsls:
conbsi=collections.Counter(bsi)
if conbsi['_']==0:
with CompositeAction('Add Optimization Function'):
retval_8 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=bsi)
retval_8.DoseFunctionParameters.DoseLevel = int(pdose*0.83)
retval_8.DoseFunctionParameters.Weight = 20
else:
if int(str(bsi.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_8 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=bsi)
retval_8.DoseFunctionParameters.DoseLevel = int(str(bsi.split('_')[1]))
retval_8.DoseFunctionParameters.Weight = 50
with CompositeAction('Add Optimization Function'):
retval_16 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=bsi)
retval_16.DoseFunctionParameters.DoseLevel = int(str(bsi.split('_')[1]))
retval_16.DoseFunctionParameters.EudParameterA = 150
retval_16.DoseFunctionParameters.Weight = 30
if len(chiasmls) !=0:
for chisi in chiasmls:
conchisi=collections.Counter(chisi)
if conchisi['_']==0:
with CompositeAction('Add Optimization Function'):
retval_8 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=chisi)
retval_8.DoseFunctionParameters.DoseLevel = int(pdose*0.83)
retval_8.DoseFunctionParameters.Weight = 20
else:
if int(str(chisi.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_8 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=chisi)
retval_8.DoseFunctionParameters.DoseLevel = int(str(chisi.split('_')[1]))
retval_8.DoseFunctionParameters.Weight = 50
with CompositeAction('Add Optimization Function'):
retval_16 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=chisi)
retval_16.DoseFunctionParameters.DoseLevel = int(str(chisi.split('_')[1]))
retval_16.DoseFunctionParameters.EudParameterA = 150
retval_16.DoseFunctionParameters.Weight = 30
if len(parotidls) !=0:
for pi in parotidls:
conpi=collections.Counter(pi)
if conpi['_']==0:
with CompositeAction('Add Optimization Function'):
retval_9 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=pi)
retval_9.DoseFunctionParameters.DoseLevel = int(pdose*0.58)
retval_9.DoseFunctionParameters.EudParameterA = 1
retval_9.DoseFunctionParameters.Weight = 5
else:
if int(str(pi.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_9 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=pi)
retval_9.DoseFunctionParameters.DoseLevel = int(str(pi.split('_')[1]))
retval_9.DoseFunctionParameters.EudParameterA = 1
retval_9.DoseFunctionParameters.Weight = 15
if len(lungls) !=0:
if len(lungls)==2:
if "Lung-Z" not in roi_names:
with CompositeAction('ROI Algebra (Lung-Z)'):
retval_0 = patient.PatientModel.CreateRoi(Name="Lung-Z", Color="Pink", Type="Organ", TissueName=None, RoiMaterial=None)
retval_0.SetAlgebraExpression(ExpressionA={ 'Operation': "Union", 'SourceRoiNames': [lungls[0], lungls[1]], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ExpressionB={ 'Operation': "Union", 'SourceRoiNames': [], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ResultOperation="None", ResultMarginSettings={ 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 })
retval_0.UpdateDerivedGeometry(Examination=examination, Algorithm="Auto")
# CompositeAction ends
for lgi in lungls:
conlgi=collections.Counter(lgi)
if conlgi['_']==0:
with CompositeAction('Add Optimization Function'):
retval_10 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=lgi)
retval_10.DoseFunctionParameters.DoseLevel = int(pdose*0.13)
retval_10.DoseFunctionParameters.PercentVolume = 45
retval_10.DoseFunctionParameters.Weight = 5
with CompositeAction('Add Optimization Function'):
retval_10 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=lgi)
retval_10.DoseFunctionParameters.DoseLevel = int(pdose*0.35)
retval_10.DoseFunctionParameters.PercentVolume = 22
retval_10.DoseFunctionParameters.Weight = 5
else:
if int(str(lgi.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_10 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=lgi)
retval_10.DoseFunctionParameters.DoseLevel = int(str(lgi.split('_')[1]))
retval_10.DoseFunctionParameters.EudParameterA = 1
retval_10.DoseFunctionParameters.Weight = 15
if len(shls) !=0:
for shi in shls:
with CompositeAction('Add Optimization Function'):
retval_11 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=shi)
retval_11.DoseFunctionParameters.DoseLevel = int(pdose*0.45)
retval_11.DoseFunctionParameters.Weight = 20
with CompositeAction('Add Optimization Function'):
retval_12 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=shi)
retval_12.DoseFunctionParameters.DoseLevel = int(pdose*0.3)
retval_12.DoseFunctionParameters.EudParameterA = 1
retval_12.DoseFunctionParameters.Weight = 5
if len(hipls) !=0:
for hipi in hipls:
with CompositeAction('Add Optimization Function'):
retval_13 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=hipi)
retval_13.DoseFunctionParameters.DoseLevel = int(pdose*0.55)
retval_13.DoseFunctionParameters.Weight = 20
with CompositeAction('Add Optimization Function'):
retval_14 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=hipi)
retval_14.DoseFunctionParameters.DoseLevel = int(pdose*0.45)
retval_14.DoseFunctionParameters.EudParameterA = 1
retval_14.DoseFunctionParameters.Weight = 5
if 'BODYCTRL.IMRT' in roi_names or 'BODYCTRL.VMAT' in roi_names:
if len(femoralls) !=0:
for fi in femoralls:
confi=collections.Counter(fi)
if confi['_']==0:
with CompositeAction('Add Optimization Function'):
retval_5 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=fi)
retval_5.DoseFunctionParameters.DoseLevel = int(pdose*0.65)
retval_5.DoseFunctionParameters.PercentVolume = 50
retval_5.DoseFunctionParameters.Weight = 5
else:
if int(str(fi.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_5 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=fi)
retval_5.DoseFunctionParameters.DoseLevel = int(str(fi.split('_')[1]))
retval_5.DoseFunctionParameters.EudParameterA = 1
retval_5.DoseFunctionParameters.Weight = 15
if len(bladls) !=0:
for bi in bladls:
conbi=collections.Counter(bi)
if conbi['_']==0:
with CompositeAction('Add Optimization Function'):
retval_6 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=bi)
retval_6.DoseFunctionParameters.DoseLevel = int(pdose*0.91)
retval_6.DoseFunctionParameters.PercentVolume = 50
retval_6.DoseFunctionParameters.Weight = 5
else:
if int(str(bi.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_10 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=bi)
retval_10.DoseFunctionParameters.DoseLevel = int(str(bi.split('_')[1]))
retval_10.DoseFunctionParameters.EudParameterA = 1
retval_10.DoseFunctionParameters.Weight = 15
if len(rectumls) !=0:
for ri in rectumls:
conri=collections.Counter(ri)
if conri['_']==0:
with CompositeAction('Add Optimization Function'):
retval_7 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=ri)
retval_7.DoseFunctionParameters.DoseLevel = int(pdose*0.9)
retval_7.DoseFunctionParameters.PercentVolume = 50
retval_7.DoseFunctionParameters.Weight = 5
else:
if int(str(ri.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_10 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=ri)
retval_10.DoseFunctionParameters.DoseLevel = int(str(ri.split('_')[1]))
retval_10.DoseFunctionParameters.EudParameterA = 1
retval_10.DoseFunctionParameters.Weight = 15
if len(cordls) !=0:
for ci in cordls:
if 'cm' not in roi_names:
with CompositeAction('ROI Algebra (cm)'):
retval_0 = patient.PatientModel.CreateRoi(Name="cm", Color="Cyan", Type="Organ", TissueName=None, RoiMaterial=None)
retval_0.SetAlgebraExpression(ExpressionA={ 'Operation': "Union", 'SourceRoiNames': [ci], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ExpressionB={ 'Operation': "Union", 'SourceRoiNames': [], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ResultOperation="None", ResultMarginSettings={ 'Type': "Expand", 'Superior': 0.3, 'Inferior': 0.3, 'Anterior': 0.3, 'Posterior': 0.3, 'Right': 0.3, 'Left': 0.3 })
retval_0.UpdateDerivedGeometry(Examination=examination, Algorithm="Auto")
conci=collections.Counter(ci)
if conci['_']==0:
with CompositeAction('Add Optimization Function'):
retval_8 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=ci)
retval_8.DoseFunctionParameters.DoseLevel = int(pdose*0.73)
retval_8.DoseFunctionParameters.Weight = 30
else:
if int(str(ci.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_7 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=ci)
retval_7.DoseFunctionParameters.DoseLevel = int(str(ci.split('_')[1]))
retval_7.DoseFunctionParameters.Weight = 50
with CompositeAction('Add Optimization Function'):
retval_17 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=ci)
retval_17.DoseFunctionParameters.DoseLevel = int(str(ci.split('_')[1]))
retval_17.DoseFunctionParameters.EudParameterA = 150
retval_17.DoseFunctionParameters.Weight = 30
if len(kidls) !=0:
for ki in kidls:
conki=collections.Counter(ki)
if conki['_']==0:
with CompositeAction('Add Optimization Function'):
retval_9 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=ki)
retval_9.DoseFunctionParameters.DoseLevel = int(pdose*0.2)
retval_9.DoseFunctionParameters.PercentVolume = 40
retval_9.DoseFunctionParameters.Weight = 5
with CompositeAction('Add Optimization Function'):
retval_10 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=ki)
retval_10.DoseFunctionParameters.DoseLevel = int(pdose*0.333)
retval_10.DoseFunctionParameters.PercentVolume = 22
retval_10.DoseFunctionParameters.Weight = 5
else:
if int(str(ki.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_10 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=ki)
retval_10.DoseFunctionParameters.DoseLevel = int(str(ki.split('_')[1]))
retval_10.DoseFunctionParameters.EudParameterA = 1
retval_10.DoseFunctionParameters.Weight = 15
if len(lungls) !=0:
if len(lungls)==2:
if "Lung-Z" not in roi_names:
with CompositeAction('ROI Algebra (Lung-Z)'):
retval_0 = patient.PatientModel.CreateRoi(Name="Lung-Z", Color="Pink", Type="Organ", TissueName=None, RoiMaterial=None)
retval_0.SetAlgebraExpression(ExpressionA={ 'Operation': "Union", 'SourceRoiNames': [lungls[0], lungls[1]], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ExpressionB={ 'Operation': "Union", 'SourceRoiNames': [], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ResultOperation="None", ResultMarginSettings={ 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 })
retval_0.UpdateDerivedGeometry(Examination=examination, Algorithm="Auto")
# CompositeAction ends
for lgi in lungls:
conlgi=collections.Counter(lgi)
if conlgi['_']==0:
with CompositeAction('Add Optimization Function'):
retval_11 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=lgi)
retval_11.DoseFunctionParameters.DoseLevel = int(pdose*0.13)
retval_11.DoseFunctionParameters.PercentVolume = 45
retval_11.DoseFunctionParameters.Weight = 5
with CompositeAction('Add Optimization Function'):
retval_12 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=lgi)
retval_12.DoseFunctionParameters.DoseLevel = int(pdose*0.35)
retval_12.DoseFunctionParameters.PercentVolume = 22
retval_12.DoseFunctionParameters.Weight = 5
else:
if int(str(lgi.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_10 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=lgi)
retval_10.DoseFunctionParameters.DoseLevel = int(str(lgi.split('_')[1]))
retval_10.DoseFunctionParameters.EudParameterA = 1
retval_10.DoseFunctionParameters.Weight = 15
if len(heartls) !=0:
for hti in heartls:
conhti=collections.Counter(hti)
if conhti['_']==0:
with CompositeAction('Add Optimization Function'):
retval_13 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=hti)
retval_13.DoseFunctionParameters.DoseLevel = int(pdose*0.5)
retval_13.DoseFunctionParameters.PercentVolume = 30
retval_13.DoseFunctionParameters.Weight = 5
with CompositeAction('Add Optimization Function'):
retval_14 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=hti)
retval_14.DoseFunctionParameters.DoseLevel = int(pdose*0.67)
retval_14.DoseFunctionParameters.PercentVolume = 22
retval_14.DoseFunctionParameters.Weight = 5
else:
if int(str(hti.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_10 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=hti)
retval_10.DoseFunctionParameters.DoseLevel = int(str(hti.split('_')[1]))
retval_10.DoseFunctionParameters.EudParameterA = 1
retval_10.DoseFunctionParameters.Weight = 15
if len(liverls) !=0:
for lvri in liverls:
conlvri=collections.Counter(lvri)
if conlvri['_']==0:
with CompositeAction('Add Optimization Function'):
retval_15 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=lvri)
retval_15.DoseFunctionParameters.DoseLevel = int(pdose*0.2)
retval_15.DoseFunctionParameters.PercentVolume = 40
retval_15.DoseFunctionParameters.Weight = 5
with CompositeAction('Add Optimization Function'):
retval_16 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=lvri)
retval_16.DoseFunctionParameters.DoseLevel = int(pdose*0.33)
retval_16.DoseFunctionParameters.PercentVolume = 22
retval_16.DoseFunctionParameters.Weight = 5
else:
if int(str(lvri.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_16 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=lvri)
retval_16.DoseFunctionParameters.DoseLevel = int(str(lvri.split('_')[1]))
retval_16.DoseFunctionParameters.EudParameterA = 1
retval_16.DoseFunctionParameters.Weight = 15
if len(rels) !=0:
for rei in rels:
with CompositeAction('Add Optimization Function'):
retval_8 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=rei)
retval_8.DoseFunctionParameters.DoseLevel = int(pdose*0.75)
retval_8.DoseFunctionParameters.Weight = 30
if 'BREASTCTRL.VMAT' in roi_names or 'BREASTCTRL.IMRT' in roi_names:
if len(cordls) !=0:
for ci in cordls:
if 'cm' not in roi_names:
with CompositeAction('ROI Algebra (cm)'):
retval_0 = patient.PatientModel.CreateRoi(Name="cm", Color="Cyan", Type="Organ", TissueName=None, RoiMaterial=None)
retval_0.SetAlgebraExpression(ExpressionA={ 'Operation': "Union", 'SourceRoiNames': [ci], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ExpressionB={ 'Operation': "Union", 'SourceRoiNames': [], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ResultOperation="None", ResultMarginSettings={ 'Type': "Expand", 'Superior': 0.3, 'Inferior': 0.3, 'Anterior': 0.3, 'Posterior': 0.3, 'Right': 0.3, 'Left': 0.3 })
retval_0.UpdateDerivedGeometry(Examination=examination, Algorithm="Auto")
conci=collections.Counter(ci)
if conci['_']==0:
with CompositeAction('Add Optimization Function'):
retval_8 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=ci)
retval_8.DoseFunctionParameters.DoseLevel = int(pdose*0.73)
retval_8.DoseFunctionParameters.Weight = 30
else:
if int(str(ci.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_7 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=ci)
retval_7.DoseFunctionParameters.DoseLevel = int(str(ci.split('_')[1]))
retval_7.DoseFunctionParameters.Weight = 50
with CompositeAction('Add Optimization Function'):
retval_17 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=ci)
retval_17.DoseFunctionParameters.DoseLevel = int(str(ci.split('_')[1]))
retval_17.DoseFunctionParameters.EudParameterA = 150
retval_17.DoseFunctionParameters.Weight = 30
if len(kidls) !=0:
for ki in kidls:
conki=collections.Counter(ki)
if conki['_']==0:
with CompositeAction('Add Optimization Function'):
retval_9 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=ki)
retval_9.DoseFunctionParameters.DoseLevel = int(pdose*0.2)
retval_9.DoseFunctionParameters.PercentVolume = 40
retval_9.DoseFunctionParameters.Weight = 5
with CompositeAction('Add Optimization Function'):
retval_10 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=ki)
retval_10.DoseFunctionParameters.DoseLevel = int(pdose*0.333)
retval_10.DoseFunctionParameters.PercentVolume = 22
retval_10.DoseFunctionParameters.Weight = 5
else:
if int(str(ki.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_10 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=ki)
retval_10.DoseFunctionParameters.DoseLevel = int(str(ki.split('_')[1]))
retval_10.DoseFunctionParameters.EudParameterA = 1
retval_10.DoseFunctionParameters.Weight = 15
if len(lungls) !=0:
if len(lungls)==2:
if "Lung-Z" not in roi_names:
with CompositeAction('ROI Algebra (Lung-Z)'):
retval_0 = patient.PatientModel.CreateRoi(Name="Lung-Z", Color="Pink", Type="Organ", TissueName=None, RoiMaterial=None)
retval_0.SetAlgebraExpression(ExpressionA={ 'Operation': "Union", 'SourceRoiNames': [lungls[0], lungls[1]], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ExpressionB={ 'Operation': "Union", 'SourceRoiNames': [], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ResultOperation="None", ResultMarginSettings={ 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 })
retval_0.UpdateDerivedGeometry(Examination=examination, Algorithm="Auto")
# CompositeAction ends
for lgi in lungls:
conlgi=collections.Counter(lgi)
if conlgi['_']==0:
with CompositeAction('Add Optimization Function'):
retval_11 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=lgi)
retval_11.DoseFunctionParameters.DoseLevel = int(pdose*0.13)
retval_11.DoseFunctionParameters.PercentVolume = 45
retval_11.DoseFunctionParameters.Weight = 5
with CompositeAction('Add Optimization Function'):
retval_12 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=lgi)
retval_12.DoseFunctionParameters.DoseLevel = int(pdose*0.35)
retval_12.DoseFunctionParameters.PercentVolume = 22
retval_12.DoseFunctionParameters.Weight = 5
else:
if int(str(lgi.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_10 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=lgi)
retval_10.DoseFunctionParameters.DoseLevel = int(str(lgi.split('_')[1]))
retval_10.DoseFunctionParameters.EudParameterA = 1
retval_10.DoseFunctionParameters.Weight = 15
if len(heartls) !=0:
for hti in heartls:
conhti=collections.Counter(hti)
if conhti['_']==0:
with CompositeAction('Add Optimization Function'):
retval_13 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=hti)
retval_13.DoseFunctionParameters.DoseLevel = int(pdose*0.5)
retval_13.DoseFunctionParameters.PercentVolume = 30
retval_13.DoseFunctionParameters.Weight = 5
with CompositeAction('Add Optimization Function'):
retval_14 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=hti)
retval_14.DoseFunctionParameters.DoseLevel = int(pdose*0.67)
retval_14.DoseFunctionParameters.PercentVolume = 22
retval_14.DoseFunctionParameters.Weight = 5
else:
if int(str(hti.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_10 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=hti)
retval_10.DoseFunctionParameters.DoseLevel = int(str(hti.split('_')[1]))
retval_10.DoseFunctionParameters.EudParameterA = 1
retval_10.DoseFunctionParameters.Weight = 15
if len(liverls) !=0:
for lvri in liverls:
conlvri=collections.Counter(lvri)
if conlvri['_']==0:
with CompositeAction('Add Optimization Function'):
retval_15 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=lvri)
retval_15.DoseFunctionParameters.DoseLevel = int(pdose*0.2)
retval_15.DoseFunctionParameters.PercentVolume = 40
retval_15.DoseFunctionParameters.Weight = 5
with CompositeAction('Add Optimization Function'):
retval_16 = po.AddOptimizationFunction(FunctionType="MaxDVH", RoiName=lvri)
retval_16.DoseFunctionParameters.DoseLevel = int(pdose*0.33)
retval_16.DoseFunctionParameters.PercentVolume = 22
retval_16.DoseFunctionParameters.Weight = 5
else:
if int(str(lvri.split('_')[1]))!=0:
with CompositeAction('Add Optimization Function'):
retval_16 = po.AddOptimizationFunction(FunctionType="MaxEud", RoiName=lvri)
retval_16.DoseFunctionParameters.DoseLevel = int(str(lvri.split('_')[1]))
retval_16.DoseFunctionParameters.EudParameterA = 1
retval_16.DoseFunctionParameters.Weight = 15
with CompositeAction('Add Optimization Function'):
retval_11 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName="B1")
retval_11.DoseFunctionParameters.DoseLevel = int(pdose*0.8)
retval_11.DoseFunctionParameters.Weight = 1
with CompositeAction('Add Optimization Function'):
retval_12 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName="B1.5")
retval_12.DoseFunctionParameters.DoseLevel = int(pdose*0.7)
retval_12.DoseFunctionParameters.Weight = 1
with CompositeAction('Add Optimization Function'):
retval_13 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName="B2")
retval_13.DoseFunctionParameters.DoseLevel = int(pdose*0.5)
retval_13.DoseFunctionParameters.Weight = 1
if (nob=='BST7' or nob=='BST4'):
if "BSTP" not in roi_names:
with CompositeAction('ROI Algebra (BSTP)'):
retval_4 = patient.PatientModel.CreateRoi(Name="BSTP", Color="Cyan", Type="Ptv", TissueName=None, RoiMaterial=None)
retval_4.SetAlgebraExpression(ExpressionA={ 'Operation': "Union", 'SourceRoiNames': [pname], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ExpressionB={ 'Operation': "Union", 'SourceRoiNames': [], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ResultOperation="None", ResultMarginSettings={ 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 2, 'Posterior': 0, 'Right': 0, 'Left': 0 })
retval_4.UpdateDerivedGeometry(Examination=examination, Algorithm="Auto")
with CompositeAction('Add Optimization Function'):
retval_4 = po.AddOptimizationFunction(FunctionType="MinDose", RoiName="BSTP")
retval_4.DoseFunctionParameters.DoseLevel = pdose
retval_4.DoseFunctionParameters.Weight = 2
#patient.BodySite=machname + ' ' + stgleb + ' ' + pname
#patient.Save()
if optleb=='Y':
patient = get_current("Patient")
machine_db=get_current("MachineDB")
examination = get_current("Examination")
laps=['Lap1','Lap2','Lap3','Lap4']
roi_names=[r.Name for r in patient.PatientModel.RegionsOfInterest]
dos=[]
nfra=[]
pdos=[]
tarls=[]
dosls=[]
for m in roi_names:
if patient.PatientModel.RegionsOfInterest[m].OrganData.OrganType == "Target":
con=collections.Counter(m)
if con['_']==2:
pname=m
nf=m.split('_')[1]
dy=m.split('_')[2]
tarls.append(m)
dos.append(int(dy))
nfra.append(int(nf))
dosls.append(int(dy))
if con['_']==3:
pname=m
nf=m.split('_')[1]
dy=m.split('_')[2]
#tarls.append(m)
dos.append(int(dy))
nfra.append(int(nf))
#dosls.append(int(dy))
if con['_']==1:
tarls.append(m)
dostmp=m.split('_')[1]
dosls.append(int(dostmp))
pdose=max(dos)
info=patient.QueryPlanInfo(Filter={'Name':'^{0}$'.format("plan")})
patient.LoadPlan(PlanInfo=info[0])
plan=patient.LoadPlan(PlanInfo=info[0])
po=plan.PlanOptimizations[0]
opt_param = po.OptimizationParameters
rtmp_names=[r.Name for r in patient.PatientModel.RegionsOfInterest]
for i in laps:
if i in rtmp_names:
with CompositeAction('Delete ROI (i)'):
patient.PatientModel.RegionsOfInterest[i].DeleteRoi()
retval_1 = patient.PatientModel.CreateRoi(Name=laps[0], Color="Magenta", Type="Control", TissueName=None, RoiMaterial=None)
po.RunOptimization()
con2=collections.Counter(pname)
if con2['_']==2:
with CompositeAction('Add Optimization Function'):
retval_1 = po.AddOptimizationFunction(FunctionType="MinDVH", RoiName=pname)
retval_1.DoseFunctionParameters.DoseLevel = pdose
retval_1.DoseFunctionParameters.PercentVolume = 97
retval_1.DoseFunctionParameters.Weight = 400
with CompositeAction('Add Optimization Function'):
retval_2 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName=pname)
retval_2.DoseFunctionParameters.DoseLevel = int(pdose*1.085)
retval_2.DoseFunctionParameters.Weight = 400
if con2['_']==3:
with CompositeAction('Add Optimization Function'):
retval_1 = po.AddOptimizationFunction(FunctionType="MinDVH", RoiName=pname)
retval_1.DoseFunctionParameters.DoseLevel = pdose
retval_1.DoseFunctionParameters.PercentVolume = 97
retval_1.DoseFunctionParameters.Weight = 60
rtmp_names=[r.Name for r in patient.PatientModel.RegionsOfInterest]
for i in laps:
if i in rtmp_names:
with CompositeAction('Delete ROI (i)'):
patient.PatientModel.RegionsOfInterest[i].DeleteRoi()
retval_1 = patient.PatientModel.CreateRoi(Name=laps[1], Color="Magenta", Type="Control", TissueName=None, RoiMaterial=None)
po.RunOptimization()
if ("rx" not in roi_names):
with CompositeAction('ROI Algebra (rx)'):
retval_1 = patient.PatientModel.CreateRoi(Name="rx", Color="Yellow", Type="Organ", TissueName=None, RoiMaterial=None)
retval_1.SetAlgebraExpression(ExpressionA={ 'Operation': "Union", 'SourceRoiNames': [pname], 'MarginSettings': { 'Type': "Expand", 'Superior': 0.8, 'Inferior': 0.8, 'Anterior': 0.8, 'Posterior': 0.8, 'Right': 0.8, 'Left': 0.8 } }, ExpressionB={ 'Operation': "Union", 'SourceRoiNames': [pname], 'MarginSettings': { 'Type': "Expand", 'Superior': 0.4, 'Inferior': 0.4, 'Anterior': 0.4, 'Posterior': 0.4, 'Right': 0.4, 'Left': 0.4 } }, ResultOperation="Subtraction", ResultMarginSettings={ 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 })
retval_1.UpdateDerivedGeometry(Examination=examination, Algorithm="Auto")
with CompositeAction('Add Optimization Function'):
retval_2 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName="rx")
retval_2.DoseFunctionParameters.DoseLevel = pdose-40
retval_2.DoseFunctionParameters.Weight = 50
if ("pr" not in roi_names):
with CompositeAction('ROI Algebra (pr)'):
retval_3 = patient.PatientModel.CreateRoi(Name="pr", Color="Yellow", Type="Ptv", TissueName=None, RoiMaterial=None)
retval_3.SetAlgebraExpression(ExpressionA={ 'Operation': "Union", 'SourceRoiNames': [pname], 'MarginSettings': { 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 } }, ExpressionB={ 'Operation': "Union", 'SourceRoiNames': [pname], 'MarginSettings': { 'Type': "Contract", 'Superior': 0.4, 'Inferior': 0.4, 'Anterior': 0.4, 'Posterior': 0.4, 'Right': 0.4, 'Left': 0.4 } }, ResultOperation="Subtraction", ResultMarginSettings={ 'Type': "Expand", 'Superior': 0, 'Inferior': 0, 'Anterior': 0, 'Posterior': 0, 'Right': 0, 'Left': 0 })
retval_3.UpdateDerivedGeometry(Examination=examination, Algorithm="Auto")
with CompositeAction('Add Optimization Function'):
retval_4 = po.AddOptimizationFunction(FunctionType="MinDVH", RoiName="pr")
retval_4.DoseFunctionParameters.DoseLevel = pdose+20
retval_4.DoseFunctionParameters.PercentVolume = 100
retval_4.DoseFunctionParameters.Weight = 10
if "DSSX" in roi_names:
with CompositeAction('Add Optimization Function'):
retval_5 = po.AddOptimizationFunction(FunctionType="MaxDose", RoiName="DSSX")
retval_5.DoseFunctionParameters.DoseLevel = int(pdose*0.81)
retval_5.DoseFunctionParameters.Weight = 15
rtmp_names=[r.Name for r in patient.PatientModel.RegionsOfInterest]
for i in laps:
if i in rtmp_names:
with CompositeAction('Delete ROI (i)'):
patient.PatientModel.RegionsOfInterest[i].DeleteRoi()
retval_1 = patient.PatientModel.CreateRoi(Name=laps[2], Color="Magenta", Type="Control", TissueName=None, RoiMaterial=None)
po.RunOptimization()
rtmp_names=[r.Name for r in patient.PatientModel.RegionsOfInterest]
for i in laps:
if i in rtmp_names:
with CompositeAction('Delete ROI (i)'):
patient.PatientModel.RegionsOfInterest[i].DeleteRoi()
retval_1 = patient.PatientModel.CreateRoi(Name=laps[3], Color="Magenta", Type="Control", TissueName=None, RoiMaterial=None)
po.RunOptimization()
#patient.Save()
else:
pass
e689fb9b94328bc41cc9aaf288196896b56b3890 | 25 | py | Python | tests/dir_not_pkg/pkg1/som_mod.py | Cologler/execode-python | 71e172ee5875a161c0daec61266069982c845b83 | ["MIT"]
def func():
return 3
e6cd433c9823e4be1b6ac7a218958e08f5d363d8 | 493 | py | Python | mcc-2015/02 Complete the Equation.py | jaredliw/mcc-python-solutions | f54b1d2a044788b2adc1eb19a490422eb92ffe77 | ["MIT"] | 2 | 2021-04-09T04:03:39.000Z | 2021-04-09T04:18:28.000Z
a, b, c = map(int, input().split())
# Nothing much, just a bunch of ifs
if a + b == c:
print("{}+{}={}".format(a, b, c))
elif a - b == c:
print("{}-{}={}".format(a, b, c))
elif a * b == c:
print("{}*{}={}".format(a, b, c))
elif a / b == c:
print("{}/{}={}".format(a, b, c))
# a = b + c is the same as a - b = c, skipped
elif a == b - c:
print("{}={}-{}".format(a, b, c))
# a = b * c is the same as a / b = c, skipped
elif a == b / c:
print("{}={}/{}".format(a, b, c))
e6deaef6f4dc147da689ad30fb1907dffe073522 | 210,662 | py | Python | fMRI Tasks/WASABI distractmap/old/WASABI_distractmap_v1.0.0.py | canlab/WASABI_public | c10a33fcd8959ff9798eeec099a3f8954531661d | ["MIT"] | 1 | 2021-11-16T09:59:14.000Z | 2021-11-16T09:59:14.000Z
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy3 Experiment Builder (v2020.2.10),
on January 28, 2021, at 23:04
If you publish work using this script the most relevant publication is:
Peirce J, Gray JR, Simpson S, MacAskill M, Höchenberger R, Sogo H, Kastman E, Lindeløv JK. (2019)
PsychoPy2: Experiments in behavior made easy Behav Res 51: 195.
https://doi.org/10.3758/s13428-018-01193-y
This python script was extensively modified in order to work in the Dartmouth Brain Imaging Center environment reading in signals coming from a
Siemens 3T fMRI scanner. Physiological data is acquired with a Biopac MP150 Physiological data acquisition device via LabJack U3.
Some measures have been taken to minimize experimental latency. PTB/Psychopy style is used to initialize all objects prior to screen flipping as much as possible.
Data is written in BIDS 1.4.1 format, as separate tab-separated-value (.tsv) files for each run per subject, (UTF-8 encoding).
Following this format:
all data headers are in lower snake_case.
The paradigm will generate these files of name:
1x sub-XXXX_ses-XX_task-Practice1back_events.tsv
1x sub-XXXX_ses-XX_task-Practice2back_events.tsv
8x sub-XXXX_ses-XX_task-distractmap_acq-[bodySite]_run-XX_events.tsv
8x sub-XXXX_ses-XX_task-distractmap-ratings_acq-[bodySite]_run-XX_events.tsv
16x trials per file with the following headers:
'onset','duration','rt','response','correct','attempt','condition'
'onset', 'duration', 'rt', 'response', 'correct', 'bodySite', 'temperature', 'condition', 'pretrial-jitter', 'posttrial-jitter'
'onset', 'duration', 'bodySite', 'intensity', 'temperature', 'condition', 'posttrial-jitter'
Troubleshooting Tips:
If you get window-related errors, make sure to downgrade pyglet to 1.4.1:
pip uninstall pyglet
pip install pyglet==1.4.1
0a. Import Libraries
"""
from __future__ import absolute_import, division
from psychopy import locale_setup
from psychopy import prefs
from psychopy import sound, gui, visual, core, data, event, logging, clock
from psychopy.constants import (NOT_STARTED, STARTED, PLAYING, PAUSED,
STOPPED, FINISHED, PRESSED, RELEASED, FOREVER)
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import (sin, cos, tan, log, log10, pi, average,
sqrt, std, deg2rad, rad2deg, linspace, asarray)
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
import sys # to get file system encoding
from psychopy.hardware import keyboard
from builtins import str
from builtins import range
import pandas as pd
import collections
try:
from collections import OrderedDict
except ImportError:
OrderedDict=dict
import random
__author__ = "Michael Sun"
__version__ = "1.0.0"
__email__ = "msun@dartmouth.edu"
__status__ = "Production"
"""
0b. Beta-Testing Togglers
Set to 1 during development, 0 during production
"""
debug = 0
cheat = 0
autorespond = 0
# Device togglers
biopac_exists = 1
thermode_exists = 1
class simKeys:
'''
an object to simulate key presses
keyList: a list of keys to watch
name: randomly selected from keyList
rtRange: [min RT, max RT] where min and max RT are specified in ms
'''
def __init__(self, keyList, rtRange):
self.name=np.random.choice(keyList)
self.rt = np.random.choice(np.linspace(rtRange[0], rtRange[1])/1000)
# pick an RT
thisRT=randint(0,5)
thisSimKey=simKeys(keyList=['space'],
rtRange=[200,1000])
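# Usage sketch (assumption): with autorespond == 1, a keypress can be simulated by reading
# the pre-built object's attributes instead of polling the keyboard, e.g.:
#   fakeKey = simKeys(keyList=['space'], rtRange=[200, 1000])
#   simulated_name, simulated_rt = fakeKey.name, fakeKey.rt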
def rescale(self, width=0, height=0, operation='', units=None, log=True):
(old_width,old_height) = self.size
if all([height,width]):
pass
elif height:
ratio = height/old_height
width = old_width * ratio
elif width:
ratio = width/old_width
height = old_height * ratio
self.setSize([width,height],operation,units,log)
visual.ImageStim.rescale = rescale
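# Usage sketch (hypothetical stimulus): after the monkey-patch above, any ImageStim can be
# resized while preserving its aspect ratio by giving only one dimension, e.g.:
#   img = visual.ImageStim(win, image='instructions.png')   # 'win' and the image are assumptions
#   img.rescale(width=1.5)                                  # height scales by the same ratio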
"""
0c. Prepare Devices: Biopac Psychophysiological Acquisition
"""
# Biopac parameters _________________________________________________
# Relevant Biopac commands:
# To send a Biopac marker code to Acqknowledge, replace the FIO number with a value between 0-255(dec), or an 8-bit word(bin)
# For instance, the following code would send a value of 15 by setting the first 4 bits to "1": biopac.getFeedback(u3.PortStateWrite(State = [15, 0, 0]))
# Toggling each of the FIO 8 channels directly: biopac.setFIOState(fioNum = 0:7, state=1)
# Another command that may work: biopac.setData(byte)
# biopac channels EDIT
task_ID=7
intro=193
bodymapping_instruction=15
leftface_heat=17
rightface_heat=18
leftarm_heat=19
rightarm_heat=20
leftleg_heat=21
rightleg_heat=22
chest_heat=23
abdomen_heat=24
nback_instructions=186
nback_fixation=187
nback_trial_start=188
next_run=189
nback_hit=190
nback_comiss=191
nback_feedback_pos=194
nback_feedback_miss=195
nback_feedback_neg=196
intensity_rating=43
between_run_msg=45
end_task = 197
if biopac_exists == 1:
# Initialize LabJack U3 Device, which is connected to the Biopac MP150 psychophysiological amplifier data acquisition device
# This involves importing the labjack U3 Parallelport to USB library
# U3 Troubleshooting:
# Check to see if u3 was imported correctly with: help('u3')
# Check to see if u3 is calibrated correctly with: cal_data = biopac.getCalibrationData()
# Check to see the data at the FIO, EIO, and CIO ports: biopac.getFeedback(u3.PortStateWrite(State = [0, 0, 0]))
try:
from psychopy.hardware.labjacks import U3
# from labjack import u3
except ImportError:
import u3
# Function defining setData to use the FIOports (address 6000)
def biopacSetData(self, byte, endian='big', address=6000):
if endian=='big':
byteStr = '{0:08b}'.format(byte)[-1::-1]
else:
byteStr = '{0:08b}'.format(byte)
[self.writeRegister(address+pin, int(entry)) for (pin, entry) in enumerate(byteStr)]
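    # biopacSetData() formats the byte as 8 bits, reverses the string when endian='big' so the
    # least-significant bit lands on FIO0, then writes one bit per register (address 6000 + pin),
    # i.e. registers 6000-6007 map onto FIO0-FIO7.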
biopac = U3()
biopac.setData = biopacSetData
    # Set all FIO bits to digital output and set to low (i.e. "0")
    # The list in square brackets represents what's desired for the FIO, EIO, and CIO ports. We will only change the FIO port's state.
biopac.configIO(FIOAnalog=0, EIOAnalog=0)
for FIONUM in range(8):
biopac.setFIOState(fioNum = FIONUM, state=0)
biopac.setData(biopac, 0)
# Medoc TSA2 parameters ______________________________________________
# Initialize the Medoc TSA2 thermal stimulation delivery device
# Medoc Troubleshooting:
# To find the computer IP address, check with MMS Arbel's External Control (or Windows ipconfig alternatively)
# Communication port is always 20121
# Relevant Medoc commands:
# Prepare a program: sendCommand('select_tp', config.START_CALIBRATION)
# Poll the Machine to know if it's ready for another command: poll_for_change("[RUNNING/IDLE]", poll_interval=0.5, poll_max = -1 (unlimited), verbose=False, server_lag=1)
# Select "RUNNING" if you are using a "Manual Trigger" and a SELECT_TP has already been sent. Select "IDLE" if you are using an "Auto" Trigger design
# Trigger a prepared program: sendCommand('trigger')
# Pause a program: sendCommand('pause')
# Stop a program: sendCommand('stop')
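# A typical delivery sequence, sketched from the commands listed above (program numbers come
# from thermode1_temp2program, which is built further below):
#   sendCommand('select_tp', <program>)   # prepare the thermal program
#   poll_for_change('RUNNING')            # wait until the machine is ready (manual-trigger designs)
#   sendCommand('trigger')                # deliver the stimulus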
if thermode_exists == 1:
    # Import the medocControl library, a custom Python library written for the Medoc with pyMedoc poll_for_change functionality.
# Make sure medocControl.py is in the same directory
from medocControl import *
"""
1. Experimental Parameters
Clocks, paths, etc.
"""
# Clocks
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
# Paths
# Ensure that relative paths start from the same directory as this script
_thisDir = os.path.dirname(os.path.abspath(__file__))
os.chdir(_thisDir)
main_dir = _thisDir
stimuli_dir = main_dir + os.sep + "stimuli"
instructions_dir = main_dir + os.sep + 'instruction_stim'
nback_dir = main_dir + os.sep + "nbackorder"
# Brings up the Calibration/Data folder to load the appropriate calibration data right away.
calibration_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.path.pardir, os.path.pardir, 'Calibration', 'data')
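# i.e. two directories up from this script, then into Calibration/data.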
"""
2. Start Experimental Dialog Boxes
"""
# Upload participant file: Browse for file
# Store info about the experiment session
psychopyVersion = '2020.2.10'
expName = 'distractmap' # from the Builder filename that created this script
if debug == 1:
expInfo = {
'subject number': '99',
'gender': 'm',
'session': '99',
'handedness': 'r',
'scanner': 'MS'
}
else:
expInfo = {
'subject number': '',
'gender': '',
'session': '',
'handedness': '',
'scanner': ''
}
## Limit the entries of this to hot temperatures (32-49 degrees in half-degree-steps)
participant_settingsHeat = {
'Left Face': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49], # Calibrated Temp for left face
'Right Face': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49], # Calibrated Temp for right face
'Left Arm': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49], # Calibrated Temp for left arm
'Right Arm': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49], # Calibrated Temp for right arm
'Left Leg': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49], # Calibrated Temp for left leg
'Right Leg': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49], # Calibrated Temp for right leg
'Chest': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49], # Calibrated Temp for chest
'Abdomen': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49] # Calibrated Temp for abdomen
}
## Limit the entries of this to warm temperatures (32-49 degrees in half-degree-steps)
participant_settingsWarm = {
'Left Face': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49], # Calibrated Temp for left face
'Right Face': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49], # Calibrated Temp for right face
'Left Arm': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49], # Calibrated Temp for left arm
'Right Arm': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49], # Calibrated Temp for right arm
'Left Leg': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49], # Calibrated Temp for left leg
'Right Leg': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49], # Calibrated Temp for right leg
'Chest': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49], # Calibrated Temp for chest
'Abdomen': [32,32.5,33,33.5,34,34.5,35,35.5,36,36.5,37,37.5,38,38.5,39,39.5,40,40.5,41,41.5,42,42.5,43,43.5,44,44.5,45,45.5,46,46.5,47,47.5,48,48.5,49] # Calibrated Temp for abdomen
}
# Load the subject's calibration file and ensure that it is valid
if debug==1:
expInfo = {
'subject number': '999',
'gender': 'm',
'bodymap first- or second-half (1 or 2)': '2',
'session': '99',
'handedness': 'r',
'scanner': 'TEST'
}
participant_settingsHeat = {
'Left Face': 46,
'Right Face': 46,
'Left Arm': 46,
'Right Arm': 46,
'Left Leg': 46,
'Right Leg': 46,
'Chest': 46,
'Abdomen': 46
}
participant_settingsWarm = {
'Left Face': 40,
'Right Face': 40,
'Left Arm': 40,
'Right Arm': 40,
'Left Leg': 40,
'Right Leg': 40,
'Chest': 40,
'Abdomen': 40
}
else:
dlg1 = gui.fileOpenDlg(tryFilePath=calibration_dir, tryFileName="", prompt="Select participant calibration file (*_task-Calibration_participants.tsv)", allowed="Calibration files (*.tsv)")
if dlg1!=None:
if "_task-Calibration_participants.tsv" in dlg1[0]:
# Read in participant info csv and convert to a python dictionary
a = pd.read_csv(dlg1[0], delimiter='\t', index_col=0, header=0, squeeze=True)
if a.shape == (1,39):
participant_settingsHeat = {}
participant_settingsWarm = {}
p_info = [dict(zip(a.iloc[i].index.values, a.iloc[i].values)) for i in range(len(a))][0]
expInfo['subject number'] = p_info['participant_id']
expInfo['gender'] = p_info['gender']
expInfo['handedness'] = p_info['handedness']
# Heat Settings
participant_settingsHeat['Left Face'] = p_info['leftface_ht']
participant_settingsHeat['Right Face'] = p_info['rightface_ht']
participant_settingsHeat['Left Arm'] = p_info['leftarm_ht']
participant_settingsHeat['Right Arm'] = p_info['rightarm_ht']
participant_settingsHeat['Left Leg'] = p_info['leftleg_ht']
participant_settingsHeat['Right Leg'] = p_info['rightleg_ht']
participant_settingsHeat['Chest'] = p_info['chest_ht']
participant_settingsHeat['Abdomen'] = p_info['abdomen_ht']
ses_num = str(1)
expInfo2 = {
'session': ses_num,
'scanner': ''
}
dlg2 = gui.DlgFromDict(title="WASABI Distraction Map Scan", dictionary=expInfo2, sortKeys=False)
expInfo['session'] = expInfo2['session']
expInfo['scanner'] = expInfo2['scanner']
if dlg2.OK == False:
core.quit() # user pressed cancel
else:
errorDlg1 = gui.Dlg(title="Error - invalid file")
errorDlg1.addText("Selected file is not a valid calibration file. Data is incorrectly formatted. (Wrong dimensions)")
errorDlg1.show()
dlg1=None
else:
errorDlg2 = gui.Dlg(title="Error - invalid file")
            errorDlg2.addText("Selected file is not a valid calibration file. Name is not formatted sub-XXX_task-Calibration_participants.tsv")
errorDlg2.show()
dlg1=None
if dlg1==None:
dlg2 = gui.DlgFromDict(title="WASABI Body-Site Scan", dictionary=expInfo, sortKeys=False)
if dlg2.OK == False:
core.quit() # user pressed cancel
pphDlg = gui.DlgFromDict(participant_settingsHeat,
title='Participant Heat Parameters')
if pphDlg.OK == False:
core.quit()
ppwDlg = gui.DlgFromDict(participant_settingsWarm,
title='Participant Warmth Parameters')
if ppwDlg.OK == False:
core.quit()
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
expInfo['psychopyVersion'] = psychopyVersion
"""
3. Setup the Window
fullscr = False for testing, True for running participants
"""
if debug == 1:
win = visual.Window(
size=[1280, 720], fullscr=False,
screen=0, # Change this to the appropriate display
winType='pyglet', allowGUI=True, allowStencil=False,
monitor='testMonitor', color=[-1.000,-1.000,-1.000], colorSpace='rgb',
blendMode='avg', useFBO=True,
units='height')
else:
win = visual.Window(
size=[1920, 1080], fullscr=True,
screen=-1, # Change this to the appropriate fMRI projector
winType='pyglet', allowGUI=True, allowStencil=False,
monitor='testMonitor', color=[-1.000,-1.000,-1.000], colorSpace='rgb',
blendMode='avg', useFBO=True,
units='height')
# store frame rate of monitor if we can measure it
expInfo['frameRate'] = win.getActualFrameRate()
if expInfo['frameRate'] is not None:
frameDur = 1.0 / round(expInfo['frameRate'])
else:
frameDur = 1.0 / 60.0 # could not measure, so guess
win.mouseVisible = False # Make the mouse invisible for the remainder of the experiment
"""
4. Prepare Experimental Dictionaries for Body-Site Cues and Medoc Temperature Programs
"""
## Check gender for Chest cue
Chest_imgPath = os.sep.join([stimuli_dir,"cue","ChestF.png"])
if expInfo['gender'] in {"M", "m", "Male", "male"}:
Chest_imgPath = os.sep.join([stimuli_dir,"cue","ChestM.png"])
elif expInfo['gender'] in {"F", "f", "Female", "female"}:
Chest_imgPath = os.sep.join([stimuli_dir,"cue","ChestF.png"])
bodysite_word2img = {"Left Face": os.sep.join([stimuli_dir,"cue","LeftFace.png"]),
"Right Face": os.sep.join([stimuli_dir,"cue","RightFace.png"]),
"Left Arm": os.sep.join([stimuli_dir,"cue","LeftArm.png"]),
"Right Arm": os.sep.join([stimuli_dir,"cue","RightArm.png"]),
"Left Leg": os.sep.join([stimuli_dir,"cue","LeftLeg.png"]),
"Right Leg": os.sep.join([stimuli_dir,"cue","RightLeg.png"]),
"Chest": Chest_imgPath,
"Abdomen": os.sep.join([stimuli_dir,"cue","Abdomen.png"])
}
bodysite_word2heatcode = {"Left Face": leftface_heat,
"Right Face": rightface_heat,
"Left Arm": leftarm_heat,
"Right Arm": rightarm_heat,
"Left Leg": leftleg_heat,
"Right Leg": rightleg_heat,
"Chest": chest_heat,
"Abdomen": abdomen_heat
}
# Set up a dictionary for all the configured Medoc programs for the main thermode
thermode1_temp2program = {}
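# thermode1_programs.txt is expected to hold one whitespace-separated pair per line:
# a temperature (float) and its Medoc program number (int), e.g. "46.5  107" (values illustrative).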
with open("thermode1_programs.txt") as f:
for line in f:
(key, val) = line.split()
thermode1_temp2program[float(key)] = int(val)
"""
5. Create Body-Site Pairs for each run for this participant
"""
bodySites = ["Left Face", "Right Face", "Left Arm", "Right Arm", "Left Leg", "Right Leg", "Chest", "Abdomen"]
random.shuffle(bodySites)
if debug == 1:
bodySites = ["Abdomen"]
expInfo['body_site_order'] = str(bodySites)
"""
4. Prepare files to write
"""
sub_dir = os.path.join(_thisDir, 'data', 'sub-%05d' % (int(expInfo['subject number'])), 'ses-%02d' % (int(expInfo['session'])))
if not os.path.exists(sub_dir):
os.makedirs(sub_dir)
psypy_filename = os.path.join(sub_dir, '%05d_%s_%s' % (int(expInfo['subject number']), expName, expInfo['date']))
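# Output lands under data/sub-XXXXX/ses-XX/; the ExperimentHandler and the log file below
# both write against this filename stem.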
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
extraInfo=expInfo, runtimeInfo=None,
# originPath='C:\\Users\\Michael\\Downloads\\counterbalance-multiple-tasks-demo.py',
savePickle=True, saveWideText=True,
dataFileName=psypy_filename)
# save a log file for detail verbose info
logFile = logging.LogFile(psypy_filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
frameTolerance = 0.001 # how close to onset before 'same' frame
# Create python lists to later concatenate or convert into pandas dataframes
Practice_1back_trial = []
Practice_1back = []
Practice_2back_trial = []
Practice_2back = []
distractmap_bids_trial = []
distractmap_bids = []
rating_bids_trial = []
rating_bids = []
"""
5. Initialize Trial-level Components
"""
# General Instructional Text
start_msg = 'Please wait. \nThe scan will begin shortly. \n Experimenter press [s] to continue.'
in_between_run_msg = 'Thank you.\n Please wait for the next scan to start \n Experimenter press [e] to continue.'
end_msg = 'Please wait for instructions from the experimenter'
# create a default keyboard (e.g. to check for escape)
defaultKeyboard = keyboard.Keyboard()
######################
# N-Back Task Components
######################
# Initialize components for Routine "NbackInstructions"
NbackInstructionsClock = core.Clock()
NbackInstructions = visual.TextStim(win=win, name='Nbackinstructions',
text='Welcome to the n-back task \nPlease read the following instructions \nvery carefully.\n\n\n\nExperimenter press [Space] to continue.',
font='Arial', wrapWidth=1.75,
pos=(0, 0.0), units='height', height=0.05,
color='white', colorSpace='rgb', opacity=1)
NbackInstructionImg = visual.ImageStim(
win=win,
name='NbackInstructionImg',
image= 'instruction_stim/1.png', mask=None,
ori=0, pos=(0, 0.15), size=(0.3, 0.3),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=0.0)
NbackInstructionWideImg = visual.ImageStim(
win=win,
name='NbackInstructionWideImg',
image= 'instruction_stim/3.png', mask=None,
ori=0, pos=(0, 0), size=(1, 0.3),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=0.0)
ClickPrompt = visual.TextStim(win=win, name='ClickPrompt',
text='',
font='Arial',
pos=(0, -.4), units='height', height=0.05,
color='white', colorSpace='rgb', opacity=1)
NbackStart = keyboard.Keyboard()
# Initialize components for Routine "ButtonTest"
ButtonTestClock = core.Clock()
box1Text = visual.TextStim(win=win, name='box1Text',
text="Button/key 1 \nindicates \"Yes\", a match.",
font='Arial',
pos=(0, 0.1), units='height', height=0.05,
color='white', colorSpace='rgb', opacity=1)
box2Text = visual.TextStim(win=win, name='box2Text',
text="Button/key 2 \nindicates \"No\", a mismatch.",
font='Arial',
pos=(0, -0.1), units='height', height=0.05,
color='white', colorSpace='rgb', opacity=1)
box1Check = visual.TextStim(win=win, name='box1Check',
text="X",
font='Arial',
pos=(-.2, .15), units='height', height=0.05,
color='red', colorSpace='rgb', opacity=1)
box2Check = visual.TextStim(win=win, name='box2Check',
text="X",
font='Arial',
pos=(-.2, -0.05), units='height', height=0.05,
color='red', colorSpace='rgb', opacity=1)
box1 = visual.Rect(
win=win, name='box1',
width=(0.05, 0.05)[0], height=(0.05, 0.05)[1],
ori=0,
pos=(-0.2, 0.15),
lineWidth=1, lineColorSpace='rgb',
fillColorSpace='rgb', lineColor=[1,1,1], fillColor=[1,1,1],
opacity=1, depth=0.0, interpolate=True)
box2 = visual.Rect(
win=win, name='box2',
width=(0.05, 0.05)[0], height=(0.05, 0.05)[1],
ori=0,
pos=(-0.2, -0.05),
lineWidth=1, lineColorSpace='rgb',
fillColorSpace='rgb', lineColor=[1,1,1], fillColor=[1,1,1],
opacity=1, depth=0.0, interpolate=True)
mouse = event.Mouse(win=win, visible=False)
x, y = [None, None]
mouse.mouseClock = core.Clock()
continueText = visual.TextStim(win=win, name='continueText',
text='Experimenter press [Space] to continue.',
font='Arial',
pos=(0, -.35), units='height', height=0.05,
color='white', colorSpace='rgb', opacity=1)
# continueKey = keyboard.Keyboard()
incorrect_text = "Incorrect!"
noresponse_text = "No Response!"
correct_text = "Correct!"
Feedback = visual.TextStim(win=win, name='Feedback',
text="",
font='Arial',
pos=(0, -0.35), units='height', height=0.05,
color='white', colorSpace='rgb', opacity=1)
# Initialize components for Routine "Fixation"
FixationClock = core.Clock()
fixation_2 = visual.TextStim(win=win, name='fixation_2',
text='+',
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=-2.0)
# Initialize components for Routine "N_back_1_trial"
N_back_1_TrialClock = core.Clock()
grid_lines = visual.ImageStim(
win=win,
name='grid_lines',
image='grid.png', mask=None,
ori=0, pos=(0, 0), size=(0.6, 0.6),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=0.0)
target_square = visual.Rect(
win=win, name='target_square',
width=(0.15, 0.15)[0], height=(0.15, 0.15)[1],
ori=0, pos=[0,0],
lineWidth=1, lineColor=None, lineColorSpace='rgb',
fillColor=[1.000,1.000,1.000], fillColorSpace='rgb',
opacity=1, depth=-1.0, interpolate=True)
fixation_1 = visual.TextStim(win=win, name='fixation_1',
text='+',
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=-2.0)
response = event.Mouse(win=win)
response.mouseClock = core.Clock()
# Initialize components for Routine "N_back_2_trials"
N_back_2_TrialClock = core.Clock()
grid_lines_2 = visual.ImageStim(
win=win,
name='grid_lines_2',
image='grid.png', mask=None,
ori=0, pos=(0, 0), size=(0.6, 0.6),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=128, interpolate=True, depth=0.0)
target_square_2 = visual.Rect(
win=win, name='target_square_2',
width=(0.15, 0.15)[0], height=(0.15, 0.15)[1],
ori=0, pos=[0,0],
lineWidth=1, lineColor=None, lineColorSpace='rgb',
fillColor=[1.000,1.000,1.000], fillColorSpace='rgb',
opacity=1, depth=-1.0, interpolate=True)
fixation_3 = visual.TextStim(win=win, name='fixation_3',
text='+',
font='Arial',
pos=(0, 0), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=-2.0)
# response_2 = keyboard.Keyboard()
response_2 = event.Mouse(win=win)
response_2.mouseClock = core.Clock()
# Initialize components for Routine "ScoreReport"
ScoreReportClock = core.Clock()
ScoreReportText = visual.TextStim(win=win, name='ScoreReportText',
text='This text is for reporting your score performance.',
font='Arial', wrapWidth=1.75,
pos=(0, 0.0), units='height', height=0.05,
color='white', colorSpace='rgb', opacity=1)
ScoreReportResponse = keyboard.Keyboard()
# Initialize components for Routine "BodySiteInstruction"
BodySiteInstructionClock = core.Clock()
BodySiteInstructionRead = keyboard.Keyboard()
BodySiteInstructionText = visual.TextStim(win, name='BodySiteInstructionText',
text="Experimenter: Please place thermodes on the designated body-site.",
font = 'Arial',
pos=(0, -.2), height=0.05, wrapWidth=1.6, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0,
anchorHoriz='center')
BodySiteImg = visual.ImageStim(
win=win,
name='BodySiteImg',
mask=None,
ori=0, pos=(0, 0), size=(.40,.40),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=512, interpolate=True, depth=0.0)
# Initialize components for each Rating
ratingTime = 5 # Rating Time limit in seconds
TIME_INTERVAL = 0.005 # Speed at which slider ratings update
ratingScaleWidth=1.5
ratingScaleHeight=.4
sliderMin = -.75
sliderMax = .75
intensityText = "How intense was that overall?"
black_triangle_verts = [(sliderMin, .2), # left point
(sliderMax, .2), # right point
(0, -.2)] # bottom-point
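# The rating scale is laid out in 'height' units: it spans sliderMin..sliderMax (ratingScaleWidth = 1.5);
# black_triangle_verts tapers to a point at the horizontal centre of the scale.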
# Initialize components for Routine "IntensityRating"
IntensityRatingClock = core.Clock()
IntensityMouse = event.Mouse(win=win, visible=False)
IntensityMouse.mouseClock = core.Clock()
IntensityRating = visual.Rect(win, height=ratingScaleHeight, width=abs(sliderMin), pos= [sliderMin/2, -.1], fillColor='red', lineColor='black')
IntensityBlackTriangle = visual.ShapeStim(
win,
vertices=[(sliderMin, .2), # left point
(sliderMax, .2), # right point
(sliderMin, -.2)], # bottom-point,
fillColor='black', lineColor='black')
IntensityAnchors = visual.ImageStim(
win=win,
image= os.sep.join([stimuli_dir,"ratingscale","intensityScale.png"]),
name='intensityAnchors',
mask=None,
ori=0, pos=(0, -0.09), size=(1.5, .4),
color=[1,1,1], colorSpace='rgb', opacity=1,
flipHoriz=False, flipVert=False,
texRes=512, interpolate=True, depth=0.0)
IntensityPrompt = visual.TextStim(win, name='IntensityPrompt',
text=intensityText,
font = 'Arial',
pos=(0, 0.3), height=0.05, wrapWidth=None, ori=0,
color='white', colorSpace='rgb', opacity=1,
languageStyle='LTR',
depth=0.0,
anchorHoriz='center')
# Create some handy timers
globalClock = core.Clock() # to track the time since experiment started
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
OnebackFiles = ["N-back-1_1.xlsx", "N-back-1_2.xlsx", "N-back-1_3.xlsx", "N-back-1_4.xlsx", "N-back-1_5.xlsx", "N-back-1_6.xlsx", "N-back-1_7.xlsx", "N-back-1_8.xlsx"]
TwobackFiles = ["N-back-2_1.xlsx", "N-back-2_2.xlsx", "N-back-2_3.xlsx", "N-back-2_4.xlsx", "N-back-2_5.xlsx", "N-back-2_6.xlsx", "N-back-2_7.xlsx", "N-back-2_8.xlsx"]
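# Pools of condition-file names for the 1-back and 2-back runs (eight orders each).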
if biopac_exists:
biopac.setData(biopac, 0)
biopac.setData(biopac, task_ID) # Start demarcation of the T1 task in Biopac Acqknowledge
win.mouseVisible = False
"""
6. Welcome Instructions
"""
NbackInstructionText1 = "Welcome to the n-back task \n\n\nPlease read the following instructions \nvery carefully.\n\n\n\nExperimenter press [Space] to continue."
NbackInstructionText2 = "During the task you will be presented a white square in one of nine positions on a grid. \n\n\n\n\n\n\nDepending on the instruction, your task is to indicate whether the \ncurrent position is the same as either:\nthe position on the last trial\nor the position two trials ago\n\n\nExperimenter press [Space] to continue."
NbackInstructionText3 = "Between each trial, a fixation cross will appear in the middle of the grid. \n\n\n\n\n\n\n\n\nYou do not need to respond during this time. \nSimply wait for the next trial.\n\n\n\nExperimenter press [Space] to continue."
NbackInstructionText4 = "\n1-back\n\n\n\n\n\n\n\nDuring 1-back you will have to indicate whether the current position matches the position that was presented in the last trial, by either pressing the \"yes\" button (left click) or the \"no\" button (right click).\n\n\nExperimenter press [Space] to show an example."
NbackInstructions.setText(NbackInstructionText1)
NbackInstructions.draw()
win.flip()
# event.waitKeys(keyList = 'space')
continueRoutine = True
event.clearEvents()
while continueRoutine == True:
if 'space' in event.getKeys(keyList = 'space'):
continueRoutine = False
NbackInstructions.setText(NbackInstructionText2)
NbackInstructions.draw()
NbackInstructionImg.setImage(os.path.join(instructions_dir, '1.png'))
NbackInstructionImg.draw()
win.flip()
# event.waitKeys(keyList = 'space')
continueRoutine = True
event.clearEvents()
while continueRoutine == True:
if 'space' in event.getKeys(keyList = 'space'):
continueRoutine = False
NbackInstructions.setText(NbackInstructionText3)
NbackInstructions.draw()
NbackInstructionImg.setImage(os.path.join(instructions_dir, '2.png'))
NbackInstructionImg.draw()
win.flip()
# event.waitKeys(keyList = 'space')
continueRoutine = True
event.clearEvents()
while continueRoutine == True:
if 'space' in event.getKeys(keyList = 'space'):
continueRoutine = False
NbackInstructionImg.setAutoDraw(False)
NbackInstructions.setText(NbackInstructionText4)
NbackInstructions.draw()
NbackInstructionImg.draw()
win.flip()
#event.waitKeys(keyList = 'space')
continueRoutine = True
event.clearEvents()
while continueRoutine == True:
if 'space' in event.getKeys(keyList = 'space'):
continueRoutine = False
routineTimer.reset()
"""
7. Button Test
"""
# ------Prepare to start Routine "trial"-------
continueRoutine = True
# update component parameters for each repeat
checkboxes = [box1, box2]
clicked = []
mouseDown = False
for box in checkboxes:
box.color = "white"
# setup some python lists for storing info about the mouse
mouse.x = []
mouse.y = []
mouse.leftButton = []
mouse.midButton = []
mouse.rightButton = []
mouse.time = []
mouse.clicked_name = []
# keep track of which components have finished
trialComponents = [box1Text, box2Text, box1, box2, box1Check, box2Check, mouse, continueText]
for thisComponent in trialComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
# ButtonTestClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "ButtonTest"-------
while continueRoutine:
# get current time
t = ButtonTestClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=ButtonTestClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *box1* updates
if box1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
box1Text.setAutoDraw(True)
box1.setAutoDraw(True)
# *box2* updates
if box2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
box2Text.setAutoDraw(True)
box2.setAutoDraw(True)
    if mouse.getPressed()[0] == 0 and mouse.getPressed()[2] == 0:
mouseDown = False
if mouse.getPressed()[0]==1 and box1.name not in clicked and not mouseDown:
# box1.color = "black" # replace this with a check mark?
box1Check.setAutoDraw(True)
clicked.append(box1.name)
mouseDown = True
if mouse.getPressed()[2]==1 and box2.name not in clicked and not mouseDown:
# box2.color = "black" # replace this with a check mark?
box2Check.setAutoDraw(True)
clicked.append(box2.name)
mouseDown = True
# *mouse* updates
if mouse.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
mouse.frameNStart = frameN # exact frame index
mouse.tStart = t # local t and not account for scr refresh
mouse.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(mouse, 'tStartRefresh') # time at next scr refresh
mouse.status = STARTED
mouse.mouseClock.reset()
prevButtonState = mouse.getPressed() # if button is down already this ISN'T a new click
if mouse.status == STARTED: # only update if started and not finished!
buttons = mouse.getPressed()
if buttons != prevButtonState: # button state changed?
prevButtonState = buttons
if sum(buttons) > 0: # state changed to a new click
x, y = mouse.getPos()
mouse.x.append(x)
mouse.y.append(y)
buttons = mouse.getPressed()
mouse.leftButton.append(buttons[0])
mouse.midButton.append(buttons[1])
mouse.rightButton.append(buttons[2])
mouse.time.append(mouse.mouseClock.getTime())
if box1.name in clicked and box2.name in clicked:
continueText.setAutoDraw(True)
win.flip()
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
win.flip()
# event.waitKeys(keyList = 'space')
continueRoutine = True
event.clearEvents()
while continueRoutine == True:
if 'space' in event.getKeys(keyList = 'space'):
continueRoutine = False
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in trialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "ButtonTest"-------
for thisComponent in trialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
routineTimer.reset()
"""
8. Start Practice 1-back
"""
turns = 0
score = 0
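# Practice 1-back: repeat the practice block (up to four attempts, turns 0-3) until the
# participant scores above 70%.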
while turns <= 3 and score <= 70:
NbackInstructionText5 = "In the below 1-back example you should not respond to the first trial (as there is no trial before it), make a \"no\" response (right click) on trial 2, since the positions on trials 1 and 2 do not match, and make a \"yes\" response on trial 3, since the position is the same as the position on trial 2.\n\n\n\n\n\n\n\n\n\n"
ClickToContinueText = "Click to continue"
NbackInstructionText6 = "First, we will practice some trials so that you can get used to the procedure.\nAfter each response you'll see whether your response was correct, incorrect, or whether you forgot to respond.\n\n\n\n\n\n\n\n\nGood Luck!"
ClickToStartText = "Click to start practice"
InstructionImageArray = ['7.png', '8.png', '9.png', '10.png', '11.png', '12.png', '13.png', '14.png']
iteration = 0
NbackInstructions.setText(NbackInstructionText5)
NbackInstructions.setAutoDraw(True)
ClickPrompt.setText(ClickToContinueText)
mouse = event.Mouse(win=win, visible=False)
prevButtonState = mouse.getPressed() # if button is down already this ISN'T a new click
buttons = prevButtonState
NbackInstructionWideImg.setImage(os.path.join(instructions_dir, InstructionImageArray[0]))
NbackInstructionWideImg.draw()
if biopac_exists:
biopac.setData(biopac, 0)
biopac.setData(biopac, nback_instructions)
win.flip()
continueRoutine = True
i = 0
stimTimer = core.CountdownTimer(1)
while (continueRoutine == True):
if iteration == 1 and mouse.getPressed()[0] == 1:
continueRoutine = False
break
if i > len(InstructionImageArray)-1:
iteration = 1
i = 0
ClickPrompt.setAutoDraw(True)
if stimTimer.getTime() < 0:
stimTimer = core.CountdownTimer(1)
NbackInstructionWideImg.setImage(os.path.join(instructions_dir, InstructionImageArray[i]))
i=i+1
NbackInstructionWideImg.setAutoDraw(True)
win.flip()
NbackInstructionWideImg.setImage(os.path.join(instructions_dir, InstructionImageArray[len(InstructionImageArray)-1])) # Stay on the last image
NbackInstructions.setText(NbackInstructionText6)
NbackInstructions.setAutoDraw(True)
win.flip()
timer = core.CountdownTimer()
timer.add(2)
while timer.getTime() > 0:
continue
mouse = event.Mouse(win=win, visible=False)
while(mouse.getPressed()[0] != 1):
ClickPrompt.setText(ClickToStartText)
ClickPrompt.setAutoDraw(True)
win.flip()
# Wipe the screen
ClickPrompt.setAutoDraw(False)
NbackInstructions.setAutoDraw(False)
NbackInstructionWideImg.setAutoDraw(False)
if biopac_exists:
biopac.setData(biopac, 0)
win.flip()
routineTimer.reset()
########################
# Practice 1-back Begins
########################
correct = 0
score = 0
"""
8i. Pre-1-Back Task Fixation Cross
"""
# ------Prepare to start Routine "Fixation"-------
continueRoutine = True
routineTimer.add(1.000000) # 1 second pre-task fixation
# update component parameters for each repeat
# keep track of which components have finished
FixationComponents = [fixation_1]
for thisComponent in FixationComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
FixationClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Fixation"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = FixationClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=FixationClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *fixation_1* updates
if fixation_1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
fixation_1.frameNStart = frameN # exact frame index
fixation_1.tStart = t # local t and not account for scr refresh
fixation_1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(fixation_1, 'tStartRefresh') # time at next scr refresh
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, nback_fixation)
fixation_1.setAutoDraw(True)
if fixation_1.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > fixation_1.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
fixation_1.tStop = t # not accounting for scr refresh
fixation_1.frameNStop = frameN # exact frame index
win.timeOnFlip(fixation_1, 'tStopRefresh') # time at next scr refresh
fixation_1.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in FixationComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Fixation"-------
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
for thisComponent in FixationComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('fixation_1.started', fixation_1.tStartRefresh)
thisExp.addData('fixation_1.stopped', fixation_1.tStopRefresh)
routineTimer.reset()
"""
8ii. Practice 1-back Start
"""
# Feedback Text
incorrect_text = "Incorrect!"
noresponse_text = "No Response!"
correct_text = "Correct!"
# set up handler to look after randomisation of conditions etc
Nback1 = os.sep.join([nback_dir, "Practice_N-back-1.xlsx"])
trials = data.TrialHandler(nReps=1, method='sequential',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions(Nback1),
seed=None, name='trials')
thisExp.addLoop(trials) # add the loop to the experiment
thisTrial = trials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
for thisTrial in trials:
currentLoop = trials
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
# ------Prepare to start Routine "N_back_1_Trial"-------
continueRoutine = True
routineTimer.add(2.000000) # Each trial is 2 seconds
feedbacktype = "none"
# update component parameters for each repeat
target_square.setPos(location)
response.rt = []
gotValidClick = False # until a click is received
# keep track of which components have finished
N_back_1_TrialComponents = [grid_lines, target_square, fixation_2, response, Feedback]
for thisComponent in N_back_1_TrialComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
N_back_1_TrialClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "N_back_1_Trial"-------
while continueRoutine and routineTimer.getTime() > 0:
# gotValidClick = False
# get current time
t = N_back_1_TrialClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=N_back_1_TrialClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *grid_lines* updates
if grid_lines.status == NOT_STARTED and tThisFlip >= 0-frameTolerance:
# keep track of start time/frame for later
grid_lines.frameNStart = frameN # exact frame index
grid_lines.tStart = t # local t and not account for scr refresh
grid_lines.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(grid_lines, 'tStartRefresh') # time at next scr refresh
if biopac_exists == 1:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, nback_trial_start)
grid_lines.setAutoDraw(True)
if grid_lines.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > grid_lines.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
grid_lines.tStop = t # not accounting for scr refresh
grid_lines.frameNStop = frameN # exact frame index
win.timeOnFlip(grid_lines, 'tStopRefresh') # time at next scr refresh
grid_lines.setAutoDraw(False)
# *target_square* updates
if target_square.status == NOT_STARTED and tThisFlip >= 0-frameTolerance:
# keep track of start time/frame for later
target_square.frameNStart = frameN # exact frame index
target_square.tStart = t # local t and not account for scr refresh
target_square.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(target_square, 'tStartRefresh') # time at next scr refresh
target_square.setAutoDraw(True)
if target_square.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > target_square.tStartRefresh + 1-frameTolerance:
# keep track of stop time/frame for later
target_square.tStop = t # not accounting for scr refresh
target_square.frameNStop = frameN # exact frame index
win.timeOnFlip(target_square, 'tStopRefresh') # time at next scr refresh
target_square.setAutoDraw(False)
# *fixation_2* updates
if fixation_2.status == NOT_STARTED and tThisFlip >= 1-frameTolerance:
# keep track of start time/frame for later
fixation_2.frameNStart = frameN # exact frame index
fixation_2.tStart = t # local t and not account for scr refresh
fixation_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(fixation_2, 'tStartRefresh') # time at next scr refresh
fixation_2.setAutoDraw(True)
if fixation_2.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > fixation_2.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
fixation_2.tStop = t # not accounting for scr refresh
fixation_2.frameNStop = frameN # exact frame index
win.timeOnFlip(fixation_2, 'tStopRefresh') # time at next scr refresh
fixation_2.setAutoDraw(False)
# *response* updates
waitOnFlip = False
if response.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
response.frameNStart = frameN # exact frame index
response.tStart = t # local t and not account for scr refresh
response.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(response, 'tStartRefresh') # time at next scr refresh
response.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(response.mouseClock.reset) # t=0 on next screen flip
win.callOnFlip(response.clickReset) # t=0 on next screen flip
prevButtonState = response.getPressed() # if button is down already this ISN'T a new click
if response.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > response.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
response.tStop = t # not accounting for scr refresh
response.frameNStop = frameN # exact frame index
win.timeOnFlip(response, 'tStopRefresh') # time at next scr refresh
response.status = FINISHED
if response.status == STARTED and not waitOnFlip:
response.click, response.rt = response.getPressed(getTime = True)
response.click_left = response.click[0]
response.click_right = response.click[2]
response.rt_left = response.rt[0]
response.rt_right = response.rt[2]
if response.click_left != prevButtonState[0] or response.click_right != prevButtonState[2]: # button state changed?
prevButtonState = response.click
if (response.click_left == 1 or response.click_right == 1) and gotValidClick == False:
print(str(response.click), str(response.rt))
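                        # corrAns == 1 means the current position matches (left click / "yes" is correct);
                        # corrAns == 0 means no match (right click / "no" is correct).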
if (corrAns == 1 and response.click_left == 1) or (corrAns == 0 and response.click_right == 1):
response.corr = 1
correct = correct + 1
Feedback.setText(correct_text)
feedbacktype = "pos"
if biopac_exists:
biopac.setData(biopac, 0)
biopac.setData(biopac, nback_hit)
else:
response.corr = 0
Feedback.setText(incorrect_text)
feedbacktype = "neg"
if biopac_exists:
biopac.setData(biopac, 0)
biopac.setData(biopac, nback_comiss) # mark comission error
if response.click_left == 1:
mouse_response = 0;
mouse_response_rt = response.rt_left
elif response.click_right == 1:
mouse_response = 2
mouse_response_rt = response.rt_right
gotValidClick = True
elif response.click_left == 0 and response.click_right == 0 and gotValidClick==False: # No response was made
mouse_response = None
mouse_response_rt = None
if str(corrAns).lower() != 'none':
Feedback.setText(noresponse_text)
feedbacktype = "miss"
else:
Feedback.setText("")
# *Feedback* updates
waitOnFlip = False
if Feedback.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
Feedback.frameNStart = frameN # exact frame index
Feedback.tStart = t # local t and not account for scr refresh
Feedback.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(Feedback, 'tStartRefresh') # time at next scr refresh
Feedback.status = STARTED
Feedback.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in N_back_1_TrialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
if 1 < N_back_1_TrialClock.getTime() < 1.75:
Feedback.draw()
if biopac_exists:
if feedbacktype == "pos":
biopac.setData(biopac, nback_feedback_pos)
if feedbacktype == "neg":
biopac.setData(biopac, nback_feedback_neg)
if feedbacktype == "miss":
biopac.setData(biopac, nback_feedback_miss)
else:
if biopac_exists:
biopac.setData(biopac, 0)
win.flip()
# -------Ending Routine "N_back_1_Trial"-------
if biopac_exists:
biopac.setData(biopac, 0)
for thisComponent in N_back_1_TrialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
if gotValidClick==False: # No response was made
            response.rt = None
if str(corrAns).lower() == 'none':
response.corr=1
correct = correct + 1
else:
response.corr = 0; # failed to respond (incorrectly)
Feedback.setText(noresponse_text)
trials.addData('grid_lines.started', grid_lines.tStartRefresh)
trials.addData('grid_lines.stopped', grid_lines.tStopRefresh)
trials.addData('target_square.started', target_square.tStartRefresh)
trials.addData('target_square.stopped', target_square.tStopRefresh)
trials.addData('fixation_2.started', fixation_2.tStartRefresh)
trials.addData('fixation_2.stopped', fixation_2.tStopRefresh)
# store data for trials (TrialHandler)
trials.addData('response.corr', response.corr)
trials.addData('response.x', x)
trials.addData('response.y', y)
trials.addData('response.leftButton', response.click)
if gotValidClick==True and (response.click_left == 1 or response.click_right == 1): # we had a response
trials.addData('response.rt_left', response.rt_left)
trials.addData('response.rt_right', response.rt_right)
trials.addData('response.click_left',response.click_left)
trials.addData('response.click_right',response.click_right)
trials.addData('response.corr', response.corr)
trials.addData('response.started', response.tStartRefresh)
trials.addData('response.stopped', response.tStopRefresh)
Practice_1back_trial = []
Practice_1back_trial.extend((grid_lines.tStartRefresh, t, mouse_response_rt, mouse_response, response.corr, turns, "1back"))
Practice_1back.append(Practice_1back_trial)
thisExp.nextEntry()
if cheat == 1:
score = 100
else:
score = correct*100/trials.nTotal
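    # score is the percentage of correct practice trials; cheat == 1 forces a passing score
    # so the practice loop can be skipped during testing.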
"""
8iii. Practice 1-back Score Report
"""
# Score Feedback Text
ScoreText = "Your score was " + str(score)
if debug == 1:
TryAgainText = "Let's try that again...\n\n\n" + ScoreText + "\n\n\n\nExperimenter press [Space] to continue."
PleaseWaitText = ScoreText + "\n\n\nPlease wait for the experimenter ..."
PassedText = "Okay! Let's move on.\n\n\n" + ScoreText + "\n\n\n\nExperimenter press [Space] to continue."
PerfectText = "Perfect! Let's move on.\n\n\n" + ScoreText + "\n\n\n\nExperimenter press [Space] to continue."
else:
TryAgainText = "Let's try that again...\n\n\n\n\n\n\n\nExperimenter press [Space] to continue."
PleaseWaitText = "Please wait for the experimenter ..."
PassedText = "Okay! Let's move on.\n\n\n\n\n\n\n\nExperimenter press [Space] to continue."
PerfectText = "Perfect! Let's move on.\n\n\n\n\n\n\n\nExperimenter press [Space] to continue."
# ------Prepare to start Routine "ScoreReport"-------
continueRoutine = True
# update component parameters for each repeat
ScoreReportResponse.keys = []
ScoreReportResponse.rt = []
_ScoreReportResponse_allKeys = []
# keep track of which components have finished
ScoreReportComponents = [ScoreReportText, ScoreReportResponse]
for thisComponent in ScoreReportComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
ScoreReportClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
if (score <= 70):
ScoreReportText.setText(TryAgainText)
nback_feedback = nback_feedback_neg
if turns >= 3 and score <= 70:
ScoreReportText.setText(PleaseWaitText)
nback_feedback = nback_feedback_neg
if (score > 70):
ScoreReportText.setText(PassedText)
nback_feedback = nback_feedback_pos
if (score == 100):
ScoreReportText.setText( PerfectText)
nback_feedback = nback_feedback_pos
# -------Run Routine "ScoreReport"-------
while continueRoutine:
# get current time
t = ScoreReportClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=ScoreReportClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *ScoreReportText* updates
if ScoreReportText.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
ScoreReportText.frameNStart = frameN # exact frame index
ScoreReportText.tStart = t # local t and not account for scr refresh
ScoreReportText.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(ScoreReportText, 'tStartRefresh') # time at next scr refresh
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, nback_feedback)
ScoreReportText.setAutoDraw(True)
# *ScoreReportResponse* updates
waitOnFlip = False
if ScoreReportResponse.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
ScoreReportResponse.frameNStart = frameN # exact frame index
ScoreReportResponse.tStart = t # local t and not account for scr refresh
ScoreReportResponse.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(ScoreReportResponse, 'tStartRefresh') # time at next scr refresh
ScoreReportResponse.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(ScoreReportResponse.clock.reset) # t=0 on next screen flip
win.callOnFlip(ScoreReportResponse.clearEvents, eventType='keyboard') # clear events on next screen flip
if ScoreReportResponse.status == STARTED and not waitOnFlip:
theseKeys = ScoreReportResponse.getKeys(keyList=['space'], waitRelease=False)
_ScoreReportResponse_allKeys.extend(theseKeys)
if len(_ScoreReportResponse_allKeys):
ScoreReportResponse.keys = _ScoreReportResponse_allKeys[-1].name # just the last key pressed
ScoreReportResponse.rt = _ScoreReportResponse_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# Autoresponder
if t >= thisSimKey.rt and autorespond == 1:
_ScoreReportResponse_allKeys.extend([thisSimKey])
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in ScoreReportComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "ScoreReport"-------
if biopac_exists:
biopac.setData(biopac, 0)
for thisComponent in ScoreReportComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('ScoreReportText.started', ScoreReportText.tStartRefresh)
thisExp.addData('ScoreReportText.stopped', ScoreReportText.tStopRefresh)
# check responses
thisExp.addData('ScoreReportResponse.keys', ScoreReportResponse.keys)
thisExp.addData('ScoreReportResponse.started', ScoreReportResponse.tStartRefresh)
thisExp.addData('ScoreReportResponse.stopped', ScoreReportResponse.tStopRefresh)
Practice_1back.append(["score: ", score])
thisExp.nextEntry()
routineTimer.reset()
turns = turns + 1
"""
9. Save Practice-1back File
"""
# each _%s refers to the respective field in the parentheses
Practice_1back_bids_name = sub_dir + os.sep + u'sub-%05d_ses-%02d_task-%s_acq-%s_events.tsv' % (int(expInfo['subject number']), int(expInfo['session']), expName, "Practice1back")
Practice_1back = pd.DataFrame(Practice_1back, columns = ['onset','duration','rt','response','correct','attempt','condition'])
Practice_1back.to_csv(Practice_1back_bids_name, sep="\t")
"""
10. Start Practice 2-back
"""
turns = 0
score = 0
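# Practice 2-back: same structure as the 1-back practice, repeated (up to four attempts)
# until the participant scores above 70%.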
while turns <= 3 and score <= 70:
# ------Prepare to start Routine "Instructions_2"-------
    NbackInstructionText8 = "2-back\n\n\nDuring 2-back you will have to indicate whether the current position matches the position that was presented two trials ago, by either pressing the \"yes\" button (left click) or the \"no\" button (right click).\n\n\nExperimenter press [Space] to see an example."
NbackInstructions.setText(NbackInstructionText8)
NbackInstructions.draw()
if biopac_exists:
biopac.setData(biopac, 0)
biopac.setData(biopac, nback_instructions)
win.flip()
continueRoutine = True
event.clearEvents()
while continueRoutine == True:
if 'space' in event.getKeys(keyList = 'space'):
continueRoutine = False
routineTimer.reset()
NbackInstructionText9 = "In this 2-back example you should not respond to the first trial or the second trial (as there are insufficient previous trials), and make a \"yes\" response (left click) on trial 3, since the position is the same as the position on trial 1.\n\n\n\n\n\n\n\n\n\n"
# Picture Loop 17-30.png
ClickToContinueText = "Click to continue"
NbackInstructionText10 = "Now, we will practice some trials so that you can get used to the procedure.\nAfter each response you'll see whether your response was correct, incorrect, or whether you forgot to respond.\n\n\n\n\n\n\n\n\nGood Luck!"
    ClickToStartText = "Click to start practice"
InstructionImageArray = ['18.png', '19.png', '20.png', '21.png', '22.png', '23.png', '24.png']
iteration = 0
NbackInstructions.setText(NbackInstructionText9)
NbackInstructions.setAutoDraw(True)
ClickPrompt.setText(ClickToContinueText)
mouse = event.Mouse(win=win, visible=False)
prevButtonState = mouse.getPressed() # if button is down already this ISN'T a new click
buttons = prevButtonState
NbackInstructionWideImg.setImage(os.path.join(instructions_dir, InstructionImageArray[0]))
NbackInstructionWideImg.draw()
win.flip()
continueRoutine = True
i = 0
stimTimer = core.CountdownTimer(1)
while (continueRoutine == True):
if iteration == 1 and mouse.getPressed()[0] == 1:
continueRoutine = False
break
if i > len(InstructionImageArray)-1:
iteration = 1
i = 0
ClickPrompt.setAutoDraw(True)
if stimTimer.getTime() < 0:
stimTimer = core.CountdownTimer(1)
NbackInstructionWideImg.setImage(os.path.join(instructions_dir, InstructionImageArray[i]))
NbackInstructionWideImg.draw()
i=i+1
NbackInstructionWideImg.setAutoDraw(True)
win.flip()
NbackInstructionWideImg.setImage(os.path.join(instructions_dir, InstructionImageArray[len(InstructionImageArray)-1])) # Stay on the last image
NbackInstructions.setText(NbackInstructionText10)
NbackInstructions.setAutoDraw(True)
win.flip()
timer = core.CountdownTimer()
timer.add(2)
while timer.getTime() > 0:
continue
mouse = event.Mouse(win=win, visible=False)
while(mouse.getPressed()[0] != 1):
ClickPrompt.setText(ClickToStartText)
ClickPrompt.setAutoDraw(True)
win.flip()
# Wipe the screen
ClickPrompt.setAutoDraw(False)
NbackInstructions.setAutoDraw(False)
NbackInstructionWideImg.setAutoDraw(False)
if biopac_exists:
biopac.setData(biopac, 0)
win.flip()
routineTimer.reset()
########################
# Practice 2-back Begins
########################
Feedback.setText("")
correct = 0
score = 0
"""
10i. Pre-2-Back Task Fixation Cross
"""
# ------Prepare to start Routine "Fixation"-------
continueRoutine = True
routineTimer.add(1.000000) # 1 second pre-task fixation
# update component parameters for each repeat
# keep track of which components have finished
FixationComponents = [fixation_1]
for thisComponent in FixationComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
FixationClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Fixation"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = FixationClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=FixationClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *fixation_1* updates
if fixation_1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
fixation_1.frameNStart = frameN # exact frame index
fixation_1.tStart = t # local t and not account for scr refresh
fixation_1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(fixation_1, 'tStartRefresh') # time at next scr refresh
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, nback_fixation)
fixation_1.setAutoDraw(True)
if fixation_1.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > fixation_1.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
fixation_1.tStop = t # not accounting for scr refresh
fixation_1.frameNStop = frameN # exact frame index
win.timeOnFlip(fixation_1, 'tStopRefresh') # time at next scr refresh
fixation_1.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in FixationComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Fixation"-------
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
for thisComponent in FixationComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('fixation_1.started', fixation_1.tStartRefresh)
thisExp.addData('fixation_1.stopped', fixation_1.tStopRefresh)
routineTimer.reset()
"""
10ii. Practice 2-back Start
"""
# set up handler to look after randomisation of conditions etc
Nback2 = os.sep.join([nback_dir, "Practice_N-back-2.xlsx"])
trials_2 = data.TrialHandler(nReps=1, method='sequential',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions(Nback2),
seed=None, name='trials_2')
thisExp.addLoop(trials_2) # add the loop to the experiment
thisTrial_2 = trials_2.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial_2.rgb)
if thisTrial_2 != None:
for paramName in thisTrial_2:
exec('{} = thisTrial_2[paramName]'.format(paramName))
for thisTrial_2 in trials_2:
currentLoop = trials_2
# abbreviate parameter names if possible (e.g. rgb = thisTrial_2.rgb)
if thisTrial_2 != None:
for paramName in thisTrial_2:
exec('{} = thisTrial_2[paramName]'.format(paramName))
# ------Prepare to start Routine "N_back_2_trials"-------
continueRoutine = True
routineTimer.add(2.000000) # Each trial is 2 seconds
feedbacktype = "none"
# update component parameters for each repeat
target_square_2.setPos(location)
response_2 = event.Mouse(win=win, visible=False) # Re-initialize
response_2.click = []
response_2.rt = []
response_2.corr = []
x, y = [None, None]
gotValidClick = False # until a click is received
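# Response mapping used below: a left click (button index 0) is scored as correct
# when corrAns == 1 and a right click (button index 2) when corrAns == 0
# (i.e., left presumably signals a positional match, right a non-match).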
# keep track of which components have finished
N_back_2_trialsComponents = [grid_lines_2, target_square_2, fixation_3, response_2, Feedback]
for thisComponent in N_back_2_trialsComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
N_back_2_TrialClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "N_back_2_trials"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = N_back_2_TrialClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=N_back_2_TrialClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *response_2* updates
waitOnFlip = False
if response_2.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
response_2.frameNStart = frameN # exact frame index
response_2.tStart = t # local t and not account for scr refresh
response_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(response_2, 'tStartRefresh') # time at next scr refresh
response_2.status = STARTED
waitOnFlip = True
win.callOnFlip(response_2.mouseClock.reset) # t=0 on next screen flip
win.callOnFlip(response_2.clickReset) # t=0 on next screen flip
prevButtonState = response_2.getPressed() # if button is down already this ISN'T a new click
if response_2.status == STARTED: # only update if started and not finished!
if tThisFlipGlobal > response_2.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
response_2.tStop = t # not accounting for scr refresh
response_2.frameNStop = frameN # exact frame index
win.timeOnFlip(response_2, 'tStopRefresh') # time at next scr refresh
response_2.status = FINISHED
if response_2.status == STARTED and not waitOnFlip:
response_2.click, response_2.rt = response_2.getPressed(getTime = True)
response_2.click_left = response_2.click[0]
response_2.click_right = response_2.click[2]
response_2.rt_left = response_2.rt[0]
response_2.rt_right = response_2.rt[2]
if response_2.click_left != prevButtonState[0] or response_2.click_right != prevButtonState[2]: # button state changed?
prevButtonState = response_2.click
if (response_2.click_left == 1 or response_2.click_right == 1) and gotValidClick == False:
print(str(response_2.click), str(response_2.rt))
if (corrAns == 1 and response_2.click_left == 1) or (corrAns == 0 and response_2.click_right == 1):
response_2.corr = 1
correct = correct + 1
Feedback.setText(correct_text)
feedbacktype = "pos"
if biopac_exists:
biopac.setData(biopac, 0)
biopac.setData(biopac, nback_hit)
else:
response_2.corr = 0
Feedback.setText(incorrect_text)
feedbacktype = "neg"
if biopac_exists:
biopac.setData(biopac, 0)
biopac.setData(biopac, nback_comiss) # mark commission error
if response_2.click_left == 1:
mouse_response = 0
mouse_response_rt = response_2.rt_left
elif response_2.click_right == 1:
mouse_response = 2
mouse_response_rt = response_2.rt_right
gotValidClick = True
elif response_2.click_left == 0 and response_2.click_right == 0 and gotValidClick==False: # No response was made
mouse_response = None
mouse_response_rt = None
if str(corrAns).lower() != 'none':
Feedback.setText(noresponse_text)
feedbacktype = "miss"
else:
Feedback.setText("")
# *grid_lines_2* updates
if grid_lines_2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
grid_lines_2.frameNStart = frameN # exact frame index
grid_lines_2.tStart = t # local t and not account for scr refresh
grid_lines_2.tStartRefresh = tThisFlipGlobal # on global time
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, nback_trial_start)
win.timeOnFlip(grid_lines_2, 'tStartRefresh') # time at next scr refresh
grid_lines_2.setAutoDraw(True)
if grid_lines_2.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > grid_lines_2.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
grid_lines_2.tStop = t # not accounting for scr refresh
grid_lines_2.frameNStop = frameN # exact frame index
win.timeOnFlip(grid_lines_2, 'tStopRefresh') # time at next scr refresh
grid_lines_2.setAutoDraw(False)
# *target_square_2* updates
if target_square_2.status == NOT_STARTED and tThisFlip >= 0-frameTolerance:
# keep track of start time/frame for later
target_square_2.frameNStart = frameN # exact frame index
target_square_2.tStart = t # local t and not account for scr refresh
target_square_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(target_square_2, 'tStartRefresh') # time at next scr refresh
target_square_2.setAutoDraw(True)
if target_square_2.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > target_square_2.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
target_square_2.tStop = t # not accounting for scr refresh
target_square_2.frameNStop = frameN # exact frame index
win.timeOnFlip(target_square_2, 'tStopRefresh') # time at next scr refresh
target_square_2.setAutoDraw(False)
# *fixation_3* updates
if fixation_3.status == NOT_STARTED and tThisFlip >= 1-frameTolerance:
# keep track of start time/frame for later
fixation_3.frameNStart = frameN # exact frame index
fixation_3.tStart = t # local t and not account for scr refresh
fixation_3.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(fixation_3, 'tStartRefresh') # time at next scr refresh
fixation_3.setAutoDraw(True)
if fixation_3.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > fixation_3.tStartRefresh + 1-frameTolerance:
# keep track of stop time/frame for later
fixation_3.tStop = t # not accounting for scr refresh
fixation_3.frameNStop = frameN # exact frame index
win.timeOnFlip(fixation_3, 'tStopRefresh') # time at next scr refresh
fixation_3.setAutoDraw(False)
# *Feedback* updates
waitOnFlip = False
if Feedback.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
Feedback.frameNStart = frameN # exact frame index
Feedback.tStart = t # local t and not account for scr refresh
Feedback.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(Feedback, 'tStartRefresh') # time at next scr refresh
Feedback.status = STARTED
Feedback.setAutoDraw(False)
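# Feedback is not auto-drawn here; it is drawn manually in the screen-refresh
# block below, only between 1 and 1.75 s of the 2 s trial.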
# # Autoresponder
# if t >= thisSimKey.rt and autorespond == 1:
# _response_2_allKeys.extend([thisSimKey])
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in N_back_2_trialsComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
if 1 < N_back_2_TrialClock.getTime() < 1.75:
Feedback.draw()
if biopac_exists:
if feedbacktype == "pos":
biopac.setData(biopac, nback_feedback_pos)
if feedbacktype == "neg":
biopac.setData(biopac, nback_feedback_neg)
if feedbacktype == "miss":
biopac.setData(biopac, nback_feedback_miss)
else:
if biopac_exists:
biopac.setData(biopac, 0)
win.flip()
# -------Ending Routine "N_back_2_trials"-------
if biopac_exists:
biopac.setData(biopac, 0)
for thisComponent in N_back_2_trialsComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# Check non-response
if gotValidClick==False: # No response was made
response_2.rt = None
if str(corrAns).lower() == 'none':
response_2.corr=1
correct = correct + 1
else:
response_2.corr = 0 # failed to respond when a response was required
Feedback.setText(noresponse_text)
trials_2.addData('response_2.x', x)
trials_2.addData('response_2.y', y)
trials_2.addData('response_2.leftButton', response_2.click)
trials_2.addData('grid_lines_2.started', grid_lines_2.tStartRefresh)
trials_2.addData('grid_lines_2.stopped', grid_lines_2.tStopRefresh)
trials_2.addData('target_square_2.started', target_square_2.tStartRefresh)
trials_2.addData('target_square_2.stopped', target_square_2.tStopRefresh)
trials_2.addData('fixation_3.started', fixation_3.tStartRefresh)
trials_2.addData('fixation_3.stopped', fixation_3.tStopRefresh)
if gotValidClick==True and (response_2.click_left == 1 or response_2.click_right == 1): # we had a response
trials_2.addData('response_2.rt_left', response_2.rt_left)
trials_2.addData('response_2.rt_right', response_2.rt_right)
# store data for trials_2 (TrialHandler)
trials_2.addData('response_2.click',response_2.click)
trials_2.addData('response_2.corr', response_2.corr)
trials_2.addData('response_2.started', response_2.tStartRefresh)
trials_2.addData('response_2.stopped', response_2.tStopRefresh)
Practice_2back_trial = []
Practice_2back_trial.extend((grid_lines_2.tStartRefresh, t, mouse_response_rt, mouse_response, response_2.corr, turns, "2back"))
Practice_2back.append(Practice_2back_trial)
routineTimer.reset()
thisExp.nextEntry()
if cheat == 1:
score = 100
else:
score = correct*100/trials_2.nTotal
"""
10iii. Practice 2-back Score Report
"""
# Score Feedback Text
ScoreText = "Your score was " + str(score)
if debug == 1:
TryAgainText = "Let's try that again...\n\n\n" + ScoreText + "\n\n\n\nExperimenter press [Space] to continue."
PleaseWaitText = ScoreText + "\n\n\nPlease wait for the experimenter ..."
PassedText = "Okay! Let's move on.\n\n\n" + ScoreText + "\n\n\n\nExperimenter press [Space] to continue."
PerfectText = "Perfect! Let's move on.\n\n\n" + ScoreText + "\n\n\n\nExperimenter press [Space] to continue."
else:
TryAgainText = "Let's try that again...\n\n\n\n\n\n\n\nExperimenter press [Space] to continue."
PleaseWaitText = "Please wait for the experimenter ..."
PassedText = "Okay! Let's move on.\n\n\n\n\n\n\n\nExperimenter press [Space] to continue."
PerfectText = "Perfect! Let's move on.\n\n\n\n\n\n\n\nExperimenter press [Space] to continue."
# ------Prepare to start Routine "ScoreReport_2"-------
continueRoutine = True
# update component parameters for each repeat
ScoreReportResponse.keys = []
ScoreReportResponse.rt = []
_ScoreReportResponse_allKeys = []
# keep track of which components have finished
ScoreReportComponents = [ScoreReportText, ScoreReportResponse]
for thisComponent in ScoreReportComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
ScoreReportClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
if (score <= 70):
ScoreReportText.setText(TryAgainText)
nback_feedback = nback_feedback_neg
if turns >= 3 and score <= 70:
ScoreReportText.setText(PleaseWaitText)
nback_feedback = nback_feedback_neg
if (score > 70):
ScoreReportText.setText(PassedText)
nback_feedback = nback_feedback_pos
if (score == 100):
ScoreReportText.setText(PerfectText)
nback_feedback = nback_feedback_pos
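# Score-report text selection above: score <= 70 shows TryAgainText; if in
# addition turns >= 3, PleaseWaitText overrides it; score > 70 shows PassedText;
# a perfect score of 100 shows PerfectText instead.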
# -------Run Routine "ScoreReport"-------
while continueRoutine:
# get current time
t = ScoreReportClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=ScoreReportClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *ScoreReportText* updates
if ScoreReportText.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
ScoreReportText.frameNStart = frameN # exact frame index
ScoreReportText.tStart = t # local t and not account for scr refresh
ScoreReportText.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(ScoreReportText, 'tStartRefresh') # time at next scr refresh
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, nback_feedback)
ScoreReportText.setAutoDraw(True)
# *ScoreReportResponse* updates
waitOnFlip = False
if ScoreReportResponse.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
ScoreReportResponse.frameNStart = frameN # exact frame index
ScoreReportResponse.tStart = t # local t and not account for scr refresh
ScoreReportResponse.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(ScoreReportResponse, 'tStartRefresh') # time at next scr refresh
ScoreReportResponse.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(ScoreReportResponse.clock.reset) # t=0 on next screen flip
win.callOnFlip(ScoreReportResponse.clearEvents, eventType='keyboard') # clear events on next screen flip
if ScoreReportResponse.status == STARTED and not waitOnFlip:
theseKeys = ScoreReportResponse.getKeys(keyList=['space'], waitRelease=False)
_ScoreReportResponse_allKeys.extend(theseKeys)
if len(_ScoreReportResponse_allKeys):
ScoreReportResponse.keys = _ScoreReportResponse_allKeys[-1].name # just the last key pressed
ScoreReportResponse.rt = _ScoreReportResponse_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# Autoresponder
if t >= thisSimKey.rt and autorespond == 1:
_ScoreReportResponse_allKeys.extend([thisSimKey])
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in ScoreReportComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "ScoreReport"-------
if biopac_exists:
biopac.setData(biopac, 0)
for thisComponent in ScoreReportComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('ScoreReportText.started', ScoreReportText.tStartRefresh)
thisExp.addData('ScoreReportText.stopped', ScoreReportText.tStopRefresh)
# check responses
thisExp.addData('ScoreReportResponse.keys', ScoreReportResponse.keys)
thisExp.addData('ScoreReportResponse.started', ScoreReportResponse.tStartRefresh)
thisExp.addData('ScoreReportResponse.stopped', ScoreReportResponse.tStopRefresh)
Practice_2back.append(["score: ", score])
thisExp.nextEntry()
routineTimer.reset()
turns = turns + 1
"""
11. Save Practice-2back File
"""
# each _%s refers to the respective field in the parentheses
Practice_2back_bids_name = sub_dir + os.sep + u'sub-%05d_ses-%02d_task-%s_acq-%s_events.tsv' % (int(expInfo['subject number']), int(expInfo['session']), expName, "Practice2back")
Practice_2back = pd.DataFrame(Practice_2back, columns = ['onset','duration','rt','response','correct','attempt','condition'])
Practice_2back.to_csv(Practice_2back_bids_name, sep="\t")
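# The practice 2-back log is written as a tab-separated events file with one row
# per trial (columns: onset, duration, rt, response, correct, attempt, condition),
# plus a "score" summary row appended after each attempt.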
###################
# Real Trials Start
###################
NbackInstructionText11 = "The tutorial is now over. We will now begin our scans, after which you will be instructed about the task assigned to you.\n\n\nWe will add some difficulty by periodically delivering painful thermal stimulations to a designated body site.\nDuring the task it is very important that you respond as quickly and as accurately as possible.\n\n\nYou should try to respond shortly after the square is presented. This might be difficult, so it is important that you concentrate!\n\nExperimenter press [Space] to continue."
NbackInstructions.setText(NbackInstructionText11)
NbackInstructions.draw()
win.flip()
continueRoutine = True
event.clearEvents()
while continueRoutine == True:
if 'space' in event.getKeys(keyList = 'space'):
continueRoutine = False
routineTimer.reset()
"""
12. Body-Site Instructions: Instruct the Experimenter on the Body Sites to attach thermodes to at the beginning of each run
"""
for runs in range(len(bodySites)):
# ------Prepare to start Routine "BodySiteInstruction"-------
routineTimer.reset()
continueRoutine = True
# update component parameters for each repeat
BodySiteInstructionRead.keys = []
BodySiteInstructionRead.rt = []
_BodySiteInstructionRead_allKeys = []
# Update instructions and cues based on current run's body-sites:
BodySiteInstructionText.text="Experimenter: \nPlease place the thermode on the: \n" + bodySites[runs].lower()
# keep track of which components have finished
BodySiteInstructionComponents = [BodySiteInstructionText, BodySiteImg, BodySiteInstructionRead]
for thisComponent in BodySiteInstructionComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
BodySiteInstructionClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "BodySiteInstruction"-------
while continueRoutine:
# get current time
t = BodySiteInstructionClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=BodySiteInstructionClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *BodySiteInstructionText* updates
if BodySiteInstructionText.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
BodySiteInstructionText.frameNStart = frameN # exact frame index
BodySiteInstructionText.tStart = t # local t and not account for scr refresh
BodySiteInstructionText.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(BodySiteInstructionText, 'tStartRefresh') # time at next scr refresh
BodySiteInstructionText.setAutoDraw(True)
# *BodySiteImg* updates
if BodySiteImg.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
BodySiteImg.image = bodysite_word2img[bodySites[runs]]
BodySiteImg.pos = (0, .2)
# keep track of start time/frame for later
BodySiteImg.frameNStart = frameN # exact frame index
BodySiteImg.tStart = t # local t and not account for scr refresh
BodySiteImg.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(BodySiteImg, 'tStartRefresh') # time at next scr refresh
BodySiteImg.setAutoDraw(True)
# *BodySiteInstructionRead* updates
waitOnFlip = False
if BodySiteInstructionRead.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
BodySiteInstructionRead.frameNStart = frameN # exact frame index
BodySiteInstructionRead.tStart = t # local t and not account for scr refresh
BodySiteInstructionRead.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(BodySiteInstructionRead, 'tStartRefresh') # time at next scr refresh
BodySiteInstructionRead.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(print, "Cueing Off All Biopac Channels")
win.callOnFlip(print, "Showing BodySite Instructions")
win.callOnFlip(print, "Cueing Biopac Channel: " + str(bodymapping_instruction))
if biopac_exists == 1:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, bodymapping_instruction)
win.callOnFlip(BodySiteInstructionRead.clock.reset) # t=0 on next screen flip
win.callOnFlip(BodySiteInstructionRead.clearEvents, eventType='keyboard') # clear events on next screen flip
if BodySiteInstructionRead.status == STARTED and not waitOnFlip:
theseKeys = BodySiteInstructionRead.getKeys(keyList=['space'], waitRelease=False)
_BodySiteInstructionRead_allKeys.extend(theseKeys)
if len(_BodySiteInstructionRead_allKeys):
BodySiteInstructionRead.keys = _BodySiteInstructionRead_allKeys[-1].name # just the last key pressed
BodySiteInstructionRead.rt = _BodySiteInstructionRead_allKeys[-1].rt
# a response ends the routine
continueRoutine = False
# Autoresponder
if t >= thisSimKey.rt and autorespond == 1:
_BodySiteInstructionRead_allKeys.extend([thisSimKey])
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in BodySiteInstructionComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "BodySiteInstruction"-------
print("CueOff Channel: " + str(bodymapping_instruction))
if biopac_exists == 1:
biopac.setData(biopac, 0)
for thisComponent in BodySiteInstructionComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('BodySiteInstructionText.started', BodySiteInstructionText.tStartRefresh)
thisExp.addData('BodySiteImg.started', BodySiteImg.tStartRefresh)
thisExp.addData('BodySiteImg.stopped', BodySiteImg.tStopRefresh)
# check responses
if BodySiteInstructionRead.keys in ['', [], None]: # No response was made
BodySiteInstructionRead.keys = None
thisExp.addData('BodySiteInstructionRead.keys',BodySiteInstructionRead.keys)
if BodySiteInstructionRead.keys != None: # we had a response
thisExp.addData('BodySiteInstructionRead.rt', BodySiteInstructionRead.rt)
thisExp.addData('BodySiteInstructionRead.started', BodySiteInstructionRead.tStartRefresh)
thisExp.addData('BodySiteInstructionRead.stopped', BodySiteInstructionRead.tStopRefresh)
# Start a new BIDS data collection array for each run
bodymap_bids_data = []
# the Routine "BodySiteInstruction" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
"""
13. Start Scanner
"""
start = visual.TextStim(win, text=start_msg, height=.05, color=win.rgb + 0.5)
start.draw() # Automatically draw every frame
win.flip()
fmriStart = globalClock.getTime() # Start the clock
if autorespond != 1:
TR = 0.46
continueRoutine = True
event.clearEvents()
while continueRoutine == True:
if 's' in event.getKeys(keyList = 's'): # experimenter start key - safe key before fMRI trigger
event.clearEvents()
while continueRoutine == True:
if '5' in event.getKeys(keyList = '5'): # fMRI trigger
fmriStart = globalClock.getTime() # Start the clock
timer = core.CountdownTimer() # Wait 6 TRs, Dummy Scans
timer.add(TR*6)
while timer.getTime() > 0:
continue
continueRoutine = False
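# Scanner start logic above: the experimenter presses 's' to arm the routine,
# the scanner's '5' trigger starts the fMRI clock (fmriStart), and the script
# then idles for 6 dummy TRs (TR hard-coded to 0.46 s here) before continuing.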
"""
14. Begin First 1-Back Trials
"""
bodySiteData = bodySites[runs]
temperature = participant_settingsHeat[bodySites[runs]]
BiopacChannel = bodysite_word2heatcode[bodySites[runs]]
thermodeCommand = thermode1_temp2program[participant_settingsHeat[bodySites[runs]]]
routineTimer.reset()
NbackInstructions.setText("The following trials will be 1-back, please indicate whether or not the square in the current position matches the position that was presented in the last trial.")
NbackInstructions.draw()
if biopac_exists:
biopac.setData(biopac, 0)
biopac.setData(biopac, nback_instructions)
win.flip()
timer = core.CountdownTimer()
timer.add(10)
while timer.getTime() > 0:
continue
routineTimer.reset()
jitter2 = None # Reset jitter2
for r in range(4): # 4 repetitions
"""
14i. Select Medoc Thermal Program
"""
if thermode_exists == 1:
sendCommand('select_tp', thermodeCommand)
"""
14ii. Pre-1-Back Task Fixation Cross
"""
# ------Prepare to start Routine "Fixation"-------
continueRoutine = True
if not jitter2:
jitter1 = random.choice([5,7.5,10])
elif jitter2 == 5:
jitter1 = 10
elif jitter2 == 7.5:
jitter1 = 7.5
elif jitter2 == 10:
jitter1 = 5
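# Jitter scheme: on the first repetition jitter1 is drawn from {5, 7.5, 10} s;
# afterwards it is chosen to complement the previous repetition's jitter2 so
# that the two jitters sum to 15 s.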
routineTimer.add(jitter1)
# update component parameters for each repeat
# keep track of which components have finished
FixationComponents = [fixation_1]
for thisComponent in FixationComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
FixationClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Fixation"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = FixationClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=FixationClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *fixation_1* updates
if fixation_1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
fixation_1.frameNStart = frameN # exact frame index
fixation_1.tStart = t # local t and not account for scr refresh
fixation_1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(fixation_1, 'tStartRefresh') # time at next scr refresh
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, nback_fixation)
fixation_1.setAutoDraw(True)
if fixation_1.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > fixation_1.tStartRefresh + jitter1-frameTolerance:
# keep track of stop time/frame for later
fixation_1.tStop = t # not accounting for scr refresh
fixation_1.frameNStop = frameN # exact frame index
win.timeOnFlip(fixation_1, 'tStopRefresh') # time at next scr refresh
fixation_1.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in FixationComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Fixation"-------
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
for thisComponent in FixationComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('fixation_1.started', fixation_1.tStartRefresh)
thisExp.addData('fixation_1.stopped', fixation_1.tStopRefresh)
routineTimer.reset()
"""
14iii. First Phase: 4 trials of 1-Back Task Start
"""
# set up handler to look after randomisation of conditions etc
if not OnebackFiles:
OnebackFiles = ["N-back-1_1.xlsx", "N-back-1_2.xlsx", "N-back-1_3.xlsx", "N-back-1_4.xlsx", "N-back-1_5.xlsx", "N-back-1_6.xlsx", "N-back-1_7.xlsx", "N-back-1_8.xlsx"]
Nback = os.sep.join([nback_dir, OnebackFiles.pop()])
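# Each 1-back block pops the next condition spreadsheet from OnebackFiles;
# the list is refilled with all eight files once it runs empty.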
trials = data.TrialHandler(nReps=1, method='sequential',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions(Nback), # conditions are presented in the spreadsheet's order (method='sequential')
seed=None, name='trials')
thisExp.addLoop(trials) # add the loop to the experiment
thisTrial = trials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
for thisTrial in trials:
currentLoop = trials
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
# ------Prepare to start Routine "N_back_1_Trial"-------
# Trigger Thermal Program
if trials.thisTrialN == 4 and thermode_exists == 1:
sendCommand('trigger') # Trigger the thermode
continueRoutine = True
routineTimer.add(2.000000)
# update component parameters for each repeat
target_square.setPos(location)
response.rt = []
gotValidClick = False # until a click is received
# keep track of which components have finished
N_back_1_TrialComponents = [grid_lines, target_square, fixation_2, response]
for thisComponent in N_back_1_TrialComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
N_back_1_TrialClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "N_back_1_Trial"-------
onset = globalClock.getTime() - fmriStart # Record onset time of the trial
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = N_back_1_TrialClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=N_back_1_TrialClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *grid_lines* updates
if grid_lines.status == NOT_STARTED and tThisFlip >= 0-frameTolerance:
# keep track of start time/frame for later
grid_lines.frameNStart = frameN # exact frame index
grid_lines.tStart = t # local t and not account for scr refresh
grid_lines.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(grid_lines, 'tStartRefresh') # time at next scr refresh
if biopac_exists == 1:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, nback_trial_start)
grid_lines.setAutoDraw(True)
if grid_lines.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > grid_lines.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
grid_lines.tStop = t # not accounting for scr refresh
grid_lines.frameNStop = frameN # exact frame index
win.timeOnFlip(grid_lines, 'tStopRefresh') # time at next scr refresh
grid_lines.setAutoDraw(False)
# *target_square* updates
if target_square.status == NOT_STARTED and tThisFlip >= 0-frameTolerance:
# keep track of start time/frame for later
target_square.frameNStart = frameN # exact frame index
target_square.tStart = t # local t and not account for scr refresh
target_square.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(target_square, 'tStartRefresh') # time at next scr refresh
target_square.setAutoDraw(True)
if target_square.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > target_square.tStartRefresh + 1-frameTolerance:
# keep track of stop time/frame for later
target_square.tStop = t # not accounting for scr refresh
target_square.frameNStop = frameN # exact frame index
win.timeOnFlip(target_square, 'tStopRefresh') # time at next scr refresh
target_square.setAutoDraw(False)
# *fixation_2* updates
if fixation_2.status == NOT_STARTED and tThisFlip >= 1-frameTolerance:
# keep track of start time/frame for later
fixation_2.frameNStart = frameN # exact frame index
fixation_2.tStart = t # local t and not account for scr refresh
fixation_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(fixation_2, 'tStartRefresh') # time at next scr refresh
fixation_2.setAutoDraw(True)
if fixation_2.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > fixation_2.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
fixation_2.tStop = t # not accounting for scr refresh
fixation_2.frameNStop = frameN # exact frame index
win.timeOnFlip(fixation_2, 'tStopRefresh') # time at next scr refresh
fixation_2.setAutoDraw(False)
# *response* updates
waitOnFlip = False
if response.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
response.frameNStart = frameN # exact frame index
response.tStart = t # local t and not account for scr refresh
response.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(response, 'tStartRefresh') # time at next scr refresh
response.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(response.mouseClock.reset) # t=0 on next screen flip
win.callOnFlip(response.clickReset) # t=0 on next screen flip
prevButtonState = response.getPressed() # if button is down already this ISN'T a new click
if response.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > response.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
response.tStop = t # not accounting for scr refresh
response.frameNStop = frameN # exact frame index
win.timeOnFlip(response, 'tStopRefresh') # time at next scr refresh
response.status = FINISHED
if response.status == STARTED and not waitOnFlip:
response.click, response.rt = response.getPressed(getTime = True)
response.click_left = response.click[0]
response.click_right = response.click[2]
response.rt_left = response.rt[0]
response.rt_right = response.rt[2]
if response.click_left != prevButtonState[0] or response.click_right != prevButtonState[2]: # button state changed?
prevButtonState = response.click
if (response.click_left == 1 or response.click_right == 1) and gotValidClick == False:
print(str(response.click), str(response.rt))
if (corrAns == 1 and response.click_left == 1) or (corrAns == 0 and response.click_right == 1):
response.corr = 1
correct = correct + 1
if biopac_exists:
biopac.setData(biopac, 0)
biopac.setData(biopac, nback_hit)
else:
response.corr = 0
if biopac_exists:
biopac.setData(biopac, 0)
biopac.setData(biopac, nback_comiss) # mark commission error
if response.click_left == 1:
mouse_response = 0
mouse_response_rt = response.rt_left
elif response.click_right == 1:
mouse_response = 2
mouse_response_rt = response.rt_right
gotValidClick = True
elif response.click_left == 0 and response.click_right == 0 and gotValidClick==False: # No response was made
mouse_response = None
mouse_response_rt = None
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in N_back_1_TrialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "N_back_1_Trial"-------
if biopac_exists:
biopac.setData(biopac, 0)
for thisComponent in N_back_1_TrialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
if gotValidClick==False: # No response was made
response.rt = None
if str(corrAns).lower() == 'none':
response.corr=1
correct = correct + 1
else:
response.corr = 0 # failed to respond when a response was required
trials.addData('grid_lines.started', grid_lines.tStartRefresh)
trials.addData('grid_lines.stopped', grid_lines.tStopRefresh)
trials.addData('target_square.started', target_square.tStartRefresh)
trials.addData('target_square.stopped', target_square.tStopRefresh)
trials.addData('fixation_2.started', fixation_2.tStartRefresh)
trials.addData('fixation_2.stopped', fixation_2.tStopRefresh)
# store data for trials (TrialHandler)
trials.addData('response.corr', response.corr)
trials.addData('response.x', x)
trials.addData('response.y', y)
trials.addData('response.leftButton', response.click)
if gotValidClick==True and (response.click_left == 1 or response.click_right == 1): # we had a response
trials.addData('response.rt_left', response.rt_left)
trials.addData('response.rt_right', response.rt_right)
trials.addData('response.click',response.click)
trials.addData('response.corr', response.corr)
trials.addData('response.started', response.tStartRefresh)
trials.addData('response.stopped', response.tStopRefresh)
distractmap_bids_trial = []
distractmap_bids_trial.extend((onset, t, mouse_response_rt, mouse_response, response.corr, bodySites[runs], temperature, "1back"))
distractmap_bids.append(distractmap_bids_trial)
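# Each 1-back trial row stored for the BIDS-style events file: onset (s from
# fmriStart), duration (last sampled t), rt, response (0 = left, 2 = right),
# accuracy, body site, stimulation temperature, and the condition label "1back".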
routineTimer.reset()
thisExp.nextEntry()
"""
14iv. Post First 1-Back Fixation Cross
"""
# ------Prepare to start Routine "Fixation"-------
continueRoutine = True
jitter2 = random.choice([5,7.5,10])
routineTimer.add(jitter2)
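# jitter2 is drawn independently here; the next repetition's pre-trial jitter1
# is then set to complement it (see the selection logic at the top of the loop).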
# update component parameters for each repeat
# keep track of which components have finished
FixationComponents = [fixation_1]
for thisComponent in FixationComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
FixationClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Fixation"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = FixationClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=FixationClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *fixation_1* updates
if fixation_1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
fixation_1.frameNStart = frameN # exact frame index
fixation_1.tStart = t # local t and not account for scr refresh
fixation_1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(fixation_1, 'tStartRefresh') # time at next scr refresh
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, nback_fixation)
fixation_1.setAutoDraw(True)
if fixation_1.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > fixation_1.tStartRefresh + jitter2-frameTolerance:
# keep track of stop time/frame for later
fixation_1.tStop = t # not accounting for scr refresh
fixation_1.frameNStop = frameN # exact frame index
win.timeOnFlip(fixation_1, 'tStopRefresh') # time at next scr refresh
fixation_1.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in FixationComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Fixation"-------
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
for thisComponent in FixationComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('fixation_1.started', fixation_1.tStartRefresh)
thisExp.addData('fixation_1.stopped', fixation_1.tStopRefresh)
routineTimer.reset()
"""
14v. Phase-1 1-back Pain Rating Trial
"""
# ------Prepare to start Routine "IntensityRating"-------
continueRoutine = True
routineTimer.add(ratingTime)
# update component parameters for each repeat
# keep track of which components have finished
IntensityMouse = event.Mouse(win=win, visible=False) # Re-initialize IntensityMouse
IntensityMouse.setPos((0,0))
timeAtLastInterval = 0
mouseX = 0
oldMouseX = 0
IntensityRating.width = abs(sliderMin)
IntensityRating.pos = [sliderMin/2, -.1]
IntensityRatingComponents = [IntensityMouse, IntensityBlackTriangle, IntensityRating, IntensityAnchors, IntensityPrompt]
for thisComponent in IntensityRatingComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
IntensityRatingClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
IntensityRating.fillColor='red'
obtainedRating = 0
# -------Run Routine "IntensityRating"-------
onset = globalClock.getTime() - fmriStart # Record onset time of the trial
while continueRoutine:
if obtainedRating == 0:
timeNow = globalClock.getTime()
if (timeNow - timeAtLastInterval) > TIME_INTERVAL:
mouseRel=IntensityMouse.getRel()
mouseX=oldMouseX + mouseRel[0]
IntensityRating.pos = ((sliderMin + mouseX)/2,0)
IntensityRating.width = abs((mouseX-sliderMin))
if mouseX > sliderMax:
mouseX = sliderMax
if mouseX < sliderMin:
mouseX = sliderMin
timeAtLastInterval = timeNow
oldMouseX=mouseX
sliderValue = (mouseX - sliderMin) / (sliderMax - sliderMin) * 100
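# Mouse-driven rating bar: relative horizontal mouse movement (sampled every
# TIME_INTERVAL seconds) grows or shrinks the bar from sliderMin toward
# sliderMax, and sliderValue rescales the cursor position to a 0-100 rating.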
# get current time
t = IntensityRatingClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=IntensityRatingClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *IntensityMouse* updates
if IntensityMouse.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
IntensityMouse.frameNStart = frameN # exact frame index
IntensityMouse.tStart = t # local t and not account for scr refresh
IntensityMouse.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(IntensityMouse, 'tStartRefresh') # time at next scr refresh
IntensityMouse.status = STARTED
IntensityMouse.mouseClock.reset()
prevButtonState = IntensityMouse.getPressed() # if button is down already this ISN'T a new click
if IntensityMouse.status == STARTED: # only update if started and not finished!
if tThisFlipGlobal > IntensityMouse.tStartRefresh + ratingTime-frameTolerance:
# keep track of stop time/frame for later
IntensityMouse.tStop = t # not accounting for scr refresh
IntensityMouse.frameNStop = frameN # exact frame index
IntensityMouse.status = FINISHED
buttons = IntensityMouse.getPressed()
if buttons != prevButtonState: # button state changed?
prevButtonState = buttons
if sum(buttons) > 0: # state changed to a new click
IntensityRating.fillColor='white'
obtainedRating = 1
# *IntensityRating* updates
if IntensityRating.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
IntensityRating.frameNStart = frameN # exact frame index
IntensityRating.tStart = t # local t and not account for scr refresh
IntensityRating.tStartRefresh = tThisFlipGlobal # on global time
win.callOnFlip(print, "Show Intensity Rating")
if biopac_exists == 1:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, intensity_rating)
win.timeOnFlip(IntensityRating, 'tStartRefresh') # time at next scr refresh
IntensityRating.setAutoDraw(True)
if IntensityRating.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > IntensityRating.tStartRefresh + ratingTime-frameTolerance:
# keep track of stop time/frame for later
IntensityRating.tStop = t # not accounting for scr refresh
IntensityRating.frameNStop = frameN # exact frame index
win.timeOnFlip(IntensityRating, 'tStopRefresh') # time at next scr refresh
IntensityRating.setAutoDraw(False)
# *IntensityBlackTriangle* updates
if IntensityBlackTriangle.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
IntensityBlackTriangle.frameNStart = frameN # exact frame index
IntensityBlackTriangle.tStart = t # local t and not account for scr refresh
IntensityBlackTriangle.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(IntensityBlackTriangle, 'tStartRefresh') # time at next scr refresh
IntensityBlackTriangle.setAutoDraw(True)
if IntensityBlackTriangle.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > IntensityBlackTriangle.tStartRefresh + ratingTime-frameTolerance:
# keep track of stop time/frame for later
IntensityBlackTriangle.tStop = t # not accounting for scr refresh
IntensityBlackTriangle.frameNStop = frameN # exact frame index
win.timeOnFlip(IntensityBlackTriangle, 'tStopRefresh') # time at next scr refresh
IntensityBlackTriangle.setAutoDraw(False)
# *IntensityAnchors* updates
if IntensityAnchors.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
IntensityAnchors.frameNStart = frameN # exact frame index
IntensityAnchors.tStart = t # local t and not account for scr refresh
IntensityAnchors.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(IntensityAnchors, 'tStartRefresh') # time at next scr refresh
IntensityAnchors.setAutoDraw(True)
if IntensityAnchors.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > IntensityAnchors.tStartRefresh + ratingTime-frameTolerance:
# keep track of stop time/frame for later
IntensityAnchors.tStop = t # not accounting for scr refresh
IntensityAnchors.frameNStop = frameN # exact frame index
win.timeOnFlip(IntensityAnchors, 'tStopRefresh') # time at next scr refresh
IntensityAnchors.setAutoDraw(False)
# *IntensityPrompt* updates
if IntensityPrompt.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
IntensityPrompt.frameNStart = frameN # exact frame index
IntensityPrompt.tStart = t # local t and not account for scr refresh
IntensityPrompt.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(IntensityPrompt, 'tStartRefresh') # time at next scr refresh
IntensityPrompt.setAutoDraw(True)
if IntensityPrompt.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > IntensityPrompt.tStartRefresh + ratingTime-frameTolerance:
# keep track of stop time/frame for later
IntensityPrompt.tStop = t # not accounting for scr refresh
IntensityPrompt.frameNStop = frameN # exact frame index
win.timeOnFlip(IntensityPrompt, 'tStopRefresh') # time at next scr refresh
IntensityPrompt.setAutoDraw(False)
# Autoresponder
if t >= thisSimKey.rt and autorespond == 1:
sliderValue = random.randint(0,100)
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in IntensityRatingComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine:
win.flip()
# -------Ending Routine "IntensityRating"-------
print("CueOff Channel " + str(intensity_rating))
for thisComponent in IntensityRatingComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# store data for thisExp (ExperimentHandler)
thisExp.addData('IntensityRating.response', sliderValue)
thisExp.addData('IntensityRating.rt', timeNow - (fmriStart + onset)) # time of final rating update relative to routine onset (both on globalClock)
thisExp.addData('IntensityRating.started', IntensityRating.tStart)
thisExp.addData('IntensityRating.stopped', IntensityRating.tStop)
thisExp.nextEntry()
rating_bids_trial = []
rating_bids_trial.extend((onset, t, bodySites[runs], sliderValue, temperature, "1back", jitter1, jitter2))
rating_bids.append(rating_bids_trial)
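# Each intensity-rating row stored for later export: onset, elapsed time t,
# body site, the 0-100 sliderValue, temperature, the "1back" condition label,
# and the two jitters bracketing this trial.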
# the Routine "IntensityRating" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
"""
15. Begin First 2-Back Trials
"""
NbackInstructions.setText("The following trials will be 2-back, please indicate whether or not the square in the current position matches the position that was presented two trials before.")
NbackInstructions.draw()
if biopac_exists:
biopac.setData(biopac, 0)
biopac.setData(biopac, nback_instructions)
win.flip()
timer = core.CountdownTimer()
timer.add(10)
while timer.getTime() > 0:
continue
routineTimer.reset()
jitter2=None # Reset jitter2
for r in range(8): # 8 repetitions
"""
15i. Select Medoc Thermal Program
"""
if thermode_exists == 1:
sendCommand('select_tp', thermodeCommand)
"""
15ii. Pre-2-Back Task Fixation Cross
"""
# ------Prepare to start Routine "Fixation"-------
continueRoutine = True
if not jitter2:
jitter1 = random.choice([5,7.5,10])
elif jitter2 == 5:
jitter1 = 10
elif jitter2 == 7.5:
jitter1 = 7.5
elif jitter2 == 10:
jitter1 = 5
routineTimer.add(jitter1)
# update component parameters for each repeat
# keep track of which components have finished
FixationComponents = [fixation_1]
for thisComponent in FixationComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
FixationClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Fixation"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = FixationClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=FixationClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *fixation_1* updates
if fixation_1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
fixation_1.frameNStart = frameN # exact frame index
fixation_1.tStart = t # local t and not account for scr refresh
fixation_1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(fixation_1, 'tStartRefresh') # time at next scr refresh
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, nback_fixation)
fixation_1.setAutoDraw(True)
if fixation_1.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > fixation_1.tStartRefresh + jitter1-frameTolerance:
# keep track of stop time/frame for later
fixation_1.tStop = t # not accounting for scr refresh
fixation_1.frameNStop = frameN # exact frame index
win.timeOnFlip(fixation_1, 'tStopRefresh') # time at next scr refresh
fixation_1.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in FixationComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Fixation"-------
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
for thisComponent in FixationComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('fixation_1.started', fixation_1.tStartRefresh)
thisExp.addData('fixation_1.stopped', fixation_1.tStopRefresh)
routineTimer.reset()
"""
15iii. Second Phase: 8 trials of 2-Back Task Start
"""
# set up handler to look after randomisation of conditions etc
if not TwobackFiles:
TwobackFiles = ["N-back-2_1.xlsx", "N-back-2_2.xlsx", "N-back-2_3.xlsx", "N-back-2_4.xlsx", "N-back-2_5.xlsx", "N-back-2_6.xlsx", "N-back-2_7.xlsx", "N-back-2_8.xlsx"]
Nback = os.sep.join([nback_dir, TwobackFiles.pop()])
trials_2 = data.TrialHandler(nReps=1, method='sequential',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions(Nback),
seed=None, name='trials_2')
thisExp.addLoop(trials_2) # add the loop to the experiment
thisTrial_2 = trials_2.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial_2.rgb)
if thisTrial_2 != None:
for paramName in thisTrial_2:
exec('{} = thisTrial_2[paramName]'.format(paramName))
for thisTrial_2 in trials_2:
currentLoop = trials_2
# abbreviate parameter names if possible (e.g. rgb = thisTrial_2.rgb)
if thisTrial_2 != None:
for paramName in thisTrial_2:
exec('{} = thisTrial_2[paramName]'.format(paramName))
# ------Prepare to start Routine "N_back_2_trials"-------
# Trigger Thermal Program
if trials_2.thisTrialN == 4 and thermode_exists == 1:
sendCommand('trigger')
continueRoutine = True
routineTimer.add(2.000000)
# update component parameters for each repeat
target_square_2.setPos(location)
response_2 = event.Mouse(win=win, visible=False) # Re-initialize
response_2.click = []
response_2.rt = []
response_2.corr = []
x, y = [None, None]
gotValidClick = False # until a click is received
# keep track of which components have finished
N_back_2_trialsComponents = [grid_lines_2, target_square_2, fixation_3, response_2, Feedback]
for thisComponent in N_back_2_trialsComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
N_back_2_TrialClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "N_back_2_trials"-------
onset = globalClock.getTime() - fmriStart
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = N_back_2_TrialClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=N_back_2_TrialClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *response_2* updates
waitOnFlip = False
if response_2.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
response_2.frameNStart = frameN # exact frame index
response_2.tStart = t # local t and not account for scr refresh
response_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(response_2, 'tStartRefresh') # time at next scr refresh
response_2.status = STARTED
waitOnFlip = True
win.callOnFlip(response_2.mouseClock.reset) # t=0 on next screen flip
win.callOnFlip(response_2.clickReset) # t=0 on next screen flip
prevButtonState = response_2.getPressed() # if button is down already this ISN'T a new click
if response_2.status == STARTED: # only update if started and not finished!
if tThisFlipGlobal > response_2.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
response_2.tStop = t # not accounting for scr refresh
response_2.frameNStop = frameN # exact frame index
win.timeOnFlip(response_2, 'tStopRefresh') # time at next scr refresh
response_2.status = FINISHED
if response_2.status == STARTED and not waitOnFlip:
response_2.click, response_2.rt = response_2.getPressed(getTime = True)
response_2.click_left = response_2.click[0]
response_2.click_right = response_2.click[2]
response_2.rt_left = response_2.rt[0]
response_2.rt_right = response_2.rt[2]
if response_2.click_left != prevButtonState[0] or response_2.click_right != prevButtonState[2]: # button state changed?
prevButtonState = response_2.click
if (response_2.click_left == 1 or response_2.click_right == 1) and gotValidClick == False:
print(str(response_2.click), str(response_2.rt))
if (corrAns == 1 and response_2.click_left == 1) or (corrAns == 0 and response_2.click_right == 1):
response_2.corr = 1
correct = correct + 1
if biopac_exists:
biopac.setData(biopac, 0)
biopac.setData(biopac, nback_hit)
else:
response_2.corr = 0
if biopac_exists:
biopac.setData(biopac, 0)
biopac.setData(biopac, nback_comiss) # mark commission error
if response_2.click_left == 1:
mouse_response = 0
mouse_response_rt = response_2.rt_left
elif response_2.click_right == 1:
mouse_response = 2
mouse_response_rt = response_2.rt_right
gotValidClick = True
elif response_2.click_left == 0 and response_2.click_right == 0 and gotValidClick==False: # No response was made
mouse_response = None
mouse_response_rt = None
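# Response coding used above (semantics inferred from the conditions file): corrAns == 1
# appears to mean the current location matches the one shown 2 trials back, so a LEFT
# click (index 0 of getPressed()) is scored correct; corrAns == 0 means "no match", so a
# RIGHT click (index 2) is correct. mouse_response stores the button index (0 = left,
# 2 = right) and mouse_response_rt the corresponding button's RT.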
# *grid_lines_2* updates
if grid_lines_2.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
grid_lines_2.frameNStart = frameN # exact frame index
grid_lines_2.tStart = t # local t and not account for scr refresh
grid_lines_2.tStartRefresh = tThisFlipGlobal # on global time
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, nback_trial_start)
win.timeOnFlip(grid_lines_2, 'tStartRefresh') # time at next scr refresh
grid_lines_2.setAutoDraw(True)
if grid_lines_2.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > grid_lines_2.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
grid_lines_2.tStop = t # not accounting for scr refresh
grid_lines_2.frameNStop = frameN # exact frame index
win.timeOnFlip(grid_lines_2, 'tStopRefresh') # time at next scr refresh
grid_lines_2.setAutoDraw(False)
# *target_square_2* updates
if target_square_2.status == NOT_STARTED and tThisFlip >= 0-frameTolerance:
# keep track of start time/frame for later
target_square_2.frameNStart = frameN # exact frame index
target_square_2.tStart = t # local t and not account for scr refresh
target_square_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(target_square_2, 'tStartRefresh') # time at next scr refresh
target_square_2.setAutoDraw(True)
if target_square_2.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > target_square_2.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
target_square_2.tStop = t # not accounting for scr refresh
target_square_2.frameNStop = frameN # exact frame index
win.timeOnFlip(target_square_2, 'tStopRefresh') # time at next scr refresh
target_square_2.setAutoDraw(False)
# *fixation_3* updates
if fixation_3.status == NOT_STARTED and tThisFlip >= 1-frameTolerance:
# keep track of start time/frame for later
fixation_3.frameNStart = frameN # exact frame index
fixation_3.tStart = t # local t and not account for scr refresh
fixation_3.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(fixation_3, 'tStartRefresh') # time at next scr refresh
fixation_3.setAutoDraw(True)
if fixation_3.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > fixation_3.tStartRefresh + 1-frameTolerance:
# keep track of stop time/frame for later
fixation_3.tStop = t # not accounting for scr refresh
fixation_3.frameNStop = frameN # exact frame index
win.timeOnFlip(fixation_3, 'tStopRefresh') # time at next scr refresh
fixation_3.setAutoDraw(False)
# # Autoresponder
# if t >= thisSimKey.rt and autorespond == 1:
# _response_2_allKeys.extend([thisSimKey])
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in N_back_2_trialsComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "N_back_2_trials"-------
if biopac_exists:
biopac.setData(biopac, 0)
for thisComponent in N_back_2_trialsComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# Check non-response
if gotValidClick==False: # No response was made
response_2.rt = None
if str(corrAns).lower() == 'none':
response_2.corr=1
correct = correct + 1
else:
response_2.corr = 0 # failed to respond (scored as incorrect)
trials_2.addData('response_2.x', x)
trials_2.addData('response_2.y', y)
trials_2.addData('response_2.leftButton', response_2.click)
trials_2.addData('grid_lines_2.started', grid_lines_2.tStartRefresh)
trials_2.addData('grid_lines_2.stopped', grid_lines_2.tStopRefresh)
trials_2.addData('target_square_2.started', target_square_2.tStartRefresh)
trials_2.addData('target_square_2.stopped', target_square_2.tStopRefresh)
trials_2.addData('fixation_3.started', fixation_3.tStartRefresh)
trials_2.addData('fixation_3.stopped', fixation_3.tStopRefresh)
if gotValidClick==True and (response_2.click_left == 1 or response_2.click_right == 1): # we had a response
trials_2.addData('response_2.rt_left', response_2.rt_left)
trials_2.addData('response_2.rt_right', response_2.rt_right)
# store data for trials_2 (TrialHandler)
trials_2.addData('response_2.click',response_2.click)
trials_2.addData('response_2.corr', response_2.corr)
trials_2.addData('response_2.started', response_2.tStartRefresh)
trials_2.addData('response_2.stopped', response_2.tStopRefresh)
distractmap_bids_trial = []
distractmap_bids_trial.extend((onset, t, mouse_response_rt, mouse_response, response_2.corr, bodySites[runs], temperature, "2back"))
distractmap_bids.append(distractmap_bids_trial)
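# Each 2-back trial appends one row of
# (onset, duration, rt, response, correct, bodySite, temperature, condition),
# matching the column order used when distractmap_bids is written out as a BIDS
# events.tsv in section 17.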
routineTimer.reset()
thisExp.nextEntry()
# completed 1 repeat of 'trials_2'
"""
15iv. Post 2-Back Fixation Cross
"""
# ------Prepare to start Routine "Fixation"-------
continueRoutine = True
jitter2 = random.choice([5,7.5,10])
routineTimer.add(jitter2)
# update component parameters for each repeat
# keep track of which components have finished
FixationComponents = [fixation_1]
for thisComponent in FixationComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
FixationClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Fixation"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = FixationClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=FixationClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *fixation_1* updates
if fixation_1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
fixation_1.frameNStart = frameN # exact frame index
fixation_1.tStart = t # local t and not account for scr refresh
fixation_1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(fixation_1, 'tStartRefresh') # time at next scr refresh
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, nback_fixation)
fixation_1.setAutoDraw(True)
if fixation_1.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > fixation_1.tStartRefresh + jitter2-frameTolerance:
# keep track of stop time/frame for later
fixation_1.tStop = t # not accounting for scr refresh
fixation_1.frameNStop = frameN # exact frame index
win.timeOnFlip(fixation_1, 'tStopRefresh') # time at next scr refresh
fixation_1.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in FixationComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Fixation"-------
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
for thisComponent in FixationComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('fixation_1.started', fixation_1.tStartRefresh)
thisExp.addData('fixation_1.stopped', fixation_1.tStopRefresh)
routineTimer.reset()
"""
15v. Phase-2 2-back Pain Rating Trial
"""
############ ASK PAIN INTENSITY #######################################
# ------Prepare to start Routine "IntensityRating"-------
continueRoutine = True
routineTimer.add(ratingTime)
# update component parameters for each repeat
# keep track of which components have finished
IntensityMouse = event.Mouse(win=win, visible=False) # Re-initialize IntensityMouse
IntensityMouse.setPos((0,0))
timeAtLastInterval = 0
mouseX = 0
oldMouseX = 0
IntensityRating.width = abs(sliderMin)
IntensityRating.pos = [sliderMin/2, -.1]
IntensityRatingComponents = [IntensityMouse, IntensityBlackTriangle, IntensityRating, IntensityAnchors, IntensityPrompt]
for thisComponent in IntensityRatingComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
IntensityRatingClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
IntensityRating.fillColor='red'
obtainedRating = 0
# -------Run Routine "IntensityRating"-------
onset = globalClock.getTime() - fmriStart # Record onset time of the trial
while continueRoutine:
if obtainedRating == 0:
timeNow = globalClock.getTime()
if (timeNow - timeAtLastInterval) > TIME_INTERVAL:
mouseRel=IntensityMouse.getRel()
mouseX=oldMouseX + mouseRel[0]
IntensityRating.pos = ((sliderMin + mouseX)/2,0)
IntensityRating.width = abs((mouseX-sliderMin))
if mouseX > sliderMax:
mouseX = sliderMax
if mouseX < sliderMin:
mouseX = sliderMin
timeAtLastInterval = timeNow
oldMouseX=mouseX
sliderValue = (mouseX - sliderMin) / (sliderMax - sliderMin) * 100
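# sliderValue maps the clamped cursor position linearly onto a 0-100 scale: e.g. with
# sliderMin = -0.5 and sliderMax = 0.5 (hypothetical values), mouseX = 0 gives
# (0 - (-0.5)) / 1.0 * 100 = 50. Note that IntensityRating.pos/.width are updated from
# the raw mouseX before clamping, so the bar can briefly extend past the anchors on
# fast mouse movements.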
# get current time
t = IntensityRatingClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=IntensityRatingClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *IntensityMouse* updates
if IntensityMouse.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
IntensityMouse.frameNStart = frameN # exact frame index
IntensityMouse.tStart = t # local t and not account for scr refresh
IntensityMouse.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(IntensityMouse, 'tStartRefresh') # time at next scr refresh
IntensityMouse.status = STARTED
IntensityMouse.mouseClock.reset()
prevButtonState = IntensityMouse.getPressed() # if button is down already this ISN'T a new click
if IntensityMouse.status == STARTED: # only update if started and not finished!
if tThisFlipGlobal > IntensityMouse.tStartRefresh + ratingTime-frameTolerance:
# keep track of stop time/frame for later
IntensityMouse.tStop = t # not accounting for scr refresh
IntensityMouse.frameNStop = frameN # exact frame index
IntensityMouse.status = FINISHED
buttons = IntensityMouse.getPressed()
if buttons != prevButtonState: # button state changed?
prevButtonState = buttons
if sum(buttons) > 0: # state changed to a new click
IntensityRating.fillColor='white'
obtainedRating = 1
# *IntensityRating* updates
if IntensityRating.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
IntensityRating.frameNStart = frameN # exact frame index
IntensityRating.tStart = t # local t and not account for scr refresh
IntensityRating.tStartRefresh = tThisFlipGlobal # on global time
win.callOnFlip(print, "Show Intensity Rating")
if biopac_exists == 1:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, intensity_rating)
win.timeOnFlip(IntensityRating, 'tStartRefresh') # time at next scr refresh
IntensityRating.setAutoDraw(True)
if IntensityRating.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > IntensityRating.tStartRefresh + ratingTime-frameTolerance:
# keep track of stop time/frame for later
IntensityRating.tStop = t # not accounting for scr refresh
IntensityRating.frameNStop = frameN # exact frame index
win.timeOnFlip(IntensityRating, 'tStopRefresh') # time at next scr refresh
IntensityRating.setAutoDraw(False)
# *IntensityBlackTriangle* updates
if IntensityBlackTriangle.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
IntensityBlackTriangle.frameNStart = frameN # exact frame index
IntensityBlackTriangle.tStart = t # local t and not account for scr refresh
IntensityBlackTriangle.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(IntensityBlackTriangle, 'tStartRefresh') # time at next scr refresh
IntensityBlackTriangle.setAutoDraw(True)
if IntensityBlackTriangle.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > IntensityBlackTriangle.tStartRefresh + ratingTime-frameTolerance:
# keep track of stop time/frame for later
IntensityBlackTriangle.tStop = t # not accounting for scr refresh
IntensityBlackTriangle.frameNStop = frameN # exact frame index
win.timeOnFlip(IntensityBlackTriangle, 'tStopRefresh') # time at next scr refresh
IntensityBlackTriangle.setAutoDraw(False)
# *IntensityAnchors* updates
if IntensityAnchors.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
IntensityAnchors.frameNStart = frameN # exact frame index
IntensityAnchors.tStart = t # local t and not account for scr refresh
IntensityAnchors.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(IntensityAnchors, 'tStartRefresh') # time at next scr refresh
IntensityAnchors.setAutoDraw(True)
if IntensityAnchors.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > IntensityAnchors.tStartRefresh + ratingTime-frameTolerance:
# keep track of stop time/frame for later
IntensityAnchors.tStop = t # not accounting for scr refresh
IntensityAnchors.frameNStop = frameN # exact frame index
win.timeOnFlip(IntensityAnchors, 'tStopRefresh') # time at next scr refresh
IntensityAnchors.setAutoDraw(False)
# *IntensityPrompt* updates
if IntensityPrompt.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
IntensityPrompt.frameNStart = frameN # exact frame index
IntensityPrompt.tStart = t # local t and not account for scr refresh
IntensityPrompt.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(IntensityPrompt, 'tStartRefresh') # time at next scr refresh
IntensityPrompt.setAutoDraw(True)
if IntensityPrompt.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > IntensityPrompt.tStartRefresh + ratingTime-frameTolerance:
# keep track of stop time/frame for later
IntensityPrompt.tStop = t # not accounting for scr refresh
IntensityPrompt.frameNStop = frameN # exact frame index
win.timeOnFlip(IntensityPrompt, 'tStopRefresh') # time at next scr refresh
IntensityPrompt.setAutoDraw(False)
# Autoresponder
if t >= thisSimKey.rt and autorespond == 1:
sliderValue = random.randint(0,100)
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in IntensityRatingComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine:
win.flip()
# -------Ending Routine "IntensityRating"-------
print("CueOff Channel " + str(intensity_rating))
for thisComponent in IntensityRatingComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# store data for thisExp (ExperimentHandler)
thisExp.addData('IntensityRating.response', sliderValue)
thisExp.addData('IntensityRating.rt', timeNow - IntensityRating.tStart)
thisExp.nextEntry()
thisExp.addData('IntensityRating.started', IntensityRating.tStart)
thisExp.addData('IntensityRating.stopped', IntensityRating.tStop)
rating_bids_trial = []
rating_bids_trial.extend((onset, t, bodySites[runs], sliderValue, temperature, "2back", jitter1, jitter2))
rating_bids.append(rating_bids_trial)
# the Routine "IntensityRating" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
"""
16. Begin Second 1-Back Trials
"""
NbackInstructions.setText("The following trials will be 1-back. Please indicate whether or not the square in the current position matches the position that was presented in the previous trial.")
NbackInstructions.draw()
win.flip()
timer = core.CountdownTimer()
timer.add(10)
while timer.getTime() > 0:
continue
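# The loop above busy-waits through the 10 s instruction screen. A sketch of a
# CPU-friendlier equivalent (not applied here; behaviour would be the same):
#   core.wait(10)  # yields to the OS instead of spinning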
routineTimer.reset()
jitter2=None # Reset jitter2
for r in range(4): # 4 repetitions
"""
16i. Select Medoc Thermal Program
"""
if thermode_exists == 1:
sendCommand('select_tp', thermodeCommand)
"""
16ii. Pre-1-Back Task Fixation Cross
"""
# ------Prepare to start Routine "Fixation"-------
continueRoutine = True
if not jitter2:
jitter1 = random.choice([5,7.5,10])
elif jitter2 == 5:
jitter1 = 10
elif jitter2 == 7.5:
jitter1 = 7.5
elif jitter2 == 10:
jitter1 = 5
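# jitter1 is chosen to complement the post-block jitter2 from the previous iteration,
# so the back-to-back fixations between two successive 1-back blocks always sum to
# 15 s (5+10, 7.5+7.5, 10+5); on the first iteration jitter2 was reset to None, so
# jitter1 is drawn at random. A compact sketch of the same mapping (hypothetical
# helper, not part of this script):
#   COMPLEMENT = {5: 10, 7.5: 7.5, 10: 5}
#   jitter1 = COMPLEMENT.get(jitter2, random.choice([5, 7.5, 10]))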
routineTimer.add(jitter1)
# update component parameters for each repeat
# keep track of which components have finished
FixationComponents = [fixation_1]
for thisComponent in FixationComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
FixationClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Fixation"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = FixationClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=FixationClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *fixation_1* updates
if fixation_1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
fixation_1.frameNStart = frameN # exact frame index
fixation_1.tStart = t # local t and not account for scr refresh
fixation_1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(fixation_1, 'tStartRefresh') # time at next scr refresh
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, nback_fixation)
fixation_1.setAutoDraw(True)
if fixation_1.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > fixation_1.tStartRefresh + jitter1-frameTolerance:
# keep track of stop time/frame for later
fixation_1.tStop = t # not accounting for scr refresh
fixation_1.frameNStop = frameN # exact frame index
win.timeOnFlip(fixation_1, 'tStopRefresh') # time at next scr refresh
fixation_1.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in FixationComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Fixation"-------
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
for thisComponent in FixationComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('fixation_1.started', fixation_1.tStartRefresh)
thisExp.addData('fixation_1.stopped', fixation_1.tStopRefresh)
routineTimer.reset()
"""
16iii. Third Phase: 4 trials of 1-Back Task Start
"""
# set up handler to look after randomisation of conditions etc
if not OnebackFiles:
OnebackFiles = ["N-back-1_1.xlsx", "N-back-1_2.xlsx", "N-back-1_3.xlsx", "N-back-1_4.xlsx", "N-back-1_5.xlsx", "N-back-1_6.xlsx", "N-back-1_7.xlsx", "N-back-1_8.xlsx"]
Nback = os.sep.join([nback_dir, OnebackFiles.pop()])
trials = data.TrialHandler(nReps=1, method='sequential',
extraInfo=expInfo, originPath=-1,
trialList=data.importConditions(Nback),
seed=None, name='trials')
thisExp.addLoop(trials) # add the loop to the experiment
thisTrial = trials.trialList[0] # so we can initialise stimuli with some values
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
for thisTrial in trials:
currentLoop = trials
# abbreviate parameter names if possible (e.g. rgb = thisTrial.rgb)
if thisTrial != None:
for paramName in thisTrial:
exec('{} = thisTrial[paramName]'.format(paramName))
# ------Prepare to start Routine "N_back_1_Trial"-------
# Trigger Thermal Program
if trials.thisTrialN == 4 and thermode_exists == 1:
sendCommand('trigger')
continueRoutine = True
routineTimer.add(2.000000)
# update component parameters for each repeat
target_square.setPos(location)
response.rt = []
gotValidClick = False # until a click is received
# keep track of which components have finished
N_back_1_TrialComponents = [grid_lines, target_square, fixation_2, response]
for thisComponent in N_back_1_TrialComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
N_back_1_TrialClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "N_back_1_Trial"-------
onset = globalClock.getTime() - fmriStart
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = N_back_1_TrialClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=N_back_1_TrialClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *grid_lines* updates
if grid_lines.status == NOT_STARTED and tThisFlip >= 0-frameTolerance:
# keep track of start time/frame for later
grid_lines.frameNStart = frameN # exact frame index
grid_lines.tStart = t # local t and not account for scr refresh
grid_lines.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(grid_lines, 'tStartRefresh') # time at next scr refresh
if biopac_exists == 1:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, nback_trial_start)
grid_lines.setAutoDraw(True)
if grid_lines.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > grid_lines.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
grid_lines.tStop = t # not accounting for scr refresh
grid_lines.frameNStop = frameN # exact frame index
win.timeOnFlip(grid_lines, 'tStopRefresh') # time at next scr refresh
grid_lines.setAutoDraw(False)
# *target_square* updates
if target_square.status == NOT_STARTED and tThisFlip >= 0-frameTolerance:
# keep track of start time/frame for later
target_square.frameNStart = frameN # exact frame index
target_square.tStart = t # local t and not account for scr refresh
target_square.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(target_square, 'tStartRefresh') # time at next scr refresh
target_square.setAutoDraw(True)
if target_square.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > target_square.tStartRefresh + 1-frameTolerance:
# keep track of stop time/frame for later
target_square.tStop = t # not accounting for scr refresh
target_square.frameNStop = frameN # exact frame index
win.timeOnFlip(target_square, 'tStopRefresh') # time at next scr refresh
target_square.setAutoDraw(False)
# *fixation_2* updates
if fixation_2.status == NOT_STARTED and tThisFlip >= 1-frameTolerance:
# keep track of start time/frame for later
fixation_2.frameNStart = frameN # exact frame index
fixation_2.tStart = t # local t and not account for scr refresh
fixation_2.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(fixation_2, 'tStartRefresh') # time at next scr refresh
fixation_2.setAutoDraw(True)
if fixation_2.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > fixation_2.tStartRefresh + 1.0-frameTolerance:
# keep track of stop time/frame for later
fixation_2.tStop = t # not accounting for scr refresh
fixation_2.frameNStop = frameN # exact frame index
win.timeOnFlip(fixation_2, 'tStopRefresh') # time at next scr refresh
fixation_2.setAutoDraw(False)
# *response* updates
waitOnFlip = False
if response.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
response.frameNStart = frameN # exact frame index
response.tStart = t # local t and not account for scr refresh
response.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(response, 'tStartRefresh') # time at next scr refresh
response.status = STARTED
# keyboard checking is just starting
waitOnFlip = True
win.callOnFlip(response.mouseClock.reset) # t=0 on next screen flip
win.callOnFlip(response.clickReset) # t=0 on next screen flip
prevButtonState = response.getPressed() # if button is down already this ISN'T a new click
if response.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > response.tStartRefresh + 2-frameTolerance:
# keep track of stop time/frame for later
response.tStop = t # not accounting for scr refresh
response.frameNStop = frameN # exact frame index
win.timeOnFlip(response, 'tStopRefresh') # time at next scr refresh
response.status = FINISHED
if response.status == STARTED and not waitOnFlip:
response.click, response.rt = response.getPressed(getTime = True)
response.click_left = response.click[0]
response.click_right = response.click[2]
response.rt_left = response.rt[0]
response.rt_right = response.rt[2]
if response.click_left != prevButtonState[0] or response.click_right != prevButtonState[2]: # button state changed?
prevButtonState = response.click
if (response.click_left == 1 or response.click_right == 1) and gotValidClick == False:
print(str(response.click), str(response.rt))
if (corrAns == 1 and response.click_left == 1) or (corrAns == 0 and response.click_right == 1):
response.corr = 1
correct = correct + 1
if biopac_exists:
biopac.setData(biopac, 0)
biopac.setData(biopac, nback_hit)
else:
response.corr = 0
if biopac_exists:
biopac.setData(biopac, 0)
biopac.setData(biopac, nback_comiss) # mark commission error
if response.click_left == 1:
mouse_response = 0
mouse_response_rt = response.rt_left
elif response.click_right == 1:
mouse_response = 2
mouse_response_rt = response.rt_right
gotValidClick = True
elif response.click_left == 0 and response.click_right == 0 and gotValidClick==False: # No response was made
mouse_response = None
mouse_response_rt = None
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in N_back_1_TrialComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "N_back_1_Trial"-------
if biopac_exists:
biopac.setData(biopac, 0)
for thisComponent in N_back_1_TrialComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
if gotValidClick==False: # No response was made
response.rt = None
if str(corrAns).lower() == 'none':
response.corr=1
correct = correct + 1
else:
response.corr = 0 # failed to respond (scored as incorrect)
trials.addData('grid_lines.started', grid_lines.tStartRefresh)
trials.addData('grid_lines.stopped', grid_lines.tStopRefresh)
trials.addData('target_square.started', target_square.tStartRefresh)
trials.addData('target_square.stopped', target_square.tStopRefresh)
trials.addData('fixation_2.started', fixation_2.tStartRefresh)
trials.addData('fixation_2.stopped', fixation_2.tStopRefresh)
# store data for trials (TrialHandler)
trials.addData('response.corr', response.corr)
trials.addData('response.x', x)
trials.addData('response.y', y)
trials.addData('response.leftButton', response.click)
if gotValidClick==True and (response.click_left == 1 or response.click_right == 1): # we had a response
trials.addData('response.rt_left', response.rt_left)
trials.addData('response.rt_right', response.rt_right)
trials.addData('response.click',response.click)
trials.addData('response.corr', response.corr)
trials.addData('response.started', response.tStartRefresh)
trials.addData('response.stopped', response.tStopRefresh)
distractmap_bids_trial = []
distractmap_bids_trial.extend((onset, t, mouse_response_rt, mouse_response, response.corr, bodySites[runs], temperature, "1back"))
distractmap_bids.append(distractmap_bids_trial)
routineTimer.reset()
thisExp.nextEntry()
"""
16iv. Post Second 1-Back Fixation Cross
"""
# ------Prepare to start Routine "Fixation"-------
continueRoutine = True
jitter2 = random.choice([5,7.5,10])
routineTimer.add(jitter2)
# update component parameters for each repeat
# keep track of which components have finished
FixationComponents = [fixation_1]
for thisComponent in FixationComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
FixationClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
# -------Run Routine "Fixation"-------
while continueRoutine and routineTimer.getTime() > 0:
# get current time
t = FixationClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=FixationClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *fixation_1* updates
if fixation_1.status == NOT_STARTED and tThisFlip >= 0.0-frameTolerance:
# keep track of start time/frame for later
fixation_1.frameNStart = frameN # exact frame index
fixation_1.tStart = t # local t and not account for scr refresh
fixation_1.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(fixation_1, 'tStartRefresh') # time at next scr refresh
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, nback_fixation)
fixation_1.setAutoDraw(True)
if fixation_1.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > fixation_1.tStartRefresh + jitter2-frameTolerance:
# keep track of stop time/frame for later
fixation_1.tStop = t # not accounting for scr refresh
fixation_1.frameNStop = frameN # exact frame index
win.timeOnFlip(fixation_1, 'tStopRefresh') # time at next scr refresh
fixation_1.setAutoDraw(False)
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in FixationComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine: # don't flip if this routine is over or we'll get a blank screen
win.flip()
# -------Ending Routine "Fixation"-------
if biopac_exists:
win.callOnFlip(biopac.setData, biopac, 0)
for thisComponent in FixationComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
thisExp.addData('fixation_1.started', fixation_1.tStartRefresh)
thisExp.addData('fixation_1.stopped', fixation_1.tStopRefresh)
routineTimer.reset()
"""
16v. Phase-3 1-back Pain Rating Trial
"""
############ ASK PAIN INTENSITY #######################################
# ------Prepare to start Routine "IntensityRating"-------
continueRoutine = True
routineTimer.add(ratingTime)
# update component parameters for each repeat
# keep track of which components have finished
IntensityMouse = event.Mouse(win=win, visible=False) # Re-initialize IntensityMouse
IntensityMouse.setPos((0,0))
timeAtLastInterval = 0
mouseX = 0
oldMouseX = 0
IntensityRating.width = abs(sliderMin)
IntensityRating.pos = [sliderMin/2, -.1]
IntensityRatingComponents = [IntensityMouse, IntensityBlackTriangle, IntensityRating, IntensityAnchors, IntensityPrompt]
for thisComponent in IntensityRatingComponents:
thisComponent.tStart = None
thisComponent.tStop = None
thisComponent.tStartRefresh = None
thisComponent.tStopRefresh = None
if hasattr(thisComponent, 'status'):
thisComponent.status = NOT_STARTED
# reset timers
t = 0
_timeToFirstFrame = win.getFutureFlipTime(clock="now")
IntensityRatingClock.reset(-_timeToFirstFrame) # t0 is time of first possible flip
frameN = -1
IntensityRating.fillColor='red'
obtainedRating = 0
# -------Run Routine "IntensityRating"-------
onset = globalClock.getTime() - fmriStart # Record onset time of the trial
while continueRoutine:
if obtainedRating == 0:
timeNow = globalClock.getTime()
if (timeNow - timeAtLastInterval) > TIME_INTERVAL:
mouseRel=IntensityMouse.getRel()
mouseX=oldMouseX + mouseRel[0]
IntensityRating.pos = ((sliderMin + mouseX)/2,0)
IntensityRating.width = abs((mouseX-sliderMin))
if mouseX > sliderMax:
mouseX = sliderMax
if mouseX < sliderMin:
mouseX = sliderMin
timeAtLastInterval = timeNow
oldMouseX=mouseX
sliderValue = (mouseX - sliderMin) / (sliderMax - sliderMin) * 100
# get current time
t = IntensityRatingClock.getTime()
tThisFlip = win.getFutureFlipTime(clock=IntensityRatingClock)
tThisFlipGlobal = win.getFutureFlipTime(clock=None)
frameN = frameN + 1 # number of completed frames (so 0 is the first frame)
# update/draw components on each frame
# *IntensityMouse* updates
if IntensityMouse.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
IntensityMouse.frameNStart = frameN # exact frame index
IntensityMouse.tStart = t # local t and not account for scr refresh
IntensityMouse.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(IntensityMouse, 'tStartRefresh') # time at next scr refresh
IntensityMouse.status = STARTED
IntensityMouse.mouseClock.reset()
prevButtonState = IntensityMouse.getPressed() # if button is down already this ISN'T a new click
if IntensityMouse.status == STARTED: # only update if started and not finished!
if tThisFlipGlobal > IntensityMouse.tStartRefresh + ratingTime-frameTolerance:
# keep track of stop time/frame for later
IntensityMouse.tStop = t # not accounting for scr refresh
IntensityMouse.frameNStop = frameN # exact frame index
IntensityMouse.status = FINISHED
buttons = IntensityMouse.getPressed()
if buttons != prevButtonState: # button state changed?
prevButtonState = buttons
if sum(buttons) > 0: # state changed to a new click
IntensityRating.fillColor='white'
obtainedRating = 1
# *IntensityRating* updates
if IntensityRating.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
IntensityRating.frameNStart = frameN # exact frame index
IntensityRating.tStart = t # local t and not account for scr refresh
IntensityRating.tStartRefresh = tThisFlipGlobal # on global time
win.callOnFlip(print, "Show Intensity Rating")
if biopac_exists == 1:
win.callOnFlip(biopac.setData, biopac, 0)
win.callOnFlip(biopac.setData, biopac, intensity_rating)
win.timeOnFlip(IntensityRating, 'tStartRefresh') # time at next scr refresh
IntensityRating.setAutoDraw(True)
if IntensityRating.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > IntensityRating.tStartRefresh + ratingTime-frameTolerance:
# keep track of stop time/frame for later
IntensityRating.tStop = t # not accounting for scr refresh
IntensityRating.frameNStop = frameN # exact frame index
win.timeOnFlip(IntensityRating, 'tStopRefresh') # time at next scr refresh
IntensityRating.setAutoDraw(False)
# *IntensityBlackTriangle* updates
if IntensityBlackTriangle.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
IntensityBlackTriangle.frameNStart = frameN # exact frame index
IntensityBlackTriangle.tStart = t # local t and not account for scr refresh
IntensityBlackTriangle.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(IntensityBlackTriangle, 'tStartRefresh') # time at next scr refresh
IntensityBlackTriangle.setAutoDraw(True)
if IntensityBlackTriangle.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > IntensityBlackTriangle.tStartRefresh + ratingTime-frameTolerance:
# keep track of stop time/frame for later
IntensityBlackTriangle.tStop = t # not accounting for scr refresh
IntensityBlackTriangle.frameNStop = frameN # exact frame index
win.timeOnFlip(IntensityBlackTriangle, 'tStopRefresh') # time at next scr refresh
IntensityBlackTriangle.setAutoDraw(False)
# *IntensityAnchors* updates
if IntensityAnchors.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
IntensityAnchors.frameNStart = frameN # exact frame index
IntensityAnchors.tStart = t # local t and not account for scr refresh
IntensityAnchors.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(IntensityAnchors, 'tStartRefresh') # time at next scr refresh
IntensityAnchors.setAutoDraw(True)
if IntensityAnchors.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > IntensityAnchors.tStartRefresh + ratingTime-frameTolerance:
# keep track of stop time/frame for later
IntensityAnchors.tStop = t # not accounting for scr refresh
IntensityAnchors.frameNStop = frameN # exact frame index
win.timeOnFlip(IntensityAnchors, 'tStopRefresh') # time at next scr refresh
IntensityAnchors.setAutoDraw(False)
# *IntensityPrompt* updates
if IntensityPrompt.status == NOT_STARTED and t >= 0.0-frameTolerance:
# keep track of start time/frame for later
IntensityPrompt.frameNStart = frameN # exact frame index
IntensityPrompt.tStart = t # local t and not account for scr refresh
IntensityPrompt.tStartRefresh = tThisFlipGlobal # on global time
win.timeOnFlip(IntensityPrompt, 'tStartRefresh') # time at next scr refresh
IntensityPrompt.setAutoDraw(True)
if IntensityPrompt.status == STARTED:
# is it time to stop? (based on global clock, using actual start)
if tThisFlipGlobal > IntensityPrompt.tStartRefresh + ratingTime-frameTolerance:
# keep track of stop time/frame for later
IntensityPrompt.tStop = t # not accounting for scr refresh
IntensityPrompt.frameNStop = frameN # exact frame index
win.timeOnFlip(IntensityPrompt, 'tStopRefresh') # time at next scr refresh
IntensityPrompt.setAutoDraw(False)
# Autoresponder
if t >= thisSimKey.rt and autorespond == 1:
sliderValue = random.randint(0,100)
continueRoutine = False
# check for quit (typically the Esc key)
if endExpNow or defaultKeyboard.getKeys(keyList=["escape"]):
core.quit()
# check if all components have finished
if not continueRoutine: # a component has requested a forced-end of Routine
break
continueRoutine = False # will revert to True if at least one component still running
for thisComponent in IntensityRatingComponents:
if hasattr(thisComponent, "status") and thisComponent.status != FINISHED:
continueRoutine = True
break # at least one component has not yet finished
# refresh the screen
if continueRoutine:
win.flip()
# -------Ending Routine "IntensityRating"-------
print("CueOff Channel " + str(intensity_rating))
for thisComponent in IntensityRatingComponents:
if hasattr(thisComponent, "setAutoDraw"):
thisComponent.setAutoDraw(False)
# store data for thisExp (ExperimentHandler)
thisExp.addData('IntensityRating.response', sliderValue)
thisExp.addData('IntensityRating.rt', timeNow - IntensityRating.tStart)
thisExp.nextEntry()
thisExp.addData('IntensityRating.started', IntensityRating.tStart)
thisExp.addData('IntensityRating.stopped', IntensityRating.tStop)
rating_bids_trial = []
rating_bids_trial.extend((onset, t, bodySites[runs], sliderValue, temperature, "1back", jitter1, jitter2))
rating_bids.append(rating_bids_trial)
# the Routine "IntensityRating" was not non-slip safe, so reset the non-slip timer
routineTimer.reset()
"""
17. Save Data to .TSV and .CSV Formats and Tie Up Loose Ends
"""
distractmap_bids_data = pd.DataFrame(distractmap_bids, columns = ['onset', 'duration', 'rt', 'response', 'correct', 'bodySite', 'temperature', 'condition'])
distractmap_bids_filename = sub_dir + os.sep + u'sub-%05d_ses-%02d_task-%s_acq-%s_run-%s_events.tsv' % (int(expInfo['subject number']), int(expInfo['session']), expName, bodySites[runs].replace(" ", "").lower(), str(runs+1))
distractmap_bids_data.to_csv(distractmap_bids_filename, sep="\t")
rating_bids_data = pd.DataFrame(rating_bids, columns = ['onset', 'duration', 'bodySite', 'intensity', 'temperature', 'condition', 'pretrial-jitter', 'posttrial-jitter'])
rating_bids_filename = sub_dir + os.sep + u'sub-%05d_ses-%02d_task-%s_acq-%s_run-%s_events.tsv' % (int(expInfo['subject number']), int(expInfo['session']), 'distractmap-ratings', bodySites[runs].replace(" ", "").lower(), str(runs+1))
rating_bids_data.to_csv(rating_bids_filename, sep="\t")
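# Both event files follow the BIDS events naming pattern
# sub-XXXXX_ses-XX_task-<name>_acq-<bodysite>_run-<n>_events.tsv. Note that
# DataFrame.to_csv writes the integer index as an extra first column unless
# index=False is passed, so a BIDS validator may flag these files; a sketch of the
# stricter call (an assumption, not what the script does above):
#   distractmap_bids_data.to_csv(distractmap_bids_filename, sep="\t", index=False)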
# Reset for the next run
distractmap_bids_data = []
distractmap_bids = []
rating_bids_data = []
rating_bids = []
"""
18. End of Run, Wait for Experimenter instructions to begin next run
"""
message = visual.TextStim(win, text=in_between_run_msg, height=0.05, units='height')
message.draw()
win.callOnFlip(print, "Awaiting Experimenter to start next run...\nPress [e] to continue")
if biopac_exists:
win.callOnFlip(biopac.setData, biopac,0)
win.callOnFlip(biopac.setData, biopac,between_run_msg)
win.flip()
# Autoresponder
if autorespond != 1:
# event.waitKeys(keyList = 'e')
continueRoutine = True
event.clearEvents()
while continueRoutine == True:
if 'e' in event.getKeys(keyList = 'e'):
continueRoutine = False
"""
19. Wrap up
"""
if biopac_exists:
biopac.setData(biopac,0)
biopac.setData(biopac,end_task)
win.flip()
# these shouldn't be strictly necessary (should auto-save)
thisExp.saveAsWideText(psypy_filename+'.csv', delim='auto')
thisExp.saveAsPickle(psypy_filename)
logging.flush()
# make sure everything is closed down
message = visual.TextStim(win, text=end_msg, height=0.05, units='height')
message.draw()
if biopac_exists == 1:
biopac.close() # Close the labjack U3 device to end communication with the Biopac MP150
thisExp.abort() # or data files will save again on exit
win.close() # close the window
core.quit()
"""
End of Experiment
"""
| 50.82316
| 532
| 0.606887
| 22,945
| 210,662
| 5.488778
| 0.054435
| 0.015007
| 0.010132
| 0.012959
| 0.846959
| 0.833182
| 0.823972
| 0.814817
| 0.804859
| 0.798325
| 0
| 0.027065
| 0.305807
| 210,662
| 4,145
| 533
| 50.82316
| 0.834122
| 0.208766
| 0
| 0.832053
| 0
| 0.008013
| 0.086947
| 0.012081
| 0
| 0
| 0
| 0
| 0
| 1
| 0.001002
| false
| 0.002337
| 0.009349
| 0
| 0.010684
| 0.005342
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc48e0670662b705c585d67da3d4c3e8a62e4d92
| 2,514
|
py
|
Python
|
tests/test_servers.py
|
cchurch/ATEMStreamingXML
|
bc928d0dbb1bdf115b980e3d4e0b47aa20925409
|
[
"BSD-3-Clause"
] | 2
|
2021-04-13T06:54:39.000Z
|
2021-06-17T19:04:46.000Z
|
tests/test_servers.py
|
cchurch/ATEMStreamingXML
|
bc928d0dbb1bdf115b980e3d4e0b47aa20925409
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_servers.py
|
cchurch/ATEMStreamingXML
|
bc928d0dbb1bdf115b980e3d4e0b47aa20925409
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: UTF-8 -*-
# Python
from __future__ import unicode_literals
# PyTest
import pytest
def test_server_name_requires_remove_or_url(main, xml_compare, service_name, server_name):
with xml_compare('empty.xml', 'empty.xml'):
with pytest.raises(SystemExit):
main('--service', service_name, '--server-name', server_name)
def test_add_server(main, xml_compare, service_name, server_name, server_url):
with xml_compare('empty.xml', 'add-server.xml'):
main('--service', service_name, '--server-name', server_name, '--server-url', server_url)
def test_add_server_no_change(main, xml_compare, service_name, server_name, server_url):
with xml_compare('add-server.xml', 'add-server.xml'):
main('-S', service_name, '-N', server_name, '-U', server_url)
def test_add_another_server(main, xml_compare, service_name, alt_server_name, alt_server_url):
with xml_compare('add-server.xml', 'add-alt-server.xml'):
main('--service', service_name, '--server-name', alt_server_name, '--server-url', alt_server_url)
def test_add_another_server_no_change(main, xml_compare, service_name, alt_server_name, alt_server_url):
with xml_compare('add-alt-server.xml', 'add-alt-server.xml'):
main('-S', service_name, '-N', alt_server_name, '-U', alt_server_url)
def test_update_server_url(main, xml_compare, service_name, server_name, alt_server_url):
with xml_compare('add-server.xml', 'update-server.xml'):
main('--service', service_name, '--server-name', server_name, '--server-url', alt_server_url)
def test_update_server_url_no_change(main, xml_compare, service_name, server_name, alt_server_url):
with xml_compare('update-server.xml', 'update-server.xml'):
main('-S', service_name, '-N', server_name, '-U', alt_server_url)
def test_remove_server_requires_server_name(main, xml_compare, service_name):
with xml_compare('add-alt-server.xml', 'add-alt-server.xml'):
with pytest.raises(SystemExit):
main('--service', service_name, '--remove-server')
def test_remove_server(main, xml_compare, service_name, alt_server_name):
with xml_compare('add-alt-server.xml', 'add-server.xml'):
main('--service', service_name, '--server-name', alt_server_name, '--remove-server')
def test_remove_server_no_change(main, xml_compare, service_name, alt_server_name):
with xml_compare('add-server.xml', 'add-server.xml'):
main('-S', service_name, '-N', alt_server_name, '--remove-server')
| 41.9
| 105
| 0.71957
| 367
| 2,514
| 4.574932
| 0.095368
| 0.148898
| 0.108398
| 0.125074
| 0.911257
| 0.864205
| 0.848124
| 0.795116
| 0.715902
| 0.60274
| 0
| 0.000456
| 0.128481
| 2,514
| 59
| 106
| 42.610169
| 0.76586
| 0.013922
| 0
| 0.176471
| 0
| 0
| 0.210101
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.294118
| false
| 0
| 0.058824
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fc53c5dbace548bf7d04bc9c071d1d84b1a54ab7
| 198
|
py
|
Python
|
spesial_asserts.py
|
durovda/tdd_training
|
47a154a9f546e2b854a48c17acc817d751d22f17
|
[
"Apache-2.0"
] | null | null | null |
spesial_asserts.py
|
durovda/tdd_training
|
47a154a9f546e2b854a48c17acc817d751d22f17
|
[
"Apache-2.0"
] | null | null | null |
spesial_asserts.py
|
durovda/tdd_training
|
47a154a9f546e2b854a48c17acc817d751d22f17
|
[
"Apache-2.0"
] | null | null | null |
def assert_lists_equal(actual_list, expected_list):
assert actual_list == expected_list, f'\nactual = {actual_list}' \
f'\nexpected = {expected_list}'
| 49.5
| 72
| 0.590909
| 21
| 198
| 5.190476
| 0.47619
| 0.275229
| 0.330275
| 0.40367
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.308081
| 198
| 3
| 73
| 66
| 0.79562
| 0
| 0
| 0
| 0
| 0
| 0.272727
| 0
| 0
| 0
| 0
| 0
| 0.666667
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fca231340bfd3e5d989edd3c83a37089c7be0fc7
| 95,877
|
py
|
Python
|
tests/expectations/metrics/test_core.py
|
serialbandicoot/great_expectations
|
88b636aa060ab3625f55ca234914e8218330ec63
|
[
"Apache-2.0"
] | null | null | null |
tests/expectations/metrics/test_core.py
|
serialbandicoot/great_expectations
|
88b636aa060ab3625f55ca234914e8218330ec63
|
[
"Apache-2.0"
] | null | null | null |
tests/expectations/metrics/test_core.py
|
serialbandicoot/great_expectations
|
88b636aa060ab3625f55ca234914e8218330ec63
|
[
"Apache-2.0"
] | null | null | null |
import copy
import logging
import numpy as np
import pandas as pd
import pytest
import great_expectations.exceptions as ge_exceptions
from great_expectations.core.batch import Batch
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
)
from great_expectations.execution_engine.sqlalchemy_execution_engine import (
SqlAlchemyBatchData,
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.registry import get_metric_provider
from great_expectations.self_check.util import (
build_pandas_engine,
build_sa_engine,
build_spark_engine,
)
from great_expectations.validator.validation_graph import MetricConfiguration
from tests.expectations.test_util import get_table_columns_metric
def test_metric_loads_pd():
assert get_metric_provider("column.max", PandasExecutionEngine()) is not None
def test_basic_metric_pd():
df = pd.DataFrame({"a": [1, 2, 3, 3, None]})
batch = Batch(data=df)
engine = PandasExecutionEngine(batch_data_dict={batch.id: batch.data})
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column.max",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert results == {desired_metric.id: 3}
def test_mean_metric_pd():
engine = build_pandas_engine(pd.DataFrame({"a": [1, 2, 3, None]}))
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column.mean",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert results == {desired_metric.id: 2}
def test_stdev_metric_pd():
engine = build_pandas_engine(pd.DataFrame({"a": [1, 2, 3, None]}))
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column.standard_deviation",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert results == {desired_metric.id: 1}
def test_max_metric_column_exists_pd():
df = pd.DataFrame({"a": [1, 2, 3, 3, None]})
batch = Batch(data=df)
engine = PandasExecutionEngine(batch_data_dict={batch.id: batch.data})
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column.max",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert results == {desired_metric.id: 3}
def test_max_metric_column_does_not_exist_pd():
df = pd.DataFrame({"a": [1, 2, 3, 3, None]})
batch = Batch(data=df)
engine = PandasExecutionEngine(batch_data_dict={batch.id: batch.data})
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column.max",
metric_domain_kwargs={"column": "non_existent_column"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
with pytest.raises(ge_exceptions.ExecutionEngineError) as eee:
# noinspection PyUnusedLocal
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert (
str(eee.value)
== 'Error: The column "non_existent_column" in BatchData does not exist.'
)
def test_max_metric_column_exists_sa(sa):
engine = build_sa_engine(pd.DataFrame({"a": [1, 2, 1, None]}), sa)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
partial_metric = MetricConfiguration(
metric_name="column.max.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(partial_metric,), metrics=metrics
)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column.max",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"metric_partial_fn": partial_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert results == {desired_metric.id: 2}
def test_max_metric_column_does_not_exist_sa(sa):
engine = build_sa_engine(pd.DataFrame({"a": [1, 2, 1, None]}), sa)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
partial_metric = MetricConfiguration(
metric_name="column.max.aggregate_fn",
metric_domain_kwargs={"column": "non_existent_column"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
with pytest.raises(ge_exceptions.ExecutionEngineError) as eee:
# noinspection PyUnusedLocal
results = engine.resolve_metrics(
metrics_to_resolve=(partial_metric,), metrics=metrics
)
metrics.update(results)
assert (
'Error: The column "non_existent_column" in BatchData does not exist.'
in str(eee.value)
)
def test_max_metric_column_exists_spark(spark_session):
engine: SparkDFExecutionEngine = build_spark_engine(
spark=spark_session,
df=pd.DataFrame({"a": [1, 2, 1]}),
batch_id="my_id",
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
partial_metric = MetricConfiguration(
metric_name="column.max.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(partial_metric,), metrics=metrics
)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column.max",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"metric_partial_fn": partial_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert results == {desired_metric.id: 2}
def test_max_metric_column_does_not_exist_spark(spark_session):
engine: SparkDFExecutionEngine = build_spark_engine(
spark=spark_session,
df=pd.DataFrame({"a": [1, 2, 1]}),
batch_id="my_id",
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
partial_metric = MetricConfiguration(
metric_name="column.max.aggregate_fn",
metric_domain_kwargs={"column": "non_existent_column"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
with pytest.raises(ge_exceptions.ExecutionEngineError) as eee:
# noinspection PyUnusedLocal
results = engine.resolve_metrics(
metrics_to_resolve=(partial_metric,), metrics=metrics
)
metrics.update(results)
assert (
str(eee.value)
== 'Error: The column "non_existent_column" in BatchData does not exist.'
)
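
# Map-style metrics chain three configurations: the ".condition" metric marks unexpected rows,
# the ".unexpected_count.aggregate_fn" partial wraps that condition in an aggregate expression
# (passed through the "unexpected_condition" dependency), and ".unexpected_count" executes it.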
def test_map_value_set_sa(sa):
engine = build_sa_engine(pd.DataFrame({"a": [1, 2, 3, 3, None]}), sa)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column_values.in_set.condition",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"value_set": [1, 2, 3]},
metric_dependencies={
"table.columns": table_columns_metric,
},
)
metrics = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
# Note: metric_dependencies is optional here in the config when called from a validator.
aggregate_partial = MetricConfiguration(
metric_name="column_values.in_set.unexpected_count.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"value_set": [1, 2, 3]},
metric_dependencies={"unexpected_condition": desired_metric},
)
metrics = engine.resolve_metrics(
metrics_to_resolve=(aggregate_partial,), metrics=metrics
)
desired_metric = MetricConfiguration(
metric_name="column_values.in_set.unexpected_count",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"value_set": [1, 2, 3]},
metric_dependencies={"metric_partial_fn": aggregate_partial},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
assert results == {desired_metric.id: 0}
def test_map_of_type_sa(sa):
eng = sa.create_engine("sqlite://")
df = pd.DataFrame({"a": [1, 2, 3, 3, None]})
df.to_sql(name="test", con=eng, index=False)
batch_data = SqlAlchemyBatchData(
execution_engine=eng, table_name="test", source_table_name="test"
)
engine = SqlAlchemyExecutionEngine(
engine=eng, batch_data_dict={"my_id": batch_data}
)
desired_metric = MetricConfiguration(
metric_name="table.column_types",
metric_domain_kwargs={},
metric_value_kwargs=None,
)
results = engine.resolve_metrics(metrics_to_resolve=(desired_metric,))
assert results[desired_metric.id][0]["name"] == "a"
assert isinstance(results[desired_metric.id][0]["type"], sa.FLOAT)
def test_map_value_set_spark(spark_session, basic_spark_df_execution_engine):
engine: SparkDFExecutionEngine = build_spark_engine(
spark=spark_session,
df=pd.DataFrame(
{"a": [1, 2, 3, 3, None]},
),
batch_id="my_id",
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
condition_metric = MetricConfiguration(
metric_name="column_values.in_set.condition",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"value_set": [1, 2, 3]},
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,), metrics=metrics
)
metrics.update(results)
# Note: metric_dependencies is optional here in the config when called from a validator.
aggregate_partial = MetricConfiguration(
metric_name="column_values.in_set.unexpected_count.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"value_set": [1, 2, 3]},
metric_dependencies={"unexpected_condition": condition_metric},
)
results = engine.resolve_metrics(
metrics_to_resolve=(aggregate_partial,), metrics=metrics
)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column_values.in_set.unexpected_count",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"value_set": [1, 2, 3]},
metric_dependencies={"metric_partial_fn": aggregate_partial},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert results == {desired_metric.id: 0}
    # Run the same computation again, this time building the Spark DataFrame directly from
    # pandas, so the None entry arrives as NaN (a float value) rather than a NULL. NaN is not
    # in the value set, so the unexpected count below rises from 0 to 1.
df = pd.DataFrame({"a": [1, 2, 3, 3, None]})
df = spark_session.createDataFrame(df)
engine = basic_spark_df_execution_engine
engine.load_batch_data(batch_id="my_id", batch_data=df)
condition_metric = MetricConfiguration(
metric_name="column_values.in_set.condition",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"value_set": [1, 2, 3]},
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,), metrics=metrics
)
metrics.update(results)
# Note: metric_dependencies is optional here in the config when called from a validator.
aggregate_partial = MetricConfiguration(
metric_name="column_values.in_set.unexpected_count.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"value_set": [1, 2, 3]},
metric_dependencies={"unexpected_condition": condition_metric},
)
results = engine.resolve_metrics(
metrics_to_resolve=(aggregate_partial,), metrics=metrics
)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column_values.in_set.unexpected_count",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"value_set": [1, 2, 3]},
metric_dependencies={"metric_partial_fn": aggregate_partial},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert results == {desired_metric.id: 1}
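
# "column_values.value_length.map" produces a per-row series of string lengths; null entries
# are dropped, so the expected series below has four values for the five-row column.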
def test_map_column_value_lengths_between_pd():
engine = build_pandas_engine(
pd.DataFrame({"a": ["a", "aaa", "bcbc", "defgh", None]})
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column_values.value_length.map",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
ser_expected_lengths = pd.Series([1, 3, 4, 5])
result_series, _, _ = results[desired_metric.id]
assert ser_expected_lengths.equals(result_series)
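
# "column_values.unique.condition" returns a boolean series in "negative logic": True marks
# rows whose value is duplicated. Downstream metrics ("unexpected_count", "unexpected_rows")
# consume that series through the "unexpected_condition" dependency.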
def test_map_unique_column_exists_pd():
engine = build_pandas_engine(pd.DataFrame({"a": [1, 2, 3, 3, 4, None]}))
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
condition_metric = MetricConfiguration(
metric_name="column_values.unique.condition",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
unexpected_count_metric = MetricConfiguration(
metric_name="column_values.unique.unexpected_count",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_count_metric,), metrics=metrics
)
metrics.update(results)
assert list(metrics[condition_metric.id][0]) == [False, False, True, True, False]
assert metrics[unexpected_count_metric.id] == 2
unexpected_rows_metric = MetricConfiguration(
metric_name="column_values.unique.unexpected_rows",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 1}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics
)
metrics.update(results)
assert metrics[unexpected_rows_metric.id]["a"].index == [2]
assert metrics[unexpected_rows_metric.id]["a"].values == [3]
def test_map_unique_column_does_not_exist_pd():
engine = build_pandas_engine(pd.DataFrame({"a": [1, 2, 3, 3, None]}))
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column_values.unique.condition",
metric_domain_kwargs={"column": "non_existent_column"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
with pytest.raises(ge_exceptions.ExecutionEngineError) as eee:
# noinspection PyUnusedLocal
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
assert (
str(eee.value)
== 'Error: The column "non_existent_column" in BatchData does not exist.'
)
def test_map_unique_column_exists_sa(sa):
engine = build_sa_engine(
pd.DataFrame(
{"a": [1, 2, 3, 3, None], "b": ["foo", "bar", "baz", "qux", "fish"]}
),
sa,
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
condition_metric = MetricConfiguration(
metric_name="column_values.unique.condition",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,), metrics=metrics
)
metrics.update(results)
# This is no longer a MAP_CONDITION because mssql does not support it. Instead, it is a WINDOW_CONDITION
#
# aggregate_fn = MetricConfiguration(
# metric_name="column_values.unique.unexpected_count.aggregate_fn",
# metric_domain_kwargs={"column": "a"},
# metric_value_kwargs=None,
# metric_dependencies={"unexpected_condition": condition_metric},
# )
# aggregate_fn_metrics = engine.resolve_metrics(
# metrics_to_resolve=(aggregate_fn,), metrics=metrics
# )
desired_metric = MetricConfiguration(
metric_name="column_values.unique.unexpected_count",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
# metric_dependencies={"metric_partial_fn": aggregate_fn},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,),
metrics=metrics, # metrics=aggregate_fn_metrics
)
metrics.update(results)
assert results[desired_metric.id] == 2
desired_metric = MetricConfiguration(
metric_name="column_values.unique.unexpected_values",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={
"result_format": {"result_format": "BASIC", "partial_unexpected_count": 20}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert results[desired_metric.id] == [3, 3]
desired_metric = MetricConfiguration(
metric_name="column_values.unique.unexpected_value_counts",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={
"result_format": {"result_format": "BASIC", "partial_unexpected_count": 20}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
assert results[desired_metric.id] == [(3, 2)]
desired_metric = MetricConfiguration(
metric_name="column_values.unique.unexpected_rows",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={
"result_format": {"result_format": "BASIC", "partial_unexpected_count": 20}
},
metric_dependencies={"unexpected_condition": condition_metric},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert results[desired_metric.id] == [(3, "baz"), (3, "qux")]
def test_map_unique_column_does_not_exist_sa(sa):
engine = build_sa_engine(
pd.DataFrame(
{"a": [1, 2, 3, 3, None], "b": ["foo", "bar", "baz", "qux", "fish"]}
),
sa,
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
condition_metric = MetricConfiguration(
metric_name="column_values.unique.condition",
metric_domain_kwargs={"column": "non_existent_column"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
with pytest.raises(ge_exceptions.ExecutionEngineError) as eee:
# noinspection PyUnusedLocal
metrics = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,), metrics=metrics
)
assert (
'Error: The column "non_existent_column" in BatchData does not exist.'
in str(eee.value)
)
def test_map_unique_column_exists_spark(spark_session):
engine: SparkDFExecutionEngine = build_spark_engine(
spark=spark_session,
df=pd.DataFrame(
{
"a": [1, 2, 3, 3, 4, None],
"b": [None, "foo", "bar", "baz", "qux", "fish"],
}
),
batch_id="my_id",
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
condition_metric = MetricConfiguration(
metric_name="column_values.unique.condition",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,), metrics=metrics
)
metrics.update(results)
# unique is a *window* function so does not use the aggregate_fn version of unexpected count
desired_metric = MetricConfiguration(
metric_name="column_values.unique.unexpected_count",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert results[desired_metric.id] == 2
desired_metric = MetricConfiguration(
metric_name="column_values.unique.unexpected_values",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={
"result_format": {"result_format": "BASIC", "partial_unexpected_count": 20}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert results[desired_metric.id] == [3, 3]
desired_metric = MetricConfiguration(
metric_name="column_values.unique.unexpected_value_counts",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={
"result_format": {"result_format": "BASIC", "partial_unexpected_count": 20}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert results[desired_metric.id] == [(3, 2)]
desired_metric = MetricConfiguration(
metric_name="column_values.unique.unexpected_rows",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={
"result_format": {"result_format": "BASIC", "partial_unexpected_count": 20}
},
metric_dependencies={"unexpected_condition": condition_metric},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert results[desired_metric.id] == [(3, "bar"), (3, "baz")]
def test_map_unique_column_does_not_exist_spark(spark_session):
engine: SparkDFExecutionEngine = build_spark_engine(
spark=spark_session,
df=pd.DataFrame(
{
"a": [1, 2, 3, 3, 4, None],
"b": [None, "foo", "bar", "baz", "qux", "fish"],
}
),
batch_id="my_id",
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
condition_metric = MetricConfiguration(
metric_name="column_values.unique.condition",
metric_domain_kwargs={"column": "non_existent_column"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
with pytest.raises(ge_exceptions.ExecutionEngineError) as eee:
# noinspection PyUnusedLocal
metrics = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,), metrics=metrics
)
assert (
str(eee.value)
== 'Error: The column "non_existent_column" in BatchData does not exist.'
)
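
# The z-score metrics are layered: "column.mean" and "column.standard_deviation" feed
# "column_values.z_score.map", which feeds ".under_threshold.condition", which in turn feeds
# ".under_threshold.unexpected_count".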
def test_z_score_under_threshold_pd():
df = pd.DataFrame({"a": [1, 2, 3, None]})
engine = PandasExecutionEngine(batch_data_dict={"my_id": df})
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
mean = MetricConfiguration(
metric_name="column.mean",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
stdev = MetricConfiguration(
metric_name="column.standard_deviation",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
desired_metrics = (mean, stdev)
results = engine.resolve_metrics(
metrics_to_resolve=desired_metrics, metrics=metrics
)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column_values.z_score.map",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"column.standard_deviation": stdev,
"column.mean": mean,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column_values.z_score.under_threshold.condition",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"double_sided": True, "threshold": 2},
metric_dependencies={
"column_values.z_score.map": desired_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
assert list(results[desired_metric.id][0]) == [False, False, False]
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column_values.z_score.under_threshold.unexpected_count",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"double_sided": True, "threshold": 2},
metric_dependencies={"unexpected_condition": desired_metric},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
assert results[desired_metric.id] == 0
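
# The Spark variant inserts the aggregate-partial step twice: once for the mean and standard
# deviation, and once for the final unexpected count (".unexpected_count.aggregate_fn"
# followed by ".unexpected_count").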
def test_z_score_under_threshold_spark(spark_session):
engine: SparkDFExecutionEngine = build_spark_engine(
spark=spark_session,
df=pd.DataFrame(
{"a": [1, 2, 3, 3, None]},
),
batch_id="my_id",
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
mean = MetricConfiguration(
metric_name="column.mean.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
stdev = MetricConfiguration(
metric_name="column.standard_deviation.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
desired_metrics = (mean, stdev)
results = engine.resolve_metrics(
metrics_to_resolve=desired_metrics, metrics=metrics
)
metrics.update(results)
mean = MetricConfiguration(
metric_name="column.mean",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={"metric_partial_fn": mean},
)
stdev = MetricConfiguration(
metric_name="column.standard_deviation",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"metric_partial_fn": stdev,
"table.columns": table_columns_metric,
},
)
desired_metrics = (mean, stdev)
results = engine.resolve_metrics(
metrics_to_resolve=desired_metrics, metrics=metrics
)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column_values.z_score.map",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"column.standard_deviation": stdev,
"column.mean": mean,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column_values.z_score.under_threshold.condition",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"double_sided": True, "threshold": 2},
metric_dependencies={
"column_values.z_score.map": desired_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column_values.z_score.under_threshold.unexpected_count.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"double_sided": True, "threshold": 2},
metric_dependencies={"unexpected_condition": desired_metric},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column_values.z_score.under_threshold.unexpected_count",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"double_sided": True, "threshold": 2},
metric_dependencies={"metric_partial_fn": desired_metric},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
assert results[desired_metric.id] == 0
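
# Table-level metrics do not take a "column" domain; passing one here still resolves the row
# count, and the engine logs a message about the unexpected key, which the test reads back
# from caplog.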
def test_table_metric_pd(caplog):
df = pd.DataFrame({"a": [1, 2, 3, 3, None], "b": [1, 2, 3, 3, None]})
engine = PandasExecutionEngine(batch_data_dict={"my_id": df})
desired_metric = MetricConfiguration(
metric_name="table.row_count",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
)
results = engine.resolve_metrics(metrics_to_resolve=(desired_metric,))
assert results == {desired_metric.id: 5}
assert (
'Unexpected key(s) "column" found in domain_kwargs for domain type "table"'
in caplog.text
)
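
# Column-pair metrics use "column_A"/"column_B" domain kwargs. Each engine variant below first
# asserts a passing case (columns "b" and "c" are identical) and then a failing case (columns
# "a" and "d" differ in three rows), restoring the saved metrics cache in between.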
def test_map_column_pairs_equal_metric_pd():
engine = build_pandas_engine(
pd.DataFrame(
data={
"a": [0, 1, 9, 2],
"b": [5, 4, 3, 6],
"c": [5, 4, 3, 6],
"d": [7, 8, 9, 0],
}
)
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
"""
Two tests:
1. Pass -- no unexpected rows.
2. Fail -- three unexpected rows.
"""
# Save original metrics for testing unexpected results.
metrics_save: dict = copy.deepcopy(metrics)
metric_name: str = "column_pair_values.equal"
condition_metric_name: str = f"{metric_name}.condition"
unexpected_count_metric_name: str = f"{metric_name}.unexpected_count"
unexpected_rows_metric_name: str = f"{metric_name}.unexpected_rows"
unexpected_values_metric_name: str = f"{metric_name}.unexpected_values"
# First, assert Pass (no unexpected results).
condition_metric = MetricConfiguration(
metric_name=condition_metric_name,
metric_domain_kwargs={
"column_A": "b",
"column_B": "c",
},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
unexpected_count_metric = MetricConfiguration(
metric_name=unexpected_count_metric_name,
metric_domain_kwargs={
"column_A": "b",
"column_B": "c",
},
metric_value_kwargs=None,
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_count_metric,), metrics=metrics
)
metrics.update(results)
# Condition metrics return "negative logic" series.
assert list(metrics[condition_metric.id][0]) == [False, False, False, False]
assert metrics[unexpected_count_metric.id] == 0
unexpected_rows_metric = MetricConfiguration(
metric_name=unexpected_rows_metric_name,
metric_domain_kwargs={
"column_A": "b",
"column_B": "c",
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics
)
metrics.update(results)
assert metrics[unexpected_rows_metric.id].empty
assert len(metrics[unexpected_rows_metric.id].columns) == 4
unexpected_values_metric = MetricConfiguration(
metric_name=unexpected_values_metric_name,
metric_domain_kwargs={
"column_A": "b",
"column_B": "c",
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_values_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_values_metric.id]) == 0
assert metrics[unexpected_values_metric.id] == []
# Restore from saved original metrics in order to start fresh on testing for unexpected results.
metrics = copy.deepcopy(metrics_save)
    # Second, assert Fail (three unexpected results).
condition_metric = MetricConfiguration(
metric_name=condition_metric_name,
metric_domain_kwargs={
"column_A": "a",
"column_B": "d",
},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
unexpected_count_metric = MetricConfiguration(
metric_name=unexpected_count_metric_name,
metric_domain_kwargs={
"column_A": "a",
"column_B": "d",
},
metric_value_kwargs=None,
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_count_metric,), metrics=metrics
)
metrics.update(results)
# Condition metrics return "negative logic" series.
assert list(metrics[condition_metric.id][0]) == [True, True, False, True]
assert metrics[unexpected_count_metric.id] == 3
unexpected_rows_metric = MetricConfiguration(
metric_name=unexpected_rows_metric_name,
metric_domain_kwargs={
"column_A": "a",
"column_B": "d",
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics
)
metrics.update(results)
assert metrics[unexpected_rows_metric.id].equals(
pd.DataFrame(
data={"a": [0, 1, 2], "b": [5, 4, 6], "c": [5, 4, 6], "d": [7, 8, 0]},
index=pd.Index([0, 1, 3]),
)
)
assert len(metrics[unexpected_rows_metric.id].columns) == 4
pd.testing.assert_index_equal(
metrics[unexpected_rows_metric.id].index, pd.Index([0, 1, 3])
)
unexpected_values_metric = MetricConfiguration(
metric_name=unexpected_values_metric_name,
metric_domain_kwargs={
"column_A": "a",
"column_B": "d",
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_values_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_values_metric.id]) == 3
assert metrics[unexpected_values_metric.id] == [(0, 7), (1, 8), (2, 0)]
def test_map_column_pairs_equal_metric_sa(sa):
engine = build_sa_engine(
pd.DataFrame(
data={
"a": [0, 1, 9, 2],
"b": [5, 4, 3, 6],
"c": [5, 4, 3, 6],
"d": [7, 8, 9, 0],
}
),
sa,
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
"""
Two tests:
1. Pass -- no unexpected rows.
2. Fail -- three unexpected rows.
"""
# Save original metrics for testing unexpected results.
metrics_save: dict = copy.deepcopy(metrics)
metric_name: str = "column_pair_values.equal"
condition_metric_name: str = f"{metric_name}.condition"
unexpected_count_metric_name: str = f"{metric_name}.unexpected_count"
unexpected_rows_metric_name: str = f"{metric_name}.unexpected_rows"
unexpected_values_metric_name: str = f"{metric_name}.unexpected_values"
# First, assert Pass (no unexpected results).
condition_metric = MetricConfiguration(
metric_name=condition_metric_name,
metric_domain_kwargs={
"column_A": "b",
"column_B": "c",
},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
unexpected_count_metric = MetricConfiguration(
metric_name=unexpected_count_metric_name,
metric_domain_kwargs={
"column_A": "b",
"column_B": "c",
},
metric_value_kwargs=None,
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_count_metric,), metrics=metrics
)
metrics.update(results)
# Condition metrics return "negative logic" series.
assert isinstance(metrics[condition_metric.id][0], sa.sql.elements.AsBoolean)
assert metrics[unexpected_count_metric.id] == 0
unexpected_rows_metric = MetricConfiguration(
metric_name=unexpected_rows_metric_name,
metric_domain_kwargs={
"column_A": "b",
"column_B": "c",
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_rows_metric.id]) == 0
unexpected_values_metric = MetricConfiguration(
metric_name=unexpected_values_metric_name,
metric_domain_kwargs={
"column_A": "b",
"column_B": "c",
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_values_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_values_metric.id]) == 0
assert metrics[unexpected_values_metric.id] == []
# Restore from saved original metrics in order to start fresh on testing for unexpected results.
metrics = copy.deepcopy(metrics_save)
    # Second, assert Fail (three unexpected results).
condition_metric = MetricConfiguration(
metric_name=condition_metric_name,
metric_domain_kwargs={
"column_A": "a",
"column_B": "d",
},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
unexpected_count_metric = MetricConfiguration(
metric_name=unexpected_count_metric_name,
metric_domain_kwargs={
"column_A": "a",
"column_B": "d",
},
metric_value_kwargs=None,
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_count_metric,), metrics=metrics
)
metrics.update(results)
# Condition metrics return "negative logic" series.
assert isinstance(metrics[condition_metric.id][0], sa.sql.elements.AsBoolean)
assert metrics[unexpected_count_metric.id] == 3
unexpected_rows_metric = MetricConfiguration(
metric_name=unexpected_rows_metric_name,
metric_domain_kwargs={
"column_A": "a",
"column_B": "d",
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics
)
metrics.update(results)
assert metrics[unexpected_rows_metric.id] == [
(0, 5, 5, 7),
(1, 4, 4, 8),
(2, 6, 6, 0),
]
unexpected_values_metric = MetricConfiguration(
metric_name=unexpected_values_metric_name,
metric_domain_kwargs={
"column_A": "a",
"column_B": "d",
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_values_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_values_metric.id]) == 3
assert metrics[unexpected_values_metric.id] == [(0, 7), (1, 8), (2, 0)]
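
# "column_pair_values.a_greater_than_b" with "or_equal": True passes wherever A >= B; rows with
# a missing value in either column are excluded by "ignore_row_if": "either_value_is_missing".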
def test_map_column_pairs_greater_metric_pd():
df = pd.DataFrame({"a": [2, 3, 4, None, 3, None], "b": [1, 2, 3, None, 3, 5]})
engine = PandasExecutionEngine(batch_data_dict={"my_id": df})
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
condition_metric = MetricConfiguration(
metric_name="column_pair_values.a_greater_than_b.condition",
metric_domain_kwargs={
"column_A": "a",
"column_B": "b",
"ignore_row_if": "either_value_is_missing",
},
metric_value_kwargs={
"or_equal": True,
"result_format": {
"result_format": "SUMMARY",
"partial_unexpected_count": 6,
},
},
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
assert (
results[condition_metric.id][0]
.reset_index(drop=True)
.equals(pd.Series([False, False, False, False]))
)
unexpected_values_metric = MetricConfiguration(
metric_name="column_pair_values.a_greater_than_b.unexpected_values",
metric_domain_kwargs={
"column_A": "a",
"column_B": "b",
"ignore_row_if": "either_value_is_missing",
},
metric_value_kwargs={
"or_equal": True,
"result_format": {
"result_format": "SUMMARY",
"partial_unexpected_count": 6,
},
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_values_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_values_metric.id]) == 0
assert metrics[unexpected_values_metric.id] == []
def test_map_column_pairs_greater_metric_sa(sa):
engine = build_sa_engine(
pd.DataFrame(
data={
"a": [2, 3, 4, None, 3, None],
"b": [1, 2, 3, None, 3, 5],
}
),
sa,
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
condition_metric = MetricConfiguration(
metric_name="column_pair_values.a_greater_than_b.condition",
metric_domain_kwargs={
"column_A": "a",
"column_B": "b",
"ignore_row_if": "either_value_is_missing",
},
metric_value_kwargs={
"or_equal": True,
"result_format": {
"result_format": "SUMMARY",
"partial_unexpected_count": 6,
},
},
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
assert isinstance(metrics[condition_metric.id][0], sa.sql.elements.AsBoolean)
unexpected_values_metric = MetricConfiguration(
metric_name="column_pair_values.a_greater_than_b.unexpected_values",
metric_domain_kwargs={
"column_A": "a",
"column_B": "b",
"ignore_row_if": "either_value_is_missing",
},
metric_value_kwargs={
"or_equal": True,
"result_format": {
"result_format": "SUMMARY",
"partial_unexpected_count": 6,
},
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_values_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_values_metric.id]) == 0
assert metrics[unexpected_values_metric.id] == []
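
# "column_pair_values.in_set" checks each (A, B) pair against "value_pairs_set"; here the first
# surviving pair, (10, 1), is not in the set, so only that row is flagged in the condition series.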
def test_map_column_pairs_in_set_metric_pd():
df = pd.DataFrame({"a": [10, 3, 4, None, 3, None], "b": [1, 2, 3, None, 3, 5]})
engine = PandasExecutionEngine(batch_data_dict={"my_id": df})
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
condition_metric = MetricConfiguration(
metric_name="column_pair_values.in_set.condition",
metric_domain_kwargs={
"column_A": "a",
"column_B": "b",
"ignore_row_if": "either_value_is_missing",
},
metric_value_kwargs={
"value_pairs_set": [(2, 1), (3, 2), (4, 3), (3, 3)],
},
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
assert (
results[condition_metric.id][0]
.reset_index(drop=True)
.equals(pd.Series([True, False, False, False]))
)
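
# Spark resolves "table.row_count" with the same partial pattern: "table.row_count.aggregate_fn"
# first, then "table.row_count" via the "metric_partial_fn" dependency.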
def test_table_metric_spark(spark_session):
engine: SparkDFExecutionEngine = build_spark_engine(
spark=spark_session,
df=pd.DataFrame(
{"a": [1, 2, 1]},
),
batch_id="my_id",
)
desired_metric = MetricConfiguration(
metric_name="table.row_count.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
)
results = engine.resolve_metrics(metrics_to_resolve=(desired_metric,))
desired_metric = MetricConfiguration(
metric_name="table.row_count",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={"metric_partial_fn": desired_metric},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=results
)
assert results == {desired_metric.id: 3}
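
# "column.median" on Spark declares a dependency on "table.row_count", which is itself resolved
# through its aggregate partial first.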
def test_median_metric_spark(spark_session):
engine: SparkDFExecutionEngine = build_spark_engine(
spark=spark_session,
df=pd.DataFrame(
{"a": [1, 2, 3]},
),
batch_id="my_id",
)
desired_metric = MetricConfiguration(
metric_name="table.row_count.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
)
metrics = engine.resolve_metrics(metrics_to_resolve=(desired_metric,))
row_count = MetricConfiguration(
metric_name="table.row_count",
metric_domain_kwargs={},
metric_value_kwargs=None,
metric_dependencies={"metric_partial_fn": desired_metric},
)
metrics = engine.resolve_metrics(metrics_to_resolve=(row_count,), metrics=metrics)
desired_metric = MetricConfiguration(
metric_name="column.median",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={"table.row_count": row_count},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
assert results == {desired_metric.id: 2}
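
# "column.distinct_values" is derived from "column.value_counts", which is declared as a metric
# dependency; the Spark, SQLAlchemy, and pandas variants below share this structure.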
def test_distinct_metric_spark(spark_session):
engine: SparkDFExecutionEngine = build_spark_engine(
spark=spark_session,
df=pd.DataFrame(
{"a": [1, 2, 1, 2, 3, 3, None]},
),
batch_id="my_id",
)
desired_metric = MetricConfiguration(
metric_name="column.value_counts",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"sort": "value", "collate": None},
)
metrics = engine.resolve_metrics(metrics_to_resolve=(desired_metric,))
assert pd.Series(index=[1, 2, 3], data=[2, 2, 2]).equals(metrics[desired_metric.id])
desired_metric = MetricConfiguration(
metric_name="column.distinct_values",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={"column.value_counts": desired_metric},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
assert results == {desired_metric.id: {1, 2, 3}}
def test_distinct_metric_sa(sa):
engine = build_sa_engine(
pd.DataFrame({"a": [1, 2, 1, 2, 3, 3], "b": [4, 4, 4, 4, 4, 4]}), sa
)
desired_metric = MetricConfiguration(
metric_name="column.value_counts",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"sort": "value", "collate": None},
)
desired_metric_b = MetricConfiguration(
metric_name="column.value_counts",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs={"sort": "value", "collate": None},
)
metrics = engine.resolve_metrics(
metrics_to_resolve=(desired_metric, desired_metric_b)
)
assert pd.Series(index=[1, 2, 3], data=[2, 2, 2]).equals(metrics[desired_metric.id])
assert pd.Series(index=[4], data=[6]).equals(metrics[desired_metric_b.id])
desired_metric = MetricConfiguration(
metric_name="column.distinct_values",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={"column.value_counts": desired_metric},
)
desired_metric_b = MetricConfiguration(
metric_name="column.distinct_values",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs=None,
metric_dependencies={"column.value_counts": desired_metric_b},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric, desired_metric_b), metrics=metrics
)
assert results[desired_metric.id] == {1, 2, 3}
assert results[desired_metric_b.id] == {4}
def test_distinct_metric_pd():
engine = build_pandas_engine(pd.DataFrame({"a": [1, 2, 1, 2, 3, 3]}))
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
desired_metric = MetricConfiguration(
metric_name="column.value_counts",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs={"sort": "value", "collate": None},
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert pd.Series(index=[1, 2, 3], data=[2, 2, 2]).equals(metrics[desired_metric.id])
desired_metric = MetricConfiguration(
metric_name="column.distinct_values",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"column.value_counts": desired_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(desired_metric,), metrics=metrics
)
metrics.update(results)
assert results == {desired_metric.id: {1, 2, 3}}
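
# The batch-aggregate tests check that several aggregate partials over the same table are
# bundled together; the DEBUG log record asserted at the end confirms all four metrics were
# computed against a single domain.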
def test_batch_aggregate_metrics_sa(caplog, sa):
import datetime
engine = build_sa_engine(
pd.DataFrame({"a": [1, 2, 1, 2, 3, 3], "b": [4, 4, 4, 4, 4, 4]}), sa
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
desired_metric_1 = MetricConfiguration(
metric_name="column.max.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
desired_metric_2 = MetricConfiguration(
metric_name="column.min.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
desired_metric_3 = MetricConfiguration(
metric_name="column.max.aggregate_fn",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
desired_metric_4 = MetricConfiguration(
metric_name="column.min.aggregate_fn",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(
desired_metric_1,
desired_metric_2,
desired_metric_3,
desired_metric_4,
),
metrics=metrics,
)
metrics.update(results)
desired_metric_1 = MetricConfiguration(
metric_name="column.max",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"metric_partial_fn": desired_metric_1,
"table.columns": table_columns_metric,
},
)
desired_metric_2 = MetricConfiguration(
metric_name="column.min",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"metric_partial_fn": desired_metric_2,
"table.columns": table_columns_metric,
},
)
desired_metric_3 = MetricConfiguration(
metric_name="column.max",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs=None,
metric_dependencies={
"metric_partial_fn": desired_metric_3,
"table.columns": table_columns_metric,
},
)
desired_metric_4 = MetricConfiguration(
metric_name="column.min",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs=None,
metric_dependencies={
"metric_partial_fn": desired_metric_4,
"table.columns": table_columns_metric,
},
)
caplog.clear()
caplog.set_level(logging.DEBUG, logger="great_expectations")
start = datetime.datetime.now()
results = engine.resolve_metrics(
metrics_to_resolve=(
desired_metric_1,
desired_metric_2,
desired_metric_3,
desired_metric_4,
),
metrics=metrics,
)
metrics.update(results)
end = datetime.datetime.now()
print("t1")
print(end - start)
assert results[desired_metric_1.id] == 3
assert results[desired_metric_2.id] == 1
assert results[desired_metric_3.id] == 4
assert results[desired_metric_4.id] == 4
# Check that all four of these metrics were computed on a single domain
found_message = False
for record in caplog.records:
if (
record.message
== "SqlAlchemyExecutionEngine computed 4 metrics on domain_id ()"
):
found_message = True
assert found_message
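
# The Spark engine is exercised the same way; only the engine name in the expected log message
# changes.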
def test_batch_aggregate_metrics_spark(caplog, spark_session):
import datetime
engine: SparkDFExecutionEngine = build_spark_engine(
spark=spark_session,
df=pd.DataFrame(
{"a": [1, 2, 1, 2, 3, 3], "b": [4, 4, 4, 4, 4, 4]},
),
batch_id="my_id",
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
desired_metric_1 = MetricConfiguration(
metric_name="column.max.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
desired_metric_2 = MetricConfiguration(
metric_name="column.min.aggregate_fn",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
desired_metric_3 = MetricConfiguration(
metric_name="column.max.aggregate_fn",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
desired_metric_4 = MetricConfiguration(
metric_name="column.min.aggregate_fn",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(
desired_metric_1,
desired_metric_2,
desired_metric_3,
desired_metric_4,
),
metrics=metrics,
)
metrics.update(results)
desired_metric_1 = MetricConfiguration(
metric_name="column.max",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={"metric_partial_fn": desired_metric_1},
)
desired_metric_2 = MetricConfiguration(
metric_name="column.min",
metric_domain_kwargs={"column": "a"},
metric_value_kwargs=None,
metric_dependencies={"metric_partial_fn": desired_metric_2},
)
desired_metric_3 = MetricConfiguration(
metric_name="column.max",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs=None,
metric_dependencies={"metric_partial_fn": desired_metric_3},
)
desired_metric_4 = MetricConfiguration(
metric_name="column.min",
metric_domain_kwargs={"column": "b"},
metric_value_kwargs=None,
metric_dependencies={"metric_partial_fn": desired_metric_4},
)
start = datetime.datetime.now()
caplog.clear()
caplog.set_level(logging.DEBUG, logger="great_expectations")
results = engine.resolve_metrics(
metrics_to_resolve=(
desired_metric_1,
desired_metric_2,
desired_metric_3,
desired_metric_4,
),
metrics=metrics,
)
metrics.update(results)
end = datetime.datetime.now()
print(end - start)
assert results[desired_metric_1.id] == 3
assert results[desired_metric_2.id] == 1
assert results[desired_metric_3.id] == 4
assert results[desired_metric_4.id] == 4
# Check that all four of these metrics were computed on a single domain
found_message = False
for record in caplog.records:
if (
record.message
== "SparkDFExecutionEngine computed 4 metrics on domain_id ()"
):
found_message = True
assert found_message
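
# Multicolumn metrics use a "column_list" domain; "multicolumn_sum.equal" flags rows whose
# values across the listed columns do not sum to "sum_total" (negative logic, as with the
# other map metrics).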
def test_map_multicolumn_sum_equal_pd():
engine = build_pandas_engine(
pd.DataFrame(
data={"a": [0, 1, 2], "b": [5, 4, 3], "c": [0, 0, 1], "d": [7, 8, 9]}
)
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
"""
Two tests:
1. Pass -- no unexpected rows.
2. Fail -- one unexpected row.
"""
# Save original metrics for testing unexpected results.
metrics_save: dict = copy.deepcopy(metrics)
metric_name: str = "multicolumn_sum.equal"
condition_metric_name: str = f"{metric_name}.condition"
unexpected_count_metric_name: str = f"{metric_name}.unexpected_count"
unexpected_rows_metric_name: str = f"{metric_name}.unexpected_rows"
unexpected_values_metric_name: str = f"{metric_name}.unexpected_values"
# First, assert Pass (no unexpected results).
condition_metric = MetricConfiguration(
metric_name=condition_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b"],
},
metric_value_kwargs={
"sum_total": 5,
},
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
unexpected_count_metric = MetricConfiguration(
metric_name=unexpected_count_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b"],
},
metric_value_kwargs=None,
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_count_metric,), metrics=metrics
)
metrics.update(results)
# Condition metrics return "negative logic" series.
assert list(metrics[condition_metric.id][0]) == [False, False, False]
assert metrics[unexpected_count_metric.id] == 0
unexpected_rows_metric = MetricConfiguration(
metric_name=unexpected_rows_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b"],
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics
)
metrics.update(results)
assert metrics[unexpected_rows_metric.id].empty
assert len(metrics[unexpected_rows_metric.id].columns) == 4
unexpected_values_metric = MetricConfiguration(
metric_name=unexpected_values_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b"],
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_values_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_values_metric.id]) == 0
assert metrics[unexpected_values_metric.id] == []
# Restore from saved original metrics in order to start fresh on testing for unexpected results.
metrics = copy.deepcopy(metrics_save)
# Second, assert Fail (one unexpected result).
condition_metric = MetricConfiguration(
metric_name=condition_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
},
metric_value_kwargs={
"sum_total": 5,
},
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
unexpected_count_metric = MetricConfiguration(
metric_name=unexpected_count_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
},
metric_value_kwargs=None,
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_count_metric,), metrics=metrics
)
metrics.update(results)
# Condition metrics return "negative logic" series.
assert list(metrics[condition_metric.id][0]) == [False, False, True]
assert metrics[unexpected_count_metric.id] == 1
unexpected_rows_metric = MetricConfiguration(
metric_name=unexpected_rows_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics
)
metrics.update(results)
assert metrics[unexpected_rows_metric.id].equals(
pd.DataFrame(data={"a": [2], "b": [3], "c": [1], "d": [9]}, index=[2])
)
assert len(metrics[unexpected_rows_metric.id].columns) == 4
pd.testing.assert_index_equal(
metrics[unexpected_rows_metric.id].index, pd.Index([2])
)
unexpected_values_metric = MetricConfiguration(
metric_name=unexpected_values_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_values_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_values_metric.id]) == 1
assert metrics[unexpected_values_metric.id] == [{"a": 2, "b": 3, "c": 1}]
def test_map_multicolumn_sum_equal_sa(sa):
engine = build_sa_engine(
pd.DataFrame(
data={"a": [0, 1, 2], "b": [5, 4, 3], "c": [0, 0, 1], "d": [7, 8, 9]}
),
sa,
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
"""
Two tests:
1. Pass -- no unexpected rows.
2. Fail -- one unexpected row.
"""
# Save original metrics for testing unexpected results.
metrics_save: dict = copy.deepcopy(metrics)
metric_name: str = "multicolumn_sum.equal"
condition_metric_name: str = f"{metric_name}.condition"
unexpected_count_metric_name: str = f"{metric_name}.unexpected_count"
unexpected_rows_metric_name: str = f"{metric_name}.unexpected_rows"
unexpected_values_metric_name: str = f"{metric_name}.unexpected_values"
# First, assert Pass (no unexpected results).
condition_metric = MetricConfiguration(
metric_name=condition_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b"],
},
metric_value_kwargs={
"sum_total": 5,
},
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
unexpected_count_metric = MetricConfiguration(
metric_name=unexpected_count_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b"],
},
metric_value_kwargs=None,
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_count_metric,), metrics=metrics
)
metrics.update(results)
# Condition metrics return "negative logic" series.
assert isinstance(metrics[condition_metric.id][0], sa.sql.elements.AsBoolean)
assert metrics[unexpected_count_metric.id] == 0
unexpected_rows_metric = MetricConfiguration(
metric_name=unexpected_rows_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b"],
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_rows_metric.id]) == 0
unexpected_values_metric = MetricConfiguration(
metric_name=unexpected_values_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b"],
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_values_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_values_metric.id]) == 0
assert metrics[unexpected_values_metric.id] == []
# Restore from saved original metrics in order to start fresh on testing for unexpected results.
metrics = copy.deepcopy(metrics_save)
# Second, assert Fail (one unexpected result).
condition_metric = MetricConfiguration(
metric_name=condition_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
},
metric_value_kwargs={
"sum_total": 5,
},
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
unexpected_count_metric = MetricConfiguration(
metric_name=unexpected_count_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
},
metric_value_kwargs=None,
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_count_metric,), metrics=metrics
)
metrics.update(results)
# Condition metrics return "negative logic" series.
assert isinstance(metrics[condition_metric.id][0], sa.sql.elements.AsBoolean)
assert metrics[unexpected_count_metric.id] == 1
unexpected_rows_metric = MetricConfiguration(
metric_name=unexpected_rows_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics
)
metrics.update(results)
assert metrics[unexpected_rows_metric.id] == [(2, 3, 1, 9)]
assert len(metrics[unexpected_rows_metric.id][0]) == 4
unexpected_values_metric = MetricConfiguration(
metric_name=unexpected_values_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_values_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_values_metric.id]) == 1
assert metrics[unexpected_values_metric.id] == [{"a": 2, "b": 3, "c": 1}]
def test_map_compound_columns_unique_pd():
engine = build_pandas_engine(
pd.DataFrame(data={"a": [0, 1, 1], "b": [1, 2, 3], "c": [0, 2, 2]})
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
"""
Two tests:
1. Pass -- no duplicated compound column keys.
2. Fail -- two duplicated compound column keys.
"""
# Save original metrics for testing unexpected results.
metrics_save: dict = copy.deepcopy(metrics)
metric_name: str = "compound_columns.unique"
condition_metric_name: str = f"{metric_name}.condition"
unexpected_count_metric_name: str = f"{metric_name}.unexpected_count"
unexpected_rows_metric_name: str = f"{metric_name}.unexpected_rows"
unexpected_values_metric_name: str = f"{metric_name}.unexpected_values"
# First, assert Pass (no unexpected results).
condition_metric = MetricConfiguration(
metric_name=condition_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b"],
},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
unexpected_count_metric = MetricConfiguration(
metric_name=unexpected_count_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b"],
},
metric_value_kwargs=None,
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_count_metric,), metrics=metrics
)
metrics.update(results)
# Condition metrics return "negative logic" series.
assert list(metrics[condition_metric.id][0]) == [False, False, False]
assert metrics[unexpected_count_metric.id] == 0
unexpected_rows_metric = MetricConfiguration(
metric_name=unexpected_rows_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b"],
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics
)
metrics.update(results)
assert metrics[unexpected_rows_metric.id].empty
assert len(metrics[unexpected_rows_metric.id].columns) == 3
unexpected_values_metric = MetricConfiguration(
metric_name=unexpected_values_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b"],
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_values_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_values_metric.id]) == 0
assert metrics[unexpected_values_metric.id] == []
# Restore from saved original metrics in order to start fresh on testing for unexpected results.
metrics = copy.deepcopy(metrics_save)
# Second, assert Fail (one unexpected result).
condition_metric = MetricConfiguration(
metric_name=condition_metric_name,
metric_domain_kwargs={
"column_list": ["a", "c"],
},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
unexpected_count_metric = MetricConfiguration(
metric_name=unexpected_count_metric_name,
metric_domain_kwargs={
"column_list": ["a", "c"],
},
metric_value_kwargs=None,
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_count_metric,), metrics=metrics
)
metrics.update(results)
# Condition metrics return "negative logic" series.
assert list(metrics[condition_metric.id][0]) == [False, True, True]
assert metrics[unexpected_count_metric.id] == 2
unexpected_rows_metric = MetricConfiguration(
metric_name=unexpected_rows_metric_name,
metric_domain_kwargs={
"column_list": ["a", "c"],
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics
)
metrics.update(results)
assert metrics[unexpected_rows_metric.id].equals(
pd.DataFrame(data={"a": [1, 1], "b": [2, 3], "c": [2, 2]}, index=[1, 2])
)
assert len(metrics[unexpected_rows_metric.id].columns) == 3
pd.testing.assert_index_equal(
metrics[unexpected_rows_metric.id].index, pd.Index([1, 2])
)
unexpected_values_metric = MetricConfiguration(
metric_name=unexpected_values_metric_name,
metric_domain_kwargs={
"column_list": ["a", "c"],
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_values_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_values_metric.id]) == 2
assert metrics[unexpected_values_metric.id] == [{"a": 1, "c": 2}, {"a": 1, "c": 2}]
def test_map_select_column_values_unique_within_record_pd():
engine = build_pandas_engine(
pd.DataFrame(
data={
"a": [1, 1, 8, 1, 4, None, None, 7],
"b": [1, 2, 2, 2, 4, None, None, 1],
"c": [2, 3, 7, 3, 4, None, 9, 0],
}
)
)
metrics: dict = {}
table_columns_metric: MetricConfiguration
results: dict
table_columns_metric, results = get_table_columns_metric(engine=engine)
metrics.update(results)
# Save original metrics for testing unexpected results.
metrics_save: dict = copy.deepcopy(metrics)
metric_name: str = "select_column_values.unique.within_record"
condition_metric_name: str = f"{metric_name}.condition"
unexpected_count_metric_name: str = f"{metric_name}.unexpected_count"
unexpected_rows_metric_name: str = f"{metric_name}.unexpected_rows"
unexpected_values_metric_name: str = f"{metric_name}.unexpected_values"
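# Hedged note (not part of the original test): "select_column_values.unique.within_record"
# flags a row when the values of the selected columns are not all distinct within that row.
# "ignore_row_if" controls how missing values are handled: with "all_values_are_missing"
# only the fully-null row (index 5) is skipped, so rows 0 (1, 1, 2), 4 (4, 4, 4) and
# 6 (NaN, NaN, 9) are unexpected; with "any_value_is_missing" rows 5 and 6 are both
# skipped and only rows 0 and 4 remain unexpected, which is what the two halves of this
# test assert.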
condition_metric = MetricConfiguration(
metric_name=condition_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
"ignore_row_if": "all_values_are_missing",
},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
unexpected_count_metric = MetricConfiguration(
metric_name=unexpected_count_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
"ignore_row_if": "all_values_are_missing",
},
metric_value_kwargs=None,
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_count_metric,), metrics=metrics
)
metrics.update(results)
# Condition metrics return "negative logic" series.
assert list(metrics[condition_metric.id][0]) == [
True,
False,
False,
False,
True,
True,
False,
]
assert metrics[unexpected_count_metric.id] == 3
unexpected_rows_metric = MetricConfiguration(
metric_name=unexpected_rows_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
"ignore_row_if": "all_values_are_missing",
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 8}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics
)
metrics.update(results)
assert metrics[unexpected_rows_metric.id].equals(
pd.DataFrame(
data={"a": [1.0, 4.0, None], "b": [1.0, 4.0, None], "c": [2.0, 4.0, 9.0]},
index=[0, 4, 6],
)
)
assert len(metrics[unexpected_rows_metric.id].columns) == 3
pd.testing.assert_index_equal(
metrics[unexpected_rows_metric.id].index, pd.Index([0, 4, 6])
)
unexpected_values_metric = MetricConfiguration(
metric_name=unexpected_values_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
"ignore_row_if": "all_values_are_missing",
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_values_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_values_metric.id]) == 3
unexpected_values = []
for unexpected_value_dict in metrics[unexpected_values_metric.id]:
updated_unexpected_value_dict = {
key: "NULL" if np.isnan(value) else value
for key, value in unexpected_value_dict.items()
}
unexpected_values.append(updated_unexpected_value_dict)
assert unexpected_values == [
{"a": 1.0, "b": 1.0, "c": 2.0},
{"a": 4.0, "b": 4.0, "c": 4.0},
{"a": "NULL", "b": "NULL", "c": 9.0},
]
# Restore from saved original metrics in order to start fresh on testing for unexpected results.
metrics = copy.deepcopy(metrics_save)
condition_metric = MetricConfiguration(
metric_name=condition_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
"ignore_row_if": "any_value_is_missing",
},
metric_value_kwargs=None,
metric_dependencies={
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(condition_metric,),
metrics=metrics,
)
metrics.update(results)
unexpected_count_metric = MetricConfiguration(
metric_name=unexpected_count_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
"ignore_row_if": "any_value_is_missing",
},
metric_value_kwargs=None,
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_count_metric,), metrics=metrics
)
metrics.update(results)
# Condition metrics return "negative logic" series.
assert list(metrics[condition_metric.id][0]) == [
True,
False,
False,
False,
True,
False,
]
assert metrics[unexpected_count_metric.id] == 2
unexpected_rows_metric = MetricConfiguration(
metric_name=unexpected_rows_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
"ignore_row_if": "any_value_is_missing",
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_rows_metric,), metrics=metrics
)
metrics.update(results)
assert metrics[unexpected_rows_metric.id].equals(
pd.DataFrame(
data={"a": [1.0, 4.0], "b": [1.0, 4.0], "c": [2.0, 4.0]}, index=[0, 4]
)
)
assert len(metrics[unexpected_rows_metric.id].columns) == 3
pd.testing.assert_index_equal(
metrics[unexpected_rows_metric.id].index, pd.Index([0, 4])
)
unexpected_values_metric = MetricConfiguration(
metric_name=unexpected_values_metric_name,
metric_domain_kwargs={
"column_list": ["a", "b", "c"],
"ignore_row_if": "any_value_is_missing",
},
metric_value_kwargs={
"result_format": {"result_format": "SUMMARY", "partial_unexpected_count": 3}
},
metric_dependencies={
"unexpected_condition": condition_metric,
"table.columns": table_columns_metric,
},
)
results = engine.resolve_metrics(
metrics_to_resolve=(unexpected_values_metric,), metrics=metrics
)
metrics.update(results)
assert len(metrics[unexpected_values_metric.id]) == 2
assert metrics[unexpected_values_metric.id] == [
{"a": 1.0, "b": 1.0, "c": 2.0},
{"a": 4.0, "b": 4.0, "c": 4.0},
]
hexsha: 5d9efdc9bb26b9b6b0108ca3f06e1f902f4ae2fa | size: 3,538 | ext: py | lang: Python
path: rpi_deep_pantilt/detect/pretrained/api_v2/facessd_mobilenet_v2.py | repo: timayy/rpi-deep-pantilt @ 5173887dd88c31d08f3e2e802acd365dbf0daba9 | licenses: ["MIT"]
import tensorflow as tf
from rpi_deep_pantilt import __path__ as rpi_deep_pantilt_path
from rpi_deep_pantilt.detect.custom.base_predictors import (
TFLiteDetectionPostProcessPredictor,
)
class FaceSSDMobileNetV2EdgeTPU(TFLiteDetectionPostProcessPredictor):
"""
Model source: https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1_detection_zoo.md#open-images-trained-models
Non-max suppression op (TFLite_Detection_Postprocess) added to graph via tools/tflite-postprocess-ops-128-uint8-quant.sh
"""
LABELS = ["face"]
def __init__(
self,
model_uri="https://github.com/leigh-johnson/rpi-deep-pantilt/releases/download/v1.1.1/facessd_mobilenet_v2_quantized_320x320_open_image_v4_tflite2.tar.gz",
model_name="facessd_mobilenet_v2_quantized_320x320_open_image_v4_tflite2",
input_shape=(320, 320),
min_score_thresh=0.50,
input_type=tf.uint8,
tflite_file="model_postprocessed_quantized_128_uint8_edgetpu.tflite",
label_file=rpi_deep_pantilt_path[0] + "/data/facessd_label_map.pbtxt",
):
super().__init__(
model_name=model_name,
tflite_file=tflite_file,
label_file=label_file,
model_uri=model_uri,
input_shape=input_shape,
min_score_thresh=min_score_thresh,
input_type=input_type,
edge_tpu=True,
)
class FaceSSDMobileNetV2Int8(TFLiteDetectionPostProcessPredictor):
"""
Model source: https://github.com/tensorflow/models/blob/master/research/object_detection/g3doc/tf1_detection_zoo.md#open-images-trained-models
Non-max suppression op (TFLite_Detection_Postprocess) added to graph via tools/tflite-postprocess-ops-128-uint8-quant.sh
"""
LABELS = ["face"]
def __init__(
self,
model_uri="https://github.com/leigh-johnson/rpi-deep-pantilt/releases/download/v1.1.1/facessd_mobilenet_v2_quantized_320x320_open_image_v4_tflite2.tar.gz",
model_name="facessd_mobilenet_v2_quantized_320x320_open_image_v4_tflite2",
input_shape=(320, 320),
min_score_thresh=0.50,
input_type=tf.uint8,
tflite_file="model_postprocessed_quantized_128_uint8.tflite",
label_file=rpi_deep_pantilt_path[0] + "/data/facessd_label_map.pbtxt",
):
super().__init__(
model_name=model_name,
tflite_file=tflite_file,
label_file=label_file,
model_uri=model_uri,
input_shape=input_shape,
min_score_thresh=min_score_thresh,
input_type=input_type,
)
class FaceSSDMobileNetV2Float32(TFLiteDetectionPostProcessPredictor):
LABELS = ["face"]
def __init__(
self,
model_uri="https://github.com/leigh-johnson/rpi-deep-pantilt/releases/download/v1.1.1/facessd_mobilenet_v2_quantized_320x320_open_image_v4_tflite2.tar.gz",
model_name="facessd_mobilenet_v2_quantized_320x320_open_image_v4_tflite2",
input_shape=(320, 320),
min_score_thresh=0.50,
input_type=tf.float32,
tflite_file="model_postprocessed.tflite",
label_file=rpi_deep_pantilt_path[0] + "/data/facessd_label_map.pbtxt",
):
super().__init__(
model_name=model_name,
tflite_file=tflite_file,
label_file=label_file,
model_uri=model_uri,
input_shape=input_shape,
min_score_thresh=min_score_thresh,
input_type=input_type,
)
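# Note (summary, not part of the original module): the three predictors above share the
# same model_uri, model_name, input shape (320x320) and score threshold; they differ only
# in the compiled .tflite artifact they load and the expected input dtype --
# FaceSSDMobileNetV2EdgeTPU loads the EdgeTPU-compiled uint8 file with edge_tpu=True,
# FaceSSDMobileNetV2Int8 loads the plain uint8-quantized file, and
# FaceSSDMobileNetV2Float32 loads the un-quantized float32 file.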
hexsha: 5dbb2d232ee14c11428986bc88417852df785d2d | size: 9,726 | ext: py | lang: Python
path: tests/test_hexamer/test_extract_seq/test_bridge_plus_strand_with_hardclip.py | repo: zyxue/kleat3 @ 861b02797937eea51e99f9c29d195fb3e7dea376 | licenses: ["MIT"]
from unittest.mock import MagicMock, patch
import kleat.misc.settings as S
from kleat.hexamer.hexamer import extract_seq
"""
cc: ctg_clv; ice: init_clv_end
rc: ref_clv; ire: init_ref_end
"""
@patch('kleat.hexamer.hexamer.apautils')
def test_hardclip_before_clv(mock_apautils):
"""
AA
TC┘ <-bridge read
CGCATTCGTCG <-bridge contig (hardclipped, could be chimeric https://www.biostars.org/p/109333/)
\\\| | <-hardclip mask
012345678901 <-contig coord
|cc^ ^ice
...XXXATTCGTCG... <-genome
234567890123 <-genome coord
| 1 |
rc^ ^ire
"""
ctg = MagicMock()
ctg.reference_name = 'chr2'
mock_apautils.infer_query_sequence.return_value = 'CGCATTCGTCG'
ctg.cigartuples = ((S.BAM_CHARD_CLIP, 3), (S.BAM_CMATCH, 8))
ref_fa = MagicMock()
ref_fa.get_reference_length.return_value = 100
kw = dict(contig=ctg, strand='+', ref_clv=8, ref_fa=ref_fa, ctg_clv=6)
assert extract_seq(**kw) == 'CGCATTC'
assert extract_seq(window=1, **kw) == 'C'
assert extract_seq(window=2, **kw) == 'TC'
assert extract_seq(window=3, **kw) == 'TTC'
assert extract_seq(window=4, **kw) == 'ATTC'
assert extract_seq(window=5, **kw) == 'CATTC'
@patch('kleat.hexamer.hexamer.apautils')
def test_hardclip_after_clv(mock_apautils):
"""
AAA
GTT┘ <-bridge read
A-GGTTGCAGA <-bridge contig
| | | |/// <-hardclip mask
0 1234567890 <-contig coord
ctg_clv^ ^ice <-contig coord
...ACGGTTGCAGA... <-genome
789012345678 <-genome coord
1 | |
ref_clv^ ^init_fe
"""
ctg = MagicMock()
ctg.reference_name = 'chr1'
mock_apautils.infer_query_sequence.return_value = 'AGGTTGCAGA'
ctg.cigartuples = (
(S.BAM_CMATCH, 1),
(S.BAM_CREF_SKIP, 1),
(S.BAM_CMATCH, 6),
(S.BAM_CHARD_CLIP, 3)
)
ref_fa = MagicMock()
ref_fa.get_reference_length.return_value = 100
ref_fa.fetch = MagicMock(return_value='C')
kw = dict(contig=ctg, strand='+', ref_clv=12, ref_fa=ref_fa, ctg_clv=4)
assert extract_seq(**kw) == 'ACGGTT'
ref_fa.fetch.assert_called_with('chr1', 8, 9)
assert extract_seq(window=1, **kw) == 'T'
assert extract_seq(window=2, **kw) == 'TT'
assert extract_seq(window=3, **kw) == 'GTT'
assert extract_seq(window=4, **kw) == 'GGTT'
assert extract_seq(window=5, **kw) == 'CGGTT'
@patch('kleat.hexamer.hexamer.apautils')
def test_hardclip_spanning_clv_from_before_edgecase_1(mock_apautils):
"""
AA
TC┘ <-bridge read
CATTCGT <-bridge contig (hardclipped, could be chimeric https://www.biostars.org/p/109333/)
\\\\| | <-hardclip mask
01234567 <-contig coord
cc^ |^ice
...XATTCGT... <-genome
23456789 <-genome coord
| |
rc^ ^ire
"""
ctg = MagicMock()
ctg.reference_name = 'chr2'
mock_apautils.infer_query_sequence.return_value = 'CATTCGT'
ctg.cigartuples = (
(S.BAM_CHARD_CLIP, 4),
(S.BAM_CMATCH, 3)
)
ref_fa = MagicMock()
ref_fa.get_reference_length.return_value = 100
kw = dict(contig=ctg, strand='+', ref_clv=6, ref_fa=ref_fa, ctg_clv=4)
assert extract_seq(**kw) == 'CATTC'
@patch('kleat.hexamer.hexamer.apautils')
def test_hardclip_spanning_clv_from_before_edgecase_2(mock_apautils):
"""
AA
TC┘ <-bridge read
CATTCGT <-bridge contig (hardclipped, could be chimeric https://www.biostars.org/p/109333/)
\\\\\ | <-hardclip mask
01234567 <-contig coord
cc^ ^ice
...XATTCGT... <-genome
23456789 <-genome coord
| |
rc^ ^ire
"""
ctg = MagicMock()
ctg.reference_name = 'chr2'
mock_apautils.infer_query_sequence.return_value = 'CATTCGT'
ctg.cigartuples = (
(S.BAM_CHARD_CLIP, 5),
(S.BAM_CMATCH, 2)
)
ref_fa = MagicMock()
ref_fa.get_reference_length.return_value = 100
kw = dict(contig=ctg, strand='+', ref_clv=6, ref_fa=ref_fa, ctg_clv=4)
assert extract_seq(**kw) == 'CATTC'
@patch('kleat.hexamer.hexamer.apautils')
def test_hardclip_spanning_clv_from_before_edgecase_3(mock_apautils):
"""
AA
TC┘ <-bridge read
CATTCGT <-bridge contig (hardclipped, could be chimeric https://www.biostars.org/p/109333/)
\\\\\\| <-hardclip mask
01234567 <-contig coord
cc^ ^ice
...XATTCGT... <-genome
23456789 <-genome coord
| |
rc^ ^ire
"""
ctg = MagicMock()
ctg.reference_name = 'chr2'
mock_apautils.infer_query_sequence.return_value = 'CATTCGT'
ctg.cigartuples = (
(S.BAM_CHARD_CLIP, 6),
(S.BAM_CMATCH, 1)
)
ref_fa = MagicMock()
ref_fa.get_reference_length.return_value = 100
kw = dict(contig=ctg, strand='+', ref_clv=6, ref_fa=ref_fa, ctg_clv=4)
assert extract_seq(**kw) == 'CATTC'
@patch('kleat.hexamer.hexamer.apautils')
def test_hardclip_spanning_clv_from_after_edgecase_1(mock_apautils):
"""
AAA
GTT┘ <-bridge read
A-GGTTGCA <-bridge contig
| | |/// <-hardclip mask
0 12345678 <-contig coord
cc^ ^ice
...ACGGTTGCA... <-genome
7890123456 <-genome coord
1 | |
rc^ ^ie
"""
ctg = MagicMock()
ctg.reference_name = 'chr1'
mock_apautils.infer_query_sequence.return_value = 'AGGTTGCA'
ctg.cigartuples = (
(S.BAM_CMATCH, 1),
(S.BAM_CREF_SKIP, 1),
(S.BAM_CMATCH, 4),
(S.BAM_CHARD_CLIP, 3)
)
ref_fa = MagicMock()
ref_fa.get_reference_length.return_value = 100
ref_fa.fetch = MagicMock(return_value='C')
kw = dict(contig=ctg, strand='+', ref_clv=12, ref_fa=ref_fa, ctg_clv=4)
assert extract_seq(**kw) == 'ACGGTT'
ref_fa.fetch.assert_called_with('chr1', 8, 9)
assert extract_seq(window=1, **kw) == 'T'
assert extract_seq(window=2, **kw) == 'TT'
assert extract_seq(window=3, **kw) == 'GTT'
assert extract_seq(window=4, **kw) == 'GGTT'
assert extract_seq(window=5, **kw) == 'CGGTT'
@patch('kleat.hexamer.hexamer.apautils')
def test_hardclip_spanning_clv_from_after_edgecase_2(mock_apautils):
"""
AAA
GTT┘ <-bridge read
A-GGTTGCA <-bridge contig
| | //// <-hardclip mask
0 12345678 <-contig coord
cc^ ^ice
...ACGGTTGCA... <-genome
7890123456 <-genome coord
1 | |
rc^ ^ie
"""
ctg = MagicMock()
ctg.reference_name = 'chr1'
mock_apautils.infer_query_sequence.return_value = 'AGGTTGCA'
ctg.cigartuples = (
(S.BAM_CMATCH, 1),
(S.BAM_CREF_SKIP, 1),
(S.BAM_CMATCH, 3),
(S.BAM_CHARD_CLIP, 4),
)
ref_fa = MagicMock()
ref_fa.get_reference_length.return_value = 100
ref_fa.fetch = MagicMock(return_value='C')
kw = dict(contig=ctg, strand='+', ref_clv=12, ref_fa=ref_fa, ctg_clv=4)
assert extract_seq(**kw) == 'ACGGTT'
@patch('kleat.hexamer.hexamer.apautils')
def test_hardclip_spanning_clv_from_after_edgecase_3(mock_apautils):
"""
AAA
GTT┘ <-bridge read
A-GGTTGCA <-bridge contig
| | ///// <-hardclip mask
0 12345678 <-contig coord
cc^ ^ice
...ACGGTTGCA... <-genome
7890123456 <-genome coord
1 | |
rc^ ^ie
"""
ctg = MagicMock()
ctg.reference_name = 'chr1'
mock_apautils.infer_query_sequence.return_value = 'AGGTTGCA'
ctg.cigartuples = (
(S.BAM_CMATCH, 1),
(S.BAM_CREF_SKIP, 1),
(S.BAM_CMATCH, 2),
(S.BAM_CHARD_CLIP, 5),
)
ref_fa = MagicMock()
ref_fa.get_reference_length.return_value = 100
ref_fa.fetch = MagicMock(return_value='C')
kw = dict(contig=ctg, strand='+', ref_clv=12, ref_fa=ref_fa, ctg_clv=4)
assert extract_seq(**kw) == 'ACGGTT'
def test_for_clv_on_the_end_of_contig_edgecase():
"""
AA
ACT┘| <-bridge read
ACGTACT | <-suffix contig
0123456789 <-contig coord
^ctg_clv
4567890123 <-genome coord
^ref_clv
"""
ctg = MagicMock()
ctg.reference_name = 'chr1'
ctg.query_sequence = 'ACGTACT'
ctg.cigartuples = (
(S.BAM_CMATCH, 7),
)
ref_fa = MagicMock()
ref_fa.get_reference_length.return_value = 100
ref_fa.fetch = MagicMock(return_value='C')
kw = dict(contig=ctg, strand='+', ref_clv=10, ref_fa=ref_fa, ctg_clv=6)
assert extract_seq(**kw) == 'ACGTACT'
def test_for_bridge_read_on_suffix_end_and_clv_is_on_the_end_of_contig_edgecase():
"""
AA
CGTACT┘| <-bridge read
012345678
|||AAA |
ATGACGT┘ | | <-suffix contig
0123456789012 <-contig offset coord
| ^ctg_clv
5678901234567 <-genome offset coord
1 ^ref_clv
"""
ctg = MagicMock()
ctg.reference_name = 'chr1'
ctg.query_sequence = 'ATGACGTAAA'
ctg.cigartuples = (
(S.BAM_CMATCH, 7),
(S.BAM_CSOFT_CLIP, 3)
)
ref_fa = MagicMock()
ref_fa.get_reference_length.return_value = 100
kw = dict(contig=ctg, strand='+', ref_clv=9, ref_fa=ref_fa, ctg_clv=9)
assert extract_seq(**kw) == 'ATGACGTAAA'
hexsha: 5dd42db9c4c11e2da3ea19e05cce7ada776bcf28 | size: 658 | ext: py | lang: Python
path: paraVerComoFuncionaAlgumasCoisas/LetsCode_Curso/03-aplicacoes/2-arquivos-CSV/ipython1.py | repo: jonasht/pythonEstudos @ 5e7d28e7bd82b9d1b08e795867fdbaa743f4b747 | licenses: ["MIT"]
# coding: utf-8
import csv
with open('brasil_covid.csv', 'r', encoding='utf-8') as arquivo_csv:
leitor = csv.reader(arquivo_csv)
for linha in leitor:
print(linha)
with open('brasil_covid.csv', 'r', encoding='utf-8') as arquivo_csv:
leitor = csv.reader(arquivo_csv)
header = next(leitor)
for linha in leitor:
# csv.reader yields strings, so this str-vs-int comparison raises TypeError on Python 3;
# the next block fixes it by converting the value with float().
if linha[2] > 1:
print(linha)
with open('brasil_covid.csv', 'r', encoding='utf-8') as arquivo_csv:
leitor = csv.reader(arquivo_csv)
header = next(leitor)
for linha in leitor:
if float(linha[2]) > 1:
print(linha)
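# Hedged alternative sketch (assumption: same brasil_covid.csv layout as above): the same
# filter using csv.DictReader, which exposes each row as a dict keyed by the header names.
# The column name 'novosCasos' is illustrative only -- substitute whatever the third header
# column is actually called in the file.
#
# with open('brasil_covid.csv', 'r', encoding='utf-8') as arquivo_csv:
#     leitor = csv.DictReader(arquivo_csv)
#     for linha in leitor:
#         if float(linha['novosCasos']) > 1:
#             print(linha)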
hexsha: 5df151d2fa34d6d56c27087038abfa812bb7a7e3 | size: 35,844 | ext: py | lang: Python
path: openprocurement/contracting/api/tests/change_blanks.py | repo: openprocurement/openprocurement.contracting.api @ 05ff7d52e938520961088735552cd266b70281ef | licenses: ["Apache-2.0"] | stars: 1 | issues: 13 | forks: 18
# -*- coding: utf-8 -*-
from datetime import timedelta
from copy import deepcopy
from openprocurement.api.utils import get_now
# ContractNoItemsChangeTest
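# Hedged overview (summary, not part of the original tests): the blanks below exercise the
# contract "changes" sub-resource -- a change is created in 'pending' status via POST to
# /contracts/{id}/changes, may be patched while pending, requires a 'dateSigned' that is not
# in the future, not earlier than the contract's own dateSigned, and not earlier than the
# last active change's dateSigned; once patched to 'active' it can no longer be modified.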
def no_items_contract_change(self):
data = deepcopy(self.initial_data)
del data['items']
response = self.app.post_json('/contracts', {"data": data})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
contract = response.json['data']
self.assertEqual(contract['status'], 'active')
self.assertNotIn('items', contract)
tender_token = data['tender_token']
response = self.app.patch_json('/contracts/{}/credentials?acc_token={}'.format(contract['id'], tender_token),
{'data': ''})
self.assertEqual(response.status, '200 OK')
token = response.json['access']['token']
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(contract['id'], token),
{'data': {'rationale': u'причина зміни укр',
'rationaleTypes': ['qualityImprovement']}})
self.assertEqual(response.status, '201 Created')
change = response.json['data']
self.assertEqual(change['status'], 'pending')
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(contract['id'], change['id'], token),
{'data': {'status': 'active', 'dateSigned': get_now().isoformat()}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
response = self.app.patch_json('/contracts/{}?acc_token={}'.format(contract['id'], token),
{"data": {"status": "terminated", "amountPaid": {"amount": 100, "valueAddedTaxIncluded": True, "currency": "UAH"}}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'terminated')
response = self.app.get('/contracts/{}'.format(contract['id']))
self.assertNotIn('items', response.json['data'])
# ContactChangesResourceTest
def not_found(self):
response = self.app.get('/contracts/some_id/changes', status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'contract_id'}
])
response = self.app.get('/contracts/{}/changes'.format(self.contract['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 0)
response = self.app.get('/contracts/{}/changes/some_id'.format(self.contract['id']), status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'change_id'}
])
response = self.app.patch_json(
'/contracts/{}/changes/some_id'.format(self.contract['id']), {'data': {}}, status=404)
self.assertEqual(response.status, '404 Not Found')
self.assertEqual(response.content_type, 'application/json')
self.assertEqual(response.json['status'], 'error')
self.assertEqual(response.json['errors'], [
{u'description': u'Not Found', u'location': u'url', u'name': u'change_id'}
])
def get_change(self):
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'Принцеси не какають.',
'rationale_ru': u'ff',
'rationale_en': 'asdf',
'contractNumber': 12,
'rationaleTypes': ['priceReduction']}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change = response.json['data']
self.assertEqual(change['status'], 'pending')
self.assertIn('date', change)
response = self.app.get('/contracts/{}/changes'.format(self.contract['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 1)
response = self.app.get('/contracts/{}/changes/{}'.format(self.contract['id'], change['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.content_type, 'application/json')
change_data = response.json['data']
self.assertEqual(change_data, change)
response = self.app.get('/contracts/{}'.format(self.contract['id']))
self.assertEqual(response.status, '200 OK')
self.assertIn('changes', response.json['data'])
self.assertEqual(len(response.json['data']['changes']), 1)
self.assertEqual(set(response.json['data']['changes'][0].keys()),
set(['id', 'date', 'status', 'rationaleTypes', 'rationale', 'rationale_ru', 'rationale_en', 'contractNumber']))
self.app.authorization = None
response = self.app.get('/contracts/{}/changes'.format(self.contract['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 1)
self.assertEqual(set(response.json['data'][0].keys()),
set(['id', 'date', 'status', 'rationaleTypes', 'rationale', 'rationale_ru', 'rationale_en', 'contractNumber']))
def create_change_invalid(self):
response = self.app.post('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
'data', status=415)
self.assertEqual(response.status, '415 Unsupported Media Type')
self.assertEqual(response.json['errors'], [
{u'description':
u"Content-Type header should be one of ['application/json']", u'location': u'header', u'name': u'Content-Type'}
])
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {}}, status=422)
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "rationaleTypes", "description": ["This field is required."]},
{"location": "body", "name": "rationale", "description": ["This field is required."]}
])
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': "", 'rationaleTypes': ['volumeCuts']}}, status=422)
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "rationale", "description": ["String value is too short."]}
])
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale_ua': ""}}, status=422)
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "rationale_ua", "description": "Rogue field"}
])
self.app.authorization = None
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale_ua': "aaa"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.app.authorization = ('Basic', ('broker', ''))
response = self.app.post_json('/contracts/{}/changes'.format(self.contract['id']),
{'data': {'rationale_ua': "aaa"}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
response = self.app.patch_json('/contracts/{}?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'changes': [{'rationale': "penguin", 'rationaleTypes': ['volumeCuts']}]}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.body, 'null')
response = self.app.get('/contracts/{}?acc_token={}'.format(self.contract['id'], self.contract_token))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('changes', response.json['data'])
def create_change(self):
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр',
'rationale_en': 'change cause en',
'rationaleTypes': ['qualityImprovement']}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change = response.json['data']
self.assertEqual(change['status'], 'pending')
self.assertIn('date', change)
response = self.app.get('/contracts/{}/changes'.format(self.contract['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 1)
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'трататата', 'rationaleTypes': ['priceReduction']}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "data", "description": "Can't create new contract change while any (pending) change exists"}
])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active', 'dateSigned': get_now().isoformat()}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['status'], 'active')
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'трататата', 'rationaleTypes': ['non-existing-rationale']}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "rationaleTypes", "description": [["Value must be one of ['volumeCuts', 'itemPriceVariation', 'qualityImprovement', 'thirdParty', 'durationExtension', 'priceReduction', 'taxRate', 'fiscalYearExtension']."]]}
])
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'трататата', 'rationaleTypes': ['priceReduction']}})
self.assertEqual(response.status, '201 Created')
change2 = response.json['data']
self.assertEqual(change2['status'], 'pending')
response = self.app.get('/contracts/{}/changes'.format(self.contract['id']))
self.assertEqual(response.status, '200 OK')
self.assertEqual(len(response.json['data']), 2)
def patch_change(self):
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр',
'rationale_en': u'change cause en',
'rationaleTypes': ['priceReduction'],
'contractNumber': u'№ 146'}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change = response.json['data']
self.assertEqual(change['status'], 'pending')
self.assertEqual(change['contractNumber'], u'№ 146')
creation_date = change['date']
now = get_now().isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'date': now}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.body, 'null')
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'rationale_ru': 'шота на руськом'}})
self.assertEqual(response.status, '200 OK')
self.assertIn('rationale_ru', response.json['data'])
first_patch_date = response.json['data']['date']
self.assertEqual(first_patch_date, creation_date)
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'rationale_en': 'another cause desctiption'}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['rationale_en'], 'another cause desctiption')
second_patch_date = response.json['data']['date']
self.assertEqual(first_patch_date, second_patch_date)
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'rationaleTypes': ['fiscalYearExtension', 'priceReduction']}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['rationaleTypes'], ['fiscalYearExtension', 'priceReduction'])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'rationaleTypes': ['fiscalYearExtension', 'volumeCuts', 'taxRate']}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['rationaleTypes'], ['fiscalYearExtension', 'volumeCuts', 'taxRate'])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'rationaleTypes': 'fiscalYearExtension'}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['rationaleTypes'], ['fiscalYearExtension'])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'rationaleTypes': 'fiscalYearExtension, volumeCuts'}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "rationaleTypes", "description": [["Value must be one of ['volumeCuts', 'itemPriceVariation', 'qualityImprovement', 'thirdParty', 'durationExtension', 'priceReduction', 'taxRate', 'fiscalYearExtension']."]]}
])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'rationaleTypes': []}}, status=422)
self.assertEqual(response.status, '422 Unprocessable Entity')
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "rationaleTypes", "description": ["Please provide at least 1 item."]}
])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'id': '1234' * 8}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.body, 'null')
self.app.authorization = None
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'rationale_en': 'la-la-la'}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.app.authorization = ('Basic', ('broker', ''))
response = self.app.patch_json('/contracts/{}/changes/{}'.format(self.contract['id'], change['id']),
{'data': {'rationale_en': 'la-la-la'}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active', 'dateSigned': get_now().isoformat()}})
self.assertEqual(response.status, '200 OK')
self.assertNotEqual(response.json['data']['date'], creation_date)
self.assertNotEqual(response.json['data']['date'], first_patch_date)
self.assertNotEqual(response.json['data']['date'], second_patch_date)
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'pending'}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
def change_date_signed(self):
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр',
'rationale_en': u'change cause en',
'rationaleTypes': ['priceReduction'],
'contractNumber': u'№ 146'}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change = response.json['data']
self.assertEqual(change['status'], 'pending')
self.assertEqual(change['contractNumber'], u'№ 146')
self.app.authorization = ('Basic', ('broker', ''))
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active'}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "data", "description": "Can't update contract change status. 'dateSigned' is required."}
])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'dateSigned': "12-14-11"}}, status=422)
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "dateSigned", "description": ["Could not parse 12-14-11. Should be ISO8601."]}
])
valid_date1_raw = get_now()
valid_date1 = valid_date1_raw.isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'dateSigned': valid_date1}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['dateSigned'], valid_date1)
one_day_in_past = (get_now() - timedelta(days=1)).isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'dateSigned': one_day_in_past}}, status=403)
self.assertIn("can't be earlier than contract dateSigned", response.json['errors'][0]["description"])
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active'}})
self.assertEqual(response.status, '200 OK')
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'dateSigned': get_now().isoformat()}}, status=403)
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "data", "description": "Can't update contract change in current (active) status"}
])
response = self.app.get('/contracts/{}/changes/{}'.format(self.contract['id'], change['id']))
change1 = response.json['data']
self.assertEqual(change1['dateSigned'], valid_date1)
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'iнша причина зміни укр',
'rationale_en': u'another change cause en',
'rationaleTypes': ['priceReduction'],
'contractNumber': u'№ 147'}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change2 = response.json['data']
self.assertEqual(change['status'], 'pending')
one_day_in_future = (get_now() + timedelta(days=1)).isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change2['id'], self.contract_token),
{'data': {'dateSigned': one_day_in_future}}, status=422)
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "dateSigned", "description": [u"Contract signature date can't be in the future"]}
])
smaller_than_last_change = (valid_date1_raw - timedelta(seconds=1)).isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change2['id'], self.contract_token),
{'data': {'dateSigned': smaller_than_last_change}}, status=403)
self.assertEqual("Change dateSigned ({}) can't be earlier than last active change dateSigned ({})".format(smaller_than_last_change, valid_date1), response.json['errors'][0]["description"])
date = get_now().isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change2['id'], self.contract_token),
{'data': {'dateSigned': date}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['dateSigned'], date)
# date update request
valid_date2_raw = get_now()
valid_date2 = valid_date2_raw.isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change2['id'], self.contract_token),
{'data': {'dateSigned': valid_date2}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['dateSigned'], valid_date2)
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change2['id'], self.contract_token),
{'data': {'status': 'active'}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['dateSigned'], valid_date2)
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'третя причина зміни укр',
'rationale_en': u'third change cause en',
'rationaleTypes': ['priceReduction'],
'contractNumber': u'№ 148'}})
self.assertEqual(response.status, '201 Created')
change3 = response.json['data']
self.assertEqual(change['status'], 'pending')
smaller_than_last_change = (valid_date2_raw - timedelta(seconds=1)).isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change3['id'], self.contract_token),
{'data': {'dateSigned': smaller_than_last_change}}, status=403)
self.assertEqual("Change dateSigned ({}) can't be earlier than last active change dateSigned ({})".format(smaller_than_last_change, valid_date2), response.json['errors'][0]["description"])
date = get_now().isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change3['id'], self.contract_token),
{'data': {'dateSigned': date}})
self.assertEqual(response.status, '200 OK')
self.assertEqual(response.json['data']['dateSigned'], date)
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change3['id'], self.contract_token),
{'data': {'status': 'active'}})
self.assertEqual(response.status, '200 OK')
response = self.app.patch_json('/contracts/{}?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'status': 'terminated', "amountPaid": {"amount": 15}}})
self.assertEqual(response.status, '200 OK')
def date_signed_on_change_creation(self):
# test create change with date signed
one_day_in_past = (get_now() - timedelta(days=1)).isoformat()
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр', 'rationale_en': u'change cause en',
'dateSigned': one_day_in_past,
'rationaleTypes': ['priceReduction'], 'contractNumber': u'№ 146'}}, status=403)
self.assertIn("can't be earlier than contract dateSigned", response.json['errors'][0]["description"])
one_day_in_future = (get_now() + timedelta(days=1)).isoformat()
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр', 'rationale_en': u'change cause en',
'dateSigned': one_day_in_future,
'rationaleTypes': ['priceReduction'], 'contractNumber': u'№ 146'}}, status=422)
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "dateSigned", "description": [u"Contract signature date can't be in the future"]}
])
date = get_now().isoformat()
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр', 'rationale_en': u'change cause en',
'dateSigned': date,
'rationaleTypes': ['priceReduction'], 'contractNumber': u'№ 146'}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change = response.json['data']
self.assertEqual(change['dateSigned'], date)
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active'}})
self.assertEqual(response.status, '200 OK')
def change_date_signed_very_old_contracts_data(self):
# prepare old contract data
contract = self.db.get(self.contract['id'])
contract['dateSigned'] = None
self.db.save(contract)
response = self.app.get('/contracts/{}?acc_token={}'.format(self.contract['id'], self.contract_token))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('dateSigned', response.json['data'])
self.app.authorization = ('Basic', ('broker', ''))
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр',
'rationale_en': u'change cause en',
'rationaleTypes': ['priceReduction'],
'contractNumber': u'№ 146'}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change = response.json['data']
self.assertEqual(change['status'], 'pending')
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active'}}, status=403)
self.assertEqual(response.status, '403 Forbidden')
self.assertEqual(response.json['errors'], [
{"location": "body", "name": "data", "description": "Can't update contract change status. 'dateSigned' is required."}
])
one_day_in_past = (get_now() - timedelta(days=1)).isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active', 'dateSigned': one_day_in_past}})
self.assertEqual(response.json['data']['status'], 'active')
self.assertEqual(response.json['data']['dateSigned'], one_day_in_past)
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'iнша причина зміни укр',
'rationale_en': u'another change cause en',
'rationaleTypes': ['priceReduction'],
'contractNumber': u'№ 147'}})
self.assertEqual(response.status, '201 Created')
self.assertEqual(response.content_type, 'application/json')
change2 = response.json['data']
self.assertEqual(change['status'], 'pending')
two_days_in_past = (get_now() - timedelta(days=2)).isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change2['id'], self.contract_token),
{'data': {'dateSigned': two_days_in_past}}, status=403)
self.assertEqual("Change dateSigned ({}) can't be earlier than last active change dateSigned ({})".format(two_days_in_past, one_day_in_past), response.json['errors'][0]["description"])
valid_date = get_now().isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change2['id'], self.contract_token),
{'data': {'status': 'active', 'dateSigned': valid_date}})
self.assertEqual(response.json['data']['status'], 'active')
self.assertEqual(response.json['data']['dateSigned'], valid_date)
# prepare old contract change data
contract = self.db.get(self.contract['id'])
last_change = contract['changes'][-1]
last_change['dateSigned'] = None
self.db.save(contract)
response = self.app.get('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], last_change['id'], self.contract_token))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('dateSigned', response.json['data'])
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'третя причина зміни укр',
'rationale_en': u'third change cause en',
'rationaleTypes': ['priceReduction'],
'contractNumber': u'№ 148'}})
self.assertEqual(response.status, '201 Created')
change3 = response.json['data']
        self.assertEqual(change3['status'], 'pending')
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change3['id'], self.contract_token),
{'data': {'dateSigned': two_days_in_past}}, status=403)
self.assertEqual("Change dateSigned ({}) can't be earlier than last active change dateSigned ({})".format(two_days_in_past, last_change['date']), response.json['errors'][0]["description"])
valid_date2 = get_now().isoformat()
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change3['id'], self.contract_token),
{'data': {'status': 'active', 'dateSigned': valid_date2}})
self.assertEqual(response.json['data']['status'], 'active')
self.assertEqual(response.json['data']['dateSigned'], valid_date2)
def date_signed_on_change_creation_for_very_old_contracts_data(self):
# prepare old contract data
contract = self.db.get(self.contract['id'])
contract['dateSigned'] = None
self.db.save(contract)
response = self.app.get('/contracts/{}?acc_token={}'.format(self.contract['id'], self.contract_token))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('dateSigned', response.json['data'])
self.app.authorization = ('Basic', ('broker', ''))
one_day_in_past = (get_now() - timedelta(days=1)).isoformat()
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'причина зміни укр', 'rationale_en': u'change cause en',
'rationaleTypes': ['priceReduction'], 'contractNumber': u'№ 146',
'dateSigned': one_day_in_past}})
self.assertEqual(response.json['data']['dateSigned'], one_day_in_past)
change = response.json['data']
response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], change['id'], self.contract_token),
{'data': {'status': 'active'}})
self.assertEqual(response.json['data']['status'], 'active')
# prepare old contract change data
contract = self.db.get(self.contract['id'])
last_change = contract['changes'][-1]
last_change['dateSigned'] = None
self.db.save(contract)
response = self.app.get('/contracts/{}/changes/{}?acc_token={}'.format(self.contract['id'], last_change['id'], self.contract_token))
self.assertEqual(response.status, '200 OK')
self.assertNotIn('dateSigned', response.json['data'])
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'третя причина зміни укр', 'rationale_en': u'third change cause en',
'rationaleTypes': ['priceReduction'], 'contractNumber': u'№ 148',
'dateSigned': one_day_in_past}}, status=403)
self.assertEqual("Change dateSigned ({}) can't be earlier than last active change dateSigned ({})".format(one_day_in_past, last_change['date']), response.json['errors'][0]["description"])
valid_date = get_now().isoformat()
response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(self.contract['id'], self.contract_token),
{'data': {'rationale': u'третя причина зміни укр', 'rationale_en': u'third change cause en',
'rationaleTypes': ['priceReduction'], 'contractNumber': u'№ 148',
'dateSigned': valid_date}})
self.assertEqual(response.json['data']['dateSigned'], valid_date)
| 61.906736
| 244
| 0.608693
| 3,808
| 35,844
| 5.620011
| 0.054622
| 0.105836
| 0.127891
| 0.071959
| 0.931592
| 0.915845
| 0.886454
| 0.864212
| 0.837391
| 0.831083
| 0
| 0.014587
| 0.210133
| 35,844
| 578
| 245
| 62.013841
| 0.740251
| 0.006919
| 0
| 0.71519
| 0
| 0.004219
| 0.289018
| 0.080678
| 0
| 0
| 0
| 0
| 0.352321
| 1
| 0.021097
| false
| 0
| 0.006329
| 0
| 0.027426
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5df854a75a8b01c8b921a79cd7f95db9714f08db
| 12,955
|
py
|
Python
|
tests/test_inheritance_invariant.py
|
cameron-simpson/icontract
|
0a3d7f8c28e8b9e24d973450b232bd7c3f89010a
|
[
"MIT"
] | null | null | null |
tests/test_inheritance_invariant.py
|
cameron-simpson/icontract
|
0a3d7f8c28e8b9e24d973450b232bd7c3f89010a
|
[
"MIT"
] | null | null | null |
tests/test_inheritance_invariant.py
|
cameron-simpson/icontract
|
0a3d7f8c28e8b9e24d973450b232bd7c3f89010a
|
[
"MIT"
] | null | null | null |
# pylint: disable=invalid-name
# pylint: disable=missing-docstring
# pylint: disable=unused-argument
# pylint: disable=no-member
import abc
import unittest
from typing import Optional # pylint: disable=unused-import
import icontract
import tests.error
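# These tests exercise icontract invariant inheritance: an invariant declared on a
# base class (icontract.DBC) is expected to be enforced on subclass instances as well,
# including on inherited and overridden methods, properties, and constructors.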
class TestOK(unittest.TestCase):
def test_count_checks(self):
class Increment:
count = 0
def __call__(self) -> bool:
Increment.count += 1
return True
inc = Increment()
@icontract.invariant(lambda self: inc())
class A(icontract.DBC):
def __repr__(self) -> str:
return "instance of A"
def some_func(self): # pylint: disable=no-self-use
return 1
class B(A):
def __repr__(self) -> str:
return "instance of B"
def some_func(self):
return 2
inst = B()
self.assertEqual(1, Increment.count, "Invariant is expected to run only once at the initializer.")
inst.some_func()
self.assertEqual(3, Increment.count, "Invariant is expected to run before and after the method call.")
class TestViolation(unittest.TestCase):
def test_inherited(self):
@icontract.invariant(lambda self: self.x > 0)
class A(icontract.DBC):
def __init__(self) -> None:
self.x = 10
def func(self) -> None:
self.x = -1
def __repr__(self) -> str:
return "instance of A"
class B(A):
def __repr__(self) -> str:
return "instance of B"
b = B()
violation_error = None # type: Optional[icontract.ViolationError]
try:
b.func()
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual("self.x > 0:\n"
"self was instance of B\n"
"self.x was -1", tests.error.wo_mandatory_location(str(violation_error)))
def test_inherited_violated_in_child(self):
@icontract.invariant(lambda self: self.x > 0)
class A(icontract.DBC):
def __init__(self) -> None:
self.x = 10
def func(self) -> None:
self.x = 100
def __repr__(self) -> str:
return "instance of A"
class B(A):
def func(self) -> None:
self.x = -1
def __repr__(self) -> str:
return "instance of B"
b = B()
violation_error = None # type: Optional[icontract.ViolationError]
try:
b.func()
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual("self.x > 0:\n"
"self was instance of B\n"
"self.x was -1", tests.error.wo_mandatory_location(str(violation_error)))
def test_additional_invariant_violated_in_childs_init(self):
@icontract.invariant(lambda self: self.x > 0)
class A(icontract.DBC):
def __init__(self) -> None:
self.x = 10
def __repr__(self) -> str:
return "instance of A"
@icontract.invariant(lambda self: self.x > 100)
class B(A):
def __repr__(self) -> str:
return "instance of B"
violation_error = None # type: Optional[icontract.ViolationError]
try:
_ = B()
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual("self.x > 100:\n"
"self was instance of B\n"
"self.x was 10", tests.error.wo_mandatory_location(str(violation_error)))
def test_method_violates_in_child(self):
@icontract.invariant(lambda self: self.x > 0)
class A(icontract.DBC):
def __init__(self) -> None:
self.x = 1000
def some_method(self) -> None:
self.x = 10
def __repr__(self) -> str:
return "instance of A"
@icontract.invariant(lambda self: self.x > 100)
class B(A):
def __repr__(self) -> str:
return "instance of B"
b = B()
violation_error = None # type: Optional[icontract.ViolationError]
try:
b.some_method()
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual("self.x > 100:\n"
"self was instance of B\n"
"self.x was 10", tests.error.wo_mandatory_location(str(violation_error)))
def test_triple_inheritance(self):
@icontract.invariant(lambda self: self.x > 0)
class A(icontract.DBC):
def __init__(self) -> None:
self.x = 10
def func(self) -> None:
self.x = -1
def __repr__(self) -> str:
return "instance of A"
class B(A):
def __repr__(self) -> str:
return "instance of B"
class C(B):
def __repr__(self) -> str:
return "instance of C"
c = C()
violation_error = None # type: Optional[icontract.ViolationError]
try:
c.func()
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual("self.x > 0:\n"
"self was instance of C\n"
"self.x was -1", tests.error.wo_mandatory_location(str(violation_error)))
def test_with_abstract_method(self):
@icontract.invariant(lambda self: self.x > 0)
class A(icontract.DBC):
def __init__(self) -> None:
self.x = 10
@abc.abstractmethod
def func(self) -> None:
pass
def __repr__(self) -> str:
return "instance of A"
class B(A):
def func(self) -> None:
self.x = -1
def __repr__(self) -> str:
return "instance of B"
b = B()
violation_error = None # type: Optional[icontract.ViolationError]
try:
b.func()
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual("self.x > 0:\n"
"self was instance of B\n"
"self.x was -1", tests.error.wo_mandatory_location(str(violation_error)))
class TestProperty(unittest.TestCase):
def test_inherited_getter(self):
@icontract.invariant(lambda self: not self.toggled)
class SomeBase(icontract.DBC):
def __init__(self) -> None:
self.toggled = False
@property
def some_prop(self) -> int:
self.toggled = True
return 0
class SomeClass(SomeBase):
def __repr__(self):
return self.__class__.__name__
some_inst = SomeClass()
violation_error = None # type: Optional[icontract.ViolationError]
try:
_ = some_inst.some_prop
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual('not self.toggled:\n'
'self was SomeClass\n'
'self.toggled was True', tests.error.wo_mandatory_location(str(violation_error)))
def test_inherited_setter(self):
@icontract.invariant(lambda self: not self.toggled)
class SomeBase(icontract.DBC):
def __init__(self) -> None:
self.toggled = False
@property
def some_prop(self) -> int:
return 0
@some_prop.setter
def some_prop(self, value: int) -> None:
self.toggled = True
class SomeClass(SomeBase):
def __repr__(self):
return self.__class__.__name__
some_inst = SomeClass()
violation_error = None # type: Optional[icontract.ViolationError]
try:
some_inst.some_prop = 0
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual('not self.toggled:\n'
'self was SomeClass\n'
'self.toggled was True', tests.error.wo_mandatory_location(str(violation_error)))
def test_inherited_deleter(self):
@icontract.invariant(lambda self: not self.toggled)
class SomeBase(icontract.DBC):
def __init__(self) -> None:
self.toggled = False
@property
def some_prop(self) -> int:
return 0
@some_prop.deleter
def some_prop(self) -> None:
self.toggled = True
class SomeClass(SomeBase):
def __repr__(self):
return self.__class__.__name__
some_inst = SomeClass()
violation_error = None # type: Optional[icontract.ViolationError]
try:
del some_inst.some_prop
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual('not self.toggled:\n'
'self was SomeClass\n'
'self.toggled was True', tests.error.wo_mandatory_location(str(violation_error)))
def test_inherited_invariant_on_getter(self):
@icontract.invariant(lambda self: not self.toggled)
class SomeBase(icontract.DBC):
def __init__(self) -> None:
self.toggled = False
class SomeClass(SomeBase):
@property
def some_prop(self) -> int:
self.toggled = True
return 0
def __repr__(self):
return self.__class__.__name__
some_inst = SomeClass()
violation_error = None # type: Optional[icontract.ViolationError]
try:
_ = some_inst.some_prop
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual('not self.toggled:\n'
'self was SomeClass\n'
'self.toggled was True', tests.error.wo_mandatory_location(str(violation_error)))
def test_inherited_invariant_on_setter(self):
@icontract.invariant(lambda self: not self.toggled)
class SomeBase(icontract.DBC):
def __init__(self) -> None:
self.toggled = False
class SomeClass(SomeBase):
@property
def some_prop(self) -> int:
return 0
@some_prop.setter
def some_prop(self, value: int) -> None:
self.toggled = True
def __repr__(self):
return self.__class__.__name__
some_inst = SomeClass()
violation_error = None # type: Optional[icontract.ViolationError]
try:
some_inst.some_prop = 0
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual('not self.toggled:\n'
'self was SomeClass\n'
'self.toggled was True', tests.error.wo_mandatory_location(str(violation_error)))
def test_inherited_invariant_on_deleter(self):
@icontract.invariant(lambda self: not self.toggled)
class SomeBase(icontract.DBC):
def __init__(self) -> None:
self.toggled = False
class SomeClass(SomeBase):
@property
def some_prop(self) -> int:
return 0
@some_prop.deleter
def some_prop(self) -> None:
self.toggled = True
def __repr__(self):
return self.__class__.__name__
some_inst = SomeClass()
violation_error = None # type: Optional[icontract.ViolationError]
try:
del some_inst.some_prop
except icontract.ViolationError as err:
violation_error = err
self.assertIsNotNone(violation_error)
self.assertEqual('not self.toggled:\n'
'self was SomeClass\n'
'self.toggled was True', tests.error.wo_mandatory_location(str(violation_error)))
if __name__ == '__main__':
unittest.main()
| 31.597561
| 110
| 0.552528
| 1,376
| 12,955
| 4.952035
| 0.083576
| 0.09862
| 0.033901
| 0.061638
| 0.889492
| 0.877458
| 0.877458
| 0.861902
| 0.849134
| 0.849134
| 0
| 0.008063
| 0.358549
| 12,955
| 409
| 111
| 31.674817
| 0.811913
| 0.051717
| 0
| 0.845659
| 0
| 0
| 0.080466
| 0
| 0
| 0
| 0
| 0
| 0.083601
| 1
| 0.212219
| false
| 0.003215
| 0.016077
| 0.086817
| 0.424437
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b9015c1c91121abf4b325c960700e8d7560421e4
| 86
|
py
|
Python
|
7_kyu/exes_and_ohs.py
|
nik4nd/codewars
|
efae95f1f9fbd5f31fc62b1b4f5a7d1ee511ced0
|
[
"MIT"
] | null | null | null |
7_kyu/exes_and_ohs.py
|
nik4nd/codewars
|
efae95f1f9fbd5f31fc62b1b4f5a7d1ee511ced0
|
[
"MIT"
] | null | null | null |
7_kyu/exes_and_ohs.py
|
nik4nd/codewars
|
efae95f1f9fbd5f31fc62b1b4f5a7d1ee511ced0
|
[
"MIT"
] | null | null | null |
def xo(s):
    # Case-insensitive check that 'x' and 'o' occur equally often.
    s = s.lower()
    return s.count('x') == s.count('o')
| 28.666667
| 74
| 0.593023
| 16
| 86
| 3.1875
| 0.75
| 0.235294
| 0.431373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 86
| 2
| 75
| 43
| 0.708333
| 0
| 0
| 0
| 0
| 0
| 0.023256
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
5d114ace3962d2fb8fbbfc7d920116d0689faa74
| 959
|
py
|
Python
|
benchmarks/tak.py
|
jpages/twopy
|
d0ae42b02ee60cf432e716884f43ec6670bcae2b
|
[
"BSD-3-Clause"
] | 7
|
2018-12-18T20:32:04.000Z
|
2021-05-30T04:20:22.000Z
|
benchmarks/tak.py
|
jpages/twopy
|
d0ae42b02ee60cf432e716884f43ec6670bcae2b
|
[
"BSD-3-Clause"
] | null | null | null |
benchmarks/tak.py
|
jpages/twopy
|
d0ae42b02ee60cf432e716884f43ec6670bcae2b
|
[
"BSD-3-Clause"
] | 1
|
2021-11-14T17:47:11.000Z
|
2021-11-14T17:47:11.000Z
|
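# Takeuchi function: a classic, heavily recursive benchmark; the repeated calls
# below exercise deep recursion and function-call overhead.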
def tak(x, y, z):
if not y < x:
return z
else:
return tak(tak(x-1, y, z), tak(y-1, z, x), tak(z-1, x, y))
print(tak(18, 12, 6))
print(tak(27, 18, 9))
print(tak(36, 27, 18))
print(tak(45, 36, 27))
print(tak(54, 45, 36))
print(tak(63, 54, 45))
print(tak(72, 63, 54))
print(tak(81, 72, 63))
print(tak(90, 81, 72))
print(tak(18, 12, 6))
print(tak(27, 18, 9))
print(tak(36, 27, 18))
print(tak(45, 36, 27))
print(tak(54, 45, 36))
print(tak(63, 54, 45))
print(tak(72, 63, 54))
print(tak(81, 72, 63))
print(tak(90, 81, 72))
print(tak(18, 12, 6))
print(tak(27, 18, 9))
print(tak(36, 27, 18))
print(tak(45, 36, 27))
print(tak(54, 45, 36))
print(tak(63, 54, 45))
print(tak(72, 63, 54))
print(tak(81, 72, 63))
print(tak(90, 81, 72))
print(tak(18, 12, 6))
print(tak(27, 18, 9))
print(tak(36, 27, 18))
print(tak(45, 36, 27))
print(tak(54, 45, 36))
print(tak(63, 54, 45))
print(tak(72, 63, 54))
print(tak(81, 72, 63))
print(tak(90, 81, 72))
| 18.803922
| 66
| 0.576642
| 209
| 959
| 2.645933
| 0.110048
| 0.520796
| 0.072333
| 0.086799
| 0.896926
| 0.896926
| 0.896926
| 0.896926
| 0.896926
| 0.896926
| 0
| 0.265075
| 0.169969
| 959
| 50
| 67
| 19.18
| 0.429648
| 0
| 0
| 0.878049
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02439
| false
| 0
| 0
| 0
| 0.073171
| 0.878049
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 11
|
53c5819df1d9193e62ad3939410b944097fcfab7
| 8,862
|
py
|
Python
|
Dominant_parallel_lines_detection/MNet/code/post_process.py
|
dongkwonjin/Semantic-Line-DRM
|
0f20ca85ca80bc9e7c9157932343dfad6f7fdbd5
|
[
"MIT"
] | 31
|
2020-08-13T04:34:10.000Z
|
2022-03-30T17:56:06.000Z
|
Dominant_parallel_lines_detection/MNet/code/post_process.py
|
dongkwonjin/Semantic-Line-DRM
|
0f20ca85ca80bc9e7c9157932343dfad6f7fdbd5
|
[
"MIT"
] | null | null | null |
Dominant_parallel_lines_detection/MNet/code/post_process.py
|
dongkwonjin/Semantic-Line-DRM
|
0f20ca85ca80bc9e7c9157932343dfad6f7fdbd5
|
[
"MIT"
] | 2
|
2020-11-25T10:44:56.000Z
|
2021-03-03T08:15:57.000Z
|
import torch
import numpy as np
from libs.modules import *
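# Post-processing pipeline (as implemented below): build all candidate line pairs,
# rank them pairwise with RNet, pick the top-ranked (dominant) line, then use MNet
# to select its best parallel counterpart among the remaining candidates.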
class Post_Process_CRM(object):
def __init__(self, dict_DB):
self.forward_model = dict_DB['forward_model']
self.visualize = dict_DB['visualize']
def generate_line_pair(self):
num = self.out_pts[self.rest_idx].shape[0]
self.rest_num = num
# reference, target
idx1 = torch.zeros((num * (num - 1) // 2), dtype=torch.int64).cuda()
idx2 = torch.zeros((num * (num - 1) // 2), dtype=torch.int64).cuda()
k = 0
for i in range(num):
for j in range(i + 1, num):
idx1[k] = i
idx2[k] = j
k += 1
self.pairwise = {'idx1': idx1, 'idx2': idx2, 'num': k}
def run_RNet(self):
# extract ref & tar line features from DNet
f_ref = {'fc1': self.out_fc['fc1'][self.pairwise['idx1']],
'fc2': self.out_fc['fc2'][self.pairwise['idx1']]}
f_tar = {'fc1': self.out_fc['fc1'][self.pairwise['idx2']],
'fc2': self.out_fc['fc2'][self.pairwise['idx2']]}
self.out_ranking = self.forward_model.run_comparator(f_ref, f_tar, self.RNet)
def construct_pairwise_comparison_matrix(self, result):
self.matrix = torch.zeros((self.rest_num, self.rest_num), dtype=torch.float32)
for i in range(self.pairwise['num']):
idx1 = self.pairwise['idx1'][i]
idx2 = self.pairwise['idx2'][i]
self.matrix[idx1, idx2] = result['cls'][i, 0]
self.matrix[idx2, idx1] = result['cls'][i, 1]
def ranking_and_sorting(self):
score = torch.sum(self.matrix, dim=1)
rank_idx = torch.argsort(score, descending=True)
self.idx_rank_1 = self.rest_idx[int(rank_idx[0])]
# update
self.visit[self.idx_rank_1] = 0
self.rest_idx = (self.visit == 1).nonzero()[:, 0]
# line selection
self.dominant_check[self.idx_rank_1] = 1
def run_MNet(self):
c = self.out_fc['fc1'][self.idx_rank_1].shape[0]
f_ref2 = {'fc1': self.out_fc['fc1'][self.idx_rank_1].unsqueeze(0).expand(torch.sum(self.visit), c)}
f_tar2 = {'fc1': self.out_fc['fc1'][self.rest_idx]}
out = self.forward_model.run_comparator(f_ref2, f_tar2, self.MNet)
idx_matching_1 = torch.argsort(out['cls'][:, 1], descending=True)
idx_matching_1 = self.rest_idx[int(idx_matching_1[0])]
# line selection
self.dominant_check[idx_matching_1] = 2
def run(self):
self.rest_idx = (self.visit == 1).nonzero()[:, 0]
# generate line pair
self.generate_line_pair()
self.run_RNet()
self.construct_pairwise_comparison_matrix(self.out_ranking)
self.ranking_and_sorting()
self.run_MNet()
# selected dominant parallel lines
out_pri = self.out_pts[self.dominant_check == 1]
out_mul = self.out_pts[self.dominant_check != 0]
return out_pri, out_mul
def update_data(self, batch, img, out_pos):
self.batch = batch
self.out_pos = out_pos
self.out_pts = self.out_pos
self.out_num = self.out_pts.shape[0]
# feature from detector fc1, fc2
out = self.forward_model.run_feature_extractor(img=img,
line_pts=self.out_pts.unsqueeze(0),
model=self.DNet)
self.out_fc = {}
self.out_fc['fc1'] = out['fc1']
self.out_fc['fc2'] = out['fc2']
self.visit = torch.ones(self.out_num, dtype=torch.int32)
self.dominant_check = torch.zeros((self.out_num), dtype=torch.int32)
def update_model(self, DNet, RNet, MNet):
self.DNet = DNet
self.RNet = RNet
self.MNet = MNet
class Post_Process_CRM_removal(object):
def __init__(self, cfg, dict_DB):
self.cfg = cfg
self.forward_model = dict_DB['forward_model']
self.visualize = dict_DB['visualize']
def generate_line_pair(self):
num = self.out_pts[self.rest_idx].shape[0]
self.rest_num = num
# reference, target
idx1 = torch.zeros((num * (num - 1) // 2), dtype=torch.int64).cuda()
idx2 = torch.zeros((num * (num - 1) // 2), dtype=torch.int64).cuda()
k = 0
for i in range(num):
for j in range(i + 1, num):
idx1[k] = i
idx2[k] = j
k += 1
self.pairwise = {'idx1': idx1, 'idx2': idx2, 'num': k}
def run_RNet(self):
# extract ref & tar line features from DNet
f_ref = {'fc1': self.out_fc['fc1'][self.pairwise['idx1']],
'fc2': self.out_fc['fc2'][self.pairwise['idx1']]}
f_tar = {'fc1': self.out_fc['fc1'][self.pairwise['idx2']],
'fc2': self.out_fc['fc2'][self.pairwise['idx2']]}
self.out_ranking = self.forward_model.run_comparator(f_ref, f_tar, self.RNet)
def construct_pairwise_comparison_matrix(self, result):
self.matrix = torch.zeros((self.rest_num, self.rest_num), dtype=torch.float32)
for i in range(self.pairwise['num']):
idx1 = self.pairwise['idx1'][i]
idx2 = self.pairwise['idx2'][i]
self.matrix[idx1, idx2] = result['cls'][i, 0]
self.matrix[idx2, idx1] = result['cls'][i, 1]
def ranking_and_sorting(self):
score = torch.sum(self.matrix, dim=1)
rank_idx = torch.argsort(score, descending=True)
self.idx_rank_1 = self.rest_idx[int(rank_idx[0])]
# update
self.visit[self.idx_rank_1] = 0
self.rest_idx = (self.visit == 1).nonzero()[:, 0]
# line selection
self.dominant_check[self.idx_rank_1] = 1
def run_MNet(self):
c = self.out_fc['fc1'][self.idx_rank_1].shape[0]
f_ref2 = {'fc1': self.out_fc['fc1'][self.idx_rank_1].unsqueeze(0).expand(torch.sum(self.visit), c)}
f_tar2 = {'fc1': self.out_fc['fc1'][self.rest_idx]}
out = self.forward_model.run_comparator(f_ref2, f_tar2, self.MNet)
pos_check = torch.argmax(out['cls'], dim=1)
pos_cls = out['cls'][pos_check == 1, 1]
# remove negative lines
neg_idx = self.rest_idx[(pos_check == 0).nonzero()[:, 0]]
self.visit[neg_idx] = 0
self.rest_idx = (self.visit == 1).nonzero()[:, 0]
return pos_cls
def line_removal(self, top_m):
# edge density
res_pts = self.out_pts[self.rest_idx]
# region mask
mask = divided_region_mask(line_pts=res_pts,
size=[self.cfg.width, self.cfg.height])
check = suppression(top_m, mask, 0.85).type(torch.float32)
# update
self.visit[self.rest_idx[top_m]] = 0 # top matching
self.visit[self.rest_idx[check == 1]] = 0 # suppressed
self.rest_idx = (self.visit == 1).nonzero()[:, 0]
return check
def run(self):
self.rest_idx = (self.visit == 1).nonzero()[:, 0]
# generate line pair
self.generate_line_pair()
self.run_RNet()
self.construct_pairwise_comparison_matrix(self.out_ranking)
self.ranking_and_sorting()
out_cls = self.run_MNet()
num = np.minimum(self.top_k, self.out_num)
        for _ in range(num - 1):
            if out_cls.shape[0] == 0:  # all suppressed
                break
            sorted_idx = torch.argsort(out_cls, descending=True)
            top_m = self.rest_idx[int(sorted_idx[0])]
            self.dominant_check[top_m] = 2
            top_check = torch.zeros((sorted_idx.shape[0]), dtype=torch.int32).cuda()
            top_check[sorted_idx[0]] = 1
            remove_check = self.line_removal(sorted_idx[0])
            out_cls = out_cls[(remove_check + top_check) == 0]
# selected dominant parallel lines
out_pri = self.out_pts[self.dominant_check == 1]
out_mul = self.out_pts[self.dominant_check != 0]
return out_pri, out_mul
def update_data(self, batch, img, out_pos):
self.batch = batch
self.out_pts = out_pos
self.out_num = self.out_pts.shape[0]
# feature from detector fc1, fc2
out = self.forward_model.run_feature_extractor(img=img,
line_pts=self.out_pts.unsqueeze(0),
model=self.DNet)
self.out_fc = {}
self.out_fc['fc1'] = out['fc1']
self.out_fc['fc2'] = out['fc2']
self.visit = torch.ones(self.out_num, dtype=torch.int32)
self.dominant_check = torch.zeros((self.out_num), dtype=torch.int32)
def update_model(self, DNet, RNet, MNet, top_k):
self.top_k = top_k
self.DNet = DNet
self.RNet = RNet
self.MNet = MNet
| 31.425532
| 107
| 0.572331
| 1,221
| 8,862
| 3.947584
| 0.108108
| 0.066805
| 0.037344
| 0.029876
| 0.808714
| 0.797303
| 0.786515
| 0.786515
| 0.786515
| 0.756846
| 0
| 0.031885
| 0.288648
| 8,862
| 281
| 108
| 31.537367
| 0.732709
| 0.049312
| 0
| 0.745562
| 0
| 0
| 0.028922
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.112426
| false
| 0
| 0.017751
| 0
| 0.16568
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
54df17a0bda10955fae3409519f8c180f37dd0a8
| 279
|
py
|
Python
|
tasks/data_build/build/from_georisques/__init__.py
|
Envinorma/data-tasks
|
a117aede1610f8ec21212e21579f2b73ec7de7e2
|
[
"MIT"
] | null | null | null |
tasks/data_build/build/from_georisques/__init__.py
|
Envinorma/data-tasks
|
a117aede1610f8ec21212e21579f2b73ec7de7e2
|
[
"MIT"
] | 11
|
2021-05-17T15:32:37.000Z
|
2021-09-20T07:27:37.000Z
|
tasks/data_build/build/from_georisques/__init__.py
|
Envinorma/data-tasks
|
a117aede1610f8ec21212e21579f2b73ec7de7e2
|
[
"MIT"
] | null | null | null |
from .installations import build_all_installations, build_all_installations_datasets # noqa: F401
from .documents import build_all_documents, build_all_documents_datasets # noqa: F401
from .classements import build_all_classements, build_all_classements_datasets # noqa: F401
| 69.75
| 98
| 0.860215
| 36
| 279
| 6.25
| 0.277778
| 0.213333
| 0.186667
| 0.177778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0.096774
| 279
| 3
| 99
| 93
| 0.857143
| 0.114695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
071cab040352ebb05a22f69d0222dc797134f343
| 4,388
|
py
|
Python
|
tests/test_is_number.py
|
alvistack/daveoncode-python-string-utils
|
78929d88d90b1f90cb4837528ed955166bf0f559
|
[
"MIT"
] | 3
|
2020-08-20T10:27:13.000Z
|
2021-11-02T20:28:16.000Z
|
tests/test_is_number.py
|
alvistack/daveoncode-python-string-utils
|
78929d88d90b1f90cb4837528ed955166bf0f559
|
[
"MIT"
] | null | null | null |
tests/test_is_number.py
|
alvistack/daveoncode-python-string-utils
|
78929d88d90b1f90cb4837528ed955166bf0f559
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from string_utils import is_number
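# Behaviour checks for is_number(): non-string inputs, surrounding spaces, signs,
# integers, doubles, malformed dots/signs, and scientific notation.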
class IsNumberTestCase(TestCase):
def test_cannot_handle_non_string_objects(self):
with self.assertRaises(TypeError) as raised:
# noinspection PyTypeChecker
is_number(None)
self.assertEqual(str(raised.exception), 'Expected "str", received "NoneType"')
with self.assertRaises(TypeError) as raised:
# noinspection PyTypeChecker
is_number(False)
self.assertEqual(str(raised.exception), 'Expected "str", received "bool"')
with self.assertRaises(TypeError) as raised:
# noinspection PyTypeChecker
is_number(0)
self.assertEqual(str(raised.exception), 'Expected "str", received "int"')
with self.assertRaises(TypeError) as raised:
# noinspection PyTypeChecker
is_number([])
self.assertEqual(str(raised.exception), 'Expected "str", received "list"')
with self.assertRaises(TypeError) as raised:
# noinspection PyTypeChecker
is_number({'a': 1})
self.assertEqual(str(raised.exception), 'Expected "str", received "dict"')
def test_returns_false_if_string_is_empty(self):
self.assertFalse(is_number(''))
self.assertFalse(is_number(' '))
def test_returns_false_if_string_contains_number_but_has_spaces(self):
self.assertFalse(is_number(' 1'))
self.assertFalse(is_number('99 '))
self.assertFalse(is_number(' 1234 '))
self.assertFalse(is_number(' +1234567890'))
self.assertFalse(is_number(' 1.2 '))
def test_returns_false_if_string_is_sign_only(self):
self.assertFalse(is_number('+'))
self.assertFalse(is_number('-'))
def test_returns_false_if_contains_operations(self):
self.assertFalse(is_number('1 + 1'))
self.assertFalse(is_number('1+1'))
self.assertFalse(is_number('1 - 1'))
self.assertFalse(is_number('1-1'))
def test_returns_true_for_unsigned_integers(self):
self.assertTrue(is_number('1'))
self.assertTrue(is_number('99'))
self.assertTrue(is_number('1234567890'))
def test_returns_true_for_signed_integers(self):
self.assertTrue(is_number('+1'))
self.assertTrue(is_number('+99'))
self.assertTrue(is_number('+1234567890'))
self.assertTrue(is_number('-1'))
self.assertTrue(is_number('-99'))
self.assertTrue(is_number('-1234567890'))
def test_returns_true_for_unsigned_double(self):
self.assertTrue(is_number('1.0'))
self.assertTrue(is_number('.007'))
self.assertTrue(is_number('1.000'))
self.assertTrue(is_number('99.99'))
self.assertTrue(is_number('1234567890.000123456'))
def test_returns_true_for_signed_double(self):
self.assertTrue(is_number('+1.0'))
self.assertTrue(is_number('+.007'))
self.assertTrue(is_number('+1.000'))
self.assertTrue(is_number('+99.99'))
self.assertTrue(is_number('+1234567890.000123456'))
self.assertTrue(is_number('-1.0'))
self.assertTrue(is_number('-.007'))
self.assertTrue(is_number('-1.000'))
self.assertTrue(is_number('-99.99'))
self.assertTrue(is_number('-1234567890.000123456'))
def test_double_cannot_contain_multiple_dots(self):
self.assertFalse(is_number('+1..0'))
self.assertFalse(is_number('+..007'))
self.assertFalse(is_number('+1..000'))
self.assertFalse(is_number('+99..99'))
self.assertFalse(is_number('+1234567890..000123456'))
self.assertFalse(is_number('-1..0'))
self.assertFalse(is_number('-..007'))
self.assertFalse(is_number('-1..000'))
self.assertFalse(is_number('-99..99'))
self.assertFalse(is_number('-1234567890..000123456'))
def test_number_cannot_contain_multiple_sign(self):
self.assertFalse(is_number('+-1'))
self.assertFalse(is_number('++1'))
self.assertFalse(is_number('--1'))
self.assertFalse(is_number('+-1.1'))
self.assertFalse(is_number('++1.1'))
self.assertFalse(is_number('--1.1'))
def test_returns_true_for_scientific_notation(self):
self.assertTrue(is_number('1e3'))
self.assertTrue(is_number('50e2'))
self.assertTrue(is_number('1.245e10'))
| 37.186441
| 86
| 0.654057
| 521
| 4,388
| 5.259117
| 0.138196
| 0.181022
| 0.179927
| 0.243431
| 0.877737
| 0.829927
| 0.806934
| 0.785766
| 0.690876
| 0.690876
| 0
| 0.070898
| 0.202826
| 4,388
| 117
| 87
| 37.504274
| 0.712407
| 0.030538
| 0
| 0.127907
| 0
| 0
| 0.118879
| 0.020245
| 0
| 0
| 0
| 0
| 0.767442
| 1
| 0.139535
| false
| 0
| 0.023256
| 0
| 0.174419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0756d0c74cb33d127235c742beb29d9a1d611cc4
| 18,212
|
py
|
Python
|
pyramid_openapi3/tests/test_views.py
|
Wim-De-Clercq/pyramid_openapi3
|
60e803a04c77751f5fee5f0e2c86acfcc4cced5e
|
[
"MIT"
] | null | null | null |
pyramid_openapi3/tests/test_views.py
|
Wim-De-Clercq/pyramid_openapi3
|
60e803a04c77751f5fee5f0e2c86acfcc4cced5e
|
[
"MIT"
] | null | null | null |
pyramid_openapi3/tests/test_views.py
|
Wim-De-Clercq/pyramid_openapi3
|
60e803a04c77751f5fee5f0e2c86acfcc4cced5e
|
[
"MIT"
] | null | null | null |
"""Tests views."""
from dataclasses import dataclass
from openapi_core.shortcuts import RequestValidator
from openapi_core.shortcuts import ResponseValidator
from pyramid.exceptions import ConfigurationError
from pyramid.interfaces import Interface
from pyramid.interfaces import IRouteRequest
from pyramid.interfaces import IRoutesMapper
from pyramid.interfaces import IView
from pyramid.interfaces import IViewClassifier
from pyramid.router import Router
from pyramid.testing import DummyRequest
from pyramid.testing import testConfig
from pyramid_openapi3.exceptions import RequestValidationError
import os
import pytest
import tempfile
class DummyStartResponse(object):
def __call__(self, status, headerlist) -> None:
"""WSGI start_response protocol."""
self.status = status
self.headerlist = headerlist
MINIMAL_DOCUMENT = b"""
openapi: "3.0.0"
info:
version: "1.0.0"
title: Foo API
paths:
/foo:
get:
responses:
200:
description: A foo
"""
SPLIT_DOCUMENT = b"""
openapi: "3.0.0"
info:
version: "1.0.0"
title: Foo API
paths:
/foo:
$ref: "paths.yaml#/foo"
"""
SPLIT_DOCUMENT_PATHS = b"""
foo:
get:
responses:
200:
description: A foo
"""
def test_add_spec_view() -> None:
"""Test registration of a view that serves the openapi document."""
with testConfig() as config:
config.include("pyramid_openapi3")
with tempfile.NamedTemporaryFile() as document:
document.write(MINIMAL_DOCUMENT)
document.seek(0)
config.pyramid_openapi3_spec(
document.name, route="/foo.yaml", route_name="foo_api_spec"
)
# assert settings
openapi_settings = config.registry.settings["pyramid_openapi3"]
assert openapi_settings["filepath"] == document.name
assert openapi_settings["spec_route_name"] == "foo_api_spec"
assert openapi_settings["spec"].info.title == "Foo API"
assert isinstance(openapi_settings["request_validator"], RequestValidator)
assert isinstance(openapi_settings["response_validator"], ResponseValidator)
# assert route
mapper = config.registry.getUtility(IRoutesMapper)
routes = mapper.get_routes()
assert routes[0].name == "foo_api_spec"
assert routes[0].path == "/foo.yaml"
# assert view
request = config.registry.queryUtility(IRouteRequest, name="foo_api_spec")
view = config.registry.adapters.registered(
(IViewClassifier, request, Interface), IView, name=""
)
assert view(request=None, context=None).body == MINIMAL_DOCUMENT
def test_add_spec_view_already_defined() -> None:
"""Test that creating a spec more than once raises an Exception."""
with testConfig() as config:
config.include("pyramid_openapi3")
with tempfile.TemporaryDirectory() as directory:
spec_name = os.path.join(directory, "openapi.yaml")
spec_paths_name = os.path.join(directory, "paths.yaml")
with open(spec_name, "wb") as f:
f.write(SPLIT_DOCUMENT)
with open(spec_paths_name, "wb") as f:
f.write(SPLIT_DOCUMENT_PATHS)
config.pyramid_openapi3_spec_directory(
spec_name, route="/foo", route_name="foo_api_spec"
)
with tempfile.NamedTemporaryFile() as document:
document.write(MINIMAL_DOCUMENT)
document.seek(0)
with pytest.raises(
ConfigurationError,
match=(
"Spec has already been configured. You may only call "
"pyramid_openapi3_spec or pyramid_openapi3_spec_directory once"
),
):
config.pyramid_openapi3_spec(
document.name, route="/foo.yaml", route_name="foo_api_spec"
)
def test_add_spec_view_directory() -> None:
"""Test registration of a view that serves the openapi document."""
with testConfig() as config:
config.include("pyramid_openapi3")
with tempfile.TemporaryDirectory() as directory:
spec_name = os.path.join(directory, "openapi.yaml")
spec_paths_name = os.path.join(directory, "paths.yaml")
with open(spec_name, "wb") as f:
f.write(SPLIT_DOCUMENT)
with open(spec_paths_name, "wb") as f:
f.write(SPLIT_DOCUMENT_PATHS)
config.pyramid_openapi3_spec_directory(
spec_name, route="/foo", route_name="foo_api_spec"
)
# assert settings
openapi_settings = config.registry.settings["pyramid_openapi3"]
assert openapi_settings["filepath"] == spec_name
assert openapi_settings["spec_route_name"] == "foo_api_spec"
assert openapi_settings["spec"].info.title == "Foo API"
assert "get" in openapi_settings["spec"].paths["/foo"].operations
assert isinstance(openapi_settings["request_validator"], RequestValidator)
assert isinstance(openapi_settings["response_validator"], ResponseValidator)
# assert route
# routes[0] is the static view, routes[1] is the route
mapper = config.registry.getUtility(IRoutesMapper)
routes = mapper.get_routes()
assert routes[0].name == "__/foo/"
assert routes[0].path == "/foo/*subpath"
assert routes[1].name == "foo_api_spec"
assert routes[1].path == "/foo/openapi.yaml"
# assert view
route_request = config.registry.queryUtility(
IRouteRequest, name="foo_api_spec"
)
static_request = config.registry.queryUtility(IRouteRequest, name="__/foo/")
view = config.registry.adapters.registered(
(IViewClassifier, static_request, Interface), IView, name=""
)
assert route_request is not None
assert static_request is not None
assert view is not None
# assert router
router = Router(config.registry)
response = router({"PATH_INFO": "/foo/openapi.yaml"}, DummyStartResponse())
assert next(response) == SPLIT_DOCUMENT
response = router({"PATH_INFO": "/foo/paths.yaml"}, DummyStartResponse())
assert next(response) == SPLIT_DOCUMENT_PATHS
def test_add_spec_view_directory_already_defined() -> None:
"""Test that creating a spec more than once raises an Exception."""
with testConfig() as config:
config.include("pyramid_openapi3")
with tempfile.NamedTemporaryFile() as document:
document.write(MINIMAL_DOCUMENT)
document.seek(0)
config.pyramid_openapi3_spec(
document.name, route="/foo", route_name="foo_api_spec"
)
with tempfile.TemporaryDirectory() as directory:
spec_name = os.path.join(directory, "openapi.yaml")
spec_paths_name = os.path.join(directory, "paths.yaml")
with open(spec_name, "wb") as f:
f.write(SPLIT_DOCUMENT)
with open(spec_paths_name, "wb") as f:
f.write(SPLIT_DOCUMENT_PATHS)
with pytest.raises(
ConfigurationError,
match=(
"Spec has already been configured. You may only call "
"pyramid_openapi3_spec or pyramid_openapi3_spec_directory once"
),
):
config.pyramid_openapi3_spec_directory(
spec_name, route="/foo.yaml", route_name="foo_api_spec"
)
def test_add_spec_view_directory_invalid_route() -> None:
"""Test that creating a spec directory with a filename route raises an Exception."""
with testConfig() as config:
config.include("pyramid_openapi3")
with tempfile.TemporaryDirectory() as directory:
spec_name = os.path.join(directory, "openapi.yaml")
spec_paths_name = os.path.join(directory, "paths.yaml")
with open(spec_name, "wb") as f:
f.write(SPLIT_DOCUMENT)
with open(spec_paths_name, "wb") as f:
f.write(SPLIT_DOCUMENT_PATHS)
with pytest.raises(
ConfigurationError,
match=(
"Having route be a filename is not allowed when using a "
"spec directory"
),
):
config.pyramid_openapi3_spec_directory(
spec_name, route="/foo.yaml", route_name="foo_api_spec"
)
def test_add_explorer_view() -> None:
"""Test registration of a view serving the Swagger UI."""
with testConfig() as config:
config.include("pyramid_openapi3")
with tempfile.NamedTemporaryFile() as document:
document.write(MINIMAL_DOCUMENT)
document.seek(0)
config.pyramid_openapi3_spec(
document.name, route="/foo.yaml", route_name="foo_api_spec"
)
config.pyramid_openapi3_add_explorer()
request = config.registry.queryUtility(
IRouteRequest, name="pyramid_openapi3.explorer"
)
view = config.registry.adapters.registered(
(IViewClassifier, request, Interface), IView, name=""
)
response = view(request=DummyRequest(config=config), context=None)
assert b"<title>Swagger UI</title>" in response.body
def test_explorer_view_missing_spec() -> None:
"""Test graceful failure if explorer view is not registered."""
with testConfig() as config:
config.include("pyramid_openapi3")
config.pyramid_openapi3_add_explorer()
request = config.registry.queryUtility(
IRouteRequest, name="pyramid_openapi3.explorer"
)
view = config.registry.adapters.registered(
(IViewClassifier, request, Interface), IView, name=""
)
with pytest.raises(
ConfigurationError,
match="You need to call config.pyramid_openapi3_spec for explorer to work.",
):
view(request=DummyRequest(config=config), context=None)
@dataclass
class DummyRoute:
name: str
pattern: str
def test_openapi_view() -> None:
"""Test registration a an openapi view."""
with testConfig() as config:
config.include("pyramid_openapi3")
with tempfile.NamedTemporaryFile() as document:
document.write(MINIMAL_DOCUMENT)
document.seek(0)
config.pyramid_openapi3_spec(
document.name, route="/foo.yaml", route_name="foo_api_spec"
)
config.add_route("foo", "/foo")
view_func = lambda *arg: "bar" # noqa: E731
config.add_view(openapi=True, renderer="json", view=view_func, route_name="foo")
request_interface = config.registry.queryUtility(IRouteRequest, name="foo")
view = config.registry.adapters.registered(
(IViewClassifier, request_interface, Interface), IView, name=""
)
request = DummyRequest(config=config, content_type="text/html")
request.matched_route = DummyRoute(name="foo", pattern="/foo")
context = None
response = view(context, request)
assert response.json == "bar"
def test_path_parameters() -> None:
"""Test parameters in path are validated correctly."""
with testConfig() as config:
config.include("pyramid_openapi3")
with tempfile.NamedTemporaryFile() as document:
document.write(
b'openapi: "3.0.0"\n'
b"info:\n"
b' version: "1.0.0"\n'
b" title: Foo API\n"
b"paths:\n"
b" /foo:\n"
b" parameters:\n"
b" - name: foo\n"
b" in: query\n"
b" required: true\n"
b" schema:\n"
b" type: integer\n"
b" get:\n"
b" responses:\n"
b" 200:\n"
b" description: A foo\n"
)
document.seek(0)
config.pyramid_openapi3_spec(
document.name, route="/foo.yaml", route_name="foo_api_spec"
)
config.add_route("foo", "/foo")
view_func = lambda *arg: "foo" # noqa: E731 # pragma: no branch
config.add_view(openapi=True, renderer="json", view=view_func, route_name="foo")
request_interface = config.registry.queryUtility(IRouteRequest, name="foo")
view = config.registry.adapters.registered(
(IViewClassifier, request_interface, Interface), IView, name=""
)
# Test validation fails
request = DummyRequest(config=config, content_type="application/json")
request.matched_route = DummyRoute(name="foo", pattern="/foo")
context = None
with pytest.raises(
RequestValidationError, match="Missing required parameter: foo"
):
response = view(context, request)
# Test validation succeeds
request = DummyRequest(
config=config, params={"foo": "1"}, content_type="application/json"
)
request.matched_route = DummyRoute(name="foo", pattern="/foo")
context = None
response = view(context, request)
assert response.json == "foo"
def test_header_parameters() -> None:
"""Test parameters in header are validated correctly."""
with testConfig() as config:
config.include("pyramid_openapi3")
with tempfile.NamedTemporaryFile() as document:
document.write(
b'openapi: "3.0.0"\n'
b"info:\n"
b' version: "1.0.0"\n'
b" title: Foo API\n"
b"paths:\n"
b" /foo:\n"
b" get:\n"
b" parameters:\n"
b" - name: foo\n"
b" in: header\n"
b" required: true\n"
b" schema:\n"
b" type: integer\n"
b" responses:\n"
b" 200:\n"
b" description: A foo\n"
)
document.seek(0)
config.pyramid_openapi3_spec(
document.name, route="/foo.yaml", route_name="foo_api_spec"
)
config.add_route("foo", "/foo")
view_func = lambda *arg: "foo" # noqa: E731 # pragma: no branch
config.add_view(openapi=True, renderer="json", view=view_func, route_name="foo")
request_interface = config.registry.queryUtility(IRouteRequest, name="foo")
view = config.registry.adapters.registered(
(IViewClassifier, request_interface, Interface), IView, name=""
)
# Test validation fails
request = DummyRequest(config=config, content_type="text/html")
request.matched_route = DummyRoute(name="foo", pattern="/foo")
context = None
with pytest.raises(
RequestValidationError, match="Missing required parameter: foo"
):
response = view(context, request)
# Test validation succeeds
request = DummyRequest(
config=config, headers={"foo": "1"}, content_type="text/html"
)
request.matched_route = DummyRoute(name="foo", pattern="/foo")
context = None
response = view(context, request)
assert response.json == "foo"
def test_cookie_parameters() -> None:
"""Test parameters in cookie are validated correctly."""
with testConfig() as config:
config.include("pyramid_openapi3")
with tempfile.NamedTemporaryFile() as document:
document.write(
b'openapi: "3.0.0"\n'
b"info:\n"
b' version: "1.0.0"\n'
b" title: Foo API\n"
b"paths:\n"
b" /foo:\n"
b" get:\n"
b" parameters:\n"
b" - name: foo\n"
b" in: cookie\n"
b" required: true\n"
b" schema:\n"
b" type: integer\n"
b" responses:\n"
b" 200:\n"
b" description: A foo\n"
)
document.seek(0)
config.pyramid_openapi3_spec(
document.name, route="/foo.yaml", route_name="foo_api_spec"
)
config.add_route("foo", "/foo")
view_func = lambda *arg: "foo" # noqa: E731 # pragma: no branch
config.add_view(openapi=True, renderer="json", view=view_func, route_name="foo")
request_interface = config.registry.queryUtility(IRouteRequest, name="foo")
view = config.registry.adapters.registered(
(IViewClassifier, request_interface, Interface), IView, name=""
)
# Test validation fails
request = DummyRequest(config=config, content_type="text/html")
request.matched_route = DummyRoute(name="foo", pattern="/foo")
context = None
with pytest.raises(
RequestValidationError, match="Missing required parameter: foo"
):
response = view(context, request)
# Test validation succeeds
request = DummyRequest(
config=config, cookies={"foo": "1"}, content_type="text/html"
)
request.matched_route = DummyRoute(name="foo", pattern="/foo")
context = None
response = view(context, request)
assert response.json == "foo"
| 37.167347
| 88
| 0.577037
| 1,890
| 18,212
| 5.408995
| 0.097355
| 0.008804
| 0.021129
| 0.02465
| 0.857576
| 0.820112
| 0.810623
| 0.780984
| 0.776093
| 0.764355
| 0
| 0.008968
| 0.320338
| 18,212
| 489
| 89
| 37.243354
| 0.816933
| 0.058313
| 0
| 0.71875
| 0
| 0
| 0.168659
| 0.010666
| 0
| 0
| 0
| 0
| 0.072917
| 1
| 0.03125
| false
| 0
| 0.041667
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
075c980adb01c66462933ed0c04e4ac8d48dae83
| 78,438
|
py
|
Python
|
Lib/ufoLib/test/test_GLIF2.py
|
moyogo/ufolib
|
c5b897168d9f32a66d4828cf922771232a273ff5
|
[
"BSD-3-Clause"
] | null | null | null |
Lib/ufoLib/test/test_GLIF2.py
|
moyogo/ufolib
|
c5b897168d9f32a66d4828cf922771232a273ff5
|
[
"BSD-3-Clause"
] | null | null | null |
Lib/ufoLib/test/test_GLIF2.py
|
moyogo/ufolib
|
c5b897168d9f32a66d4828cf922771232a273ff5
|
[
"BSD-3-Clause"
] | null | null | null |
from __future__ import absolute_import, unicode_literals
import unittest
from ufoLib.glifLib import GlifLibError, readGlyphFromString, writeGlyphToString
from ufoLib.test.testSupport import Glyph, stripText
from itertools import islice
try:
basestring
except NameError:
basestring = str
# ----------
# Test Cases
# ----------
class TestGLIF2(unittest.TestCase):
def assertEqual(self, first, second, msg=None):
if isinstance(first, basestring):
first = stripText(first)
if isinstance(second, basestring):
second = stripText(second)
return super(TestGLIF2, self).assertEqual(first, second, msg=msg)
def pyToGLIF(self, py):
py = stripText(py)
glyph = Glyph()
exec(py, {"glyph" : glyph, "pointPen" : glyph})
glif = writeGlyphToString(glyph.name, glyphObject=glyph, drawPointsFunc=glyph.drawPoints, formatVersion=2, validate=True)
# discard the first line containing the xml declaration
return "\n".join(islice(glif.splitlines(), 1, None))
def glifToPy(self, glif):
glif = stripText(glif)
glif = "<?xml version=\"1.0\"?>\n" + glif
glyph = Glyph()
readGlyphFromString(glif, glyphObject=glyph, pointPen=glyph, validate=True)
return glyph.py()
def testTopElement(self):
# not glyph
glif = """
<notglyph name="a" format="2">
<outline>
</outline>
</notglyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testName_legal(self):
# legal
glif = """
<glyph name="a" format="2">
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testName_empty(self):
# empty
glif = """
<glyph name="" format="2">
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = ""
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testName_not_a_string(self):
# not a string
py = """
glyph.name = 1
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
def testFormat_legal(self):
# legal
glif = """
<glyph name="a" format="2">
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testFormat_illegal_wrong_number(self):
# wrong number
glif = """
<glyph name="a" format="-1">
<outline>
</outline>
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testFormat_illegal_not_int(self):
# not an int
glif = """
<glyph name="a" format="A">
<outline>
</outline>
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testBogusGlyphStructure_unknown_element(self):
# unknown element
glif = """
<glyph name="a" format="2">
<unknown />
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testBogusGlyphStructure_content(self):
# content
glif = """
<glyph name="a" format="2">
Hello World.
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
	def testAdvance_legal_width_and_height(self):
# legal: width and height
glif = """
<glyph name="a" format="2">
<advance height="200" width="100"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.width = 100
glyph.height = 200
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testAdvance_legal_width_and_height_floats(self):
# legal: width and height floats
glif = """
<glyph name="a" format="2">
<advance height="200.1" width="100.1"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.width = 100.1
glyph.height = 200.1
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testAdvance_legal_width(self):
# legal: width
glif = """
<glyph name="a" format="2">
<advance width="100"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.width = 100
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testAdvance_legal_height(self):
# legal: height
glif = """
<glyph name="a" format="2">
<advance height="200"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.height = 200
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testAdvance_illegal_width(self):
# illegal: not a number
glif = """
<glyph name="a" format="2">
<advance width="a"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.width = "a"
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testAdvance_illegal_height(self):
glif = """
<glyph name="a" format="2">
<advance height="a"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.height = "a"
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testUnicodes_legal(self):
# legal
glif = """
<glyph name="a" format="2">
<unicode hex="0061"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.unicodes = [97]
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testUnicodes_legal_multiple(self):
glif = """
<glyph name="a" format="2">
<unicode hex="0062"/>
<unicode hex="0063"/>
<unicode hex="0061"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.unicodes = [98, 99, 97]
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testUnicodes_illegal(self):
# illegal
glif = """
<glyph name="a" format="2">
<unicode hex="1.1"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "zzzzzz"
glyph.unicodes = ["1.1"]
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testNote(self):
glif = """
<glyph name="a" format="2">
<note>
hello
</note>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.note = "hello"
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testLib(self):
glif = """
<glyph name="a" format="2">
<outline>
</outline>
<lib>
<dict>
<key>dict</key>
<dict>
<key>hello</key>
<string>world</string>
</dict>
<key>float</key>
<real>2.5</real>
<key>int</key>
<integer>1</integer>
<key>list</key>
<array>
<string>a</string>
<string>b</string>
<integer>1</integer>
<real>2.5</real>
</array>
<key>string</key>
<string>a</string>
</dict>
</lib>
</glyph>
"""
py = """
glyph.name = "a"
glyph.lib = {"dict" : {"hello" : "world"}, "float" : 2.5, "int" : 1, "list" : ["a", "b", 1, 2.5], "string" : "a"}
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testGuidelines_legal(self):
# legal
glif = """
<glyph name="a" format="2">
<guideline x="1"/>
<guideline y="1"/>
<guideline x="1" y="1" angle="0"/>
<guideline x="1" y="1" angle="360"/>
<guideline x="1.1" y="1.1" angle="45.5"/>
<guideline x="1" name="a"/>
<guideline x="1" color="1,1,1,1"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"x" : 1}, {"y" : 1}, {"angle" : 0, "x" : 1, "y" : 1}, {"angle" : 360, "x" : 1, "y" : 1}, {"angle" : 45.5, "x" : 1.1, "y" : 1.1}, {"name" : "a", "x" : 1}, {"color" : "1,1,1,1", "x" : 1}]
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testGuidelines_illegal_x(self):
# x not an int or float
glif = """
<glyph name="a" format="2">
<guideline x="a" y="1" angle="45"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"angle" : 45, "x" : "a", "y" : 1}]
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testGuidelines_illegal_y(self):
# y not an int or float
glif = """
<glyph name="a" format="2">
<guideline x="1" y="y" angle="45"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"angle" : 45, "x" : 1, "y" : "a"}]
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testGuidelines_illegal_angle(self):
# angle not an int or float
glif = """
<glyph name="a" format="2">
<guideline x="1" y="1" angle="a"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"angle" : "a", "x" : 1, "y" : 1}]
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testGuidelines_illegal_x_missing(self):
# x missing
glif = """
<glyph name="a" format="2">
<guideline y="1" angle="45"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"angle" : 45, "y" : 1}]
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testGuidelines_illegal_y_missing(self):
# y missing
glif = """
<glyph name="a" format="2">
<guideline x="1" angle="45"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"angle" : 45, "x" : 1}]
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testGuidelines_illegal_angle_missing(self):
# angle missing
glif = """
<glyph name="a" format="2">
<guideline x="1" y="1"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"x" : 1, "y" : 1}]
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testGuidelines_illegal_angle_out_of_range(self):
# angle out of range
glif = """
<glyph name="a" format="2">
<guideline x="1" y="1" angle="-1"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"angle" : -1, "x" : "1", "y" : 1}]
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="2">
<guideline x="1" y="1" angle="361"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"angle" : 361, "x" : "1", "y" : 1}]
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testAnchors_legal(self):
# legal
glif = """
<glyph name="a" format="2">
<anchor x="1" y="2" name="test" color="1,0,0,1"/>
<anchor x="1" y="2"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.anchors = [{"color" : "1,0,0,1", "name" : "test", "x" : 1, "y" : 2}, {"x" : 1, "y" : 2}]
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testAnchors_illegal_x(self):
# x not an int or float
glif = """
<glyph name="a" format="2">
<anchor x="a" y="1"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.anchors = [{"x" : "a", "y" : 1}]
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testAnchors_illegal_y(self):
# y not an int or float
glif = """
<glyph name="a" format="2">
<anchor x="1" y="a"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.anchors = [{"x" : 1, "y" : "a"}]
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testAnchors_illegal_x_missing(self):
# x missing
glif = """
<glyph name="a" format="2">
<anchor y="1"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.anchors = [{"y" : 1}]
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testAnchors_illegal_y_missing(self):
# y missing
glif = """
<glyph name="a" format="2">
<anchor x="1"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.anchors = [{"x" : 1}]
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testImage_legal(self):
# legal
glif = """
<glyph name="a" format="2">
<image fileName="test.png" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4" color="1,1,1,1"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.image = {"color" : "1,1,1,1", "fileName" : "test.png", "xOffset" : 1, "xScale" : 2, "xyScale" : 3, "yOffset" : 4, "yScale" : 5, "yxScale" : 6}
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testImage_legal_no_color_or_transformation(self):
# legal: no color or transformation
glif = """
<glyph name="a" format="2">
<image fileName="test.png"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.image = {"fileName" : "test.png", "xOffset" : 0, "xScale" : 1, "xyScale" : 0, "yOffset" : 0, "yScale" : 1, "yxScale" : 0}
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testImage_illegal_no_file_name(self):
# no file name
glif = """
<glyph name="a" format="2">
<image xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4" color="1,1,1,1"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.image = {"color" : "1,1,1,1", "xOffset" : 1, "xScale" : 2, "xyScale" : 3, "yOffset" : 4, "yScale" : 5, "yxScale" : 6}
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testImage_bogus_transformation(self):
# bogus transformation
glif = """
<glyph name="a" format="2">
<image fileName="test.png" xScale="a" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.image = {"fileName" : "test.png", "xOffset" : 1, "xScale" : "a", "xyScale" : 3, "yOffset" : 4, "yScale" : 5, "yxScale" : 6}
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="2">
<image fileName="test.png" xScale="2" xyScale="a" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.image = {"fileName" : "test.png", "xOffset" : 1, "xScale" : 2, "xyScale" : "a", "yOffset" : 4, "yScale" : 5, "yxScale" : 6}
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="2">
<image fileName="test.png" xScale="2" xyScale="3" yxScale="a" yScale="5" xOffset="1" yOffset="4"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.image = {"fileName" : "test.png", "xOffset" : 1, "xScale" : 2, "xyScale" : 3, "yOffset" : 4, "yScale" : 5, "yxScale" : "a"}
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="2">
<image fileName="test.png" xScale="2" xyScale="3" yxScale="6" yScale="a" xOffset="1" yOffset="4"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.image = {"fileName" : "test.png", "xOffset" : 1, "xScale" : 2, "xyScale" : 3, "yOffset" : 4, "yScale" : "a", "yxScale" : 6}
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="2">
<image fileName="test.png" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="a" yOffset="4"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.image = {"fileName" : "test.png", "xOffset" : "a", "xScale" : 2, "xyScale" : 3, "yOffset" : 4, "yScale" : 5, "yxScale" : 6}
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="2">
<image fileName="test.png" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="a"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.image = {"fileName" : "test.png", "xOffset" : 1, "xScale" : 2, "xyScale" : 3, "yOffset" : "a", "yScale" : 5, "yxScale" : 6}
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testImage_bogus_color(self):
# bogus color
glif = """
<glyph name="a" format="2">
<image fileName="test.png" color="1,1,1,x"/>
<outline>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.image = {"color" : "1,1,1,x"}
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testOutline_unknown_element(self):
# unknown element
glif = """
<glyph name="a" format="2">
<outline>
<unknown/>
</outline>
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testOutline_content(self):
# content
glif = """
<glyph name="a" format="2">
<outline>
hello
</outline>
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testComponent_legal(self):
# legal
glif = """
<glyph name="a" format="2">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, 5, 1, 4)])
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testComponent_illegal_no_base(self):
# no base
glif = """
<glyph name="a" format="2">
<outline>
<component xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testComponent_illegal_bogus_transformation(self):
# bogus values in transformation
glif = """
<glyph name="a" format="2">
<outline>
<component base="x" xScale="a" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.addComponent(*["x", ("a", 3, 6, 5, 1, 4)])
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="2">
<outline>
<component base="x" xScale="a" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, "a", 6, 5, 1, 4)])
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="2">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="a" yScale="5" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, "a", 5, 1, 4)])
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="2">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="a" xOffset="1" yOffset="4"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, "a", 1, 4)])
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="2">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="a" yOffset="4"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, 5, "a", 4)])
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
glif = """
<glyph name="a" format="2">
<outline>
<component base="x" xScale="2" xyScale="3" yxScale="6" yScale="5" xOffset="1" yOffset="a"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.addComponent(*["x", (2, 3, 6, 5, 1, "a")])
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testContour_legal_one_contour(self):
# legal: one contour
glif = """
<glyph name="a" format="2">
<outline>
<contour>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testContour_legal_two_contours(self):
# legal: two contours
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="1" y="2" type="move"/>
</contour>
<contour>
<point x="1" y="2" type="move"/>
<point x="10" y="20" type="line"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.beginPath()
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(10, 20)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testContour_illegal_unknown_element(self):
# unknown element
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<unknown/>
</contour>
</outline>
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testContourIdentifier(self):
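# legal: contour with identifier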
glif = """
<glyph name="a" format="2">
<outline>
<contour identifier="foo">
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath(**{"identifier" : "foo"})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointCoordinates_legal_int(self):
# legal: int
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="1" y="-2" type="move"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointCoordinates_legal_float(self):
# legal: float
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="1.1" y="-2.2" type="move"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1.1, -2.2)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointCoordinates_illegal_x(self):
# illegal: x as string
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="a" y="2" type="move"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[("a", 2)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testPointCoordinates_illegal_y(self):
# illegal: y as string
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="1" y="a" type="move"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, "a")], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testPointTypeMove_legal(self):
# legal
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="1" y="-2" type="move"/>
<point x="3" y="-4" type="line"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointTypeMove_legal_smooth(self):
# legal: smooth=True
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="1" y="-2" type="move" smooth="yes"/>
<point x="3" y="-4" type="line"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : True})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointTypeMove_illegal_not_at_start(self):
# illegal: not at start
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="3" y="-4" type="line"/>
<point x="1" y="-2" type="move"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testPointTypeLine_legal(self):
# legal
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="1" y="-2" type="move"/>
<point x="3" y="-4" type="line"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointTypeLine_legal_start_of_contour(self):
# legal: start of contour
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="1" y="-2" type="line"/>
<point x="3" y="-4" type="line"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointTypeLine_legal_smooth(self):
# legal: smooth=True
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="1" y="-2" type="move"/>
<point x="3" y="-4" type="line" smooth="yes"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(3, -4)], **{"segmentType" : "line", "smooth" : True})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointTypeCurve_legal(self):
# legal
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="0" y="65"/>
<point x="65" y="200"/>
<point x="100" y="200" type="curve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointTypeCurve_legal_start_of_contour(self):
# legal: start of contour
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="100" y="200" type="curve"/>
<point x="0" y="65"/>
<point x="65" y="200"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointTypeCurve_legal_smooth(self):
# legal: smooth=True
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="0" y="65"/>
<point x="65" y="200"/>
<point x="100" y="200" type="curve" smooth="yes"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : True})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointTypeCurve_legal_no_off_curves(self):
# legal: no off-curves
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="100" y="200" type="curve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointTypeCurve_legal_1_off_curve(self):
# legal: 1 off-curve
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="50" y="100"/>
<point x="100" y="200" type="curve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(50, 100)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointTypeCurve_illegal_3_off_curves(self):
# illegal: 3 off-curves
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="0" y="100"/>
<point x="35" y="125"/>
<point x="65" y="200"/>
<point x="100" y="200" type="curve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 100)], **{"smooth" : False})
pointPen.addPoint(*[(35, 125)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testPointQCurve_legal(self):
# legal
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="0" y="65"/>
<point x="65" y="200"/>
<point x="100" y="200" type="qcurve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointQCurve_legal_start_of_contour(self):
# legal: start of contour
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="100" y="200" type="qcurve"/>
<point x="0" y="65"/>
<point x="65" y="200"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointQCurve_legal_smooth(self):
# legal: smooth=True
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="0" y="65"/>
<point x="65" y="200"/>
<point x="100" y="200" type="qcurve" smooth="yes"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : True})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointQCurve_legal_no_off_curves(self):
# legal: no off-curves
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="100" y="200" type="qcurve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointQCurve_legal_one_off_curve(self):
# legal: 1 off-curve
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="50" y="100"/>
<point x="100" y="200" type="qcurve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(50, 100)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointQCurve_legal_3_off_curves(self):
# legal: 3 off-curves
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="0" y="100"/>
<point x="35" y="125"/>
<point x="65" y="200"/>
<point x="100" y="200" type="qcurve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 100)], **{"smooth" : False})
pointPen.addPoint(*[(35, 125)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testSpecialCaseQCurve_legal_no_on_curve(self):
# contour with no on curve
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="0"/>
<point x="0" y="100"/>
<point x="100" y="100"/>
<point x="100" y="0"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"smooth" : False})
pointPen.addPoint(*[(0, 100)], **{"smooth" : False})
pointPen.addPoint(*[(100, 100)], **{"smooth" : False})
pointPen.addPoint(*[(100, 0)], **{"smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointTypeOffCurve_legal(self):
# legal
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="0" type="move"/>
<point x="0" y="65"/>
<point x="65" y="200"/>
<point x="100" y="200" type="curve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointTypeOffCurve_legal_start_of_contour(self):
# legal: start of contour
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="65"/>
<point x="65" y="200"/>
<point x="100" y="200" type="curve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(65, 200)], **{"smooth" : False})
pointPen.addPoint(*[(100, 200)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testPointTypeOffCurve_illegal_before_move(self):
# before move
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="65"/>
<point x="0" y="0" type="move"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "move", "smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testPointTypeOffCurve_illegal_before_line(self):
# before line
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="65"/>
<point x="0" y="0" type="line"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : False})
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "line", "smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testPointTypeOffCurve_illegal_smooth(self):
# illegal: smooth=True on an off-curve point
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="0" y="65" smooth="yess"/>
<point x="0" y="0" type="curve"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(0, 65)], **{"smooth" : True})
pointPen.addPoint(*[(0, 0)], **{"segmentType" : "curve", "smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testOpenContourLooseOffCurves(self):
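# illegal: open contour with loose off-curve points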
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="1" y="2" type="move"/>
<point x="1" y="2"/>
<point x="1" y="2"/>
<point x="1" y="2" type="curve"/>
<point x="1" y="2"/>
</contour>
</outline>
</glyph>
"""
self.assertRaises(GlifLibError, self.glifToPy, glif)
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, 2)], **{"smooth" : False})
pointPen.addPoint(*[(1, 2)], **{"smooth" : False})
pointPen.addPoint(*[(1, 2)], **{"segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, 2)], **{"smooth" : False})
pointPen.endPath()
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
def testPointIdentifier(self):
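# legal: point identifiers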
glif = """
<glyph name="a" format="2">
<outline>
<contour>
<point x="1" y="-2" type="move" identifier="1"/>
<point x="1" y="-2" type="line" identifier="2"/>
<point x="1" y="-2" type="curve" identifier="3"/>
<point x="1" y="-2" type="qcurve" identifier="4"/>
</contour>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
pointPen.beginPath()
pointPen.addPoint(*[(1, -2)], **{"identifier" : "1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testIdentifierConflict_legal_no_conflict(self):
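# legal: no conflicting identifiers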
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor2"/>
<outline>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="point1"/>
<point x="1" y="-2" type="line" identifier="point2"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour2">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component2"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour2"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
resultGlif = self.pyToGLIF(py)
resultPy = self.glifToPy(glif)
self.assertEqual(glif, resultGlif)
self.assertEqual(py, resultPy)
def testIdentifierConflict_point_point(self):
# point - point
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor2"/>
<outline>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="point1"/>
<point x="1" y="-2" type="line" identifier="point1"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour2">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component2"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point1", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour2"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testIdentifierConflict_point_contour(self):
# point - contour
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor2"/>
<outline>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="contour1"/>
<point x="1" y="-2" type="line" identifier="point2"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour2">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component2"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "contour1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour2"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testIdentifierConflict_point_component(self):
# point - component
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor2"/>
<outline>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="component1"/>
<point x="1" y="-2" type="line" identifier="point2"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour2">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component2"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "component1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour2"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testIdentifierConflict_point_guideline(self):
# point - guideline
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor2"/>
<outline>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="guideline1"/>
<point x="1" y="-2" type="line" identifier="point2"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour2">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component2"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "guideline1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour2"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testIdentifierConflict_point_anchor(self):
# point - anchor
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor2"/>
<outline>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="anchor1"/>
<point x="1" y="-2" type="line" identifier="point2"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour2">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component2"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "anchor1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour2"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testIdentifierConflict_contour_contour(self):
# contour - contour
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor2"/>
<outline>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="point1"/>
<point x="1" y="-2" type="line" identifier="point2"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component2"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testIdentifierConflict_contour_component(self):
# contour - component
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor2"/>
<outline>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="point1"/>
<point x="1" y="-2" type="line" identifier="point2"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour2">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="contour1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component2"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour2"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "contour1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testIdentifierConflict_contour_guideline(self):
# contour - guideline
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="contour1"/>
<guideline x="0" identifier="guideline2"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor2"/>
<outline>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="point1"/>
<point x="1" y="-2" type="line" identifier="point2"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour2">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component2"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "contour1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour2"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testIdentifierConflict_contour_anchor(self):
# contour - anchor
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor2"/>
<outline>
<contour identifier="anchor1">
<point x="1" y="-2" type="move" identifier="point1"/>
<point x="1" y="-2" type="line" identifier="point2"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour2">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component2"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "anchor1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour2"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testIdentifierConflict_component_component(self):
# component - component
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor2"/>
<outline>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="point1"/>
<point x="1" y="-2" type="line" identifier="point2"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour2">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component1"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour2"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testIdentifierConflict_component_guideline(self):
# component - guideline
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="component1"/>
<guideline x="0" identifier="guideline2"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor2"/>
<outline>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="point1"/>
<point x="1" y="-2" type="line" identifier="point2"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour2">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component2"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "component1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour2"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testIdentifierConflict_component_anchor(self):
# component - anchor
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor2"/>
<outline>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="point1"/>
<point x="1" y="-2" type="line" identifier="point2"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour2">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="anchor1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component2"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour2"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "anchor1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testIdentifierConflict_guideline_guideline(self):
# guideline - guideline
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline1"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor2"/>
<outline>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="point1"/>
<point x="1" y="-2" type="line" identifier="point2"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour2">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component2"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline1", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour2"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testIdentifierConflict_guideline_anchor(self):
# guideline - anchor
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="anchor1"/>
<guideline x="0" identifier="guideline2"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor2"/>
<outline>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="point1"/>
<point x="1" y="-2" type="line" identifier="point2"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour2">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component2"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "anchor1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor2", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour2"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
def testIdentifierConflict_anchor_anchor(self):
# anchor - anchor
glif = """
<glyph name="a" format="2">
<guideline x="0" identifier="guideline1"/>
<guideline x="0" identifier="guideline2"/>
<anchor x="0" y="0" identifier="anchor1"/>
<anchor x="0" y="0" identifier="anchor1"/>
<outline>
<contour identifier="contour1">
<point x="1" y="-2" type="move" identifier="point1"/>
<point x="1" y="-2" type="line" identifier="point2"/>
<point x="1" y="-2" type="curve" identifier="point3"/>
<point x="1" y="-2" type="qcurve" identifier="point4"/>
</contour>
<contour identifier="contour2">
<point x="1" y="-2" type="move" identifier="point5"/>
</contour>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component1"/>
<component base="x" xyScale="1" yxScale="1" xOffset="1" yOffset="1" identifier="component2"/>
</outline>
</glyph>
"""
py = """
glyph.name = "a"
glyph.guidelines = [{"identifier" : "guideline1", "x" : 0}, {"identifier" : "guideline2", "x" : 0}]
glyph.anchors = [{"identifier" : "anchor1", "x" : 0, "y" : 0}, {"identifier" : "anchor1", "x" : 0, "y" : 0}]
pointPen.beginPath(**{"identifier" : "contour1"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point1", "segmentType" : "move", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point2", "segmentType" : "line", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point3", "segmentType" : "curve", "smooth" : False})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point4", "segmentType" : "qcurve", "smooth" : False})
pointPen.endPath()
pointPen.beginPath(**{"identifier" : "contour2"})
pointPen.addPoint(*[(1, -2)], **{"identifier" : "point5", "segmentType" : "move", "smooth" : False})
pointPen.endPath()
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component1"})
pointPen.addComponent(*["x", (1, 1, 1, 1, 1, 1)], **{"identifier" : "component2"})
"""
self.assertRaises(GlifLibError, self.pyToGLIF, py)
self.assertRaises(GlifLibError, self.glifToPy, glif)
if __name__ == "__main__":
from ufoLib.test.testSupport import runTests
runTests()
| 32.998738
| 208
| 0.600971
| 9,219
| 78,438
| 5.088838
| 0.024515
| 0.037984
| 0.041139
| 0.079124
| 0.936565
| 0.931001
| 0.925694
| 0.921175
| 0.906936
| 0.884619
| 0
| 0.03624
| 0.171881
| 78,438
| 2,376
| 209
| 33.012626
| 0.686003
| 0.018499
| 0
| 0.864198
| 0
| 0.120256
| 0.735789
| 0.140929
| 0
| 0
| 0
| 0
| 0.091449
| 1
| 0.043896
| false
| 0
| 0.002743
| 0
| 0.048468
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
db9a4fc7f001d0402fa4c3445eac72e8927a3b98
| 5,454
|
py
|
Python
|
saleor/graphql/checkout/tests/deprecated/test_checkout_lines_add.py
|
DevPoke/saleor
|
ced3a2249a18031f9f593e71d1d18aa787ec1060
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/checkout/tests/deprecated/test_checkout_lines_add.py
|
DevPoke/saleor
|
ced3a2249a18031f9f593e71d1d18aa787ec1060
|
[
"CC-BY-4.0"
] | null | null | null |
saleor/graphql/checkout/tests/deprecated/test_checkout_lines_add.py
|
DevPoke/saleor
|
ced3a2249a18031f9f593e71d1d18aa787ec1060
|
[
"CC-BY-4.0"
] | null | null | null |
from unittest import mock
import graphene
from .....checkout.error_codes import CheckoutErrorCode
from .....checkout.fetch import fetch_checkout_info, fetch_checkout_lines
from .....checkout.utils import calculate_checkout_quantity
from .....plugins.manager import get_plugins_manager
from ....tests.utils import get_graphql_content
from ...mutations.utils import update_checkout_shipping_method_if_invalid
MUTATION_CHECKOUT_LINES_ADD = """
mutation checkoutLinesAdd(
$checkoutId: ID, $token: UUID, $lines: [CheckoutLineInput!]!) {
checkoutLinesAdd(checkoutId: $checkoutId, token: $token, lines: $lines) {
checkout {
token
quantity
lines {
quantity
variant {
id
}
}
}
errors {
field
code
message
variants
}
}
}"""
@mock.patch(
"saleor.graphql.checkout.mutations.checkout_lines_add."
"update_checkout_shipping_method_if_invalid",
wraps=update_checkout_shipping_method_if_invalid,
)
def test_checkout_lines_add_by_checkout_id(
mocked_update_shipping_method, user_api_client, checkout_with_item, stock
):
variant = stock.product_variant
checkout = checkout_with_item
line = checkout.lines.first()
lines, _ = fetch_checkout_lines(checkout)
assert calculate_checkout_quantity(lines) == 3
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
variables = {
"checkoutId": checkout_id,
"lines": [{"variantId": variant_id, "quantity": 1}],
"channelSlug": checkout.channel.slug,
}
response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutLinesAdd"]
assert not data["errors"]
checkout.refresh_from_db()
lines, _ = fetch_checkout_lines(checkout)
line = checkout.lines.last()
assert line.variant == variant
assert line.quantity == 1
assert calculate_checkout_quantity(lines) == 4
manager = get_plugins_manager()
lines, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
mocked_update_shipping_method.assert_called_once_with(checkout_info, lines)
@mock.patch(
"saleor.graphql.checkout.mutations.checkout_lines_add."
"update_checkout_shipping_method_if_invalid",
wraps=update_checkout_shipping_method_if_invalid,
)
def test_checkout_lines_add_by_checkout_token(
mocked_update_shipping_method, user_api_client, checkout_with_item, stock
):
# given
variant = stock.product_variant
checkout = checkout_with_item
lines, _ = fetch_checkout_lines(checkout)
assert calculate_checkout_quantity(lines) == 3
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
variables = {
"token": checkout.token,
"lines": [{"variantId": variant_id, "quantity": 1}],
"channelSlug": checkout.channel.slug,
}
# when
response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
# then
content = get_graphql_content(response)
data = content["data"]["checkoutLinesAdd"]
assert not data["errors"]
checkout.refresh_from_db()
lines, _ = fetch_checkout_lines(checkout)
line = checkout.lines.last()
assert line.variant == variant
assert line.quantity == 1
assert calculate_checkout_quantity(lines) == 4
manager = get_plugins_manager()
lines, _ = fetch_checkout_lines(checkout)
checkout_info = fetch_checkout_info(checkout, lines, [], manager)
mocked_update_shipping_method.assert_called_once_with(checkout_info, lines)
def test_checkout_lines_add_neither_token_and_id_given(
user_api_client, checkout_with_item, stock
):
variant = stock.product_variant
checkout = checkout_with_item
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
variables = {
"lines": [{"variantId": variant_id, "quantity": 1}],
"channelSlug": checkout.channel.slug,
}
response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutLinesAdd"]
assert len(data["errors"]) == 1
assert not data["checkout"]
assert data["errors"][0]["code"] == CheckoutErrorCode.GRAPHQL_ERROR.name
def test_checkout_lines_add_both_token_and_id_given(
user_api_client, checkout_with_item, stock
):
variant = stock.product_variant
checkout = checkout_with_item
variant_id = graphene.Node.to_global_id("ProductVariant", variant.pk)
checkout_id = graphene.Node.to_global_id("Checkout", checkout.pk)
variables = {
"checkoutId": checkout_id,
"token": checkout.token,
"lines": [{"variantId": variant_id, "quantity": 1}],
"channelSlug": checkout.channel.slug,
}
response = user_api_client.post_graphql(MUTATION_CHECKOUT_LINES_ADD, variables)
content = get_graphql_content(response)
data = content["data"]["checkoutLinesAdd"]
assert len(data["errors"]) == 1
assert not data["checkout"]
assert data["errors"][0]["code"] == CheckoutErrorCode.GRAPHQL_ERROR.name
| 34.961538
| 83
| 0.697836
| 611
| 5,454
| 5.873977
| 0.145663
| 0.08331
| 0.049039
| 0.038451
| 0.84341
| 0.830593
| 0.820284
| 0.820284
| 0.806353
| 0.806353
| 0
| 0.003218
| 0.20242
| 5,454
| 155
| 84
| 35.187097
| 0.821839
| 0.00275
| 0
| 0.70229
| 0
| 0
| 0.207728
| 0.044158
| 0
| 0
| 0
| 0
| 0.137405
| 1
| 0.030534
| false
| 0
| 0.061069
| 0
| 0.091603
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
917b20062021246790a6fdc78b4c4602f738dc1c
| 37,087
|
py
|
Python
|
groot-jr/weather/Adafruit_ADS1x15/Adafruit_ADS1x15.py
|
henryse/pi-weather-pro
|
edc44820295492a0437ec36a7a868bb56c309f77
|
[
"Apache-2.0"
] | null | null | null |
groot-jr/weather/Adafruit_ADS1x15/Adafruit_ADS1x15.py
|
henryse/pi-weather-pro
|
edc44820295492a0437ec36a7a868bb56c309f77
|
[
"Apache-2.0"
] | null | null | null |
groot-jr/weather/Adafruit_ADS1x15/Adafruit_ADS1x15.py
|
henryse/pi-weather-pro
|
edc44820295492a0437ec36a7a868bb56c309f77
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
import time
from Adafruit_I2C import Adafruit_I2C
# ===========================================================================
# ADS1x15 Class
#
# Originally written by K. Townsend,
# Adafruit (https://github.com/adafruit/Adafruit-Raspberry-Pi-Python-Code/tree/master/Adafruit_ADS1x15)
# Updates and new functions implementation by Pedro Villanueva, 03/2013.
# The only error in the original code was in line 57:
# __ADS1015_REG_CONFIG_DR_920SPS = 0x0050
# should be
# __ADS1015_REG_CONFIG_DR_920SPS = 0x0060
#
# NOT IMPLEMENTED: Conversion ready pin, page 15 datasheet.
# ===========================================================================
class ADS1x15:
i2c = None
# IC Identifiers
__IC_ADS1015 = 0x00
__IC_ADS1115 = 0x01
# Pointer Register
__ADS1015_REG_POINTER_MASK = 0x03
__ADS1015_REG_POINTER_CONVERT = 0x00
__ADS1015_REG_POINTER_CONFIG = 0x01
__ADS1015_REG_POINTER_LOWTHRESH = 0x02
__ADS1015_REG_POINTER_HITHRESH = 0x03
# Config Register
__ADS1015_REG_CONFIG_OS_MASK = 0x8000
__ADS1015_REG_CONFIG_OS_SINGLE = 0x8000 # Write: Set to start a single-conversion
__ADS1015_REG_CONFIG_OS_BUSY = 0x0000 # Read: Bit = 0 when conversion is in progress
__ADS1015_REG_CONFIG_OS_NOTBUSY = 0x8000 # Read: Bit = 1 when device is not performing a conversion
__ADS1015_REG_CONFIG_MUX_MASK = 0x7000
__ADS1015_REG_CONFIG_MUX_DIFF_0_1 = 0x0000 # Differential P = AIN0, N = AIN1 (default)
__ADS1015_REG_CONFIG_MUX_DIFF_0_3 = 0x1000 # Differential P = AIN0, N = AIN3
__ADS1015_REG_CONFIG_MUX_DIFF_1_3 = 0x2000 # Differential P = AIN1, N = AIN3
__ADS1015_REG_CONFIG_MUX_DIFF_2_3 = 0x3000 # Differential P = AIN2, N = AIN3
__ADS1015_REG_CONFIG_MUX_SINGLE_0 = 0x4000 # Single-ended AIN0
__ADS1015_REG_CONFIG_MUX_SINGLE_1 = 0x5000 # Single-ended AIN1
__ADS1015_REG_CONFIG_MUX_SINGLE_2 = 0x6000 # Single-ended AIN2
__ADS1015_REG_CONFIG_MUX_SINGLE_3 = 0x7000 # Single-ended AIN3
__ADS1015_REG_CONFIG_PGA_MASK = 0x0E00
__ADS1015_REG_CONFIG_PGA_6_144V = 0x0000 # +/-6.144V range
__ADS1015_REG_CONFIG_PGA_4_096V = 0x0200 # +/-4.096V range
__ADS1015_REG_CONFIG_PGA_2_048V = 0x0400 # +/-2.048V range (default)
__ADS1015_REG_CONFIG_PGA_1_024V = 0x0600 # +/-1.024V range
__ADS1015_REG_CONFIG_PGA_0_512V = 0x0800 # +/-0.512V range
__ADS1015_REG_CONFIG_PGA_0_256V = 0x0A00 # +/-0.256V range
__ADS1015_REG_CONFIG_MODE_MASK = 0x0100
__ADS1015_REG_CONFIG_MODE_CONTIN = 0x0000 # Continuous conversion mode
__ADS1015_REG_CONFIG_MODE_SINGLE = 0x0100 # Power-down single-shot mode (default)
__ADS1015_REG_CONFIG_DR_MASK = 0x00E0
__ADS1015_REG_CONFIG_DR_128SPS = 0x0000 # 128 samples per second
__ADS1015_REG_CONFIG_DR_250SPS = 0x0020 # 250 samples per second
__ADS1015_REG_CONFIG_DR_490SPS = 0x0040 # 490 samples per second
__ADS1015_REG_CONFIG_DR_920SPS = 0x0060 # 920 samples per second
__ADS1015_REG_CONFIG_DR_1600SPS = 0x0080 # 1600 samples per second (default)
__ADS1015_REG_CONFIG_DR_2400SPS = 0x00A0 # 2400 samples per second
__ADS1015_REG_CONFIG_DR_3300SPS = 0x00C0 # 3300 samples per second (also 0x00E0)
__ADS1115_REG_CONFIG_DR_8SPS = 0x0000 # 8 samples per second
__ADS1115_REG_CONFIG_DR_16SPS = 0x0020 # 16 samples per second
__ADS1115_REG_CONFIG_DR_32SPS = 0x0040 # 32 samples per second
__ADS1115_REG_CONFIG_DR_64SPS = 0x0060 # 64 samples per second
__ADS1115_REG_CONFIG_DR_128SPS = 0x0080 # 128 samples per second
__ADS1115_REG_CONFIG_DR_250SPS = 0x00A0 # 250 samples per second (default)
__ADS1115_REG_CONFIG_DR_475SPS = 0x00C0 # 475 samples per second
__ADS1115_REG_CONFIG_DR_860SPS = 0x00E0 # 860 samples per second
__ADS1015_REG_CONFIG_CMODE_MASK = 0x0010
__ADS1015_REG_CONFIG_CMODE_TRAD = 0x0000 # Traditional comparator with hysteresis (default)
__ADS1015_REG_CONFIG_CMODE_WINDOW = 0x0010 # Window comparator
__ADS1015_REG_CONFIG_CPOL_MASK = 0x0008
__ADS1015_REG_CONFIG_CPOL_ACTVLOW = 0x0000 # ALERT/RDY pin is low when active (default)
__ADS1015_REG_CONFIG_CPOL_ACTVHI = 0x0008 # ALERT/RDY pin is high when active
__ADS1015_REG_CONFIG_CLAT_MASK = 0x0004 # Determines if ALERT/RDY pin latches once asserted
__ADS1015_REG_CONFIG_CLAT_NONLAT = 0x0000 # Non-latching comparator (default)
__ADS1015_REG_CONFIG_CLAT_LATCH = 0x0004 # Latching comparator
__ADS1015_REG_CONFIG_CQUE_MASK = 0x0003
__ADS1015_REG_CONFIG_CQUE_1CONV = 0x0000 # Assert ALERT/RDY after one conversion
__ADS1015_REG_CONFIG_CQUE_2CONV = 0x0001 # Assert ALERT/RDY after two conversions
__ADS1015_REG_CONFIG_CQUE_4CONV = 0x0002 # Assert ALERT/RDY after four conversions
__ADS1015_REG_CONFIG_CQUE_NONE = 0x0003 # Disable the comparator and put ALERT/RDY in high state (default)
# Dictionaries with the sampling speed values
# These simplify and clean the code (avoid the abuse of if/elif/else clauses)
spsADS1115 = {
8: __ADS1115_REG_CONFIG_DR_8SPS,
16: __ADS1115_REG_CONFIG_DR_16SPS,
32: __ADS1115_REG_CONFIG_DR_32SPS,
64: __ADS1115_REG_CONFIG_DR_64SPS,
128: __ADS1115_REG_CONFIG_DR_128SPS,
250: __ADS1115_REG_CONFIG_DR_250SPS,
475: __ADS1115_REG_CONFIG_DR_475SPS,
860: __ADS1115_REG_CONFIG_DR_860SPS
}
spsADS1015 = {
128: __ADS1015_REG_CONFIG_DR_128SPS,
250: __ADS1015_REG_CONFIG_DR_250SPS,
490: __ADS1015_REG_CONFIG_DR_490SPS,
920: __ADS1015_REG_CONFIG_DR_920SPS,
1600: __ADS1015_REG_CONFIG_DR_1600SPS,
2400: __ADS1015_REG_CONFIG_DR_2400SPS,
3300: __ADS1015_REG_CONFIG_DR_3300SPS
}
# Dictionary with the programmable gains
pgaADS1x15 = {
6144: __ADS1015_REG_CONFIG_PGA_6_144V,
4096: __ADS1015_REG_CONFIG_PGA_4_096V,
2048: __ADS1015_REG_CONFIG_PGA_2_048V,
1024: __ADS1015_REG_CONFIG_PGA_1_024V,
512: __ADS1015_REG_CONFIG_PGA_0_512V,
256: __ADS1015_REG_CONFIG_PGA_0_256V
}
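# Example of the dictionary-based lookup used throughout this class (an illustrative
# note, not part of the original driver): setdefault() returns the matching config
# bits for a known rate and falls back to the supplied default otherwise, e.g.
#   spsADS1015.setdefault(920, __ADS1015_REG_CONFIG_DR_1600SPS)  -> 0x0060
#   spsADS1015.setdefault(999, __ADS1015_REG_CONFIG_DR_1600SPS)  -> 0x0080 (1600 SPS default)
# Note that setdefault() also inserts unknown keys into the dictionary as a side effect.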
# Constructor
def __init__(self, address=0x48, ic=__IC_ADS1015, debug=False):
# Depending on if you have an old or a new Raspberry Pi, you
# may need to change the I2C bus. Older Pis use SMBus 0,
# whereas new Pis use SMBus 1. If you see an error like:
# 'Error accessing 0x48: Check your I2C address '
# change the SMBus number in the initializer below!
self.i2c = Adafruit_I2C(address)
self.address = address
self.debug = debug
# Make sure the IC specified is valid
if (ic < self.__IC_ADS1015) | (ic > self.__IC_ADS1115):
if self.debug:
print "ADS1x15: Invalid IC specified: %dh" % ic
else:
self.ic = ic
# Set pga value, so that getLastConversionResult() can use it,
# any function that accepts a pga value must update this.
self.pga = 6144
def readRaw(self, channel=0, pga=6144, sps=250):
# return raw AD Value
# With invalid channel return -1
if channel > 3:
if self.debug:
print "ADS1x15: Invalid channel specified: %d" % channel
return -1
# Disable comparator, Non-latching, Alert/Rdy active low
# traditional comparator, single-shot mode
config = self.__ADS1015_REG_CONFIG_CQUE_NONE | \
self.__ADS1015_REG_CONFIG_CLAT_NONLAT | \
self.__ADS1015_REG_CONFIG_CPOL_ACTVLOW | \
self.__ADS1015_REG_CONFIG_CMODE_TRAD | \
self.__ADS1015_REG_CONFIG_MODE_SINGLE
# Set sample per seconds, defaults to 250sps
# If sps is in the dictionary (defined in init) it returns the value of the constant
# otherwise it returns the value for 250sps. This saves a lot of if/elif/else code!
if self.__IC_ADS1015 == self.ic:
config |= self.spsADS1015.setdefault(sps, self.__ADS1015_REG_CONFIG_DR_1600SPS)
else:
if (sps not in self.spsADS1115) & self.debug:
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.spsADS1115.setdefault(sps, self.__ADS1115_REG_CONFIG_DR_250SPS)
# Set PGA/voltage range, defaults to +-6.144V
if (pga not in self.pgaADS1x15) & self.debug:
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.pgaADS1x15.setdefault(pga, self.__ADS1015_REG_CONFIG_PGA_6_144V)
self.pga = pga
# Set the channel to be converted
if channel == 3:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_3
elif channel == 2:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_2
elif channel == 1:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_1
else:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_0
# Set 'start single-conversion' bit
config |= self.__ADS1015_REG_CONFIG_OS_SINGLE
# Write config register to the ADC
config_register = [(config >> 8) & 0xFF, config & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, config_register)
# Wait for the ADC conversion to complete
# The minimum delay depends on the sps: delay >= 1/sps
# We add 0.1ms to be sure
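# Worked example (illustrative, not from the datasheet): at the default sps=250,
# delay = 1/250 + 0.0001 = 0.0041 s, i.e. roughly 4.1 ms per conversion.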
delay = 1.0 / sps + 0.0001
time.sleep(delay)
# Read the conversion results
result = self.i2c.readList(self.__ADS1015_REG_POINTER_CONVERT, 2)
return (result[0] << 8) | (result[1])
def readADCSingleEnded(self, channel=0, pga=6144, sps=250):
"""Gets a single-ended ADC reading from the specified channel in mV. \
The sample rate for this mode (single-shot) can be used to lower the noise \
(low sps) or to lower the power consumption (high sps) by duty cycling, \
see datasheet page 14 for more info. \
The pga must be given in mV, see page 13 for the supported values."""
# With invalid channel return -1
if channel > 3:
if self.debug:
print "ADS1x15: Invalid channel specified: %d" % channel
return -1
# Disable comparator, Non-latching, Alert/Rdy active low
# traditional comparator, single-shot mode
config = self.__ADS1015_REG_CONFIG_CQUE_NONE | \
self.__ADS1015_REG_CONFIG_CLAT_NONLAT | \
self.__ADS1015_REG_CONFIG_CPOL_ACTVLOW | \
self.__ADS1015_REG_CONFIG_CMODE_TRAD | \
self.__ADS1015_REG_CONFIG_MODE_SINGLE
# Set sample per seconds, defaults to 250sps
# If sps is in the dictionary (defined in init) it returns the value of the constant
# otherwise it returns the value for 250sps. This saves a lot of if/elif/else code!
if self.ic == self.__IC_ADS1015:
config |= self.spsADS1015.setdefault(sps, self.__ADS1015_REG_CONFIG_DR_1600SPS)
else:
if (sps not in self.spsADS1115) & self.debug:
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.spsADS1115.setdefault(sps, self.__ADS1115_REG_CONFIG_DR_250SPS)
# Set PGA/voltage range, defaults to +-6.144V
if (pga not in self.pgaADS1x15) & self.debug:
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.pgaADS1x15.setdefault(pga, self.__ADS1015_REG_CONFIG_PGA_6_144V)
self.pga = pga
# Set the channel to be converted
if channel == 3:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_3
elif channel == 2:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_2
elif channel == 1:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_1
else:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_0
# Set 'start single-conversion' bit
config |= self.__ADS1015_REG_CONFIG_OS_SINGLE
# Write config register to the ADC
config_register = [(config >> 8) & 0xFF, config & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, config_register)
# Wait for the ADC conversion to complete
# The minimum delay depends on the sps: delay >= 1/sps
# We add 0.1ms to be sure
delay = 1.0 / sps + 0.0001
time.sleep(delay)
# Read the conversion results
result = self.i2c.readList(self.__ADS1015_REG_POINTER_CONVERT, 2)
if self.ic == self.__IC_ADS1015:
# Shift right 4 bits for the 12-bit ADS1015 and convert to mV
return (((result[0] << 8) | (result[1] & 0xFF)) >> 4) * pga / 2048.0
else:
# Return a mV value for the ADS1115
# (Take signed values into account as well)
val = (result[0] << 8) | (result[1])
if val > 0x7FFF:
return (val - 0xFFFF) * pga / 32768.0
else:
return ((result[0] << 8) | (result[1])) * pga / 32768.0
def readADCDifferential(self, chP=0, chN=1, pga=6144, sps=250):
"""Gets a differential ADC reading from channels chP and chN in mV. \
The sample rate for this mode (single-shot) can be used to lower the noise \
(low sps) or to lower the power consumption (high sps) by duty cycling, \
see data sheet page 14 for more info. \
The pga must be given in mV, see page 13 for the supported values."""
# Disable comparator, Non-latching, Alert/Rdy active low
# traditional comparator, single-shot mode
config = self.__ADS1015_REG_CONFIG_CQUE_NONE | \
self.__ADS1015_REG_CONFIG_CLAT_NONLAT | \
self.__ADS1015_REG_CONFIG_CPOL_ACTVLOW | \
self.__ADS1015_REG_CONFIG_CMODE_TRAD | \
self.__ADS1015_REG_CONFIG_MODE_SINGLE
# Set channels
if (chP == 0) & (chN == 1):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_0_1
elif (chP == 0) & (chN == 3):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_0_3
elif (chP == 2) & (chN == 3):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_2_3
elif (chP == 1) & (chN == 3):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_1_3
else:
if self.debug:
print "ADS1x15: Invalid channels specified: %d, %d" % (chP, chN)
return -1
# Set sample per seconds, defaults to 250sps
# If sps is in the dictionary (defined in init()) it returns the value of the constant
# otherwise it returns the value for 250sps. This saves a lot of if/elif/else code!
if self.ic == self.__IC_ADS1015:
config |= self.spsADS1015.setdefault(sps, self.__ADS1015_REG_CONFIG_DR_1600SPS)
else:
if (sps not in self.spsADS1115) & self.debug:
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.spsADS1115.setdefault(sps, self.__ADS1115_REG_CONFIG_DR_250SPS)
# Set PGA/voltage range, defaults to +-6.144V
if (pga not in self.pgaADS1x15) & self.debug:
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.pgaADS1x15.setdefault(pga, self.__ADS1015_REG_CONFIG_PGA_6_144V)
self.pga = pga
# Set 'start single-conversion' bit
config |= self.__ADS1015_REG_CONFIG_OS_SINGLE
# Write config register to the ADC
config_register = [(config >> 8) & 0xFF, config & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, config_register)
# Wait for the ADC conversion to complete
# The minimum delay depends on the sps: delay >= 1/sps
# We add 0.1ms to be sure
delay = 1.0 / sps + 0.0001
time.sleep(delay)
# Read the conversion results
result = self.i2c.readList(self.__ADS1015_REG_POINTER_CONVERT, 2)
if self.ic == self.__IC_ADS1015:
# Shift right 4 bits for the 12-bit ADS1015 and convert to mV
return (((result[0] << 8) | (result[1] & 0xFF)) >> 4) * pga / 2048.0
else:
# Return a mV value for the ADS1115
# (Take signed values into account as well)
val = (result[0] << 8) | (result[1])
if val > 0x7FFF:
return (val - 0xFFFF) * pga / 32768.0
else:
return ((result[0] << 8) | (result[1])) * pga / 32768.0
def readADCDifferential01(self, pga=6144, sps=250):
"""Gets a differential ADC reading from channels 0 and 1 in mV\
The sample rate for this mode (single-shot) can be used to lower the noise \
(low sps) or to lower the power consumption (high sps) by duty cycling, \
see data sheet page 14 for more info. \
The pga must be given in mV, see page 13 for the supported values."""
return self.readADCDifferential(0, 1, pga, sps)
def readADCDifferential03(self, pga=6144, sps=250):
"""Gets a differential ADC reading from channels 0 and 3 in mV \
The sample rate for this mode (single-shot) can be used to lower the noise \
(low sps) or to lower the power consumption (high sps) by duty cycling, \
see data sheet page 14 for more info. \
The pga must be given in mV, see page 13 for the supported values."""
return self.readADCDifferential(0, 3, pga, sps)
def readADCDifferential13(self, pga=6144, sps=250):
"""Gets a differential ADC reading from channels 1 and 3 in mV \
The sample rate for this mode (single-shot) can be used to lower the noise \
(low sps) or to lower the power consumption (high sps) by duty cycling, \
see data sheet page 14 for more info. \
The pga must be given in mV, see page 13 for the supported values."""
return self.readADCDifferential(1, 3, pga, sps)
def readADCDifferential23(self, pga=6144, sps=250):
"""Gets a differential ADC reading from channels 2 and 3 in mV \
The sample rate for this mode (single-shot) can be used to lower the noise \
(low sps) or to lower the power consumption (high sps) by duty cycling, \
see data sheet page 14 for more info. \
The pga must be given in mV, see page 13 for the supported values."""
return self.readADCDifferential(2, 3, pga, sps)
def startContinuousConversion(self, channel=0, pga=6144, sps=250):
"""Starts the continuous conversion mode and returns the first ADC reading \
in mV from the specified channel. \
The sps controls the sample rate. \
The pga must be given in mV, see datasheet page 13 for the supported values. \
Use getLastConversionResults() to read the next values and \
stopContinuousConversion() to stop converting."""
# Default to channel 0 with invalid channel, or return -1?
if channel > 3:
if self.debug:
print "ADS1x15: Invalid channel specified: %d" % channel
return -1
# Disable comparator, Non-latching, Alert/Rdy active low
# traditional comparator, continuous mode
# The last flag is the only change we need, page 11 datasheet
config = self.__ADS1015_REG_CONFIG_CQUE_NONE | \
self.__ADS1015_REG_CONFIG_CLAT_NONLAT | \
self.__ADS1015_REG_CONFIG_CPOL_ACTVLOW | \
self.__ADS1015_REG_CONFIG_CMODE_TRAD | \
self.__ADS1015_REG_CONFIG_MODE_CONTIN
# Set sample per seconds, defaults to 250sps
# If sps is in the dictionary (defined in init()) it returns the value of the constant
# otherwise it returns the value for 250sps. This saves a lot of if/elif/else code!
if self.ic == self.__IC_ADS1015:
config |= self.spsADS1015.setdefault(sps, self.__ADS1015_REG_CONFIG_DR_1600SPS)
else:
if (sps not in self.spsADS1115) & self.debug:
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.spsADS1115.setdefault(sps, self.__ADS1115_REG_CONFIG_DR_250SPS)
# Set PGA/voltage range, defaults to +-6.144V
if (pga not in self.pgaADS1x15) & self.debug:
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.pgaADS1x15.setdefault(pga, self.__ADS1015_REG_CONFIG_PGA_6_144V)
self.pga = pga
# Set the channel to be converted
if channel == 3:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_3
elif channel == 2:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_2
elif channel == 1:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_1
else:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_0
# Set 'start single-conversion' bit to begin conversions
# No need to change this for continuous mode!
config |= self.__ADS1015_REG_CONFIG_OS_SINGLE
# Write config register to the ADC
# Once we write the ADC will convert continuously
# we can read the next values using getLastConversionResult
config_register = [(config >> 8) & 0xFF, config & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, config_register)
# Wait for the ADC conversion to complete
# The minimum delay depends on the sps: delay >= 1/sps
# We add 0.5ms to be sure
delay = 1.0 / sps + 0.0005
time.sleep(delay)
# Read the conversion results
result = self.i2c.readList(self.__ADS1015_REG_POINTER_CONVERT, 2)
if self.ic == self.__IC_ADS1015:
# Shift right 4 bits for the 12-bit ADS1015 and convert to mV
return (((result[0] << 8) | (result[1] & 0xFF)) >> 4) * pga / 2048.0
else:
# Return a mV value for the ADS1115
# (Take signed values into account as well)
val = (result[0] << 8) | (result[1])
if val > 0x7FFF:
return (val - 0xFFFF) * pga / 32768.0
else:
return ((result[0] << 8) | (result[1])) * pga / 32768.0
def startContinuousDifferentialConversion(self, chP=0, chN=1, pga=6144, sps=250):
"""Starts the continuous differential conversion mode and returns the first ADC reading \
in mV as the difference from the specified channels. \
The sps controls the sample rate. \
The pga must be given in mV, see datasheet page 13 for the supported values. \
Use getLastConversionResults() to read the next values and \
stopContinuousConversion() to stop converting."""
# Disable comparator, Non-latching, Alert/Rdy active low
# traditional comparator, continuous mode
# The last flag is the only change we need, page 11 datasheet
config = self.__ADS1015_REG_CONFIG_CQUE_NONE | \
self.__ADS1015_REG_CONFIG_CLAT_NONLAT | \
self.__ADS1015_REG_CONFIG_CPOL_ACTVLOW | \
self.__ADS1015_REG_CONFIG_CMODE_TRAD | \
self.__ADS1015_REG_CONFIG_MODE_CONTIN
# Set sample per seconds, defaults to 250sps
# If sps is in the dictionary (defined in init()) it returns the value of the constant
# otherwise it returns the value for 250sps. This saves a lot of if/elif/else code!
if self.ic == self.__IC_ADS1015:
config |= self.spsADS1015.setdefault(sps, self.__ADS1015_REG_CONFIG_DR_1600SPS)
else:
if (sps not in self.spsADS1115) & self.debug:
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.spsADS1115.setdefault(sps, self.__ADS1115_REG_CONFIG_DR_250SPS)
# Set PGA/voltage range, defaults to +-6.144V
if (pga not in self.pgaADS1x15) & self.debug:
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % sps
config |= self.pgaADS1x15.setdefault(pga, self.__ADS1015_REG_CONFIG_PGA_6_144V)
self.pga = pga
# Set channels
if (chP == 0) & (chN == 1):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_0_1
elif (chP == 0) & (chN == 3):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_0_3
elif (chP == 2) & (chN == 3):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_2_3
elif (chP == 1) & (chN == 3):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_1_3
else:
if self.debug:
print "ADS1x15: Invalid channels specified: %d, %d" % (chP, chN)
return -1
# Set 'start single-conversion' bit to begin conversions
# No need to change this for continuous mode!
config |= self.__ADS1015_REG_CONFIG_OS_SINGLE
# Write config register to the ADC
# Once we write the ADC will convert continuously
# we can read the next values using getLastConversionResult
config_register = [(config >> 8) & 0xFF, config & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, config_register)
# Wait for the ADC conversion to complete
# The minimum delay depends on the sps: delay >= 1/sps
# We add 0.5ms to be sure
delay = 1.0 / sps + 0.0005
time.sleep(delay)
# Read the conversion results
result = self.i2c.readList(self.__ADS1015_REG_POINTER_CONVERT, 2)
if self.ic == self.__IC_ADS1015:
# Shift right 4 bits for the 12-bit ADS1015 and convert to mV
return (((result[0] << 8) | (result[1] & 0xFF)) >> 4) * pga / 2048.0
else:
# Return a mV value for the ADS1115
# (Take signed values into account as well)
val = (result[0] << 8) | (result[1])
if val > 0x7FFF:
return (val - 0xFFFF) * pga / 32768.0
else:
return ((result[0] << 8) | (result[1])) * pga / 32768.0
def stopContinuousConversion(self):
"""Stops the ADC's conversions when in continuous mode \
and resets the configuration to its default value."""
# Write the default config register to the ADC
# Once we write, the ADC will do a single conversion and
# enter power-off mode.
config = 0x8583 # Page 18 datasheet.
config_register = [(config >> 8) & 0xFF, config & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, config_register)
return True
def getLastConversionResults(self):
"""Returns the last ADC conversion result in mV"""
# Read the conversion results
result = self.i2c.readList(self.__ADS1015_REG_POINTER_CONVERT, 2)
if self.ic == self.__IC_ADS1015:
# Shift right 4 bits for the 12-bit ADS1015 and convert to mV
return (((result[0] << 8) | (result[1] & 0xFF)) >> 4) * self.pga / 2048.0
else:
# Return a mV value for the ADS1115
# (Take signed values into account as well)
val = (result[0] << 8) | (result[1])
if val > 0x7FFF:
return (val - 0xFFFF) * self.pga / 32768.0
else:
return ((result[0] << 8) | (result[1])) * self.pga / 32768.0
def startSingleEndedComparator(self, channel, thresholdHigh, thresholdLow,
pga=6144, sps=250, activeLow=True,
traditionalMode=True, latching=False,
numReadings=1):
"""Starts the comparator mode on the specified channel, see datasheet pg. 15. \
In traditional mode it alerts (ALERT pin will go low) when voltage exceeds \
thresholdHigh until it falls below thresholdLow (both given in mV). \
In window mode (traditionalMode=False) it alerts when voltage doesn't lie\
between both thresholds.\
In latching mode the alert will continue until the conversion value is read. \
numReadings controls how many readings are necessary to trigger an alert: 1, 2 or 4.\
Use getLastConversionResults() to read the current value (which may differ \
from the one that triggered the alert) and clear the alert pin in latching mode. \
This function starts the continuous conversion mode. The sps controls \
the sample rate and the pga the gain, see datasheet page 13. """
# With invalid channel return -1
if channel > 3:
if self.debug:
print "ADS1x15: Invalid channel specified: %d" % channel
return -1
# Continuous mode
config = self.__ADS1015_REG_CONFIG_MODE_CONTIN
if not activeLow:
config |= self.__ADS1015_REG_CONFIG_CPOL_ACTVHI
else:
config |= self.__ADS1015_REG_CONFIG_CPOL_ACTVLOW
if not traditionalMode:
config |= self.__ADS1015_REG_CONFIG_CMODE_WINDOW
else:
config |= self.__ADS1015_REG_CONFIG_CMODE_TRAD
if latching:
config |= self.__ADS1015_REG_CONFIG_CLAT_LATCH
else:
config |= self.__ADS1015_REG_CONFIG_CLAT_NONLAT
if numReadings == 4:
config |= self.__ADS1015_REG_CONFIG_CQUE_4CONV
elif numReadings == 2:
config |= self.__ADS1015_REG_CONFIG_CQUE_2CONV
else:
config |= self.__ADS1015_REG_CONFIG_CQUE_1CONV
# Set sample per seconds, defaults to 250sps
# If sps is in the dictionary (defined in init()) it returns the value of the constant
# otherwise it returns the value for 250sps. This saves a lot of if/elif/else code!
if self.ic == self.__IC_ADS1015:
if (sps not in self.spsADS1015) & self.debug:
print "ADS1x15: Invalid sps specified: %d, using 1600sps" % sps
config |= self.spsADS1015.setdefault(sps, self.__ADS1015_REG_CONFIG_DR_1600SPS)
else:
if (sps not in self.spsADS1115) & self.debug:
print "ADS1x15: Invalid sps specified: %d, using 250sps" % sps
config |= self.spsADS1115.setdefault(sps, self.__ADS1115_REG_CONFIG_DR_250SPS)
# Set PGA/voltage range, defaults to +-6.144V
if (pga not in self.pgaADS1x15) & self.debug:
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % pga
config |= self.pgaADS1x15.setdefault(pga, self.__ADS1015_REG_CONFIG_PGA_6_144V)
self.pga = pga
# Set the channel to be converted
if channel == 3:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_3
elif channel == 2:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_2
elif channel == 1:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_1
else:
config |= self.__ADS1015_REG_CONFIG_MUX_SINGLE_0
# Set 'start single-conversion' bit to begin conversions
config |= self.__ADS1015_REG_CONFIG_OS_SINGLE
# Write threshold high and low registers to the ADC
# V_digital = (2^(n-1)-1)/pga*V_analog
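# Worked example (illustrative numbers, not from the datasheet): on an ADS1115 with
# pga=4096 mV, a 3300 mV threshold maps to int(3300 * 32767 / 4096) = 26399 counts.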
if self.ic == self.__IC_ADS1015:
thresholdHighWORD = int(thresholdHigh * (2048.0 / pga))
else:
thresholdHighWORD = int(thresholdHigh * (32767.0 / pga))
register = [(thresholdHighWORD >> 8) & 0xFF, thresholdHighWORD & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_HITHRESH, register)
if self.ic == self.__IC_ADS1015:
thresholdLowWORD = int(thresholdLow * (2048.0 / pga))
else:
thresholdLowWORD = int(thresholdLow * (32767.0 / pga))
register = [(thresholdLowWORD >> 8) & 0xFF, thresholdLowWORD & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_LOWTHRESH, register)
# Write config register to the ADC
# Once we write, the ADC will convert continuously and alert when things happen,
# we can read the converted values using getLastConversionResult
register = [(config >> 8) & 0xFF, config & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, register)
def startDifferentialComparator(self, chP, chN, thresholdHigh, thresholdLow,
pga=6144, sps=250,
activeLow=True, traditionalMode=True, latching=False,
numReadings=1):
"""Starts the comparator mode on the specified channel, see datasheet pg. 15. \
In traditional mode it alerts (ALERT pin will go low) when voltage exceeds \
thresholdHigh until it falls below thresholdLow (both given in mV). \
In window mode (traditionalMode=False) it alerts when voltage doesn't lie\
between both thresholds.\
In latching mode the alert will continue until the conversion value is read. \
numReadings controls how many readings are necessary to trigger an alert: 1, 2 or 4.\
Use getLastConversionResults() to read the current value (which may differ \
from the one that triggered the alert) and clear the alert pin in latching mode. \
This function starts the continuous conversion mode. The sps controls \
the sample rate and the pga the gain, see datasheet page 13. """
# Continuous mode
config = self.__ADS1015_REG_CONFIG_MODE_CONTIN
if not activeLow:
config |= self.__ADS1015_REG_CONFIG_CPOL_ACTVHI
else:
config |= self.__ADS1015_REG_CONFIG_CPOL_ACTVLOW
if not traditionalMode:
config |= self.__ADS1015_REG_CONFIG_CMODE_WINDOW
else:
config |= self.__ADS1015_REG_CONFIG_CMODE_TRAD
if latching:
config |= self.__ADS1015_REG_CONFIG_CLAT_LATCH
else:
config |= self.__ADS1015_REG_CONFIG_CLAT_NONLAT
if numReadings == 4:
config |= self.__ADS1015_REG_CONFIG_CQUE_4CONV
elif numReadings == 2:
config |= self.__ADS1015_REG_CONFIG_CQUE_2CONV
else:
config |= self.__ADS1015_REG_CONFIG_CQUE_1CONV
# Set sample per seconds, defaults to 250sps
# If sps is in the dictionary (defined in init()) it returns the value of the constant
# otherwise it returns the value for 250sps. This saves a lot of if/elif/else code!
if self.ic == self.__IC_ADS1015:
if (sps not in self.spsADS1015) & self.debug:
print "ADS1x15: Invalid sps specified: %d, using 1600sps" % sps
config |= self.spsADS1015.setdefault(sps, self.__ADS1015_REG_CONFIG_DR_1600SPS)
else:
if (sps not in self.spsADS1115) & self.debug:
print "ADS1x15: Invalid sps specified: %d, using 250sps" % sps
config |= self.spsADS1115.setdefault(sps, self.__ADS1115_REG_CONFIG_DR_250SPS)
# Set PGA/voltage range, defaults to +-6.144V
if (pga not in self.pgaADS1x15) & self.debug:
print "ADS1x15: Invalid pga specified: %d, using 6144mV" % pga
config |= self.pgaADS1x15.setdefault(pga, self.__ADS1015_REG_CONFIG_PGA_6_144V)
self.pga = pga
# Set channels
if (chP == 0) & (chN == 1):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_0_1
elif (chP == 0) & (chN == 3):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_0_3
elif (chP == 2) & (chN == 3):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_2_3
elif (chP == 1) & (chN == 3):
config |= self.__ADS1015_REG_CONFIG_MUX_DIFF_1_3
else:
if self.debug:
print "ADS1x15: Invalid channels specified: %d, %d" % (chP, chN)
return -1
# Set 'start single-conversion' bit to begin conversions
config |= self.__ADS1015_REG_CONFIG_OS_SINGLE
# Write threshold high and low registers to the ADC
# V_digital = (2^(n-1)-1)/pga*V_analog
if self.ic == self.__IC_ADS1015:
thresholdHighWORD = int(thresholdHigh * (2048.0 / pga))
else:
thresholdHighWORD = int(thresholdHigh * (32767.0 / pga))
bytes_processed = [(thresholdHighWORD >> 8) & 0xFF, thresholdHighWORD & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_HITHRESH, bytes_processed)
if self.ic == self.__IC_ADS1015:
thresholdLowWORD = int(thresholdLow * (2048.0 / pga))
else:
thresholdLowWORD = int(thresholdLow * (32767.0 / pga))
bytes_processed = [(thresholdLowWORD >> 8) & 0xFF, thresholdLowWORD & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_LOWTHRESH, bytes_processed)
# Write config register to the ADC
# Once we write, the ADC will convert continuously and alert when things happen,
# we can read the converted values using getLastConversionResult
bytes_processed = [(config >> 8) & 0xFF, config & 0xFF]
self.i2c.writeList(self.__ADS1015_REG_POINTER_CONFIG, bytes_processed)
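# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original driver).
# It assumes an ADS1115 wired to the default I2C address 0x48 and will only
# work on a system with a live I2C bus and the Adafruit_I2C module installed.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    adc = ADS1x15(ic=0x01)  # 0x01 selects the 16-bit ADS1115
    millivolts = adc.readADCSingleEnded(0, pga=4096, sps=250)
    print "Channel 0: %.3f mV" % millivolts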
| 48.227568
| 111
| 0.644404
| 4,858
| 37,087
| 4.643063
| 0.092013
| 0.078471
| 0.109239
| 0.083348
| 0.844033
| 0.800763
| 0.776955
| 0.750488
| 0.750488
| 0.748847
| 0
| 0.096285
| 0.277779
| 37,087
| 768
| 112
| 48.290365
| 0.745828
| 0.221641
| 0
| 0.691796
| 0
| 0
| 0.045554
| 0
| 0
| 0
| 0.022336
| 0
| 0
| 0
| null | null | 0
| 0.004435
| null | null | 0.053215
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
91b2151f3d8afa39cf227422eb619e921f736180
| 29,344
|
py
|
Python
|
neural_net.py
|
jgreer013/pymcc
|
3472321cffa0e81136a0d0a9596a594635c45377
|
[
"MIT"
] | null | null | null |
neural_net.py
|
jgreer013/pymcc
|
3472321cffa0e81136a0d0a9596a594635c45377
|
[
"MIT"
] | null | null | null |
neural_net.py
|
jgreer013/pymcc
|
3472321cffa0e81136a0d0a9596a594635c45377
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision.models as models
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5, padding = 2)
self.bn1 = nn.BatchNorm2d(6)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5, padding = 2)
self.bn2 = nn.BatchNorm2d(16)
self.conv3 = nn.Conv2d(16, 32, 5, padding = 2)
self.bn3 = nn.BatchNorm2d(32)
self.conv4 = nn.Conv2d(32, 48, 5, padding = 2)
self.bn4 = nn.BatchNorm2d(48)
self.conv5 = nn.Conv2d(48, 64, 5, padding = 2)
self.bn5 = nn.BatchNorm2d(64)
self.fc1 = nn.Linear(64 * 60 * 33, 240)
self.bnf1 = nn.BatchNorm1d(240)
self.fc2 = nn.Linear(240, 120)
self.bnf2 = nn.BatchNorm1d(120)
self.fc3 = nn.Linear(120, 50)
self.bnf3 = nn.BatchNorm1d(50)
self.fc4 = nn.Linear(50, 20)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
def forward(self, x): # 1920 x 1080 x 3
x = self.bn1(self.pool(self.relu(self.conv1(x)))) # 960 x 540
x = self.bn2(self.pool(self.relu(self.conv2(x)))) # 480 x 270
x = self.bn3(self.pool(self.relu(self.conv3(x)))) # 240 x 135
x = self.bn4(self.pool(self.relu(self.conv4(x)))) # 120 x 67
x = self.bn5(self.pool(self.relu(self.conv5(x)))) # 60 x 33
x = x.view(-1, 64 * 60 * 33)
x = self.bnf1(self.relu(self.fc1(x)))
x = self.bnf2(self.relu(self.fc2(x)))
x = self.bnf3(self.relu(self.fc3(x)))
x = self.tanh(self.fc4(x))
return x
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
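# Shape-check sketch for Net (illustrative, assuming the full-HD RGB input the
# comments above describe; not part of the original module):
#   net = Net(); net.eval()
#   out = net(torch.randn(1, 3, 1080, 1920))   # -> torch.Size([1, 20])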
class StickNet(nn.Module):
def __init__(self):
super(StickNet, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5, padding = 2)
self.bn1 = nn.BatchNorm2d(6)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5, padding = 2)
self.bn2 = nn.BatchNorm2d(16)
self.conv3 = nn.Conv2d(16, 32, 5, padding = 2)
self.bn3 = nn.BatchNorm2d(32)
self.conv4 = nn.Conv2d(32, 48, 5, padding = 2)
self.bn4 = nn.BatchNorm2d(48)
self.conv5 = nn.Conv2d(48, 64, 5, padding = 2)
self.bn5 = nn.BatchNorm2d(64)
self.fc1 = nn.Linear(64 * 60 * 33, 240)
self.bnf1 = nn.BatchNorm1d(240)
self.fc2 = nn.Linear(240, 240)
self.bnf2 = nn.BatchNorm1d(240)
self.fc3 = nn.Linear(240, 240)
self.bnf3 = nn.BatchNorm1d(240)
self.fc4 = nn.Linear(240, 4)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
def forward(self, x): # 1920 x 1080 x 3
x = self.bn1(self.pool(self.relu(self.conv1(x)))) # 960 x 540
x = self.bn2(self.pool(self.relu(self.conv2(x)))) # 480 x 270
x = self.bn3(self.pool(self.relu(self.conv3(x)))) # 240 x 135
x = self.bn4(self.pool(self.relu(self.conv4(x)))) # 120 x 67
x = self.bn5(self.pool(self.relu(self.conv5(x)))) # 60 x 33
x = x.view(-1, 64 * 60 * 33)
x = self.bnf1(self.relu(self.fc1(x)))
x = self.bnf2(self.relu(self.fc2(x)))
x = self.bnf3(self.relu(self.fc3(x)))
x = self.tanh(self.fc4(x))
return x
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
class ModifiedResnet(nn.Module):
def __init__(self):
super(ModifiedResnet, self).__init__()
self.resnet18 = models.resnet18(pretrained=True)
num_features = self.resnet18.fc.in_features
self.resnet18.fc = nn.Linear(num_features, 1000)
self.bnf = nn.BatchNorm1d(1000)
self.fcf = nn.Linear(1000, 20)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
def forward(self, x):
x = self.relu(self.resnet18(x))
x = self.tanh(self.fcf(x))
return x
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
class MixedActivationResnet(nn.Module):
def __init__(self):
super(MixedActivationResnet, self).__init__()
self.resnet18 = models.resnet18(pretrained=True)
num_features = self.resnet18.fc.in_features
self.resnet18.fc = nn.Linear(num_features, 512)
self.fc_stick = nn.Linear(512, 64)
self.bn_stick = nn.BatchNorm1d(64)
self.fcf_stick = nn.Linear(64, 4)
self.fc_button = nn.Linear(512, 64)
self.bn_button = nn.BatchNorm1d(64)
self.fcf_button = nn.Linear(64, 16)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
self.sig = nn.Sigmoid()
def forward(self, x):
x = self.relu(self.resnet18(x))
sticks = self.bn_stick(self.relu(self.fc_stick(x)))
sticks = self.tanh(self.fcf_stick(sticks))
buttons = self.bn_button(self.relu(self.fc_button(x)))
buttons = self.sig(self.fcf_button(buttons))
return sticks, buttons
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
class MixedActivationResnet_ActionGenerator(nn.Module):
def __init__(self):
super(MixedActivationResnet_ActionGenerator, self).__init__()
self.resnet18 = models.resnet18(pretrained=True)
num_features = self.resnet18.fc.in_features
self.resnet18.fc = nn.Linear(num_features, 512)
self.fc_stick = nn.Linear(512, 64)
self.bn_stick = nn.BatchNorm1d(64)
self.fcf_stick = nn.Linear(64, 4)
self.fc_button = nn.Linear(512, 64)
self.bn_button = nn.BatchNorm1d(64)
self.fcf_button = nn.Linear(64, 16)
#self.relu = nn.ReLU()
self.relu = nn.LeakyReLU(0.2)
self.tanh = nn.Tanh()
self.sig = nn.Sigmoid()
def forward(self, x):
x = self.relu(self.resnet18(x))
sticks = self.bn_stick(self.relu(self.fc_stick(x)))
sticks = self.tanh(self.fcf_stick(sticks))
button_probs = self.bn_button(self.relu(self.fc_button(x)))
button_probs = self.sig(self.fcf_button(button_probs))
return sticks, button_probs
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
class GeneratorWithActionTanh(nn.Module):
def __init__(self):
super(GeneratorWithActionTanh, self).__init__()
self.resnet18 = models.resnet18(pretrained=True)
num_features = self.resnet18.fc.in_features
self.resnet18.fc = nn.Linear(num_features, 512)
self.fc_action = nn.Linear(20, 64)
self.fc_state = nn.Linear(576, 512)
self.bn_state = nn.BatchNorm1d(512)
self.fc_stick = nn.Linear(512, 64)
self.bn_stick = nn.BatchNorm1d(64)
self.fcf_stick = nn.Linear(64, 4)
self.fc_button = nn.Linear(512, 64)
self.bn_button = nn.BatchNorm1d(64)
self.fcf_button = nn.Linear(64, 16)
self.relu = nn.ReLU()
#self.relu = nn.LeakyReLU(0.2)
self.tanh = nn.Tanh()
self.sig = nn.Sigmoid()
def forward(self, image, action):
image = self.relu(self.resnet18(image))
action = self.relu(self.fc_action(action))
state = self.bn_state(self.relu(self.fc_state(torch.cat((image, action), dim=1))))
sticks = self.bn_stick(self.relu(self.fc_stick(state)))
sticks = self.tanh(self.fcf_stick(sticks))
button_tanh = self.bn_button(self.relu(self.fc_button(state)))
button_tanh = self.tanh(self.fcf_button(button_tanh))
return sticks, button_tanh
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
class GeneratorWithAction(nn.Module):
def __init__(self):
super(GeneratorWithAction, self).__init__()
n_hidden = 1000
n_action = 100
n_sum = n_hidden + n_action
self.resnet18 = models.resnet18(pretrained=True)
num_features = self.resnet18.fc.in_features
self.resnet18.fc = nn.Linear(num_features, n_hidden)
self.bn_resnet = nn.BatchNorm1d(n_hidden)
self.fc_action = nn.Linear(20, n_action)
self.bn_action = nn.BatchNorm1d(n_action)
self.fc_state = nn.Linear(n_sum, n_hidden)
self.bn_state = nn.BatchNorm1d(n_hidden)
self.fc_final_state = nn.Linear(n_hidden, n_hidden)
self.bn_final_state = nn.BatchNorm1d(n_hidden)
self.fc_final = nn.Linear(n_hidden, 20)
self.bn_final = nn.BatchNorm1d(20)
self.relu = nn.ReLU()
#self.relu = nn.LeakyReLU(0.2)
self.tanh = nn.Tanh()
def forward(self, image, action):
image = self.relu(self.bn_resnet(self.resnet18(image)))
action = self.relu(self.bn_action(self.fc_action(action)))
state = self.relu(self.bn_state(self.fc_state(torch.cat((image, action), dim=1))))
state = self.relu(self.bn_final_state(self.fc_final_state(state)))
generated_action = self.tanh(self.bn_final(self.fc_final(state)))
return generated_action
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
class ResnetImageActionDiscriminator(nn.Module):
def __init__(self):
super(ResnetImageActionDiscriminator, self).__init__()
self.resnet18 = models.resnet18(pretrained=True)
num_features = self.resnet18.fc.in_features
self.resnet18.fc = nn.Linear(num_features, 512)
self.fc_action = nn.Linear(20, 64)
# Combine action tensor with image tensor - 64 + 512
self.bn_concat = nn.BatchNorm1d(576)
self.fc_concat = nn.Linear(576, 512)
self.bn_reduced = nn.BatchNorm1d(512)
self.fc_combined = nn.Linear(512, 64)
self.fc_bn = nn.BatchNorm1d(64)
self.fc_final = nn.Linear(64, 1)
#self.relu = nn.ReLU()
self.relu = nn.LeakyReLU(0.2)
self.sig = nn.Sigmoid()
def forward(self, image, action):
image = self.relu(self.resnet18(image))
action = self.relu(self.fc_action(action)) # 64
concat = self.bn_concat(torch.cat((image, action), dim=1)) # 576
concat = self.bn_reduced(self.relu(self.fc_concat(concat))) # 512
concat = self.fc_bn(self.relu(self.fc_combined(concat)))
prob = self.sig(self.fc_final(concat))
return prob
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
class ResnetImageActionDiscriminatorWGAN(nn.Module):
def __init__(self):
super(ResnetImageActionDiscriminatorWGAN, self).__init__()
self.resnet18 = models.resnet18(pretrained=True)
num_features = self.resnet18.fc.in_features
self.resnet18.fc = nn.Linear(num_features, 512)
self.fc_action = nn.Linear(20, 64)
# Combine action tensor with image tensor - 64 + 512
self.bn_concat = nn.BatchNorm1d(576)
self.fc_concat = nn.Linear(576, 512)
self.bn_reduced = nn.BatchNorm1d(512)
self.fc_combined = nn.Linear(512, 64)
self.fc_bn = nn.BatchNorm1d(64)
self.fc_final = nn.Linear(64, 1)
self.relu = nn.ReLU()
#self.relu = nn.LeakyReLU(0.2)
self.sig = nn.Sigmoid()
def forward(self, image, action):
image = self.relu(self.resnet18(image))
action = self.relu(self.fc_action(action)) # 64
concat = self.bn_concat(torch.cat((image, action), dim=1)) # 576
concat = self.bn_reduced(self.relu(self.fc_concat(concat))) # 512
concat = self.fc_bn(self.relu(self.fc_combined(concat)))
prob = self.fc_final(concat)
return prob
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
class ResnetImageActionDiscriminatorWGANGPWithAction(nn.Module):
def __init__(self):
super(ResnetImageActionDiscriminatorWGANGPWithAction, self).__init__()
n_hidden = 1000
n_action = 20
n_sum = n_hidden + n_action
self.resnet18 = models.resnet18(pretrained=True)
num_features = self.resnet18.fc.in_features
self.resnet18.fc = nn.Linear(num_features, n_hidden)
#self.ln_resnet = nn.LayerNorm(n_hidden)
self.fc_action = nn.Linear(20, n_action)
#self.ln_action = nn.LayerNorm(n_action)
self.fc_prev_action = nn.Linear(20, n_action)
#self.ln_prev_action = nn.LayerNorm(n_action)
self.fc_state = nn.Linear(n_sum, n_hidden)
#self.ln_state = nn.LayerNorm(n_hidden)
        # Combine image-derived state with the action tensor - n_hidden + n_action
self.fc_concat = nn.Linear(n_sum, n_hidden)
#self.ln_reduced = nn.LayerNorm(n_hidden)
self.fc_combined = nn.Linear(n_hidden, n_hidden)
#self.ln_combined = nn.LayerNorm(n_hidden)
self.fc_final = nn.Linear(n_hidden, 1)
#self.relu = nn.ReLU()
self.relu = nn.LeakyReLU(0.2)
self.sig = nn.Sigmoid()
def forward(self, image, action, prev_action):
#image = self.relu(self.ln_resnet(self.resnet18(image)))
image = self.relu(self.resnet18(image))
#action = self.relu(self.ln_action(self.fc_action(action))) # 64
action = self.relu(self.fc_action(action))
#prev_action = self.relu(self.ln_prev_action(self.fc_prev_action(prev_action))) # 64
prev_action = self.relu(self.fc_prev_action(prev_action))
#state = self.relu(self.ln_state(self.fc_state(torch.cat((image, prev_action), dim=1)))) # 512
state = self.relu(self.fc_state(torch.cat((image, prev_action), dim=1)))
concat = torch.cat((state, action), dim=1) # 576
#concat = self.relu(self.ln_reduced(self.fc_concat(concat))) # 512
concat = self.relu(self.fc_concat(concat))
#concat = self.relu(self.ln_combined(self.fc_combined(concat))) # 64
concat = self.relu(self.fc_combined(concat))
        prob = self.fc_final(concat)  # raw critic score (dim 1); no sigmoid for WGAN-GP
return prob
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
class ResnetImageActionDiscriminatorWGANGP(nn.Module):
def __init__(self):
super(ResnetImageActionDiscriminatorWGANGP, self).__init__()
self.resnet18 = models.resnet18(pretrained=True)
num_features = self.resnet18.fc.in_features
self.resnet18.fc = nn.Linear(num_features, 512)
self.fc_action = nn.Linear(20, 64)
# Combine action tensor with image tensor - 64 + 512
self.ln_concat = nn.LayerNorm(576)
self.fc_concat = nn.Linear(576, 512)
self.ln_reduced = nn.LayerNorm(512)
self.fc_combined = nn.Linear(512, 64)
self.ln_combined = nn.LayerNorm(64)
self.fc_final = nn.Linear(64, 1)
self.relu = nn.ReLU()
#self.relu = nn.LeakyReLU(0.2)
self.sig = nn.Sigmoid()
def forward(self, image, action):
image = self.relu(self.resnet18(image))
action = self.relu(self.fc_action(action)) # 64
concat = self.ln_concat(torch.cat((image, action), dim=1)) # 576
concat = self.ln_reduced(self.relu(self.fc_concat(concat))) # 512
concat = self.ln_combined(self.relu(self.fc_combined(concat))) # 64
        prob = self.fc_final(concat)  # raw critic score (dim 1); no sigmoid for WGAN-GP
return prob
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
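# Illustrative sketch (not part of the original file): the WGAN-GP critic above
# swaps BatchNorm for LayerNorm because the gradient penalty is a per-sample
# constraint and BatchNorm couples samples within a batch. Assuming a critic with
# the same (image, action) signature as the class above, the penalty term is
# typically computed like this:
def wgan_gp_gradient_penalty(critic, image, real_action, fake_action):
    batch_size = real_action.size(0)
    # Per-sample interpolation coefficient, broadcast over the action dimensions
    eps = torch.rand(batch_size, 1, device=real_action.device)
    interpolated = eps * real_action.detach() + (1 - eps) * fake_action.detach()
    interpolated.requires_grad_(True)
    scores = critic(image, interpolated)
    grads = torch.autograd.grad(outputs=scores, inputs=interpolated,
                                grad_outputs=torch.ones_like(scores),
                                create_graph=True, retain_graph=True)[0]
    # Penalise deviation of the per-sample gradient norm from 1
    return ((grads.norm(2, dim=1) - 1) ** 2).mean()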
class MixedActivationResnetWithActionAndButtonLogits(nn.Module):
def __init__(self):
super(MixedActivationResnetWithActionAndButtonLogits, self).__init__()
self.resnet18 = models.resnet18(pretrained=True)
num_features = self.resnet18.fc.in_features
self.resnet18.fc = nn.Linear(num_features, 512)
self.fc_stick = nn.Linear(576, 64)
self.bn_stick = nn.BatchNorm1d(64)
self.fcf_stick = nn.Linear(64, 4)
self.fc_button = nn.Linear(576, 64)
self.bn_button = nn.BatchNorm1d(64)
self.fcf_button = nn.Linear(64, 16)
self.fc_action = nn.Linear(20, 64)
# Combine action tensor with image tensor - 64 + 512
self.bn_concat = nn.BatchNorm1d(576)
self.fc_concat = nn.Linear(576, 576)
self.bn_reduced = nn.BatchNorm1d(576)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
self.sig = nn.Sigmoid()
def forward(self, x, action):
x = self.relu(self.resnet18(x))
action = self.relu(self.fc_action(action)) # 64
concat = self.bn_concat(torch.cat((x, action), dim=1)) # 576
        concat = self.bn_reduced(self.relu(self.fc_concat(concat)))  # 576
sticks = self.bn_stick(self.relu(self.fc_stick(concat)))
sticks = self.tanh(self.fcf_stick(sticks))
buttons = self.bn_button(self.relu(self.fc_button(concat)))
buttons = self.fcf_button(buttons)
button_probs = self.sig(buttons)
return sticks, buttons, button_probs
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
class Resnet34WithPreviousAction(nn.Module):
def __init__(self):
super(Resnet34WithPreviousAction, self).__init__()
n_hidden = 2000
n_hidden_action = 64
self.resnet34 = models.resnet34(pretrained=False)
num_features = self.resnet34.fc.in_features
print(num_features)
self.resnet34.fc = nn.Linear(num_features, n_hidden)
n_sum = n_hidden + n_hidden_action
self.fc_stick = nn.Linear(n_sum, n_hidden)
self.bn_stick = nn.BatchNorm1d(n_hidden)
self.fc_final = nn.Linear(n_hidden, 20)
self.fc_action = nn.Linear(20, n_hidden_action)
        # Combine action tensor with image tensor - n_hidden + n_hidden_action
#self.bn_concat = nn.BatchNorm1d(n_sum)
self.fc_concat = nn.Linear(n_sum, n_sum)
self.bn_reduced = nn.BatchNorm1d(n_sum)
self.relu = nn.ReLU()
#self.relu = nn.Tanh()
self.tanh = nn.Tanh()
self.sig = nn.Sigmoid()
def forward(self, x, action):
x = self.relu(self.resnet34(x))
action = self.relu(self.fc_action(action)) # 64
        concat = torch.cat((x, action), dim=1)  # n_hidden + n_hidden_action = 2064
        concat = self.relu(self.bn_reduced(self.fc_concat(concat)))  # 2064
        output_action = self.relu(self.bn_stick(self.fc_stick(concat)))  # 2000
output_action = self.tanh(self.fc_final(output_action)) # 20
return output_action
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
class Resnet34(nn.Module):
def __init__(self):
super(Resnet34, self).__init__()
n_hidden = 2000
self.resnet34 = models.resnet34(pretrained=False)
num_features = self.resnet34.fc.in_features
print(num_features)
self.resnet34.fc = nn.Linear(num_features, n_hidden)
self.bn_resnet = nn.BatchNorm1d(n_hidden)
self.fc_stick = nn.Linear(n_hidden, n_hidden)
self.bn_stick = nn.BatchNorm1d(n_hidden)
self.fc_final = nn.Linear(n_hidden, 20)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
self.sig = nn.Sigmoid()
def forward(self, x):
x = self.relu(self.bn_resnet(self.resnet34(x)))
output_action = self.relu(self.bn_stick(self.fc_stick(x)))
output_action = self.tanh(self.fc_final(output_action)) # 20
return output_action
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
class MixedActivationClassificationResnet(nn.Module):
def __init__(self):
super(MixedActivationClassificationResnet, self).__init__()
self.resnet18 = models.resnet18(pretrained=True)
num_features = self.resnet18.fc.in_features
self.resnet18.fc = nn.Linear(num_features, 512)
self.fc_stick = nn.Linear(512, 32)
self.bn_stick = nn.BatchNorm1d(32)
self.fcf_stick_l_lr = nn.Linear(32, 5)
self.fcf_stick_l_ud = nn.Linear(32, 5)
self.fcf_stick_r_lr = nn.Linear(32, 5)
self.fcf_stick_r_ud = nn.Linear(32, 5)
self.fc_button = nn.Linear(512, 64)
self.bn_button = nn.BatchNorm1d(64)
self.fcf_button = nn.Linear(64, 16)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
self.sig = nn.Sigmoid()
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
x = self.relu(self.resnet18(x))
# sticks converted to multi-class problem with 5 classes each, to be converted to one of [-1, -0.5, 0, 0.5, 1]
sticks = self.bn_stick(self.relu(self.fc_stick(x)))
# CrossEntropy applies softmax by itself, so no need to pass it here
stick_l_lr = self.fcf_stick_l_lr(sticks)
stick_l_ud = self.fcf_stick_l_ud(sticks)
stick_r_lr = self.fcf_stick_r_lr(sticks)
stick_r_ud = self.fcf_stick_r_ud(sticks)
# Output these to determine class for output at runtime
stick_l_lr_probs = self.softmax(stick_l_lr)
stick_l_ud_probs = self.softmax(stick_l_ud)
stick_r_lr_probs = self.softmax(stick_r_lr)
stick_r_ud_probs = self.softmax(stick_r_ud)
buttons = self.bn_button(self.relu(self.fc_button(x)))
buttons = self.sig(self.fcf_button(buttons))
return stick_l_lr, stick_l_ud, stick_r_lr, stick_r_ud, buttons, stick_l_lr_probs, stick_l_ud_probs, stick_r_lr_probs, stick_r_ud_probs
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
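# Illustrative sketch (an assumption, not the original training code): each stick
# head above is a 5-way classifier whose class index maps back to one of
# [-1, -0.5, 0, 0.5, 1], and nn.CrossEntropyLoss consumes the raw logits directly.
# Decoding at runtime might look like this:
STICK_VALUES = torch.tensor([-1.0, -0.5, 0.0, 0.5, 1.0])
def decode_stick_classes(*stick_probs):
    # Pick the most likely class per axis and look up its analogue stick value
    values = STICK_VALUES.to(stick_probs[0].device)
    return torch.stack([values[p.argmax(dim=1)] for p in stick_probs], dim=1)
# e.g. decode_stick_classes(stick_l_lr_probs, stick_l_ud_probs,
#                           stick_r_lr_probs, stick_r_ud_probs) -> (batch, 4)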
class MixedActivationFocalClassificationResnet(nn.Module):
def __init__(self):
super(MixedActivationFocalClassificationResnet, self).__init__()
self.resnet18 = models.resnet18(pretrained=True)
num_features = self.resnet18.fc.in_features
self.resnet18.fc = nn.Linear(num_features, 512)
self.fc_stick = nn.Linear(512, 32)
self.bn_stick = nn.BatchNorm1d(32)
self.fcf_stick_l_lr = nn.Linear(32, 5)
self.fcf_stick_l_ud = nn.Linear(32, 5)
self.fcf_stick_r_lr = nn.Linear(32, 5)
self.fcf_stick_r_ud = nn.Linear(32, 5)
self.fc_button = nn.Linear(512, 64)
self.bn_button = nn.BatchNorm1d(64)
self.fcf_button = nn.Linear(64, 16)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
self.sig = nn.Sigmoid()
self.softmax = nn.Softmax(dim=1)
def forward(self, x):
x = self.relu(self.resnet18(x))
# sticks converted to multi-class problem with 5 classes each, to be converted to one of [-1, -0.5, 0, 0.5, 1]
sticks = self.bn_stick(self.relu(self.fc_stick(x)))
# CrossEntropy applies softmax by itself, so no need to pass it here
stick_l_lr = self.fcf_stick_l_lr(sticks)
stick_l_ud = self.fcf_stick_l_ud(sticks)
stick_r_lr = self.fcf_stick_r_lr(sticks)
stick_r_ud = self.fcf_stick_r_ud(sticks)
# Output these to determine class for output at runtime
stick_l_lr_probs = self.softmax(stick_l_lr)
stick_l_ud_probs = self.softmax(stick_l_ud)
stick_r_lr_probs = self.softmax(stick_r_lr)
stick_r_ud_probs = self.softmax(stick_r_ud)
buttons = self.bn_button(self.relu(self.fc_button(x)))
# Focal BCE uses logits, so we don't want to apply this to the main output
buttons = self.fcf_button(buttons)
buttons_probs = self.sig(buttons)
return stick_l_lr, stick_l_ud, stick_r_lr, stick_r_ud, buttons, stick_l_lr_probs, stick_l_ud_probs, stick_r_lr_probs, stick_r_ud_probs, buttons_probs
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
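# Illustrative sketch (an assumed loss, not part of the original file): a focal
# variant of binary cross-entropy that operates on raw logits, which is why the
# model above returns the button logits for the loss and the sigmoid
# probabilities separately for runtime use.
import torch.nn.functional as F
def focal_bce_with_logits(logits, targets, alpha=0.25, gamma=2.0):
    bce = F.binary_cross_entropy_with_logits(logits, targets, reduction='none')
    p_t = torch.exp(-bce)  # model probability assigned to the true label
    return (alpha * (1 - p_t) ** gamma * bce).mean()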
class NetMixedActivation(nn.Module):
def __init__(self):
super(NetMixedActivation, self).__init__()
self.conv1 = nn.Conv2d(3, 6, 5, padding = 2)
self.pool = nn.MaxPool2d(2, 2)
self.conv2 = nn.Conv2d(6, 16, 5, padding = 2)
self.conv3 = nn.Conv2d(16, 32, 5, padding = 2)
self.conv4 = nn.Conv2d(32, 48, 5, padding = 2)
self.conv5 = nn.Conv2d(48, 64, 5, padding = 2)
self.fc1 = nn.Linear(64 * 60 * 33, 240)
self.fc2 = nn.Linear(240, 120)
self.fc3 = nn.Linear(120, 50)
self.fc4 = nn.Linear(50, 20)
self.relu = nn.ReLU()
self.tanh = nn.Tanh()
self.sig = nn.Sigmoid()
def forward(self, x): # 1920 x 1080 x 3
x = self.pool(self.relu(self.conv1(x))) # 960 x 540
x = self.pool(self.relu(self.conv2(x))) # 480 x 270
x = self.pool(self.relu(self.conv3(x))) # 240 x 135
x = self.pool(self.relu(self.conv4(x))) # 120 x 67
x = self.pool(self.relu(self.conv5(x))) # 60 x 33
x = x.view(-1, 64 * 60 * 33)
x = self.relu(self.fc1(x))
x = self.relu(self.fc2(x))
x = self.relu(self.fc3(x))
x = self.fc4(x)
        split = torch.split(x, [4, 16], 1)  # split into 4 joystick axes and 16 button outputs
joysticks = self.tanh(split[0])
other_buttons = self.sig(split[1])
return torch.cat((joysticks, other_buttons), 1)
def load(self, path, optimizer=None, gpu=None):
checkpoint = torch.load(path, map_location='cpu')
self.load_state_dict(checkpoint['model_state_dict'])
if optimizer:
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
if gpu:
torch.cuda.empty_cache()
self.to(gpu)
| 38.258149
| 157
| 0.627897
| 4,096
| 29,344
| 4.30542
| 0.047119
| 0.048086
| 0.055118
| 0.044344
| 0.900028
| 0.866289
| 0.831358
| 0.807712
| 0.786561
| 0.765523
| 0
| 0.04993
| 0.247853
| 29,344
| 766
| 158
| 38.308094
| 0.749083
| 0.068566
| 0
| 0.784148
| 0
| 0
| 0.024329
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086003
| false
| 0
| 0.006745
| 0
| 0.150084
| 0.003373
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
531232b22967f9e0d487962f1583d5de95409e86
| 6,957
|
py
|
Python
|
ivy/functional/backends/mxnet/layers.py
|
bideeen/ivy
|
d167c245c2c94fb88a6cb00286bf37154e435aea
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/backends/mxnet/layers.py
|
bideeen/ivy
|
d167c245c2c94fb88a6cb00286bf37154e435aea
|
[
"Apache-2.0"
] | null | null | null |
ivy/functional/backends/mxnet/layers.py
|
bideeen/ivy
|
d167c245c2c94fb88a6cb00286bf37154e435aea
|
[
"Apache-2.0"
] | null | null | null |
"""
Collection of MXNet network layers, wrapped to fit Ivy syntax and signature.
"""
# global
import math
import mxnet as mx
def conv1d(x: mx.nd.NDArray,
filters: mx.nd.NDArray,
strides: int,
padding: str,
data_format: str ='NWC',
dilations: int = 1)\
-> mx.nd.NDArray:
if data_format == 'NWC':
x = mx.nd.transpose(x, (0, 2, 1))
filter_shape = filters.shape[0:-2]
num_filters = filters.shape[-1]
kernel = filter_shape
if padding == 'VALID':
padding = [0]
elif padding == 'SAME':
padding = [math.floor(item / 2) for item in filter_shape]
else:
raise Exception('Invalid padding arg {}\n'
'Must be one of: "VALID" or "SAME"'.format(padding))
res = mx.nd.Convolution(data=x, weight=mx.nd.transpose(filters, (1, 2, 0)), kernel=kernel, stride=strides, dilate=dilations, pad=padding, no_bias=True, num_filter=num_filters)
if data_format == 'NWC':
return mx.nd.transpose(res, (0, 2, 1))
else:
return res
def conv1d_transpose(x, filters, strides, padding, _=None, data_format='NWC', dilations=1):
if data_format == 'NWC':
x = mx.nd.transpose(x, (0, 2, 1))
filter_shape = filters.shape[0:-2]
num_filters = filters.shape[-1]
kernel = filter_shape
if padding == 'VALID':
padding = [0]
elif padding == 'SAME':
padding = [math.floor(item / 2) for item in filter_shape]
else:
raise Exception('Invalid padding arg {}\n'
'Must be one of: "VALID" or "SAME"'.format(padding))
res = mx.nd.Deconvolution(data=x, weight=mx.nd.transpose(filters, (1, 2, 0)), kernel=kernel, stride=strides, dilate=dilations, pad=padding, no_bias=True, num_filter=num_filters)
if data_format == 'NWC':
return mx.nd.transpose(res, (0, 2, 1))
else:
return res
def conv2d(x, filters, strides, padding, data_format='NHWC', dilations=1):
if data_format == 'NHWC':
x = mx.nd.transpose(x, (0, 3, 1, 2))
filter_shape = filters.shape[0:-2]
num_filters = filters.shape[-1]
kernel = filter_shape
if padding == 'VALID':
padding = [0, 0]
elif padding == 'SAME':
padding = [math.floor(item / 2) for item in filter_shape]
else:
raise Exception('Invalid padding arg {}\n'
'Must be one of: "VALID" or "SAME"'.format(padding))
strides = [strides]*2 if isinstance(strides, int) else strides
dilations = [dilations]*2 if isinstance(dilations, int) else dilations
res = mx.nd.Convolution(data=x, weight=mx.nd.transpose(filters, (2, 3, 0, 1)), kernel=kernel, stride=strides, dilate=dilations, pad=padding, no_bias=True, num_filter=num_filters)
if data_format == 'NHWC':
return mx.nd.transpose(res, (0, 2, 3, 1))
else:
return res
def conv2d_transpose(x, filters, strides, padding, _=None, data_format='NHWC', dilations=1):
if data_format == 'NHWC':
x = mx.nd.transpose(x, (0, 3, 1, 2))
filter_shape = filters.shape[0:-2]
num_filters = filters.shape[-1]
kernel = filter_shape
if padding == 'VALID':
padding = [0, 0]
elif padding == 'SAME':
padding = [math.floor(item / 2) for item in filter_shape]
else:
raise Exception('Invalid padding arg {}\n'
'Must be one of: "VALID" or "SAME"'.format(padding))
strides = [strides]*2 if isinstance(strides, int) else strides
dilations = [dilations]*2 if isinstance(dilations, int) else dilations
res = mx.nd.Deconvolution(data=x, weight=mx.nd.transpose(filters, (2, 3, 0, 1)), kernel=kernel, stride=strides, dilate=dilations, pad=padding, no_bias=True, num_filter=num_filters)
if data_format == 'NHWC':
return mx.nd.transpose(res, (0, 2, 3, 1))
else:
return res
def depthwise_conv2d(x, filters, strides, padding, data_format='NHWC', dilations=1):
num_filters = filters.shape[-1]
num_channels = num_filters
if data_format == 'NHWC':
x = mx.nd.transpose(x, (0, 3, 1, 2))
filter_shape = filters.shape[0:-1]
kernel = filter_shape
if padding == 'VALID':
padding = [0, 0]
elif padding == 'SAME':
padding = [math.floor(item / 2) for item in filter_shape]
else:
raise Exception('Invalid padding arg {}\n'
'Must be one of: "VALID" or "SAME"'.format(padding))
strides = [strides]*2 if isinstance(strides, int) else strides
dilations = [dilations]*2 if isinstance(dilations, int) else dilations
res = mx.nd.Convolution(data=x, weight=mx.nd.transpose(mx.nd.expand_dims(filters, -1), (2, 3, 0, 1)), kernel=kernel, stride=strides, dilate=dilations, pad=padding, no_bias=True, num_filter=num_filters, num_group=num_channels)
if data_format == 'NHWC':
return mx.nd.transpose(res, (0, 2, 3, 1))
else:
return res
# noinspection PyDefaultArgument
def conv3d(x, filters, strides, padding, data_format='NDHWC', dilations=1):
if data_format == 'NDHWC':
x = mx.nd.transpose(x, (0, 4, 1, 2, 3))
filter_shape = filters.shape[0:-2]
num_filters = filters.shape[-1]
kernel = filter_shape
if padding == 'VALID':
padding = [0, 0, 0]
elif padding == 'SAME':
padding = [math.floor(item / 2) for item in filter_shape]
else:
raise Exception('Invalid padding arg {}\n'
'Must be one of: "VALID" or "SAME"'.format(padding))
strides = [strides]*3 if isinstance(strides, int) else strides
dilations = [dilations]*3 if isinstance(dilations, int) else dilations
res = mx.nd.Convolution(data=x, weight=mx.nd.transpose(filters, (3, 4, 0, 1, 2)), kernel=kernel, stride=strides, dilate=dilations, pad=padding, no_bias=True, num_filter=num_filters)
if data_format == 'NDHWC':
return mx.nd.transpose(res, (0, 2, 3, 4, 1))
else:
return res
def conv3d_transpose(x, filters, strides, padding, _=None, data_format='NDHWC', dilations=1):
if data_format == 'NDHWC':
x = mx.nd.transpose(x, (0, 4, 1, 2, 3))
filter_shape = filters.shape[0:-2]
num_filters = filters.shape[-1]
kernel = filter_shape
if padding == 'VALID':
padding = [0, 0, 0]
elif padding == 'SAME':
padding = [math.floor(item / 2) for item in filter_shape]
else:
raise Exception('Invalid padding arg {}\n'
'Must be one of: "VALID" or "SAME"'.format(padding))
strides = [strides]*3 if isinstance(strides, int) else strides
dilations = [dilations]*3 if isinstance(dilations, int) else dilations
res = mx.nd.Deconvolution(data=x, weight=mx.nd.transpose(filters, (3, 4, 0, 1, 2)), kernel=kernel, stride=strides, dilate=dilations, pad=padding, no_bias=True, num_filter=num_filters)
if data_format == 'NDHWC':
return mx.nd.transpose(res, (0, 2, 3, 4, 1))
else:
return res
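# A minimal usage sketch (shapes are assumptions, not from the original module):
# each wrapper takes channel-last input, maps 'SAME' padding to floor(kernel / 2)
# per spatial dimension, and transposes data and filters into MXNet's native
# channel-first layout before calling the corresponding NDArray op.
if __name__ == '__main__':
    x_nhwc = mx.nd.random.uniform(shape=(1, 28, 28, 3))   # batch, height, width, channels
    filters = mx.nd.random.uniform(shape=(3, 3, 3, 3))    # 3x3 kernel, 3 channels in/out
    y = conv2d(x_nhwc, filters, strides=1, padding='SAME')
    print(y.shape)  # (1, 28, 28, 3): spatial size preserved with stride 1 and 'SAME'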
| 41.909639
| 229
| 0.621389
| 977
| 6,957
| 4.340839
| 0.084954
| 0.030182
| 0.064372
| 0.023108
| 0.927611
| 0.919123
| 0.910163
| 0.910163
| 0.883046
| 0.883046
| 0
| 0.028448
| 0.237027
| 6,957
| 165
| 230
| 42.163636
| 0.770535
| 0.01653
| 0
| 0.834483
| 0
| 0
| 0.079906
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048276
| false
| 0
| 0.013793
| 0
| 0.158621
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
53366139365f01eac3593c3a2a2ebb53842f632a
| 65
|
py
|
Python
|
howtobuildmodule.py
|
prabal255/hackerrank_solutions
|
cebc394e49c22e939dee2f972e0e04cf625d0de8
|
[
"MIT"
] | null | null | null |
howtobuildmodule.py
|
prabal255/hackerrank_solutions
|
cebc394e49c22e939dee2f972e0e04cf625d0de8
|
[
"MIT"
] | null | null | null |
howtobuildmodule.py
|
prabal255/hackerrank_solutions
|
cebc394e49c22e939dee2f972e0e04cf625d0de8
|
[
"MIT"
] | null | null | null |
def add(a, b):
    return a + b
def multii(a, b):
    return a * b
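# A minimal usage sketch (illustrative, not part of the original file): assuming
# this file is saved as howtobuildmodule.py somewhere on the import path, it can
# be consumed as a module like so:
#
#   import howtobuildmodule
#   print(howtobuildmodule.add(2, 3))     # 5
#   print(howtobuildmodule.multii(2, 3))  # 6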
| 13
| 17
| 0.538462
| 14
| 65
| 2.5
| 0.428571
| 0.228571
| 0.457143
| 0.514286
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.307692
| 65
| 5
| 18
| 13
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 8
|
5341b355d102541c4eb6d52cee0905eb8192fc13
| 3,013
|
py
|
Python
|
moi/home/migrations/0017_auto_20160508_1144.py
|
Ecotrust/F2S-MOI
|
aeb38942d6539c50f252ea3ff6fbff07aabc5088
|
[
"Apache-2.0"
] | null | null | null |
moi/home/migrations/0017_auto_20160508_1144.py
|
Ecotrust/F2S-MOI
|
aeb38942d6539c50f252ea3ff6fbff07aabc5088
|
[
"Apache-2.0"
] | 33
|
2015-05-06T00:47:20.000Z
|
2016-11-08T21:13:44.000Z
|
moi/home/migrations/0017_auto_20160508_1144.py
|
Ecotrust/F2S-MOI
|
aeb38942d6539c50f252ea3ff6fbff07aabc5088
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-05-08 11:44
from __future__ import unicode_literals
import core.models
from django.db import migrations
import wagtail.wagtailcore.blocks
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
dependencies = [
('home', '0016_auto_20160518_1729'),
]
operations = [
migrations.AlterField(
model_name='homepage',
name='body_content',
field=wagtail.wagtailcore.fields.StreamField([(b'number_count_up', wagtail.wagtailcore.blocks.StructBlock([(b'content', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Enter your main content above. Do not use commas for larger numbers.', label=b'Text')), (b'numbers', wagtail.wagtailcore.blocks.CharBlock(help_text=b"Enter the numbers you'd like to count up - seperated by a semicolon. Do not use commas for larger numbers. Ex: 4; 51000; 15", label=b'Numbers to count', required=False)), (b'colored_text', wagtail.wagtailcore.blocks.CharBlock(help_text=b"Enter the content you'd like to be a different color - each set of content is seperated by a semicolon", required=False)), (b'source', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Enter a source for the associated information.', required=False))], icon=b'order', label=b'Content and Number Counter Block')), (b'top_story', wagtail.wagtailcore.blocks.StructBlock([(b'sector', core.models.SectorChoiceBlock(help_text=b'Select the sector/top-story this aligns with')), (b'content', wagtail.wagtailcore.blocks.StructBlock([(b'content', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Enter your main content above. Do not use commas for larger numbers.', label=b'Text')), (b'numbers', wagtail.wagtailcore.blocks.CharBlock(help_text=b"Enter the numbers you'd like to count up - seperated by a semicolon. Do not use commas for larger numbers. Ex: 4; 51000; 15", label=b'Numbers to count', required=False)), (b'colored_text', wagtail.wagtailcore.blocks.CharBlock(help_text=b"Enter the content you'd like to be a different color - each set of content is seperated by a semicolon", required=False)), (b'source', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Enter a source for the associated information.', required=False))])), (b'link_caption', wagtail.wagtailcore.blocks.CharBlock(help_text=b'Add the text you would like to display that will link to the sector page', label=b'Link text')), (b'source', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Display your source here', required=False))], icon=b'title', label=b'Top Story Content Block')), (b'basic_content', wagtail.wagtailcore.blocks.StructBlock([(b'content', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Add your text and/or image content above', label=b'Content Area')), (b'source', wagtail.wagtailcore.blocks.RichTextBlock(help_text=b'Display your source here', required=False))], icon=b'pilcrow', label=b'Basic Content Block'))], blank=True, default=None, null=True),
),
]
| 125.541667
| 2,529
| 0.759044
| 445
| 3,013
| 5.07191
| 0.276404
| 0.151529
| 0.180771
| 0.049623
| 0.678777
| 0.662384
| 0.662384
| 0.643775
| 0.643775
| 0.643775
| 0
| 0.017964
| 0.113176
| 3,013
| 23
| 2,530
| 131
| 0.826722
| 0.022237
| 0
| 0
| 1
| 0.25
| 0.416582
| 0.007815
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3125
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
535b255ae791b87af435b127f31f1251b8eb9e9b
| 8,233
|
py
|
Python
|
edabit/medium/sum_of_two_numbers_twist/test_sum2.py
|
ticotheps/practice_problems
|
943c5ab9eebeac4e5cf162adbdc681119603dc36
|
[
"MIT"
] | null | null | null |
edabit/medium/sum_of_two_numbers_twist/test_sum2.py
|
ticotheps/practice_problems
|
943c5ab9eebeac4e5cf162adbdc681119603dc36
|
[
"MIT"
] | null | null | null |
edabit/medium/sum_of_two_numbers_twist/test_sum2.py
|
ticotheps/practice_problems
|
943c5ab9eebeac4e5cf162adbdc681119603dc36
|
[
"MIT"
] | null | null | null |
import unittest
from sum2 import sum2
class Test(unittest.TestCase):
def test_sum2(self):
# created from given examples in code prompt
self.assertEqual(sum2("5125515215521515", "125261616261626"), "5250776831783141")
self.assertEqual(sum2("6666666666666666666666666666", "99999999999999999999999"), "6666766666666666666666666665")
self.assertEqual(sum2("123456789123456789123456789", "987654321987654321987654329876543"), "987654445444443445444443453333332")
# created from given tests in edabit code editor
self.assertEqual(sum2("51","512"),"563")
self.assertEqual(sum2("1521512512512512515","898989898989988998899898"),"898991420502501511412413")
# 200 digit test
self.assertEqual(sum2("46580672134861691487886856201063433530317493541984174240640117078384844027455455145995264175402994424834479825796316174329467969102257360195385044875023188313698661902232816682563450684527972706431205","20129647448213526330992199933412026717951269059875880213489467074335368047371342207724579931208231032969760043956811494704380198848377355718984761723730087673439394159054420344427904875384087249296946"),"66710319583075217818879056134475460248268762601860054454129584152720212074826797353719844106611225457804239869753127669033848167950634715914369806598753275987138056061287237026991355559912059955728151")
# 400 digit test
self.assertEqual(sum2("9128242816391792390367394318238609154929962550133827657886034828979294413033450307173793450924762143201991300288127408763421237279633517929936847079257713141254694944681428142978110027357322312404627593110196423560326537881370897768020382035189644680256824659171348515208671339529370866296929702167647163038519576331084019822103309755374561623148508523431380245253765653509318684179663600476971689801","5920641803160990513445202815794518152101247319199211634010324208708552138569594568355624738331704740605556159925350097568289164018471525773848461636579024644391854277092707811953956319566890527925989019562020260846251250663758330856266051985217733863782039893158278545291027890391152027767054280498870038607952519452004179810592466387736659835203110761590431605238080432136468832137768846891022675668"),"15048884619552782903812597134033127307031209869333039291896359037687846551603044875529418189256466883807547460213477506331710401298105043703785308715836737785646549221774135954932066346924212840330616612672216684406577788545129228624286434020407378544038864552329627060499699229920522894063983982666517201646472095783088199632695776143111221458351619285021811850491846085645787516317432447367994365469")
# 800 digit test
self.assertEqual(sum2("24050292702239538714424762926989391091054882494797379961190995916419743076846190252322346159955367441832937735205583340798028856059731163836333572978411075895848661770468080051146869104337532213474993926921843996913706778627924709600100860461421587774140722793995230660233453392717776973617724296276959982403528208646617679528431859423982314682036555772485961534695291544266936268924447065901465864784710200365748971482974309578528938725051499783831735126491173069337708438252812165533370751831832345511906521502270947947504198101881866181323122256768406228518806385974069302887460096561970477705646199939356606923830372891944037065847071308700107176794261922547322922787636783283829766004529060524539701495229943211611134317723328686844713489223961776582365551050940511119736023799724295560712462775","32509558457800134082157248923945369106923458582597107662799282973299414325101867489152494482453431986091527569819907127801971978514325573048496062254539304686542784848485721075326183780905103371293505027494462848297668132430517455611475977074377557603798687767852907762254040565866554962529868705211874976201522692137999060766380670853123585546115851899681725949430620582898894763596679656710337017496618525630124192131724949516793735384953877012134805554272494110981312910720545085389502607669244051838541600298937410166860469735980721786025686375273866096353307350361733480286161315578348192126560652007559649899391482681958298876030894402469363045420883354390088168624736238621292726748213941087878476447239860986699971354976451472810499473465596980950104110861804910439253305564075282187561761831"),"56559851160039672796582011850934760197978341077394487623990278889719157401948057741474840642408799427924465305025490468600000834574056736884829635232950380582391446618953801126473052885242635584768498954416306845211374911058442165211576837535799145377939410561848138422487493958584331936147593001488834958605050900784616740294812530277105900228152407672167687484125912127165831032521126722611802882281328725995873163614699259095322674110005376795966540680763667180319021348973357250922873359501076397350448121801208358114364667837862587967348808632042272324872113736335802783173621412140318669832206851946916256823221855573902335941877965711169470222215145276937411091412373021905122492752743001612418177942469804198311105672699780159655212962689558757532469661912745421558989329363799577748274224606")
# 1000 digit test
self.assertEqual(sum2("6809632763916891310120420620586174664812635530867937840217826224568347127990297761160448320170180761000408251375466103628257285208000720825374647976159883503477330018358281993325677527017557058976068333642868984291739483975621870841667306204393840341405348294943813620516550885907643492592684427984599974017532776047374095558566398217709965020793366640224373810304569301398035374905004719899959454164388545839944663454514841628001498947789419801699846006819006975473515954356883318037820103882153723720601881130291354697184471999967716687686218472392686266650318568026273517115609143992129773683289728842208471476663161973115343223846428988702456038387814984825150310010062042877806640480710525528241328472095324930106698917227353621406980530110747055257101350540514070478171412772633866196018240727864894180766177622426511795549818029880640303307250349733954619463752541929370662897223878043296358089043269919860180819045946942402216596728187295625046088616265162395417587774677163023091414012232562","8580486073000341176464481569509088314164648516198308588306341581886828084652373143824178061446100845207932260299989735098857835077503613944792671258723699144053082578718738452102088425930223680001667115358790806513480705990372645625620985201064789499679080382290229510353519900434457951354146351017356497038201676685010533295544810336326460878234636045306593283716955770214654833569407250862467764241301491075773878798755773232056371316349556337923582112418745691372603517681014116333538448075872043025194944464917374343870889032601696418177538646357828263727642286052251539150438138687143116934852022178306775340622808992992303183802309493297583818627523253611781850609208464293666564468773980578995182119812585094120227322948794646916387478317146222403428768533848504528795446827274517795896686680232498391880209937677390643332411043134407239601699748922010617022901854909440001914242164998598960486713118948710525156592829968225269277615537170861633579862039532587858207573589726753466950083211842"),"15390118836917232486584902190095262978977284047066246428524167806455175212642670904984626381616281606208340511675455838727115120285504334770167319234883582647530412597077020445427765952947780738977735449001659790805220189965994516467288291405458629841084428677234043130870070786342101443946830779001956471055734452732384628854111208554036425899028002685530967094021525071612690208474411970762427218405690036915718542253270614860057870264138976139623428119237752666846119472037897434371358551958025766745796825595208729041055361032569413105863757118750514530377960854078525056266047282679272890618141751020515246817285970966107646407648738482000039857015338238436932160619270507171473204949484506107236510591907910024226926240176148268323368008427893277660530119074362575006966859599908383991914927408097392572646387560103902438882229073015047542908950098655965236486654396838810664811466043041895318575756388868570705975638776910627485874343724466486679668478304694983275795348266889776558364095444404")
if __name__ == '__main__':
unittest.main()
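# Illustrative sketch (an assumption drawn from the expected values above, not
# the original solution): the tests imply that sum2 adds two non-negative decimal
# strings digit by digit and returns the sum as a string, which sidesteps any
# fixed-width integer limits for the very long inputs.
def sum2(a: str, b: str) -> str:
    digits = []
    carry = 0
    i, j = len(a) - 1, len(b) - 1
    while i >= 0 or j >= 0 or carry:
        total = carry
        if i >= 0:
            total += ord(a[i]) - ord('0')
            i -= 1
        if j >= 0:
            total += ord(b[j]) - ord('0')
            j -= 1
        digits.append(chr(total % 10 + ord('0')))
        carry = total // 10
    return ''.join(reversed(digits))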
| 343.041667
| 3,041
| 0.958824
| 100
| 8,233
| 78.85
| 0.54
| 0.017121
| 0.021687
| 0.012175
| 0.014204
| 0
| 0
| 0
| 0
| 0
| 0
| 0.936379
| 0.024414
| 8,233
| 24
| 3,042
| 343.041667
| 0.045319
| 0.018219
| 0
| 0
| 0
| 0
| 0.929058
| 0.918906
| 0
| 1
| 0
| 0
| 0.6
| 1
| 0.066667
| false
| 0
| 0.133333
| 0
| 0.266667
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
726c7dc383164ed2d1fc448d4e223aad8acd4666
| 18,484
|
py
|
Python
|
plugin/relation.py
|
SkygearIO/social-feed
|
ab52de578785eff543b5e261e1ffa26ec82f2184
|
[
"Apache-2.0"
] | null | null | null |
plugin/relation.py
|
SkygearIO/social-feed
|
ab52de578785eff543b5e261e1ffa26ec82f2184
|
[
"Apache-2.0"
] | null | null | null |
plugin/relation.py
|
SkygearIO/social-feed
|
ab52de578785eff543b5e261e1ffa26ec82f2184
|
[
"Apache-2.0"
] | null | null | null |
import skygear
from skygear import (
op,
)
from skygear.utils import db
import sqlalchemy as sa
from .options import (
DB_NAME,
SOCIAL_FEED_FANOUT_POLICY_JSON_STR,
SOCIAL_FEED_RECORD_TYPES,
SOCIAL_FEED_TABLE_PREFIX,
)
from .table_name import (
name_for_followings_relation_index,
name_for_friends_relation_index
)
from .user import (
should_record_be_indexed,
)
DIRECTION_MUTUAL = 'mutual'
DIRECTION_INWARD = 'inward'
DIRECTION_OUTWARD = 'outward'
RELATION_TABLE_MAP = {
'friends': '_friend',
'following': '_follow',
}
def register_create_index_for_friends():
@op('social_feed:create_index_for_friends', user_required=True)
def social_feed_create_index_for_friends(maybe_my_friends):
if not maybe_my_friends:
return
with db.conn() as conn:
my_user_id = skygear.utils.context.current_user_id()
maybe_my_friend_ids = [
user['user_id'] for user in maybe_my_friends
]
maybe_my_friend_ids_tuple = tuple(maybe_my_friend_ids)
sql = sa.text('''
SELECT f1.right_id as id
FROM {db_name}._friend f1
JOIN {db_name}._friend f2
ON f1.right_id = f2.left_id
WHERE f1.left_id = :my_user_id
AND f2.right_id = :my_user_id
AND f1.right_id IN :maybe_my_friend_ids
'''.format(db_name=DB_NAME))
results = conn.execute(
sql,
my_user_id=my_user_id,
maybe_my_friend_ids=maybe_my_friend_ids_tuple
)
my_friend_ids = [user.id for user in results]
if not my_friend_ids:
return
my_friend_ids_tuple = tuple(my_friend_ids)
should_fanout_my_records = should_record_be_indexed(
DB_NAME,
SOCIAL_FEED_RECORD_TYPES,
conn,
my_user_id,
'friends'
)
for record_type in SOCIAL_FEED_RECORD_TYPES:
table_name = name_for_friends_relation_index(
prefix=SOCIAL_FEED_TABLE_PREFIX,
record_type=record_type
)
create_my_friends_records_index_sql = sa.text('''
INSERT INTO {db_name}.{table_name} (
_id,
_database_id,
_owner_id,
_created_at,
_created_by,
_updated_at,
_updated_by,
_access,
left_id,
right_id,
record_ref
)
SELECT
uuid_generate_v4() as _id,
'' as _database_id,
:my_user_id as _owner_id,
current_timestamp as _created_at,
:my_user_id as _created_by,
current_timestamp as _updated_at,
:my_user_id as _updated_by,
'[]'::jsonb as _access,
:my_user_id as left_id,
record_table._owner_id as right_id,
record_table._id as record_ref
FROM {db_name}.{record_type} record_table
JOIN {db_name}.user user_table
ON (
record_table._owner_id = user_table._id
AND COALESCE(
user_table.social_feed_fanout_policy,
'{default_fanout_policy}'::jsonb
) @> '{req_fanout_policy}'::jsonb
)
WHERE record_table._owner_id in :my_friend_ids
AND NOT EXISTS (
SELECT *
FROM {db_name}.{table_name}
WHERE left_id=:my_user_id
AND right_id IN (record_table._owner_id)
AND record_ref IN (record_table._id)
)
'''.format(
db_name=DB_NAME,
table_name=table_name,
record_type=record_type,
default_fanout_policy=SOCIAL_FEED_FANOUT_POLICY_JSON_STR,
req_fanout_policy='{"friends": true}'
))
conn.execute(
create_my_friends_records_index_sql,
my_user_id=my_user_id,
my_friend_ids=my_friend_ids_tuple
)
if should_fanout_my_records:
create_friends_to_my_records_index_sql = sa.text('''
INSERT INTO {db_name}.{table_name} (
_id,
_database_id,
_owner_id,
_created_at,
_created_by,
_updated_at,
_updated_by,
_access,
left_id,
right_id,
record_ref
)
SELECT
uuid_generate_v4() as _id,
'' as _database_id,
u.id as _owner_id,
current_timestamp as _created_at,
u.id as _created_by,
current_timestamp as _updated_at,
u.id as _updated_by,
'[]'::jsonb as _access,
u.id as left_id,
:my_user_id as right_id,
record_table._id as record_ref
FROM {db_name}.{record_type} record_table,
{db_name}._user u
WHERE record_table._owner_id = :my_user_id
AND u.id in :my_friend_ids
AND NOT EXISTS (
SELECT *
FROM {db_name}.{table_name}
WHERE right_id = :my_user_id
AND left_id IN :my_friend_ids
AND record_ref IN (record_table._id)
)
'''.format(
db_name=DB_NAME,
table_name=table_name,
record_type=record_type
))
conn.execute(
create_friends_to_my_records_index_sql,
my_user_id=my_user_id,
my_friend_ids=my_friend_ids_tuple
)
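# Illustrative note (not part of the original plugin): the fanout-policy filter in
# the SQL above relies on JSONB containment. COALESCE(social_feed_fanout_policy,
# '<default>'::jsonb) @> '{"friends": true}'::jsonb is true when the user's policy
# (or the default, if the user has none) marks "friends" fanout as enabled. A rough
# Python equivalent of that check, assuming the policy is loaded as a dict:
def allows_fanout(policy, default_policy, relation):
    effective = policy if policy is not None else default_policy
    return effective.get(relation) is True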
def register_create_index_for_followee():
@op('social_feed:create_index_for_followees', user_required=True)
def create_index_for_followee(followees):
if not followees:
return
with db.conn() as conn:
my_user_id = skygear.utils.context.current_user_id()
my_followees_ids = [followee['user_id'] for followee in followees]
my_followees_ids_tuple = tuple(my_followees_ids)
for record_type in SOCIAL_FEED_RECORD_TYPES:
table_name = name_for_followings_relation_index(
prefix=SOCIAL_FEED_TABLE_PREFIX,
record_type=record_type
)
create_my_followees_records_index_sql = sa.text('''
INSERT INTO {db_name}.{table_name} (
_id,
_database_id,
_owner_id,
_created_at,
_created_by,
_updated_at,
_updated_by,
_access,
left_id,
right_id,
record_ref
)
SELECT
uuid_generate_v4() as _id,
'' as _database_id,
:my_user_id as _owner_id,
current_timestamp as _created_at,
:my_user_id as _created_by,
current_timestamp as _updated_at,
:my_user_id as _updated_by,
'[]'::jsonb as _access,
:my_user_id as left_id,
record_table._owner_id as right_id,
record_table._id as record_ref
FROM {db_name}.{record_type} record_table
JOIN {db_name}.user user_table
ON (
record_table._owner_id = user_table._id
AND COALESCE(
user_table.social_feed_fanout_policy,
'{default_fanout_policy}'::jsonb
) @> '{req_fanout_policy}'::jsonb
)
WHERE record_table._owner_id in :my_followees_ids
AND NOT EXISTS (
SELECT *
FROM {db_name}.{table_name}
WHERE left_id=:my_user_id
AND right_id IN (record_table._owner_id)
AND record_ref IN (record_table._id)
)
'''.format(
db_name=DB_NAME,
table_name=table_name,
record_type=record_type,
default_fanout_policy=SOCIAL_FEED_FANOUT_POLICY_JSON_STR,
req_fanout_policy='{"following": true}'
))
conn.execute(
create_my_followees_records_index_sql,
my_user_id=my_user_id,
my_followees_ids=my_followees_ids_tuple
)
def register_remove_index_for_friends():
@op('social_feed:remove_index_for_friends', user_required=True)
def remove_index_for_friends(friends):
if not friends:
return
with db.conn() as conn:
my_user_id = skygear.utils.context.current_user_id()
my_friends_ids = [friend['user_id'] for friend in friends]
my_friends_ids_tuple = tuple(my_friends_ids)
for record_type in SOCIAL_FEED_RECORD_TYPES:
table_name = name_for_friends_relation_index(
prefix=SOCIAL_FEED_TABLE_PREFIX,
record_type=record_type
)
remove_my_friends_records_sql = sa.text('''
DELETE from {db_name}.{table_name}
WHERE (
left_id = :my_user_id
AND right_id in :my_friends_ids
)
OR (right_id = :my_user_id AND left_id in :my_friends_ids)
'''.format(db_name=DB_NAME, table_name=table_name))
conn.execute(
remove_my_friends_records_sql,
my_user_id=my_user_id,
my_friends_ids=my_friends_ids_tuple
)
def register_remove_index_for_followees():
@op('social_feed:remove_index_for_followees', user_required=True)
def remove_index_for_followees(followees):
        if not followees:
return
with db.conn() as conn:
my_user_id = skygear.utils.context.current_user_id()
my_followees_ids = [followee['user_id'] for followee in followees]
my_followees_ids_tuple = tuple(my_followees_ids)
for record_type in SOCIAL_FEED_RECORD_TYPES:
table_name = name_for_followings_relation_index(
prefix=SOCIAL_FEED_TABLE_PREFIX,
record_type=record_type
)
remove_my_friends_records_sql = sa.text('''
DELETE from {db_name}.{table_name}
WHERE left_id = :my_user_id
AND right_id in :my_followees_ids
'''.format(db_name=DB_NAME, table_name=table_name))
conn.execute(
remove_my_friends_records_sql,
my_user_id=my_user_id,
my_followees_ids=my_followees_ids_tuple
)
def register_reindex_for_friends():
@op('social_feed:reindex_for_friends', user_required=True)
def reindex_for_friends():
with db.conn() as conn:
my_user_id = skygear.utils.context.current_user_id()
for record_type in SOCIAL_FEED_RECORD_TYPES:
table_name = name_for_friends_relation_index(
prefix=SOCIAL_FEED_TABLE_PREFIX,
record_type=record_type
)
remove_current_index_sql = sa.text('''
DELETE FROM {db_name}.{table_name}
WHERE left_id = :my_user_id
'''.format(db_name=DB_NAME, table_name=table_name))
conn.execute(
remove_current_index_sql,
my_user_id=my_user_id
)
create_my_friends_records_index_sql = sa.text('''
INSERT INTO {db_name}.{table_name} (
_id,
_database_id,
_owner_id,
_created_at,
_created_by,
_updated_at,
_updated_by,
_access,
left_id,
right_id,
record_ref
)
SELECT
uuid_generate_v4() as _id,
'' as _database_id,
:my_user_id as _owner_id,
current_timestamp as _created_at,
:my_user_id as _created_by,
current_timestamp as _updated_at,
:my_user_id as _updated_by,
'[]'::jsonb as _access,
:my_user_id as left_id,
_owner_id as right_id,
_id as record_ref
FROM {db_name}.{record_type} record_table
WHERE _owner_id in (
SELECT f1.right_id as id
FROM {db_name}._friend f1
JOIN {db_name}._friend f2
ON f1.right_id = f2.left_id
WHERE f1.left_id = :my_user_id
AND f2.right_id = :my_user_id
)
AND NOT EXISTS (
SELECT *
FROM {db_name}.{table_name}
WHERE left_id=:my_user_id
AND right_id IN (record_table._owner_id)
AND record_ref IN (record_table._id)
)
'''.format(
db_name=DB_NAME,
table_name=table_name,
record_type=record_type
))
conn.execute(
create_my_friends_records_index_sql,
my_user_id=my_user_id,
)
def register_reindex_for_followees():
@op('social_feed:reindex_for_followees', user_required=True)
def reindex_for_followees():
with db.conn() as conn:
my_user_id = skygear.utils.context.current_user_id()
for record_type in SOCIAL_FEED_RECORD_TYPES:
table_name = name_for_followings_relation_index(
prefix=SOCIAL_FEED_TABLE_PREFIX,
record_type=record_type
)
remove_current_index_sql = sa.text('''
DELETE FROM {db_name}.{table_name}
WHERE left_id = :my_user_id
'''.format(db_name=DB_NAME, table_name=table_name))
conn.execute(
remove_current_index_sql,
my_user_id=my_user_id
)
create_my_friends_records_index_sql = sa.text('''
INSERT INTO {db_name}.{table_name} (
_id,
_database_id,
_owner_id,
_created_at,
_created_by,
_updated_at,
_updated_by,
_access,
left_id,
right_id,
record_ref
)
SELECT
uuid_generate_v4() as _id,
'' as _database_id,
:my_user_id as _owner_id,
current_timestamp as _created_at,
:my_user_id as _created_by,
current_timestamp as _updated_at,
:my_user_id as _updated_by,
'[]'::jsonb as _access,
:my_user_id as left_id,
_owner_id as right_id,
_id as record_ref
FROM {db_name}.{record_type} record_table
WHERE _owner_id in (
SELECT f.right_id as id
FROM {db_name}._follow f
WHERE f.left_id = :my_user_id
)
AND NOT EXISTS (
SELECT *
FROM {db_name}.{table_name}
WHERE left_id=:my_user_id
AND right_id IN (record_table._owner_id)
AND record_ref IN (record_table._id)
)
'''.format(
db_name=DB_NAME,
table_name=table_name,
record_type=record_type
))
conn.execute(
create_my_friends_records_index_sql,
my_user_id=my_user_id,
)
| 39.836207
| 78
| 0.466566
| 1,826
| 18,484
| 4.209748
| 0.05586
| 0.055418
| 0.062443
| 0.040328
| 0.890074
| 0.83986
| 0.79745
| 0.757253
| 0.757253
| 0.747106
| 0
| 0.00218
| 0.478738
| 18,484
| 463
| 79
| 39.922246
| 0.795641
| 0
| 0
| 0.703529
| 0
| 0
| 0.564326
| 0.058375
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028235
| false
| 0
| 0.016471
| 0
| 0.056471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
729e03e834054fbe4ec4fc25c3a4794c4935a7c8
| 25,131
|
py
|
Python
|
sdk/python/pulumi_gcp/compute/manged_ssl_certificate.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 121
|
2018-06-18T19:16:42.000Z
|
2022-03-31T06:06:48.000Z
|
sdk/python/pulumi_gcp/compute/manged_ssl_certificate.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 492
|
2018-06-22T19:41:03.000Z
|
2022-03-31T15:33:53.000Z
|
sdk/python/pulumi_gcp/compute/manged_ssl_certificate.py
|
sisisin/pulumi-gcp
|
af6681d70ea457843409110c1324817fe55f68ad
|
[
"ECL-2.0",
"Apache-2.0"
] | 43
|
2018-06-19T01:43:13.000Z
|
2022-03-23T22:43:37.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['MangedSslCertificateArgs', 'MangedSslCertificate']
@pulumi.input_type
class MangedSslCertificateArgs:
def __init__(__self__, *,
certificate_id: Optional[pulumi.Input[int]] = None,
description: Optional[pulumi.Input[str]] = None,
managed: Optional[pulumi.Input['MangedSslCertificateManagedArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a MangedSslCertificate resource.
:param pulumi.Input[int] certificate_id: The unique identifier for the resource.
:param pulumi.Input[str] description: An optional description of this resource.
:param pulumi.Input['MangedSslCertificateManagedArgs'] managed: Properties relevant to a managed certificate. These will be used if the certificate is managed (as indicated by a value
of 'MANAGED' in 'type').
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and
comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression
'[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. These are in the same
namespace as the managed SSL certificates.
:param pulumi.Input[str] type: Enum field whose value is always 'MANAGED' - used to signal to the API which type this is. Default value: "MANAGED"
Possible values: ["MANAGED"]
"""
if certificate_id is not None:
pulumi.set(__self__, "certificate_id", certificate_id)
if description is not None:
pulumi.set(__self__, "description", description)
if managed is not None:
pulumi.set(__self__, "managed", managed)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="certificateId")
def certificate_id(self) -> Optional[pulumi.Input[int]]:
"""
The unique identifier for the resource.
"""
return pulumi.get(self, "certificate_id")
@certificate_id.setter
def certificate_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "certificate_id", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional description of this resource.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def managed(self) -> Optional[pulumi.Input['MangedSslCertificateManagedArgs']]:
"""
Properties relevant to a managed certificate. These will be used if the certificate is managed (as indicated by a value
of 'MANAGED' in 'type').
"""
return pulumi.get(self, "managed")
@managed.setter
def managed(self, value: Optional[pulumi.Input['MangedSslCertificateManagedArgs']]):
pulumi.set(self, "managed", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and
comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression
'[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. These are in the same
namespace as the managed SSL certificates.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Enum field whose value is always 'MANAGED' - used to signal to the API which type this is. Default value: "MANAGED"
Possible values: ["MANAGED"]
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
@pulumi.input_type
class _MangedSslCertificateState:
def __init__(__self__, *,
certificate_id: Optional[pulumi.Input[int]] = None,
creation_timestamp: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
expire_time: Optional[pulumi.Input[str]] = None,
managed: Optional[pulumi.Input['MangedSslCertificateManagedArgs']] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
self_link: Optional[pulumi.Input[str]] = None,
subject_alternative_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None):
"""
Input properties used for looking up and filtering MangedSslCertificate resources.
:param pulumi.Input[int] certificate_id: The unique identifier for the resource.
:param pulumi.Input[str] creation_timestamp: Creation timestamp in RFC3339 text format.
:param pulumi.Input[str] description: An optional description of this resource.
:param pulumi.Input[str] expire_time: Expire time of the certificate.
:param pulumi.Input['MangedSslCertificateManagedArgs'] managed: Properties relevant to a managed certificate. These will be used if the certificate is managed (as indicated by a value
of 'MANAGED' in 'type').
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and
comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression
'[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. These are in the same
namespace as the managed SSL certificates.
:param pulumi.Input[Sequence[pulumi.Input[str]]] subject_alternative_names: Domains associated with the certificate via Subject Alternative Name.
:param pulumi.Input[str] type: Enum field whose value is always 'MANAGED' - used to signal to the API which type this is. Default value: "MANAGED"
Possible values: ["MANAGED"]
"""
if certificate_id is not None:
pulumi.set(__self__, "certificate_id", certificate_id)
if creation_timestamp is not None:
pulumi.set(__self__, "creation_timestamp", creation_timestamp)
if description is not None:
pulumi.set(__self__, "description", description)
if expire_time is not None:
pulumi.set(__self__, "expire_time", expire_time)
if managed is not None:
pulumi.set(__self__, "managed", managed)
if name is not None:
pulumi.set(__self__, "name", name)
if project is not None:
pulumi.set(__self__, "project", project)
if self_link is not None:
pulumi.set(__self__, "self_link", self_link)
if subject_alternative_names is not None:
pulumi.set(__self__, "subject_alternative_names", subject_alternative_names)
if type is not None:
pulumi.set(__self__, "type", type)
@property
@pulumi.getter(name="certificateId")
def certificate_id(self) -> Optional[pulumi.Input[int]]:
"""
The unique identifier for the resource.
"""
return pulumi.get(self, "certificate_id")
@certificate_id.setter
def certificate_id(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "certificate_id", value)
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> Optional[pulumi.Input[str]]:
"""
Creation timestamp in RFC3339 text format.
"""
return pulumi.get(self, "creation_timestamp")
@creation_timestamp.setter
def creation_timestamp(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "creation_timestamp", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
An optional description of this resource.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="expireTime")
def expire_time(self) -> Optional[pulumi.Input[str]]:
"""
Expire time of the certificate.
"""
return pulumi.get(self, "expire_time")
@expire_time.setter
def expire_time(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "expire_time", value)
@property
@pulumi.getter
def managed(self) -> Optional[pulumi.Input['MangedSslCertificateManagedArgs']]:
"""
Properties relevant to a managed certificate. These will be used if the certificate is managed (as indicated by a value
of 'MANAGED' in 'type').
"""
return pulumi.get(self, "managed")
@managed.setter
def managed(self, value: Optional[pulumi.Input['MangedSslCertificateManagedArgs']]):
pulumi.set(self, "managed", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and
comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression
'[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. These are in the same
namespace as the managed SSL certificates.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "self_link")
@self_link.setter
def self_link(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "self_link", value)
@property
@pulumi.getter(name="subjectAlternativeNames")
def subject_alternative_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
Domains associated with the certificate via Subject Alternative Name.
"""
return pulumi.get(self, "subject_alternative_names")
@subject_alternative_names.setter
def subject_alternative_names(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "subject_alternative_names", value)
@property
@pulumi.getter
def type(self) -> Optional[pulumi.Input[str]]:
"""
Enum field whose value is always 'MANAGED' - used to signal to the API which type this is. Default value: "MANAGED"
Possible values: ["MANAGED"]
"""
return pulumi.get(self, "type")
@type.setter
def type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "type", value)
warnings.warn("""gcp.compute.MangedSslCertificate has been deprecated in favor of gcp.compute.ManagedSslCertificate""", DeprecationWarning)
class MangedSslCertificate(pulumi.CustomResource):
warnings.warn("""gcp.compute.MangedSslCertificate has been deprecated in favor of gcp.compute.ManagedSslCertificate""", DeprecationWarning)
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
certificate_id: Optional[pulumi.Input[int]] = None,
description: Optional[pulumi.Input[str]] = None,
managed: Optional[pulumi.Input[pulumi.InputType['MangedSslCertificateManagedArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Create a MangedSslCertificate resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] certificate_id: The unique identifier for the resource.
:param pulumi.Input[str] description: An optional description of this resource.
:param pulumi.Input[pulumi.InputType['MangedSslCertificateManagedArgs']] managed: Properties relevant to a managed certificate. These will be used if the certificate is managed (as indicated by a value
of 'MANAGED' in 'type').
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and
comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression
'[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. These are in the same
namespace as the managed SSL certificates.
:param pulumi.Input[str] type: Enum field whose value is always 'MANAGED' - used to signal to the API which type this is. Default value: "MANAGED"
Possible values: ["MANAGED"]
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: Optional[MangedSslCertificateArgs] = None,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Create a MangedSslCertificate resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param MangedSslCertificateArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(MangedSslCertificateArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
certificate_id: Optional[pulumi.Input[int]] = None,
description: Optional[pulumi.Input[str]] = None,
managed: Optional[pulumi.Input[pulumi.InputType['MangedSslCertificateManagedArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
type: Optional[pulumi.Input[str]] = None,
__props__=None):
pulumi.log.warn("""MangedSslCertificate is deprecated: gcp.compute.MangedSslCertificate has been deprecated in favor of gcp.compute.ManagedSslCertificate""")
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = MangedSslCertificateArgs.__new__(MangedSslCertificateArgs)
__props__.__dict__["certificate_id"] = certificate_id
__props__.__dict__["description"] = description
__props__.__dict__["managed"] = managed
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["type"] = type
__props__.__dict__["creation_timestamp"] = None
__props__.__dict__["expire_time"] = None
__props__.__dict__["self_link"] = None
__props__.__dict__["subject_alternative_names"] = None
super(MangedSslCertificate, __self__).__init__(
'gcp:compute/mangedSslCertificate:MangedSslCertificate',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
certificate_id: Optional[pulumi.Input[int]] = None,
creation_timestamp: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
expire_time: Optional[pulumi.Input[str]] = None,
managed: Optional[pulumi.Input[pulumi.InputType['MangedSslCertificateManagedArgs']]] = None,
name: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
self_link: Optional[pulumi.Input[str]] = None,
subject_alternative_names: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
type: Optional[pulumi.Input[str]] = None) -> 'MangedSslCertificate':
"""
Get an existing MangedSslCertificate resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[int] certificate_id: The unique identifier for the resource.
:param pulumi.Input[str] creation_timestamp: Creation timestamp in RFC3339 text format.
:param pulumi.Input[str] description: An optional description of this resource.
:param pulumi.Input[str] expire_time: Expire time of the certificate.
:param pulumi.Input[pulumi.InputType['MangedSslCertificateManagedArgs']] managed: Properties relevant to a managed certificate. These will be used if the certificate is managed (as indicated by a value
of 'MANAGED' in 'type').
:param pulumi.Input[str] name: Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and
comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression
'[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. These are in the same
namespace as the managed SSL certificates.
:param pulumi.Input[Sequence[pulumi.Input[str]]] subject_alternative_names: Domains associated with the certificate via Subject Alternative Name.
:param pulumi.Input[str] type: Enum field whose value is always 'MANAGED' - used to signal to the API which type this is. Default value: "MANAGED"
Possible values: ["MANAGED"]
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _MangedSslCertificateState.__new__(_MangedSslCertificateState)
__props__.__dict__["certificate_id"] = certificate_id
__props__.__dict__["creation_timestamp"] = creation_timestamp
__props__.__dict__["description"] = description
__props__.__dict__["expire_time"] = expire_time
__props__.__dict__["managed"] = managed
__props__.__dict__["name"] = name
__props__.__dict__["project"] = project
__props__.__dict__["self_link"] = self_link
__props__.__dict__["subject_alternative_names"] = subject_alternative_names
__props__.__dict__["type"] = type
return MangedSslCertificate(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="certificateId")
def certificate_id(self) -> pulumi.Output[int]:
"""
The unique identifier for the resource.
"""
return pulumi.get(self, "certificate_id")
@property
@pulumi.getter(name="creationTimestamp")
def creation_timestamp(self) -> pulumi.Output[str]:
"""
Creation timestamp in RFC3339 text format.
"""
return pulumi.get(self, "creation_timestamp")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
An optional description of this resource.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="expireTime")
def expire_time(self) -> pulumi.Output[str]:
"""
Expire time of the certificate.
"""
return pulumi.get(self, "expire_time")
@property
@pulumi.getter
def managed(self) -> pulumi.Output[Optional['outputs.MangedSslCertificateManaged']]:
"""
Properties relevant to a managed certificate. These will be used if the certificate is managed (as indicated by a value
of 'MANAGED' in 'type').
"""
return pulumi.get(self, "managed")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the resource. Provided by the client when the resource is created. The name must be 1-63 characters long, and
comply with RFC1035. Specifically, the name must be 1-63 characters long and match the regular expression
'[a-z]([-a-z0-9]*[a-z0-9])?' which means the first character must be a lowercase letter, and all following characters
must be a dash, lowercase letter, or digit, except the last character, which cannot be a dash. These are in the same
namespace as the managed SSL certificates.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def project(self) -> pulumi.Output[str]:
return pulumi.get(self, "project")
@property
@pulumi.getter(name="selfLink")
def self_link(self) -> pulumi.Output[str]:
return pulumi.get(self, "self_link")
@property
@pulumi.getter(name="subjectAlternativeNames")
def subject_alternative_names(self) -> pulumi.Output[Sequence[str]]:
"""
Domains associated with the certificate via Subject Alternative Name.
"""
return pulumi.get(self, "subject_alternative_names")
@property
@pulumi.getter
def type(self) -> pulumi.Output[Optional[str]]:
"""
Enum field whose value is always 'MANAGED' - used to signal to the API which type this is. Default value: "MANAGED"
Possible values: ["MANAGED"]
"""
return pulumi.get(self, "type")
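# Hedged usage sketch (not part of the generated SDK file above): declaring a certificate
# with the non-deprecated gcp.compute.ManagedSslCertificate that the deprecation warnings
# above point to. The 'domains' field on the managed-args class is an assumption taken by
# analogy with the upstream GCP provider schema, not something defined in this file.
import pulumi
import pulumi_gcp as gcp

example_cert = gcp.compute.ManagedSslCertificate(
    "example-cert",
    managed=gcp.compute.ManagedSslCertificateManagedArgs(
        domains=["example.com."],
    ),
)
pulumi.export("certificateId", example_cert.certificate_id)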
| 48.143678
| 209
| 0.65831
| 2,977
| 25,131
| 5.394693
| 0.069869
| 0.072603
| 0.062765
| 0.065753
| 0.861083
| 0.833811
| 0.808966
| 0.793587
| 0.780448
| 0.749564
| 0
| 0.006049
| 0.243484
| 25,131
| 521
| 210
| 48.236084
| 0.838681
| 0.366917
| 0
| 0.720395
| 1
| 0
| 0.127552
| 0.053226
| 0
| 0
| 0
| 0
| 0
| 1
| 0.161184
| false
| 0.003289
| 0.023026
| 0.016447
| 0.282895
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
72a59f61df915bbbf5abc7c8bedee2682891fb22
| 3,453
|
py
|
Python
|
exeteracovid/algorithms/patient_level_covid_test_measures.py
|
deng113jie/ExeTeraCovid
|
ee9ec90983d7c2c711962c7fe9ac25251392e41b
|
[
"Apache-2.0"
] | 3
|
2021-03-23T14:23:06.000Z
|
2021-12-29T16:54:42.000Z
|
exeteracovid/algorithms/patient_level_covid_test_measures.py
|
deng113jie/ExeTeraCovid
|
ee9ec90983d7c2c711962c7fe9ac25251392e41b
|
[
"Apache-2.0"
] | 29
|
2021-02-22T12:12:53.000Z
|
2021-09-27T10:52:25.000Z
|
exeteracovid/algorithms/patient_level_covid_test_measures.py
|
deng113jie/ExeTeraCovid
|
ee9ec90983d7c2c711962c7fe9ac25251392e41b
|
[
"Apache-2.0"
] | 1
|
2021-03-08T15:00:30.000Z
|
2021-03-08T15:00:30.000Z
|
from exetera.core.session import Session
import exetera.core.operations as ops
def test_counts_per_patient_v1(session: Session,
patient_table,
test_table,
dest_patient_table,
dest_patient_name):
"""
Counting the number of tests performed for each patient id.
:param session: The Exetera session instance.
:param patient_table: The patient dataframe.
:param test_table: The tests dataframe.
:param dest_patient_table: The destination dataframe to store the results.
:param dest_patient_name: The name of the destination field to store the results.
"""
pid = 'id'
pids = session.get(patient_table[pid])
pids_ = pids.data[:]
if not ops.is_ordered(pids.data[:]):
raise ValueError("The patient table must be ordered by '{}'".format(pid))
t_pid = 'patient_id'
t_pids = session.get(test_table[t_pid])
t_pids_ = t_pids.data[:]
if not ops.is_ordered(t_pids_):
raise ValueError("The test table must be ordered by '{}'".format(t_pid))
# collapse the test data by patient_id and get the counts
spans_ = session.get_spans(t_pids_)
s_t_pids_ = session.apply_spans_first(spans_, t_pids_)
counts_ = session.apply_spans_count(spans_)
# merge the counts for the test table into the patient table
dest = session.create_numeric(dest_patient_table, dest_patient_name, 'int32')
session.ordered_merge_left(left_on=pids_, right_on=s_t_pids_, right_field_sources=(counts_,),
left_field_sinks=(dest,), left_unique=True, right_unique=True)
def first_test_date_per_patient(session: Session,
patient_table,
test_table,
test_date_name,
dest_patient_table,
dest_patient_name):
"""
    Find the first recorded test date for each patient id.
:param session: The Exetera session instance.
:param patient_table: The patient dataframe.
:param test_table: The tests dataframe.
    :param test_date_name: The name of the test date field; currently unused (the 'created_at' field is read instead).
:param dest_patient_table: The destination dataframe to store the results.
:param dest_patient_name: The name of the destination field to store the results.
"""
pid = 'id'
pids = session.get(patient_table[pid])
pids_ = pids.data[:]
if not ops.is_ordered(pids.data[:]):
raise ValueError("The patient table must be ordered by '{}'".format(pid))
t_pid = 'patient_id'
t_pids = session.get(test_table[t_pid])
t_pids_ = t_pids.data[:]
if not ops.is_ordered(t_pids_):
raise ValueError("The test table must be ordered by '{}'".format(t_pid))
    # collapse the test data by patient_id and take the first created_at value per patient
cats = session.get(test_table['created_at'])
spans_ = session.get_spans(t_pids_)
s_t_pids_ = session.apply_spans_first(spans_, t_pids_)
counts_ = session.apply_spans_first(spans_, cats)
    # merge the first test dates from the test table into the patient table
dest = session.create_numeric(dest_patient_table, dest_patient_name, 'int32')
session.ordered_merge_left(left_on=pids_, right_on=s_t_pids_, right_field_sources=(counts_,),
left_field_sinks=(dest,), left_unique=True, right_unique=True)
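# Hedged driver sketch (not part of the original module). The dataset path and the
# 'patients'/'tests' dataframe names are hypothetical, and Session.open_dataset is assumed
# from the ExeTera API; only the two helpers defined above are otherwise relied on.
if __name__ == '__main__':
    with Session() as s:
        ds = s.open_dataset('/path/to/covid.hdf5', 'r+', 'ds')
        test_counts_per_patient_v1(s, ds['patients'], ds['tests'],
                                   ds['patients'], 'test_count')
        first_test_date_per_patient(s, ds['patients'], ds['tests'], 'created_at',
                                    ds['patients'], 'first_test_date')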
| 42.109756
| 97
| 0.661743
| 470
| 3,453
| 4.559574
| 0.157447
| 0.089594
| 0.044797
| 0.037331
| 0.882408
| 0.870275
| 0.808679
| 0.808679
| 0.808679
| 0.808679
| 0
| 0.001943
| 0.254851
| 3,453
| 81
| 98
| 42.62963
| 0.830937
| 0.287866
| 0
| 0.826087
| 0
| 0
| 0.084838
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.043478
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
72be671107d1ff63bd9aac3ec029d3ed06cdb1b2
| 11,719
|
py
|
Python
|
test/test_simple_single_in_single_out_architectures.py
|
kundajelab/fastISM
|
1573feccba1ad5d9f1cee508f5bb03c4aa09bb2b
|
[
"MIT"
] | 12
|
2020-09-20T17:03:48.000Z
|
2022-03-16T06:51:52.000Z
|
test/test_simple_single_in_single_out_architectures.py
|
kundajelab/fastISM
|
1573feccba1ad5d9f1cee508f5bb03c4aa09bb2b
|
[
"MIT"
] | 5
|
2020-10-24T20:43:45.000Z
|
2022-02-25T19:40:47.000Z
|
test/test_simple_single_in_single_out_architectures.py
|
kundajelab/fastISM
|
1573feccba1ad5d9f1cee508f5bb03c4aa09bb2b
|
[
"MIT"
] | 2
|
2020-10-14T05:18:55.000Z
|
2022-02-21T07:34:14.000Z
|
import tensorflow as tf
import unittest
from context import fastISM
class TestSimpleSingleInSingleOutArchitectures(unittest.TestCase):
def test_conv_fc(self):
# inp -> C -> D -> y
inp = tf.keras.Input((100, 4))
x = tf.keras.layers.Conv1D(20, 3)(inp)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model, test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_conv_fc_sequential(self):
# inp -> C -> D -> y
# same as above but with Sequential
model = tf.keras.Sequential()
model.add(tf.keras.Input((100, 4)))
model.add(tf.keras.layers.Conv1D(20, 3))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(1))
fast_ism_model = fastISM.FastISM(
model, test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_conv_same_padding_fc(self):
# inp -> C -> D -> y
inp = tf.keras.Input((100, 4))
x = tf.keras.layers.Conv1D(20, 3, padding='same')(inp)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model, test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_conv_even_kernel_fc(self):
# inp -> C -> D -> y
inp = tf.keras.Input((100, 4))
x = tf.keras.layers.Conv1D(20, 4)(inp)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model, test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_conv_even_kernel_same_padding_fc(self):
# inp -> C -> D -> y
inp = tf.keras.Input((100, 4))
x = tf.keras.layers.Conv1D(20, 4, padding='same')(inp)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model, test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_conv_dilated_fc(self):
# inp -> C -> D -> y
inp = tf.keras.Input((100, 4))
x = tf.keras.layers.Conv1D(20, 3, dilation_rate=3)(inp)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model, test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_conv_maxpool_fc(self):
# inp -> C -> MXP -> D -> y
inp = tf.keras.Input((100, 4))
x = tf.keras.layers.Conv1D(10, 7)(inp)
x = tf.keras.layers.MaxPooling1D(3)(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(2)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model, test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_conv_two_maxpool_fc(self):
# inp -> C -> MXP -> MXP -> D -> y
inp = tf.keras.Input((100, 4))
x = tf.keras.layers.Conv1D(10, 7)(inp)
x = tf.keras.layers.MaxPooling1D(3)(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(2)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model, test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_two_conv_maxpool_fc(self):
# inp -> C -> MXP -> C -> MXP -> D -> y
inp = tf.keras.Input((100, 4))
x = tf.keras.layers.Conv1D(10, 7, padding='same')(inp)
x = tf.keras.layers.MaxPooling1D(3)(x)
x = tf.keras.layers.Conv1D(10, 3)(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(2)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model, test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_four_conv_maxpool_two_fc_1(self):
# inp -> C -> MXP -> C -> MXP -> C -> MXP -> C -> MXP -> D -> D -> y
inp = tf.keras.Input((200, 4))
x = tf.keras.layers.Conv1D(10, 7, padding='same')(inp)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Conv1D(20, 4, padding='same')(inp)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Conv1D(30, 2, padding='valid')(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Conv1D(10, 6, padding='same')(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(20)(x)
x = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model, test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_four_conv_maxpool_two_fc_2(self):
# inp -> C -> MXP -> C -> MXP -> C -> MXP -> C -> MXP -> D -> D -> y
inp = tf.keras.Input((200, 4))
x = tf.keras.layers.Conv1D(10, 3, dilation_rate=3, padding='same')(inp)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Conv1D(
25, 4, padding='same', activation='relu')(inp)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Conv1D(
30, 2, dilation_rate=2, padding='valid', activation='tanh')(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Conv1D(10, 6, padding='same')(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(20)(x)
x = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model, test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_four_conv_maxpool_two_fc_3(self):
# inp -> C -> MXP -> C -> MXP -> C -> MXP -> C -> MXP -> D -> D -> y
inp = tf.keras.Input((200, 4))
x = tf.keras.layers.Conv1D(10, 5, use_bias=False, padding='same')(inp)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Conv1D(
25, 4, padding='same', activation='relu')(inp)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Conv1D(30, 2, dilation_rate=2, use_bias=False,
padding='valid', activation='tanh')(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Conv1D(10, 3, padding='same')(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Flatten()(x)
x = tf.keras.layers.Dense(10)(x)
x = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model, test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_four_conv_maxpool_two_fc_4(self):
# inp -> C -> MXP -> C -> MXP -> C -> MXP -> C -> MXP -> D -> D -> y
        # with Dropout and GlobalAveragePooling1D
inp = tf.keras.Input((200, 4))
x = tf.keras.layers.Conv1D(10, 5, use_bias=False, padding='same')(inp)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Conv1D(
25, 4, padding='same', activation='relu')(inp)
x = tf.keras.layers.Dropout(0.5)(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Conv1D(30, 2, dilation_rate=2, use_bias=False,
padding='valid', activation='tanh')(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Dropout(0.8)(x)
x = tf.keras.layers.Conv1D(10, 3, padding='same')(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.GlobalAveragePooling1D()(x)
x = tf.keras.layers.Dense(10)(x)
x = tf.keras.layers.Dropout(0.3)(x)
x = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model, test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_pre_act_four_conv_maxpool_two_fc_4_10bp_change_range(self):
# inp -> tanh -> C -> MXP -> C -> MXP -> C -> MXP -> C -> MXP -> D -> D -> y
        # with Dropout and GlobalAveragePooling1D
# activation before first conv!
inp = tf.keras.Input((200, 4))
x = tf.keras.layers.Activation("tanh")(inp)
x = tf.keras.layers.Conv1D(10, 5, use_bias=False, padding='same')(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Conv1D(
25, 4, padding='same', activation='relu')(inp)
x = tf.keras.layers.Dropout(0.5)(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Conv1D(30, 2, dilation_rate=2, use_bias=False,
padding='valid', activation='tanh')(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.Dropout(0.8)(x)
x = tf.keras.layers.Conv1D(10, 3, padding='same')(x)
x = tf.keras.layers.MaxPooling1D(2)(x)
x = tf.keras.layers.GlobalAveragePooling1D()(x)
x = tf.keras.layers.Dense(10)(x)
x = tf.keras.layers.Dropout(0.3)(x)
x = tf.keras.layers.Dense(1)(x)
model = tf.keras.Model(inputs=inp, outputs=x)
fast_ism_model = fastISM.FastISM(
model, change_ranges=[(i, i+10) for i in range(0, 200, 10)],
test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
def test_pre_act_four_conv_maxpool_two_fc_4_sequential(self):
# inp -> tanh -> C -> MXP -> C -> MXP -> C -> MXP -> C -> MXP -> D -> D -> y
        # with Dropout and GlobalAveragePooling1D
# activation before first conv!
# same as above but with Sequential
model = tf.keras.Sequential()
model.add(tf.keras.Input((200, 4)))
model.add(tf.keras.layers.Activation("tanh"))
model.add(tf.keras.layers.Conv1D(
10, 5, use_bias=False, padding='same'))
model.add(tf.keras.layers.MaxPooling1D(2))
model.add(tf.keras.layers.Conv1D(
25, 4, padding='same', activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.MaxPooling1D(2))
model.add(tf.keras.layers.Conv1D(30, 2, dilation_rate=2, use_bias=False,
padding='valid', activation='tanh'))
model.add(tf.keras.layers.MaxPooling1D(2))
model.add(tf.keras.layers.Dropout(0.8))
model.add(tf.keras.layers.Conv1D(10, 3, padding='same'))
model.add(tf.keras.layers.MaxPooling1D(2))
model.add(tf.keras.layers.GlobalAveragePooling1D())
model.add(tf.keras.layers.Dense(10))
model.add(tf.keras.layers.Dropout(0.3))
model.add(tf.keras.layers.Dense(1))
fast_ism_model = fastISM.FastISM(
model, test_correctness=False)
self.assertTrue(fast_ism_model.test_correctness())
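# Hedged standalone sketch (not one of the test cases above): the same build-and-check
# pattern used throughout this class, pulled out as a plain helper. Only FastISM(...) and
# test_correctness(), both already exercised above, are relied on here.
def example_fast_ism_check():
    inp = tf.keras.Input((100, 4))
    x = tf.keras.layers.Conv1D(20, 3, padding='same')(inp)
    x = tf.keras.layers.GlobalAveragePooling1D()(x)
    out = tf.keras.layers.Dense(1)(x)
    model = tf.keras.Model(inputs=inp, outputs=out)
    fast_ism_model = fastISM.FastISM(model, test_correctness=False)
    return fast_ism_model.test_correctness()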
if __name__ == '__main__':
unittest.main()
| 40.410345
| 84
| 0.589897
| 1,662
| 11,719
| 4.044525
| 0.054753
| 0.14579
| 0.212734
| 0.19161
| 0.954924
| 0.949717
| 0.93127
| 0.91089
| 0.906278
| 0.906278
| 0
| 0.037567
| 0.254971
| 11,719
| 289
| 85
| 40.550173
| 0.732333
| 0.074494
| 0
| 0.779817
| 0
| 0
| 0.015707
| 0
| 0
| 0
| 0
| 0
| 0.068807
| 1
| 0.068807
| false
| 0
| 0.013761
| 0
| 0.087156
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
f485b90b9ba56669d6522b6a92b581687df5c21b
| 2,599
|
py
|
Python
|
favteacher.py
|
AvinashIkigai/Art-of-Doing
|
396aa765b623815ca506c559954f0ce7d2f87571
|
[
"BSD-2-Clause"
] | 1
|
2020-12-20T14:00:07.000Z
|
2020-12-20T14:00:07.000Z
|
favteacher.py
|
AvinashIkigai/Art-of-Doing
|
396aa765b623815ca506c559954f0ce7d2f87571
|
[
"BSD-2-Clause"
] | null | null | null |
favteacher.py
|
AvinashIkigai/Art-of-Doing
|
396aa765b623815ca506c559954f0ce7d2f87571
|
[
"BSD-2-Clause"
] | null | null | null |
print("Welcome to the Favorite Teachers Program\n")
fav_teachers = []
# Get user input
fav_teachers.append(input("Who is your first favorite teacher: ").title())
fav_teachers.append(input("Who is your second favorite teacher: ").title())
fav_teachers.append(input("Who is your third favorite teacher: ").title())
fav_teachers.append(input("Who is your fourth favorite teacher: ").title())
# Summary of list
print("\nYour favorite teachers ranked are: " + str(fav_teachers))
print("Your Favorite teachers alphabetically are: " + str(sorted(fav_teachers)))
print("Your Favorite teachers in reverse alphabetical order are: " +
str(sorted(fav_teachers, reverse=True)))
print("\nYour top two teachers are " +
fav_teachers[0] + " and " + fav_teachers[1] + ".")
print("Your next two favorite teachers are " +
fav_teachers[2] + " and " + fav_teachers[3])
print("Your last favorite teacher is " + fav_teachers[-1] + " .")
print("You have a total of " + str(len(fav_teachers)) + " favorite teachers.")
# Insert a new favorite teacher
fav_teachers.insert(0, input(
"\nOpps, " + fav_teachers[0] + " is no longer favorite teacher. Who is your new favorite teacher: ").title())
# Summary of list
print("\nYour favorite teachers ranked are: " + str(fav_teachers))
print("Your Favorite teachers alphabetically are: " + str(sorted(fav_teachers)))
print("Your Favorite teachers in reverse alphabetical order are: " +
str(sorted(fav_teachers, reverse=True)))
print("\nYour top two teachers are " +
fav_teachers[0] + " and " + fav_teachers[1] + ".")
print("Your next two favorite teachers are " +
fav_teachers[2] + " and " + fav_teachers[3])
print("Your last favorite teacher is " + fav_teachers[-1] + " .")
print("You have a total of " + str(len(fav_teachers)) + " favorite teachers.")
# Remove a teacher
fav_teachers.remove(input(
"\nYou have decided you no longer like a teacher, which teacher would you like to remove from the list:").title())
# Summary of list
print("\nYour favorite teachers ranked are: " + str(fav_teachers))
print("Your Favorite teachers alphabetically are: " + str(sorted(fav_teachers)))
print("Your Favorite teachers in reverse alphabetical order are: " +
str(sorted(fav_teachers, reverse=True)))
print("\nYour top two teachers are " +
fav_teachers[0] + " and " + fav_teachers[1] + ".")
print("Your next two favorite teachers are " +
fav_teachers[2] + " and " + fav_teachers[3])
print("Your last favorite teacher is " + fav_teachers[-1] + " .")
print("You have a total of " + str(len(fav_teachers)) + " favorite teachers.")
| 49.037736
| 118
| 0.703732
| 365
| 2,599
| 4.915068
| 0.169863
| 0.214604
| 0.053512
| 0.06689
| 0.829989
| 0.829989
| 0.829989
| 0.812709
| 0.812709
| 0.812709
| 0
| 0.007791
| 0.160446
| 2,599
| 52
| 119
| 49.980769
| 0.81439
| 0.041939
| 0
| 0.75
| 0
| 0.025
| 0.48973
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.55
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 9
|
be4773af974fb095cfb30d4cbd709b0d584ac364
| 855
|
py
|
Python
|
spider/Config.py
|
iecasszyjy/tweet_search-master
|
e4978521a39964c22ae46bf35d6ff17710e8e6c6
|
[
"MIT"
] | null | null | null |
spider/Config.py
|
iecasszyjy/tweet_search-master
|
e4978521a39964c22ae46bf35d6ff17710e8e6c6
|
[
"MIT"
] | 2
|
2021-03-31T18:54:16.000Z
|
2021-12-13T19:49:08.000Z
|
spider/Config.py
|
iecasszyjy/tweet_search-master
|
e4978521a39964c22ae46bf35d6ff17710e8e6c6
|
[
"MIT"
] | null | null | null |
import os
import sys
import pymongo
import redis
def get_spider_config():
#got
if sys.version_info[0] < 3:
import got
else:
import got3 as got
#mongo
client = pymongo.MongoClient(os.environ['MONGOHOST'],27017)
db = client.tweet
db.authenticate(name='admin',password='lixiepeng')
#redis
r = redis.StrictRedis(host=os.environ['REDISHOST'], port=6379, db=0, password='lixiepeng')
return got,db,r
def get_noau_config():
#got
if sys.version_info[0] < 3:
import got
else:
import got3 as got
#mongo
client = pymongo.MongoClient(os.environ['MONGOHOST'],27017)
db = client.tweet
#db.authenticate(name='admin',password='lixiepeng')
#redis
#r = redis.StrictRedis(host=os.environ['REDISHOST'], port=6379, db=0, password='lixiepeng')
r = redis.StrictRedis(host=os.environ['REDISHOST'], port=6379, db=0)
return got,db,r
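# Hedged usage sketch (not part of the original module): both helpers read MONGOHOST and
# REDISHOST from the environment, so those variables must be set before calling them.
if __name__ == '__main__':
    got, db, r = get_spider_config()
    print(db.name)   # the 'tweet' database handle returned above
    print(r.ping())  # True if the Redis server answers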
| 19.883721
| 92
| 0.709942
| 125
| 855
| 4.808
| 0.312
| 0.074875
| 0.084859
| 0.104825
| 0.855241
| 0.855241
| 0.855241
| 0.855241
| 0.855241
| 0.855241
| 0
| 0.042408
| 0.145029
| 855
| 43
| 93
| 19.883721
| 0.779754
| 0.194152
| 0
| 0.608696
| 0
| 0
| 0.08651
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0.086957
| 0.347826
| 0
| 0.521739
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 9
|
be94737b66ceb90435a2f16d98c88421067446ad
| 40,093
|
py
|
Python
|
test/data/array/util/test_grid_util.py
|
AshKelly/PyAutoLens
|
043795966338a655339e61782253ad67cc3c14e6
|
[
"MIT"
] | null | null | null |
test/data/array/util/test_grid_util.py
|
AshKelly/PyAutoLens
|
043795966338a655339e61782253ad67cc3c14e6
|
[
"MIT"
] | null | null | null |
test/data/array/util/test_grid_util.py
|
AshKelly/PyAutoLens
|
043795966338a655339e61782253ad67cc3c14e6
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pytest
from autolens.data.array.util import grid_util
test_data_dir = "{}/../test_files/array/".format(os.path.dirname(os.path.realpath(__file__)))
class TestGrid2d:
def test__array_3x3__sets_up_arcsecond_grid(self):
grid_2d = grid_util.regular_grid_2d_from_shape_pixel_scales_and_origin(shape=(3, 3), pixel_scales=(2.0, 1.0))
assert (grid_2d == np.array([[[2., -1.], [2., 0.], [2., 1.]],
[[0., -1.], [0., 0.], [0., 1.]],
[[-2., -1.], [-2., 0.], [-2., 1.]]])).all()
def test__array_4x4_and_different_pixel_scale__sets_up_arcsecond_grid(self):
grid_2d = grid_util.regular_grid_2d_from_shape_pixel_scales_and_origin(shape=(4, 4), pixel_scales=(0.5, 0.5))
assert (grid_2d == np.array([[[0.75, -0.75], [0.75, -0.25], [0.75, 0.25], [0.75, 0.75]],
[[0.25, -0.75], [0.25, -0.25], [0.25, 0.25], [0.25, 0.75]],
[[-0.25, -0.75], [-0.25, -0.25], [-0.25, 0.25], [-0.25, 0.75]],
[[-0.75, -0.75], [-0.75, -0.25], [-0.75, 0.25], [-0.75, 0.75]]])).all()
def test__array_2x3__sets_up_arcsecond_grid(self):
grid_2d = grid_util.regular_grid_2d_from_shape_pixel_scales_and_origin(shape=(2, 3), pixel_scales=(1.0, 1.0))
assert (grid_2d == np.array([[[0.5, -1.], [0.5, 0.], [0.5, 1.]],
[[-0.5, -1.], [-0.5, 0.], [-0.5, 1.]]])).all()
def test__array_3x2__sets_up_arcsecond_grid(self):
grid_2d = grid_util.regular_grid_2d_from_shape_pixel_scales_and_origin(shape=(3, 2), pixel_scales=(1.0, 1.0))
assert (grid_2d == np.array([[[1., -0.5], [1., 0.5]],
[[0., -0.5], [0., 0.5]],
[[-1., -0.5], [-1., 0.5]]])).all()
def test__array_3x3___input_origin__shifts_grid_by_origin(self):
grid_2d = grid_util.regular_grid_2d_from_shape_pixel_scales_and_origin(shape=(3, 3), pixel_scales=(2.0, 1.0),
origin=(1.0, 1.0))
assert (grid_2d == np.array([[[3., 0.], [3., 1.], [3., 2.]],
[[1., 0.], [1., 1.], [1., 2.]],
[[-1., 0.], [-1., 1.], [-1., 2.]]])).all()
def test__array_3x2__different_origin(self):
grid_2d = grid_util.regular_grid_2d_from_shape_pixel_scales_and_origin(shape=(3, 2), pixel_scales=(1.0, 1.0),
origin=(3.0, -2.0))
assert (grid_2d == np.array([[[4., -2.5], [4., -1.5]],
[[3., -2.5], [3., -1.5]],
[[2., -2.5], [2., -1.5]]])).all()
class TestGrid1d:
def test__array_3x3__sets_up_arcsecond_grid(self):
grid_2d = grid_util.regular_grid_1d_from_shape_pixel_scales_and_origin(shape=(3, 3), pixel_scales=(2.0, 1.0))
assert (grid_2d == np.array([[2., -1.], [2., 0.], [2., 1.],
[0., -1.], [0., 0.], [0., 1.],
[-2., -1.], [-2., 0.], [-2., 1.]])).all()
def test__array_4x4_and_different_pixel_scale__sets_up_arcsecond_grid(self):
grid_2d = grid_util.regular_grid_1d_from_shape_pixel_scales_and_origin(shape=(4, 4), pixel_scales=(0.5, 0.5))
assert (grid_2d == np.array([[0.75, -0.75], [0.75, -0.25], [0.75, 0.25], [0.75, 0.75],
[0.25, -0.75], [0.25, -0.25], [0.25, 0.25], [0.25, 0.75],
[-0.25, -0.75], [-0.25, -0.25], [-0.25, 0.25], [-0.25, 0.75],
[-0.75, -0.75], [-0.75, -0.25], [-0.75, 0.25], [-0.75, 0.75]])).all()
def test__array_2x3__sets_up_arcsecond_grid(self):
grid_2d = grid_util.regular_grid_1d_from_shape_pixel_scales_and_origin(shape=(2, 3), pixel_scales=(1.0, 1.0))
assert (grid_2d == np.array([[0.5, -1.], [0.5, 0.], [0.5, 1.],
[-0.5, -1.], [-0.5, 0.], [-0.5, 1.]])).all()
def test__array_3x2__sets_up_arcsecond_grid(self):
grid_2d = grid_util.regular_grid_1d_from_shape_pixel_scales_and_origin(shape=(3, 2), pixel_scales=(1.0, 1.0))
assert (grid_2d == np.array([[1., -0.5], [1., 0.5],
[0., -0.5], [0., 0.5],
[-1., -0.5], [-1., 0.5]])).all()
def test__array_3x3__input_origin__shifts_grid_by_origin(self):
grid_2d = grid_util.regular_grid_1d_from_shape_pixel_scales_and_origin(shape=(3, 3), pixel_scales=(2.0, 1.0),
origin=(1.0, 1.0))
assert (grid_2d == np.array([[3., 0.], [3., 1.], [3., 2.],
[1., 0.], [1., 1.], [1., 2.],
[-1., 0.], [-1., 1.], [-1., 2.]])).all()
def test__array_3x2__different_origin(self):
grid_2d = grid_util.regular_grid_1d_from_shape_pixel_scales_and_origin(shape=(3, 2), pixel_scales=(1.0, 1.0),
origin=(3.0, -2.0))
assert (grid_2d == np.array([[4., -2.5], [4., -1.5],
[3., -2.5], [3., -1.5],
[2., -2.5], [2., -1.5]])).all()
class TestRegularGridMasked(object):
def test__setup_3x3_image_1_coordinate_in_mask(self):
mask = np.array([[True, True, True],
[True, False, True],
[True, True, True]])
image_grid = grid_util.regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask=mask, pixel_scales=(3.0, 6.0))
assert (image_grid[0] == np.array([0.0, 0.0])).all()
def test__setup_3x3_image__five_coordinates_in_mask(self):
mask = np.array([[True, False, True],
[False, False, False],
[True, False, True]])
image_grid = grid_util.regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask=mask, pixel_scales=(6.0, 3.0))
assert (image_grid == np.array([[6., 0.],
[0., -3.], [0., 0.], [0., 3.],
[-6., 0.]])).all()
def test__setup_4x4_image__ten_coordinates_in_grid__new_pixel_scale(self):
mask = np.array([[True, False, False, True],
[False, False, False, True],
[True, False, False, True],
[False, False, False, True]])
image_grid = grid_util.regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask=mask, pixel_scales=(1.0, 1.0))
assert (image_grid == np.array([[1.5, -0.5], [1.5, 0.5],
[0.5, -1.5], [0.5, -0.5], [0.5, 0.5],
[-0.5, -0.5], [-0.5, 0.5],
[-1.5, -1.5], [-1.5, -0.5], [-1.5, 0.5]])).all()
def test__setup_3x4_image__six_grid(self):
mask = np.array([[True, False, True, True],
[False, False, False, True],
[True, False, True, False]])
image_grid = grid_util.regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask=mask, pixel_scales=(3.0, 3.0))
assert (image_grid == np.array([[3., -1.5],
[0., -4.5], [0., -1.5], [0., 1.5],
[-3., -1.5], [-3., 4.5]])).all()
def test__setup_3x3_image__five_coordinates_in_mask__include_nonzero_origin(self):
mask = np.array([[True, False, True],
[False, False, False],
[True, False, True]])
image_grid = grid_util.regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask=mask, pixel_scales=(6.0, 3.0),
origin=(1.0, 1.0))
assert image_grid == pytest.approx(np.array([[7., 1.],
[1., -2.], [1., 1.], [1., 4.],
[-5., 1.]]), 1e-4)
def test__setup_3x4_image__six_grid__include_nonzero_origin(self):
mask = np.array([[True, False, True, True],
[False, False, False, True],
[True, False, True, False]])
image_grid = grid_util.regular_grid_1d_masked_from_mask_pixel_scales_and_origin(mask=mask, pixel_scales=(3.0, 3.0),
origin=(1.0, 2.0))
assert image_grid == pytest.approx(np.array([[4., 0.5],
[1., -2.5], [1., 0.5], [1., 3.5],
[-2., 0.5], [-2., 6.5]]), 1e-4)
class TestSubGridMasked(object):
def test__3x3_mask_with_one_pixel__2x2_sub_grid(self):
mask = np.array([[True, True, True],
[True, False, True],
[True, True, True]])
sub_grid = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(mask=mask, pixel_scales=(3.0, 6.0),
sub_grid_size=2)
assert (sub_grid[0:4] == np.array([[0.5, -1.0], [0.5, 1.0],
[-0.5, -1.0], [-0.5, 1.0]])).all()
def test__3x3_mask_with_row_of_pixels__2x2_sub_grid(self):
mask = np.array([[True, True, True],
[False, False, False],
[True, True, True]])
sub_grid = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(mask=mask, pixel_scales=(3.0, 3.0),
sub_grid_size=2)
assert (sub_grid[0:4] == np.array([[0.5, -3.5], [0.5, -2.5],
[-0.5, -3.5], [-0.5, -2.5]])).all()
assert (sub_grid[4:8] == np.array([[0.5, -0.5], [0.5, 0.5],
[-0.5, -0.5], [-0.5, 0.5]])).all()
assert (sub_grid[8:12] == np.array([[0.5, 2.5], [0.5, 3.5],
[-0.5, 2.5], [-0.5, 3.5]])).all()
def test__3x3_mask_with_row_and_column_of_pixels__2x2_sub_grid(self):
mask = np.array([[True, True, False],
[False, False, False],
[True, True, False]])
sub_grid = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(mask=mask, pixel_scales=(3.0, 3.0),
sub_grid_size=2)
assert (sub_grid == np.array([[3.5, 2.5], [3.5, 3.5], [2.5, 2.5], [2.5, 3.5],
[0.5, -3.5], [0.5, -2.5], [-0.5, -3.5], [-0.5, -2.5],
[0.5, -0.5], [0.5, 0.5], [-0.5, -0.5], [-0.5, 0.5],
[0.5, 2.5], [0.5, 3.5], [-0.5, 2.5], [-0.5, 3.5],
[-2.5, 2.5], [-2.5, 3.5], [-3.5, 2.5], [-3.5, 3.5]])).all()
def test__3x3_mask_with_row_and_column_of_pixels__2x2_sub_grid__different_pixel_scale(self):
mask = np.array([[True, True, False],
[False, False, False],
[True, True, False]])
sub_grid = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(mask=mask, pixel_scales=(0.3, 0.3),
sub_grid_size=2)
sub_grid = np.round(sub_grid, decimals=2)
np.testing.assert_almost_equal(sub_grid,
np.array([[0.35, 0.25], [0.35, 0.35], [0.25, 0.25], [0.25, 0.35],
[0.05, -0.35], [0.05, -0.25], [-0.05, -0.35], [-0.05, -0.25],
[0.05, -0.05], [0.05, 0.05], [-0.05, -0.05], [-0.05, 0.05],
[0.05, 0.25], [0.05, 0.35], [-0.05, 0.25], [-0.05, 0.35],
[-0.25, 0.25], [-0.25, 0.35], [-0.35, 0.25], [-0.35, 0.35]]))
def test__3x3_mask_with_one_pixel__3x3_sub_grid(self):
mask = np.array([[True, True, True],
[True, False, True],
[True, True, True]])
sub_grid = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(mask=mask, pixel_scales=(3.0, 3.0),
sub_grid_size=3)
assert (sub_grid == np.array([[[0.75, -0.75], [0.75, 0.], [0.75, 0.75], [0., -0.75], [0., 0.],
[0., 0.75], [-0.75, -0.75], [-0.75, 0.], [-0.75, 0.75]]])).all()
def test__3x3_mask_with_one_row__3x3_sub_grid(self):
mask = np.array([[True, True, False],
[True, False, True],
[True, True, False]])
sub_grid = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(mask=mask, pixel_scales=(2.0, 2.0),
sub_grid_size=3)
assert (sub_grid == np.array([[2.5, 1.5], [2.5, 2.], [2.5, 2.5],
[2., 1.5], [2., 2.], [2., 2.5],
[1.5, 1.5], [1.5, 2.], [1.5, 2.5],
[0.5, -0.5], [0.5, 0.], [0.5, 0.5],
[0., -0.5], [0., 0.], [0., 0.5],
[-0.5, -0.5], [-0.5, 0.], [-0.5, 0.5],
[-1.5, 1.5], [-1.5, 2.], [-1.5, 2.5],
[-2., 1.5], [-2., 2.], [-2., 2.5],
[-2.5, 1.5], [-2.5, 2.], [-2.5, 2.5]])).all()
def test__4x4_mask_with_one_pixel__4x4_sub_grid(self):
mask = np.array([[True, True, True, True],
[True, False, False, True],
[True, False, False, True],
[True, True, True, False]])
sub_grid = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(mask=mask, pixel_scales=(2.0, 2.0),
sub_grid_size=4)
sub_grid = np.round(sub_grid, decimals=1)
assert (sub_grid == np.array([[1.6, -1.6], [1.6, -1.2], [1.6, -0.8], [1.6, -0.4],
[1.2, -1.6], [1.2, -1.2], [1.2, -0.8], [1.2, -0.4],
[0.8, -1.6], [0.8, -1.2], [0.8, -0.8], [0.8, -0.4],
[0.4, -1.6], [0.4, -1.2], [0.4, -0.8], [0.4, -0.4],
[1.6, 0.4], [1.6, 0.8], [1.6, 1.2], [1.6, 1.6],
[1.2, 0.4], [1.2, 0.8], [1.2, 1.2], [1.2, 1.6],
[0.8, 0.4], [0.8, 0.8], [0.8, 1.2], [0.8, 1.6],
[0.4, 0.4], [0.4, 0.8], [0.4, 1.2], [0.4, 1.6],
[-0.4, -1.6], [-0.4, -1.2], [-0.4, -0.8], [-0.4, -0.4],
[-0.8, -1.6], [-0.8, -1.2], [-0.8, -0.8], [-0.8, -0.4],
[-1.2, -1.6], [-1.2, -1.2], [-1.2, -0.8], [-1.2, -0.4],
[-1.6, -1.6], [-1.6, -1.2], [-1.6, -0.8], [-1.6, -0.4],
[-0.4, 0.4], [-0.4, 0.8], [-0.4, 1.2], [-0.4, 1.6],
[-0.8, 0.4], [-0.8, 0.8], [-0.8, 1.2], [-0.8, 1.6],
[-1.2, 0.4], [-1.2, 0.8], [-1.2, 1.2], [-1.2, 1.6],
[-1.6, 0.4], [-1.6, 0.8], [-1.6, 1.2], [-1.6, 1.6],
[-2.4, 2.4], [-2.4, 2.8], [-2.4, 3.2], [-2.4, 3.6],
[-2.8, 2.4], [-2.8, 2.8], [-2.8, 3.2], [-2.8, 3.6],
[-3.2, 2.4], [-3.2, 2.8], [-3.2, 3.2], [-3.2, 3.6],
[-3.6, 2.4], [-3.6, 2.8], [-3.6, 3.2], [-3.6, 3.6]])).all()
def test__4x3_mask_with_one_pixel__2x2_sub_grid(self):
mask = np.array([[True, True, True],
[True, False, True],
[True, False, False],
[False, True, True]])
sub_grid = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(mask=mask, pixel_scales=(3.0, 3.0),
sub_grid_size=2)
assert (sub_grid == np.array([[2., -0.5], [2., 0.5], [1., -0.5], [1., 0.5],
[-1., -0.5], [-1., 0.5], [-2., -0.5], [-2., 0.5],
[-1., 2.5], [-1., 3.5], [-2., 2.5], [-2., 3.5],
[-4., -3.5], [-4., -2.5], [-5., -3.5], [-5., -2.5]])).all()
def test__3x4_mask_with_one_pixel__2x2_sub_grid(self):
mask = np.array([[True, True, True, False],
[True, False, False, True],
[False, True, False, True]])
sub_grid = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(mask=mask, pixel_scales=(3.0, 3.0),
sub_grid_size=2)
assert (sub_grid == np.array([[3.5, 4.], [3.5, 5.], [2.5, 4.], [2.5, 5.],
[0.5, -2.], [0.5, -1.], [-0.5, -2.], [-0.5, -1.],
[0.5, 1.], [0.5, 2.], [-0.5, 1.], [-0.5, 2.],
[-2.5, -5.], [-2.5, -4.], [-3.5, -5.], [-3.5, -4.],
[-2.5, 1.], [-2.5, 2.], [-3.5, 1.], [-3.5, 2.]])).all()
def test__3x3_mask_with_one_pixel__2x2_sub_grid__include_nonzero_origin(self):
mask = np.array([[True, True, True],
[True, False, True],
[True, True, True]])
sub_grid = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(mask=mask, pixel_scales=(3.0, 6.0),
sub_grid_size=2, origin=(1.0, 1.0))
assert sub_grid[0:4] == pytest.approx(np.array([[1.5, 0.0], [1.5, 2.0],
[0.5, 0.0], [0.5, 2.0]]), 1e-4)
def test__3x3_mask_with_one_row__3x3_sub_grid__include_nonzero_origin(self):
mask = np.array([[True, True, False],
[True, False, True],
[True, True, False]])
sub_grid = grid_util.sub_grid_1d_masked_from_mask_pixel_scales_and_sub_grid_size(mask=mask, pixel_scales=(2.0, 2.0),
sub_grid_size=3, origin=(1.0, -1.0))
assert sub_grid == pytest.approx(np.array([[3.5, 0.5], [3.5, 1.], [3.5, 1.5],
[3., 0.5], [3., 1.], [3., 1.5],
[2.5, 0.5], [2.5, 1.], [2.5, 1.5],
[1.5, -1.5], [1.5, -1.], [1.5, -0.5],
[1., -1.5], [1., -1.], [1., -0.5],
[0.5, -1.5], [0.5, -1.], [0.5, -0.5],
[-0.5, 0.5], [-0.5, 1.], [-0.5, 1.5],
[-1., 0.5], [-1., 1.], [-1., 1.5],
[-1.5, 0.5], [-1.5, 1.], [-1.5, 1.5]]), 1e-4)
class TestGridConversions(object):
def test__1d_arc_second_grid_to_1d_pixel_grid__coordinates_in_origins_of_pixels(self):
grid_arc_seconds = np.array([[1.0, -2.0], [1.0, 2.0],
[-1.0, -2.0], [-1.0, 2.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixels_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(2, 2),
pixel_scales=(2.0, 4.0))
assert (grid_pixels == np.array([[0.5, 0.5], [0.5, 1.5],
[1.5, 0.5], [1.5, 1.5]])).all()
grid_arc_seconds = np.array([[3.0, -6.0], [3.0, 0.0], [3.0, 6.0],
[0.0, -6.0], [0.0, 0.0], [0.0, 6.0],
[-3.0, -6.0], [-3.0, 0.0], [-3.0, 6.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixels_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(3, 3),
pixel_scales=(3.0, 6.0))
assert (grid_pixels == np.array([[0.5, 0.5], [0.5, 1.5], [0.5, 2.5],
[1.5, 0.5], [1.5, 1.5], [1.5, 2.5],
[2.5, 0.5], [2.5, 1.5], [2.5, 2.5]])).all()
def test__same_as_above__pixels__but_coordinates_are_top_left_of_each_pixel(self):
grid_arc_seconds = np.array([[2.0, -4], [2.0, 0.0],
[0.0, -4], [0.0, 0.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixels_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(2, 2),
pixel_scales=(2.0, 4.0))
assert (grid_pixels == np.array([[0, 0], [0, 1],
[1, 0], [1, 1]])).all()
grid_arc_seconds = np.array([[4.5, -9.0], [4.5, -3.0], [4.5, 3.0],
[1.5, -9.0], [1.5, -3.0], [1.5, 3.0],
[-1.5, -9.0], [-1.5, -3.0], [-1.5, 3.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixels_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(3, 3),
pixel_scales=(3.0, 6.0))
assert (grid_pixels == np.array([[0, 0], [0, 1], [0, 2],
[1, 0], [1, 1], [1, 2],
[2, 0], [2, 1], [2, 2]])).all()
def test__same_as_above___pixels__but_coordinates_are_bottom_right_of_each_pixel(self):
grid_arc_seconds = np.array([[0.0, 0.0], [0.0, 4.0],
[-2.0, 0.0], [-2.0, 4.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixels_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(2, 2),
pixel_scales=(2.0, 4.0))
assert (grid_pixels == np.array([[1, 1], [1, 2],
[2, 1], [2, 2]])).all()
grid_arc_seconds = np.array([[1.5, -3.0], [1.5, 3.0], [1.5, 9.0],
[-1.5, -3.0], [-1.5, 3.0], [-1.5, 9.0],
[-4.5, -3.0], [-4.5, 3.0], [-4.5, 9.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixels_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(3, 3),
pixel_scales=(3.0, 6.0))
assert (grid_pixels == np.array([[1, 1], [1, 2], [1, 3],
[2, 1], [2, 2], [2, 3],
[3, 1], [3, 2], [3, 3]])).all()
def test__same_as_above___arcsec_to_pixel__but_nonzero_origin(self):
        # -1.0 from all entries for an origin of (-1.0, -1.0)
grid_arc_seconds = np.array([[-1.0, -1.0], [-1.0, 3.0],
[-3.0, -1.0], [-3.0, 3.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixels_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(2, 2),
pixel_scales=(2.0, 4.0), origin=(-1.0, -1.0))
assert (grid_pixels == np.array([[1, 1], [1, 2],
[2, 1], [2, 2]])).all()
# -1.0, +2.0, for origin of (-1.0, +2.0)
grid_arc_seconds = np.array([[0.5, -1.0], [0.5, 5.0], [0.5, 11.0],
[-2.5, -1.0], [-2.5, 5.0], [-2.5, 11.0],
[-5.5, -1.0], [-5.5, 5.0], [-5.5, 11.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixels_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(3, 3),
pixel_scales=(3.0, 6.0), origin=(-1.0, 2.0))
assert (grid_pixels == np.array([[1, 1], [1, 2], [1, 3],
[2, 1], [2, 2], [2, 3],
[3, 1], [3, 2], [3, 3]])).all()
def test__1d_arc_second_grid_to_1d_pixel_origind_grid__coordinates_in_origins_of_pixels(self):
grid_arc_seconds = np.array([[1.0, -2.0], [1.0, 2.0],
[-1.0, -2.0], [-1.0, 2.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_centres_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(2, 2),
pixel_scales=(2.0, 4.0))
assert (grid_pixels == np.array([[0, 0], [0, 1],
[1, 0], [1, 1]])).all()
grid_arc_seconds = np.array([[3.0, -6.0], [3.0, 0.0], [3.0, 6.0],
[0.0, -6.0], [0.0, 0.0], [0.0, 6.0],
[-3.0, -6.0], [-3.0, 0.0], [-3.0, 6.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_centres_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(3, 3),
pixel_scales=(3.0, 6.0))
assert (grid_pixels == np.array([[0, 0], [0, 1], [0, 2],
[1, 0], [1, 1], [1, 2],
[2, 0], [2, 1], [2, 2]])).all()
def test__same_as_above_but_coordinates_are_top_left_of_each_pixel(self):
grid_arc_seconds = np.array([[1.99, -3.99], [1.99, 0.01],
[-0.01, -3.99], [-0.01, 0.01]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_centres_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(2, 2),
pixel_scales=(2.0, 4.0))
assert (grid_pixels == np.array([[0, 0], [0, 1],
[1, 0], [1, 1]])).all()
grid_arc_seconds = np.array([[4.49, -8.99], [4.49, -2.99], [4.49, 3.01],
[1.49, -8.99], [1.49, -2.99], [1.49, 3.01],
[-1.51, -8.99], [-1.51, -2.99], [-1.51, 3.01]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_centres_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(3, 3),
pixel_scales=(3.0, 6.0))
assert (grid_pixels == np.array([[0, 0], [0, 1], [0, 2],
[1, 0], [1, 1], [1, 2],
[2, 0], [2, 1], [2, 2]])).all()
def test__same_as_above_but_coordinates_are_bottom_right_of_each_pixel(self):
grid_arc_seconds = np.array([[0.01, -0.01], [0.01, 3.99],
[-1.99, -0.01], [-1.99, 3.99]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_centres_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(2, 2),
pixel_scales=(2.0, 4.0))
assert (grid_pixels == np.array([[0, 0], [0, 1],
[1, 0], [1, 1]])).all()
grid_arc_seconds = np.array([[1.51, -3.01], [1.51, 2.99], [1.51, 8.99],
[-1.49, -3.01], [-1.49, 2.99], [-1.49, 8.99],
[-4.49, -3.01], [-4.49, 2.99], [-4.49, 8.99]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_centres_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(3, 3),
pixel_scales=(3.0, 6.0))
assert (grid_pixels == np.array([[0, 0], [0, 1], [0, 2],
[1, 0], [1, 1], [1, 2],
[2, 0], [2, 1], [2, 2]])).all()
def test__same_as_above__arcsec_to_pixel_origin__but_nonzero_origin(self):
        # +1.0 for all entries for an origin of (1.0, 1.0)
grid_arc_seconds = np.array([[2.0, -1.0], [2.0, 3.0],
[0.0, -1.0], [0.0, 3.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_centres_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(2, 2),
pixel_scales=(2.0, 4.0), origin=(1.0, 1.0))
assert (grid_pixels == np.array([[0, 0], [0, 1],
[1, 0], [1, 1]])).all()
# +1.0, -2.0, for origin of (1.0, -2.0)
grid_arc_seconds = np.array([[4.0, -8.0], [4.0, -2.0], [4.0, 4.0],
[1.0, -8.0], [1.0, -2.0], [1.0, 4.0],
[-2.0, -8.0], [-2.0, -2.0], [-2.0, 4.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_centres_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(3, 3),
pixel_scales=(3.0, 6.0), origin=(1.0, -2.0))
assert (grid_pixels == np.array([[0, 0], [0, 1], [0, 2],
[1, 0], [1, 1], [1, 2],
[2, 0], [2, 1], [2, 2]])).all()
def test__1d_arc_second_grid_to_1d_pixel_1d_index_grid__coordinates_in_origins_of_pixels(self):
grid_arc_seconds = np.array([[1.0, -2.0], [1.0, 2.0],
[-1.0, -2.0], [-1.0, 2.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_indexes_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(2, 2),
pixel_scales=(2.0, 4.0))
assert (grid_pixels == np.array([0, 1, 2, 3])).all()
grid_arc_seconds = np.array([[3.0, -6.0], [3.0, 0.0], [3.0, 6.0],
[0.0, -6.0], [0.0, 0.0], [0.0, 6.0],
[-3.0, -6.0], [-3.0, 0.0], [-3.0, 6.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_indexes_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(3, 3),
pixel_scales=(3.0, 6.0))
assert (grid_pixels == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])).all()
def test__same_as_above_1d_index__but_coordinates_are_top_left_of_each_pixel(self):
grid_arc_seconds = np.array([[1.99, -3.99], [1.99, 0.01],
[-0.01, -3.99], [-0.01, 0.01]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_indexes_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(2, 2),
pixel_scales=(2.0, 4.0))
assert (grid_pixels == np.array([0, 1, 2, 3])).all()
grid_arc_seconds = np.array([[4.49, -8.99], [4.49, -2.99], [4.49, 3.01],
[1.49, -8.99], [1.49, -2.99], [1.49, 3.01],
[-1.51, -8.99], [-1.51, -2.99], [-1.51, 3.01]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_indexes_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(3, 3),
pixel_scales=(3.0, 6.0))
assert (grid_pixels == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])).all()
def test__same_as_above_1d_index__but_coordinates_are_bottom_right_of_each_pixel(self):
grid_arc_seconds = np.array([[0.01, -0.01], [0.01, 3.99],
[-1.99, -0.01], [-1.99, 3.99]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_indexes_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(2, 2),
pixel_scales=(2.0, 4.0))
assert (grid_pixels == np.array([0, 1, 2, 3])).all()
grid_arc_seconds = np.array([[1.51, -3.01], [1.51, 2.99], [1.51, 8.99],
[-1.49, -3.01], [-1.49, 2.99], [-1.49, 8.99],
[-4.49, -3.01], [-4.49, 2.99], [-4.49, 8.99]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_indexes_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(3, 3),
pixel_scales=(3.0, 6.0))
assert (grid_pixels == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])).all()
def test__same_as_above__1d_index__arcsec_to_pixel_origin__but_nonzero_origin(self):
        # +1.0 for all entries for an origin of (1.0, 1.0)
grid_arc_seconds = np.array([[2.0, -1.0], [2.0, 3.0],
[0.0, -1.0], [0.0, 3.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_indexes_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(2, 2),
pixel_scales=(2.0, 4.0), origin=(1.0, 1.0))
assert (grid_pixels == np.array([0, 1, 2, 3])).all()
# +1.0, -2.0, for origin of (1.0, -2.0)
grid_arc_seconds = np.array([[4.0, -8.0], [4.0, -2.0], [4.0, 4.0],
[1.0, -8.0], [1.0, -2.0], [1.0, 4.0],
[-2.0, -8.0], [-2.0, -2.0], [-2.0, 4.0]])
grid_pixels = grid_util.grid_arc_seconds_1d_to_grid_pixel_indexes_1d(grid_arc_seconds_1d=grid_arc_seconds, shape=(3, 3),
pixel_scales=(3.0, 6.0), origin=(1.0, -2.0))
assert (grid_pixels == np.array([0, 1, 2, 3, 4, 5, 6, 7, 8])).all()
def test__1d_pixel_origin_grid_to_1d_arc_second_grid__coordinates_in_origins_of_pixels(self):
grid_pixels = np.array([[0.5, 0.5], [0.5, 1.5],
[1.5, 0.5], [1.5, 1.5]])
grid_arc_seconds = grid_util.grid_pixels_1d_to_grid_arc_seconds_1d(grid_pixels_1d=grid_pixels, shape=(2, 2),
pixel_scales=(2.0, 4.0))
assert (grid_arc_seconds == np.array([[1.0, -2.0], [1.0, 2.0],
[-1.0, -2.0], [-1.0, 2.0]])).all()
grid_pixels = np.array([[0.5, 0.5], [0.5, 1.5], [0.5, 2.5],
[1.5, 0.5], [1.5, 1.5], [1.5, 2.5],
[2.5, 0.5], [2.5, 1.5], [2.5, 2.5]])
grid_arc_seconds = grid_util.grid_pixels_1d_to_grid_arc_seconds_1d(grid_pixels_1d=grid_pixels, shape=(3, 3),
pixel_scales=(3.0, 6.0))
assert (grid_arc_seconds == np.array([[3.0, -6.0], [3.0, 0.0], [3.0, 6.0],
[0.0, -6.0], [0.0, 0.0], [0.0, 6.0],
[-3.0, -6.0], [-3.0, 0.0], [-3.0, 6.0]])).all()
def test__same_as_above__pixel_to_arcsec__but_coordinates_are_top_left_of_each_pixel(self):
grid_pixels = np.array([[0, 0], [0, 1],
[1, 0], [1, 1]])
grid_arc_seconds = grid_util.grid_pixels_1d_to_grid_arc_seconds_1d(grid_pixels_1d=grid_pixels, shape=(2, 2),
pixel_scales=(2.0, 4.0))
assert (grid_arc_seconds == np.array([[2.0, -4], [2.0, 0.0],
[0.0, -4], [0.0, 0.0]])).all()
grid_pixels = np.array([[0, 0], [0, 1], [0, 2],
[1, 0], [1, 1], [1, 2],
[2, 0], [2, 1], [2, 2]])
grid_arc_seconds = grid_util.grid_pixels_1d_to_grid_arc_seconds_1d(grid_pixels_1d=grid_pixels, shape=(3, 3),
pixel_scales=(3.0, 6.0))
assert (grid_arc_seconds == np.array([[4.5, -9.0], [4.5, -3.0], [4.5, 3.0],
[1.5, -9.0], [1.5, -3.0], [1.5, 3.0],
[-1.5, -9.0], [-1.5, -3.0], [-1.5, 3.0]])).all()
def test__same_as_above__pixel_to_arcsec_but_coordinates_are_bottom_right_of_each_pixel(self):
grid_pixels = np.array([[1, 1], [1, 2],
[2, 1], [2, 2]])
grid_arc_seconds = grid_util.grid_pixels_1d_to_grid_arc_seconds_1d(grid_pixels_1d=grid_pixels, shape=(2, 2),
pixel_scales=(2.0, 4.0))
assert (grid_arc_seconds == np.array([[0.0, 0.0], [0.0, 4.0],
[-2.0, 0.0], [-2.0, 4.0]])).all()
grid_pixels = np.array([[1, 1], [1, 2], [1, 3],
[2, 1], [2, 2], [2, 3],
[3, 1], [3, 2], [3, 3]])
grid_arc_seconds = grid_util.grid_pixels_1d_to_grid_arc_seconds_1d(grid_pixels_1d=grid_pixels, shape=(3, 3),
pixel_scales=(3.0, 6.0))
assert (grid_arc_seconds == np.array([[1.5, -3.0], [1.5, 3.0], [1.5, 9.0],
[-1.5, -3.0], [-1.5, 3.0], [-1.5, 9.0],
[-4.5, -3.0], [-4.5, 3.0], [-4.5, 9.0]])).all()
def test__same_as_above__pixel_to_arcsec__nonzero_origin(self):
grid_pixels = np.array([[0.5, 0.5], [0.5, 1.5],
[1.5, 0.5], [1.5, 1.5]])
grid_arc_seconds = grid_util.grid_pixels_1d_to_grid_arc_seconds_1d(grid_pixels_1d=grid_pixels, shape=(2, 2),
pixel_scales=(2.0, 4.0), origin=(-1.0, -1.0))
        # -1.0 from all entries for an origin of (-1.0, -1.0)
assert (grid_arc_seconds == np.array([[0.0, -3.0], [0.0, 1.0],
[-2.0, -3.0], [-2.0, 1.0]])).all()
grid_pixels = np.array([[0.5, 0.5], [0.5, 1.5], [0.5, 2.5],
[1.5, 0.5], [1.5, 1.5], [1.5, 2.5],
[2.5, 0.5], [2.5, 1.5], [2.5, 2.5]])
grid_arc_seconds = grid_util.grid_pixels_1d_to_grid_arc_seconds_1d(grid_pixels_1d=grid_pixels, shape=(3, 3),
pixel_scales=(3.0, 6.0), origin=(-1.0, 2.0))
        # -1.0, +2.0 for an origin of (-1.0, 2.0)
assert grid_arc_seconds == pytest.approx(np.array([[2.0, -4.0], [2.0, 2.0], [2.0, 8.0],
[-1.0, -4.0], [-1.0, 2.0], [-1.0, 8.0],
[-4.0, -4.0], [-4.0, 2.0], [-4.0, 8.0]]), 1e-4)
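The tests above pin down a specific coordinate convention. What follows is a minimal sketch, not the library's implementation, of conversion functions consistent with the asserted values: (y, x) arc-second coordinates centred on `origin`, y increasing upward, x increasing rightward, and pixel (0, 0) in the top-left corner; the function names here are illustrative only.
# Hedged reference sketch of the arcsec <-> pixel convention exercised by these tests.
import numpy as np

def arc_seconds_to_pixel_indexes_1d(grid_arc_seconds_1d, shape, pixel_scales, origin=(0.0, 0.0)):
    """Map (y, x) arc-second coordinates to flattened pixel indexes (assumed convention)."""
    y = grid_arc_seconds_1d[:, 0] - origin[0]
    x = grid_arc_seconds_1d[:, 1] - origin[1]
    pixel_y = np.floor(shape[0] / 2.0 - y / pixel_scales[0]).astype(int)  # rows count downward
    pixel_x = np.floor(x / pixel_scales[1] + shape[1] / 2.0).astype(int)  # columns count rightward
    return pixel_y * shape[1] + pixel_x

def pixels_to_arc_seconds_1d(grid_pixels_1d, shape, pixel_scales, origin=(0.0, 0.0)):
    """Inverse mapping from (row, column) pixel coordinates back to (y, x) arc seconds."""
    y = (shape[0] / 2.0 - grid_pixels_1d[:, 0]) * pixel_scales[0] + origin[0]
    x = (grid_pixels_1d[:, 1] - shape[1] / 2.0) * pixel_scales[1] + origin[1]
    return np.stack([y, x], axis=-1)

# Consistent with the first inverse test above: the centre of pixel (0, 0) on a
# (2, 2) grid with pixel_scales=(2.0, 4.0) sits at (1.0, -2.0) arc seconds.
print(pixels_to_arc_seconds_1d(np.array([[0.5, 0.5]]), shape=(2, 2), pixel_scales=(2.0, 4.0)))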
| 56.074126
| 128
| 0.409847
| 5,757
| 40,093
| 2.58659
| 0.021886
| 0.021624
| 0.11282
| 0.060171
| 0.960849
| 0.940434
| 0.930898
| 0.906924
| 0.885434
| 0.874958
| 0
| 0.147442
| 0.406231
| 40,093
| 715
| 129
| 56.074126
| 0.478073
| 0.00873
| 0
| 0.506098
| 0
| 0
| 0.000579
| 0.000579
| 0
| 0
| 0
| 0
| 0.128049
| 1
| 0.091463
| false
| 0
| 0.00813
| 0
| 0.109756
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
bea07d4e74f2f8650f8665de26d1dfeafc4ae807
| 11,312
|
py
|
Python
|
rr/command_line.py
|
Habstinat/py-race-results
|
370981d12e2c65d5658d96b4e2533edbbb495001
|
[
"MIT"
] | 1
|
2017-01-31T19:49:53.000Z
|
2017-01-31T19:49:53.000Z
|
rr/command_line.py
|
hpr/py-race-results
|
370981d12e2c65d5658d96b4e2533edbbb495001
|
[
"MIT"
] | null | null | null |
rr/command_line.py
|
hpr/py-race-results
|
370981d12e2c65d5658d96b4e2533edbbb495001
|
[
"MIT"
] | null | null | null |
"""
Command line interface to RR.
"""
import argparse
import datetime
from .active import ActiveRR
from .brrr import BestRace
from .crrr import CoolRunning
from .csrr import CompuScore
from .nyrr import NewYorkRR
def run_active():
the_description = 'Process Active race results'
parser = argparse.ArgumentParser(description=the_description)
parser.add_argument('-d', '--day', dest='day', nargs=2, help='day range')
parser.add_argument('-m', '--month', dest='month',
default=datetime.date.today().month,
choices=range(1, 13),
type=int,
help='month')
parser.add_argument('-o', '--output', dest='output_file',
default='results.html',
help='output file, default is results.html')
parser.add_argument('-s', '--states',
dest='states',
nargs='+',
default=['NJ'],
help='state, default is NJ')
parser.add_argument('-y', '--year', dest='year',
default=datetime.date.today().year, help='year')
parser.add_argument('--ml', dest='membership_list',
help='membership list', required=True)
parser.add_argument('--verbose',
dest='verbose',
choices=['debug', 'info', 'warning', 'error',
'critical'],
default='info',
help='verbosity level, default is "info"')
args = parser.parse_args()
year = int(args.year)
month = int(args.month)
day = args.day
states = [state.upper() for state in args.states]
if args.day is not None:
start_date = datetime.date(year, month, int(day[0]))
stop_date = datetime.date(year, month, int(day[1]))
else:
# Make the range the entire month up until now.
start_date = datetime.date(year, month, 1)
stop_date = datetime.date(year, month, datetime.datetime.now().day)
o = ActiveRR(date_range=[start_date, stop_date],
membership_list=args.membership_list,
verbose=args.verbose,
states=states,
output_file=args.output_file)
o.run()
def run_bestrace():
    # --rl (an explicit race list) is mutually exclusive with the -d day range;
    # -y and -m have defaults.
the_description = 'Process BestRace race results'
parser = argparse.ArgumentParser(description=the_description)
group = parser.add_mutually_exclusive_group()
group.add_argument('-d', '--day', dest='day',
nargs=2, help='day range')
parser.add_argument('--verbose',
dest='verbose',
choices=['debug', 'info', 'warning', 'error',
'critical'],
default='info',
help='verbosity level, default is "info"')
parser.add_argument('-m', '--month', dest='month',
default=datetime.date.today().month,
choices=range(1, 13),
type=int,
help='month')
parser.add_argument('-o', '--output', dest='output_file',
default='results.html',
help='output file, default is results.html')
parser.add_argument('-y', '--year', dest='year',
default=datetime.date.today().year, help='year')
parser.add_argument('--ml', dest='membership_list',
help='membership list', required=True)
group.add_argument('--rl', dest='race_list',
help='race list')
args = parser.parse_args()
year = int(args.year)
month = int(args.month)
day = args.day
if args.day is not None:
start_date = datetime.date(year, month, int(day[0]))
stop_date = datetime.date(year, month, int(day[1]))
else:
# Make the range the entire month up until now.
start_date = datetime.date(year, month, 1)
stop_date = datetime.date(year, month, datetime.datetime.now().day)
o = BestRace(start_date=start_date,
stop_date=stop_date,
membership_list=args.membership_list,
race_list=args.race_list,
output_file=args.output_file,
verbose=args.verbose)
o.run()
def run_coolrunning():
    # --rl (an explicit race list) is mutually exclusive with the -d day range;
    # -y and -m have defaults.
the_description = 'Process Coolrunning race results'
parser = argparse.ArgumentParser(description=the_description)
group = parser.add_mutually_exclusive_group()
parser.add_argument('-y', '--year',
dest='year',
default=datetime.date.today().year,
help='year')
parser.add_argument('-m', '--month',
dest='month',
default=datetime.date.today().month,
choices=range(1, 13),
type=int,
help='month')
group.add_argument('-d', '--day',
dest='day',
nargs=2,
help='day range')
parser.add_argument('-v', '--verbose',
dest='verbose',
choices=['debug', 'info', 'warning', 'error',
'critical'],
default='info',
help='verbosity level, default is "info"')
parser.add_argument('-o', '--output',
dest='output_file',
default='results.html',
help='output file, default is results.html')
parser.add_argument('-s', '--states',
dest='states',
nargs='+',
default=['ma'],
help='state, default is ma')
parser.add_argument('--ml',
dest='membership_list',
help='membership list',
required=True)
group.add_argument('--rl',
dest='race_list',
help='race list')
args = parser.parse_args()
year = int(args.year)
month = int(args.month)
day = args.day
if args.day is not None:
start_date = datetime.date(year, month, int(day[0]))
stop_date = datetime.date(year, month, int(day[1]))
else:
start_date = None
stop_date = None
o = CoolRunning(start_date=start_date,
stop_date=stop_date,
membership_list=args.membership_list,
race_list=args.race_list,
output_file=args.output_file,
states=args.states,
verbose=args.verbose)
o.run()
def run_compuscore():
    # --rl (an explicit race list) is mutually exclusive with the -d day range.
the_description = 'Process Compuscore race results'
parser = argparse.ArgumentParser(description=the_description)
group = parser.add_mutually_exclusive_group()
parser.add_argument('-y', '--year',
dest='year',
default=datetime.date.today().year,
help='year')
parser.add_argument('-m', '--month',
dest='month',
default=datetime.date.today().month,
choices=range(1, 13),
type=int,
help='month')
group.add_argument('-d', '--day',
dest='day',
default=[datetime.date.today().day,
datetime.date.today().day],
nargs=2,
help='day range')
parser.add_argument('-v', '--verbose',
dest='verbose',
choices=['debug', 'info', 'warning', 'error',
'critical'],
default='info',
help='verbosity level, default is "info"')
parser.add_argument('-o', '--output',
dest='output_file',
default='results.html',
help='output file, default is results.html')
parser.add_argument('--ml', dest='membership_list',
help='membership list', required=True)
group.add_argument('--rl', dest='race_list',
help='race list')
args = parser.parse_args()
year = int(args.year)
month = int(args.month)
day = args.day
start_date = datetime.date(year, month, int(day[0]))
stop_date = datetime.date(year, month, int(day[1]))
o = CompuScore(start_date=start_date,
stop_date=stop_date,
membership_list=args.membership_list,
race_list=args.race_list,
output_file=args.output_file,
verbose=args.verbose)
o.run()
def run_nyrr():
    # --rl (an explicit race list) is mutually exclusive with the -d day range.
the_description = 'Process NYRR race results'
parser = argparse.ArgumentParser(description=the_description)
group = parser.add_mutually_exclusive_group()
parser.add_argument('-y', '--year',
dest='year',
default=datetime.date.today().year,
help='year')
parser.add_argument('-m', '--month',
dest='month',
default=datetime.date.today().month,
choices=range(1, 13),
type=int,
help='month')
group.add_argument('-d', '--day',
dest='day',
default=[datetime.date.today().day,
datetime.date.today().day],
nargs=2,
help='day range')
parser.add_argument('-v', '--verbose',
dest='verbose',
choices=['debug', 'info', 'warning', 'error',
'critical'],
default='info',
help='verbosity level, default is "info"')
parser.add_argument('-o', '--output',
dest='output_file',
default='results.html',
help='output file, default is results.html')
parser.add_argument('--team',
dest='team',
default='RARI',
                        help='team code (e.g. "RARI")')
group.add_argument('--rl', dest='race_list',
help='race list')
args = parser.parse_args()
year = int(args.year)
month = int(args.month)
day = args.day
start_date = datetime.date(year, month, int(day[0]))
stop_date = datetime.date(year, month, int(day[1]))
o = NewYorkRR(start_date=start_date,
stop_date=stop_date,
team=args.team,
race_list=args.race_list,
output_file=args.output_file,
verbose=args.verbose)
o.run()
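The five run_* functions above read like console-script entry points. Below is a hedged sketch of how such functions are commonly registered with setuptools; the script names and package metadata are assumptions, not taken from the repository.
# Illustrative only: registering the run_* functions as console scripts (names assumed).
from setuptools import setup, find_packages

setup(
    name="py-race-results",
    packages=find_packages(),
    entry_points={
        "console_scripts": [
            "rr-active=rr.command_line:run_active",
            "rr-bestrace=rr.command_line:run_bestrace",
            "rr-coolrunning=rr.command_line:run_coolrunning",
            "rr-compuscore=rr.command_line:run_compuscore",
            "rr-nyrr=rr.command_line:run_nyrr",
        ],
    },
)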
| 39.141869
| 77
| 0.495138
| 1,145
| 11,312
| 4.767686
| 0.094323
| 0.072541
| 0.087195
| 0.051291
| 0.89815
| 0.893753
| 0.893753
| 0.888624
| 0.862612
| 0.862612
| 0
| 0.004546
| 0.37774
| 11,312
| 288
| 78
| 39.277778
| 0.77099
| 0.028819
| 0
| 0.810484
| 0
| 0
| 0.138456
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020161
| false
| 0
| 0.028226
| 0
| 0.048387
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe38f2f2fb3ffa17642c30447c00ec7f1091fe6d
| 114
|
py
|
Python
|
mincrawler/pipelines/__init__.py
|
altescy/mincrawler
|
36d28172b37c6825d74ec9887bfabe440838d50f
|
[
"MIT"
] | 1
|
2020-05-31T02:16:40.000Z
|
2020-05-31T02:16:40.000Z
|
mincrawler/pipelines/__init__.py
|
altescy/mincrawler
|
36d28172b37c6825d74ec9887bfabe440838d50f
|
[
"MIT"
] | null | null | null |
mincrawler/pipelines/__init__.py
|
altescy/mincrawler
|
36d28172b37c6825d74ec9887bfabe440838d50f
|
[
"MIT"
] | 1
|
2021-09-21T22:36:42.000Z
|
2021-09-21T22:36:42.000Z
|
from mincrawler.pipelines.executors import PipelineExecutor
from mincrawler.pipelines.stages import PipelineStage
| 38
| 59
| 0.894737
| 12
| 114
| 8.5
| 0.666667
| 0.27451
| 0.45098
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070175
| 114
| 2
| 60
| 57
| 0.962264
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fe3b564fab750bd551ddfe2898a9e941c727a571
| 168,477
|
py
|
Python
|
djstripe/migrations/0001_initial.py
|
alecdalelio/dj-stripe
|
24c1116c0809b338ab9c11707936bd95bbccdeaf
|
[
"MIT"
] | 1
|
2021-06-05T09:22:23.000Z
|
2021-06-05T09:22:23.000Z
|
djstripe/migrations/0001_initial.py
|
alecdalelio/dj-stripe
|
24c1116c0809b338ab9c11707936bd95bbccdeaf
|
[
"MIT"
] | 7
|
2021-09-01T05:17:42.000Z
|
2022-03-31T06:13:34.000Z
|
djstripe/migrations/0001_initial.py
|
alecdalelio/dj-stripe
|
24c1116c0809b338ab9c11707936bd95bbccdeaf
|
[
"MIT"
] | 1
|
2022-02-01T14:17:27.000Z
|
2022-02-01T14:17:27.000Z
|
# Generated by Django 3.2.10 on 2021-12-31 09:11
import uuid
import django.core.validators
import django.db.models.deletion
from django.conf import settings
from django.db import migrations, models
import djstripe.enums
import djstripe.fields
import djstripe.models.webhooks
DJSTRIPE_SUBSCRIBER_MODEL: str = getattr(
settings, "DJSTRIPE_SUBSCRIBER_MODEL", settings.AUTH_USER_MODEL
) # type: ignore
# Needed here for external apps that have added the DJSTRIPE_SUBSCRIBER_MODEL
# *not* in the '__first__' migration of the app, which results in:
# ValueError: Related model 'DJSTRIPE_SUBSCRIBER_MODEL' cannot be resolved
# Context: https://github.com/dj-stripe/dj-stripe/issues/707
DJSTRIPE_SUBSCRIBER_MODEL_MIGRATION_DEPENDENCY = getattr(
settings, "DJSTRIPE_SUBSCRIBER_MODEL_MIGRATION_DEPENDENCY", "__first__"
)
DJSTRIPE_SUBSCRIBER_MODEL_DEPENDENCY = migrations.swappable_dependency(
DJSTRIPE_SUBSCRIBER_MODEL
)
if DJSTRIPE_SUBSCRIBER_MODEL != settings.AUTH_USER_MODEL:
DJSTRIPE_SUBSCRIBER_MODEL_DEPENDENCY = migrations.migration.SwappableTuple(
(
DJSTRIPE_SUBSCRIBER_MODEL.split(".", 1)[0],
DJSTRIPE_SUBSCRIBER_MODEL_MIGRATION_DEPENDENCY,
),
DJSTRIPE_SUBSCRIBER_MODEL,
)
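# Illustration (an assumption, not part of the generated migration): a project that
# swaps the subscriber model would typically set, in its Django settings,
#     DJSTRIPE_SUBSCRIBER_MODEL = "myapp.Organization"            # hypothetical app/model
#     DJSTRIPE_SUBSCRIBER_MODEL_MIGRATION_DEPENDENCY = "0001_initial"
# and the SwappableTuple built above then points this migration at that app's migration.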
class Migration(migrations.Migration):
initial = True
dependencies = [DJSTRIPE_SUBSCRIBER_MODEL_DEPENDENCY]
operations = [
migrations.CreateModel(
name="Account",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("business_profile", djstripe.fields.JSONField(blank=True, null=True)),
(
"business_type",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.BusinessType,
max_length=10,
),
),
(
"charges_enabled",
models.BooleanField(
help_text="Whether the account can create live charges"
),
),
(
"country",
models.CharField(
help_text="The country of the account", max_length=2
),
),
("company", djstripe.fields.JSONField(blank=True, null=True)),
(
"default_currency",
djstripe.fields.StripeCurrencyCodeField(max_length=3),
),
(
"details_submitted",
models.BooleanField(
help_text="Whether account details have been submitted. Standard accounts cannot receive payouts before this is true."
),
),
(
"email",
models.CharField(
help_text="The primary user's email address.", max_length=255
),
),
("individual", djstripe.fields.JSONField(blank=True, null=True)),
(
"payouts_enabled",
models.BooleanField(
help_text="Whether Stripe can send payouts to this account"
),
),
(
"product_description",
models.CharField(
blank=True,
default="",
help_text="Internal-only description of the product sold or service provided by the business. It's used by Stripe for risk and underwriting purposes.",
max_length=255,
),
),
("requirements", djstripe.fields.JSONField(blank=True, null=True)),
("settings", djstripe.fields.JSONField(blank=True, null=True)),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.AccountType, max_length=8
),
),
("tos_acceptance", djstripe.fields.JSONField(blank=True, null=True)),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Charge",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
(
"amount_refunded",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
(
"captured",
models.BooleanField(
default=False,
help_text="If the charge was created without capturing, this boolean represents whether or not it is still uncaptured or has since been captured.",
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"failure_code",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.ApiErrorCode,
max_length=42,
),
),
(
"failure_message",
models.TextField(
blank=True,
default="",
help_text="Message to user further explaining reason for charge failure if available.",
max_length=5000,
),
),
("fraud_details", djstripe.fields.JSONField(blank=True, null=True)),
("outcome", djstripe.fields.JSONField(blank=True, null=True)),
(
"paid",
models.BooleanField(
default=False,
help_text="True if the charge succeeded, or was successfully authorized for later capture, False otherwise.",
),
),
(
"payment_method_details",
djstripe.fields.JSONField(blank=True, null=True),
),
(
"receipt_email",
models.TextField(
blank=True,
default="",
help_text="The email address that the receipt for this charge was sent to.",
max_length=800,
),
),
(
"receipt_number",
models.CharField(
blank=True,
default="",
help_text="The transaction number that appears on email receipts sent for this charge.",
max_length=14,
),
),
(
"receipt_url",
models.TextField(
blank=True,
default="",
help_text="This is the URL to view the receipt for this charge. The receipt is kept up-to-date to the latest state of the charge, including any refunds. If the charge is for an Invoice, the receipt will be stylized as an Invoice receipt.",
max_length=5000,
),
),
(
"refunded",
models.BooleanField(
default=False,
help_text="Whether or not the charge has been fully refunded. If the charge is only partially refunded, this attribute will still be false.",
),
),
("shipping", djstripe.fields.JSONField(blank=True, null=True)),
(
"statement_descriptor",
models.CharField(
blank=True,
default="",
help_text="An arbitrary string to be displayed on your customer's credit card statement. The statement description may not include <>\"' characters, and will appear on your customer's statement in capital letters. Non-ASCII characters are automatically stripped. While most banks display this information consistently, some may display it incorrectly or not at all.",
max_length=22,
),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.ChargeStatus, max_length=9
),
),
(
"transfer_group",
models.CharField(
blank=True,
default="",
help_text="A string that identifies this transaction as part of a group.",
max_length=255,
),
),
(
"account",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="charges",
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Coupon",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("id", djstripe.fields.StripeIdField(max_length=500)),
(
"amount_off",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
(
"currency",
djstripe.fields.StripeCurrencyCodeField(
blank=True, max_length=3, null=True
),
),
(
"duration",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.CouponDuration, max_length=9
),
),
(
"duration_in_months",
models.PositiveIntegerField(
blank=True,
help_text="If `duration` is `repeating`, the number of months the coupon applies.",
null=True,
),
),
(
"max_redemptions",
models.PositiveIntegerField(
blank=True,
help_text="Maximum number of times this coupon can be redeemed, in total, before it is no longer valid.",
null=True,
),
),
(
"percent_off",
djstripe.fields.StripePercentField(
blank=True,
decimal_places=2,
max_digits=5,
null=True,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(100),
],
),
),
(
"redeem_by",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"times_redeemed",
models.PositiveIntegerField(
default=0,
editable=False,
help_text="Number of times this coupon has been applied to a customer.",
),
),
(
"name",
models.TextField(
blank=True,
default="",
help_text="Name of the coupon displayed to customers on for instance invoices or receipts.",
max_length=5000,
),
),
],
options={"unique_together": {("id", "livemode")}},
),
migrations.CreateModel(
name="PaymentMethod",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("billing_details", djstripe.fields.JSONField()),
("card", djstripe.fields.JSONField()),
("card_present", djstripe.fields.JSONField(blank=True, null=True)),
(
"type",
models.CharField(
blank=True,
help_text="The type of the PaymentMethod. An additional hash is included on the PaymentMethod with a name matching this value. It contains additional information specific to the PaymentMethod type.",
max_length=255,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Customer",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("balance", djstripe.fields.StripeQuantumCurrencyAmountField()),
(
"business_vat_id",
models.CharField(
blank=True,
default="",
help_text="The customer's VAT identification number.",
max_length=20,
),
),
(
"currency",
djstripe.fields.StripeCurrencyCodeField(
blank=True, default="", max_length=3
),
),
("delinquent", models.BooleanField()),
(
"coupon_start",
djstripe.fields.StripeDateTimeField(
blank=True, editable=False, null=True
),
),
(
"coupon_end",
djstripe.fields.StripeDateTimeField(
blank=True, editable=False, null=True
),
),
("email", models.TextField(blank=True, default="", max_length=5000)),
("shipping", djstripe.fields.JSONField(blank=True, null=True)),
("date_purged", models.DateTimeField(editable=False, null=True)),
(
"coupon",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.coupon",
),
),
(
"default_source",
djstripe.fields.PaymentMethodForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="customers",
to="djstripe.paymentmethod",
),
),
(
"subscriber",
models.ForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="djstripe_customers",
to=DJSTRIPE_SUBSCRIBER_MODEL,
),
),
("address", djstripe.fields.JSONField(blank=True, null=True)),
(
"invoice_prefix",
models.CharField(
blank=True,
default="",
help_text="The prefix for the customer used to generate unique invoice numbers.",
max_length=255,
),
),
("invoice_settings", djstripe.fields.JSONField(blank=True, null=True)),
(
"name",
models.TextField(
blank=True,
default="",
help_text="The customer's full name or business name.",
max_length=5000,
),
),
(
"phone",
models.TextField(
blank=True,
default="",
help_text="The customer's phone number.",
max_length=5000,
),
),
("preferred_locales", djstripe.fields.JSONField(blank=True, null=True)),
(
"tax_exempt",
djstripe.fields.StripeEnumField(
default="", enum=djstripe.enums.CustomerTaxExempt, max_length=7
),
),
(
"default_payment_method",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="djstripe.paymentmethod",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"unique_together": {("subscriber", "livemode")}},
),
migrations.CreateModel(
name="Dispute",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
("currency", djstripe.fields.StripeCurrencyCodeField()),
("evidence", djstripe.fields.JSONField()),
("evidence_details", djstripe.fields.JSONField()),
(
"is_charge_refundable",
models.BooleanField(
help_text="If true, it is still possible to refund the disputed payment. Once the payment has been fully refunded, no further funds will be withdrawn from your Stripe account as a result of this dispute."
),
),
(
"reason",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.DisputeReason, max_length=25
),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.DisputeStatus, max_length=22
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Event",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"api_version",
models.CharField(
blank=True,
help_text="the API version at which the event data was rendered. Blank for old entries only, all new entries will have this value",
max_length=15,
),
),
("data", djstripe.fields.JSONField()),
(
"request_id",
models.CharField(
blank=True,
default="",
help_text="Information about the request that triggered this event, for traceability purposes. If empty string then this is an old entry without that data. If Null then this is not an old entry, but a Stripe 'automated' event with no associated request.",
max_length=50,
),
),
("idempotency_key", models.TextField(blank=True, default="")),
(
"type",
models.CharField(
help_text="Stripe's event description code", max_length=250
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="FileUpload",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"filename",
models.CharField(
help_text="A filename for the file, suitable for saving to a filesystem.",
max_length=255,
),
),
(
"purpose",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.FilePurpose, max_length=24
),
),
(
"size",
models.IntegerField(
help_text="The size in bytes of the file upload object."
),
),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.FileType, max_length=4
),
),
(
"url",
models.CharField(
help_text="A read-only URL where the uploaded file can be accessed.",
max_length=200,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="DjstripePaymentMethod",
fields=[
(
"id",
models.CharField(max_length=255, primary_key=True, serialize=False),
),
("type", models.CharField(db_index=True, max_length=12)),
],
),
migrations.CreateModel(
name="Plan",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"active",
models.BooleanField(
help_text="Whether the plan is currently available for new subscriptions."
),
),
(
"aggregate_usage",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.PlanAggregateUsage,
max_length=18,
),
),
(
"amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
(
"billing_scheme",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.BillingScheme,
max_length=8,
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"interval",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.PlanInterval, max_length=5
),
),
(
"interval_count",
models.IntegerField(
help_text="The number of intervals (specified in the interval property) between each subscription billing.",
null=True,
),
),
(
"nickname",
models.TextField(
blank=True,
default="",
help_text="A brief description of the plan, hidden from customers.",
max_length=5000,
),
),
("tiers", djstripe.fields.JSONField(blank=True, null=True)),
(
"tiers_mode",
djstripe.fields.StripeEnumField(
blank=True,
enum=djstripe.enums.PriceTiersMode,
max_length=9,
null=True,
),
),
("transform_usage", djstripe.fields.JSONField(blank=True, null=True)),
(
"trial_period_days",
models.IntegerField(
help_text="Number of trial period days granted when subscribing a customer to this plan. Null if the plan has no trial period.",
null=True,
),
),
(
"usage_type",
djstripe.fields.StripeEnumField(
default="licensed",
enum=djstripe.enums.PriceUsageType,
max_length=8,
),
),
(
"name",
models.TextField(
blank=True,
help_text="Name of the plan, to be displayed on invoices and in the web interface.",
null=True,
),
),
(
"statement_descriptor",
models.CharField(
blank=True,
help_text="An arbitrary string to be displayed on your customer's credit card statement. The statement description may not include <>\"' characters, and will appear on your customer's statement in capital letters. Non-ASCII characters are automatically stripped. While most banks display this information consistently, some may display it incorrectly or not at all.",
max_length=22,
null=True,
),
),
],
options={"ordering": ["amount"]},
),
migrations.CreateModel(
name="Product",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"name",
models.TextField(
help_text="The product's name, meant to be displayable to the customer. Applicable to both `service` and `good` types.",
max_length=5000,
),
),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.ProductType, max_length=7
),
),
(
"active",
models.BooleanField(
help_text="Whether the product is currently available for purchase. Only applicable to products of `type=good`.",
null=True,
),
),
("attributes", djstripe.fields.JSONField(blank=True, null=True)),
(
"caption",
models.TextField(
blank=True,
default="",
help_text="A short one-line description of the product, meant to be displayableto the customer. Only applicable to products of `type=good`.",
max_length=5000,
),
),
("deactivate_on", djstripe.fields.JSONField(blank=True, null=True)),
("images", djstripe.fields.JSONField(blank=True, null=True)),
(
"package_dimensions",
djstripe.fields.JSONField(blank=True, null=True),
),
(
"shippable",
models.BooleanField(
blank=True,
help_text="Whether this product is a shipped good. Only applicable to products of `type=good`.",
null=True,
),
),
(
"url",
models.CharField(
blank=True,
help_text="A URL of a publicly-accessible webpage for this product. Only applicable to products of `type=good`.",
max_length=799,
null=True,
),
),
(
"statement_descriptor",
models.CharField(
blank=True,
default="",
help_text="Extra information about a product which will appear on your customer's credit card statement. In the case that multiple products are billed at once, the first statement descriptor will be used. Only available on products of type=`service`.",
max_length=22,
),
),
("unit_label", models.CharField(blank=True, default="", max_length=12)),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Subscription",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"application_fee_percent",
djstripe.fields.StripePercentField(
blank=True,
decimal_places=2,
max_digits=5,
null=True,
validators=[
django.core.validators.MinValueValidator(1.0),
django.core.validators.MaxValueValidator(100.0),
],
),
),
(
"collection_method",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.InvoiceCollectionMethod, max_length=20
),
),
(
"billing_cycle_anchor",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"cancel_at_period_end",
models.BooleanField(
default=False,
help_text="If the subscription has been canceled with the ``at_period_end`` flag set to true, ``cancel_at_period_end`` on the subscription will be true. You can use this attribute to determine whether a subscription that has a status of active is scheduled to be canceled at the end of the current period.",
),
),
(
"canceled_at",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
("current_period_end", djstripe.fields.StripeDateTimeField()),
("current_period_start", djstripe.fields.StripeDateTimeField()),
(
"days_until_due",
models.IntegerField(
blank=True,
help_text="Number of days a customer has to pay invoices generated by this subscription. This value will be `null` for subscriptions where `billing=charge_automatically`.",
null=True,
),
),
("discount", djstripe.fields.JSONField(blank=True, null=True)),
(
"ended_at",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"next_pending_invoice_item_invoice",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"pending_invoice_item_interval",
djstripe.fields.JSONField(blank=True, null=True),
),
("pending_update", djstripe.fields.JSONField(blank=True, null=True)),
(
"quantity",
models.IntegerField(
blank=True,
help_text="The quantity applied to this subscription. This value will be `null` for multi-plan subscriptions",
null=True,
),
),
("start", djstripe.fields.StripeDateTimeField(null=True)),
(
"start_date",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.SubscriptionStatus, max_length=18
),
),
(
"tax_percent",
djstripe.fields.StripePercentField(
blank=True,
decimal_places=2,
max_digits=5,
null=True,
validators=[
django.core.validators.MinValueValidator(1.0),
django.core.validators.MaxValueValidator(100.0),
],
),
),
(
"trial_end",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"trial_start",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"customer",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="subscriptions",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"plan",
models.ForeignKey(
blank=True,
help_text="The plan associated with this subscription. This value will be `null` for multi-plan subscriptions",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="subscriptions",
to="djstripe.plan",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Transfer",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
(
"amount_reversed",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
("destination", djstripe.fields.StripeIdField(max_length=255)),
(
"destination_payment",
djstripe.fields.StripeIdField(
blank=True, max_length=255, null=True
),
),
(
"reversed",
models.BooleanField(
default=False,
help_text="Whether or not the transfer has been fully reversed. If the transfer is only partially reversed, this attribute will still be false.",
),
),
(
"source_transaction",
djstripe.fields.StripeIdField(max_length=255, null=True),
),
(
"source_type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.LegacySourceType, max_length=16
),
),
(
"transfer_group",
models.CharField(
blank=True,
default="",
help_text="A string that identifies this transaction as part of a group.",
max_length=255,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="WebhookEventTrigger",
fields=[
("id", models.BigAutoField(primary_key=True, serialize=False)),
(
"remote_ip",
models.GenericIPAddressField(
help_text="IP address of the request client."
),
),
("headers", djstripe.fields.JSONField()),
("body", models.TextField(blank=True)),
(
"valid",
models.BooleanField(
default=False,
help_text="Whether or not the webhook event has passed validation",
),
),
(
"processed",
models.BooleanField(
default=False,
help_text="Whether or not the webhook event has been successfully processed",
),
),
("exception", models.CharField(blank=True, max_length=128)),
(
"traceback",
models.TextField(
blank=True,
help_text="Traceback if an exception was thrown during processing",
),
),
(
"djstripe_version",
models.CharField(
default=djstripe.models.webhooks._get_version,
help_text="The version of dj-stripe when the webhook was received",
max_length=32,
),
),
("created", models.DateTimeField(auto_now_add=True)),
("updated", models.DateTimeField(auto_now=True)),
(
"event",
djstripe.fields.StripeForeignKey(
blank=True,
help_text="Event object contained in the (valid) Webhook",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.event",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
),
migrations.AddField(
model_name="paymentmethod",
name="customer",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="payment_methods",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.AddField(
model_name="plan",
name="product",
field=djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.product",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.CreateModel(
name="Invoice",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"amount_due",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
(
"amount_paid",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11, null=True
),
),
(
"amount_remaining",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11, null=True
),
),
(
"application_fee_amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
(
"attempt_count",
models.IntegerField(
help_text="Number of payment attempts made for this invoice, from the perspective of the payment retry schedule. Any payment attempt counts as the first attempt, and subsequently only automatic retries increment the attempt count. In other words, manual payment attempts after the first attempt do not affect the retry schedule."
),
),
(
"attempted",
models.BooleanField(
default=False,
help_text="Whether or not an attempt has been made to pay the invoice. An invoice is not attempted until 1 hour after the ``invoice.created`` webhook, for example, so you might not want to display that invoice as unpaid to your users.",
),
),
(
"collection_method",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.InvoiceCollectionMethod,
max_length=20,
null=True,
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"due_date",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"ending_balance",
djstripe.fields.StripeQuantumCurrencyAmountField(null=True),
),
(
"hosted_invoice_url",
models.TextField(
blank=True,
default="",
help_text="The URL for the hosted invoice page, which allows customers to view and pay an invoice. If the invoice has not been frozen yet, this will be null.",
max_length=799,
),
),
(
"invoice_pdf",
models.TextField(
blank=True,
default="",
help_text="The link to download the PDF for the invoice. If the invoice has not been frozen yet, this will be null.",
max_length=799,
),
),
(
"next_payment_attempt",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"number",
models.CharField(
blank=True,
default="",
help_text="A unique, identifying string that appears on emails sent to the customer for this invoice. This starts with the customer's unique invoice_prefix if it is specified.",
max_length=64,
),
),
(
"paid",
models.BooleanField(
default=False,
help_text="Whether payment was successfully collected for this invoice. An invoice can be paid (most commonly) with a charge or with credit from the customer's account balance.",
),
),
("period_end", djstripe.fields.StripeDateTimeField()),
("period_start", djstripe.fields.StripeDateTimeField()),
(
"receipt_number",
models.CharField(
blank=True,
help_text="This is the transaction number that appears on email receipts sent for this invoice.",
max_length=64,
null=True,
),
),
(
"starting_balance",
djstripe.fields.StripeQuantumCurrencyAmountField(),
),
(
"statement_descriptor",
models.CharField(
blank=True,
default="",
help_text="An arbitrary string to be displayed on your customer's credit card statement. The statement description may not include <>\"' characters, and will appear on your customer's statement in capital letters. Non-ASCII characters are automatically stripped. While most banks display this information consistently, some may display it incorrectly or not at all.",
max_length=22,
),
),
(
"subscription_proration_date",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"subtotal",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
(
"tax",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
(
"tax_percent",
djstripe.fields.StripePercentField(
blank=True,
decimal_places=2,
max_digits=5,
null=True,
validators=[
django.core.validators.MinValueValidator(1.0),
django.core.validators.MaxValueValidator(100.0),
],
),
),
(
"total",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2,
max_digits=11,
verbose_name="Total (as decimal) after discount.",
),
),
(
"webhooks_delivered_at",
djstripe.fields.StripeDateTimeField(null=True),
),
(
"charge",
models.OneToOneField(
help_text="The latest charge generated for this invoice, if any.",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="latest_invoice",
to="djstripe.charge",
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="invoices",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"subscription",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="invoices",
to="djstripe.subscription",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"auto_advance",
models.BooleanField(
help_text="Controls whether Stripe will perform automatic collection of the invoice. When false, the invoice's state will not automatically advance without an explicit action.",
null=True,
),
),
(
"status_transitions",
djstripe.fields.JSONField(blank=True, null=True),
),
],
options={"ordering": ["-created"]},
),
migrations.CreateModel(
name="IdempotencyKey",
fields=[
(
"uuid",
models.UUIDField(
default=uuid.uuid4,
editable=False,
primary_key=True,
serialize=False,
),
),
("action", models.CharField(max_length=100)),
(
"livemode",
models.BooleanField(
help_text="Whether the key was used in live or test mode."
),
),
("created", models.DateTimeField(auto_now_add=True)),
],
options={"unique_together": {("action", "livemode")}},
),
migrations.AddField(
model_name="charge",
name="customer",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="charges",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.AddField(
model_name="charge",
name="dispute",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="charges",
to="djstripe.dispute",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.AddField(
model_name="charge",
name="invoice",
field=djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="charges",
to="djstripe.invoice",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.AddField(
model_name="charge",
name="source",
field=djstripe.fields.PaymentMethodForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="charges",
to="djstripe.paymentmethod",
),
),
migrations.AddField(
model_name="charge",
name="transfer",
field=djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.transfer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.AddField(
model_name="account",
name="branding_icon",
field=djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="icon_account",
to="djstripe.fileupload",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.CreateModel(
name="BankAccount",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"account_holder_name",
models.TextField(
blank=True,
default="",
help_text="The name of the person or business that owns the bank account.",
max_length=5000,
),
),
(
"account_holder_type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.BankAccountHolderType, max_length=10
),
),
(
"bank_name",
models.CharField(
help_text="Name of the bank associated with the routing number (e.g., `WELLS FARGO`).",
max_length=255,
),
),
(
"country",
models.CharField(
help_text="Two-letter ISO code representing the country the bank account is located in.",
max_length=2,
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"default_for_currency",
models.BooleanField(
help_text="Whether this external account is the default account for its currency.",
null=True,
),
),
(
"fingerprint",
models.CharField(
help_text="Uniquely identifies this particular bank account. You can use this attribute to check whether two bank accounts are the same.",
max_length=16,
),
),
("last4", models.CharField(max_length=4)),
(
"routing_number",
models.CharField(
help_text="The routing transit number for the bank account.",
max_length=255,
),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.BankAccountStatus, max_length=19
),
),
(
"account",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.PROTECT,
related_name="bank_account",
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="bank_account",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="CountrySpec",
fields=[
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"id",
models.CharField(max_length=2, primary_key=True, serialize=False),
),
(
"default_currency",
djstripe.fields.StripeCurrencyCodeField(max_length=3),
),
("supported_bank_account_currencies", djstripe.fields.JSONField()),
("supported_payment_currencies", djstripe.fields.JSONField()),
("supported_payment_methods", djstripe.fields.JSONField()),
("supported_transfer_countries", djstripe.fields.JSONField()),
("verification_fields", djstripe.fields.JSONField()),
],
options={"abstract": False},
),
migrations.CreateModel(
name="BalanceTransaction",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"amount",
djstripe.fields.StripeQuantumCurrencyAmountField(
help_text="Gross amount of the transaction, in cents."
),
),
(
"available_on",
djstripe.fields.StripeDateTimeField(
help_text="The date the transaction's net funds will become available in the Stripe balance."
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"exchange_rate",
models.DecimalField(decimal_places=6, max_digits=8, null=True),
),
("fee", djstripe.fields.StripeQuantumCurrencyAmountField()),
("fee_details", djstripe.fields.JSONField()),
("net", djstripe.fields.StripeQuantumCurrencyAmountField()),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.BalanceTransactionStatus, max_length=9
),
),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.BalanceTransactionType, max_length=29
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="ScheduledQueryRun",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("data_load_time", djstripe.fields.StripeDateTimeField()),
("error", djstripe.fields.JSONField(blank=True, null=True)),
("result_available_until", djstripe.fields.StripeDateTimeField()),
(
"sql",
models.TextField(help_text="SQL for the query.", max_length=5000),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.ScheduledQueryRunStatus, max_length=9
),
),
(
"title",
models.TextField(help_text="Title of the query.", max_length=5000),
),
(
"file",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.fileupload",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="SubscriptionItem",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"quantity",
models.PositiveIntegerField(
blank=True,
help_text="The quantity of the plan to which the customer should be subscribed.",
null=True,
),
),
(
"plan",
models.ForeignKey(
help_text="The plan the customer is subscribed to.",
on_delete=django.db.models.deletion.CASCADE,
related_name="subscription_items",
to="djstripe.plan",
),
),
(
"subscription",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="items",
to="djstripe.subscription",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="TransferReversal",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"balance_transaction",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="transfer_reversals",
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"transfer",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="reversals",
to="djstripe.transfer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="UsageRecord",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"quantity",
models.PositiveIntegerField(
help_text="The quantity of the plan to which the customer should be subscribed."
),
),
(
"subscription_item",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="usage_records",
to="djstripe.subscriptionitem",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="ApplicationFee",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
("amount_refunded", djstripe.fields.StripeQuantumCurrencyAmountField()),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"refunded",
models.BooleanField(
help_text="Whether the fee has been fully refunded. If the fee is only partially refunded, this attribute will still be false."
),
),
(
"balance_transaction",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"charge",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.charge",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="ApplicationFeeRefund",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"balance_transaction",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"fee",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="refunds",
to="djstripe.applicationfee",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.AddField(
model_name="charge",
name="balance_transaction",
field=djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.AddField(
model_name="transfer",
name="balance_transaction",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.CreateModel(
name="Card",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"address_city",
models.TextField(
blank=True,
default="",
help_text="City/District/Suburb/Town/Village.",
max_length=5000,
),
),
(
"address_country",
models.TextField(
blank=True,
default="",
help_text="Billing address country.",
max_length=5000,
),
),
(
"address_line1",
models.TextField(
blank=True,
default="",
help_text="Street address/PO Box/Company name.",
max_length=5000,
),
),
(
"address_line1_check",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.CardCheckResult,
max_length=11,
),
),
(
"address_line2",
models.TextField(
blank=True,
default="",
help_text="Apartment/Suite/Unit/Building.",
max_length=5000,
),
),
(
"address_state",
models.TextField(
blank=True,
default="",
help_text="State/County/Province/Region.",
max_length=5000,
),
),
(
"address_zip",
models.TextField(
blank=True,
default="",
help_text="ZIP or postal code.",
max_length=5000,
),
),
(
"address_zip_check",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.CardCheckResult,
max_length=11,
),
),
(
"brand",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.CardBrand, max_length=16
),
),
(
"country",
models.CharField(
blank=True,
default="",
help_text="Two-letter ISO code representing the country of the card.",
max_length=2,
),
),
(
"cvc_check",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.CardCheckResult,
max_length=11,
),
),
(
"dynamic_last4",
models.CharField(
blank=True,
default="",
help_text="(For tokenized numbers only.) The last four digits of the device account number.",
max_length=4,
),
),
("exp_month", models.IntegerField(help_text="Card expiration month.")),
("exp_year", models.IntegerField(help_text="Card expiration year.")),
(
"fingerprint",
models.CharField(
blank=True,
default="",
help_text="Uniquely identifies this particular card number.",
max_length=16,
),
),
(
"funding",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.CardFundingType, max_length=7
),
),
(
"last4",
models.CharField(
help_text="Last four digits of Card number.", max_length=4
),
),
(
"name",
models.TextField(
blank=True,
default="",
help_text="Cardholder name.",
max_length=5000,
),
),
(
"tokenization_method",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.CardTokenizationMethod,
max_length=11,
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="legacy_cards",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.AddField(
model_name="account",
name="branding_logo",
field=djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="logo_account",
to="djstripe.fileupload",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.CreateModel(
name="SetupIntent",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"application",
models.CharField(
blank=True,
help_text="ID of the Connect application that created the SetupIntent.",
max_length=255,
),
),
(
"cancellation_reason",
djstripe.fields.StripeEnumField(
blank=True,
enum=djstripe.enums.SetupIntentCancellationReason,
max_length=21,
),
),
(
"client_secret",
models.TextField(
blank=True,
help_text="The client secret of this SetupIntent. Used for client-side retrieval using a publishable key.",
max_length=5000,
),
),
("last_setup_error", djstripe.fields.JSONField(blank=True, null=True)),
("next_action", djstripe.fields.JSONField(blank=True, null=True)),
("payment_method_types", djstripe.fields.JSONField()),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.SetupIntentStatus, max_length=23
),
),
(
"usage",
djstripe.fields.StripeEnumField(
default="off_session",
enum=djstripe.enums.IntentUsage,
max_length=11,
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"on_behalf_of",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"payment_method",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.paymentmethod",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="PaymentIntent",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
(
"amount_capturable",
djstripe.fields.StripeQuantumCurrencyAmountField(),
),
("amount_received", djstripe.fields.StripeQuantumCurrencyAmountField()),
(
"canceled_at",
djstripe.fields.StripeDateTimeField(
blank=True, default=None, null=True
),
),
(
"cancellation_reason",
djstripe.fields.StripeEnumField(
blank=True,
enum=djstripe.enums.PaymentIntentCancellationReason,
max_length=21,
),
),
(
"capture_method",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.CaptureMethod, max_length=9
),
),
(
"client_secret",
models.TextField(
help_text="The client secret of this PaymentIntent. Used for client-side retrieval using a publishable key.",
max_length=5000,
),
),
(
"confirmation_method",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.ConfirmationMethod, max_length=9
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"description",
models.TextField(
blank=True,
default="",
help_text="An arbitrary string attached to the object. Often useful for displaying to users.",
max_length=1000,
),
),
(
"last_payment_error",
djstripe.fields.JSONField(blank=True, null=True),
),
(
"next_action",
djstripe.fields.JSONField(blank=True, null=True),
),
(
"payment_method_types",
djstripe.fields.JSONField(),
),
(
"receipt_email",
models.CharField(
blank=True,
help_text="Email address that the receipt for the resulting payment will be sent to.",
max_length=255,
),
),
(
"setup_future_usage",
djstripe.fields.StripeEnumField(
blank=True,
enum=djstripe.enums.IntentUsage,
max_length=11,
null=True,
),
),
("shipping", djstripe.fields.JSONField(blank=True, null=True)),
(
"statement_descriptor",
models.CharField(
blank=True,
help_text="For non-card charges, you can use this value as the complete description that appears on your customers' statements. Must contain at least one letter, maximum 22 characters.",
max_length=22,
),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.PaymentIntentStatus, max_length=23
),
),
("transfer_data", djstripe.fields.JSONField(blank=True, null=True)),
(
"transfer_group",
models.CharField(
blank=True,
help_text="A string that identifies the resulting payment as part of a group. See the PaymentIntents Connect usage guide for details.",
max_length=255,
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"on_behalf_of",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.account",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"payment_method",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.paymentmethod",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.AddField(
model_name="charge",
name="payment_intent",
field=djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="charges",
to="djstripe.paymentintent",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.AddField(
model_name="invoice",
name="payment_intent",
field=models.OneToOneField(
help_text="The PaymentIntent associated with this invoice. The PaymentIntent is generated when the invoice is finalized, and can then be used to pay the invoice.Note that voiding an invoice will cancel the PaymentIntent",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.paymentintent",
),
),
migrations.AddField(
model_name="subscription",
name="pending_setup_intent",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="setup_intents",
to="djstripe.setupintent",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.CreateModel(
name="Session",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"billing_address_collection",
djstripe.fields.StripeEnumField(
blank=True,
enum=djstripe.enums.SessionBillingAddressCollection,
max_length=8,
),
),
(
"cancel_url",
models.TextField(
blank=True,
help_text="The URL the customer will be directed to if theydecide to cancel payment and return to your website.",
max_length=5000,
),
),
(
"client_reference_id",
models.TextField(
blank=True,
help_text="A unique string to reference the Checkout Session.This can be a customer ID, a cart ID, or similar, andcan be used to reconcile the session with your internal systems.",
max_length=5000,
),
),
(
"customer_email",
models.CharField(
blank=True,
help_text="If provided, this value will be used when the Customer object is created.",
max_length=255,
),
),
("display_items", djstripe.fields.JSONField(blank=True, null=True)),
(
"locale",
models.CharField(
blank=True,
help_text="The IETF language tag of the locale Checkout is displayed in.If blank or auto, the browser's locale is used.",
max_length=255,
),
),
("payment_method_types", djstripe.fields.JSONField()),
(
"submit_type",
djstripe.fields.StripeEnumField(
blank=True, enum=djstripe.enums.SubmitTypeStatus, max_length=6
),
),
(
"success_url",
models.TextField(
blank=True,
help_text="The URL the customer will be directed to after the payment or subscriptioncreation is successful.",
max_length=5000,
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"payment_intent",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.paymentintent",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"subscription",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.subscription",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"mode",
djstripe.fields.StripeEnumField(
blank=True, enum=djstripe.enums.SessionMode, max_length=12
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.AddField(
model_name="charge",
name="payment_method",
field=djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="charges",
to="djstripe.paymentmethod",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.AddField(
model_name="invoice",
name="account_country",
field=models.CharField(
blank=True,
default="",
help_text="The country of the business associated with this invoice, most often the business creating the invoice.",
max_length=2,
),
),
migrations.AddField(
model_name="invoice",
name="account_name",
field=models.TextField(
blank=True,
help_text="The public name of the business associated with this invoice, most often the business creating the invoice.",
max_length=5000,
),
),
migrations.AddField(
model_name="invoice",
name="billing_reason",
field=djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.InvoiceBillingReason,
max_length=22,
),
),
migrations.AddField(
model_name="invoice",
name="customer_address",
field=djstripe.fields.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name="invoice",
name="customer_email",
field=models.TextField(
blank=True,
help_text="The customer's email. Until the invoice is finalized, this field will equal customer.email. Once the invoice is finalized, this field will no longer be updated.",
max_length=5000,
),
),
migrations.AddField(
model_name="invoice",
name="customer_name",
field=models.TextField(
blank=True,
help_text="The customer's name. Until the invoice is finalized, this field will equal customer.name. Once the invoice is finalized, this field will no longer be updated.",
max_length=5000,
),
),
migrations.AddField(
model_name="invoice",
name="customer_phone",
field=models.TextField(
blank=True,
help_text="The customer's phone number. Until the invoice is finalized, this field will equal customer.phone. Once the invoice is finalized, this field will no longer be updated.",
max_length=5000,
),
),
migrations.AddField(
model_name="invoice",
name="customer_shipping",
field=djstripe.fields.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name="invoice",
name="customer_tax_exempt",
field=djstripe.fields.StripeEnumField(
default="", enum=djstripe.enums.CustomerTaxExempt, max_length=7
),
),
migrations.AddField(
model_name="invoice",
name="default_payment_method",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="djstripe.paymentmethod",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.AddField(
model_name="invoice",
name="footer",
field=models.TextField(
blank=True,
help_text="Footer displayed on the invoice.",
max_length=5000,
),
),
migrations.AddField(
model_name="invoice",
name="post_payment_credit_notes_amount",
field=djstripe.fields.StripeQuantumCurrencyAmountField(
blank=True, null=True
),
),
migrations.AddField(
model_name="invoice",
name="pre_payment_credit_notes_amount",
field=djstripe.fields.StripeQuantumCurrencyAmountField(
blank=True, null=True
),
),
migrations.AddField(
model_name="invoice",
name="threshold_reason",
field=djstripe.fields.JSONField(blank=True, null=True),
),
migrations.AddField(
model_name="invoice",
name="status",
field=djstripe.fields.StripeEnumField(
blank=True, default="", enum=djstripe.enums.InvoiceStatus, max_length=13
),
),
migrations.AddField(
model_name="invoice",
name="discount",
field=djstripe.fields.JSONField(blank=True, null=True),
),
migrations.CreateModel(
name="Payout",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
("arrival_date", djstripe.fields.StripeDateTimeField()),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"failure_code",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.PayoutFailureCode,
max_length=23,
),
),
(
"failure_message",
models.TextField(
blank=True,
default="",
help_text="Message to user further explaining reason for payout failure if available.",
),
),
(
"method",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.PayoutMethod, max_length=8
),
),
(
"statement_descriptor",
models.CharField(
blank=True,
default="",
help_text="Extra information about a payout to be displayed on the user's bank statement.",
max_length=255,
),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.PayoutStatus, max_length=10
),
),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.PayoutType, max_length=12
),
),
(
"destination",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.PROTECT,
to="djstripe.bankaccount",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"balance_transaction",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"failure_balance_transaction",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="failure_payouts",
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Source",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
(
"client_secret",
models.CharField(
help_text="The client secret of the source. Used for client-side retrieval using a publishable key.",
max_length=255,
),
),
(
"currency",
djstripe.fields.StripeCurrencyCodeField(
blank=True, default="", max_length=3
),
),
(
"flow",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.SourceFlow, max_length=17
),
),
("owner", djstripe.fields.JSONField()),
(
"statement_descriptor",
models.CharField(
blank=True,
default="",
help_text="Extra information about a source. This will appear on your customer's statement every time you charge the source.",
max_length=255,
),
),
(
"status",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.SourceStatus, max_length=10
),
),
(
"type",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.SourceType, max_length=20
),
),
(
"usage",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.SourceUsage, max_length=10
),
),
("code_verification", djstripe.fields.JSONField(blank=True, null=True)),
("receiver", djstripe.fields.JSONField(blank=True, null=True)),
("redirect", djstripe.fields.JSONField(blank=True, null=True)),
("source_data", djstripe.fields.JSONField()),
(
"customer",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="sources",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="Refund",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
(
"failure_reason",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.RefundFailureReason,
max_length=24,
),
),
(
"reason",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.RefundReason,
max_length=25,
),
),
(
"receipt_number",
models.CharField(
blank=True,
default="",
help_text="The transaction number that appears on email receipts sent for this charge.",
max_length=9,
),
),
(
"status",
djstripe.fields.StripeEnumField(
blank=True, enum=djstripe.enums.RefundStatus, max_length=9
),
),
(
"charge",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="refunds",
to="djstripe.charge",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"balance_transaction",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"failure_balance_transaction",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="failure_refunds",
to="djstripe.balancetransaction",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.CreateModel(
name="UpcomingInvoice",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"account_country",
models.CharField(
blank=True,
default="",
help_text="The country of the business associated with this invoice, most often the business creating the invoice.",
max_length=2,
),
),
(
"account_name",
models.TextField(
blank=True,
help_text="The public name of the business associated with this invoice, most often the business creating the invoice.",
max_length=5000,
),
),
(
"amount_due",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
(
"amount_paid",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11, null=True
),
),
(
"amount_remaining",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11, null=True
),
),
(
"application_fee_amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
(
"attempt_count",
models.IntegerField(
help_text="Number of payment attempts made for this invoice, from the perspective of the payment retry schedule. Any payment attempt counts as the first attempt, and subsequently only automatic retries increment the attempt count. In other words, manual payment attempts after the first attempt do not affect the retry schedule."
),
),
(
"attempted",
models.BooleanField(
default=False,
help_text="Whether or not an attempt has been made to pay the invoice. An invoice is not attempted until 1 hour after the ``invoice.created`` webhook, for example, so you might not want to display that invoice as unpaid to your users.",
),
),
(
"auto_advance",
models.BooleanField(
help_text="Controls whether Stripe will perform automatic collection of the invoice. When false, the invoice's state will not automatically advance without an explicit action.",
null=True,
),
),
(
"billing_reason",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.InvoiceBillingReason,
max_length=22,
),
),
(
"collection_method",
djstripe.fields.StripeEnumField(
enum=djstripe.enums.InvoiceCollectionMethod,
max_length=20,
null=True,
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
("customer_address", djstripe.fields.JSONField(blank=True, null=True)),
(
"customer_email",
models.TextField(
blank=True,
help_text="The customer's email. Until the invoice is finalized, this field will equal customer.email. Once the invoice is finalized, this field will no longer be updated.",
max_length=5000,
),
),
(
"customer_name",
models.TextField(
blank=True,
help_text="The customer's name. Until the invoice is finalized, this field will equal customer.name. Once the invoice is finalized, this field will no longer be updated.",
max_length=5000,
),
),
(
"customer_phone",
models.TextField(
blank=True,
help_text="The customer's phone number. Until the invoice is finalized, this field will equal customer.phone. Once the invoice is finalized, this field will no longer be updated.",
max_length=5000,
),
),
("customer_shipping", djstripe.fields.JSONField(blank=True, null=True)),
(
"customer_tax_exempt",
djstripe.fields.StripeEnumField(
default="", enum=djstripe.enums.CustomerTaxExempt, max_length=7
),
),
(
"due_date",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"ending_balance",
djstripe.fields.StripeQuantumCurrencyAmountField(null=True),
),
(
"footer",
models.TextField(
blank=True,
help_text="Footer displayed on the invoice.",
max_length=5000,
),
),
(
"hosted_invoice_url",
models.TextField(
blank=True,
default="",
help_text="The URL for the hosted invoice page, which allows customers to view and pay an invoice. If the invoice has not been frozen yet, this will be null.",
max_length=799,
),
),
(
"invoice_pdf",
models.TextField(
blank=True,
default="",
help_text="The link to download the PDF for the invoice. If the invoice has not been frozen yet, this will be null.",
max_length=799,
),
),
(
"next_payment_attempt",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"number",
models.CharField(
blank=True,
default="",
help_text="A unique, identifying string that appears on emails sent to the customer for this invoice. This starts with the customer's unique invoice_prefix if it is specified.",
max_length=64,
),
),
(
"paid",
models.BooleanField(
default=False,
help_text="Whether payment was successfully collected for this invoice. An invoice can be paid (most commonly) with a charge or with credit from the customer's account balance.",
),
),
("period_end", djstripe.fields.StripeDateTimeField()),
("period_start", djstripe.fields.StripeDateTimeField()),
(
"post_payment_credit_notes_amount",
djstripe.fields.StripeQuantumCurrencyAmountField(
blank=True, null=True
),
),
(
"pre_payment_credit_notes_amount",
djstripe.fields.StripeQuantumCurrencyAmountField(
blank=True, null=True
),
),
(
"receipt_number",
models.CharField(
blank=True,
help_text="This is the transaction number that appears on email receipts sent for this invoice.",
max_length=64,
null=True,
),
),
(
"starting_balance",
djstripe.fields.StripeQuantumCurrencyAmountField(),
),
(
"statement_descriptor",
models.CharField(
blank=True,
default="",
help_text="An arbitrary string to be displayed on your customer's credit card statement. The statement description may not include <>\"' characters, and will appear on your customer's statement in capital letters. Non-ASCII characters are automatically stripped. While most banks display this information consistently, some may display it incorrectly or not at all.",
max_length=22,
),
),
(
"status_transitions",
djstripe.fields.JSONField(blank=True, null=True),
),
(
"subscription_proration_date",
djstripe.fields.StripeDateTimeField(blank=True, null=True),
),
(
"subtotal",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
(
"tax",
djstripe.fields.StripeDecimalCurrencyAmountField(
blank=True, decimal_places=2, max_digits=11, null=True
),
),
(
"tax_percent",
djstripe.fields.StripePercentField(
blank=True,
decimal_places=2,
max_digits=5,
null=True,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(100),
],
),
),
("threshold_reason", djstripe.fields.JSONField(blank=True, null=True)),
(
"total",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2,
max_digits=11,
verbose_name="Total (as decimal) after discount.",
),
),
(
"webhooks_delivered_at",
djstripe.fields.StripeDateTimeField(null=True),
),
(
"charge",
models.OneToOneField(
help_text="The latest charge generated for this invoice, if any.",
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="latest_upcominginvoice",
to="djstripe.charge",
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="upcominginvoices",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"default_payment_method",
djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="djstripe.paymentmethod",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"payment_intent",
models.OneToOneField(
help_text="The PaymentIntent associated with this invoice. The PaymentIntent is generated when the invoice is finalized, and can then be used to pay the invoice.Note that voiding an invoice will cancel the PaymentIntent",
null=True,
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.paymentintent",
),
),
(
"subscription",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="upcominginvoices",
to="djstripe.subscription",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"status",
djstripe.fields.StripeEnumField(
blank=True,
default="",
enum=djstripe.enums.InvoiceStatus,
max_length=13,
),
),
(
"default_source",
djstripe.fields.PaymentMethodForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="upcoming_invoices",
to="djstripe.djstripepaymentmethod",
),
),
("discount", djstripe.fields.JSONField(blank=True, null=True)),
],
options={"abstract": False, "ordering": ["-created"]},
),
migrations.CreateModel(
name="TaxRate",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"active",
models.BooleanField(
default=True,
help_text="Defaults to true. When set to false, this tax rate cannot be applied to objects in the API, but will still be applied to subscriptions and invoices that already have it set.",
),
),
(
"display_name",
models.CharField(
blank=True,
default="",
help_text="The display name of the tax rates as it will appear to your customer on their receipt email, PDF, and the hosted invoice page.",
max_length=50,
),
),
(
"inclusive",
models.BooleanField(
help_text="This specifies if the tax rate is inclusive or exclusive."
),
),
(
"jurisdiction",
models.CharField(
blank=True,
default="",
help_text="The jurisdiction for the tax rate.",
max_length=50,
),
),
(
"percentage",
djstripe.fields.StripePercentField(
decimal_places=2,
max_digits=5,
validators=[
django.core.validators.MinValueValidator(1),
django.core.validators.MaxValueValidator(100),
],
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.AddField(
model_name="invoice",
name="default_tax_rates",
field=models.ManyToManyField(
blank=True,
db_table="djstripe_djstripeinvoicedefaulttaxrate",
help_text="The tax rates applied to this invoice, if any.",
related_name="_invoice_default_tax_rates_+",
to="djstripe.TaxRate",
),
),
migrations.CreateModel(
name="InvoiceItem",
fields=[
(
"djstripe_id",
models.BigAutoField(
primary_key=True, serialize=False, verbose_name="ID"
),
),
("id", djstripe.fields.StripeIdField(max_length=255, unique=True)),
(
"livemode",
models.BooleanField(
blank=True,
default=None,
help_text="Null here indicates that the livemode status is unknown or was previously unrecorded. Otherwise, this field indicates whether this record comes from Stripe test mode or live mode operation.",
null=True,
),
),
("created", djstripe.fields.StripeDateTimeField(blank=True, null=True)),
("metadata", djstripe.fields.JSONField(blank=True, null=True)),
(
"description",
models.TextField(
blank=True, help_text="A description of this object.", null=True
),
),
("djstripe_created", models.DateTimeField(auto_now_add=True)),
("djstripe_updated", models.DateTimeField(auto_now=True)),
(
"amount",
djstripe.fields.StripeDecimalCurrencyAmountField(
decimal_places=2, max_digits=11
),
),
("currency", djstripe.fields.StripeCurrencyCodeField(max_length=3)),
("date", djstripe.fields.StripeDateTimeField()),
(
"discountable",
models.BooleanField(
default=False,
help_text="If True, discounts will apply to this invoice item. Always False for prorations.",
),
),
("period", djstripe.fields.JSONField()),
("period_end", djstripe.fields.StripeDateTimeField()),
("period_start", djstripe.fields.StripeDateTimeField()),
(
"proration",
models.BooleanField(
default=False,
help_text="Whether or not the invoice item was created automatically as a proration adjustment when the customer switched plans.",
),
),
(
"quantity",
models.IntegerField(
blank=True,
help_text="If the invoice item is a proration, the quantity of the subscription for which the proration was computed.",
null=True,
),
),
(
"customer",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="invoiceitems",
to="djstripe.customer",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"invoice",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="invoiceitems",
to="djstripe.invoice",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"plan",
models.ForeignKey(
help_text="If the invoice item is a proration, the plan of the subscription for which the proration was computed.",
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="invoiceitems",
to="djstripe.plan",
),
),
(
"subscription",
djstripe.fields.StripeForeignKey(
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="invoiceitems",
to="djstripe.subscription",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"tax_rates",
models.ManyToManyField(
blank=True,
db_table="djstripe_djstripeinvoiceitemtaxrate",
help_text="The tax rates which apply to this invoice item. When set, the default_tax_rates on the invoice do not apply to this invoice item.",
related_name="_invoiceitem_tax_rates_+",
to="djstripe.TaxRate",
),
),
],
options={"abstract": False, "get_latest_by": "created"},
),
migrations.AddField(
model_name="subscription",
name="default_tax_rates",
field=models.ManyToManyField(
blank=True,
db_table="djstripe_djstripesubscriptiondefaulttaxrate",
help_text="The tax rates that will apply to any subscription item that does not have tax_rates set. Invoices created will have their default_tax_rates populated from the subscription.",
related_name="_subscription_default_tax_rates_+",
to="djstripe.TaxRate",
),
),
migrations.AddField(
model_name="subscriptionitem",
name="tax_rates",
field=models.ManyToManyField(
blank=True,
db_table="djstripe_djstripesubscriptionitemtaxrate",
help_text="The tax rates which apply to this subscription_item. When set, the default_tax_rates on the subscription do not apply to this subscription_item.",
related_name="_subscriptionitem_tax_rates_+",
to="djstripe.TaxRate",
),
),
migrations.CreateModel(
name="DjstripeUpcomingInvoiceTotalTaxAmount",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
(
"inclusive",
models.BooleanField(
help_text="Whether this tax amount is inclusive or exclusive."
),
),
(
"invoice",
models.ForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="+",
to="djstripe.upcominginvoice",
),
),
(
"tax_rate",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.taxrate",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"unique_together": {("invoice", "tax_rate")}},
),
migrations.CreateModel(
name="DjstripeInvoiceTotalTaxAmount",
fields=[
(
"id",
models.AutoField(
auto_created=True,
primary_key=True,
serialize=False,
verbose_name="ID",
),
),
("amount", djstripe.fields.StripeQuantumCurrencyAmountField()),
(
"inclusive",
models.BooleanField(
help_text="Whether this tax amount is inclusive or exclusive."
),
),
(
"invoice",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
related_name="total_tax_amounts",
to="djstripe.invoice",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
(
"tax_rate",
djstripe.fields.StripeForeignKey(
on_delete=django.db.models.deletion.CASCADE,
to="djstripe.taxrate",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
],
options={"unique_together": {("invoice", "tax_rate")}},
),
migrations.AddField(
model_name="subscription",
name="default_payment_method",
field=djstripe.fields.StripeForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="+",
to="djstripe.paymentmethod",
to_field=settings.DJSTRIPE_FOREIGN_KEY_TO_FIELD,
),
),
migrations.AddField(
model_name="invoice",
name="default_source",
field=djstripe.fields.PaymentMethodForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="invoices",
to="djstripe.djstripepaymentmethod",
),
),
migrations.AddField(
model_name="subscription",
name="default_source",
field=djstripe.fields.PaymentMethodForeignKey(
blank=True,
null=True,
on_delete=django.db.models.deletion.SET_NULL,
related_name="subscriptions",
to="djstripe.djstripepaymentmethod",
),
),
]
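The operations above only declare schema. As a quick orientation for how the resulting tables are used from application code, here is a minimal sketch of querying two of the models created by this migration through the Django ORM. It assumes a project with dj-stripe installed and these migrations applied; the field names (customer, status, net, currency) come from the declarations above, while the specific filter values are illustrative only.

from djstripe.models import BalanceTransaction, PaymentIntent

def succeeded_intents_for(customer):
    # PaymentIntent declares a "customer" FK and a "status" enum above;
    # "succeeded" is one of the PaymentIntentStatus values.
    return PaymentIntent.objects.filter(customer=customer, status="succeeded")

def net_balance(currency="usd"):
    # BalanceTransaction stores integer ("quantum") currency amounts,
    # per the StripeQuantumCurrencyAmountField declarations above.
    return sum(bt.net for bt in BalanceTransaction.objects.filter(currency=currency))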
| 43.366023
| 391
| 0.443841
| 12,425
| 168,477
| 5.878873
| 0.065674
| 0.074173
| 0.026696
| 0.03491
| 0.828955
| 0.80345
| 0.778328
| 0.726087
| 0.700116
| 0.682155
| 0
| 0.007218
| 0.474563
| 168,477
| 3,884
| 392
| 43.377188
| 0.817925
| 0.001971
| 0
| 0.759503
| 1
| 0.017585
| 0.1987
| 0.012317
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.000259
| 0.002069
| 0
| 0.003103
| 0.000517
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| fe51339b5270af36aa312925039de3515bb071d1
| 113
| py
| Python
| molmodmt/native/topology.py
| LMMV/MolModMT
| 5725d6d5627b07edcbbd5e55318345a136b28c35
| [ "MIT" ] | null | null | null
| molmodmt/native/topology.py
| LMMV/MolModMT
| 5725d6d5627b07edcbbd5e55318345a136b28c35
| [ "MIT" ] | null | null | null
| molmodmt/native/topology.py
| LMMV/MolModMT
| 5725d6d5627b07edcbbd5e55318345a136b28c35
| [ "MIT" ] | null | null | null |
from simtk.openmm.app.topology import Topology as _openmm_Topology
class Topology(_openmm_Topology):
pass
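The wrapper above adds nothing to the OpenMM class yet, so it can be exercised with the standard OpenMM Topology API. A minimal sketch follows, assuming the legacy simtk OpenMM namespace used by this file is importable and that the module path matches the repository layout (molmodmt/native/topology.py).

from simtk.openmm.app.element import Element
from molmodmt.native.topology import Topology  # the pass-through subclass above

top = Topology()
chain = top.addChain()
residue = top.addResidue("HOH", chain)
top.addAtom("O", Element.getBySymbol("O"), residue)
print(top.getNumAtoms())  # -> 1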
| 16.142857
| 66
| 0.80531
| 15
| 113
| 5.8
| 0.6
| 0.321839
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.141593
| 113
| 6
| 67
| 18.833333
| 0.896907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 7
| fe57395860c76be677fba32cb3b292d28366176a
| 4,002
| py
| Python
| test-framework/test-suites/integration/tests/remove/test_remove_host_firewall.py
| knutsonchris/stacki
| 33087dd5fa311984a66ccecfeee6f9c2c25f665d
| [ "BSD-3-Clause" ] | 123
| 2015-05-12T23:36:45.000Z
| 2017-07-05T23:26:57.000Z
| test-framework/test-suites/integration/tests/remove/test_remove_host_firewall.py
| knutsonchris/stacki
| 33087dd5fa311984a66ccecfeee6f9c2c25f665d
| [ "BSD-3-Clause" ] | 177
| 2015-06-05T19:17:47.000Z
| 2017-07-07T17:57:24.000Z
| test-framework/test-suites/integration/tests/remove/test_remove_host_firewall.py
| knutsonchris/stacki
| 33087dd5fa311984a66ccecfeee6f9c2c25f665d
| [ "BSD-3-Clause" ] | 32
| 2015-06-07T02:25:03.000Z
| 2017-06-23T07:35:35.000Z
|
import json
from textwrap import dedent
class TestRemoveHostFirewall:
    def test_no_args(self, host):
        result = host.run('stack remove host firewall')
        assert result.rc == 255
        assert result.stderr == dedent('''\
            error - "host" argument is required
            {host ...} {rulename=string}
        ''')

    def test_invalid(self, host, invalid_host):
        result = host.run(
            f'stack remove host firewall {invalid_host} rulename=test'
        )
        assert result.rc == 255
        assert result.stderr == f'error - cannot resolve host "{invalid_host}"\n'

    def test_no_rulename(self, host, add_host):
        result = host.run('stack remove host firewall backend-0-0')
        assert result.rc == 255
        assert result.stderr == dedent('''\
            error - "rulename" parameter is required
            {host ...} {rulename=string}
        ''')

    def test_invalid_rulename(self, host, add_host):
        result = host.run('stack remove host firewall backend-0-0 rulename=test')
        assert result.rc == 255
        assert result.stderr == 'error - rule named "test" does not exist\n'

    def test_one_arg(self, host, add_host):
        # Add a firewall rule
        result = host.run(
            'stack add host firewall backend-0-0 service=1234 chain=INPUT '
            'action=ACCEPT protocol=TCP network=private rulename=test'
        )
        assert result.rc == 0

        # Make sure it is in the DB now
        result = host.run('stack list host firewall backend-0-0 output-format=json')
        assert result.rc == 0
        rules = [
            rule for rule in json.loads(result.stdout)
            if rule['name'] == 'test'
        ]
        assert rules == [{
            'host': 'backend-0-0',
            'name': 'test',
            'table': 'filter',
            'service': '1234',
            'protocol': 'TCP',
            'chain': 'INPUT',
            'action': 'ACCEPT',
            'network': 'private',
            'output-network': None,
            'flags': None,
            'comment': None,
            'source': 'H',
            'type': 'var'
        }]

        # Delete the rule
        result = host.run('stack remove host firewall backend-0-0 rulename=test')
        assert result.rc == 0

        # Make sure it is gone now
        result = host.run('stack list host firewall backend-0-0 output-format=json')
        assert result.rc == 0
        rules = [
            rule for rule in json.loads(result.stdout)
            if rule['name'] == 'test'
        ]
        assert rules == []

    def test_multiple_args(self, host, add_host):
        # Add a firewall rule for our first host
        result = host.run(
            'stack add host firewall backend-0-0 service=1234 chain=INPUT '
            'action=ACCEPT protocol=TCP network=private rulename=test'
        )
        assert result.rc == 0

        # Add a second test host
        add_host('backend-0-1', '0', '1', 'backend')

        # It gets a rule too
        result = host.run(
            'stack add host firewall backend-0-1 service=1234 chain=INPUT '
            'action=ACCEPT protocol=TCP network=private rulename=test'
        )
        assert result.rc == 0

        # Make sure both hosts have their rules in the DB now
        result = host.run('stack list host firewall backend-0-0 backend-0-1 output-format=json')
        assert result.rc == 0
        rules = [
            rule for rule in json.loads(result.stdout)
            if rule['name'] == 'test'
        ]
        assert rules == [
            {
                'host': 'backend-0-0',
                'name': 'test',
                'table': 'filter',
                'service': '1234',
                'protocol': 'TCP',
                'chain': 'INPUT',
                'action': 'ACCEPT',
                'network': 'private',
                'output-network': None,
                'flags': None,
                'comment': None,
                'source': 'H',
                'type': 'var'
            },
            {
                'host': 'backend-0-1',
                'name': 'test',
                'table': 'filter',
                'service': '1234',
                'protocol': 'TCP',
                'chain': 'INPUT',
                'action': 'ACCEPT',
                'network': 'private',
                'output-network': None,
                'flags': None,
                'comment': None,
                'source': 'H',
                'type': 'var'
            }
        ]

        # Delete the host rules
        result = host.run('stack remove host firewall backend-0-0 backend-0-1 rulename=test')
        assert result.rc == 0

        # Make sure the rules are gone now
        result = host.run('stack list host firewall backend-0-0 backend-0-1 output-format=json')
        assert result.rc == 0
        rules = [
            rule for rule in json.loads(result.stdout)
            if rule['name'] == 'test'
        ]
        assert rules == []
| 26.328947
| 90
| 0.636182
| 559
| 4,002
| 4.520572
| 0.168157
| 0.056985
| 0.066878
| 0.085477
| 0.839335
| 0.836169
| 0.836169
| 0.836169
| 0.745152
| 0.665611
| 0
| 0.026307
| 0.211644
| 4,002
| 151
| 91
| 26.503311
| 0.774643
| 0.069465
| 0
| 0.704918
| 0
| 0
| 0.423647
| 0
| 0
| 0
| 0
| 0
| 0.172131
| 1
| 0.04918
| false
| 0
| 0.016393
| 0
| 0.07377
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe7f32a3f042deb335ae1fc4c59aea99d88d69e0
| 73
|
py
|
Python
|
model/__init__.py
|
sudoRicheek/Image-Enhancer
|
ba1bd17d3066be7007b191e579b17c07bb03a1ac
|
[
"MIT"
] | 8
|
2020-05-21T18:35:18.000Z
|
2022-01-07T20:08:06.000Z
|
model/__init__.py
|
sudoRicheek/Image-Enhancer
|
ba1bd17d3066be7007b191e579b17c07bb03a1ac
|
[
"MIT"
] | 1
|
2021-12-23T03:27:51.000Z
|
2021-12-23T03:27:51.000Z
|
model/__init__.py
|
sudoRicheek/Image-Enhancer
|
ba1bd17d3066be7007b191e579b17c07bb03a1ac
|
[
"MIT"
] | 2
|
2020-07-14T11:41:25.000Z
|
2022-03-23T19:27:05.000Z
|
from model.common import resolve
from model.common import resolve_single
| 24.333333
| 39
| 0.863014
| 11
| 73
| 5.636364
| 0.545455
| 0.290323
| 0.483871
| 0.677419
| 0.903226
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109589
| 73
| 2
| 40
| 36.5
| 0.953846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 9
|
22d2e29295de7f627f27ca93e6445e4d79c5e1ee
| 467
|
py
|
Python
|
mongo_test/utils/models/__init__.py
|
Vuong02011996/data_base_test
|
a57940970ce52a25e10f2262fb94530b1ae2681c
|
[
"MIT"
] | null | null | null |
mongo_test/utils/models/__init__.py
|
Vuong02011996/data_base_test
|
a57940970ce52a25e10f2262fb94530b1ae2681c
|
[
"MIT"
] | null | null | null |
mongo_test/utils/models/__init__.py
|
Vuong02011996/data_base_test
|
a57940970ce52a25e10f2262fb94530b1ae2681c
|
[
"MIT"
] | null | null | null |
from mongo_test.utils.models.identity import *
from mongo_test.utils.models.object import *
from mongo_test.utils.models.detection import *
from mongo_test.utils.models.user import *
from mongo_test.utils.models.process import *
from mongo_test.utils.models.camera import *
from mongo_test.utils.models.logger import *
from mongo_test.utils.models.cluster import *
from mongo_test.utils.models.cluster_element import *
from mongo_test.utils.models.parameter import *
| 42.454545
| 53
| 0.828694
| 71
| 467
| 5.295775
| 0.225352
| 0.239362
| 0.345745
| 0.478723
| 0.819149
| 0.755319
| 0.196809
| 0
| 0
| 0
| 0
| 0
| 0.085653
| 467
| 10
| 54
| 46.7
| 0.880562
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
22d93f4b7a8fd920b6d99f4b2a2bf62c16da637d
| 3,366
|
py
|
Python
|
owl/strategies.py
|
cmrudolph/simulation
|
e8507524fc32efc9e84b0f6a487d725ed4ec3a6b
|
[
"MIT"
] | null | null | null |
owl/strategies.py
|
cmrudolph/simulation
|
e8507524fc32efc9e84b0f6a487d725ed4ec3a6b
|
[
"MIT"
] | null | null | null |
owl/strategies.py
|
cmrudolph/simulation
|
e8507524fc32efc9e84b0f6a487d725ed4ec3a6b
|
[
"MIT"
] | null | null | null |
def back_owl_random_card(game, hands, hand_idx, randint):
    hand = hands[hand_idx]
    card_idx = randint(0, 2)
    return (game.occupied[0], hand.cards[card_idx])


def random_owl_random_card(game, hands, hand_idx, randint):
    hand = hands[hand_idx]
    owl_idx = randint(0, game.owls - 1)
    card_idx = randint(0, len(hand.cards) - 1)
    return (game.occupied[owl_idx], hand.cards[card_idx])


def front_owl_random_card(game, hands, hand_idx, randint):
    hand = hands[hand_idx]
    card_idx = randint(0, 2)
    return (game.occupied[-1], hand.cards[card_idx])


def back_owl_smallest_gain(game, hands, hand_idx, randint):
    worst_gain = 888
    start = game.occupied[0]
    for card in hands[hand_idx].cards:
        end = game.compute_end(start, card.color)
        gain = end - start
        if gain < worst_gain:
            worst_gain = gain
            worst_card = card
    return (start, worst_card)


def any_owl_smallest_gain(game, hands, hand_idx, randint):
    worst_gain = 888
    for start in game.occupied:
        for card in hands[hand_idx].cards:
            end = game.compute_end(start, card.color)
            gain = end - start
            if gain < worst_gain:
                worst_gain = gain
                worst_start = start
                worst_card = card
    return (worst_start, worst_card)


def front_owl_smallest_gain(game, hands, hand_idx, randint):
    worst_gain = 888
    start = game.occupied[-1]
    for card in hands[hand_idx].cards:
        end = game.compute_end(start, card.color)
        gain = end - start
        if gain < worst_gain:
            worst_gain = gain
            worst_card = card
    return (start, worst_card)


def back_owl_biggest_gain(game, hands, hand_idx, randint):
    best_gain = 0
    start = game.occupied[0]
    for card in hands[hand_idx].cards:
        end = game.compute_end(start, card.color)
        gain = end - start
        if gain > best_gain:
            best_gain = gain
            best_card = card
    return (start, best_card)


def any_owl_biggest_gain(game, hands, hand_idx, randint):
    best_gain = 0
    for start in game.occupied:
        for card in hands[hand_idx].cards:
            end = game.compute_end(start, card.color)
            gain = end - start
            if gain > best_gain:
                best_gain = gain
                best_start = start
                best_card = card
    return (best_start, best_card)


def front_owl_biggest_gain(game, hands, hand_idx, randint):
    best_gain = 0
    start = game.occupied[-1]
    for card in hands[hand_idx].cards:
        end = game.compute_end(start, card.color)
        gain = end - start
        if gain > best_gain:
            best_gain = gain
            best_card = card
    return (start, best_card)


def back_owl_color_priority(game, hands, hand_idx, randint):
    min_card = 888
    start = game.occupied[0]
    for card in hands[hand_idx].cards:
        if card.color.value < min_card:
            min_card = card.color.value
            best_card = card
    return (start, best_card)


def front_owl_color_priority(game, hands, hand_idx, randint):
    min_card = 888
    start = game.occupied[-1]
    for card in hands[hand_idx].cards:
        if card.color.value < min_card:
            min_card = card.color.value
            best_card = card
    return (start, best_card)
| 28.05
| 61
| 0.620618
| 474
| 3,366
| 4.168776
| 0.075949
| 0.100202
| 0.133603
| 0.089069
| 0.898279
| 0.869433
| 0.856275
| 0.856275
| 0.854757
| 0.854757
| 0
| 0.014173
| 0.287285
| 3,366
| 119
| 62
| 28.285714
| 0.809504
| 0
| 0
| 0.78022
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.120879
| false
| 0
| 0
| 0
| 0.241758
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fe1238ceddbcac0a72954ebfeed67bf6956029a1
| 39,314
|
py
|
Python
|
services/key_management/src/oci_cli_kms_management/generated/kmsmanagement_cli.py
|
andrewtvuong/oci-cli
|
7673a808613308a4899c7026964fa2383c30c397
|
[
"Apache-2.0"
] | null | null | null |
services/key_management/src/oci_cli_kms_management/generated/kmsmanagement_cli.py
|
andrewtvuong/oci-cli
|
7673a808613308a4899c7026964fa2383c30c397
|
[
"Apache-2.0"
] | null | null | null |
services/key_management/src/oci_cli_kms_management/generated/kmsmanagement_cli.py
|
andrewtvuong/oci-cli
|
7673a808613308a4899c7026964fa2383c30c397
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
from __future__ import print_function
import click
import oci # noqa: F401
import six # noqa: F401
import sys # noqa: F401
from oci_cli import cli_constants # noqa: F401
from oci_cli import cli_util
from oci_cli import json_skeleton_utils
from oci_cli import custom_types # noqa: F401
from oci_cli.aliasing import CommandGroupWithAlias
from oci_cli_key_management.generated import kms_service_cli
@click.command(cli_util.override('kms_management_root_group.command_name', 'kms-management'), cls=CommandGroupWithAlias, help=cli_util.override('kms_management_root_group.help', """API for managing and performing operations with keys and vaults."""), short_help=cli_util.override('kms_management_root_group.short_help', """Key Management Service API"""))
@cli_util.help_option_group
def kms_management_root_group():
pass
@click.command(cli_util.override('key_version_group.command_name', 'key-version'), cls=CommandGroupWithAlias, help="""""")
@cli_util.help_option_group
def key_version_group():
pass
@click.command(cli_util.override('key_group.command_name', 'key'), cls=CommandGroupWithAlias, help="""""")
@cli_util.help_option_group
def key_group():
pass
kms_service_cli.kms_service_group.add_command(kms_management_root_group)
kms_management_root_group.add_command(key_version_group)
kms_management_root_group.add_command(key_group)
@key_group.command(name=cli_util.override('cancel_key_deletion.command_name', 'cancel-key-deletion'), help=u"""Cancels the scheduled deletion of the specified key. Canceling a scheduled deletion restores the key to the respective states they were in before the deletion was scheduled.
The top level --endpoint parameter must be supplied for this operation.""")
@cli_util.option('--key-id', required=True, help=u"""The OCID of the key.""")
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ENABLING", "ENABLED", "DISABLING", "DISABLED", "DELETING", "DELETED", "PENDING_DELETION", "SCHEDULING_DELETION", "CANCELLING_DELETION"]), help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource to see if it has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'key_management', 'class': 'Key'})
@cli_util.wrap_exceptions
def cancel_key_deletion(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, key_id, if_match):
if isinstance(key_id, six.string_types) and len(key_id.strip()) == 0:
raise click.UsageError('Parameter --key-id cannot be whitespace or empty string')
kwargs = {}
if if_match is not None:
kwargs['if_match'] = if_match
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
client = cli_util.build_client('kms_management', ctx)
result = client.cancel_key_deletion(
key_id=key_id,
**kwargs
)
if wait_for_state:
if hasattr(client, 'get_key') and callable(getattr(client, 'get_key')):
try:
wait_period_kwargs = {}
if max_wait_seconds is not None:
wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
if wait_interval_seconds is not None:
wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
result = oci.wait_until(client, client.get_key(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs)
except oci.exceptions.MaximumWaitTimeExceeded as e:
# If we fail, we should show an error, but we should still provide the information to the customer
click.echo('Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
sys.exit(2)
except Exception:
click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
raise
else:
click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
cli_util.render_response(result, ctx)
@key_group.command(name=cli_util.override('create_key.command_name', 'create'), help=u"""Creates a new key.
The top level --endpoint parameter must be supplied for this operation.""")
@cli_util.option('--compartment-id', required=True, help=u"""The OCID of the compartment that contains this key.""")
@cli_util.option('--display-name', required=True, help=u"""A user-friendly name for the key. It does not have to be unique, and it is changeable. Avoid entering confidential information.""")
@cli_util.option('--key-shape', required=True, type=custom_types.CLI_COMPLEX_TYPE, help=u"""""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--defined-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Usage of predefined tag keys. These predefined keys are scoped to namespaces. Example: `{\"foo-namespace\": {\"bar-key\": \"foo-value\"}}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Simple key-value pair that is applied without any predefined name, type, or scope. Exists for cross-compatibility only. Example: `{\"bar-key\": \"value\"}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ENABLING", "ENABLED", "DISABLING", "DISABLED", "DELETING", "DELETED", "PENDING_DELETION", "SCHEDULING_DELETION", "CANCELLING_DELETION"]), help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource to see if it has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
@json_skeleton_utils.get_cli_json_input_option({'defined-tags': {'module': 'key_management', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'key_management', 'class': 'dict(str, string)'}, 'key-shape': {'module': 'key_management', 'class': 'KeyShape'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'defined-tags': {'module': 'key_management', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'key_management', 'class': 'dict(str, string)'}, 'key-shape': {'module': 'key_management', 'class': 'KeyShape'}}, output_type={'module': 'key_management', 'class': 'Key'})
@cli_util.wrap_exceptions
def create_key(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, compartment_id, display_name, key_shape, defined_tags, freeform_tags):
kwargs = {}
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
details = {}
details['compartmentId'] = compartment_id
details['displayName'] = display_name
details['keyShape'] = cli_util.parse_json_parameter("key_shape", key_shape)
if defined_tags is not None:
details['definedTags'] = cli_util.parse_json_parameter("defined_tags", defined_tags)
if freeform_tags is not None:
details['freeformTags'] = cli_util.parse_json_parameter("freeform_tags", freeform_tags)
client = cli_util.build_client('kms_management', ctx)
result = client.create_key(
create_key_details=details,
**kwargs
)
if wait_for_state:
if hasattr(client, 'get_key') and callable(getattr(client, 'get_key')):
try:
wait_period_kwargs = {}
if max_wait_seconds is not None:
wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
if wait_interval_seconds is not None:
wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
result = oci.wait_until(client, client.get_key(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs)
except oci.exceptions.MaximumWaitTimeExceeded as e:
# If we fail, we should show an error, but we should still provide the information to the customer
click.echo('Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
sys.exit(2)
except Exception:
click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
raise
else:
click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
cli_util.render_response(result, ctx)
@key_version_group.command(name=cli_util.override('create_key_version.command_name', 'create'), help=u"""Generates new cryptographic material for a key. The key must be in an `ENABLED` state to be rotated.
The top level --endpoint parameter must be supplied for this operation.""")
@cli_util.option('--key-id', required=True, help=u"""The OCID of the key.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'key_management', 'class': 'KeyVersion'})
@cli_util.wrap_exceptions
def create_key_version(ctx, from_json, key_id):
if isinstance(key_id, six.string_types) and len(key_id.strip()) == 0:
raise click.UsageError('Parameter --key-id cannot be whitespace or empty string')
kwargs = {}
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
client = cli_util.build_client('kms_management', ctx)
result = client.create_key_version(
key_id=key_id,
**kwargs
)
cli_util.render_response(result, ctx)
@key_group.command(name=cli_util.override('disable_key.command_name', 'disable'), help=u"""Disables a key to make it unavailable for encryption or decryption.
The top level --endpoint parameter must be supplied for this operation.""")
@cli_util.option('--key-id', required=True, help=u"""The OCID of the key.""")
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ENABLING", "ENABLED", "DISABLING", "DISABLED", "DELETING", "DELETED", "PENDING_DELETION", "SCHEDULING_DELETION", "CANCELLING_DELETION"]), help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource to see if it has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'key_management', 'class': 'Key'})
@cli_util.wrap_exceptions
def disable_key(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, key_id, if_match):
if isinstance(key_id, six.string_types) and len(key_id.strip()) == 0:
raise click.UsageError('Parameter --key-id cannot be whitespace or empty string')
kwargs = {}
if if_match is not None:
kwargs['if_match'] = if_match
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
client = cli_util.build_client('kms_management', ctx)
result = client.disable_key(
key_id=key_id,
**kwargs
)
if wait_for_state:
if hasattr(client, 'get_key') and callable(getattr(client, 'get_key')):
try:
wait_period_kwargs = {}
if max_wait_seconds is not None:
wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
if wait_interval_seconds is not None:
wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
result = oci.wait_until(client, client.get_key(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs)
except oci.exceptions.MaximumWaitTimeExceeded as e:
# If we fail, we should show an error, but we should still provide the information to the customer
click.echo('Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
sys.exit(2)
except Exception:
click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
raise
else:
click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
cli_util.render_response(result, ctx)
@key_group.command(name=cli_util.override('enable_key.command_name', 'enable'), help=u"""Enables a key to make it available for encryption or decryption.
The top level --endpoint parameter must be supplied for this operation.""")
@cli_util.option('--key-id', required=True, help=u"""The OCID of the key.""")
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ENABLING", "ENABLED", "DISABLING", "DISABLED", "DELETING", "DELETED", "PENDING_DELETION", "SCHEDULING_DELETION", "CANCELLING_DELETION"]), help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource to see if it has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'key_management', 'class': 'Key'})
@cli_util.wrap_exceptions
def enable_key(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, key_id, if_match):
if isinstance(key_id, six.string_types) and len(key_id.strip()) == 0:
raise click.UsageError('Parameter --key-id cannot be whitespace or empty string')
kwargs = {}
if if_match is not None:
kwargs['if_match'] = if_match
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
client = cli_util.build_client('kms_management', ctx)
result = client.enable_key(
key_id=key_id,
**kwargs
)
if wait_for_state:
if hasattr(client, 'get_key') and callable(getattr(client, 'get_key')):
try:
wait_period_kwargs = {}
if max_wait_seconds is not None:
wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
if wait_interval_seconds is not None:
wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
result = oci.wait_until(client, client.get_key(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs)
except oci.exceptions.MaximumWaitTimeExceeded as e:
# If we fail, we should show an error, but we should still provide the information to the customer
click.echo('Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
sys.exit(2)
except Exception:
click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
raise
else:
click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
cli_util.render_response(result, ctx)
@key_group.command(name=cli_util.override('get_key.command_name', 'get'), help=u"""Gets information about the specified key.
The top level --endpoint parameter must be supplied for this operation.""")
@cli_util.option('--key-id', required=True, help=u"""The OCID of the key.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'key_management', 'class': 'Key'})
@cli_util.wrap_exceptions
def get_key(ctx, from_json, key_id):
if isinstance(key_id, six.string_types) and len(key_id.strip()) == 0:
raise click.UsageError('Parameter --key-id cannot be whitespace or empty string')
kwargs = {}
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
client = cli_util.build_client('kms_management', ctx)
result = client.get_key(
key_id=key_id,
**kwargs
)
cli_util.render_response(result, ctx)
@key_version_group.command(name=cli_util.override('get_key_version.command_name', 'get'), help=u"""Gets information about the specified key version.
The top level --endpoint parameter must be supplied for this operation.""")
@cli_util.option('--key-id', required=True, help=u"""The OCID of the key.""")
@cli_util.option('--key-version-id', required=True, help=u"""The OCID of the key version.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'key_management', 'class': 'KeyVersion'})
@cli_util.wrap_exceptions
def get_key_version(ctx, from_json, key_id, key_version_id):
if isinstance(key_id, six.string_types) and len(key_id.strip()) == 0:
raise click.UsageError('Parameter --key-id cannot be whitespace or empty string')
if isinstance(key_version_id, six.string_types) and len(key_version_id.strip()) == 0:
raise click.UsageError('Parameter --key-version-id cannot be whitespace or empty string')
kwargs = {}
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
client = cli_util.build_client('kms_management', ctx)
result = client.get_key_version(
key_id=key_id,
key_version_id=key_version_id,
**kwargs
)
cli_util.render_response(result, ctx)
@key_version_group.command(name=cli_util.override('list_key_versions.command_name', 'list'), help=u"""Lists all key versions for the specified key.
The top level --endpoint parameter must be supplied for this operation.""")
@cli_util.option('--key-id', required=True, help=u"""The OCID of the key.""")
@cli_util.option('--limit', type=click.INT, help=u"""The maximum number of items to return in a paginated \"List\" call.""")
@cli_util.option('--page', help=u"""The value of the `opc-next-page` response header from the previous \"List\" call.""")
@cli_util.option('--sort-by', type=custom_types.CliCaseInsensitiveChoice(["TIMECREATED", "DISPLAYNAME"]), help=u"""The field to sort by. You can specify only one sort order. The default order for TIMECREATED is descending. The default order for DISPLAYNAME is ascending.""")
@cli_util.option('--sort-order', type=custom_types.CliCaseInsensitiveChoice(["ASC", "DESC"]), help=u"""The sort order to use, either ascending (`ASC`) or descending (`DESC`).""")
@cli_util.option('--all', 'all_pages', is_flag=True, help="""Fetches all pages of results. If you provide this option, then you cannot provide the --limit option.""")
@cli_util.option('--page-size', type=click.INT, help="""When fetching results, the number of results to fetch per call. Only valid when used with --all or --limit, and ignored otherwise.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'key_management', 'class': 'list[KeyVersionSummary]'})
@cli_util.wrap_exceptions
def list_key_versions(ctx, from_json, all_pages, page_size, key_id, limit, page, sort_by, sort_order):
if all_pages and limit:
raise click.UsageError('If you provide the --all option you cannot provide the --limit option')
if isinstance(key_id, six.string_types) and len(key_id.strip()) == 0:
raise click.UsageError('Parameter --key-id cannot be whitespace or empty string')
kwargs = {}
if limit is not None:
kwargs['limit'] = limit
if page is not None:
kwargs['page'] = page
if sort_by is not None:
kwargs['sort_by'] = sort_by
if sort_order is not None:
kwargs['sort_order'] = sort_order
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
client = cli_util.build_client('kms_management', ctx)
if all_pages:
if page_size:
kwargs['limit'] = page_size
result = cli_util.list_call_get_all_results(
client.list_key_versions,
key_id=key_id,
**kwargs
)
elif limit is not None:
result = cli_util.list_call_get_up_to_limit(
client.list_key_versions,
limit,
page_size,
key_id=key_id,
**kwargs
)
else:
result = client.list_key_versions(
key_id=key_id,
**kwargs
)
cli_util.render_response(result, ctx)
@key_group.command(name=cli_util.override('list_keys.command_name', 'list'), help=u"""Lists the keys in the specified vault and compartment.
The top level --endpoint parameter must be supplied for this operation.""")
@cli_util.option('--compartment-id', required=True, help=u"""The OCID of the compartment.""")
@cli_util.option('--limit', type=click.INT, help=u"""The maximum number of items to return in a paginated \"List\" call.""")
@cli_util.option('--page', help=u"""The value of the `opc-next-page` response header from the previous \"List\" call.""")
@cli_util.option('--sort-by', type=custom_types.CliCaseInsensitiveChoice(["TIMECREATED", "DISPLAYNAME"]), help=u"""The field to sort by. You can specify only one sort order. The default order for TIMECREATED is descending. The default order for DISPLAYNAME is ascending.""")
@cli_util.option('--sort-order', type=custom_types.CliCaseInsensitiveChoice(["ASC", "DESC"]), help=u"""The sort order to use, either ascending (`ASC`) or descending (`DESC`).""")
@cli_util.option('--all', 'all_pages', is_flag=True, help="""Fetches all pages of results. If you provide this option, then you cannot provide the --limit option.""")
@cli_util.option('--page-size', type=click.INT, help="""When fetching results, the number of results to fetch per call. Only valid when used with --all or --limit, and ignored otherwise.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'key_management', 'class': 'list[KeySummary]'})
@cli_util.wrap_exceptions
def list_keys(ctx, from_json, all_pages, page_size, compartment_id, limit, page, sort_by, sort_order):
if all_pages and limit:
raise click.UsageError('If you provide the --all option you cannot provide the --limit option')
kwargs = {}
if limit is not None:
kwargs['limit'] = limit
if page is not None:
kwargs['page'] = page
if sort_by is not None:
kwargs['sort_by'] = sort_by
if sort_order is not None:
kwargs['sort_order'] = sort_order
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
client = cli_util.build_client('kms_management', ctx)
if all_pages:
if page_size:
kwargs['limit'] = page_size
result = cli_util.list_call_get_all_results(
client.list_keys,
compartment_id=compartment_id,
**kwargs
)
elif limit is not None:
result = cli_util.list_call_get_up_to_limit(
client.list_keys,
limit,
page_size,
compartment_id=compartment_id,
**kwargs
)
else:
result = client.list_keys(
compartment_id=compartment_id,
**kwargs
)
cli_util.render_response(result, ctx)
@key_group.command(name=cli_util.override('schedule_key_deletion.command_name', 'schedule-key-deletion'), help=u"""Schedules the deletion of the specified key. This sets the state of the key to `PENDING_DELETION` and then deletes it after the retention period ends.
The top level --endpoint parameter must be supplied for this operation.""")
@cli_util.option('--key-id', required=True, help=u"""The OCID of the key.""")
@cli_util.option('--time-of-deletion', type=custom_types.CLI_DATETIME, help=u"""An optional property to indicate the deletion time of the key, expressed in [RFC 3339] timestamp format. The specified time must be between 7 and 30 days from the time when the request is received. If this property is missing, it will be set to 30 days from the time of the request by default.""" + custom_types.CLI_DATETIME.VALID_DATETIME_CLI_HELP_MESSAGE)
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ENABLING", "ENABLED", "DISABLING", "DISABLED", "DELETING", "DELETED", "PENDING_DELETION", "SCHEDULING_DELETION", "CANCELLING_DELETION"]), help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource to see if it has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'key_management', 'class': 'Key'})
@cli_util.wrap_exceptions
def schedule_key_deletion(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, key_id, time_of_deletion, if_match):
if isinstance(key_id, six.string_types) and len(key_id.strip()) == 0:
raise click.UsageError('Parameter --key-id cannot be whitespace or empty string')
kwargs = {}
if if_match is not None:
kwargs['if_match'] = if_match
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
details = {}
if time_of_deletion is not None:
details['timeOfDeletion'] = time_of_deletion
client = cli_util.build_client('kms_management', ctx)
result = client.schedule_key_deletion(
key_id=key_id,
schedule_key_deletion_details=details,
**kwargs
)
if wait_for_state:
if hasattr(client, 'get_key') and callable(getattr(client, 'get_key')):
try:
wait_period_kwargs = {}
if max_wait_seconds is not None:
wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
if wait_interval_seconds is not None:
wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
result = oci.wait_until(client, client.get_key(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs)
except oci.exceptions.MaximumWaitTimeExceeded as e:
# If we fail, we should show an error, but we should still provide the information to the customer
click.echo('Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
sys.exit(2)
except Exception:
click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
raise
else:
click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
cli_util.render_response(result, ctx)
@key_group.command(name=cli_util.override('update_key.command_name', 'update'), help=u"""Updates the properties of a key. Specifically, you can update the `displayName`, `freeformTags`, and `definedTags` properties. Furthermore, the key must be in an `ACTIVE` or `CREATING` state to be updated.
The top level --endpoint parameter must be supplied for this operation.""")
@cli_util.option('--key-id', required=True, help=u"""The OCID of the key.""")
@cli_util.option('--defined-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Usage of predefined tag keys. These predefined keys are scoped to namespaces. Example: `{\"foo-namespace\": {\"bar-key\": \"foo-value\"}}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--display-name', help=u"""A user-friendly name for the key. It does not have to be unique, and it is changeable. Avoid entering confidential information.""")
@cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Simple key-value pair that is applied without any predefined name, type, or scope. Exists for cross-compatibility only. Example: `{\"bar-key\": \"value\"}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--if-match', help=u"""For optimistic concurrency control. In the PUT or DELETE call for a resource, set the `if-match` parameter to the value of the etag from a previous GET or POST response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.option('--force', help="""Perform update without prompting for confirmation.""", is_flag=True)
@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ENABLING", "ENABLED", "DISABLING", "DISABLED", "DELETING", "DELETED", "PENDING_DELETION", "SCHEDULING_DELETION", "CANCELLING_DELETION"]), help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource to see if it has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
@json_skeleton_utils.get_cli_json_input_option({'defined-tags': {'module': 'key_management', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'key_management', 'class': 'dict(str, string)'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'defined-tags': {'module': 'key_management', 'class': 'dict(str, dict(str, object))'}, 'freeform-tags': {'module': 'key_management', 'class': 'dict(str, string)'}}, output_type={'module': 'key_management', 'class': 'Key'})
@cli_util.wrap_exceptions
def update_key(ctx, from_json, force, wait_for_state, max_wait_seconds, wait_interval_seconds, key_id, defined_tags, display_name, freeform_tags, if_match):
if isinstance(key_id, six.string_types) and len(key_id.strip()) == 0:
raise click.UsageError('Parameter --key-id cannot be whitespace or empty string')
if not force:
if defined_tags or freeform_tags:
if not click.confirm("WARNING: Updates to defined-tags and freeform-tags will replace any existing values. Are you sure you want to continue?"):
ctx.abort()
kwargs = {}
if if_match is not None:
kwargs['if_match'] = if_match
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
details = {}
if defined_tags is not None:
details['definedTags'] = cli_util.parse_json_parameter("defined_tags", defined_tags)
if display_name is not None:
details['displayName'] = display_name
if freeform_tags is not None:
details['freeformTags'] = cli_util.parse_json_parameter("freeform_tags", freeform_tags)
client = cli_util.build_client('kms_management', ctx)
result = client.update_key(
key_id=key_id,
update_key_details=details,
**kwargs
)
if wait_for_state:
if hasattr(client, 'get_key') and callable(getattr(client, 'get_key')):
try:
wait_period_kwargs = {}
if max_wait_seconds is not None:
wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
if wait_interval_seconds is not None:
wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
result = oci.wait_until(client, client.get_key(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs)
except oci.exceptions.MaximumWaitTimeExceeded as e:
# If we fail, we should show an error, but we should still provide the information to the customer
click.echo('Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
sys.exit(2)
except Exception:
click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
raise
else:
click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
cli_util.render_response(result, ctx)
| 67.666093
| 550
| 0.718981
| 5,657
| 39,314
| 4.791586
| 0.068057
| 0.039253
| 0.026858
| 0.015937
| 0.908175
| 0.895042
| 0.886815
| 0.876891
| 0.861765
| 0.860289
| 0
| 0.002971
| 0.169558
| 39,314
| 580
| 551
| 67.782759
| 0.827279
| 0.018492
| 0
| 0.800821
| 0
| 0.088296
| 0.425894
| 0.01934
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028747
| false
| 0.028747
| 0.022587
| 0
| 0.051335
| 0.002053
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
a3dd541f05ec471e5efe7fd6ab9bcbbb5573b697
| 18,824
|
py
|
Python
|
ven2/lib/python2.7/site-packages/zope/security/tests/test_adapter.py
|
manliu1225/Facebook_crawler
|
0f75a1c4382dd4effc3178d84b99b0cad97337cd
|
[
"Apache-2.0"
] | 3
|
2017-10-25T06:29:33.000Z
|
2018-03-15T14:51:53.000Z
|
ven2/lib/python2.7/site-packages/zope/security/tests/test_adapter.py
|
manliu1225/Facebook_crawler
|
0f75a1c4382dd4effc3178d84b99b0cad97337cd
|
[
"Apache-2.0"
] | 71
|
2015-01-24T17:58:13.000Z
|
2022-03-18T08:50:27.000Z
|
ven2/lib/python2.7/site-packages/zope/security/tests/test_adapter.py
|
manliu1225/Facebook_crawler
|
0f75a1c4382dd4effc3178d84b99b0cad97337cd
|
[
"Apache-2.0"
] | 8
|
2015-04-03T09:37:26.000Z
|
2019-10-25T00:28:09.000Z
|
##############################################################################
#
# Copyright (c) 2004 Zope Foundation and Contributors.
# All Rights Reserved.
#
# This software is subject to the provisions of the Zope Public License,
# Version 2.1 (ZPL). A copy of the ZPL should accompany this distribution.
# THIS SOFTWARE IS PROVIDED "AS IS" AND ANY AND ALL EXPRESS OR IMPLIED
# WARRANTIES ARE DISCLAIMED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF TITLE, MERCHANTABILITY, AGAINST INFRINGEMENT, AND FITNESS
# FOR A PARTICULAR PURPOSE.
#
##############################################################################
import unittest
from zope.interface import directlyProvides
from zope.interface import implementer
from zope.location import ILocation
from zope.location import LocationProxy
from zope.proxy import getProxiedObject
# pylint:disable=attribute-defined-outside-init,protected-access
class Test_assertLocation(unittest.TestCase):
def _callFUT(self, adapter, parent):
from zope.security.adapter import assertLocation
return assertLocation(adapter, parent)
def test_w_non_ILocation(self):
class _NotAdapter(object):
pass
adapter = _NotAdapter()
parent = object()
returned = self._callFUT(adapter, parent)
self.assertTrue(isinstance(returned, LocationProxy))
self.assertIs(getProxiedObject(returned), adapter)
self.assertIs(returned.__parent__, parent)
def test_w_ILocation_no_parent(self):
@implementer(ILocation)
class _Adapter(object):
__parent__ = None
adapter = _Adapter()
parent = object()
returned = self._callFUT(adapter, parent)
self.assertIs(returned, adapter)
self.assertIs(returned.__parent__, parent)
def test_w_ILocation_w_parent(self):
parent = object()
@implementer(ILocation)
class _Adapter(object):
__parent__ = parent
adapter = _Adapter()
new_parent = object()
returned = self._callFUT(adapter, new_parent)
self.assertIs(returned, adapter)
self.assertIs(returned.__parent__, parent)
class LocatingTrustedAdapterFactoryTests(unittest.TestCase):
def _getTargetClass(self):
from zope.security.adapter import LocatingTrustedAdapterFactory
return LocatingTrustedAdapterFactory
def _makeOne(self, factory):
return self._getTargetClass()(factory)
def _makeFactory(self):
class _Factory(object):
__name__ = 'testing'
__module__ = 'zope.security.tests.test_adapter'
_called_with = ()
def __call__(self, *args):
self._called_with = args
return self
return _Factory()
def test_ctor(self):
factory = self._makeFactory()
ltaf = self._makeOne(factory)
self.assertIs(ltaf.factory, factory)
self.assertEqual(ltaf.__name__, 'testing')
self.assertEqual(ltaf.__module__, 'zope.security.tests.test_adapter')
def test__call__w_non_ILocation_non_spacesuit(self):
factory = self._makeFactory()
ltaf = self._makeOne(factory)
class _NotAdapter(object):
pass
adapter = _NotAdapter()
before = factory.__dict__.copy()
returned = ltaf(adapter)
self.assertIs(returned, factory)
after = {k: v for k, v in returned.__dict__.items()
if k != '_called_with'}
self.assertEqual(factory._called_with, (adapter,))
self.assertEqual(after, before) # no added attrs
def test__call__w_non_ILocation_non_spacesuit_multiple_args(self):
factory = self._makeFactory()
ltaf = self._makeOne(factory)
class _NotAdapter(object):
pass
adapter = _NotAdapter()
extra = object()
before = factory.__dict__.copy()
returned = ltaf(adapter, extra)
self.assertIs(returned, factory)
after = {k: v for k, v in returned.__dict__.items()
if k != '_called_with'}
self.assertEqual(factory._called_with, (adapter, extra))
self.assertEqual(after, before) # no added attrs
def test__call__w_ILocation_w_existing_parent_non_spacesuit(self):
factory = self._makeFactory()
parent = factory.__parent__ = object()
directlyProvides(factory, ILocation)
ltaf = self._makeOne(factory)
class _NotAdapter(object):
pass
adapter = _NotAdapter()
returned = ltaf(adapter)
self.assertIs(returned, factory)
self.assertIs(returned.__parent__, parent)
def test__call__w_ILocation_wo_existing_parent_non_spacesuit(self):
factory = self._makeFactory()
factory.__parent__ = None
directlyProvides(factory, ILocation)
ltaf = self._makeOne(factory)
class _NotAdapter(object):
pass
adapter = _NotAdapter()
returned = ltaf(adapter)
self.assertIs(returned, factory)
self.assertIs(returned.__parent__, adapter)
def test__call__w_non_ILocation_w_spacesuit(self):
from zope.security.proxy import ProxyFactory
from zope.security.proxy import removeSecurityProxy
factory = self._makeFactory()
ltaf = self._makeOne(factory)
class _NotAdapter(object):
pass
adapter = _NotAdapter()
proxy = ProxyFactory(adapter)
before = factory.__dict__.copy()
returned = ltaf(proxy)
self.assertFalse(returned is factory)
ploc = removeSecurityProxy(returned)
self.assertIs(ploc.__parent__, adapter)
unwrapped = getProxiedObject(ploc)
self.assertIs(unwrapped, factory)
after = {k: v for k, v in unwrapped.__dict__.items()
if k not in ('_called_with',)}
self.assertEqual(factory._called_with, (adapter,))
self.assertEqual(after, before) # no added attrs
def test__call__w_non_ILocation_w_spacesuit_multiple_args(self):
from zope.security.proxy import ProxyFactory
from zope.security.proxy import removeSecurityProxy
factory = self._makeFactory()
ltaf = self._makeOne(factory)
class _NotAdapter(object):
pass
adapter = _NotAdapter()
extra = object()
proxy = ProxyFactory(adapter)
before = factory.__dict__.copy()
returned = ltaf(proxy, extra)
self.assertFalse(returned is factory)
ploc = removeSecurityProxy(returned)
self.assertIs(ploc.__parent__, adapter)
unwrapped = getProxiedObject(ploc)
self.assertIs(unwrapped, factory)
after = {k: v for k, v in unwrapped.__dict__.items()
if k not in ('_called_with',)}
self.assertEqual(factory._called_with, (adapter, extra))
self.assertEqual(after, before) # no added attrs
def test__call__w_non_ILocation_multiple_args_extra_spacesuit(self):
from zope.security.proxy import ProxyFactory
from zope.security.proxy import removeSecurityProxy
factory = self._makeFactory()
ltaf = self._makeOne(factory)
class _NotAdapter(object):
pass
class _Extra(object):
pass
adapter = _NotAdapter()
extra = _Extra()
proxy = ProxyFactory(extra)
before = factory.__dict__.copy()
returned = ltaf(adapter, proxy)
self.assertFalse(returned is factory)
ploc = removeSecurityProxy(returned)
self.assertIs(ploc.__parent__, adapter)
unwrapped = getProxiedObject(ploc)
self.assertIs(unwrapped, factory)
after = {k: v for k, v in unwrapped.__dict__.items()
if k not in ('_called_with',)}
self.assertEqual(factory._called_with, (adapter, extra))
self.assertEqual(after, before) # no added attrs
def test__call__w_ILocation_w_spacesuit(self):
from zope.security.proxy import getObject
from zope.security.proxy import ProxyFactory
from zope.security.proxy import removeSecurityProxy
factory = self._makeFactory()
factory.__parent__ = factory.__name__ = None
directlyProvides(factory, ILocation)
ltaf = self._makeOne(factory)
class _Adapter(object):
pass
adapter = _Adapter()
proxy = ProxyFactory(adapter)
before = {k: v for k, v in factory.__dict__.items()
if k not in ('_called_with', '__parent__')}
returned = ltaf(proxy)
self.assertFalse(returned is factory)
ploc = removeSecurityProxy(returned)
self.assertIs(ploc.__parent__, adapter)
unwrapped = getObject(ploc)
self.assertIs(unwrapped, factory)
after = {k: v for k, v in unwrapped.__dict__.items()
if k not in ('_called_with', '__parent__')}
self.assertEqual(factory._called_with, (adapter,))
self.assertIs(factory.__parent__, adapter)
self.assertEqual(after, before) # no added attrs
def test__call__w_ILocation_w_spacesuit_w_existing_parent(self):
from zope.security.proxy import getObject
from zope.security.proxy import ProxyFactory
from zope.security.proxy import removeSecurityProxy
factory = self._makeFactory()
factory.__name__ = None
factory.__parent__ = parent = object()
directlyProvides(factory, ILocation)
ltaf = self._makeOne(factory)
class _Adapter(object):
pass
adapter = _Adapter()
proxy = ProxyFactory(adapter)
before = {k: v for k, v in factory.__dict__.items()
if k not in ('_called_with', '__parent__')}
returned = ltaf(proxy)
self.assertFalse(returned is factory)
ploc = removeSecurityProxy(returned)
self.assertIs(ploc.__parent__, parent)
unwrapped = getObject(ploc)
self.assertIs(unwrapped, factory)
after = {k: v for k, v in unwrapped.__dict__.items()
if k not in ('_called_with', '__parent__')}
self.assertEqual(factory._called_with, (adapter,))
self.assertEqual(after, before) # no added attrs
class TrustedAdapterFactoryTests(unittest.TestCase):
def _getTargetClass(self):
from zope.security.adapter import TrustedAdapterFactory
return TrustedAdapterFactory
def _makeOne(self, factory):
return self._getTargetClass()(factory)
def _makeFactory(self):
class _Factory(object):
__name__ = 'testing'
__module__ = 'zope.security.tests.test_adapter'
def __call__(self, *args):
self._called_with = args
return self
return _Factory()
def test__call__w_non_ILocation_w_spacesuit(self):
from zope.security.proxy import ProxyFactory
from zope.security.proxy import removeSecurityProxy
factory = self._makeFactory()
ltaf = self._makeOne(factory)
class _NotAdapter(object):
pass
adapter = _NotAdapter()
proxy = ProxyFactory(adapter)
before = factory.__dict__.copy()
returned = ltaf(proxy)
self.assertFalse(returned is factory)
unwrapped = removeSecurityProxy(returned)
self.assertTrue('__parent__' not in unwrapped.__dict__)
self.assertIs(unwrapped, factory)
after = {k: v for k, v in unwrapped.__dict__.items()
if k not in ('_called_with',)}
self.assertEqual(factory._called_with, (adapter,))
self.assertEqual(after, before) # no added attrs
def test__call__w_non_ILocation_w_spacesuit_multiple_args(self):
from zope.security.proxy import ProxyFactory
from zope.security.proxy import removeSecurityProxy
factory = self._makeFactory()
ltaf = self._makeOne(factory)
class _NotAdapter(object):
pass
adapter = _NotAdapter()
extra = object()
proxy = ProxyFactory(adapter)
before = factory.__dict__.copy()
returned = ltaf(proxy, extra)
self.assertFalse(returned is factory)
unwrapped = removeSecurityProxy(returned)
self.assertTrue('__parent__' not in unwrapped.__dict__)
self.assertIs(unwrapped, factory)
after = {k: v for k, v in unwrapped.__dict__.items()
if k not in ('_called_with',)}
self.assertEqual(factory._called_with, (adapter, extra))
self.assertEqual(after, before) # no added attrs
def test__call__w_non_ILocation_multiple_args_extra_spacesuit(self):
from zope.security.proxy import ProxyFactory
from zope.security.proxy import removeSecurityProxy
factory = self._makeFactory()
ltaf = self._makeOne(factory)
class _NotAdapter(object):
pass
class _Extra(object):
pass
adapter = _NotAdapter()
extra = _Extra()
proxy = ProxyFactory(extra)
before = factory.__dict__.copy()
returned = ltaf(adapter, proxy)
self.assertFalse(returned is factory)
unwrapped = removeSecurityProxy(returned)
self.assertTrue('__parent__' not in unwrapped.__dict__)
self.assertIs(unwrapped, factory)
after = {k: v for k, v in unwrapped.__dict__.items()
if k not in ('_called_with',)}
self.assertEqual(factory._called_with, (adapter, extra))
self.assertEqual(after, before) # no added attrs
def test__call__w_ILocation_w_spacesuit(self):
from zope.security.proxy import ProxyFactory
from zope.security.proxy import removeSecurityProxy
factory = self._makeFactory()
factory.__parent__ = factory.__name__ = None
directlyProvides(factory, ILocation)
ltaf = self._makeOne(factory)
class _Adapter(object):
pass
adapter = _Adapter()
proxy = ProxyFactory(adapter)
before = {k: v for k, v in factory.__dict__.items()
if k not in ('_called_with', '__parent__')}
returned = ltaf(proxy)
self.assertFalse(returned is factory)
unwrapped = removeSecurityProxy(returned)
self.assertIs(unwrapped.__parent__, adapter)
self.assertIs(unwrapped, factory)
after = {k: v for k, v in unwrapped.__dict__.items()
if k not in ('_called_with', '__parent__')}
self.assertEqual(factory._called_with, (adapter,))
self.assertEqual(after, before) # no added attrs
def test__call__w_ILocation_w_spacesuit_w_existing_parent(self):
from zope.security.proxy import ProxyFactory
from zope.security.proxy import removeSecurityProxy
factory = self._makeFactory()
factory.__name__ = None
factory.__parent__ = parent = object()
directlyProvides(factory, ILocation)
ltaf = self._makeOne(factory)
class _Adapter(object):
pass
adapter = _Adapter()
proxy = ProxyFactory(adapter)
before = {k: v for k, v in factory.__dict__.items()
if k not in ('_called_with', '__parent__')}
returned = ltaf(proxy)
self.assertFalse(returned is factory)
unwrapped = removeSecurityProxy(returned)
self.assertIs(unwrapped.__parent__, parent)
self.assertIs(unwrapped, factory)
after = {k: v for k, v in unwrapped.__dict__.items()
if k not in ('_called_with', '__parent__')}
self.assertEqual(factory._called_with, (adapter,))
self.assertEqual(after, before) # no added attrs
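# LocatingUntrustedAdapterFactory tests: with no security proxies involved, a factory that
# does not provide ILocation is wrapped in a location proxy (getProxiedObject unwraps back to
# the original factory), while an ILocation-providing factory is returned as-is and only
# gains a __parent__ pointing at the adapted object if it did not already have one.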
class LocatingUntrustedAdapterFactoryTests(unittest.TestCase):
def _getTargetClass(self):
from zope.security.adapter import LocatingUntrustedAdapterFactory
return LocatingUntrustedAdapterFactory
def _makeOne(self, factory):
return self._getTargetClass()(factory)
def _makeFactory(self):
class _Factory(object):
__name__ = 'testing'
__module__ = 'zope.security.tests.test_adapter'
_called_with = ()
def __call__(self, *args):
self._called_with = args
return self
return _Factory()
def test_ctor(self):
factory = self._makeFactory()
ltaf = self._makeOne(factory)
self.assertIs(ltaf.factory, factory)
self.assertEqual(ltaf.__name__, 'testing')
self.assertEqual(ltaf.__module__, 'zope.security.tests.test_adapter')
def test__call__w_non_ILocation(self):
factory = self._makeFactory()
ltaf = self._makeOne(factory)
class _NotAdapter(object):
pass
adapter = _NotAdapter()
before = factory.__dict__.copy()
returned = ltaf(adapter)
self.assertFalse(returned is factory)
unwrapped = getProxiedObject(returned)
self.assertIs(unwrapped, factory)
after = {k: v for k, v in returned.__dict__.items()
if k != '_called_with'}
self.assertEqual(factory._called_with, (adapter,))
self.assertEqual(after, before) # no added attrs
def test__call__w_non_ILocation_multiple_args(self):
factory = self._makeFactory()
ltaf = self._makeOne(factory)
class _NotAdapter(object):
pass
adapter = _NotAdapter()
extra = object()
before = factory.__dict__.copy()
returned = ltaf(adapter, extra)
self.assertFalse(returned is factory)
unwrapped = getProxiedObject(returned)
self.assertIs(unwrapped, factory)
after = {k: v for k, v in returned.__dict__.items()
if k != '_called_with'}
self.assertEqual(factory._called_with, (adapter, extra))
self.assertEqual(after, before) # no added attrs
def test__call__w_ILocation_w_existing_parent(self):
factory = self._makeFactory()
parent = factory.__parent__ = object()
directlyProvides(factory, ILocation)
ltaf = self._makeOne(factory)
class _NotAdapter(object):
pass
adapter = _NotAdapter()
returned = ltaf(adapter)
self.assertIs(returned, factory)
self.assertIs(returned.__parent__, parent)
def test__call__w_ILocation_wo_existing_parent(self):
factory = self._makeFactory()
factory.__parent__ = None
directlyProvides(factory, ILocation)
ltaf = self._makeOne(factory)
class _NotAdapter(object):
pass
adapter = _NotAdapter()
returned = ltaf(adapter)
self.assertIs(returned, factory)
self.assertIs(returned.__parent__, adapter)
def test_suite():
return unittest.defaultTestLoader.loadTestsFromName(__name__)
| 39.463312
| 78
| 0.644496
| 1,940
| 18,824
| 5.891753
| 0.070619
| 0.039895
| 0.036395
| 0.04042
| 0.888626
| 0.886089
| 0.871391
| 0.871391
| 0.859668
| 0.859668
| 0
| 0.000432
| 0.261368
| 18,824
| 476
| 79
| 39.546218
| 0.821634
| 0.038674
| 0
| 0.899522
| 0
| 0
| 0.029085
| 0.008932
| 0
| 0
| 0
| 0
| 0.212919
| 1
| 0.088517
| false
| 0.050239
| 0.076555
| 0.009569
| 0.270335
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
a3de7eca3453850030b226e6b49ee1f75676ae22
| 183
|
py
|
Python
|
_plotly_future_/extract_chart_studio.py
|
piyush1301/plotly.py
|
50cd5c4cd4732042422751c7760acbab8dd8a50d
|
[
"MIT"
] | 6
|
2019-05-03T02:12:04.000Z
|
2020-03-01T06:33:21.000Z
|
_plotly_future_/extract_chart_studio.py
|
piyush1301/plotly.py
|
50cd5c4cd4732042422751c7760acbab8dd8a50d
|
[
"MIT"
] | null | null | null |
_plotly_future_/extract_chart_studio.py
|
piyush1301/plotly.py
|
50cd5c4cd4732042422751c7760acbab8dd8a50d
|
[
"MIT"
] | 5
|
2019-05-18T16:50:11.000Z
|
2021-07-06T21:14:36.000Z
|
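# This module opts plotly into the "extract_chart_studio" future behaviour: importing it adds
# the flag to _future_flags. The guard call is presumably there to raise if plotly has already
# been imported, so this module must be imported before plotly itself. A minimal usage sketch
# (hypothetical script, assuming the package layout implied by this file's path):
#     import _plotly_future_.extract_chart_studio  # must come before importing plotly
#     import plotly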
from __future__ import absolute_import
from _plotly_future_ import _future_flags, _assert_plotly_not_imported
_assert_plotly_not_imported()
_future_flags.add('extract_chart_studio')
| 30.5
| 70
| 0.89071
| 25
| 183
| 5.64
| 0.52
| 0.170213
| 0.212766
| 0.326241
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065574
| 183
| 5
| 71
| 36.6
| 0.824561
| 0
| 0
| 0
| 0
| 0
| 0.10929
| 0
| 0
| 0
| 0
| 0
| 0.5
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
433939d1208ddbf5d2e9ffaff01f537bd1548ed7
| 39,899
|
py
|
Python
|
hallo/test/modules/channel_control/test_de_voice.py
|
joshcoales/Hallo
|
17145d8f76552ecd4cbc5caef8924bd2cf0cbf24
|
[
"MIT"
] | 1
|
2018-05-19T22:27:20.000Z
|
2018-05-19T22:27:20.000Z
|
hallo/test/modules/channel_control/test_de_voice.py
|
joshcoales/Hallo
|
17145d8f76552ecd4cbc5caef8924bd2cf0cbf24
|
[
"MIT"
] | 75
|
2015-09-26T18:07:18.000Z
|
2022-01-04T07:15:11.000Z
|
hallo/test/modules/channel_control/test_de_voice.py
|
SpangleLabs/Hallo
|
17145d8f76552ecd4cbc5caef8924bd2cf0cbf24
|
[
"MIT"
] | 1
|
2021-04-10T12:02:47.000Z
|
2021-04-10T12:02:47.000Z
|
from hallo.events import EventMessage, EventMode
from hallo.server import Server
from hallo.test.server_mock import ServerMock
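# Each test below follows the same pattern: build a ServerMock via the hallo_getter fixture,
# register channels/users and their op/voice memberships, dispatch a "devoice" EventMessage
# through the function dispatcher, then inspect get_send_data() for either an error reply or
# an EventMode "-v" mode change; the mock server is removed again in the finally block.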
def test_devoice_not_irc(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = "NOT_IRC"
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
chan1.add_user(user1)
chan1.add_user(
serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
)
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "devoice"))
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "only available for irc" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_0_privmsg(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
chan1.add_user(user1)
chan1.add_user(
serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
)
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, None, user1, "devoice"))
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "in a private message" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_0_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
chan1.add_user(user1)
chan1.add_user(
serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
)
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "devoice"))
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_0_not_voice(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_voice = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "devoice"))
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have voice" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_0(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_voice = True
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(EventMessage(serv1, chan1, user1, "devoice"))
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan1
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-v " + user1.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
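# One-argument forms: "devoice <channel>" sent in a private message, "devoice <channel>"
# sent from another channel, and "devoice <user>" sent in the target channel.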
def test_devoice_1priv_not_known(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "devoice other_channel")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "other_channel is not known" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1priv_not_in_channel(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
serv1.get_channel_by_address("test_chan2", "test_chan2")
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "devoice test_chan2")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "not in that channel" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1priv_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "devoice test_chan1")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user1 is not in test_chan1" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1priv_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "devoice test_chan1")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1priv_not_voice(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_voice = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "devoice test_chan1")
)
data = serv1.get_send_data(1, user1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have voice" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1priv(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_voice = True
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, None, user1, "devoice test_chan1")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan1
assert data[1].user == user1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-v " + user1.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_chan_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user2)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_op = False
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user1 is not in test_chan2" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_chan_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user1)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user1 = chan2.get_membership_by_user(user1)
chan2_user1.is_op = False
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_chan_not_voice(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user1)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user1 = chan2.get_membership_by_user(user1)
chan2_user1.is_voice = False
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have voice" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_chan(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1.add_user(user_hallo)
chan2.add_user(user1)
chan2.add_user(user_hallo)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2_user1 = chan2.get_membership_by_user(user1)
chan2_user1.is_voice = True
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan2
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-v " + user1.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_user_not_here(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user2 is not in test_chan1" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_user_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = False
chan1.add_user(user2)
chan1_user2 = chan1.get_membership_by_user(user2)
chan1_user2.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_user_not_voice(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan1.add_user(user2)
chan1_user2 = chan1.get_membership_by_user(user2)
chan1_user2.is_voice = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have voice" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_1_user(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan1.add_user(user2)
chan1_user2 = chan1.get_membership_by_user(user2)
chan1_user2.is_voice = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan1
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-v " + user2.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
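# Two-argument forms: "devoice <channel> <user>" and "devoice <user> <channel>", covering
# unknown users, users not present in the channel, the bot not being in the channel, missing
# op power, targets without voice, and the successful mode change.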
def test_devoice_2_chan_user_not_known(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user1 = chan2.get_membership_by_user(user2)
chan2_user1.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2 test_user3")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not known" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_chan_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
serv1.get_user_by_address("test_user3", "test_user3")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user1 = chan2.get_membership_by_user(user2)
chan2_user1.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2 test_user3")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not in test_chan2" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_chan_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user1 = chan2.get_membership_by_user(user2)
chan2_user1.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2 test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_chan_not_voice(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_voice = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2 test_user2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have voice" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_chan(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_voice = True
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_chan2 test_user2")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan2
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-v " + user2.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_user_not_in_channel(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = False
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user1 = chan2.get_membership_by_user(user2)
chan2_user1.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "i'm not in that channel" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_user_user_not_known(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user1 = chan2.get_membership_by_user(user2)
chan2_user1.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user3 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not known" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_user_user_not_there(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
serv1.get_user_by_address("test_user3", "test_user3")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user1 = chan2.get_membership_by_user(user2)
chan2_user1.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user3 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "test_user3 is not in test_chan2" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_user_no_power(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user1 = chan2.get_membership_by_user(user2)
chan2_user1.is_op = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = False
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "don't have power" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_user_not_voice(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_voice = False
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2 test_chan2")
)
data = serv1.get_send_data(1, chan1, EventMessage)
assert "error" in data[0].text.lower()
assert "doesn't have voice" in data[0].text.lower()
finally:
test_hallo.remove_server(serv1)
def test_devoice_2_user(hallo_getter):
test_hallo = hallo_getter({"channel_control"})
serv1 = ServerMock(test_hallo)
serv1.name = "test_serv1"
serv1.type = Server.TYPE_IRC
test_hallo.add_server(serv1)
chan1 = serv1.get_channel_by_address("test_chan1".lower(), "test_chan1")
chan1.in_channel = True
chan2 = serv1.get_channel_by_address("test_chan2".lower(), "test_chan2")
chan2.in_channel = True
user1 = serv1.get_user_by_address("test_user1".lower(), "test_user1")
user2 = serv1.get_user_by_address("test_user2".lower(), "test_user2")
user_hallo = serv1.get_user_by_address(serv1.get_nick().lower(), serv1.get_nick())
chan1.add_user(user1)
chan1_user1 = chan1.get_membership_by_user(user1)
chan1_user1.is_op = False
chan1.add_user(user_hallo)
chan1_hallo = chan1.get_membership_by_user(user_hallo)
chan1_hallo.is_op = True
chan2.add_user(user2)
chan2_user2 = chan2.get_membership_by_user(user2)
chan2_user2.is_voice = True
chan2.add_user(user_hallo)
chan2_hallo = chan2.get_membership_by_user(user_hallo)
chan2_hallo.is_op = True
try:
test_hallo.function_dispatcher.dispatch(
EventMessage(serv1, chan1, user1, "devoice test_user2 test_chan2")
)
data = serv1.get_send_data(2)
assert "error" not in data[1].text.lower()
assert data[0].channel == chan2
assert data[1].channel == chan1
assert data[0].__class__ == EventMode
assert data[1].__class__ == EventMessage
assert "-v " + user2.name in data[0].mode_changes
assert "status taken" in data[1].text.lower()
finally:
test_hallo.remove_server(serv1)
| 41.303313
| 93
| 0.709391
| 5,685
| 39,899
| 4.614072
| 0.014424
| 0.066181
| 0.048073
| 0.061568
| 0.991003
| 0.990812
| 0.990812
| 0.989554
| 0.989554
| 0.989554
| 0
| 0.04801
| 0.179353
| 39,899
| 965
| 94
| 41.346114
| 0.753108
| 0
| 0
| 0.909392
| 0
| 0
| 0.101782
| 0
| 0
| 0
| 0
| 0
| 0.099448
| 1
| 0.033149
| false
| 0
| 0.003315
| 0
| 0.036464
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
43470d9cbc609a02fb8fceade0e73248e68aad24
| 44,379
|
py
|
Python
|
fireworks/core/tests/test_launchpad.py
|
talkative/fireworks
|
582e9d2bc8b513171012a30873ddd860dbcc5472
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
fireworks/core/tests/test_launchpad.py
|
talkative/fireworks
|
582e9d2bc8b513171012a30873ddd860dbcc5472
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
fireworks/core/tests/test_launchpad.py
|
talkative/fireworks
|
582e9d2bc8b513171012a30873ddd860dbcc5472
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
# coding: utf-8
from __future__ import unicode_literals, division
__author__ = "Bharat Medasani"
__copyright__ = "Copyright 2012, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Bharat Medasani"
__email__ = "mbkumar@gmail.com"
__date__ = "7/01/14"
import unittest
import time
import os
import glob
import shutil
import datetime
from multiprocessing import Process
import filecmp
from fireworks import Firework, Workflow, LaunchPad, FWorker
from fw_tutorials.dynamic_wf.addmod_task import AddModifyTask
from fireworks.core.rocket_launcher import rapidfire, launch_rocket
from fireworks.queue.queue_launcher import setup_offline_job
from fireworks.user_objects.firetasks.script_task import ScriptTask, PyTask
from fireworks.core.tests.tasks import ExceptionTestTask, ExecutionCounterTask, SlowAdditionTask, WaitWFLockTask
import fireworks.fw_config
from monty.os import cd
TESTDB_NAME = 'fireworks_unittest'
MODULE_DIR = os.path.dirname(os.path.abspath(__file__))
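# All LaunchPad test cases below talk to a real MongoDB instance: setUpClass resets a
# LaunchPad bound to the 'fireworks_unittest' database and skips the whole case if MongoDB is
# not reachable on localhost:27017; tearDown resets the database, removes FW.json, and deletes
# any launcher_* directories created under this module's directory.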
class LaunchPadTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.lp = None
cls.fworker = FWorker()
try:
cls.lp = LaunchPad(name=TESTDB_NAME, strm_lvl='ERROR')
cls.lp.reset(password=None, require_password=False)
except:
raise unittest.SkipTest('MongoDB is not running in localhost:27017! Skipping tests.')
@classmethod
def tearDownClass(cls):
if cls.lp:
cls.lp.connection.drop_database(TESTDB_NAME)
def setUp(self):
self.old_wd = os.getcwd()
self.LP_LOC = os.path.join(MODULE_DIR,'launchpad.yaml')
self.lp.to_file(self.LP_LOC)
def tearDown(self):
self.lp.reset(password=None,require_password=False)
# Delete launch locations
if os.path.exists(os.path.join('FW.json')):
os.remove('FW.json')
os.chdir(self.old_wd)
for ldir in glob.glob(os.path.join(MODULE_DIR,"launcher_*")):
shutil.rmtree(ldir)
if os.path.exists(self.LP_LOC):
os.remove(self.LP_LOC)
def test_dict_from_file(self):
lp = LaunchPad.from_file(self.LP_LOC)
lp_dict = lp.to_dict()
new_lp = LaunchPad.from_dict(lp_dict)
self.assertIsInstance(new_lp, LaunchPad)
def test_reset(self):
# Store some test fireworks
        # Attempt a couple of ways to reset the lp and check
fw = Firework(ScriptTask.from_str('echo "hello"'), name="hello")
wf = Workflow([fw], name='test_workflow')
self.lp.add_wf(wf)
self.assertRaises(ValueError, self.lp.reset, '', False, 0)
self.assertEqual(self.lp.workflows.count(), 1)
self.lp.reset('',require_password=False)
self.assertFalse(self.lp.get_fw_ids())
self.assertFalse(self.lp.get_wf_ids())
# test failsafe in a strict way
for x in range(30):
self.lp.add_wf(Workflow([Firework(ScriptTask.from_str('echo "hello"'))]))
self.assertRaises(ValueError, self.lp.reset, '')
self.lp.reset('', False, 100) # reset back
def test_pw_check(self):
fw = Firework(ScriptTask.from_str('echo "hello"'), name="hello")
self.lp.add_wf(fw)
args = ('',)
self.assertRaises(ValueError,self.lp.reset, *args)
def test_add_wf(self):
fw = Firework(ScriptTask.from_str('echo "hello"'), name="hello")
self.lp.add_wf(fw)
wf_id = self.lp.get_wf_ids()
self.assertEqual(len(wf_id), 1)
for fw_id in self.lp.get_wf_ids():
wf = self.lp.get_wf_by_fw_id_lzyfw(fw_id)
self.assertEqual(len(wf.id_fw.keys()), 1)
fw2 = Firework(ScriptTask.from_str('echo "goodbye"'), name="goodbye")
wf = Workflow([fw, fw2], name='test_workflow')
self.lp.add_wf(wf)
#fw = self.lp.get_fw_ids()
#self.assertEqual(len(wf.id_fw.keys()), 2)
fw_ids = self.lp.get_fw_ids()
self.assertEqual(len(fw_ids), 3)
self.lp.reset('',require_password=False)
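# The defuse/reignite/rerun/archive/delete tests operate on a 14-firework workflow modelled on
# Greek mythology: a parent ("Cronus"), Zeus and his siblings, a separate Lapetus branch, and
# several children. setUp records the fw_id groups (zeus_fw_id, zeus_child_fw_ids,
# lapetus_desc_fw_ids, zeus_sib_fw_ids) so the tests can assert which subsets completed.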
class LaunchPadDefuseReigniteRerunArchiveDeleteTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.lp = None
cls.fworker = FWorker()
try:
cls.lp = LaunchPad(name=TESTDB_NAME, strm_lvl='ERROR')
cls.lp.reset(password=None, require_password=False)
except:
raise unittest.SkipTest('MongoDB is not running in localhost:27017! Skipping tests.')
@classmethod
def tearDownClass(cls):
if cls.lp:
cls.lp.connection.drop_database(TESTDB_NAME)
def setUp(self):
# define the individual FireWorks used in the Workflow
# Parent Firework
fw_p = Firework(ScriptTask.from_str(
'echo "Cronus is the ruler of titans"',
{'store_stdout':True}), name="parent", fw_id=1)
# Sibling fireworks
fw_s1 = Firework(ScriptTask.from_str(
'echo "Zeus is son of Cronus"',
{'store_stdout':True}), name="sib1", fw_id=2, parents=fw_p)
fw_s2 = Firework(ScriptTask.from_str(
'echo "Poisedon is brother of Zeus"',
{'store_stdout':True}), name="sib2", fw_id=3, parents=fw_p)
fw_s3 = Firework(ScriptTask.from_str(
'echo "Hades is brother of Zeus"',
{'store_stdout':True}), name="sib3", fw_id=4, parents=fw_p)
fw_s4 = Firework(ScriptTask.from_str(
'echo "Demeter is sister & wife of Zeus"',
{'store_stdout':True}), name="sib4", fw_id=5, parents=fw_p)
fw_s5 = Firework(ScriptTask.from_str(
'echo "Lapetus is son of Oceanus"',
{'store_stdout':True}), name="cousin1", fw_id=6)
# Children fireworks
fw_c1 = Firework(ScriptTask.from_str(
'echo "Ares is son of Zeus"',
{'store_stdout':True}), name="c1", fw_id=7, parents=fw_s1)
fw_c2 = Firework(ScriptTask.from_str(
'echo "Persephone is daughter of Zeus & Demeter and wife of Hades"',
{'store_stdout':True}), name="c2", fw_id=8, parents=[fw_s1,fw_s4])
fw_c3 = Firework(ScriptTask.from_str(
'echo "Makaria is daughter of Hades & Persephone"',
{'store_stdout':True}), name="c3", fw_id=9, parents=[fw_s3,fw_c2])
fw_c4 = Firework(ScriptTask.from_str(
'echo "Dione is descendant of Lapetus"',
{'store_stdout':True}), name="c4", fw_id=10, parents=fw_s5)
fw_c5 = Firework(ScriptTask.from_str(
'echo "Aphrodite is son of of Zeus and Dione"',
{'store_stdout':True}), name="c5", fw_id=11, parents=[fw_s1,fw_c4])
fw_c6 = Firework(ScriptTask.from_str(
'echo "Atlas is son of of Lapetus"',
{'store_stdout':True}), name="c6", fw_id=12,parents=fw_s5)
fw_c7 = Firework(ScriptTask.from_str(
'echo "Maia is daughter of Atlas"',
{'store_stdout':True}), name="c7", fw_id=13, parents=fw_c6)
fw_c8 = Firework(ScriptTask.from_str(
'echo "Hermes is daughter of Maia and Zeus"',
{'store_stdout':True}), name="c8", fw_id=14, parents=[fw_s1,fw_c7])
# assemble Workflow from FireWorks and their connections by id
workflow = Workflow([fw_p,fw_s1,fw_s2,fw_s3,fw_s4,fw_s5,fw_c1,fw_c2,
fw_c3,fw_c4,fw_c5,fw_c6,fw_c7,fw_c8])
self.lp.add_wf(workflow)
# Give names to fw_ids
self.zeus_fw_id = 2
self.zeus_child_fw_ids = set([7,8,9,11,14])
self.lapetus_desc_fw_ids = set([6,10,12,13])
self.zeus_sib_fw_ids = set([3,4,5])
self.par_fw_id = 1
self.all_ids = self.zeus_child_fw_ids | self.lapetus_desc_fw_ids | \
self.zeus_sib_fw_ids | set([self.zeus_fw_id]) | \
set([self.par_fw_id])
self.old_wd = os.getcwd()
def tearDown(self):
self.lp.reset(password=None,require_password=False)
# Delete launch locations
if os.path.exists(os.path.join('FW.json')):
os.remove('FW.json')
os.chdir(self.old_wd)
for ldir in glob.glob(os.path.join(MODULE_DIR,"launcher_*")):
shutil.rmtree(ldir)
def _teardown(self, dests):
for f in dests:
if os.path.exists(f):
os.remove(f)
def test_pause_fw(self):
self.lp.pause_fw(self.zeus_fw_id)
paused_ids = self.lp.get_fw_ids({'state':'PAUSED'})
self.assertIn(self.zeus_fw_id, paused_ids)
try:
# Launch remaining fireworks
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
            # Ensure that, except for Zeus and his children, all other fireworks are launched
completed_ids = set(self.lp.get_fw_ids({'state':'COMPLETED'}))
# Check that Lapetus and his descendants are subset of completed fwids
self.assertTrue(self.lapetus_desc_fw_ids.issubset(completed_ids))
# Check that Zeus siblings are subset of completed fwids
self.assertTrue(self.zeus_sib_fw_ids.issubset(completed_ids))
            # Check that Zeus and his children are a subset of the incomplete fw_ids
fws_no_run = set(self.lp.get_fw_ids({'state':{'$nin':['COMPLETED']}}))
self.assertIn(self.zeus_fw_id,fws_no_run)
self.assertTrue(self.zeus_child_fw_ids.issubset(fws_no_run))
# Setup Zeus to run
self.lp.resume_fw(self.zeus_fw_id)
# Launch remaining fireworks
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
# Check that Zeus and children are all completed now
completed_ids = set(self.lp.get_fw_ids({'state':'COMPLETED'}))
self.assertIn(self.zeus_fw_id,completed_ids)
self.assertTrue(self.zeus_child_fw_ids.issubset(completed_ids))
except:
raise
def test_defuse_fw(self):
# defuse Zeus
self.lp.defuse_fw(self.zeus_fw_id)
defused_ids = self.lp.get_fw_ids({'state':'DEFUSED'})
self.assertIn(self.zeus_fw_id, defused_ids)
try:
# Launch remaining fireworks
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
            # Ensure that, except for Zeus and his children, all other fireworks are launched
completed_ids = set(self.lp.get_fw_ids({'state':'COMPLETED'}))
# Check that Lapetus and his descendants are subset of completed fwids
self.assertTrue(self.lapetus_desc_fw_ids.issubset(completed_ids))
# Check that Zeus siblings are subset of completed fwids
self.assertTrue(self.zeus_sib_fw_ids.issubset(completed_ids))
            # Check that Zeus and his children are a subset of the incomplete fw_ids
fws_no_run = set(self.lp.get_fw_ids({'state':{'$nin':['COMPLETED']}}))
self.assertIn(self.zeus_fw_id,fws_no_run)
self.assertTrue(self.zeus_child_fw_ids.issubset(fws_no_run))
except:
raise
def test_defuse_fw_after_completion(self):
# Launch rockets in rapidfire
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
# defuse Zeus
self.lp.defuse_fw(self.zeus_fw_id)
defused_ids = self.lp.get_fw_ids({'state':'DEFUSED'})
self.assertIn(self.zeus_fw_id,defused_ids)
completed_ids = set(self.lp.get_fw_ids({'state':'COMPLETED'}))
self.assertFalse(self.zeus_child_fw_ids.issubset(completed_ids))
def test_reignite_fw(self):
# Defuse Zeus
self.lp.defuse_fw(self.zeus_fw_id)
defused_ids = self.lp.get_fw_ids({'state':'DEFUSED'})
self.assertIn(self.zeus_fw_id,defused_ids)
# Launch remaining fireworks
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
# Reignite Zeus and his children's fireworks and launch them
self.lp.reignite_fw(self.zeus_fw_id)
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
# Check for the status of Zeus and children in completed fwids
completed_ids = set(self.lp.get_fw_ids({'state':'COMPLETED'}))
self.assertIn(self.zeus_fw_id,completed_ids)
self.assertTrue(self.zeus_child_fw_ids.issubset(completed_ids))
def test_pause_wf(self):
# pause Workflow containing Zeus
self.lp.pause_wf(self.zeus_fw_id)
paused_ids = self.lp.get_fw_ids({'state':'PAUSED'})
self.assertIn(self.zeus_fw_id,paused_ids)
# Launch remaining fireworks
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
# Check for the state of all fws in Zeus workflow in incomplete fwids
fws_no_run = set(self.lp.get_fw_ids({'state':{'$nin':['COMPLETED']}}))
self.assertEqual(fws_no_run,self.all_ids)
def test_defuse_wf(self):
# defuse Workflow containing Zeus
self.lp.defuse_wf(self.zeus_fw_id)
defused_ids = self.lp.get_fw_ids({'state':'DEFUSED'})
self.assertIn(self.zeus_fw_id,defused_ids)
# Launch remaining fireworks
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
# Check for the state of all fws in Zeus workflow in incomplete fwids
fws_no_run = set(self.lp.get_fw_ids({'state':{'$nin':['COMPLETED']}}))
self.assertEqual(fws_no_run,self.all_ids)
def test_defuse_wf_after_partial_run(self):
# Run a firework before defusing Zeus
launch_rocket(self.lp, self.fworker)
print('----------\nafter launch rocket\n--------')
# defuse Workflow containing Zeus
print('----------\nstarting defuse rocket\n--------')
self.lp.defuse_wf(self.zeus_fw_id)
print('----------\nafter defuse rocket\n--------')
defused_ids = self.lp.get_fw_ids({'state':'DEFUSED'})
print('def ids', defused_ids)
print('zeus id', self.zeus_fw_id)
self.assertIn(self.zeus_fw_id,defused_ids)
fws_no_run = set(self.lp.get_fw_ids({'state':'COMPLETED'}))
self.assertEqual(len(fws_no_run),0)
# Try launching fireworks and check if any are launched
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
fws_no_run = set(self.lp.get_fw_ids({'state':'COMPLETED'}))
self.assertEqual(len(fws_no_run),0)
def test_reignite_wf(self):
# Defuse workflow containing Zeus
self.lp.defuse_wf(self.zeus_fw_id)
defused_ids = self.lp.get_fw_ids({'state':'DEFUSED'})
self.assertIn(self.zeus_fw_id,defused_ids)
# Launch any remaining fireworks
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
# Reignite Zeus and his children's fireworks and launch them
self.lp.reignite_wf(self.zeus_fw_id)
rapidfire(self.lp, FWorker(),m_dir=MODULE_DIR)
        # Check that all fireworks in the Zeus workflow are now in the completed fw_ids
fws_completed = set(self.lp.get_fw_ids({'state':'COMPLETED'}))
self.assertEqual(fws_completed, self.all_ids)
def test_archive_wf(self):
# Run a firework before archiving Zeus
launch_rocket(self.lp, self.fworker)
# archive Workflow containing Zeus. Ensure all are archived
self.lp.archive_wf(self.zeus_fw_id)
archived_ids = set(self.lp.get_fw_ids({'state':'ARCHIVED'}))
self.assertEqual(archived_ids, self.all_ids)
# Try to launch the fireworks and check if any are launched
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
fws_completed = set(self.lp.get_fw_ids({'state':'COMPLETED'}))
self.assertFalse(fws_completed)
# Query for provenance
fw = self.lp.get_fw_by_id(self.zeus_fw_id)
self.assertEqual(fw.state,'ARCHIVED')
def test_delete_wf(self):
# Run a firework before deleting Zeus
launch_rocket(self.lp, self.fworker)
# Delete workflow containing Zeus.
self.lp.delete_wf(self.zeus_fw_id)
# Check if any fireworks and the workflow are available
with self.assertRaises(ValueError):
self.lp.get_wf_by_fw_id(self.zeus_fw_id)
fw_ids = self.lp.get_fw_ids()
self.assertFalse(fw_ids)
wf_ids = self.lp.get_wf_ids()
self.assertFalse(wf_ids)
def test_rerun_fws2(self):
# Launch all fireworks
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
fw = self.lp.get_fw_by_id(self.zeus_fw_id)
launches = fw.launches
first_ldir = launches[0].launch_dir
ts = datetime.datetime.utcnow()
# Rerun Zeus
self.lp.rerun_fw(self.zeus_fw_id, rerun_duplicates=True)
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
fw = self.lp.get_fw_by_id(self.zeus_fw_id)
launches = fw.launches
fw_start_t = launches[0].time_start
second_ldir = launches[0].launch_dir
self.assertNotEqual(first_ldir,second_ldir)
self.assertTrue(fw_start_t > ts)
for fw_id in self.zeus_child_fw_ids:
fw = self.lp.get_fw_by_id(fw_id)
fw_start_t = fw.launches[0].time_start
self.assertTrue(fw_start_t > ts)
for fw_id in self.zeus_sib_fw_ids:
fw = self.lp.get_fw_by_id(fw_id)
fw_start_t = fw.launches[0].time_start
self.assertFalse(fw_start_t > ts)
class LaunchPadLostRunsDetectTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.lp = None
cls.fworker = FWorker()
try:
cls.lp = LaunchPad(name=TESTDB_NAME, strm_lvl='ERROR')
cls.lp.reset(password=None, require_password=False)
except:
raise unittest.SkipTest('MongoDB is not running in localhost:27017! Skipping tests.')
@classmethod
def tearDownClass(cls):
if cls.lp:
cls.lp.connection.drop_database(TESTDB_NAME)
def setUp(self):
# Define a timed firework
fw_timer = Firework(PyTask(func='time.sleep',args=[5]), name="timer")
self.lp.add_wf(fw_timer)
# Get assigned fwid for timer firework
self.fw_id = self.lp.get_fw_ids({'name':'timer'},limit=1)[0]
self.old_wd = os.getcwd()
def tearDown(self):
self.lp.reset(password=None,require_password=False)
# Delete launch locations
if os.path.exists('FW.json'):
os.remove('FW.json')
os.chdir(self.old_wd)
for ldir in glob.glob(os.path.join(MODULE_DIR,"launcher_*")):
shutil.rmtree(ldir)
def test_detect_lostruns(self):
# Launch the timed firework in a separate process
class RocketProcess(Process):
def __init__(self, lpad, fworker):
super(self.__class__,self).__init__()
self.lpad = lpad
self.fworker = fworker
def run(self):
launch_rocket(self.lpad, self.fworker)
rp = RocketProcess(self.lp, self.fworker)
rp.start()
# Wait for fw to start
it = 0
while not any([f.state == 'RUNNING' for f in self.lp.get_wf_by_fw_id_lzyfw(self.fw_id).fws]):
time.sleep(1) # Wait 1 sec
it += 1
if it == 10:
raise ValueError("FW never starts running")
rp.terminate() # Kill the rocket
l, f, i = self.lp.detect_lostruns(0.01, max_runtime=5, min_runtime=0)
self.assertEqual((l, f), ([1], [1]))
time.sleep(4) # Wait double the expected exec time and test
l, f, i = self.lp.detect_lostruns(2)
self.assertEqual((l, f), ([1], [1]))
l, f, i = self.lp.detect_lostruns(2, min_runtime=10) # script did not run for 10 secs
self.assertEqual((l, f), ([], []))
l, f, i = self.lp.detect_lostruns(2, max_runtime=-1) # script ran more than -1 secs
self.assertEqual((l, f), ([], []))
l, f, i = self.lp.detect_lostruns(0.01, max_runtime=5, min_runtime=0, rerun=True)
self.assertEqual((l, f), ([1], [1]))
self.assertEqual(self.lp.get_fw_by_id(1).state, 'READY')
def test_detect_lostruns_defuse(self):
# Launch the timed firework in a separate process
class RocketProcess(Process):
def __init__(self, lpad, fworker):
super(self.__class__,self).__init__()
self.lpad = lpad
self.fworker = fworker
def run(self):
launch_rocket(self.lpad, self.fworker)
rp = RocketProcess(self.lp, self.fworker)
rp.start()
# Wait for fw to start
it = 0
while not any([f.state == 'RUNNING' for f in self.lp.get_wf_by_fw_id_lzyfw(self.fw_id).fws]):
time.sleep(1) # Wait 1 sec
it += 1
if it == 10:
raise ValueError("FW never starts running")
rp.terminate() # Kill the rocket
l, f, i = self.lp.detect_lostruns(0.01)
self.assertEqual((l, f), ([1], [1]))
self.lp.defuse_fw(1)
l, f, i = self.lp.detect_lostruns(0.01, rerun=True)
self.assertEqual((l, f), ([1], []))
self.assertEqual(self.lp.get_fw_by_id(1).state, 'DEFUSED')
def test_state_after_run_start(self):
# Launch the timed firework in a separate process
class RocketProcess(Process):
def __init__(self, lpad, fworker):
super(self.__class__,self).__init__()
self.lpad = lpad
self.fworker = fworker
def run(self):
launch_rocket(self.lpad, self.fworker)
rp = RocketProcess(self.lp, self.fworker)
rp.start()
# Wait for running
it = 0
while not any([f.state == 'RUNNING' for f in self.lp.get_wf_by_fw_id_lzyfw(self.fw_id).fws]):
time.sleep(1) # Wait 1 sec
it += 1
if it == 10:
raise ValueError("FW never starts running")
# WF should be running
wf = self.lp.get_wf_by_fw_id_lzyfw(self.fw_id)
for fw_id in wf.fw_states:
self.assertEqual(wf.id_fw[fw_id].state, wf.fw_states[fw_id])
self.assertEqual(wf.fw_states[fw_id], 'RUNNING')
rp.terminate()
class WorkflowFireworkStatesTest(unittest.TestCase):
"""
Class to test the firework states locally cached in workflow.
The states have to be in sync with the actual firework state.
"""
@classmethod
def setUpClass(cls):
cls.lp = None
cls.fworker = FWorker()
try:
cls.lp = LaunchPad(name=TESTDB_NAME, strm_lvl='ERROR')
cls.lp.reset(password=None, require_password=False)
except:
raise unittest.SkipTest('MongoDB is not running in localhost:27017! Skipping tests.')
@classmethod
def tearDownClass(cls):
if cls.lp:
cls.lp.connection.drop_database(TESTDB_NAME)
def setUp(self):
# define the individual FireWorks used in the Workflow
# Parent Firework
fw_p = Firework(ScriptTask.from_str(
'echo "Cronus is the ruler of titans"',
{'store_stdout':True}), name="parent", fw_id=1)
# Sibling fireworks
#fw_s1 = Firework(ScriptTask.from_str(
# 'echo "Zeus is son of Cronus"',
# {'store_stdout':True}), name="sib1", fw_id=2, parents=fw_p)
# Timed firework
fw_s1 = Firework(PyTask(func='time.sleep',args=[5]), name="sib1",
fw_id=2, parents=fw_p)
fw_s2 = Firework(ScriptTask.from_str(
'echo "Poisedon is brother of Zeus"',
{'store_stdout':True}), name="sib2", fw_id=3, parents=fw_p)
fw_s3 = Firework(ScriptTask.from_str(
'echo "Hades is brother of Zeus"',
{'store_stdout':True}), name="sib3", fw_id=4, parents=fw_p)
fw_s4 = Firework(ScriptTask.from_str(
'echo "Demeter is sister & wife of Zeus"',
{'store_stdout':True}), name="sib4", fw_id=5, parents=fw_p)
fw_s5 = Firework(ScriptTask.from_str(
'echo "Lapetus is son of Oceanus"',
{'store_stdout':True}), name="cousin1", fw_id=6)
# Children fireworks
fw_c1 = Firework(ScriptTask.from_str(
'echo "Ares is son of Zeus"',
{'store_stdout':True}), name="c1", fw_id=7, parents=fw_s1)
fw_c2 = Firework(ScriptTask.from_str(
'echo "Persephone is daughter of Zeus & Demeter and wife of Hades"',
{'store_stdout':True}), name="c2", fw_id=8, parents=[fw_s1,fw_s4])
fw_c3 = Firework(ScriptTask.from_str(
'echo "Makaria is daughter of Hades & Persephone"',
{'store_stdout':True}), name="c3", fw_id=9, parents=[fw_s3,fw_c2])
fw_c4 = Firework(ScriptTask.from_str(
'echo "Dione is descendant of Lapetus"',
{'store_stdout':True}), name="c4", fw_id=10, parents=fw_s5)
fw_c5 = Firework(ScriptTask.from_str(
'echo "Aphrodite is son of of Zeus and Dione"',
{'store_stdout':True}), name="c5", fw_id=11, parents=[fw_s1,fw_c4])
fw_c6 = Firework(ScriptTask.from_str(
'echo "Atlas is son of of Lapetus"',
{'store_stdout':True}), name="c6", fw_id=12,parents=fw_s5)
fw_c7 = Firework(ScriptTask.from_str(
'echo "Maia is daughter of Atlas"',
{'store_stdout':True}), name="c7", fw_id=13, parents=fw_c6)
fw_c8 = Firework(ScriptTask.from_str(
'echo "Hermes is daughter of Maia and Zeus"',
{'store_stdout':True}), name="c8", fw_id=14, parents=[fw_s1,fw_c7])
# assemble Workflow from FireWorks and their connections by id
workflow = Workflow([fw_p,fw_s1,fw_s2,fw_s3,fw_s4,fw_s5,fw_c1,fw_c2,
fw_c3,fw_c4,fw_c5,fw_c6,fw_c7,fw_c8])
self.lp.add_wf(workflow)
# Give names to fw_ids
self.zeus_fw_id = 2
self.zeus_child_fw_ids = set([7,8,9,11,14])
self.lapetus_desc_fw_ids = set([6,10,12,13])
self.zeus_sib_fw_ids = set([3,4,5])
self.par_fw_id = 1
self.all_ids = self.zeus_child_fw_ids | self.lapetus_desc_fw_ids | \
self.zeus_sib_fw_ids | set([self.zeus_fw_id]) | \
set([self.par_fw_id])
self.old_wd = os.getcwd()
def tearDown(self):
self.lp.reset(password=None,require_password=False)
# Delete launch locations
if os.path.exists('FW.json'):
os.remove('FW.json')
os.chdir(self.old_wd)
for ldir in glob.glob(os.path.join(MODULE_DIR,"launcher_*")):
shutil.rmtree(ldir)
def _teardown(self, dests):
for f in dests:
if os.path.exists(f):
os.remove(f)
def test_defuse_fw(self):
# defuse Zeus
self.lp.defuse_fw(self.zeus_fw_id)
# Ensure the states are in sync after defusing the fw
wf = self.lp.get_wf_by_fw_id_lzyfw(self.zeus_fw_id)
fws = wf.id_fw
for fw_id in wf.fw_states:
fw_state = fws[fw_id].state
fw_cache_state = wf.fw_states[fw_id]
self.assertEqual(fw_state, fw_cache_state)
# Launch remaining fireworks
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
# Ensure the states are in sync after launching the remaining fws
wf = self.lp.get_wf_by_fw_id_lzyfw(self.zeus_fw_id)
fws = wf.id_fw
for fw_id in wf.fw_states:
fw_state = fws[fw_id].state
fw_cache_state = wf.fw_states[fw_id]
self.assertEqual(fw_state, fw_cache_state)
def test_defuse_fw_after_completion(self):
# Launch rockets in rapidfire
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
# defuse Zeus
self.lp.defuse_fw(self.zeus_fw_id)
# Ensure the states are in sync
wf = self.lp.get_wf_by_fw_id_lzyfw(self.zeus_fw_id)
fws = wf.id_fw
for fw_id in wf.fw_states:
fw_state = fws[fw_id].state
fw_cache_state = wf.fw_states[fw_id]
self.assertEqual(fw_state, fw_cache_state)
def test_reignite_fw(self):
# Defuse Zeus and launch remaining fireworks
self.lp.defuse_fw(self.zeus_fw_id)
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
# Reignite Zeus and his children's fireworks
self.lp.reignite_fw(self.zeus_fw_id)
# Ensure the states are in sync
wf = self.lp.get_wf_by_fw_id_lzyfw(self.zeus_fw_id)
fws = wf.id_fw
for fw_id in wf.fw_states:
fw_state = fws[fw_id].state
fw_cache_state = wf.fw_states[fw_id]
self.assertEqual(fw_state, fw_cache_state)
def test_defuse_wf(self):
# defuse Workflow containing Zeus
self.lp.defuse_wf(self.zeus_fw_id)
defused_ids = self.lp.get_fw_ids({'state':'DEFUSED'})
self.assertIn(self.zeus_fw_id,defused_ids)
# Ensure the states are in sync after defusing wf
wf = self.lp.get_wf_by_fw_id_lzyfw(self.zeus_fw_id)
fws = wf.id_fw
for fw_id in wf.fw_states:
fw_state = fws[fw_id].state
fw_cache_state = wf.fw_states[fw_id]
self.assertEqual(fw_state, fw_cache_state)
def test_reignite_wf(self):
# Defuse workflow containing Zeus
self.lp.defuse_wf(self.zeus_fw_id)
# Launch any remaining fireworks
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
# Reignite Zeus and his children's fireworks and launch them
self.lp.reignite_wf(self.zeus_fw_id)
# Ensure the states are in sync
wf = self.lp.get_wf_by_fw_id_lzyfw(self.zeus_fw_id)
fws = wf.id_fw
for fw_id in wf.fw_states:
fw_state = fws[fw_id].state
fw_cache_state = wf.fw_states[fw_id]
self.assertEqual(fw_state, fw_cache_state)
def test_archive_wf(self):
# Run a firework before archiving Zeus
launch_rocket(self.lp, self.fworker)
# archive Workflow containing Zeus.
self.lp.archive_wf(self.zeus_fw_id)
# Ensure the states are in sync
wf = self.lp.get_wf_by_fw_id_lzyfw(self.zeus_fw_id)
fws = wf.id_fw
for fw_id in wf.fw_states:
fw_state = fws[fw_id].state
fw_cache_state = wf.fw_states[fw_id]
self.assertEqual(fw_state, fw_cache_state)
def test_rerun_fws(self):
# Launch all fireworks
rapidfire(self.lp, self.fworker,m_dir=MODULE_DIR)
fw = self.lp.get_fw_by_id(self.zeus_fw_id)
launches = fw.launches
first_ldir = launches[0].launch_dir
# Rerun Zeus
self.lp.rerun_fw(self.zeus_fw_id, rerun_duplicates=True)
# Ensure the states are in sync
wf = self.lp.get_wf_by_fw_id_lzyfw(self.zeus_fw_id)
fws = wf.id_fw
for fw_id in wf.fw_states:
fw_state = fws[fw_id].state
fw_cache_state = wf.fw_states[fw_id]
self.assertEqual(fw_state, fw_cache_state)
def test_rerun_timed_fws(self):
# Launch all fireworks in a separate process
class RapidfireProcess(Process):
def __init__(self, lpad, fworker):
super(self.__class__,self).__init__()
self.lpad = lpad
self.fworker = fworker
def run(self):
rapidfire(self.lpad, self.fworker)
rp = RapidfireProcess(self.lp, self.fworker)
rp.start()
time.sleep(1) # Wait 1 sec and kill the running fws
rp.terminate()
# Ensure the states are in sync
wf = self.lp.get_wf_by_fw_id_lzyfw(self.zeus_fw_id)
fws = wf.id_fw
for fw_id in wf.fw_states:
fw_state = fws[fw_id].state
fw_cache_state = wf.fw_states[fw_id]
self.assertEqual(fw_state, fw_cache_state)
# Detect lost runs
lost_lids, lost_fwids, inconsistent_fwids = self.lp.detect_lostruns(expiration_secs=0.5)
# Ensure the states are in sync
wf = self.lp.get_wf_by_fw_id_lzyfw(self.zeus_fw_id)
fws = wf.id_fw
for fw_id in wf.fw_states:
fw_state = fws[fw_id].state
fw_cache_state = wf.fw_states[fw_id]
self.assertEqual(fw_state, fw_cache_state)
# Rerun fizzled runs
for fw_id in lost_fwids:
self.lp.rerun_fw(fw_id)
rp = RapidfireProcess(self.lp, self.fworker)
rp.start()
for i in range(20):
wf = self.lp.get_wf_by_fw_id_lzyfw(self.zeus_fw_id)
fws = wf.id_fw
if fws[self.zeus_fw_id].state == 'READY':
time.sleep(0.5) # Wait 0.5 sec
else:
break
else:
# Firework hasn't started yet. Waited too long.
rp.terminate()
return
time.sleep(1)
# Ensure the states are in sync
wf = self.lp.get_wf_by_fw_id_lzyfw(self.zeus_fw_id)
fws = wf.id_fw
for fw_id in wf.fw_states:
fw_state = fws[fw_id].state
fw_cache_state = wf.fw_states[fw_id]
self.assertEqual(fw_state, fw_cache_state)
rp.terminate()
class LaunchPadRerunExceptionTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.lp = None
cls.fworker = FWorker()
try:
cls.lp = LaunchPad(name=TESTDB_NAME, strm_lvl='ERROR')
cls.lp.reset(password=None, require_password=False)
except:
raise unittest.SkipTest('MongoDB is not running in localhost:27017! Skipping tests.')
@classmethod
def tearDownClass(cls):
if cls.lp:
cls.lp.connection.drop_database(TESTDB_NAME)
def setUp(self):
fireworks.core.firework.EXCEPT_DETAILS_ON_RERUN = True
self.error_test_dict = {'error': 'description', 'error_code': 1}
fw = Firework([ExecutionCounterTask(),
ScriptTask.from_str('date +"%s %N"', parameters={'stdout_file': 'date_file'}),
ExceptionTestTask(exc_details=self.error_test_dict)])
self.lp.add_wf(fw)
ExecutionCounterTask.exec_counter = 0
ExceptionTestTask.exec_counter = 0
self.old_wd = os.getcwd()
def tearDown(self):
self.lp.reset(password=None, require_password=False)
# Delete launch locations
if os.path.exists('FW.json'):
os.remove('FW.json')
os.chdir(self.old_wd)
for ldir in glob.glob(os.path.join(MODULE_DIR, "launcher_*")):
shutil.rmtree(ldir)
def test_except_details_on_rerun(self):
rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
self.assertEqual(os.getcwd(), MODULE_DIR)
self.lp.rerun_fw(1)
fw = self.lp.get_fw_by_id(1)
self.assertEqual(fw.spec['_exception_details'], self.error_test_dict)
def test_task_level_rerun(self):
rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
self.assertEqual(os.getcwd(), MODULE_DIR)
self.lp.rerun_fw(1, recover_launch='last')
self.lp.update_spec([1], {'skip_exception': True})
rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
self.assertEqual(os.getcwd(), MODULE_DIR)
dirs = sorted(glob.glob(os.path.join(MODULE_DIR, "launcher_*")))
self.assertEqual(self.lp.get_fw_by_id(1).state, 'COMPLETED')
self.assertEqual(ExecutionCounterTask.exec_counter, 1)
self.assertEqual(ExceptionTestTask.exec_counter, 2)
self.assertFalse(os.path.exists(os.path.join(dirs[1], "date_file")))
# Ensure rerun deletes recovery by default
self.lp.rerun_fw(1)
fw = self.lp.get_fw_by_id(1)
self.assertFalse("_recovery" in fw.spec)
def test_task_level_rerun_cp(self):
rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
self.assertEqual(os.getcwd(), MODULE_DIR)
self.lp.rerun_fw(1, recover_launch='last', recover_mode="cp")
self.lp.update_spec([1], {'skip_exception': True})
rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
self.assertEqual(os.getcwd(), MODULE_DIR)
dirs = sorted(glob.glob(os.path.join(MODULE_DIR, "launcher_*")))
self.assertEqual(self.lp.get_fw_by_id(1).state, 'COMPLETED')
self.assertEqual(ExecutionCounterTask.exec_counter, 1)
self.assertEqual(ExceptionTestTask.exec_counter, 2)
self.assertTrue(filecmp.cmp(os.path.join(dirs[0], "date_file"), os.path.join(dirs[1], "date_file")))
def test_task_level_rerun_prev_dir(self):
rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
self.assertEqual(os.getcwd(), MODULE_DIR)
self.lp.rerun_fw(1, recover_launch='last', recover_mode="prev_dir")
self.lp.update_spec([1], {'skip_exception': True})
rapidfire(self.lp, self.fworker, m_dir=MODULE_DIR)
fw = self.lp.get_fw_by_id(1)
self.assertEqual(os.getcwd(), MODULE_DIR)
self.assertEqual(fw.state, 'COMPLETED')
self.assertEqual(fw.launches[0].launch_dir, fw.archived_launches[0].launch_dir)
self.assertEqual(ExecutionCounterTask.exec_counter, 1)
self.assertEqual(ExceptionTestTask.exec_counter, 2)
class WFLockTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.lp = None
cls.fworker = FWorker()
try:
cls.lp = LaunchPad(name=TESTDB_NAME, strm_lvl='ERROR')
cls.lp.reset(password=None, require_password=False)
except:
raise unittest.SkipTest('MongoDB is not running in localhost:27017! Skipping tests.')
@classmethod
def tearDownClass(cls):
if cls.lp:
cls.lp.connection.drop_database(TESTDB_NAME)
def setUp(self):
# set the defaults in the init of wflock to break the lock quickly
fireworks.core.launchpad.WFLock(3, False).__init__.__func__.__defaults__= (3, False)
self.error_test_dict = {'error': 'description', 'error_code': 1}
fw_slow = Firework(SlowAdditionTask(), spec={'seconds': 10}, fw_id=1)
fw_fast = Firework(WaitWFLockTask(), fw_id=2, spec={'_add_launchpad_and_fw_id': True})
fw_child = Firework(ScriptTask.from_str('echo "child"'), fw_id=3)
wf = Workflow([fw_slow, fw_fast, fw_child], {fw_slow: fw_child, fw_fast: fw_child})
self.lp.add_wf(wf)
self.old_wd = os.getcwd()
def tearDown(self):
self.lp.reset(password=None, require_password=False)
# Delete launch locations
if os.path.exists('FW.json'):
os.remove('FW.json')
os.chdir(self.old_wd)
for ldir in glob.glob(os.path.join(MODULE_DIR, "launcher_*")):
shutil.rmtree(ldir)
def test_fix_db_inconsistencies_completed(self):
class RocketProcess(Process):
def __init__(self, lpad, fworker, fw_id):
super(self.__class__,self).__init__()
self.lpad = lpad
self.fworker = fworker
self.fw_id = fw_id
def run(self):
launch_rocket(self.lpad, self.fworker, fw_id=self.fw_id)
# Launch the slow firework in a separate process
rp = RocketProcess(self.lp, self.fworker, fw_id=1)
rp.start()
time.sleep(1)
launch_rocket(self.lp, self.fworker, fw_id=2)
# wait for the slow firework to complete
rp.join()
fast_fw = self.lp.get_fw_by_id(2)
if fast_fw.state == 'FIZZLED':
stacktrace = self.lp.launches.find_one(
{'fw_id': 2}, {'action.stored_data._exception._stacktrace': 1})['action']['stored_data']['_exception']['_stacktrace']
if 'SkipTest' in stacktrace:
self.skipTest("The test didn't run correctly")
self.assertEqual(fast_fw.state, 'RUNNING')
child_fw = self.lp.get_fw_by_id(3)
self.assertTrue("SlowAdditionTask" in child_fw.spec)
self.assertFalse("WaitWFLockTask" in child_fw.spec)
self.lp._refresh_wf(fw_id=2)
child_fw = self.lp.get_fw_by_id(3)
self.assertTrue("WaitWFLockTask" in child_fw.spec)
fast_fw = self.lp.get_fw_by_id(2)
self.assertEqual(fast_fw.state, 'COMPLETED')
def test_fix_db_inconsistencies_fizzled(self):
class RocketProcess(Process):
def __init__(self, lpad, fworker, fw_id):
super(self.__class__,self).__init__()
self.lpad = lpad
self.fworker = fworker
self.fw_id = fw_id
def run(self):
launch_rocket(self.lpad, self.fworker, fw_id=self.fw_id)
self.lp.update_spec([2], {'fizzle': True})
# Launch the slow firework in a separate process
rp = RocketProcess(self.lp, self.fworker, fw_id=1)
rp.start()
time.sleep(1)
launch_rocket(self.lp, self.fworker, fw_id=2)
# wait for the slow firework to complete
rp.join()
fast_fw = self.lp.get_fw_by_id(2)
if fast_fw.state == 'FIZZLED':
stacktrace = self.lp.launches.find_one(
{'fw_id': 2}, {'action.stored_data._exception._stacktrace': 1})['action']['stored_data']['_exception']['_stacktrace']
if 'SkipTest' in stacktrace:
self.skipTest("The test didn't run correctly")
self.assertEqual(fast_fw.state, 'RUNNING')
child_fw = self.lp.get_fw_by_id(3)
self.assertTrue("SlowAdditionTask" in child_fw.spec)
self.assertFalse("WaitWFLockTask" in child_fw.spec)
self.lp._refresh_wf(fw_id=2)
fast_fw = self.lp.get_fw_by_id(2)
self.assertEqual(fast_fw.state, 'FIZZLED')
class LaunchPadOfflineTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.lp = None
cls.fworker = FWorker()
try:
cls.lp = LaunchPad(name=TESTDB_NAME, strm_lvl='ERROR')
cls.lp.reset(password=None, require_password=False)
except:
raise unittest.SkipTest('MongoDB is not running in localhost:27017! Skipping tests.')
@classmethod
def tearDownClass(cls):
if cls.lp:
cls.lp.connection.drop_database(TESTDB_NAME)
def setUp(self):
fireworks.core.firework.EXCEPT_DETAILS_ON_RERUN = True
self.error_test_dict = {'error': 'description', 'error_code': 1}
fw = Firework(ScriptTask.from_str(
'echo "test offline"',
{'store_stdout':True}), name="offline_fw", fw_id=1)
self.lp.add_wf(fw)
self.launch_dir = os.path.join(MODULE_DIR, "launcher_offline")
os.makedirs(self.launch_dir)
self.old_wd = os.getcwd()
def tearDown(self):
self.lp.reset(password=None, require_password=False)
# Delete launch locations
if os.path.exists('FW.json'):
os.remove('FW.json')
os.chdir(self.old_wd)
for ldir in glob.glob(os.path.join(MODULE_DIR, "launcher_*")):
shutil.rmtree(ldir, ignore_errors=True)
def test__recover_completed(self):
fw, launch_id = self.lp.reserve_fw(self.fworker, self.launch_dir)
fw = self.lp.get_fw_by_id(1)
with cd(self.launch_dir):
setup_offline_job(self.lp, fw, launch_id)
# launch rocket without launchpad to trigger offline mode
launch_rocket(launchpad=None, fworker=self.fworker, fw_id=1)
self.assertIsNone(self.lp.recover_offline(launch_id))
fw = self.lp.get_fw_by_id(launch_id)
self.assertEqual(fw.state, 'COMPLETED')
def test_recover_errors(self):
fw, launch_id = self.lp.reserve_fw(self.fworker, self.launch_dir)
fw = self.lp.get_fw_by_id(1)
with cd(self.launch_dir):
setup_offline_job(self.lp, fw, launch_id)
# remove the directory to cause an exception
shutil.rmtree(self.launch_dir)
# recover ignoring errors
self.assertIsNotNone(self.lp.recover_offline(launch_id, ignore_errors=True, print_errors=True))
fw = self.lp.get_fw_by_id(launch_id)
self.assertEqual(fw.state, 'RESERVED')
#fizzle
self.assertIsNotNone(self.lp.recover_offline(launch_id, ignore_errors=False))
fw = self.lp.get_fw_by_id(launch_id)
self.assertEqual(fw.state, 'FIZZLED')
if __name__ == '__main__':
unittest.main()
| 38.590435
| 133
| 0.624642
| 6,186
| 44,379
| 4.240705
| 0.07129
| 0.045515
| 0.025731
| 0.026989
| 0.843098
| 0.813517
| 0.791408
| 0.777608
| 0.7571
| 0.752106
| 0
| 0.011602
| 0.261971
| 44,379
| 1,149
| 134
| 38.624021
| 0.789332
| 0.10327
| 0
| 0.781174
| 0
| 0
| 0.092617
| 0.002674
| 0
| 0
| 0
| 0
| 0.128362
| 1
| 0.09291
| false
| 0.01956
| 0.020782
| 0
| 0.130807
| 0.007335
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
4a38a1ec6f62e2ffb19d816f8654c77f3a6aedb2
| 259
|
py
|
Python
|
challenge_08.py
|
eskemojoe007/Python_Challenge
|
47553701f0ad0bf7b0e6f398c88fc558d7ac29e7
|
[
"MIT"
] | null | null | null |
challenge_08.py
|
eskemojoe007/Python_Challenge
|
47553701f0ad0bf7b0e6f398c88fc558d7ac29e7
|
[
"MIT"
] | null | null | null |
challenge_08.py
|
eskemojoe007/Python_Challenge
|
47553701f0ad0bf7b0e6f398c88fc558d7ac29e7
|
[
"MIT"
] | null | null | null |
import bz2
bz2.decompress(b'BZh91AY&SYA\xaf\x82\r\x00\x00\x01\x01\x80\x02\xc0\x02\x00 \x00!\x9ah3M\x07<]\xc9\x14\xe1BA\x06\xbe\x084')
bz2.decompress(b'BZh91AY&SY\x94$|\x0e\x00\x00\x00\x81\x00\x03$ \x00!\x9ah3M\x13<]\xc9\x14\xe1BBP\x91\xf08')
# huge
# file
| 28.777778
| 122
| 0.718147
| 51
| 259
| 3.647059
| 0.647059
| 0.129032
| 0.150538
| 0.225806
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.284553
| 0.050193
| 259
| 8
| 123
| 32.375
| 0.471545
| 0.034749
| 0
| 0
| 0
| 0.666667
| 0.773279
| 0.765182
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 9
|
4a43b83f1506ac7420d92537a98634c0923ddcae
| 130
|
py
|
Python
|
synapse_pay_rest/tests/__init__.py
|
EquityZen/SynapseFI-Python
|
caf069d63f0a7bcdbc8b9180648638ab0bc83ef7
|
[
"MIT",
"Unlicense"
] | null | null | null |
synapse_pay_rest/tests/__init__.py
|
EquityZen/SynapseFI-Python
|
caf069d63f0a7bcdbc8b9180648638ab0bc83ef7
|
[
"MIT",
"Unlicense"
] | null | null | null |
synapse_pay_rest/tests/__init__.py
|
EquityZen/SynapseFI-Python
|
caf069d63f0a7bcdbc8b9180648638ab0bc83ef7
|
[
"MIT",
"Unlicense"
] | null | null | null |
from .errors_tests import *
from .http_client_tests import *
from .client_tests import *
from .api import *
from .models import *
| 21.666667
| 32
| 0.769231
| 19
| 130
| 5.052632
| 0.421053
| 0.416667
| 0.46875
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 130
| 5
| 33
| 26
| 0.872727
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
4a842297c3963a86c69a0245ab973bf73e13026f
| 13,084
|
py
|
Python
|
utils.py
|
fnando/sublime-switch-case
|
6e03a675cf6071f02f338d33651fe0171b3ba646
|
[
"Unlicense",
"MIT"
] | 1
|
2020-06-08T08:03:31.000Z
|
2020-06-08T08:03:31.000Z
|
utils.py
|
fnando/sublime-switch-case
|
6e03a675cf6071f02f338d33651fe0171b3ba646
|
[
"Unlicense",
"MIT"
] | null | null | null |
utils.py
|
fnando/sublime-switch-case
|
6e03a675cf6071f02f338d33651fe0171b3ba646
|
[
"Unlicense",
"MIT"
] | null | null | null |
import re
def convert(text, case):
words = to_words(text)
if case == "hyphenated":
return "-".join(words)
elif case == "snake":
return "_".join(words)
elif case == "scream_snake":
return "_".join(map(lambda word: word.upper(), words))
elif case == "dot":
return ".".join(words)
elif case == "space":
return " ".join(words)
elif case == "camel":
return "".join(map(lambda word: word.title(), words))
elif case == "camel_back":
result = "".join(map(lambda word: word.title(), words))
return result[:1].lower() + result[1:]
elif case == "slash":
return "/".join(words)
elif case == "backslash":
return "\\".join(words)
elif case == "lower":
return words[0].lower()
elif case == "upper":
return words[0].upper()
elif case == "title":
return words[0].title()
else:
return text
def to_words(text):
if re.match(r"^([a-zA-Z][a-z0-9]*|[A-Z]+[A-Z0-9]*)$", text):
words = [text]
elif re.match(r"^[a-zA-Z][a-z0-9]*([A-Z][A-Za-z0-9]+)+$", text):
text = re.sub(r"([A-Z]+|[0-9]+)", "__SWITCH_CASE_SEPARATOR__\\1", text[:1].lower() + text[1:])
words = re.split(r"__SWITCH_CASE_SEPARATOR__", text)
elif re.match(r"^[a-z]+[a-z0-9]*([-_. /\\]+[a-z0-9]+)*$", text, flags=re.IGNORECASE):
words = re.split(r"[-_. /\\]+", text)
else:
words = [text]
return list(map(lambda word: word.lower(), words))
matchers_multiple_words = [
{"name": "snake", "match": lambda text: re.match(r"^[a-z]+(_[a-z0-9]+)+$", text)},
{"name": "scream_snake", "match": lambda text: re.match(r"^[A-Z]+(_[A-Z0-9]+)+$", text)},
{"name": "camel", "match": lambda text: re.match(r"^[A-Z]+[a-z0-9]*([A-Z]+[a-z0-9]+)+$", text)},
{"name": "camel_back", "match": lambda text: re.match(r"^[a-z]+[a-z0-9]*([A-Z]+[a-z0-9]+)+$", text)},
{"name": "hyphenated", "match": lambda text: re.match(r"^[a-z]+(-[a-z0-9]+)+$", text, flags=re.IGNORECASE)},
{"name": "dot", "match": lambda text: re.match(r"^[a-z]+(\.[a-z0-9]+)+$", text, flags=re.IGNORECASE)},
{"name": "space", "match": lambda text: re.match(r"^[a-z]+( [a-z0-9]+)+$", text, flags=re.IGNORECASE)},
{"name": "slash", "match": lambda text: re.match(r"^[a-z]+(/[a-z0-9]+)+$", text, flags=re.IGNORECASE)},
{"name": "backslash", "match": lambda text: re.match(r"^[a-z]+(\\[a-z0-9]+)+$", text, flags=re.IGNORECASE)}
]
matchers_single_word = [
{"name": "lower", "match": lambda text: re.match(r"^[a-z][a-z0-9]*$", text)},
{"name": "upper", "match": lambda text: re.match(r"^[A-Z][A-Z0-9]*$", text)},
{"name": "title", "match": lambda text: re.match(r"^[A-Z][a-z0-9]*$", text)}
]
def alternate(text):
words = to_words(text)
matchers = matchers_multiple_words if len(words) > 1 else matchers_single_word
current_matcher = next((matcher for matcher in matchers if matcher["match"](text)), None)
if current_matcher:
index = matchers.index(current_matcher) + 1
next_matcher = matchers[index if index < len(matchers) else 0]
else:
next_matcher = matchers[0]
return convert(text, next_matcher["name"])
if __name__ == "__main__":
import unittest
class TestStringMethods(unittest.TestCase):
def test_convert_to_words(self):
self.assertEqual(to_words("this-is-hyphenated-1234"), ["this", "is", "hyphenated", "1234"])
self.assertEqual(to_words("this"), ["this"])
self.assertEqual(to_words("This"), ["this"])
self.assertEqual(to_words("1234"), ["1234"])
self.assertEqual(to_words("this is spaced 1234"), ["this", "is", "spaced", "1234"])
self.assertEqual(to_words("this is spaced 1234"), ["this", "is", "spaced", "1234"])
self.assertEqual(to_words("THIS-IS-HYPHENATED-1234"), ["this", "is", "hyphenated", "1234"])
self.assertEqual(to_words("this_is_snake_case_1234"), ["this", "is", "snake", "case", "1234"])
self.assertEqual(to_words("THIS_IS_SCREAM_SNAKE_CASE_1234"), ["this", "is", "scream", "snake", "case", "1234"])
self.assertEqual(to_words("this.is.dot.case.1234"), ["this", "is", "dot", "case", "1234"])
self.assertEqual(to_words("THIS.IS.DOT.CASE.1234"), ["this", "is", "dot", "case", "1234"])
self.assertEqual(to_words("ThisIsCamelCase1234"), ["this", "is", "camel", "case", "1234"])
self.assertEqual(to_words("thisIsCamelBackCase1234"), ["this", "is", "camel", "back", "case", "1234"])
self.assertEqual(to_words("getURL"), ["get", "url"])
def test_convert_to_hyphenated(self):
self.assertEqual(convert("multiple-words-1234", "hyphenated"), "multiple-words-1234")
self.assertEqual(convert("multiple_words_1234", "hyphenated"), "multiple-words-1234")
self.assertEqual(convert("MULTIPLE_WORDS_1234", "hyphenated"), "multiple-words-1234")
self.assertEqual(convert("multipleWords1234", "hyphenated"), "multiple-words-1234")
self.assertEqual(convert("MultipleWords1234", "hyphenated"), "multiple-words-1234")
self.assertEqual(convert("multiple words 1234", "hyphenated"), "multiple-words-1234")
self.assertEqual(convert("multiple.words.1234", "hyphenated"), "multiple-words-1234")
self.assertEqual(convert("multiple/words/1234", "hyphenated"), "multiple-words-1234")
self.assertEqual(convert("multiple\\words\\1234", "hyphenated"), "multiple-words-1234")
def test_convert_to_snake_case(self):
self.assertEqual(convert("multiple-words-1234", "snake"), "multiple_words_1234")
self.assertEqual(convert("multiple_words_1234", "snake"), "multiple_words_1234")
self.assertEqual(convert("MULTIPLE_WORDS_1234", "snake"), "multiple_words_1234")
self.assertEqual(convert("multipleWords1234", "snake"), "multiple_words_1234")
self.assertEqual(convert("MultipleWords1234", "snake"), "multiple_words_1234")
self.assertEqual(convert("multiple words 1234", "snake"), "multiple_words_1234")
self.assertEqual(convert("multiple.words.1234", "snake"), "multiple_words_1234")
self.assertEqual(convert("multiple/words/1234", "snake"), "multiple_words_1234")
self.assertEqual(convert("multiple\\words\\1234", "snake"), "multiple_words_1234")
def test_convert_to_scream_snake_case(self):
self.assertEqual(convert("multiple-words-1234", "scream_snake"), "MULTIPLE_WORDS_1234")
self.assertEqual(convert("multiple_words_1234", "scream_snake"), "MULTIPLE_WORDS_1234")
self.assertEqual(convert("MULTIPLE_WORDS_1234", "scream_snake"), "MULTIPLE_WORDS_1234")
self.assertEqual(convert("multipleWords1234", "scream_snake"), "MULTIPLE_WORDS_1234")
self.assertEqual(convert("MultipleWords1234", "scream_snake"), "MULTIPLE_WORDS_1234")
self.assertEqual(convert("multiple words 1234", "scream_snake"), "MULTIPLE_WORDS_1234")
self.assertEqual(convert("multiple.words.1234", "scream_snake"), "MULTIPLE_WORDS_1234")
self.assertEqual(convert("multiple/words/1234", "scream_snake"), "MULTIPLE_WORDS_1234")
self.assertEqual(convert("multiple\\words\\1234", "scream_snake"), "MULTIPLE_WORDS_1234")
def test_convert_to_dot_case(self):
self.assertEqual(convert("multiple-words-1234", "dot"), "multiple.words.1234")
self.assertEqual(convert("multiple_words_1234", "dot"), "multiple.words.1234")
self.assertEqual(convert("MULTIPLE_WORDS_1234", "dot"), "multiple.words.1234")
self.assertEqual(convert("multipleWords1234", "dot"), "multiple.words.1234")
self.assertEqual(convert("MultipleWords1234", "dot"), "multiple.words.1234")
self.assertEqual(convert("multiple words 1234", "dot"), "multiple.words.1234")
self.assertEqual(convert("multiple.words.1234", "dot"), "multiple.words.1234")
self.assertEqual(convert("multiple/words/1234", "dot"), "multiple.words.1234")
self.assertEqual(convert("multiple\\words\\1234", "dot"), "multiple.words.1234")
def test_convert_to_space_case(self):
self.assertEqual(convert("multiple-words-1234", "space"), "multiple words 1234")
self.assertEqual(convert("multiple_words_1234", "space"), "multiple words 1234")
self.assertEqual(convert("MULTIPLE_WORDS_1234", "space"), "multiple words 1234")
self.assertEqual(convert("multipleWords1234", "space"), "multiple words 1234")
self.assertEqual(convert("MultipleWords1234", "space"), "multiple words 1234")
self.assertEqual(convert("multiple words 1234", "space"), "multiple words 1234")
self.assertEqual(convert("multiple.words.1234", "space"), "multiple words 1234")
self.assertEqual(convert("multiple/words/1234", "space"), "multiple words 1234")
self.assertEqual(convert("multiple\\words\\1234", "space"), "multiple words 1234")
def test_convert_to_camel_case(self):
self.assertEqual(convert("multiple-words-1234", "camel"), "MultipleWords1234")
self.assertEqual(convert("multiple_words_1234", "camel"), "MultipleWords1234")
self.assertEqual(convert("MULTIPLE_WORDS_1234", "camel"), "MultipleWords1234")
self.assertEqual(convert("multipleWords1234", "camel"), "MultipleWords1234")
self.assertEqual(convert("MultipleWords1234", "camel"), "MultipleWords1234")
self.assertEqual(convert("multiple words 1234", "camel"), "MultipleWords1234")
self.assertEqual(convert("multiple.words.1234", "camel"), "MultipleWords1234")
self.assertEqual(convert("multiple/words/1234", "camel"), "MultipleWords1234")
self.assertEqual(convert("multiple\\words\\1234", "camel"), "MultipleWords1234")
def test_convert_to_camel_back_case(self):
self.assertEqual(convert("multiple-words-1234", "camel_back"), "multipleWords1234")
self.assertEqual(convert("multiple_words_1234", "camel_back"), "multipleWords1234")
self.assertEqual(convert("MULTIPLE_WORDS_1234", "camel_back"), "multipleWords1234")
self.assertEqual(convert("multipleWords1234", "camel_back"), "multipleWords1234")
self.assertEqual(convert("MultipleWords1234", "camel_back"), "multipleWords1234")
self.assertEqual(convert("multiple words 1234", "camel_back"), "multipleWords1234")
self.assertEqual(convert("multiple.words.1234", "camel_back"), "multipleWords1234")
self.assertEqual(convert("multiple/words/1234", "camel_back"), "multipleWords1234")
self.assertEqual(convert("multiple\\words\\1234", "camel_back"), "multipleWords1234")
def test_convert_to_slash_case(self):
self.assertEqual(convert("multiple-words-1234", "slash"), "multiple/words/1234")
self.assertEqual(convert("multiple_words_1234", "slash"), "multiple/words/1234")
self.assertEqual(convert("MULTIPLE_WORDS_1234", "slash"), "multiple/words/1234")
self.assertEqual(convert("multipleWords1234", "slash"), "multiple/words/1234")
self.assertEqual(convert("MultipleWords1234", "slash"), "multiple/words/1234")
self.assertEqual(convert("multiple words 1234", "slash"), "multiple/words/1234")
self.assertEqual(convert("multiple.words.1234", "slash"), "multiple/words/1234")
self.assertEqual(convert("multiple/words/1234", "slash"), "multiple/words/1234")
self.assertEqual(convert("multiple\\words\\1234", "slash"), "multiple/words/1234")
def test_convert_to_backslash_case(self):
self.assertEqual(convert("multiple-words-1234", "backslash"), "multiple\\words\\1234")
self.assertEqual(convert("multiple_words_1234", "backslash"), "multiple\\words\\1234")
self.assertEqual(convert("MULTIPLE_WORDS_1234", "backslash"), "multiple\\words\\1234")
self.assertEqual(convert("multipleWords1234", "backslash"), "multiple\\words\\1234")
self.assertEqual(convert("MultipleWords1234", "backslash"), "multiple\\words\\1234")
self.assertEqual(convert("multiple words 1234", "backslash"), "multiple\\words\\1234")
self.assertEqual(convert("multiple.words.1234", "backslash"), "multiple\\words\\1234")
self.assertEqual(convert("multiple/words/1234", "backslash"), "multiple\\words\\1234")
self.assertEqual(convert("multiple\\words\\1234", "backslash"), "multiple\\words\\1234")
def test_alternate(self):
result = alternate("multiple-words-1234")
self.assertEqual(result, "multiple.words.1234")
result = alternate(result)
self.assertEqual(result, "multiple words 1234")
result = alternate(result)
self.assertEqual(result, "multiple/words/1234")
result = alternate(result)
self.assertEqual(result, "multiple\\words\\1234")
result = alternate(result)
self.assertEqual(result, "multiple_words_1234")
result = alternate(result)
self.assertEqual(result, "MULTIPLE_WORDS_1234")
result = alternate(result)
self.assertEqual(result, "MultipleWords1234")
result = alternate(result)
self.assertEqual(result, "multipleWords1234")
result = alternate(result)
self.assertEqual(result, "multiple-words-1234")
def test_alternate_one_word(self):
result = alternate("word")
self.assertEqual(result, "WORD")
result = alternate(result)
self.assertEqual(result, "Word")
result = alternate(result)
self.assertEqual(result, "word")
unittest.main()
| 55.440678
| 117
| 0.678386
| 1,568
| 13,084
| 5.528061
| 0.048469
| 0.203969
| 0.262806
| 0.218043
| 0.867097
| 0.842178
| 0.822335
| 0.804338
| 0.795801
| 0.781957
| 0
| 0.072903
| 0.129853
| 13,084
| 235
| 118
| 55.676596
| 0.68845
| 0
| 0
| 0.094059
| 0
| 0.019802
| 0.384286
| 0.069627
| 0
| 0
| 0
| 0
| 0.529703
| 1
| 0.074257
| false
| 0
| 0.009901
| 0
| 0.163366
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
4a9b7e1402d0463338391bb328c130eca078cd67
| 4,677
|
py
|
Python
|
ParaViewCore/ServerManager/Default/Testing/Python/ProgrammableFilter.py
|
mathstuf/ParaView
|
e867e280545ada10c4ed137f6a966d9d2f3db4cb
|
[
"Apache-2.0"
] | 1
|
2020-05-21T20:20:59.000Z
|
2020-05-21T20:20:59.000Z
|
ParaViewCore/ServerManager/Default/Testing/Python/ProgrammableFilter.py
|
mathstuf/ParaView
|
e867e280545ada10c4ed137f6a966d9d2f3db4cb
|
[
"Apache-2.0"
] | null | null | null |
ParaViewCore/ServerManager/Default/Testing/Python/ProgrammableFilter.py
|
mathstuf/ParaView
|
e867e280545ada10c4ed137f6a966d9d2f3db4cb
|
[
"Apache-2.0"
] | 5
|
2016-04-14T13:42:37.000Z
|
2021-05-22T04:59:42.000Z
|
from paraview import servermanager
import sys
import math
import os
import os.path
from paraview import smtesting
smtesting.ProcessCommandLineArguments()
# Connect to the "builtin"
#(in-process) ParaView server ...
#=================================
servermanager.Connect()
# Create an Exodus reader to load our data ...
#==============================================
exodus_file = os.path.join(smtesting.DataDir, "disk_out_ref.ex2")
reader = servermanager.sources.ExodusIIReader(FileName=exodus_file)
reader.UpdatePipeline()
reader.UpdatePropertyInformation()
pxm = servermanager.ProxyManager()
pxm.RegisterProxy("sources", "my reader", reader)
# Create our programmable filter and set its program ...
#========================================================
filter = servermanager.filters.ProgrammableFilter()
filter.GetProperty("Script").SetElement(0, """
input = self.GetInputDataObject(0, 0)
output = self.GetOutputDataObject(0)
output.DeepCopy(input)
""")
# Connect the reader output to
# the programmable filter input ...
#===================================
filter.Input = reader
pxm.RegisterProxy("sources", "my programmable filter", filter)
# Perform a sum operation
#=========================
sum = servermanager.filters.MinMax(Operation="SUM")
# Reduce the programmable filter output
# data using our "max" algorithm,
# returning just the maximum error value
# (instead of transferring the entire
# dataset to the client)
#=======================================
myoutput = servermanager.Fetch(filter, sum, sum)
cellData = myoutput.GetCellData()
if cellData.GetArray("ObjectId").GetValue(0) != 7472:
print "ERROR: Wrong value returned from cell %s array." % cellData.GetArray(0).GetName()
sys.exit(1)
if cellData.GetArray("GlobalElementId").GetValue(0) != 27919128:
print "ERROR: Wrong value returned from cell %s array." % cellData.GetArray(1).GetName()
sys.exit(1)
if cellData.GetArray("PedigreeElementId").GetValue(0) != 27919128:
print "ERROR: Wrong value returned from cell %s array." % cellData.GetArray(2).GetName()
sys.exit(1)
pointData = myoutput.GetPointData()
if pointData.GetArray("GlobalNodeId").GetValue(0) != 36120750:
print "ERROR: Wrong value returned from point %s array." % pointData.GetArray(0).GetName()
sys.exit(1)
if pointData.GetArray("PedigreeNodeId").GetValue(0) != 36120750:
print "ERROR: Wrong value returned from point %s array." % pointData.GetArray(1).GetName()
sys.exit(1)
#---------------Now repeat--------------
# Create an Exodus reader to load our data ...
#==============================================
exodus_file = os.path.join(smtesting.DataDir, "disk_out_ref.ex2")
reader = servermanager.sources.ExodusIIReader(FileName=exodus_file)
reader.UpdatePipeline()
reader.UpdatePropertyInformation()
pxm = servermanager.ProxyManager()
pxm.RegisterProxy("sources", "my reader", reader)
# Create our programmable filter and set its program ...
#========================================================
filter = servermanager.filters.ProgrammableFilter()
filter.GetProperty("Script").SetElement(0, """
input = self.GetInputDataObject(0, 0)
output = self.GetOutputDataObject(0)
output.DeepCopy(input)
""")
# Connect the reader output to
# the programmable filter input ...
#===================================
filter.Input = reader
pxm.RegisterProxy("sources", "my programmable filter", filter)
# Perform a sum operation
#=========================
sum = servermanager.filters.MinMax(Operation="SUM")
# Reduce the programmable filter output
# data using our "max" algorithm,
# returning just the maximum error value
# (instead of transferring the entire
# dataset to the client)
#=======================================
myoutput = servermanager.Fetch(filter, sum, sum)
cellData = myoutput.GetCellData()
if cellData.GetArray("ObjectId").GetValue(0) != 7472:
print "ERROR: Wrong value returned from cell %s array." % cellData.GetArray(0).GetName()
sys.exit(1)
if cellData.GetArray("GlobalElementId").GetValue(0) != 27919128:
print "ERROR: Wrong value returned from cell %s array." % cellData.GetArray(1).GetName()
sys.exit(1)
if cellData.GetArray("PedigreeElementId").GetValue(0) != 27919128:
print "ERROR: Wrong value returned from cell %s array." % cellData.GetArray(2).GetName()
sys.exit(1)
pointData = myoutput.GetPointData()
if pointData.GetArray("GlobalNodeId").GetValue(0) != 36120750:
print "ERROR: Wrong value returned from point %s array." % pointData.GetArray(0).GetName()
sys.exit(1)
if pointData.GetArray("PedigreeNodeId").GetValue(0) != 36120750:
print "ERROR: Wrong value returned from point %s array." % pointData.GetArray(1).GetName()
sys.exit(1)
| 33.647482
| 92
| 0.67693
| 529
| 4,677
| 5.969754
| 0.204159
| 0.060798
| 0.047498
| 0.063331
| 0.935402
| 0.935402
| 0.935402
| 0.935402
| 0.935402
| 0.935402
| 0
| 0.026878
| 0.109044
| 4,677
| 138
| 93
| 33.891304
| 0.730982
| 0.265127
| 0
| 0.891892
| 0
| 0
| 0.278824
| 0.044118
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.081081
| null | null | 0.135135
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
43816ba88af80cfe3d1a61d7b4e1ac08ffee92e8
| 40,407
|
py
|
Python
|
python/paddle/distributed/auto_parallel/operators/dist_matmul.py
|
2742195759/Paddle
|
ce034db1834af85539b22ab68492df9972ff3e69
|
[
"Apache-2.0"
] | 4
|
2021-02-08T13:07:15.000Z
|
2021-10-22T00:58:33.000Z
|
python/paddle/distributed/auto_parallel/operators/dist_matmul.py
|
2742195759/Paddle
|
ce034db1834af85539b22ab68492df9972ff3e69
|
[
"Apache-2.0"
] | 2
|
2019-07-26T04:06:05.000Z
|
2019-07-29T04:25:24.000Z
|
python/paddle/distributed/auto_parallel/operators/dist_matmul.py
|
2742195759/Paddle
|
ce034db1834af85539b22ab68492df9972ff3e69
|
[
"Apache-2.0"
] | 1
|
2020-11-25T10:41:52.000Z
|
2020-11-25T10:41:52.000Z
|
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
from .common import DistributedOperatorImplContainer
from .common import DistributedOperatorImpl
from .common import register_distributed_operator_impl_container
from .common import register_distributed_operator_impl
from .common import copy_distributed_attr_for_var
from .common import copy_distributed_attr_for_dist_op
from ..utils import is_dim_shard
from ..utils import is_dim_replicate
from ..utils import is_valid_list_index
from ..utils import compute_compatible_dim_mapping
from ..utils import compute_compatible_dims_mapping
from ..utils import compute_compatible_and_update_dim_mapping
from ..dist_attribute import OperatorDistributedAttribute
from paddle.fluid import core, unique_name
from paddle.fluid.framework import in_dygraph_mode
from paddle.fluid.framework import Program, Parameter, Variable, program_guard
from paddle.fluid.data_feeder import check_variable_and_dtype, check_dtype
from paddle.distributed.fleet.meta_optimizers.common import OpRole, OP_ROLE_KEY, OP_ROLE_VAR_KEY
from ..process_group import new_process_group
from ..utils import _get_comm_group, _get_corresponding_rank
def _update_dims_mapping_for_matmul(dist_op):
changed = False
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
x_name = op_desc.input('X')[0]
y_name = op_desc.input('Y')[0]
out_name = op_desc.output('Out')[0]
x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
x_dims_mapping_len = len(x_dims_mapping)
y_dims_mapping_len = len(y_dims_mapping)
out_dims_mapping_len = len(out_dims_mapping)
# Add a dim mapping to make sure the length of dims_mapping is at least 2
if x_dims_mapping_len == 1:
x_dims_mapping.insert(0, -1)
if y_dims_mapping_len == 1:
y_dims_mapping.insert(1, -1)
# Deal with dim > 2 and take care of broadcasting
if out_dims_mapping_len > 2:
broadcast_x_dims_mapping = []
broadcast_y_dims_mapping = []
broadcast_out_dims_mapping = []
for i in range(out_dims_mapping_len - x_dims_mapping_len):
broadcast_x_dims_mapping.append(out_dims_mapping[i])
for i in range(x_dims_mapping_len - 2):
broadcast_x_dims_mapping.append(x_dims_mapping[i])
for i in range(out_dims_mapping_len - y_dims_mapping_len):
broadcast_y_dims_mapping.append(out_dims_mapping[i])
for i in range(y_dims_mapping_len - 2):
broadcast_y_dims_mapping.append(y_dims_mapping[i])
for i in range(out_dims_mapping_len - 2):
broadcast_out_dims_mapping.append(out_dims_mapping[i])
compatible_dims_mapping = compute_compatible_dims_mapping([
broadcast_x_dims_mapping, broadcast_y_dims_mapping,
broadcast_out_dims_mapping
])
assert compatible_dims_mapping is not None, "There is no compatible dim mapping."
for i in range(x_dims_mapping_len - 2):
new_idx = i + (out_dims_mapping_len - x_dims_mapping_len)
if x_dims_mapping[i] != compatible_dims_mapping[new_idx]:
x_dims_mapping[i] = compatible_dims_mapping[new_idx]
changed = True
for i in range(y_dims_mapping_len - 2):
new_idx = i + (out_dims_mapping_len - y_dims_mapping_len)
if y_dims_mapping[i] != compatible_dims_mapping[new_idx]:
y_dims_mapping[i] = compatible_dims_mapping[new_idx]
changed = True
for i in range(out_dims_mapping_len - 2):
if out_dims_mapping[i] != compatible_dims_mapping[i]:
out_dims_mapping[i] = compatible_dims_mapping[i]
changed = True
# The following, which uses negative indices, works both
# when len(out_dims_mapping) > 2 and when len(out_dims_mapping) <= 2
dim_changed = compute_compatible_and_update_dim_mapping(
[x_dims_mapping, y_dims_mapping], [-1, -2])
if dim_changed:
changed = True
dim_changed = compute_compatible_and_update_dim_mapping(
[x_dims_mapping, out_dims_mapping], [-2, -2])
if dim_changed:
changed = True
dim_changed = compute_compatible_and_update_dim_mapping(
[y_dims_mapping, out_dims_mapping], [-1, -1])
if dim_changed:
changed = True
# Remove the unnecessary dim mappings to make sure the length of dims_mapping matches its tensor
if x_dims_mapping_len == 1:
x_dims_mapping.pop(0)
if y_dims_mapping_len == 1:
y_dims_mapping.pop(1)
assert len(x_dims_mapping) == x_dims_mapping_len
assert len(y_dims_mapping) == y_dims_mapping_len
assert len(out_dims_mapping) == out_dims_mapping_len
return changed
def _right_operand_parameter_matmul_backward(ctx, *args, **kwargs):
# for now the backward function only inserts the gradient allreduce for the dist op itself
dist_op_context = ctx.dist_op_context
main_block = dist_op_context.get_dst_main_program().global_block()
backward_op = dist_op_context.get_cur_src_op()
rank_id = dist_op_context.get_rank_id()
dist_attr = ctx.get_op_dist_attr_for_program(backward_op)
assert dist_attr is not None, "backward op [{}] doesn't have a dist attribute!".format(
str(backward_op))
# FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
if rank_id not in dist_attr.process_mesh.processes:
rank_id = _get_corresponding_rank(ctx, dist_attr.process_mesh, rank_id)
# check whether gradient allreduce is needed
need_gradient_allreduce = False
assert 'Y' in kwargs, "input [{}] is not given".format('Y')
assert 'X' in kwargs, "input [{}] is not given".format('X')
assert 'Out@GRAD' in kwargs, "input [{}] is not given".format('Out@GRAD')
assert 'Y@GRAD' in kwargs, "output [{}] is not given".format('Y@GRAD')
assert 'X@GRAD' in kwargs, "output [{}] is not given".format('X@GRAD')
assert len(
kwargs['Y']
) == 1, "row_parallel_embedding input Ids take 1 variable but got {}".format(
kwargs['Y'])
assert len(
kwargs['X']
) == 1, "row_parallel_embedding input Ids take 1 variable but got {}".format(
kwargs['X'])
assert len(
kwargs['Out@GRAD']
) == 1, "row_parallel_embedding input Ids take 1 variable but got {}".format(
kwargs['Out@GRAD'])
assert len(
kwargs['Y@GRAD']
) == 1, "row_parallel_embedding output Ids take 1 variable but got {}".format(
kwargs['Y@GRAD'])
assert len(
kwargs['X@GRAD']
) == 1, "row_parallel_embedding output Ids take 1 variable but got {}".format(
kwargs['X@GRAD'])
X_var = main_block.var(kwargs['X'][0])
assert not X_var.is_parameter, "left operand(X) [{}] of dist matmul should not be parameter".format(
X_var.name)
process_mesh = dist_attr.process_mesh
var_dim_mapping = dist_attr.get_input_dims_mapping(X_var.name)
mesh_shape = process_mesh.topology
batch_size_axis = var_dim_mapping[0]
if batch_size_axis > -1 and mesh_shape[batch_size_axis] > 1:
need_gradient_allreduce = True
group_ranks = _get_comm_group(process_mesh.processes,
process_mesh.topology, batch_size_axis,
rank_id)
dp_degree = len(group_ranks)
dp_group = new_process_group(group_ranks)
Y_var = main_block.var(kwargs['Y'][0])
if need_gradient_allreduce and Y_var.is_parameter:
Y_Grad_var = main_block.var(kwargs['Y@GRAD'][0])
allreduce_op = main_block.append_op(
type='c_allreduce_sum',
inputs={'X': [Y_Grad_var]},
outputs={'Out': [Y_Grad_var]},
attrs={
'ring_id': dp_group.id,
'use_calc_stream': True,
OP_ROLE_KEY: OpRole.Backward
})
scale_op = main_block.append_op(
type='scale',
inputs={'X': Y_Grad_var},
outputs={'Out': Y_Grad_var},
attrs={'scale': 1.0 / dp_degree,
OP_ROLE_KEY: OpRole.Backward})
main_block._sync_with_cpp()
dims_mapping = ctx.get_tensor_dist_attr_for_program(
Y_Grad_var).dims_mapping
process_mesh = dist_attr.process_mesh
for op in [allreduce_op, scale_op]:
op_attr = OperatorDistributedAttribute()
op_attr.process_mesh = process_mesh
op_attr.set_output_dims_mapping(Y_Grad_var.name, dims_mapping)
op_attr.set_input_dims_mapping(Y_Grad_var.name, dims_mapping)
ctx.set_op_dist_attr_for_program(op, op_attr)
def _init_param_sync(Weight_var, dist_op_context, startup_block, ctx, rank_id):
assert Weight_var.name not in dist_op_context.already_init_sync_vars
assert startup_block.has_var(Weight_var.name)
dist_op_context.already_init_sync_vars.add(Weight_var.name)
param = startup_block.var(Weight_var.name)
param_dist_attr = ctx.get_tensor_dist_attr_for_program(param)
process_mesh = param_dist_attr.process_mesh
dim_mapping = param_dist_attr.dims_mapping
for axis, size in enumerate(process_mesh.topology):
if size <= 1 or axis in dim_mapping:
pass
else:
group_ranks = _get_comm_group(process_mesh.processes,
process_mesh.topology, axis, rank_id)
sync_group = new_process_group(group_ranks)
startup_block.append_op(
type='c_broadcast',
inputs={'X': param},
outputs={'Out': param},
attrs={
'ring_id': sync_group.id,
'root': 0,
'use_calc_stream': True,
OP_ROLE_KEY: OpRole.Forward
})
startup_block._sync_with_cpp()
class DistributedMatmul(DistributedOperatorImplContainer):
def __init__(self, name):
super(DistributedMatmul, self).__init__()
self._name = name
register_distributed_operator_impl_container("matmul",
DistributedMatmul("matmul"))
# ColumnParallel
class DistributedMatmulImpl0(DistributedOperatorImpl):
def __init__(self, name):
super(DistributedMatmulImpl0, self).__init__()
self._name = name
self._forward_implemented = True
self._backward_implemented = True
def is_input_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
x_name = op_desc.input('X')[0]
y_name = op_desc.input('Y')[0]
x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
if is_dim_shard(x_dims_mapping[-1]):
return False
if is_dim_shard(y_dims_mapping[0]) or is_dim_replicate(y_dims_mapping[
1]):
return False
for mapping in x_dims_mapping[1:-1]:
if is_dim_shard(mapping):
return False
return True
def is_output_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
out_name = op_desc.output('Out')[0]
out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
if is_dim_replicate(out_dims_mapping[-1]):
return False
for mapping in out_dims_mapping[1:-1]:
if is_dim_shard(mapping):
return False
return True
def update_dims_mapping(self, dist_op):
changed = False
dim_changed = _update_dims_mapping_for_matmul(dist_op)
if dim_changed:
changed = True
return changed
@staticmethod
def forward(ctx, *args, **kwargs):
"""
kwargs: inputname_mapping & outputname_mapping
"""
dist_op_context = ctx.dist_op_context
main_block = dist_op_context.get_dst_main_program().global_block()
startup_block = dist_op_context.get_dst_startup_program().global_block()
src_op = dist_op_context.get_cur_src_op()
rank_id = dist_op_context.get_rank_id()
op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
assert op_dist_attr is not None, "backward op [{}] doesn't have a dist attribute!".format(
str(src_op))
# FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
if rank_id not in op_dist_attr.process_mesh.processes:
rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh,
rank_id)
# check validity of inputs / outputs
for input_name in src_op.desc.input_names():
assert input_name in kwargs, "input [{}] is not given".format(
input_name)
assert len(kwargs[input_name]) == len(
src_op.desc.input(input_name)
), "number of tensors for input [{}] does not match".format(input_name)
for output_name in src_op.desc.output_names():
assert output_name in kwargs, "output [{}] is not given".format(
output_name)
assert len(kwargs[output_name]) == len(
src_op.desc.output(output_name)
), "number of tensors for output [{}] does not match".format(
output_name)
X_var = main_block.var(kwargs['X'][0])
Weight_var = main_block.var(kwargs['Y'][0])
Out_var = main_block.var(kwargs['Out'][0])
# TODO infer logic comm presentation
matmul_col_dim_mapping = op_dist_attr.get_input_dims_mapping(
Weight_var.name)[1]
assert matmul_col_dim_mapping >= 0, "col_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format(
matmul_col_dim_mapping)
process_mesh_shape = op_dist_attr.process_mesh.topology
process_mesh_group = op_dist_attr.process_mesh.processes
parallel_axis = matmul_col_dim_mapping
group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape,
parallel_axis, rank_id)
group = new_process_group(group_ranks)
intermediate_var_0 = main_block.create_var(
name=unique_name.generate_with_ignorable_key(".".join(
["c_identity", 'tmp'])),
dtype=X_var.dtype,
shape=X_var.shape,
type=core.VarDesc.VarType.LOD_TENSOR,
persistable=False,
stop_gradient=X_var.stop_gradient)
# copy X_var's dist_attr to intermediate_var_0's dist_attr
copy_distributed_attr_for_var(ctx, intermediate_var_0, X_var)
check_variable_and_dtype(
X_var, 'tensor',
['float16', 'float32', 'float64', 'int32', 'int64'], '_c_identity')
c_identity_op = main_block.append_op(
type='c_identity',
inputs={'X': [X_var]},
outputs={'Out': intermediate_var_0},
attrs={
'ring_id': group.id,
'use_calc_stream': True,
'use_model_parallel': True,
})
check_variable_and_dtype(intermediate_var_0, 'x',
['float16', 'float32', 'float64'], 'linear')
check_dtype(intermediate_var_0.dtype, 'dtype',
['float16', 'float32', 'float64'], 'linear')
attrs = {
'transpose_X': False,
'transpose_Y': False,
'alpha': 1,
}
inputs = {'X': [intermediate_var_0], 'Y': [Weight_var]}
matmul_op = main_block.append_op(
type='matmul', inputs=inputs, outputs={'Out': Out_var}, attrs=attrs)
# copy serial op's dist_attr to dist op's dist_attr
copy_distributed_attr_for_dist_op(ctx, c_identity_op, main_block,
op_dist_attr)
copy_distributed_attr_for_dist_op(ctx, matmul_op, main_block,
op_dist_attr)
# init param sync
if Weight_var.is_parameter:
_init_param_sync(Weight_var, dist_op_context, startup_block, ctx,
rank_id)
@staticmethod
def backward(ctx, *args, **kwargs):
_right_operand_parameter_matmul_backward(ctx, *args, **kwargs)
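# A minimal NumPy sketch (illustrative only, not part of the original operator)
# of the column-parallel pattern implemented above: the weight is split along
# its columns, each rank's local matmul yields a column slice of the output,
# and the forward pass needs no reduction (only the c_identity on X).
def _column_parallel_matmul_sketch():
    import numpy as np
    rs = np.random.RandomState(0)
    x = rs.randn(4, 8)                     # activations, replicated on every rank
    w = rs.randn(8, 6)                     # full weight of the serial matmul
    w_shards = np.split(w, 2, axis=1)      # column shards, one per model-parallel rank
    local_outs = [x.dot(w_k) for w_k in w_shards]
    # concatenating the per-rank column slices reproduces the serial result
    assert np.allclose(np.concatenate(local_outs, axis=1), x.dot(w))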
# RowParallel
class DistributedMatmulImpl1(DistributedOperatorImpl):
def __init__(self, name):
super(DistributedMatmulImpl1, self).__init__()
self._name = name
self._forward_implemented = True
self._backward_implemented = True
def is_input_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
x_name = op_desc.input('X')[0]
y_name = op_desc.input('Y')[0]
x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
if is_dim_replicate(x_dims_mapping[-1]):
return False
if is_dim_replicate(y_dims_mapping[-2]) or is_dim_shard(y_dims_mapping[
-1]):
return False
# Other dimensions must be replicate except the batch dimension
for mapping in x_dims_mapping[1:-1]:
if is_dim_shard(mapping):
return False
return True
def is_output_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
out_name = op_desc.output('Out')[0]
out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
if is_dim_shard(out_dims_mapping[-1]):
return False
# Other dimensions must be replicate except the batch dimension
for mapping in out_dims_mapping[1:-1]:
if is_dim_shard(mapping):
return False
return True
def update_dims_mapping(self, dist_op):
changed = False
dim_changed = _update_dims_mapping_for_matmul(dist_op)
if dim_changed:
changed = True
return changed
@staticmethod
def forward(ctx, *args, **kwargs):
"""
kwargs: inputname_mapping & outputname_mapping
"""
dist_op_context = ctx.dist_op_context
main_block = dist_op_context.get_dst_main_program().global_block()
startup_block = dist_op_context.get_dst_startup_program().global_block()
src_op = dist_op_context.get_cur_src_op()
rank_id = dist_op_context.get_rank_id()
op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
assert op_dist_attr is not None, "forward op [{}] doesn't have dist attribute!".format(
str(src_op))
# FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
if rank_id not in op_dist_attr.process_mesh.processes:
rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh,
rank_id)
# check validation of inputs / outputs
for input_name in src_op.desc.input_names():
assert input_name in kwargs, "input [{}] is not given".format(
input_name)
assert len(kwargs[input_name]) == len(
src_op.desc.input(input_name)
), "number of tensors for input [{}] does not match".format(input_name)
for output_name in src_op.desc.output_names():
assert output_name in kwargs, "output [{}] is not given".format(
output_name)
assert len(kwargs[output_name]) == len(
src_op.desc.output(output_name)
), "number of tensors for output [{}] does not match".format(
output_name)
X_var = main_block.var(kwargs['X'][0])
Weight_var = main_block.var(kwargs['Y'][0])
Out_var = main_block.var(kwargs['Out'][0])
# TODO infer logic comm presentation
matmul_row_dim_mapping = op_dist_attr.get_input_dims_mapping(
Weight_var.name)[0]
assert matmul_row_dim_mapping >= 0, "row_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format(
matmul_row_dim_mapping)
process_mesh_shape = op_dist_attr.process_mesh.topology
process_mesh_group = op_dist_attr.process_mesh.processes
parallel_axis = matmul_row_dim_mapping
group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape,
parallel_axis, rank_id)
group = new_process_group(group_ranks)
check_variable_and_dtype(X_var, 'x', ['float16', 'float32', 'float64'],
'linear')
check_dtype(X_var.dtype, 'dtype', ['float16', 'float32', 'float64'],
'linear')
attrs = {
'transpose_X': False,
'transpose_Y': False,
'alpha': 1,
}
inputs = {'X': X_var, 'Y': Weight_var}
intermediate_var_0 = main_block.create_var(
shape=Out_var.shape,
dtype=Out_var.dtype,
type=Out_var.type,
lod_level=Out_var.lod_level,
persistable=False,
is_data=False,
need_check_feed=Out_var.desc.need_check_feed())
# copy Out_var's dist_attr to intermediate_var_0's dist_attr
copy_distributed_attr_for_var(ctx, intermediate_var_0, Out_var)
matmul_op = main_block.append_op(
type='matmul',
inputs=inputs,
outputs={'Out': intermediate_var_0},
attrs=attrs)
c_allreduce_sum_op = main_block.append_op(
type='c_allreduce_sum',
inputs={'X': intermediate_var_0},
outputs={'Out': Out_var},
attrs={
'ring_id': group.id,
'use_calc_stream': True,
'use_model_parallel': True
})
# copy serial op's dist_attr to dist op's dist_attr
copy_distributed_attr_for_dist_op(ctx, matmul_op, main_block,
op_dist_attr)
copy_distributed_attr_for_dist_op(ctx, c_allreduce_sum_op, main_block,
op_dist_attr)
# init param sync
if Weight_var.is_parameter:
_init_param_sync(Weight_var, dist_op_context, startup_block, ctx,
rank_id)
@staticmethod
def backward(ctx, *args, **kwargs):
_right_operand_parameter_matmul_backward(ctx, *args, **kwargs)
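# A minimal NumPy sketch (illustrative only, not part of the original operator)
# of the row-parallel pattern implemented above: the weight is sharded along its
# rows and the input along its last dimension, so each rank computes a partial
# product and c_allreduce_sum recovers the full result.
def _row_parallel_matmul_sketch():
    import numpy as np
    rs = np.random.RandomState(0)
    x = rs.randn(4, 8)
    w = rs.randn(8, 6)
    x_shards = np.split(x, 2, axis=1)      # each rank holds a slice of the contracted dim
    w_shards = np.split(w, 2, axis=0)      # and the matching row shard of the weight
    partials = [x_k.dot(w_k) for x_k, w_k in zip(x_shards, w_shards)]
    # summing the per-rank partials (what c_allreduce_sum does) gives the serial result
    assert np.allclose(sum(partials), x.dot(w))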
# ReplicateParallel
class DistributedMatmulImpl2(DistributedOperatorImpl):
def __init__(self, name):
super(DistributedMatmulImpl2, self).__init__()
self._name = name
def is_input_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
x_name = op_desc.input('X')[0]
y_name = op_desc.input('Y')[0]
x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
if is_dim_shard(x_dims_mapping[-1]):
return False
if is_valid_list_index(x_dims_mapping,
-2) and is_dim_shard(x_dims_mapping[-2]):
return False
if is_dim_shard(y_dims_mapping[-1]):
return False
if is_valid_list_index(y_dims_mapping,
-2) and is_dim_shard(y_dims_mapping[-2]):
return False
return True
def is_output_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
out_name = op_desc.output('Out')[0]
out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
if is_dim_shard(out_dims_mapping[-1]):
return False
if is_valid_list_index(out_dims_mapping,
-2) and is_dim_shard(out_dims_mapping[-2]):
return False
return True
def update_dims_mapping(self, dist_op):
changed = False
dim_changed = _update_dims_mapping_for_matmul(dist_op)
if dim_changed:
changed = True
return changed
@staticmethod
def backward(ctx, *args, **kwargs):
_right_operand_parameter_matmul_backward(ctx, *args, **kwargs)
register_distributed_operator_impl("matmul",
DistributedMatmulImpl0("column_parallel"))
register_distributed_operator_impl("matmul",
DistributedMatmulImpl1("row_parallel"))
register_distributed_operator_impl("matmul",
DistributedMatmulImpl2("replicate_parallel"))
class DistributedMatmulV2(DistributedOperatorImplContainer):
def __init__(self, name):
super(DistributedMatmulV2, self).__init__()
self._name = name
register_distributed_operator_impl_container("matmul_v2",
DistributedMatmulV2("matmul_v2"))
# ColumnParallel
class DistributedMatmulV2Impl0(DistributedOperatorImpl):
def __init__(self, name):
super(DistributedMatmulV2Impl0, self).__init__()
self._name = name
self._forward_implemented = True
self._backward_implemented = True
def is_input_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
x_name = op_desc.input('X')[0]
y_name = op_desc.input('Y')[0]
x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
if is_dim_shard(x_dims_mapping[-1]):
return False
if is_dim_shard(y_dims_mapping[0]) or is_dim_replicate(y_dims_mapping[
1]):
return False
for mapping in x_dims_mapping[1:-1]:
if is_dim_shard(mapping):
return False
return True
def is_output_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
out_name = op_desc.output('Out')[0]
out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
if is_dim_replicate(out_dims_mapping[-1]):
return False
for mapping in out_dims_mapping[1:-1]:
if is_dim_shard(mapping):
return False
return True
def update_dims_mapping(self, dist_op):
changed = False
dim_changed = _update_dims_mapping_for_matmul(dist_op)
if dim_changed:
changed = True
return changed
@staticmethod
def forward(ctx, *args, **kwargs):
"""
kwargs: inputname_mapping & outputname_mapping
"""
dist_op_context = ctx.dist_op_context
main_block = dist_op_context.get_dst_main_program().global_block()
startup_block = dist_op_context.get_dst_startup_program().global_block()
src_op = dist_op_context.get_cur_src_op()
rank_id = dist_op_context.get_rank_id()
op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
assert op_dist_attr is not None, "forward op [{}] doesn't have dist attribute!".format(
str(src_op))
# FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
if rank_id not in op_dist_attr.process_mesh.processes:
rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh,
rank_id)
# check validation of inputs / outputs
for input_name in src_op.desc.input_names():
assert input_name in kwargs, "input [{}] is not given".format(
input_name)
assert len(kwargs[input_name]) == len(
src_op.desc.input(input_name)
), "number of tensors for input [{}] does not match".format(input_name)
for output_name in src_op.desc.output_names():
assert output_name in kwargs, "output [{}] is not given".format(
output_name)
assert len(kwargs[output_name]) == len(
src_op.desc.output(output_name)
), "number of tensors for output [{}] does not match".format(
output_name)
X_var = main_block.var(kwargs['X'][0])
Weight_var = main_block.var(kwargs['Y'][0])
Out_var = main_block.var(kwargs['Out'][0])
# TODO infer logic comm presentation
matmul_col_dim_mapping = op_dist_attr.get_input_dims_mapping(
Weight_var.name)[1]
assert matmul_col_dim_mapping >= 0, "col_parallel_matmul's col should be divided by a specific mesh axis, but got [{}]".format(
matmul_col_dim_mapping)
process_mesh_shape = op_dist_attr.process_mesh.topology
process_mesh_group = op_dist_attr.process_mesh.processes
parallel_axis = matmul_col_dim_mapping
group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape,
parallel_axis, rank_id)
group = new_process_group(group_ranks)
intermediate_var_0 = main_block.create_var(
name=unique_name.generate_with_ignorable_key(".".join(
["c_identity", 'tmp'])),
dtype=X_var.dtype,
shape=X_var.shape,
type=core.VarDesc.VarType.LOD_TENSOR,
persistable=False,
stop_gradient=X_var.stop_gradient)
# copy X_var's dist_attr to intermediate_var_0's dist_attr
copy_distributed_attr_for_var(ctx, intermediate_var_0, X_var)
check_variable_and_dtype(
X_var, 'tensor',
['float16', 'float32', 'float64', 'int32', 'int64'], '_c_identity')
c_identity_op = main_block.append_op(
type='c_identity',
inputs={'X': [X_var]},
outputs={'Out': intermediate_var_0},
attrs={
'ring_id': group.id,
'use_calc_stream': True,
'use_model_parallel': True,
})
check_variable_and_dtype(intermediate_var_0, 'x',
['float16', 'float32', 'float64'], 'linear')
check_dtype(intermediate_var_0.dtype, 'dtype',
['float16', 'float32', 'float64'], 'linear')
attrs = {'trans_x': False, 'trans_y': False}
inputs = {'X': [intermediate_var_0], 'Y': [Weight_var]}
matmul_v2_op = main_block.append_op(
type='matmul_v2',
inputs=inputs,
outputs={'Out': Out_var},
attrs=attrs)
# copy serial op's dist_attr to dist op's dist_attr
copy_distributed_attr_for_dist_op(ctx, c_identity_op, main_block,
op_dist_attr)
copy_distributed_attr_for_dist_op(ctx, matmul_v2_op, main_block,
op_dist_attr)
# init param sync
if Weight_var.is_parameter:
_init_param_sync(Weight_var, dist_op_context, startup_block, ctx,
rank_id)
@staticmethod
def backward(ctx, *args, **kwargs):
_right_operand_parameter_matmul_backward(ctx, *args, **kwargs)
# RowParallel
class DistributedMatmulV2Impl1(DistributedOperatorImpl):
def __init__(self, name):
super(DistributedMatmulV2Impl1, self).__init__()
self._name = name
self._forward_implemented = True
self._backward_implemented = True
def is_input_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
x_name = op_desc.input('X')[0]
y_name = op_desc.input('Y')[0]
x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
if is_dim_replicate(x_dims_mapping[-1]):
return False
if is_dim_replicate(y_dims_mapping[-2]) or is_dim_shard(y_dims_mapping[
-1]):
return False
# Other dimensions must be replicate except the batch dimension
for mapping in x_dims_mapping[1:-1]:
if is_dim_shard(mapping):
return False
return True
def is_output_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
out_name = op_desc.output('Out')[0]
out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
if is_dim_shard(out_dims_mapping[-1]):
return False
# Other dimensions must be replicate except the batch dimension
for mapping in out_dims_mapping[1:-1]:
if is_dim_shard(mapping):
return False
return True
def update_dims_mapping(self, dist_op):
changed = False
dim_changed = _update_dims_mapping_for_matmul(dist_op)
if dim_changed:
changed = True
return changed
@staticmethod
def forward(ctx, *args, **kwargs):
"""
kwargs: inputname_mapping & outputname_mapping
"""
dist_op_context = ctx.dist_op_context
main_block = dist_op_context.get_dst_main_program().global_block()
startup_block = dist_op_context.get_dst_startup_program().global_block()
src_op = dist_op_context.get_cur_src_op()
rank_id = dist_op_context.get_rank_id()
op_dist_attr = ctx.get_op_dist_attr_for_program(src_op)
assert op_dist_attr is not None, "forward op [{}] doesn't have dist attribute!".format(
str(src_op))
# FIXME (JZ-LIANG) Remove this hack to support any op mesh group for Pipeline Parallelism
if rank_id not in op_dist_attr.process_mesh.processes:
rank_id = _get_corresponding_rank(ctx, op_dist_attr.process_mesh,
rank_id)
# check validation of inputs / outputs
for input_name in src_op.desc.input_names():
assert input_name in kwargs, "input [{}] is not given".format(
input_name)
assert len(kwargs[input_name]) == len(
src_op.desc.input(input_name)
), "number of tensors for input [{}] does not match".format(input_name)
for output_name in src_op.desc.output_names():
assert output_name in kwargs, "output [{}] is not given".format(
output_name)
assert len(kwargs[output_name]) == len(
src_op.desc.output(output_name)
), "number of tensors for output [{}] does not match".format(
output_name)
X_var = main_block.var(kwargs['X'][0])
Weight_var = main_block.var(kwargs['Y'][0])
Out_var = main_block.var(kwargs['Out'][0])
# TODO infer logic comm presentation
matmul_row_dim_mapping = op_dist_attr.get_input_dims_mapping(
Weight_var.name)[0]
assert matmul_row_dim_mapping >= 0, "row_parallel_matmul's row should be divided by a specific mesh axis, but got [{}]".format(
matmul_row_dim_mapping)
process_mesh_shape = op_dist_attr.process_mesh.topology
process_mesh_group = op_dist_attr.process_mesh.processes
parallel_axis = matmul_row_dim_mapping
group_ranks = _get_comm_group(process_mesh_group, process_mesh_shape,
parallel_axis, rank_id)
group = new_process_group(group_ranks)
check_variable_and_dtype(X_var, 'x', ['float16', 'float32', 'float64'],
'linear')
check_dtype(X_var.dtype, 'dtype', ['float16', 'float32', 'float64'],
'linear')
attrs = {'trans_x': False, 'trans_y': False}
inputs = {'X': X_var, 'Y': Weight_var}
intermediate_var_0 = main_block.create_var(
shape=Out_var.shape,
dtype=Out_var.dtype,
type=Out_var.type,
lod_level=Out_var.lod_level,
persistable=False,
is_data=False,
need_check_feed=Out_var.desc.need_check_feed())
# copy Out_var's dist_attr to intermediate_var_0's dist_attr
copy_distributed_attr_for_var(ctx, intermediate_var_0, Out_var)
matmul_v2_op = main_block.append_op(
type='matmul_v2',
inputs=inputs,
outputs={'Out': intermediate_var_0},
attrs=attrs)
c_allreduce_sum_op = main_block.append_op(
type='c_allreduce_sum',
inputs={'X': intermediate_var_0},
outputs={'Out': Out_var},
attrs={
'ring_id': group.id,
'use_calc_stream': True,
'use_model_parallel': True
})
# copy serial op's dist_attr to dist op's dist_attr
copy_distributed_attr_for_dist_op(ctx, matmul_v2_op, main_block,
op_dist_attr)
copy_distributed_attr_for_dist_op(ctx, c_allreduce_sum_op, main_block,
op_dist_attr)
# init param sync
if Weight_var.is_parameter:
_init_param_sync(Weight_var, dist_op_context, startup_block, ctx,
rank_id)
@staticmethod
def backward(ctx, *args, **kwargs):
_right_operand_parameter_matmul_backward(ctx, *args, **kwargs)
# ReplicateParallel
class DistributedMatmulV2Impl2(DistributedOperatorImpl):
def __init__(self, name):
super(DistributedMatmulV2Impl2, self).__init__()
self._name = name
def is_input_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
x_name = op_desc.input('X')[0]
y_name = op_desc.input('Y')[0]
x_dims_mapping = op_dist_attr.get_input_dims_mapping(x_name)
y_dims_mapping = op_dist_attr.get_input_dims_mapping(y_name)
if is_dim_shard(x_dims_mapping[-1]):
return False
if is_valid_list_index(x_dims_mapping,
-2) and is_dim_shard(x_dims_mapping[-2]):
return False
if is_dim_shard(y_dims_mapping[-1]):
return False
if is_valid_list_index(y_dims_mapping,
-2) and is_dim_shard(y_dims_mapping[-2]):
return False
return True
def is_output_compatible(self, dist_op):
op_desc = dist_op.serial_op.desc
op_dist_attr = dist_op.dist_attr
out_name = op_desc.output('Out')[0]
out_dims_mapping = op_dist_attr.get_output_dims_mapping(out_name)
if is_dim_shard(out_dims_mapping[-1]):
return False
if is_valid_list_index(out_dims_mapping,
-2) and is_dim_shard(out_dims_mapping[-2]):
return False
return True
def update_dims_mapping(self, dist_op):
changed = False
dim_changed = _update_dims_mapping_for_matmul(dist_op)
if dim_changed:
changed = True
return changed
@staticmethod
def backward(ctx, *args, **kwargs):
_right_operand_parameter_matmul_backward(ctx, *args, **kwargs)
register_distributed_operator_impl("matmul_v2",
DistributedMatmulV2Impl0("column_parallel"))
register_distributed_operator_impl("matmul_v2",
DistributedMatmulV2Impl1("row_parallel"))
register_distributed_operator_impl(
"matmul_v2", DistributedMatmulV2Impl2("replicate_parallel"))
| 40.650905
| 135
| 0.635311
| 5,302
| 40,407
| 4.441343
| 0.058091
| 0.086886
| 0.038644
| 0.018048
| 0.866103
| 0.849669
| 0.813657
| 0.778453
| 0.758493
| 0.74363
| 0
| 0.009645
| 0.278962
| 40,407
| 993
| 136
| 40.691843
| 0.798593
| 0.068552
| 0
| 0.786718
| 0
| 0
| 0.072473
| 0.005175
| 0
| 0
| 0
| 0.005035
| 0.05364
| 1
| 0.049808
| false
| 0.001277
| 0.025543
| 0
| 0.150702
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 | 43857c453b00064d502ce89f9c3e15677b6419c5 | 15,094 | py | Python | tests/unit/api/controllers/test_instances.py | ilveroluca/life_monitor | 61752952cff6be8daea1d87b8f395ccb4dbe424c | ["MIT"] | null | null | null | tests/unit/api/controllers/test_instances.py | ilveroluca/life_monitor | 61752952cff6be8daea1d87b8f395ccb4dbe424c | ["MIT"] | 1 | 2021-04-16T09:08:26.000Z | 2021-04-16T09:08:26.000Z | tests/unit/api/controllers/test_instances.py | ilveroluca/life_monitor | 61752952cff6be8daea1d87b8f395ccb4dbe424c | ["MIT"] | null | null | null |
# Copyright (c) 2020-2021 CRS4
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import json
import logging
import os
from unittest.mock import MagicMock, Mock, patch
import lifemonitor.api.controllers as controllers
import lifemonitor.api.models as models
import lifemonitor.auth as auth
import lifemonitor.exceptions as lm_exceptions
import lifemonitor.lang.messages as messages
import pytest
from flask import Response
from tests.utils import assert_status_code
logger = logging.getLogger(__name__)
@patch("lifemonitor.api.controllers.lm")
def test_get_instances_no_authorization(m, request_context):
assert auth.current_user.is_anonymous, "Unexpected user in session"
assert auth.current_registry is not None, "Unexpected registry in session"
with pytest.raises(auth.NotAuthorizedException):
controllers.instances_get_by_id("1234")
@patch("lifemonitor.api.controllers.lm")
def test_get_instance_error_not_found(m, request_context, mock_user):
assert not auth.current_user.is_anonymous, "Unexpected user in session"
assert auth.current_registry is not None, "Unexpected registry in session"
m.get_test_instance.return_value = None
response = controllers.instances_get_by_id("123456")
m.get_test_instance.assert_called_once()
assert_status_code(404, response.status_code)
@patch("lifemonitor.api.controllers.lm")
def test_get_instance_by_user_error_forbidden(m, request_context, mock_user):
assert not auth.current_user.is_anonymous, "Unexpected user in session"
assert auth.current_registry is not None, "Unexpected registry in session"
# Mock instance
workflow = MagicMock()
workflow.uuid = "1111-222"
instance = MagicMock()
instance.uuid = '12345'
instance.suite = MagicMock()
instance.suite.uuid = '1111'
instance.test_suite.workflow = workflow
m.get_test_instance.return_value = instance
m.get_user_workflows.return_value = []
m.get_suite.return_value = instance.suite
m.get_user_workflow_version = Mock(side_effect=lm_exceptions.NotAuthorizedException)
# m.get_registry_workflow_version = workflow
response = controllers.instances_get_by_id(instance['uuid'])
logger.debug("Response: %r", response.data)
m.get_test_instance.assert_called_once()
m.get_suite.assert_called_once()
m.get_user_workflow_version.assert_called_once()
assert_status_code(403, response.status_code)
@patch("lifemonitor.api.controllers.lm")
def test_get_instance_by_user(m, request_context, mock_user):
assert not auth.current_user.is_anonymous, "Unexpected user in session"
assert auth.current_registry is not None, "Unexpected registry in session"
workflow = MagicMock()
workflow.uuid = "1111-222"
instance = MagicMock()
instance.uuid = '12345'
instance.suite = MagicMock()
instance.suite.uuid = '1111'
instance.test_suite.workflow = workflow
m.get_test_instance.return_value = instance
m.get_user_workflows.return_value = []
m.get_suite.return_value = instance.suite
m.get_user_workflow_version = workflow
response = controllers.instances_get_by_id(instance['uuid'])
m.get_test_instance.assert_called_once()
m.get_suite.assert_called_once()
m.get_user_workflow_version.assert_called_once()
assert not isinstance(response, Response), "Unexpected response type"
assert isinstance(response, dict), "Unexpected response type"
@patch("lifemonitor.api.controllers.lm")
def test_get_instance_build_by_user_error_not_found(m, request_context, mock_user):
assert not auth.current_user.is_anonymous, "Unexpected user in session"
assert auth.current_registry is not None, "Unexpected registry in session"
instance = MagicMock()
instance.get_test_build.side_effect = \
lm_exceptions.EntityNotFoundException(models.TestBuild)
m.get_test_instance.return_value = instance
response = controllers.instances_builds_get_by_id('111', '12345')
logger.debug("Response: %r", response)
m.get_test_instance.assert_called_once()
assert isinstance(response, Response), "Unexpected response type"
assert_status_code(404, response.status_code)
@patch("lifemonitor.api.controllers.lm")
def test_get_instance_build_by_user(m, request_context, mock_user):
assert not auth.current_user.is_anonymous, "Unexpected user in session"
assert auth.current_registry is not None, "Unexpected registry in session"
build = MagicMock()
build.id = "1"
workflow = MagicMock()
workflow.uuid = "1111-222"
instance = MagicMock()
instance.uuid = '12345'
instance.suite = MagicMock()
instance.suite.uuid = '1111'
instance.get_test_build.return_value = build
instance.test_suite.workflow = workflow
m.get_test_instance.return_value = instance
m.get_user_workflows.return_value = []
m.get_suite.return_value = instance.suite
m.get_user_workflow_version = workflow
response = controllers.instances_builds_get_by_id(instance['uuid'], build.id)
m.get_test_instance.assert_called_once()
m.get_suite.assert_called_once()
m.get_user_workflow_version.assert_called_once()
assert isinstance(response, dict), "Unexpected response type"
@patch("lifemonitor.api.controllers.lm")
def test_get_instance_build_last_logs_by_user(m, request_context, mock_user):
assert not auth.current_user.is_anonymous, "Unexpected user in session"
assert auth.current_registry is not None, "Unexpected registry in session"
workflow = {"uuid": "1111-222"}
build = MagicMock()
build.id = "1"
build.output = os.urandom(2048)
workflow = MagicMock()
workflow.uuid = "1111-222"
instance = MagicMock()
instance.uuid = '12345'
instance.suite = MagicMock()
instance.suite.uuid = '1111'
instance.get_test_build.return_value = build
instance.test_suite.workflow = workflow
m.get_test_instance.return_value = instance
m.get_user_workflows.return_value = []
m.get_suite.return_value = instance.suite
m.get_user_workflow_version = workflow
response = controllers.instances_builds_get_by_id(instance['uuid'], build.id)
m.get_test_instance.assert_called_once()
m.get_suite.assert_called_once()
m.get_user_workflow_version.assert_called_once()
assert isinstance(response, dict), "Unexpected response type"
logger.debug("The loaded instance: %r", response)
assert len(response["last_logs"]) <= 131072, "Unexpected log length: it should be limited to the last 131072 bytes"
@patch("lifemonitor.api.controllers.lm")
def test_get_instance_build_logs_by_user_invalid_offset(m, request_context, mock_user):
assert not auth.current_user.is_anonymous, "Unexpected user in session"
assert auth.current_registry is not None, "Unexpected registry in session"
workflow = {"uuid": "1111-222"}
build = MagicMock()
build.id = "1"
default_limit = 131072
build.output = str(os.urandom(default_limit))
workflow = MagicMock()
workflow.uuid = "1111-222"
instance = MagicMock()
instance.uuid = '12345'
instance.suite = MagicMock()
instance.suite.uuid = '1111'
instance.get_test_build.return_value = build
instance.test_suite.workflow = workflow
# test get logs defaults offset and limit
response = controllers.instances_builds_get_logs(instance['uuid'], build.id, offset_bytes=-1000)
logger.debug("Response: %r", response)
assert response.status_code == 400, "Unexpected response"
error = json.loads(response.data)
logger.debug("Error object: %r", error)
assert isinstance(error, dict), "Unexpected response type"
assert messages.invalid_log_offset in error["detail"], "Unexpected error message"
@patch("lifemonitor.api.controllers.lm")
def test_get_instance_build_logs_by_user_invalid_limit(m, request_context, mock_user):
assert not auth.current_user.is_anonymous, "Unexpected user in session"
assert auth.current_registry is not None, "Unexpected registry in session"
workflow = {"uuid": "1111-222"}
build = MagicMock()
build.id = "1"
default_limit = 131072
build.output = str(os.urandom(default_limit))
instance = MagicMock()
instance.uuid = '12345'
instance.get_test_build.return_value = build
instance.test_suite.workflow = workflow
m.get_test_instance.return_value = instance
m.get_user_workflows.return_value = [workflow]
# test get logs defaults offset and limit
response = controllers.instances_builds_get_logs(instance['uuid'], build.id, limit_bytes=-1000)
logger.debug("Response: %r", response)
assert response.status_code == 400, "Unexpected response"
error = json.loads(response.data)
logger.debug("Error object: %r", error)
assert isinstance(error, dict), "Unexpected response type"
assert messages.invalid_log_limit in error["detail"], "Unexpected error message"
@patch("lifemonitor.api.controllers.lm")
def test_get_instance_build_logs_by_user(m, request_context, mock_user):
assert not auth.current_user.is_anonymous, "Unexpected user in session"
assert auth.current_registry is not None, "Unexpected registry in session"
# pagination settings
default_limit = 131072
parts = 4
part_size = round(default_limit / parts)
logger.debug("Number of parts: %d", parts)
logger.debug("Part size: %d", part_size)
# set workflow/test_instance/test_build
workflow = {"uuid": "1111-222"}
build = MagicMock()
build.id = "1"
output_part = str("n" * part_size)
build.get_output.return_value = output_part
logger.debug("Part length: %r", len(output_part))
workflow = MagicMock()
workflow.uuid = "1111-222"
instance = MagicMock()
instance.uuid = '12345'
instance.suite = MagicMock()
instance.suite.uuid = '1111'
instance.get_test_build.return_value = build
instance.test_suite.workflow = workflow
m.get_test_instance.return_value = instance
m.get_user_workflows.return_value = []
m.get_suite.return_value = instance.suite
m.get_user_workflow_version = workflow
# test get logs defaults offset and limit
response = controllers.instances_builds_get_logs(instance['uuid'], build.id)
m.get_test_instance.assert_called_once()
m.get_suite.assert_called_once()
m.get_user_workflow_version.assert_called_once()
assert isinstance(response, str), "Unexpected response type"
assert len(response) == part_size, f"Unexpected log length: it should be limited to {part_size} bytes"
# test pagination
for n in range(0, parts):
# test get logs defaults offset and limit
response = controllers.instances_builds_get_logs(
instance['uuid'], build.id,
limit_bytes=part_size, offset_bytes=part_size * n)
assert isinstance(response, str), "Unexpected response type"
assert len(response) == part_size, f"Unexpected log length: it should be limited to {part_size} bytes"
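# Illustrative restatement of the pagination arithmetic the loop above checks
# (a sketch, not the controller implementation): page n covers the byte range
# [n * part_size, (n + 1) * part_size) of the build output.
def _log_pagination_sketch():
    default_limit = 131072
    parts = 4
    part_size = default_limit // parts
    output = "n" * default_limit
    pages = [output[n * part_size:(n + 1) * part_size] for n in range(parts)]
    # every page is exactly part_size characters and the pages rebuild the full log
    assert all(len(p) == part_size for p in pages)
    assert "".join(pages) == output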
@patch("lifemonitor.api.controllers.lm")
def test_get_instance_by_registry_error_forbidden(m, request_context, mock_registry):
assert auth.current_user.is_anonymous, "Unexpected user in session"
assert auth.current_registry, "Unexpected registry in session"
workflow = MagicMock()
workflow.uuid = "1111-222"
instance = MagicMock()
instance.uuid = '12345'
instance.suite = MagicMock()
instance.suite.uuid = '1111'
instance.test_suite.workflow = workflow
m.get_test_instance.return_value = instance
m.get_user_workflows.return_value = []
m.get_suite.return_value = instance.suite
m.get_registry_workflow_version = Mock(side_effect=lm_exceptions.NotAuthorizedException)
response = controllers.instances_get_by_id(instance['uuid'])
m.get_test_instance.assert_called_once()
assert isinstance(response, Response), "Unexpected response type"
assert_status_code(403, response.status_code)
@patch("lifemonitor.api.controllers.lm")
def test_get_instance_by_registry_error_not_found(m, request_context, mock_registry):
assert auth.current_user.is_anonymous, "Unexpected user in session"
assert auth.current_registry, "Unexpected registry in session"
workflow = {"uuid": "1111-222"}
instance = MagicMock()
instance.uuid = '12345'
instance.test_suite.workflow = workflow
m.get_test_instance.return_value = instance
mock_registry.registered_workflow_versions = [workflow]
response = controllers.instances_get_by_id(instance['uuid'])
m.get_test_instance.assert_called_once()
assert isinstance(response, dict), "Unexpected response type"
@patch("lifemonitor.api.controllers.lm")
def test_get_instance_build_by_registry_error_not_found(m, request_context, mock_registry):
assert auth.current_user.is_anonymous, "Unexpected user in session"
assert auth.current_registry, "Unexpected registry in session"
build = MagicMock()
build.id = "1"
workflow = {'uuid': '11111'}
instance = MagicMock()
instance.uuid = '12345'
instance.get_test_build = Mock(side_effect=lm_exceptions.EntityNotFoundException(models.TestBuild))
instance.test_suite.workflow = workflow
m.get_test_instance.return_value = instance
mock_registry.registered_workflow_versions = [workflow]
response = controllers.instances_builds_get_by_id(instance['uuid'], '2222')
m.get_test_instance.assert_called_once()
assert isinstance(response, Response), "Unexpected response type"
assert_status_code(404, response.status_code)
@patch("lifemonitor.api.controllers.lm")
def test_get_instance_build_by_registry(m, request_context, mock_registry):
assert auth.current_user.is_anonymous, "Unexpected user in session"
assert auth.current_registry, "Unexpected registry in session"
build = MagicMock()
build.id = "1"
workflow = {'uuid': '11111'}
instance = MagicMock()
instance.uuid = '12345'
instance.test_builds.return_value = [build]
instance.test_suite.workflow = workflow
m.get_test_instance.return_value = instance
mock_registry.registered_workflow_versions = [workflow]
response = controllers.instances_builds_get_by_id(instance['uuid'], build.id)
m.get_test_instance.assert_called_once()
assert isinstance(response, dict), "Unexpected response type"
| 44.656805
| 119
| 0.754141
| 1,998
| 15,094
| 5.46046
| 0.10961
| 0.017965
| 0.016132
| 0.032264
| 0.833181
| 0.823373
| 0.815582
| 0.792851
| 0.772411
| 0.759853
| 0
| 0.021892
| 0.152643
| 15,094
| 337
| 120
| 44.789318
| 0.831118
| 0.088777
| 0
| 0.780576
| 0
| 0
| 0.164954
| 0.030601
| 0
| 0
| 0
| 0
| 0.269784
| 1
| 0.05036
| false
| 0
| 0.043165
| 0
| 0.093525
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0 | 7 | 43c1d9b8c855eed558e37531a835099b9880b05f | 75,495 | py | Python | quest/keras/layers/attention.py | shuokabe/deepQuest-mod | 7140a57c30deedb0570bc835c6ad3c848f0039f4 | ["BSD-3-Clause"] | 2 | 2021-09-28T02:26:46.000Z | 2021-09-28T04:47:55.000Z | keras/layers/attention.py | ruizhang-ai/GCP | 7a0f30c6c3d732627fa269ce943c62a9005cc40f | ["MIT"] | null | null | null | keras/layers/attention.py | ruizhang-ai/GCP | 7a0f30c6c3d732627fa269ce943c62a9005cc40f | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import absolute_import
import numpy as np
np.set_printoptions(threshold=np.inf)
from .. import backend as K
from .. import activations, initializations, regularizers, constraints
from ..engine import Layer, InputSpec
# Access to attention layers from recurrent.py
class Attention(Layer):
''' Attention layer that does not depend on temporal information. The outputs
provided are the attention vectors 'alpha' over the input data.
# Arguments
nb_attention: number of attention mechanisms applied over the input vectors
kernel_initializer: weight initialization function.
Can be the name of an existing function (str),
or a Theano function (see: [initializations](../initializations.md)).
recurrent_initializer: initialization function of the inner cells.
forget_bias_initializer: initialization function for the bias of the forget gate.
[Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
recommend initializing with ones.
activation: activation function.
Can be the name of an existing function (str),
or a Theano function (see: [activations](../activations.md)).
recurrent_activation: activation function for the inner cells.
w_a_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the input weights matrices.
W_a_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the input weights matrices.
U_a_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the recurrent weights matrices.
b_a_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the bias.
dropout_w_a: float between 0 and 1.
dropout_W_a: float between 0 and 1.
dropout_U_a: float between 0 and 1.
# Formulation
'''
def __init__(self, nb_attention,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh',
inner_activation='hard_sigmoid',
dropout_Wa=0.,
Wa_regularizer=None, ba_regularizer=None,
**kwargs):
self.nb_attention = nb_attention
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
# attention model learnable params
self.Wa_regularizer = regularizers.get(Wa_regularizer)
self.ba_regularizer = regularizers.get(ba_regularizer)
self.dropout_Wa = dropout_Wa
if self.dropout_Wa:
self.uses_learning_phase = True
super(Attention, self).__init__(**kwargs)
self.input_spec = [InputSpec(ndim=3)]
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape, ndim=3)]
self.input_dim = input_shape[-1]
# Initialize Att model params (following the same format for any option of self.consume_less)
self.Wa = self.init((self.input_dim, self.nb_attention),
name='{}_Wa'.format(self.name))
self.ba = K.variable((np.zeros(self.nb_attention)),
name='{}_ba'.format(self.name))
self.trainable_weights = [self.Wa, self.ba]
self.regularizers = []
# Att regularizers
if self.Wa_regularizer:
self.Wa_regularizer.set_param(self.Wa)
self.regularizers.append(self.Wa_regularizer)
if self.ba_regularizer:
self.ba_regularizer.set_param(self.ba)
self.regularizers.append(self.ba_regularizer)
# if self.initial_weights is not None:
# self.set_weights(self.initial_weights)
# del self.initial_weights
def preprocess_input(self, x):
return x
def call(self, x, mask=None):
# input shape must be:
# (nb_samples, temporal_or_spatial_dimensions, input_dim)
# note that the .build() method of subclasses MUST define
# self.input_spec with a complete input shape.
input_shape = self.input_spec[0].shape
assert len(input_shape) == 3, 'Input shape must be: (nb_samples, temporal_or_spatial_dimensions, input_dim)'
if K._BACKEND == 'tensorflow':
if not input_shape[1]:
raise Exception('When using TensorFlow, you should define '
'explicitly the number of temporal_or_spatial_dimensions of '
'your sequences.\n'
'If your first layer is an Embedding, '
'make sure to pass it an "input_length" '
'argument. Otherwise, make sure '
'the first layer has '
'an "input_shape" or "batch_input_shape" '
'argument, including the time axis. '
'Found input shape at layer ' + self.name +
': ' + str(input_shape))
constants = self.get_constants(x)
preprocessed_input = self.preprocess_input(x)
attention = self.attention_step(preprocessed_input, constants)
return attention
def attention_step(self, x, constants):
# Att model dropouts
B_Wa = constants[0]
# AttModel (see Formulation in class header)
# e = K.dot(K.tanh(K.dot(x * B_W, self.W) + self.b) * B_w, self.w)
# Attention spatial weights 'alpha'
# e = K.permute_dimensions(e, (0,2,1))
# alpha = K.softmax_3d(e)
# alpha = K.permute_dimensions(alpha, (0,2,1))
# Attention class weights 'beta'
# beta = K.sigmoid(K.dot(alpha * B_Wa, self.Wa) + self.ba)
beta = K.sigmoid(K.dot(x * B_Wa, self.Wa) + self.ba)
# TODO: complete formulas in class description
return beta
def get_constants(self, x):
constants = []
# AttModel
if 0 < self.dropout_Wa < 1:
input_shape = self.input_spec[0].shape
input_dim = input_shape[-1]
ones = K.ones_like(K.reshape(x[:, :, 0, 0], (-1, input_shape[1], 1)))
ones = K.concatenate([ones] * input_dim, 2)
B_Wa = K.in_train_phase(K.dropout(ones, self.dropout_Wa), ones)
constants.append(B_Wa)
else:
constants.append([K.cast_to_floatx(1.)])
return constants
def get_output_shape_for(self, input_shape):
return tuple(list(input_shape[:2]) + [self.nb_attention])
def get_config(self):
config = {'nb_attention': self.nb_attention,
'kernel_initializer': self.init.__name__,
'recurrent_initializer': self.inner_init.__name__,
'forget_bias_initializer': self.forget_bias_init.__name__,
'activation': self.activation.__name__,
'recurrent_activation': self.inner_activation.__name__,
'Wa_regularizer': self.Wa_regularizer.get_config() if self.Wa_regularizer else None,
'ba_regularizer': self.ba_regularizer.get_config() if self.ba_regularizer else None,
'dropout_Wa': self.dropout_Wa}
base_config = super(Attention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
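# A NumPy restatement of Attention.attention_step above (an illustrative sketch
# with made-up shapes, not part of the layer): beta = sigmoid(x . Wa + ba),
# applied independently at every temporal/spatial position.
def _attention_sketch():
    rs = np.random.RandomState(0)
    nb_samples, steps, input_dim, nb_attention = 2, 5, 7, 3
    x = rs.randn(nb_samples, steps, input_dim)
    Wa = rs.randn(input_dim, nb_attention)
    ba = np.zeros(nb_attention)
    beta = 1.0 / (1.0 + np.exp(-(np.dot(x, Wa) + ba)))   # sigmoid, position-wise
    assert beta.shape == (nb_samples, steps, nb_attention)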
class SoftAttention(Layer):
''' Simple soft Attention layer
The outputs provided are the attended input and the attention weights 'alpha' over the input data.
# Arguments
att_units: Soft alignment MLP dimension
kernel_initializer: weight initialization function.
Can be the name of an existing function (str),
or a Theano function (see: [initializations](../initializations.md)).
activation: activation function.
Can be the name of an existing function (str),
or a Theano function (see: [activations](../activations.md)).
w_a_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the input weights matrices.
W_a_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the input weights matrices.
U_a_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the recurrent weights matrices.
b_a_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the bias.
dropout_w_a: float between 0 and 1.
dropout_W_a: float between 0 and 1.
dropout_U_a: float between 0 and 1.
# Formulation
The resulting attention vector 'phi' at time 't' is formed by applying a weighted sum over
the set of inputs 'x_i' contained in 'X':
phi(X, t) = ∑_i alpha_i(t) * x_i,
where each 'alpha_i' at time 't' is a weighting vector over all the input dimension that
accomplishes the following condition:
∑_i alpha_i = 1
and is dynamically adapted at each timestep w.r.t. the following formula:
alpha_i(t) = exp{e_i(t)} / ∑_j exp{e_j(t)}
where each 'e_i' at time 't' is calculated as:
e_i(t) = wa' * tanh( Wa * x_i + Ua * h(t-1) + ba ),
where the following are learnable with the respectively named sizes:
wa Wa Ua ba
[input_dim] [input_dim, input_dim] [units, input_dim] [input_dim]
'''
def __init__(self, att_dim, sum_weighted_output=True,
init='glorot_uniform', activation='tanh',
dropout_Wa=0., dropout_Ua=0.,
wa_regularizer=None, Wa_regularizer=None, Ua_regularizer=None, ba_regularizer=None, ca_regularizer=None,
**kwargs):
self.att_dim = att_dim
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.sum_weighted_output = sum_weighted_output
self.dropout_Wa, self.dropout_Ua = dropout_Wa, dropout_Ua
# attention model learnable params
self.wa_regularizer = regularizers.get(wa_regularizer)
self.Wa_regularizer = regularizers.get(Wa_regularizer)
self.Ua_regularizer = regularizers.get(Ua_regularizer)
self.ba_regularizer = regularizers.get(ba_regularizer)
self.ca_regularizer = regularizers.get(ca_regularizer)
if self.dropout_Wa or self.dropout_Ua:
self.uses_learning_phase = True
super(SoftAttention, self).__init__(**kwargs)
#self.input_spec = [InputSpec(ndim=3)]
def build(self, input_shape):
assert len(input_shape) == 2, 'You should pass two inputs to SoftAttention '
self.input_spec = [InputSpec(shape=input_shape[0]), InputSpec(shape=input_shape[1])]
self.input_steps = input_shape[0][1]
self.input_dim = input_shape[0][2]
self.context_dim = input_shape[1][1]
# Initialize Att model params (following the same format for any option of self.consume_less)
self.wa = self.add_weight((self.att_dim, ),
initializer=self.init,
name='{}_wa'.format(self.name),
regularizer=self.wa_regularizer)
self.Wa = self.add_weight((self.input_dim, self.att_dim),
initializer=self.init,
name='{}_Wa'.format(self.name),
regularizer=self.Wa_regularizer)
self.Ua = self.add_weight((self.context_dim, self.att_dim),
initializer=self.init,
name='{}_Ua'.format(self.name),
regularizer=self.Ua_regularizer)
self.ba = self.add_weight(self.att_dim,
initializer='zero',
name='{}_ba'.format(self.name),
regularizer=self.ba_regularizer)
self.ca = self.add_weight(self.input_steps,
initializer='zero',
name='{}_ca'.format(self.name),
regularizer=self.ca_regularizer)
self.trainable_weights = [self.wa, self.Wa, self.Ua, self.ba, self.ca] # AttModel parameters
self.built = True
def preprocess_input(self, x):
return x
def call(self, x, mask=None):
# input shape must be:
# (nb_samples, temporal_or_spatial_dimensions, input_dim)
# note that the .build() method of subclasses MUST define
# self.input_spec with a complete input shape.
input_shape = self.input_spec[0].shape
state_below = x[0]
self.context = x[1]
assert len(input_shape) == 3, 'Input shape must be: (nb_samples, temporal_or_spatial_dimensions, input_dim)'
if K._BACKEND == 'tensorflow':
if not input_shape[1]:
raise Exception('When using TensorFlow, you should define '
'explicitly the number of temporal_or_spatial_dimensions of '
'your sequences.\n'
'If your first layer is an Embedding, '
'make sure to pass it an "input_length" '
'argument. Otherwise, make sure '
'the first layer has '
'an "input_shape" or "batch_input_shape" '
'argument, including the time axis. '
'Found input shape at layer ' + self.name +
': ' + str(input_shape))
constants = self.get_constants(state_below, mask[1])
preprocessed_input = self.preprocess_input(state_below)
[attended_representation, alphas] = self.attention_step(preprocessed_input, constants)
return [attended_representation, alphas]
def attention_step(self, x, constants):
# Att model dropouts
B_Wa = constants[0] # Dropout Wa
pctx_ = constants[1] # Original context
# Attention model (see Formulation in class header)
p_state_ = K.dot(x * B_Wa[0], self.Wa)
pctx_ = self.activation(pctx_[:, None, :] + p_state_)
e = K.dot(pctx_, self.wa) + self.ca
alphas_shape = e.shape
alphas = K.softmax(e.reshape([alphas_shape[0], alphas_shape[1]]))
# sum over the in_timesteps dimension resulting in [batch_size, input_dim]
ctx_ = x * alphas[:, :, None]
if self.sum_weighted_output:
ctx_ = (ctx_).sum(axis=1)
return [ctx_, alphas]
def get_constants(self, x, mask_context):
constants = []
# constants[0]
if 0 < self.dropout_Wa < 1:
input_dim = self.context_dim
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.concatenate([ones] * input_dim, 1)
B_Wa = [K.in_train_phase(K.dropout(ones, self.dropout_Wa), ones)]
constants.append(B_Wa)
else:
constants.append([K.cast_to_floatx(1.)])
# constants[1]
if 0 < self.dropout_Ua < 1:
input_dim = self.context_dim
ones = K.ones_like(K.reshape(self.context[:, :, 0], (-1, self.context.shape[1], 1)))
ones = K.concatenate([ones] * input_dim, axis=2)
B_Ua = [K.in_train_phase(K.dropout(ones, self.dropout_Ua), ones)]
pctx = K.dot(self.context * B_Ua[0], self.Ua) + self.ba
else:
pctx = K.dot(self.context, self.Ua) + self.ba
constants.append(pctx)
return constants
def get_output_shape_for(self, input_shape):
if self.sum_weighted_output:
dim_x_att = (input_shape[0][0], input_shape[0][2])
else:
dim_x_att = (input_shape[0])
dim_alpha_att = (input_shape[0][0], input_shape[0][1])
main_out = [dim_x_att, dim_alpha_att]
return main_out
def compute_mask(self, input, input_mask=None):
return [None, None]
def get_config(self):
config = {'att_units': self.att_dim,
'kernel_initializer': self.init.__name__,
'activation': self.activation.__name__,
'sum_weighted_output': self.sum_weighted_output,
'wa_regularizer': self.wa_regularizer.get_config() if self.wa_regularizer else None,
'Wa_regularizer': self.Wa_regularizer.get_config() if self.Wa_regularizer else None,
'Ua_regularizer': self.Ua_regularizer.get_config() if self.Ua_regularizer else None,
'ba_regularizer': self.ba_regularizer.get_config() if self.ba_regularizer else None,
'ca_regularizer': self.ca_regularizer.get_config() if self.ca_regularizer else None,
'dropout_Wa': self.dropout_Wa,
'dropout_Ua': self.dropout_Ua,
}
base_config = super(SoftAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
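# A NumPy restatement of the SoftAttention formulation from the class docstring
# (an illustrative sketch with made-up shapes, not the layer's backend code):
#   e_i = wa . tanh(Wa x_i + Ua h + ba),  alpha = softmax(e),  phi = sum_i alpha_i x_i
def _soft_attention_sketch():
    rs = np.random.RandomState(0)
    steps, input_dim, context_dim, att_dim = 6, 4, 5, 3
    x = rs.randn(steps, input_dim)         # annotations x_i
    h = rs.randn(context_dim)              # conditioning state h(t-1)
    Wa = rs.randn(input_dim, att_dim)
    Ua = rs.randn(context_dim, att_dim)
    ba = np.zeros(att_dim)
    wa = rs.randn(att_dim)
    e = np.dot(np.tanh(np.dot(x, Wa) + np.dot(h, Ua) + ba), wa)   # one score per position
    alpha = np.exp(e) / np.exp(e).sum()                           # softmax over positions
    phi = (alpha[:, None] * x).sum(axis=0)                        # weighted sum of the inputs
    assert np.isclose(alpha.sum(), 1.0) and phi.shape == (input_dim,)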
class SoftMultistepsAttention(Layer):
''' Multi timesteps soft Attention layer
The outputs provided are the attended input and the attention weights 'alpha' over the input data.
# Arguments
att_units: Soft alignment MLP dimension
kernel_initializer: weight initialization function.
Can be the name of an existing function (str),
or a Theano function (see: [initializations](../initializations.md)).
activation: activation function.
Can be the name of an existing function (str),
or a Theano function (see: [activations](../activations.md)).
w_a_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the input weights matrices.
W_a_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the input weights matrices.
U_a_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the recurrent weights matrices.
b_a_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the bias.
dropout_w_a: float between 0 and 1.
dropout_W_a: float between 0 and 1.
dropout_U_a: float between 0 and 1.
# Formulation
The resulting attention vector 'phi' at time 't' is formed by applying a weighted sum over
the set of inputs 'x_i' contained in 'X':
phi(X, t) = ∑_i alpha_i(t) * x_i,
where each 'alpha_i' at time 't' is a weighting vector over all the input dimension that
accomplishes the following condition:
∑_i alpha_i = 1
and is dynamically adapted at each timestep w.r.t. the following formula:
alpha_i(t) = exp{e_i(t)} / ∑_j exp{e_j(t)}
where each 'e_i' at time 't' is calculated as:
e_i(t) = wa' * tanh( Wa * x_i + Ua * h(t-1) + ba ),
where the following are learnable with the respectively named sizes:
wa Wa Ua ba
[input_dim] [input_dim, input_dim] [units, input_dim] [input_dim]
'''
def __init__(self, att_dim, sum_weighted_output=True,
init='glorot_uniform', activation='tanh',
return_sequences=True,
dropout_Wa=0., dropout_Ua=0.,
wa_regularizer=None, Wa_regularizer=None, Ua_regularizer=None, ba_regularizer=None, ca_regularizer=None,
**kwargs):
self.att_dim = att_dim
self.init = initializations.get(init)
self.activation = activations.get(activation)
self.sum_weighted_output = sum_weighted_output
self.return_sequences = return_sequences
self.dropout_Wa, self.dropout_Ua = dropout_Wa, dropout_Ua
# attention model learnable params
self.wa_regularizer = regularizers.get(wa_regularizer)
self.Wa_regularizer = regularizers.get(Wa_regularizer)
self.Ua_regularizer = regularizers.get(Ua_regularizer)
self.ba_regularizer = regularizers.get(ba_regularizer)
self.ca_regularizer = regularizers.get(ca_regularizer)
if self.dropout_Wa or self.dropout_Ua:
self.uses_learning_phase = True
super(SoftMultistepsAttention, self).__init__(**kwargs)
#self.input_spec = [InputSpec(ndim=3)]
def build(self, input_shape):
assert len(input_shape) == 2, 'You should pass two inputs to SoftMultistepsAttention '
self.input_spec = [InputSpec(shape=input_shape[0]), InputSpec(shape=input_shape[1])]
self.input_steps = input_shape[0][1]
self.input_dim = input_shape[0][2]
self.context_dim = input_shape[1][2]
# Initialize Att model params (following the same format for any option of self.consume_less)
self.wa = self.add_weight((self.att_dim, ),
initializer=self.init,
name='{}_wa'.format(self.name),
regularizer=self.wa_regularizer)
self.Wa = self.add_weight((self.input_dim, self.att_dim),
initializer=self.init,
name='{}_Wa'.format(self.name),
regularizer=self.Wa_regularizer)
self.Ua = self.add_weight((self.context_dim, self.att_dim),
initializer=self.init,
name='{}_Ua'.format(self.name),
regularizer=self.Ua_regularizer)
self.ba = self.add_weight(self.att_dim,
initializer='zero',
name='{}_ba'.format(self.name),
regularizer=self.ba_regularizer)
self.ca = self.add_weight(self.input_steps,
initializer='zero',
name='{}_ca'.format(self.name),
regularizer=self.ca_regularizer)
self.trainable_weights = [self.wa, self.Wa, self.Ua, self.ba, self.ca] # AttModel parameters
self.built = True
def preprocess_input(self, x):
return x
def call(self, x, mask=None):
# input shape must be:
# (nb_samples, temporal_or_spatial_dimensions, input_dim)
# note that the .build() method of subclasses MUST define
# self.input_spec with a complete input shape.
input_shape = self.input_spec[0].shape
state_below = x[0]
self.context = x[1]
assert len(input_shape) == 3, 'Input shape must be: (nb_samples, temporal_or_spatial_dimensions, input_dim)'
if K._BACKEND == 'tensorflow':
if not input_shape[1]:
raise Exception('When using TensorFlow, you should define '
'explicitly the number of temporal_or_spatial_dimensions of '
'your sequences.\n'
'If your first layer is an Embedding, '
'make sure to pass it an "input_length" '
'argument. Otherwise, make sure '
'the first layer has '
'an "input_shape" or "batch_input_shape" '
'argument, including the time axis. '
'Found input shape at layer ' + self.name +
': ' + str(input_shape))
constants = self.get_constants(state_below, mask[1])
preprocessed_input = self.preprocess_input(state_below)
last_output, outputs, states = K.rnn(self.attention_step, preprocessed_input,
[None, None], #self.get_extra_states(x),
go_backwards=False,
mask=None,
# mask[1], #TODO: What does this mask mean? How should it be applied?
constants=constants,
unroll=False,
input_length=self.input_steps)
if self.return_sequences:
return states
return [states[0][-1], states[1][-1]]
def get_initial_states(self, x):
pctx_state = K.zeros_like(x[1]) # (samples, height*width, features_in)
pctx_state = K.sum(pctx_state, axis=(-1))
alpha_state = pctx_state
pctx_state = K.expand_dims(pctx_state, dim=-1)
pctx_state = K.repeat_elements(pctx_state, self.att_dim, -1)
return [pctx_state, alpha_state]
def attention_step(self, x, constants):
# Att model dropouts
B_Wa = constants[0] # Dropout Wa
pctx_ = constants[1] # Original context
# Attention model (see Formulation in class header)
p_state_ = K.dot(x * B_Wa[0], self.Wa)
pctx_ = self.activation(pctx_ + p_state_[:, None, :])
e = K.dot(pctx_, self.wa) + self.ca
alphas_shape = e.shape
alphas = K.softmax(e.reshape([alphas_shape[0], alphas_shape[1]]))
# sum over the in_timesteps dimension resulting in [batch_size, input_dim]
ctx_ = x * alphas[:, :, None]
if self.sum_weighted_output:
ctx_ = (ctx_).sum(axis=1)
return ctx_, [ctx_, alphas]
def get_constants(self, x, mask_context):
constants = []
# constants[0]
if 0 < self.dropout_Wa < 1:
input_dim = self.context_dim
ones = K.ones_like(K.reshape(x[:, 0, 0], (-1, 1)))
ones = K.concatenate([ones] * input_dim, 1)
B_Wa = [K.in_train_phase(K.dropout(ones, self.dropout_Wa), ones)]
constants.append(B_Wa)
else:
constants.append([K.cast_to_floatx(1.)])
# constants[1]
if 0 < self.dropout_Ua < 1:
input_dim = self.context_dim
ones = K.ones_like(K.reshape(self.context[:, :, 0], (-1, self.context.shape[1], 1)))
ones = K.concatenate([ones] * input_dim, axis=2)
B_Ua = [K.in_train_phase(K.dropout(ones, self.dropout_Ua), ones)]
pctx = K.dot(self.context * B_Ua[0], self.Ua) + self.ba
else:
pctx = K.dot(self.context, self.Ua) + self.ba
constants.append(pctx)
return constants
def get_output_shape_for(self, input_shape):
if self.sum_weighted_output:
dim_x_att = (input_shape[1][0], input_shape[0][1], self.att_dim)
else:
dim_x_att = (input_shape[1][0], input_shape[0][1], input_shape[1][1], self.att_dim)
dim_alpha_att = (input_shape[1][0], input_shape[0][1], input_shape[1][1])
main_out = [dim_x_att, dim_alpha_att]
return main_out
def compute_mask(self, input, input_mask=None):
return [None, None]
def get_config(self):
config = {'att_units': self.att_dim,
'kernel_initializer': self.init.__name__,
'activation': self.activation.__name__,
'sum_weighted_output': self.sum_weighted_output,
'return_sequences': self.return_sequences,
'wa_regularizer': self.wa_regularizer.get_config() if self.wa_regularizer else None,
'Wa_regularizer': self.Wa_regularizer.get_config() if self.Wa_regularizer else None,
'Ua_regularizer': self.Ua_regularizer.get_config() if self.Ua_regularizer else None,
'ba_regularizer': self.ba_regularizer.get_config() if self.ba_regularizer else None,
'ca_regularizer': self.ca_regularizer.get_config() if self.ca_regularizer else None,
'dropout_Wa': self.dropout_Wa,
'dropout_Ua': self.dropout_Ua,
}
base_config = super(SoftMultistepsAttention, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
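# A minimal NumPy sketch (illustrative only, not used by the layer) of the
# additive-attention step implemented in SoftMultistepsAttention.attention_step:
# the current state is projected with Wa, added to the pre-projected context
# (dot(context, Ua) + ba), squashed with tanh, scored with wa (+ ca), and the
# scores are normalised with a softmax over the context positions.
def _soft_attention_step_sketch(h, context, Wa, Ua, wa, ba, ca):
    import numpy as np
    pctx = np.dot(context, Ua) + ba                  # (steps, att_dim)
    pstate = np.dot(h, Wa)                           # (att_dim,)
    e = np.dot(np.tanh(pctx + pstate), wa) + ca      # (steps,) unnormalised scores
    alphas = np.exp(e - e.max())
    alphas /= alphas.sum()                           # softmax over context positions
    ctx = np.sum(context * alphas[:, None], axis=0)  # attention-weighted context
    return ctx, alphas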
class AttentionComplex(Layer):
    ''' Attention layer that does not depend on temporal information. The outputs
    provided are the attention vectors 'alpha' computed over the input data.
# Arguments
        nb_attention: number of attention mechanisms applied over the input vectors.
        init: weight initialization function.
            Can be the name of an existing function (str),
            or a Theano function (see: [initializations](../initializations.md)).
        inner_init: initialization function of the inner cells.
        forget_bias_init: initialization function for the bias of the forget gate.
            [Jozefowicz et al.](http://www.jmlr.org/proceedings/papers/v37/jozefowicz15.pdf)
            recommend initializing with ones.
        activation: activation function.
            Can be the name of an existing function (str),
            or a Theano function (see: [activations](../activations.md)).
        inner_activation: activation function for the inner cells.
        w_regularizer: instance of [WeightRegularizer](../regularizers.md)
            (eg. L1 or L2 regularization), applied to the attention scoring vector.
        W_regularizer: instance of [WeightRegularizer](../regularizers.md)
            (eg. L1 or L2 regularization), applied to the input projection matrix.
        b_regularizer: instance of [WeightRegularizer](../regularizers.md),
            applied to the bias.
        Wa_regularizer: instance of [WeightRegularizer](../regularizers.md),
            applied to the (currently unused) class-attention matrix.
        ba_regularizer: instance of [WeightRegularizer](../regularizers.md),
            applied to the (currently unused) class-attention bias.
        dropout_w: float between 0 and 1.
        dropout_W: float between 0 and 1.
        dropout_Wa: float between 0 and 1.
    # Formulation
        e = dot(tanh(dot(x, W) + b), w)
        alpha = softmax(e)    (softmax taken over the temporal/spatial dimension)
    '''
def __init__(self, nb_attention,
init='glorot_uniform', inner_init='orthogonal',
forget_bias_init='one', activation='tanh',
inner_activation='hard_sigmoid',
dropout_w=0., dropout_W=0., dropout_Wa=0.,
w_regularizer=None, W_regularizer=None, b_regularizer=None, Wa_regularizer=None, ba_regularizer=None,
**kwargs):
self.nb_attention = nb_attention
self.init = initializations.get(init)
self.inner_init = initializations.get(inner_init)
self.forget_bias_init = initializations.get(forget_bias_init)
self.activation = activations.get(activation)
self.inner_activation = activations.get(inner_activation)
# attention model learnable params
self.w_regularizer = regularizers.get(w_regularizer)
self.W_regularizer = regularizers.get(W_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.Wa_regularizer = regularizers.get(Wa_regularizer)
self.ba_regularizer = regularizers.get(ba_regularizer)
self.dropout_w, self.dropout_W, self.dropout_Wa = dropout_w, dropout_W, dropout_Wa
if self.dropout_w or self.dropout_W or self.dropout_Wa:
self.uses_learning_phase = True
super(AttentionComplex, self).__init__(**kwargs)
self.input_spec = [InputSpec(ndim=3)]
def build(self, input_shape):
self.input_spec = [InputSpec(shape=input_shape, ndim=3)]
self.input_dim = input_shape[-1]
# Initialize Att model params (following the same format for any option of self.consume_less)
# self.w = self.add_weight((self.input_dim,),
self.w = self.add_weight((self.input_dim, self.nb_attention),
initializer=self.init,
name='{}_w'.format(self.name),
regularizer=self.w_regularizer)
# self.W = self.add_weight((self.input_dim, self.nb_attention, self.input_dim),
self.W = self.add_weight((self.input_dim, self.input_dim),
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer)
self.b = self.add_weight(self.input_dim,
initializer='zero',
regularizer=self.b_regularizer)
"""
self.Wa = self.add_weight((self.nb_attention, self.nb_attention),
initializer=self.kernel_initializer,
name='{}_Wa'.format(self.name),
regularizer=self.Wa_regularizer)
self.ba = self.add_weight(self.input_dim,
initializer= 'zero',
regularizer=self.ba_regularizer)
self.trainable_weights = [self.w, self.W, self.b, self.Wa, self.ba] # AttModel parameters
"""
        self.trainable_weights = [self.w, self.W, self.b]
        # if self.initial_weights is not None:
        #     self.set_weights(self.initial_weights)
        #     del self.initial_weights
        self.built = True
def preprocess_input(self, x):
return x
def call(self, x, mask=None):
# input shape must be:
# (nb_samples, temporal_or_spatial_dimensions, input_dim)
# note that the .build() method of subclasses MUST define
# self.input_spec with a complete input shape.
input_shape = self.input_spec[0].shape
assert len(input_shape) == 3, 'Input shape must be: (nb_samples, temporal_or_spatial_dimensions, input_dim)'
if K._BACKEND == 'tensorflow':
if not input_shape[1]:
raise Exception('When using TensorFlow, you should define '
'explicitly the number of temporal_or_spatial_dimensions of '
'your sequences.\n'
'If your first layer is an Embedding, '
'make sure to pass it an "input_length" '
'argument. Otherwise, make sure '
'the first layer has '
'an "input_shape" or "batch_input_shape" '
'argument, including the time axis. '
'Found input shape at layer ' + self.name +
': ' + str(input_shape))
constants = self.get_constants(x)
preprocessed_input = self.preprocess_input(x)
attention = self.attention_step(preprocessed_input, constants)
return attention
    def attention_step(self, x, constants):
        # Att model dropouts
        B_w = constants[0]
        B_W = constants[1]
        B_Wa = constants[2]
        # AttModel (see Formulation in class header)
        e = K.dot(K.tanh(K.dot(x * B_W, self.W) + self.b) * B_w, self.w)
        return e
        # Unreached experimental variant, kept for reference:
        # Attention spatial weights 'alpha'
        # e = K.permute_dimensions(e, (0, 2, 1))
        # alpha = K.softmax_3d(e)
        # alpha = K.permute_dimensions(alpha, (0, 2, 1))
        # return alpha
        # Attention class weights 'beta'
        # beta = K.sigmoid(K.dot(alpha * B_Wa, self.Wa) + self.ba)
        # beta = K.softmax_3d(K.dot(alpha * B_Wa, self.Wa) + self.ba)
        # Sum over the in_timesteps dimension resulting in [batch_size, input_dim]
        # x_att = (x * alpha[:, :, None]).sum(axis=1)
        # TODO: complete formulas in class description
        # return beta
def get_constants(self, x):
constants = []
# AttModel
if 0 < self.dropout_w < 1:
input_shape = self.input_spec[0].shape
input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, :, 0], (-1, input_shape[1], 1)))  # input is 3D: (samples, steps, input_dim)
ones = K.concatenate([ones] * input_dim, 2)
B_w = K.in_train_phase(K.dropout(ones, self.dropout_w), ones)
constants.append(B_w)
else:
constants.append(K.cast_to_floatx(1.))
if 0 < self.dropout_W < 1:
input_shape = self.input_spec[0].shape
input_dim = input_shape[-1]
            ones = K.ones_like(K.reshape(x[:, :, 0], (-1, input_shape[1], 1)))  # input is 3D: (samples, steps, input_dim)
ones = K.concatenate([ones] * input_dim, 2)
B_W = K.in_train_phase(K.dropout(ones, self.dropout_W), ones)
constants.append(B_W)
else:
constants.append(K.cast_to_floatx(1.))
if 0 < self.dropout_Wa < 1:
input_shape = self.input_spec[0].shape
            ones = K.ones_like(K.reshape(x[:, :, 0], (-1, input_shape[1], 1)))  # input is 3D: (samples, steps, input_dim)
ones = K.concatenate([ones] * self.nb_attention, 2)
B_Wa = K.in_train_phase(K.dropout(ones, self.dropout_Wa), ones)
constants.append(B_Wa)
else:
constants.append(K.cast_to_floatx(1.))
return constants
def get_output_shape_for(self, input_shape):
return tuple(list(input_shape[:2]) + [self.nb_attention])
def get_config(self):
config = {'nb_attention': self.nb_attention,
'kernel_initializer': self.init.__name__,
'recurrent_initializer': self.inner_init.__name__,
'forget_bias_initializer': self.forget_bias_init.__name__,
'activation': self.activation.__name__,
'recurrent_activation': self.inner_activation.__name__,
'w_regularizer': self.w_regularizer.get_config() if self.w_regularizer else None,
'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
'Wa_regularizer': self.Wa_regularizer.get_config() if self.Wa_regularizer else None,
'ba_regularizer': self.ba_regularizer.get_config() if self.ba_regularizer else None,
'dropout_w': self.dropout_w,
'dropout_W': self.dropout_W,
'dropout_Wa': self.dropout_Wa}
base_config = super(AttentionComplex, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
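# A minimal NumPy sketch (illustrative only, not used by the layer) of the scoring
# performed in AttentionComplex.attention_step: every input vector is projected with
# W (+ b), squashed with tanh and scored against the nb_attention columns of w. The
# softmax shown here corresponds to the commented-out normalisation in that method.
def _attention_complex_sketch(x, W, b, w):
    import numpy as np
    e = np.dot(np.tanh(np.dot(x, W) + b), w)                   # (steps, nb_attention)
    e = e - e.max(axis=0, keepdims=True)
    alphas = np.exp(e) / np.exp(e).sum(axis=0, keepdims=True)  # softmax over steps
    return e, alphas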
class ConvAtt(Layer):
    '''Convolution operator for filtering windows of two-dimensional inputs with an attention mechanism.
    The first input corresponds to the image and the second to a sequence of weighting vectors (one per time step).
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(3, 128, 128)` for 128x128 RGB pictures. An additional input for modulating the attention is required.
# Examples
```python
# apply a 3x3 convolution with 64 output filters on a 256x256 image:
model = Sequential()
model.add(Convolution2D(64, 3, 3, border_mode='same', input_shape=(3, 256, 256)))
# now model.output_shape == (None, 64, 256, 256)
# add a 3x3 convolution on top, with 32 output filters:
model.add(Convolution2D(32, 3, 3, border_mode='same'))
# now model.output_shape == (None, 32, 256, 256)
```
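    A hypothetical sketch (not from the original docs) of calling `ConvAtt` itself
    on an image feature map and a sequence of word vectors; names and shapes are
    illustrative only:
    ```python
    # dim_ordering='th': a 512-channel 14x14 feature map attended with 20 word vectors
    img = Input(shape=(512, 14, 14))
    words = Input(shape=(20, 300))
    attended = ConvAtt(nb_embedding=256, nb_glimpses=1)([img, words])
    ```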
# Arguments
        nb_embedding: dimensionality of the joint embedding space in which the
            two inputs are compared (number of channels of the 1x1 convolution).
        nb_glimpses: number of attention glimpses computed at each step.
        concat_timesteps: if True, the temporal dimension is folded into the
            channel dimension of the output.
        init: name of initialization function for the weights of the layer
(see [initializations](../initializations.md)), or alternatively,
Theano function to use for weights initialization.
This parameter is only relevant if you don't pass
a `weights` argument.
activation: name of activation function to use
(see [activations](../activations.md)),
or alternatively, elementwise Theano function.
If you don't specify anything, no activation is applied
(ie. "linear" activation: a(x) = x).
weights: list of numpy arrays to set as initial weights.
border_mode: 'valid', 'same' or 'full'. ('full' requires the Theano backend.)
subsample: tuple of length 2. Factor by which to subsample output.
Also called strides elsewhere.
W_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the main weights matrix.
b_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the bias.
activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),
applied to the network output.
W_constraint: instance of the [constraints](../constraints.md) module
(eg. maxnorm, nonneg), applied to the main weights matrix.
b_constraint: instance of the [constraints](../constraints.md) module,
applied to the bias.
dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
            (the depth) is at index 1, in 'tf' mode it is at index 3.
It defaults to the `image_dim_ordering` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "tf".
bias: whether to include a bias
(i.e. make the layer affine rather than linear).
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if dim_ordering='th'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if dim_ordering='tf'.
        and 3D tensor with shape:
`(samples, steps, features)`
# Output shape
4D tensor with shape:
        `(samples, channels_out, rows, cols)` if dim_ordering='th'
        or 4D tensor with shape:
        `(samples, rows, cols, channels_out)` if dim_ordering='tf',
        where `channels_out` depends on `nb_embedding`, `nb_glimpses`,
        `return_states` and `concat_timesteps` (see `get_output_shape_for`).
`rows` and `cols` values might have changed due to padding.
'''
def __init__(self, nb_embedding, nb_glimpses=1, concat_timesteps=True,
init='glorot_uniform', activation=None, weights=None, return_states=True,
border_mode='valid', dim_ordering='default',
W_regularizer=None, U_regularizer=None, V_regularizer=None, b_regularizer=None,
activity_regularizer=None,
W_constraint=None, U_constraint=None, V_constraint=None, b_constraint=None,
W_learning_rate_multiplier=None, b_learning_rate_multiplier=None,
bias=True, **kwargs):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
if border_mode not in {'valid', 'same', 'full'}:
raise ValueError('Invalid border mode for Convolution2D:', border_mode)
self.nb_embedding = nb_embedding
self.nb_glimpses = nb_glimpses
self.concat_timesteps = concat_timesteps # if True output_size=(samples, nb_glimpses*num_timesteps, rows, cols)
# if False output_size=(samples, num_timesteps, nb_glimpses, rows, cols)
self.nb_row = 1
self.nb_col = 1
self.return_states = return_states
self.init = initializations.get(init, dim_ordering=dim_ordering)
self.activation = activations.get(activation)
self.border_mode = border_mode
self.subsample = tuple((1, 1))
if dim_ordering not in {'tf', 'th'}:
raise ValueError('dim_ordering must be in {tf, th}.')
self.dim_ordering = dim_ordering
self.W_regularizer = regularizers.get(W_regularizer)
if self.nb_glimpses > 0:
self.U_regularizer = regularizers.get(U_regularizer)
else:
self.U_regularizer = None
self.V_regularizer = regularizers.get(V_regularizer)
self.b_regularizer = regularizers.get(b_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.W_constraint = constraints.get(W_constraint)
if self.nb_glimpses > 0:
self.U_constraint = constraints.get(U_constraint)
else:
self.U_constraint = None
self.V_constraint = constraints.get(V_constraint)
self.b_constraint = constraints.get(b_constraint)
self.W_learning_rate_multiplier = W_learning_rate_multiplier
self.b_learning_rate_multiplier = b_learning_rate_multiplier
self.learning_rate_multipliers = [self.W_learning_rate_multiplier, self.b_learning_rate_multiplier]
self.bias = bias
self.input_spec = [InputSpec(ndim=4)]
self.initial_weights = weights
self.supports_masking = True
super(ConvAtt, self).__init__(**kwargs)
def build(self, input_shape):
self.num_words = input_shape[1][1]
if self.dim_ordering == 'th':
img_size = input_shape[0][1]
qst_size = input_shape[1][2]
if self.nb_glimpses > 0:
self.U_shape = (self.nb_glimpses, self.nb_embedding, self.nb_row, self.nb_col)
self.V_shape = (qst_size, self.nb_embedding)
self.W_shape = (self.nb_embedding, img_size, self.nb_row, self.nb_col)
elif self.dim_ordering == 'tf':
img_size = input_shape[0][3]
qst_size = input_shape[1][2]
if self.nb_glimpses > 0:
self.U_shape = (self.nb_row, self.nb_col, self.nb_embedding, self.nb_glimpses)
self.V_shape = (qst_size, self.nb_embedding)
self.W_shape = (self.nb_row, self.nb_col, img_size, self.nb_embedding)
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
if self.nb_glimpses > 0:
self.U = self.add_weight(self.U_shape,
initializer=self.init,
name='{}_U'.format(self.name),
regularizer=self.U_regularizer,
constraint=self.U_constraint)
else:
self.U = None
self.V = self.add_weight(self.V_shape,
initializer=self.init,
name='{}_V'.format(self.name),
regularizer=self.V_regularizer,
constraint=self.V_constraint)
self.W = self.add_weight(self.W_shape,
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight((self.nb_embedding,),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
else:
self.b = None
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def preprocess_input(self, x):
return K.dot(x, self.V)
def get_output_shape_for(self, input_shape):
if self.dim_ordering == 'th':
rows = input_shape[0][2]
cols = input_shape[0][3]
elif self.dim_ordering == 'tf':
rows = input_shape[0][1]
cols = input_shape[0][2]
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
'''
rows = conv_output_length(rows, self.nb_row,
self.border_mode, self.subsample[0])
cols = conv_output_length(cols, self.nb_col,
self.border_mode, self.subsample[1])
'''
#return (input_shape[0][0], self.num_words, self.nb_embedding, rows, cols)
if self.return_states:
if False:#self.nb_glimpses > 0:
if self.concat_timesteps:
if self.dim_ordering == 'th':
return (input_shape[0][0], self.nb_glimpses * self.num_words, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0][0], rows, cols, self.nb_glimpses * self.num_words)
else:
if self.dim_ordering == 'th':
return (input_shape[0][0], self.num_words, self.nb_glimpses, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0][0], self.num_words, rows, cols, self.nb_glimpses)
else:
if self.concat_timesteps:
if self.dim_ordering == 'th':
return (input_shape[0][0], self.nb_embedding * self.num_words, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0][0], rows, cols, self.nb_embedding * self.num_words)
else:
if self.dim_ordering == 'th':
return (input_shape[0][0], self.num_words, self.nb_embedding, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0][0], self.num_words, rows, cols, self.nb_embedding)
else:
if False:#self.nb_glimpses > 0:
if self.dim_ordering == 'th':
return (input_shape[0][0], self.nb_glimpses, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0][0], rows, cols, self.nb_glimpses)
else:
if self.dim_ordering == 'th':
return (input_shape[0][0], self.nb_embedding, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0][0], rows, cols, self.nb_embedding)
def call(self, x, mask=None):
preprocessed_img = K.conv2d(x[0], self.W, strides=self.subsample,
border_mode=self.border_mode,
dim_ordering=self.dim_ordering,
filter_shape=self.W_shape)
preprocessed_input = self.preprocess_input(x[1]) # TODO: Dropout?
if self.bias:
if self.dim_ordering == 'th':
preprocessed_img += K.reshape(self.b, (1, self.nb_embedding, 1, 1))
elif self.dim_ordering == 'tf':
preprocessed_img += K.reshape(self.b, (1, 1, 1, self.nb_embedding))
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
last_output, outputs, states = K.rnn(self.step,
preprocessed_input,
self.get_initial_states(x),
go_backwards=False,
mask=None,
# mask[1], #TODO: What does this mask mean? How should it be applied?
constants=[preprocessed_img],
unroll=False,
input_length=self.num_words)
if self.return_states:
# Join temporal and glimpses dimensions
if self.concat_timesteps:
outputs = K.permute_dimensions(outputs, (0,3,4,2,1))
shp = outputs.shape
outputs = K.reshape(outputs, (shp[0], shp[1], shp[2], -1))
outputs = K.permute_dimensions(outputs, (0, 3, 1, 2))
return outputs
else:
return last_output
def get_initial_states(self, x):
initial_state = K.zeros_like(x[0]) # (samples, features_in, height, width)
initial_state = K.sum(initial_state, axis=(1))
initial_state = K.expand_dims(initial_state, dim=1)
"""
if self.nb_glimpses > 0:
initial_state = K.repeat_elements(initial_state, self.nb_glimpses, 1)
else:
initial_state = K.repeat_elements(initial_state, self.nb_embedding, 1)
"""
initial_state = K.repeat_elements(initial_state, self.nb_embedding, 1)
return [initial_state]
#return [initial_state, initial_state] # (samples, nb_glimpses, height, width)
def step(self, x, states):
context = states[1]
activation_t = K.tanh(context + x[:, :, None, None])
if self.nb_glimpses > 0:
e_t = K.conv2d(activation_t,
self.U,
strides=(1, 1),
border_mode='valid',
dim_ordering=self.dim_ordering,
filter_shape=self.U_shape)
else:
e_t = activation_t
# Apply softmax on att. weights
e_t_reshaped = e_t.sum(axis=1)
alphas_shape = e_t_reshaped.shape
e_t_reshaped = e_t_reshaped.reshape([alphas_shape[0], alphas_shape[1] * alphas_shape[2]])
alphas = K.softmax(e_t_reshaped)
alphas = alphas.reshape([alphas_shape[0], alphas_shape[1], alphas_shape[2]])
# Weight input image vectors according to alphas
attended_ctx = context * alphas[:, None, :, :]
############################################################
"""
alphas_shape = e_t.shape
e_t_reshaped = e_t.reshape([alphas_shape[0], alphas_shape[1], alphas_shape[2]*alphas_shape[3]])
e_t_reshaped = K.permute_dimensions(e_t_reshaped, [0,2,1])
alphas = K.softmax_3d(e_t_reshaped)
alphas = K.permute_dimensions(alphas, [0, 2, 1])
alphas = alphas.reshape([alphas_shape[0], alphas_shape[1], alphas_shape[2], alphas_shape[3]])
# Weight input image vectors according to alphas
attended_ctx = context * alphas
#if self.sum_weighted_output:
# attended_ctx = (attended_ctx).sum(axis=1)
"""
#return e_t, [e_t]
return attended_ctx, [attended_ctx] #[attended_ctx, e_t]
    def compute_mask(self, input, mask):
        # Glimpse-dependent variant kept for reference; the unconditional
        # repeat below supersedes it:
        # if self.nb_glimpses > 0:
        #     out_mask = K.repeat(mask[1], self.nb_glimpses)
        # else:
        #     out_mask = K.repeat(mask[1], self.nb_embedding)
        out_mask = K.repeat(mask[1], self.nb_embedding)
        out_mask = K.flatten(out_mask)
        return out_mask
def get_config(self):
config = {'nb_embedding': self.nb_embedding,
'nb_glimpses': self.nb_glimpses,
'concat_timesteps': self.concat_timesteps,
'return_state': self.return_states,
'kernel_initializer': self.init.__name__,
'activation': self.activation.__name__,
'border_mode': self.border_mode,
'dim_ordering': self.dim_ordering,
'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
'U_regularizer': self.U_regularizer.get_config() if self.U_regularizer else None,
'V_regularizer': self.V_regularizer.get_config() if self.V_regularizer else None,
'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
'activity_regularizer': self.activity_regularizer.get_config() if self.activity_regularizer else None,
'W_constraint': self.W_constraint.get_config() if self.W_constraint else None,
'U_constraint': self.U_constraint.get_config() if self.U_constraint else None,
'V_constraint': self.V_constraint.get_config() if self.V_constraint else None,
'b_constraint': self.b_constraint.get_config() if self.b_constraint else None,
'W_learning_rate_multiplier': self.W_learning_rate_multiplier,
'b_learning_rate_multiplier': self.b_learning_rate_multiplier,
'bias': self.bias}
base_config = super(ConvAtt, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def set_lr_multipliers(self, W_learning_rate_multiplier, b_learning_rate_multiplier):
self.W_learning_rate_multiplier = W_learning_rate_multiplier
self.b_learning_rate_multiplier = b_learning_rate_multiplier
self.learning_rate_multipliers = [self.W_learning_rate_multiplier,
self.b_learning_rate_multiplier]
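# A minimal NumPy sketch (illustrative only, not used by the layer) of the per-step
# spatial attention computed in ConvAtt.step: the activated map is reduced over the
# channel axis, the resulting grid of scores is normalised with a softmax over all
# spatial positions, and the context feature map is reweighted position-wise.
# Shapes assume dim_ordering='th'.
def _conv_att_step_sketch(activation_map, context):
    import numpy as np
    # activation_map, context: (channels, rows, cols)
    e = activation_map.sum(axis=0)        # (rows, cols) attention scores
    e = e - e.max()
    alphas = np.exp(e) / np.exp(e).sum()  # softmax over all spatial positions
    return context * alphas[None, :, :]   # position-wise reweighted feature map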
class ConvCoAtt(Layer):
    '''Convolution operator for filtering windows of two-dimensional inputs with an attention mechanism.
    The first input corresponds to the image and the second to a sequence of weighting vectors (one per time step).
When using this layer as the first layer in a model,
provide the keyword argument `input_shape`
(tuple of integers, does not include the sample axis),
e.g. `input_shape=(3, 128, 128)` for 128x128 RGB pictures. An additional input for modulating the attention is required.
# Examples
```python
# apply a 3x3 convolution with 64 output filters on a 256x256 image:
model = Sequential()
model.add(Convolution2D(64, 3, 3, border_mode='same', input_shape=(3, 256, 256)))
# now model.output_shape == (None, 64, 256, 256)
# add a 3x3 convolution on top, with 32 output filters:
model.add(Convolution2D(32, 3, 3, border_mode='same'))
# now model.output_shape == (None, 32, 256, 256)
```
# Arguments
        nb_embedding: dimensionality of the joint embedding space in which the
            two inputs are compared (number of channels of the 1x1 convolution).
        nb_glimpses: number of attention glimpses computed at each step.
        concat_timesteps: if True, the temporal dimension is folded into the
            channel dimension of the output.
        init: name of initialization function for the weights of the layer
(see [initializations](../initializations.md)), or alternatively,
Theano function to use for weights initialization.
This parameter is only relevant if you don't pass
a `weights` argument.
activation: name of activation function to use
(see [activations](../activations.md)),
or alternatively, elementwise Theano function.
If you don't specify anything, no activation is applied
(ie. "linear" activation: a(x) = x).
weights: list of numpy arrays to set as initial weights.
border_mode: 'valid', 'same' or 'full'. ('full' requires the Theano backend.)
subsample: tuple of length 2. Factor by which to subsample output.
Also called strides elsewhere.
W_regularizer: instance of [WeightRegularizer](../regularizers.md)
(eg. L1 or L2 regularization), applied to the main weights matrix.
b_regularizer: instance of [WeightRegularizer](../regularizers.md),
applied to the bias.
activity_regularizer: instance of [ActivityRegularizer](../regularizers.md),
applied to the network output.
W_constraint: instance of the [constraints](../constraints.md) module
(eg. maxnorm, nonneg), applied to the main weights matrix.
b_constraint: instance of the [constraints](../constraints.md) module,
applied to the bias.
dim_ordering: 'th' or 'tf'. In 'th' mode, the channels dimension
            (the depth) is at index 1, in 'tf' mode it is at index 3.
It defaults to the `image_dim_ordering` value found in your
Keras config file at `~/.keras/keras.json`.
If you never set it, then it will be "tf".
bias: whether to include a bias
(i.e. make the layer affine rather than linear).
# Input shape
4D tensor with shape:
`(samples, channels, rows, cols)` if dim_ordering='th'
or 4D tensor with shape:
`(samples, rows, cols, channels)` if dim_ordering='tf'.
        and 3D tensor with shape:
`(samples, steps, features)`
# Output shape
4D tensor with shape:
        `(samples, channels_out, rows, cols)` if dim_ordering='th'
        or 4D tensor with shape:
        `(samples, rows, cols, channels_out)` if dim_ordering='tf',
        where `channels_out` depends on `nb_embedding`, `nb_glimpses`,
        `return_states` and `concat_timesteps` (see `get_output_shape_for`).
`rows` and `cols` values might have changed due to padding.
'''
def __init__(self, nb_embedding, nb_glimpses=1, concat_timesteps=True,
init='glorot_uniform', activation=None, weights=None, return_states=True,
border_mode='valid', dim_ordering='default',
W_regularizer=None, U_regularizer=None, b_regularizer=None,
activity_regularizer=None,
W_constraint=None, U_constraint=None, b_constraint=None,
W_learning_rate_multiplier=None, b_learning_rate_multiplier=None,
bias=True, **kwargs):
if dim_ordering == 'default':
dim_ordering = K.image_dim_ordering()
if border_mode not in {'valid', 'same', 'full'}:
raise ValueError('Invalid border mode for Convolution2D:', border_mode)
self.nb_embedding = nb_embedding
self.nb_glimpses = nb_glimpses
self.return_states = return_states # if True see self.concat_timesteps
# if False output_size=(samples, nb_glimpses, rows, cols)
self.concat_timesteps = concat_timesteps # if True output_size=(samples, nb_glimpses*num_timesteps, rows, cols)
# if False output_size=(samples, num_timesteps, nb_glimpses, rows, cols)
self.nb_row = 1
self.nb_col = 1
self.init = initializations.get(init, dim_ordering=dim_ordering)
self.activation = activations.get(activation)
self.border_mode = border_mode
self.subsample = tuple((1, 1))
if dim_ordering not in {'tf', 'th'}:
raise ValueError('dim_ordering must be in {tf, th}.')
self.dim_ordering = dim_ordering
self.W_regularizer = regularizers.get(W_regularizer)
if self.nb_glimpses > 0:
self.U_regularizer = regularizers.get(U_regularizer)
else:
self.U_regularizer = None
self.b_regularizer = regularizers.get(b_regularizer)
self.activity_regularizer = regularizers.get(activity_regularizer)
self.W_constraint = constraints.get(W_constraint)
if self.nb_glimpses > 0:
self.U_constraint = constraints.get(U_constraint)
else:
self.U_constraint = None
self.b_constraint = constraints.get(b_constraint)
self.W_learning_rate_multiplier = W_learning_rate_multiplier
self.b_learning_rate_multiplier = b_learning_rate_multiplier
self.learning_rate_multipliers = [self.W_learning_rate_multiplier, self.b_learning_rate_multiplier]
self.bias = bias
self.input_spec = [InputSpec(ndim=4)]
self.initial_weights = weights
self.supports_masking = True
super(ConvCoAtt, self).__init__(**kwargs)
def build(self, input_shape):
self.num_words = input_shape[1][1]
if self.dim_ordering == 'th':
img_size = input_shape[0][1]
qst_size = input_shape[1][2]
self.num_row = input_shape[0][2]
self.num_col = input_shape[0][3]
if self.nb_glimpses > 0:
self.U_shape = (self.nb_glimpses, self.nb_embedding, self.nb_row, self.nb_col)
self.W_shape = (self.nb_embedding, img_size+qst_size, self.nb_row, self.nb_col)
elif self.dim_ordering == 'tf':
img_size = input_shape[0][3]
qst_size = input_shape[1][2]
self.num_row = input_shape[0][1]
self.num_col = input_shape[0][2]
if self.nb_glimpses > 0:
self.U_shape = (self.nb_row, self.nb_col, self.nb_embedding, self.nb_glimpses)
self.W_shape = (self.nb_row, self.nb_col, img_size+qst_size, self.nb_embedding)
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
if self.nb_glimpses > 0:
self.U = self.add_weight(self.U_shape,
initializer=self.init,
name='{}_U'.format(self.name),
regularizer=self.U_regularizer,
constraint=self.U_constraint)
else:
self.U = None
self.W = self.add_weight(self.W_shape,
initializer=self.init,
name='{}_W'.format(self.name),
regularizer=self.W_regularizer,
constraint=self.W_constraint)
if self.bias:
self.b = self.add_weight((self.nb_embedding,),
initializer='zero',
name='{}_b'.format(self.name),
regularizer=self.b_regularizer,
constraint=self.b_constraint)
else:
self.b = None
if self.initial_weights is not None:
self.set_weights(self.initial_weights)
del self.initial_weights
self.built = True
def preprocess_input(self, x):
return x
def get_output_shape_for(self, input_shape):
if self.dim_ordering == 'th':
rows = input_shape[0][2]
cols = input_shape[0][3]
elif self.dim_ordering == 'tf':
rows = input_shape[0][1]
cols = input_shape[0][2]
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
'''
rows = conv_output_length(rows, self.nb_row,
self.border_mode, self.subsample[0])
cols = conv_output_length(cols, self.nb_col,
self.border_mode, self.subsample[1])
'''
#return (input_shape[0][0], self.num_words, self.nb_embedding, rows, cols)
if self.return_states:
if self.nb_glimpses > 0:
if self.concat_timesteps:
if self.dim_ordering == 'th':
return (input_shape[0][0], self.nb_glimpses * self.num_words, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0][0], rows, cols, self.nb_glimpses * self.num_words)
else:
if self.dim_ordering == 'th':
return (input_shape[0][0], self.num_words, self.nb_glimpses, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0][0], self.num_words, rows, cols, self.nb_glimpses)
else:
if self.concat_timesteps:
if self.dim_ordering == 'th':
return (input_shape[0][0], self.nb_embedding * self.num_words, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0][0], rows, cols, self.nb_embedding * self.num_words)
else:
if self.dim_ordering == 'th':
return (input_shape[0][0], self.num_words, self.nb_embedding, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0][0], self.num_words, rows, cols, self.nb_embedding)
else:
if self.nb_glimpses > 0:
if self.dim_ordering == 'th':
return (input_shape[0][0], self.nb_glimpses, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0][0], rows, cols, self.nb_glimpses)
else:
if self.dim_ordering == 'th':
return (input_shape[0][0], self.nb_embedding, rows, cols)
elif self.dim_ordering == 'tf':
return (input_shape[0][0], rows, cols, self.nb_embedding)
def call(self, x, mask=None):
preprocessed_img = x[0]
preprocessed_input = self.preprocess_input(x[1])
last_output, outputs, states = K.rnn(self.step,
preprocessed_input,
self.get_initial_states(x),
go_backwards=False,
mask=None,
# mask[1], #TODO: What does this mask mean? How should it be applied?
constants=[preprocessed_img],
unroll=False,
input_length=self.num_words)
if self.return_states:
# Join temporal and glimpses dimensions
if self.concat_timesteps:
outputs = K.permute_dimensions(outputs, (0,3,4,2,1))
shp = outputs.shape
outputs = K.reshape(outputs, (shp[0], shp[1], shp[2], -1))
outputs = K.permute_dimensions(outputs, (0, 3, 1, 2))
return outputs
else:
return last_output
def get_initial_states(self, x):
initial_state = K.zeros_like(x[0]) # (samples, features_in, height, width)
initial_state = K.sum(initial_state, axis=(1))
initial_state = K.expand_dims(initial_state, dim=1)
initial_state = K.repeat_elements(initial_state, self.nb_embedding, 1)
return [initial_state]
#return [initial_state, initial_state] # (samples, nb_glimpses, height, width)
def step(self, x, states):
context = states[1]
if self.dim_ordering == 'th':
x = K.repeatRdim(x, self.num_row, axis=2)
x = K.repeatRdim(x, self.num_col, axis=3)
concat_axis = 1
elif self.dim_ordering == 'tf':
x = K.repeatRdim(x, self.num_row, axis=1)
x = K.repeatRdim(x, self.num_col, axis=2)
concat_axis = 3
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
word_ctx = K.concatenate([x, context], axis=concat_axis)
word_ctx = K.conv2d(word_ctx,
self.W,
strides=(1, 1),
border_mode='valid',
dim_ordering=self.dim_ordering,
filter_shape=self.W_shape)
if self.bias:
if self.dim_ordering == 'th':
word_ctx = word_ctx + K.reshape(self.b, (1, self.nb_embedding, 1, 1))
elif self.dim_ordering == 'tf':
word_ctx = word_ctx + K.reshape(self.b, (1, 1, 1, self.nb_embedding))
else:
raise ValueError('Invalid dim_ordering:', self.dim_ordering)
activation_t = K.relu(word_ctx)
if self.nb_glimpses > 0:
e_t = K.conv2d(activation_t,
self.U,
strides=(1, 1),
border_mode='valid',
dim_ordering=self.dim_ordering,
filter_shape=self.U_shape)
else:
e_t = activation_t
# Apply softmax on att. weights
e_t_reshaped = e_t.sum(axis=1)
alphas_shape = e_t_reshaped.shape
e_t_reshaped = e_t_reshaped.reshape([alphas_shape[0], alphas_shape[1] * alphas_shape[2]])
alphas = K.softmax(e_t_reshaped)
alphas = alphas.reshape([alphas_shape[0], alphas_shape[1], alphas_shape[2]])
# Weight input image vectors according to alphas
#attended = context * alphas[:, None, :, :]
attended = word_ctx * alphas[:, None, :, :]
#return e_t, [e_t]
return attended, [attended] #[attended, e_t]
    def compute_mask(self, input, mask):
        # Glimpse-dependent variant kept for reference; the unconditional
        # repeat below supersedes it:
        # if self.nb_glimpses > 0:
        #     out_mask = K.repeat(mask[1], self.nb_glimpses)
        # else:
        #     out_mask = K.repeat(mask[1], self.nb_embedding)
        out_mask = K.repeat(mask[1], self.nb_embedding)
        out_mask = K.flatten(out_mask)
        return out_mask
def get_config(self):
config = {'nb_embedding': self.nb_embedding,
'nb_glimpses': self.nb_glimpses,
'concat_timesteps': self.concat_timesteps,
'return_state': self.return_states,
'kernel_initializer': self.init.__name__,
'activation': self.activation.__name__,
'border_mode': self.border_mode,
'dim_ordering': self.dim_ordering,
'W_regularizer': self.W_regularizer.get_config() if self.W_regularizer else None,
'U_regularizer': self.U_regularizer.get_config() if self.U_regularizer else None,
'b_regularizer': self.b_regularizer.get_config() if self.b_regularizer else None,
'activity_regularizer': self.activity_regularizer.get_config() if self.activity_regularizer else None,
'W_constraint': self.W_constraint.get_config() if self.W_constraint else None,
'U_constraint': self.U_constraint.get_config() if self.U_constraint else None,
'b_constraint': self.b_constraint.get_config() if self.b_constraint else None,
'W_learning_rate_multiplier': self.W_learning_rate_multiplier,
'b_learning_rate_multiplier': self.b_learning_rate_multiplier,
'bias': self.bias}
base_config = super(ConvCoAtt, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
def set_lr_multipliers(self, W_learning_rate_multiplier, b_learning_rate_multiplier):
self.W_learning_rate_multiplier = W_learning_rate_multiplier
self.b_learning_rate_multiplier = b_learning_rate_multiplier
self.learning_rate_multipliers = [self.W_learning_rate_multiplier,
self.b_learning_rate_multiplier]
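# A minimal NumPy sketch (illustrative only, not used by the layer) of the
# co-attention step in ConvCoAtt.step: the current word vector is tiled over the
# spatial grid, concatenated with the image feature map along the channel axis and
# passed through a 1x1 convolution (here a plain per-position linear map), followed
# by a ReLU. Shapes assume dim_ordering='th'.
def _conv_co_att_step_sketch(word, img, W, b):
    import numpy as np
    # word: (qst_size,), img: (img_size, rows, cols), W: (qst_size + img_size, nb_embedding)
    rows, cols = img.shape[1], img.shape[2]
    tiled = np.tile(word[:, None, None], (1, rows, cols))  # (qst_size, rows, cols)
    word_ctx = np.concatenate([tiled, img], axis=0)        # stack along the channel axis
    # 1x1 convolution == per-position linear map over the channel axis
    word_ctx = np.einsum('cij,ce->eij', word_ctx, W) + b[:, None, None]
    return np.maximum(word_ctx, 0.0)                       # ReLU, as in step()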
| 46.775093 | 124 | 0.581204 | 9,003 | 75,495 | 4.65256 | 0.049428 | 0.035811 | 0.014706 | 0.011818 | 0.95082 | 0.935231 | 0.928307 | 0.921861 | 0.912479 | 0.903311 | 0 | 0.012644 | 0.322207 | 75,495 | 1,613 | 125 | 46.804092 | 0.805824 | 0.249712 | 0 | 0.843035 | 0 | 0 | 0.069903 | 0.008579 | 0 | 0 | 0 | 0.0031 | 0.006237 | 1 | 0.057173 | false | 0.006237 | 0.005198 | 0.010395 | 0.139293 | 0.00104 |
| 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
78d9d3d0584465474f5df52672e154daba510b66 | 67,493 | py | Python | reviewboard/reviews/tests/test_entries.py | amalik2/reviewboard | 676aa2dce38ce619a74f2d4cb3cfae9bce21416e | ["MIT"] | 2 | 2020-06-19T14:57:49.000Z | 2020-06-19T15:17:40.000Z |
reviewboard/reviews/tests/test_entries.py | amalik2/reviewboard | 676aa2dce38ce619a74f2d4cb3cfae9bce21416e | ["MIT"] | 1 | 2019-08-03T01:48:33.000Z | 2019-08-03T01:48:33.000Z |
reviewboard/reviews/tests/test_entries.py | amalik2/reviewboard | 676aa2dce38ce619a74f2d4cb3cfae9bce21416e | ["MIT"] | null | null | null |
"""Unit tests for review request page entries."""
from __future__ import unicode_literals
import logging
from datetime import datetime, timedelta
from django.contrib.auth.models import AnonymousUser, User
from django.template import RequestContext
from django.test.client import RequestFactory
from django.utils import six, timezone
from django.utils.timezone import utc
from djblets.testing.decorators import add_fixtures
from kgb import SpyAgency
from reviewboard.changedescs.models import ChangeDescription
from reviewboard.reviews.detail import (BaseReviewRequestPageEntry,
ChangeEntry,
InitialStatusUpdatesEntry,
ReviewEntry,
ReviewRequestPageData,
StatusUpdatesEntryMixin)
from reviewboard.reviews.models import (BaseComment, GeneralComment,
StatusUpdate)
from reviewboard.testing import TestCase
class BaseReviewRequestPageEntryTests(SpyAgency, TestCase):
"""Unit tests for BaseReviewRequestPageEntry."""
fixtures = ['test_users']
def setUp(self):
super(BaseReviewRequestPageEntryTests, self).setUp()
self.review_request = self.create_review_request()
self.request = RequestFactory().request()
self.request.user = AnonymousUser()
self.data = ReviewRequestPageData(review_request=self.review_request,
request=self.request)
def test_init_with_no_updated_timestamp(self):
"""Testing BaseReviewRequestPageEntry.__init__ without an
updated_timestamp specified
"""
entry = BaseReviewRequestPageEntry(
data=self.data,
entry_id='test',
added_timestamp=datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
self.assertEqual(entry.updated_timestamp,
datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
def test_render_to_string(self):
"""Testing BaseReviewRequestPageEntry.render_to_string"""
entry = BaseReviewRequestPageEntry(data=self.data,
entry_id='test',
added_timestamp=None)
entry.template_name = 'reviews/entries/base.html'
html = entry.render_to_string(
self.request,
RequestContext(self.request, {
'last_visited': timezone.now(),
}))
self.assertNotEqual(html, '')
def test_render_to_string_with_entry_pos_main(self):
"""Testing BaseReviewRequestPageEntry.render_to_string with
entry_pos=ENTRY_POS_MAIN
"""
entry = BaseReviewRequestPageEntry(data=self.data,
entry_id='test',
added_timestamp=None)
entry.template_name = 'reviews/entries/base.html'
entry.entry_pos = BaseReviewRequestPageEntry.ENTRY_POS_MAIN
html = entry.render_to_string(
self.request,
RequestContext(self.request, {
'last_visited': timezone.now(),
}))
self.assertIn('<div class="box-statuses">', html)
def test_render_to_string_with_entry_pos_initial(self):
"""Testing BaseReviewRequestPageEntry.render_to_string with
entry_pos=ENTRY_POS_INITIAL
"""
entry = BaseReviewRequestPageEntry(data=self.data,
entry_id='test',
added_timestamp=None)
entry.template_name = 'reviews/entries/base.html'
entry.entry_pos = BaseReviewRequestPageEntry.ENTRY_POS_INITIAL
html = entry.render_to_string(
self.request,
RequestContext(self.request, {
'last_visited': timezone.now(),
}))
self.assertNotIn('<div class="box-statuses">', html)
def test_render_to_string_with_new_entry(self):
"""Testing BaseReviewRequestPageEntry.render_to_string with
entry_is_new=True
"""
entry = BaseReviewRequestPageEntry(
data=self.data,
entry_id='test',
added_timestamp=datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
entry.template_name = 'reviews/entries/base.html'
self.request.user = User.objects.create_user(username='test-user',
email='user@example.com')
html = entry.render_to_string(
self.request,
RequestContext(self.request, {
'last_visited': datetime(2017, 9, 7, 10, 0, 0, tzinfo=utc),
}))
self.assertIn(
'class="review-request-page-entry new-review-request-page-entry',
html)
def test_render_to_string_without_new_entry(self):
"""Testing BaseReviewRequestPageEntry.render_to_string with
entry_is_new=False
"""
entry = BaseReviewRequestPageEntry(
data=self.data,
entry_id='test',
added_timestamp=datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
entry.template_name = 'reviews/entries/base.html'
self.request.user = User.objects.create_user(username='test-user',
email='user@example.com')
html = entry.render_to_string(
self.request,
RequestContext(self.request, {
'last_visited': datetime(2017, 9, 7, 18, 0, 0, tzinfo=utc),
}))
self.assertNotEqual(html, '')
self.assertNotIn(
'class="review-request-page-entry new-review-request-page-entry"',
html)
def test_render_to_string_with_no_template(self):
"""Testing BaseReviewRequestPageEntry.render_to_string with
template_name=None
"""
entry = BaseReviewRequestPageEntry(data=self.data,
entry_id='test',
added_timestamp=None)
html = entry.render_to_string(
self.request,
RequestContext(self.request, {
'last_visited': timezone.now(),
}))
self.assertEqual(html, '')
def test_render_to_string_with_has_content_false(self):
"""Testing BaseReviewRequestPageEntry.render_to_string with
has_content=False
"""
entry = BaseReviewRequestPageEntry(data=self.data,
entry_id='test',
added_timestamp=None)
entry.template_name = 'reviews/entries/base.html'
entry.has_content = False
html = entry.render_to_string(
self.request,
RequestContext(self.request, {
'last_visited': timezone.now(),
}))
self.assertEqual(html, '')
def test_render_to_string_with_exception(self):
"""Testing BaseReviewRequestPageEntry.render_to_string with
exception
"""
entry = BaseReviewRequestPageEntry(data=self.data,
entry_id='test',
added_timestamp=None)
entry.template_name = 'reviews/entries/NOT_FOUND.html'
self.spy_on(logging.exception)
html = entry.render_to_string(
self.request,
RequestContext(self.request, {
'last_visited': timezone.now(),
}))
self.assertEqual(html, '')
self.assertTrue(logging.exception.spy.called)
self.assertEqual(logging.exception.spy.calls[0].args[0],
'Error rendering template for %s (ID=%s): %s')
def test_is_entry_new_with_timestamp(self):
"""Testing BaseReviewRequestPageEntry.is_entry_new with timestamp"""
entry = BaseReviewRequestPageEntry(
data=self.data,
entry_id='test',
added_timestamp=datetime(2017, 9, 7, 15, 36, 0, tzinfo=utc))
user = User.objects.create_user(username='test-user',
email='user@example.com')
self.assertTrue(entry.is_entry_new(
last_visited=datetime(2017, 9, 7, 10, 0, 0, tzinfo=utc),
user=user))
self.assertFalse(entry.is_entry_new(
last_visited=datetime(2017, 9, 7, 16, 0, 0, tzinfo=utc),
user=user))
self.assertFalse(entry.is_entry_new(
last_visited=datetime(2017, 9, 7, 15, 36, 0, tzinfo=utc),
user=user))
def test_is_entry_new_without_timestamp(self):
"""Testing BaseReviewRequestPageEntry.is_entry_new without timestamp
"""
entry = BaseReviewRequestPageEntry(data=self.data,
entry_id='test',
added_timestamp=None)
self.assertFalse(entry.is_entry_new(
last_visited=datetime(2017, 9, 7, 10, 0, 0, tzinfo=utc),
user=User.objects.create_user(username='test-user',
email='user@example.com')))
def test_collapsed_with_older_than_last_visited(self):
"""Testing BaseReviewRequestPageEntry.collapsed with entry older than
last visited
"""
self.data.latest_changedesc_timestamp = \
self.review_request.time_added + timedelta(days=5)
self.data.last_visited = datetime(2017, 9, 7, 10, 0, 0, tzinfo=utc)
entry = BaseReviewRequestPageEntry(
data=self.data,
entry_id='test',
added_timestamp=self.data.last_visited - timedelta(days=2),
updated_timestamp=self.data.last_visited - timedelta(days=1))
self.assertTrue(entry.collapsed)
def test_collapsed_with_newer_than_last_visited(self):
"""Testing BaseReviewRequestPageEntry.collapsed with entry newer than
last visited
"""
self.data.last_visited = datetime(2017, 9, 7, 10, 0, 0, tzinfo=utc)
entry = BaseReviewRequestPageEntry(
data=self.data,
entry_id='test',
added_timestamp=self.data.last_visited,
updated_timestamp=self.data.last_visited + timedelta(days=1))
self.assertFalse(entry.collapsed)
def test_collapsed_without_last_visited(self):
"""Testing BaseReviewRequestPageEntry.collapsed without last visited
timestamp
"""
entry = BaseReviewRequestPageEntry(
data=self.data,
entry_id='test',
added_timestamp=datetime(2017, 9, 6, 10, 0, 0, tzinfo=utc),
updated_timestamp=datetime(2017, 9, 7, 10, 0, 0, tzinfo=utc))
self.assertFalse(entry.collapsed)
def test_collapsed_with_older_than_changedesc(self):
"""Testing BaseReviewRequestPageEntry.collapsed with older than latest
Change Description
"""
self.data.latest_changedesc_timestamp = \
self.review_request.time_added + timedelta(days=5)
self.data.last_visited = \
self.review_request.time_added + timedelta(days=10)
entry = BaseReviewRequestPageEntry(
data=self.data,
entry_id='test',
added_timestamp=(self.data.latest_changedesc_timestamp -
timedelta(days=2)),
updated_timestamp=(self.data.latest_changedesc_timestamp -
timedelta(days=1)))
self.assertTrue(entry.collapsed)
def test_collapsed_with_newer_than_changedesc(self):
"""Testing BaseReviewRequestPageEntry.collapsed with newer than latest
Change Description
"""
self.data.latest_changedesc_timestamp = self.review_request.time_added
self.data.last_visited = \
self.review_request.time_added + timedelta(days=10)
entry = BaseReviewRequestPageEntry(
data=self.data,
entry_id='test',
added_timestamp=self.data.latest_changedesc_timestamp,
updated_timestamp=(self.data.latest_changedesc_timestamp +
timedelta(days=1)))
self.assertFalse(entry.collapsed)
class StatusUpdatesEntryMixinTests(TestCase):
"""Unit tests for StatusUpdatesEntryMixin."""
def test_add_update_with_done_failure(self):
"""Testing StatusUpdatesEntryMixin.add_update with DONE_FAILURE"""
status_update = StatusUpdate(state=StatusUpdate.DONE_FAILURE)
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertEqual(entry.status_updates, [status_update])
self.assertEqual(status_update.header_class,
'status-update-state-failure')
def test_add_update_with_error(self):
"""Testing StatusUpdatesEntryMixin.add_update with ERROR"""
status_update = StatusUpdate(state=StatusUpdate.ERROR)
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertEqual(entry.status_updates, [status_update])
self.assertEqual(status_update.header_class,
'status-update-state-failure')
def test_add_update_with_timeout(self):
"""Testing StatusUpdatesEntryMixin.add_update with TIMEOUT"""
status_update = StatusUpdate(state=StatusUpdate.TIMEOUT)
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertEqual(entry.status_updates, [status_update])
self.assertEqual(status_update.header_class,
'status-update-state-failure')
def test_add_update_with_pending(self):
"""Testing StatusUpdatesEntryMixin.add_update with PENDING"""
status_update = StatusUpdate(state=StatusUpdate.PENDING)
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertEqual(entry.status_updates, [status_update])
self.assertEqual(status_update.header_class,
'status-update-state-pending')
def test_add_update_with_done_success(self):
"""Testing StatusUpdatesEntryMixin.add_update with DONE_SUCCESS"""
status_update = StatusUpdate(state=StatusUpdate.DONE_SUCCESS)
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertEqual(entry.status_updates, [status_update])
self.assertEqual(status_update.header_class,
'status-update-state-success')
def test_add_update_html_rendering(self):
"""Testing StatusUpdatesEntryMixin.add_update HTML rendering"""
status_update = StatusUpdate(state=StatusUpdate.DONE_SUCCESS,
description='My description.',
summary='My summary.')
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertHTMLEqual(
status_update.summary_html,
('<div class="status-update-summary-entry'
' status-update-state-success">\n'
' <span class="summary">My summary.</span>\n'
' My description.\n'
'</div>'))
def test_add_update_html_rendering_with_url(self):
"""Testing StatusUpdatesEntryMixin.add_update HTML rendering with URL
"""
status_update = StatusUpdate(state=StatusUpdate.DONE_SUCCESS,
description='My description.',
summary='My summary.',
url='https://example.com/')
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertHTMLEqual(
status_update.summary_html,
('<div class="status-update-summary-entry'
' status-update-state-success">\n'
' <span class="summary">My summary.</span>\n'
' My description.\n'
' <a href="https://example.com/">https://example.com/</a>'
'</div>'))
def test_add_update_html_rendering_with_url_and_text(self):
"""Testing StatusUpdatesEntryMixin.add_update HTML rendering with URL
and URL text
"""
status_update = StatusUpdate(state=StatusUpdate.DONE_SUCCESS,
description='My description.',
summary='My summary.',
url='https://example.com/',
url_text='My URL')
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertHTMLEqual(
status_update.summary_html,
('<div class="status-update-summary-entry'
' status-update-state-success">\n'
' <span class="summary">My summary.</span>\n'
' My description.\n'
' <a href="https://example.com/">My URL</a>'
'</div>'))
def test_add_update_html_rendering_with_timeout(self):
"""Testing StatusUpdatesEntryMixin.add_update HTML rendering with
timeout
"""
status_update = StatusUpdate(state=StatusUpdate.TIMEOUT,
description='My description.',
summary='My summary.')
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
self.assertHTMLEqual(
status_update.summary_html,
('<div class="status-update-summary-entry'
' status-update-state-failure">\n'
' <span class="summary">My summary.</span>\n'
' timed out.\n'
'</div>'))
@add_fixtures(['test_users'])
def test_add_comment(self):
"""Testing StatusUpdatesEntryMixin.add_comment"""
review_request = self.create_review_request()
review = self.create_review(review_request)
comment = self.create_general_comment(review)
# This is needed by the entry's add_comment(). It's normally built when
# creating the entries and their data.
comment.review_obj = review
status_update = self.create_status_update(
review_request=review_request,
review=review)
entry = StatusUpdatesEntryMixin()
entry.add_update(status_update)
entry.add_comment('general_comments', comment)
self.assertEqual(status_update.comments['general_comments'], [comment])
def test_finalize_with_all_states(self):
"""Testing StatusUpdatesEntryMixin.finalize with all states"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.DONE_FAILURE))
for i in range(2):
entry.add_update(StatusUpdate(state=StatusUpdate.DONE_SUCCESS))
for i in range(3):
entry.add_update(StatusUpdate(state=StatusUpdate.PENDING))
for i in range(4):
entry.add_update(StatusUpdate(state=StatusUpdate.ERROR))
for i in range(5):
entry.add_update(StatusUpdate(state=StatusUpdate.TIMEOUT))
entry.finalize()
self.assertEqual(
entry.state_summary,
'1 failed, 2 succeeded, 3 pending, 4 failed with error, '
'5 timed out')
def test_finalize_with_done_failure(self):
"""Testing StatusUpdatesEntryMixin.finalize with DONE_FAILURE"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.DONE_FAILURE))
entry.finalize()
self.assertEqual(entry.state_summary, '1 failed')
self.assertEqual(entry.state_summary_class,
'status-update-state-failure')
def test_finalize_with_error(self):
"""Testing StatusUpdatesEntryMixin.finalize with ERROR"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.ERROR))
entry.finalize()
self.assertEqual(entry.state_summary, '1 failed with error')
self.assertEqual(entry.state_summary_class,
'status-update-state-failure')
def test_finalize_with_timeout(self):
"""Testing StatusUpdatesEntryMixin.finalize with TIMEOUT"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.TIMEOUT))
entry.finalize()
self.assertEqual(entry.state_summary, '1 timed out')
self.assertEqual(entry.state_summary_class,
'status-update-state-failure')
def test_finalize_with_pending(self):
"""Testing StatusUpdatesEntryMixin.finalize with PENDING"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.PENDING))
entry.finalize()
self.assertEqual(entry.state_summary, '1 pending')
self.assertEqual(entry.state_summary_class,
'status-update-state-pending')
def test_finalize_with_done_success(self):
"""Testing StatusUpdatesEntryMixin.finalize with DONE_SUCCESS"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.DONE_SUCCESS))
entry.finalize()
self.assertEqual(entry.state_summary, '1 succeeded')
self.assertEqual(entry.state_summary_class,
'status-update-state-success')
def test_finalize_with_failures_take_precedence(self):
"""Testing StatusUpdatesEntryMixin.finalize with failures taking
precedence over PENDING and DONE_SUCCESS
"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.DONE_FAILURE))
entry.add_update(StatusUpdate(state=StatusUpdate.PENDING))
entry.add_update(StatusUpdate(state=StatusUpdate.DONE_SUCCESS))
entry.finalize()
self.assertEqual(entry.state_summary,
'1 failed, 1 succeeded, 1 pending')
self.assertEqual(entry.state_summary_class,
'status-update-state-failure')
def test_finalize_with_pending_take_precedence(self):
"""Testing StatusUpdatesEntryMixin.finalize with PENDING taking
        precedence over DONE_SUCCESS
"""
entry = StatusUpdatesEntryMixin()
entry.add_update(StatusUpdate(state=StatusUpdate.PENDING))
entry.add_update(StatusUpdate(state=StatusUpdate.DONE_SUCCESS))
entry.finalize()
self.assertEqual(entry.state_summary, '1 succeeded, 1 pending')
self.assertEqual(entry.state_summary_class,
'status-update-state-pending')
@add_fixtures(['test_users'])
def test_populate_status_updates(self):
"""Testing StatusUpdatesEntryMixin.populate_status_updates"""
review_request = self.create_review_request()
review = self.create_review(review_request, public=True)
comment = self.create_general_comment(review)
# This state is normally set in ReviewRequestPageData.
comment._type = 'general_comments'
comment.review_obj = review
status_updates = [
StatusUpdate(state=StatusUpdate.PENDING),
StatusUpdate(state=StatusUpdate.DONE_FAILURE,
review=review)
]
request = RequestFactory().get('/r/1/')
request.user = AnonymousUser()
data = ReviewRequestPageData(review_request=review_request,
request=request)
data.review_comments[review.pk] = [comment]
entry = StatusUpdatesEntryMixin()
entry.collapsed = True
entry.data = data
entry.populate_status_updates(status_updates)
self.assertTrue(entry.collapsed)
self.assertEqual(entry.status_updates, status_updates)
status_update = entry.status_updates[0]
self.assertIsNone(status_update.review)
self.assertEqual(
status_update.comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [],
})
status_update = entry.status_updates[1]
self.assertEqual(status_update.review, review)
self.assertEqual(
status_update.comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [comment],
})
@add_fixtures(['test_users'])
def test_populate_status_updates_with_draft_replies(self):
"""Testing StatusUpdatesEntryMixin.populate_status_updates with
draft replies
"""
review_request = self.create_review_request()
review = self.create_review(review_request, public=True)
comment = self.create_general_comment(review)
reply = self.create_reply(review)
reply_comment = self.create_general_comment(reply, reply_to=comment)
# This state is normally set in ReviewRequestPageData.
comment._type = 'general_comments'
comment.review_obj = review
status_updates = [
StatusUpdate(state=StatusUpdate.PENDING),
StatusUpdate(state=StatusUpdate.DONE_FAILURE,
review=review)
]
request = RequestFactory().get('/r/1/')
request.user = AnonymousUser()
data = ReviewRequestPageData(review_request=review_request,
request=request)
data.review_comments[review.pk] = [comment]
data.draft_reply_comments[review.pk] = [reply_comment]
entry = StatusUpdatesEntryMixin()
entry.data = data
entry.populate_status_updates(status_updates)
self.assertEqual(entry.status_updates, status_updates)
status_update = entry.status_updates[0]
self.assertIsNone(status_update.review)
self.assertEqual(
status_update.comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [],
})
status_update = entry.status_updates[1]
self.assertEqual(status_update.review, review)
self.assertEqual(
status_update.comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [comment],
})
class InitialStatusUpdatesEntryTests(TestCase):
"""Unit tests for InitialStatusUpdatesEntry."""
fixtures = ['test_users']
def setUp(self):
super(InitialStatusUpdatesEntryTests, self).setUp()
self.request = RequestFactory().get('/r/1/')
self.request.user = AnonymousUser()
self.review_request = self.create_review_request(
time_added=datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
self.review = self.create_review(
self.review_request,
public=True,
timestamp=datetime(2017, 9, 14, 15, 40, 0, tzinfo=utc))
self.general_comment = self.create_general_comment(self.review,
issue_opened=False)
self.status_update = self.create_status_update(
self.review_request,
review=self.review,
timestamp=datetime(2017, 9, 14, 15, 40, 0, tzinfo=utc),
state=StatusUpdate.DONE_FAILURE)
self.data = ReviewRequestPageData(
review_request=self.review_request,
request=self.request,
last_visited=self.review_request.time_added + timedelta(days=10))
def test_added_timestamp(self):
"""Testing InitialStatusUpdatesEntry.added_timestamp"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertEqual(entry.added_timestamp,
datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
def test_updated_timestamp(self):
"""Testing InitialStatusUpdatesEntry.updated_timestamp"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertEqual(entry.updated_timestamp,
datetime(2017, 9, 14, 15, 40, 0, tzinfo=utc))
def test_build_entries(self):
"""Testing InitialStatusUpdatesEntry.build_entries"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entries = list(InitialStatusUpdatesEntry.build_entries(self.data))
self.assertEqual(len(entries), 1)
entry = entries[0]
self.assertEqual(entry.added_timestamp,
datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
self.assertEqual(entry.updated_timestamp,
datetime(2017, 9, 14, 15, 40, 0, tzinfo=utc))
self.assertEqual(entry.status_updates, [self.status_update])
self.assertEqual(
entry.status_updates_by_review,
{
self.review.pk: self.status_update,
})
self.assertEqual(
entry.status_updates[0].comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [self.general_comment],
})
def test_build_entries_with_changedesc(self):
"""Testing InitialStatusUpdatesEntry.build_entries with
ChangeDescription following this entry
"""
self.review_request.changedescs.create(public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entries = list(InitialStatusUpdatesEntry.build_entries(self.data))
self.assertEqual(len(entries), 1)
entry = entries[0]
self.assertEqual(entry.status_updates, [self.status_update])
self.assertEqual(
entry.status_updates_by_review,
{
self.review.pk: self.status_update,
})
status_update = entry.status_updates[0]
self.assertEqual(status_update.review, self.review)
self.assertIsNone(status_update.change_description)
self.assertEqual(
status_update.comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [self.general_comment],
})
def test_is_entry_new_with_timestamp(self):
"""Testing InitialStatusUpdatesEntry.is_entry_new"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
user = User.objects.create_user(username='test-user',
email='user@example.com')
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertFalse(entry.is_entry_new(
last_visited=self.review_request.last_updated - timedelta(days=1),
user=user))
def test_collapsed_with_no_changedescs_and_last_visited(self):
"""Testing InitialStatusUpdatesEntry.collapsed with no Change
Descriptions and page previously visited
"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertTrue(len(self.data.changedescs) == 0)
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertTrue(entry.collapsed)
def test_collapsed_with_no_changedescs_and_not_last_visited(self):
"""Testing InitialStatusUpdatesEntry.collapsed with no Change
Descriptions and page not previously visited
"""
self.data.last_visited = None
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertTrue(len(self.data.changedescs) == 0)
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertFalse(entry.collapsed)
def test_collapsed_with_changedescs_and_last_visited(self):
"""Testing InitialStatusUpdatesEntry.collapsed with Change Descriptions
and page previously visited
"""
self.review_request.changedescs.create(public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertTrue(len(self.data.changedescs) > 0)
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertTrue(entry.collapsed)
def test_collapsed_with_changedescs_and_no_last_visited(self):
"""Testing InitialStatusUpdatesEntry.collapsed with Change Descriptions
and page not previously visited
"""
self.data.last_visited = None
self.review_request.changedescs.create(public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertTrue(len(self.data.changedescs) > 0)
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertFalse(entry.collapsed)
def test_collapsed_with_pending_status_updates(self):
"""Testing InitialStatusUpdatesEntry.collapsed with pending status
updates
"""
self.status_update.state = StatusUpdate.PENDING
self.status_update.review = None
self.status_update.save(update_fields=('state', 'review'))
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertFalse(entry.collapsed)
def test_collapsed_with_status_update_timestamp_gt_last_visited(self):
"""Testing InitialStatusUpdatesEntry.collapsed with status update
timestamp newer than last visited
"""
# To update the status update's timestamp, we need to perform an
# update() call on the queryset and reload.
StatusUpdate.objects.filter(pk=self.status_update.pk).update(
timestamp=self.data.last_visited + timedelta(days=1))
self.status_update = StatusUpdate.objects.get(pk=self.status_update.pk)
self.assertTrue(self.status_update.timestamp > self.data.last_visited)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertFalse(entry.collapsed)
def test_collapsed_with_status_update_timestamp_lt_last_visited(self):
"""Testing InitialStatusUpdatesEntry.collapsed with status update
timestamp older than last visited
"""
# To update the status update's timestamp, we need to perform an
# update() call on the queryset and reload.
StatusUpdate.objects.filter(pk=self.status_update.pk).update(
timestamp=self.data.last_visited - timedelta(days=1))
self.status_update = StatusUpdate.objects.get(pk=self.status_update.pk)
self.assertTrue(self.status_update.timestamp < self.data.last_visited)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertTrue(entry.collapsed)
def test_collapsed_with_status_updates_and_no_reviews(self):
"""Testing InitialStatusUpdatesEntry.collapsed with status updates
and no reviews
"""
self.status_update.state = StatusUpdate.DONE_SUCCESS
self.status_update.review = None
self.status_update.save(update_fields=('state', 'review'))
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertTrue(entry.collapsed)
def test_collapsed_with_status_updates_and_draft_comment_replies(self):
"""Testing InitialStatusUpdatesEntry.collapsed with status updates
containing draft comment replies
"""
self.request.user = self.review_request.submitter
self.assertEqual(self.status_update.state, StatusUpdate.DONE_FAILURE)
reply = self.create_reply(self.review, user=self.request.user)
self.create_general_comment(reply, reply_to=self.general_comment)
self.review_request.changedescs.create(public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertIn(self.review.pk, self.data.draft_reply_comments)
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertFalse(entry.collapsed)
def test_collapsed_with_status_updates_and_draft_body_top_replies(self):
"""Testing InitialStatusUpdatesEntry.collapsed with status updates
containing draft replies to body_top
"""
self.request.user = self.review_request.submitter
self.assertEqual(self.status_update.state, StatusUpdate.DONE_FAILURE)
self.create_reply(self.review,
user=self.request.user,
body_top_reply_to=self.review)
self.review_request.changedescs.create(public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertIn(self.review.pk, self.data.draft_body_top_replies)
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertFalse(entry.collapsed)
def test_collapsed_with_status_updates_and_draft_body_bottom_replies(self):
"""Testing InitialStatusUpdatesEntry.collapsed with status updates
containing draft replies to body_bottom
"""
self.request.user = self.review_request.submitter
self.assertEqual(self.status_update.state, StatusUpdate.DONE_FAILURE)
self.create_reply(self.review,
user=self.request.user,
body_bottom_reply_to=self.review)
self.review_request.changedescs.create(public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertIn(self.review.pk, self.data.draft_body_bottom_replies)
entry = InitialStatusUpdatesEntry(data=self.data)
self.assertFalse(entry.collapsed)
class ReviewEntryTests(TestCase):
"""Unit tests for ReviewEntry."""
fixtures = ['test_users']
def setUp(self):
super(ReviewEntryTests, self).setUp()
self.request = RequestFactory().get('/r/1/')
self.request.user = AnonymousUser()
self.review_request = self.create_review_request()
self.review = self.create_review(
self.review_request,
id=123,
public=True,
timestamp=datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
self.changedesc = self.review_request.changedescs.create(
timestamp=self.review.timestamp + timedelta(days=10),
public=True)
self.data = ReviewRequestPageData(
review_request=self.review_request,
request=self.request,
last_visited=self.changedesc.timestamp)
def test_added_timestamp(self):
"""Testing ReviewEntry.added_timestamp"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertEqual(entry.added_timestamp,
datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
def test_updated_timestamp(self):
"""Testing ReviewEntry.updated_timestamp"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertEqual(entry.updated_timestamp,
datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
def test_updated_timestamp_with_replies(self):
"""Testing ReviewEntry.updated_timestamp with replies"""
self.create_reply(self.review,
timestamp=datetime(2017, 9, 14, 15, 40, 0,
tzinfo=utc),
publish=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertEqual(entry.updated_timestamp,
datetime(2017, 9, 14, 15, 40, 0, tzinfo=utc))
def test_get_dom_element_id(self):
"""Testing ReviewEntry.get_dom_element_id"""
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertEqual(entry.get_dom_element_id(), 'review123')
def test_collapsed_with_open_issues(self):
"""Testing ReviewEntry.collapsed with open issues"""
self.create_general_comment(self.review,
issue_opened=True,
issue_status=BaseComment.OPEN)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertFalse(entry.collapsed)
def test_collapsed_with_open_issues_verifying_resolved(self):
"""Testing ReviewEntry.collapsed with open issues marked Verifying
Resolved
"""
self.create_general_comment(
self.review,
issue_opened=True,
issue_status=BaseComment.VERIFYING_RESOLVED)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertFalse(entry.collapsed)
def test_collapsed_with_open_issues_verifying_dropped(self):
"""Testing ReviewEntry.collapsed with open issues marked Verifying
Dropped
"""
self.create_general_comment(self.review,
issue_opened=True,
issue_status=BaseComment.VERIFYING_DROPPED)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertFalse(entry.collapsed)
def test_collapsed_with_dropped_issues(self):
"""Testing ReviewEntry.collapsed with dropped issues"""
self.create_general_comment(self.review,
issue_opened=True,
issue_status=BaseComment.DROPPED)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertTrue(entry.collapsed)
def test_collapsed_with_resolved_issues(self):
"""Testing ReviewEntry.collapsed with resolved issues"""
self.create_general_comment(self.review,
issue_opened=True,
issue_status=BaseComment.RESOLVED)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertTrue(entry.collapsed)
def test_collapsed_with_draft_reply_comments(self):
"""Testing ReviewEntry.collapsed with draft reply comments"""
self.request.user = self.review_request.submitter
comment = self.create_general_comment(self.review)
reply = self.create_reply(self.review, user=self.request.user)
self.create_general_comment(reply, reply_to=comment)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertIn(self.review.pk, self.data.draft_reply_comments)
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertFalse(entry.collapsed)
def test_collapsed_with_draft_body_top_replies(self):
"""Testing ReviewEntry.collapsed with draft replies to body_top"""
self.request.user = self.review_request.submitter
self.create_reply(self.review,
user=self.request.user,
body_top_reply_to=self.review)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertIn(self.review.pk, self.data.draft_body_top_replies)
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertFalse(entry.collapsed)
def test_collapsed_with_draft_body_bottom_replies(self):
"""Testing ReviewEntry.collapsed with draft replies to body_bottom"""
self.request.user = self.review_request.submitter
self.create_reply(self.review,
user=self.request.user,
body_bottom_reply_to=self.review)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertIn(self.review.pk, self.data.draft_body_bottom_replies)
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertFalse(entry.collapsed)
def test_collapsed_with_reply_older_than_last_visited(self):
"""Testing ReviewEntry.collapsed with reply older than last visited"""
reply = self.create_reply(
self.review,
publish=True,
timestamp=self.review.timestamp + timedelta(days=2))
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.data.last_visited = reply.timestamp + timedelta(days=1)
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertTrue(entry.collapsed)
def test_collapsed_with_reply_newer_than_last_visited(self):
"""Testing ReviewEntry.collapsed with reply newer than last visited"""
reply = self.create_reply(
self.review,
publish=True,
timestamp=self.review.timestamp + timedelta(days=2))
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.data.last_visited = reply.timestamp - timedelta(days=1)
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertFalse(entry.collapsed)
def test_get_js_model_data(self):
"""Testing ReviewEntry.get_js_model_data"""
self.review.ship_it = True
self.review.publish()
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertEqual(entry.get_js_model_data(), {
'reviewData': {
'id': self.review.pk,
'bodyTop': 'Test Body Top',
'bodyBottom': 'Test Body Bottom',
'public': True,
'shipIt': True,
},
})
@add_fixtures(['test_scmtools'])
def test_get_js_model_data_with_diff_comments(self):
"""Testing ReviewEntry.get_js_model_data with diff comments"""
self.review_request.repository = self.create_repository()
diffset = self.create_diffset(self.review_request)
filediff = self.create_filediff(diffset)
comment1 = self.create_diff_comment(self.review, filediff)
comment2 = self.create_diff_comment(self.review, filediff)
self.review.publish()
# This is needed by the entry's add_comment(). It's normally built when
# creating the entries and their data.
comment1.review_obj = self.review
comment2.review_obj = self.review
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ReviewEntry(data=self.data,
review=self.review)
entry.add_comment('diff_comments', comment1)
entry.add_comment('diff_comments', comment2)
self.assertEqual(entry.get_js_model_data(), {
'reviewData': {
'id': self.review.pk,
'bodyTop': 'Test Body Top',
'bodyBottom': 'Test Body Bottom',
'public': True,
'shipIt': False,
},
'diffCommentsData': [
(six.text_type(comment1.pk), six.text_type(filediff.pk)),
(six.text_type(comment2.pk), six.text_type(filediff.pk)),
],
})
def test_add_comment_with_no_open_issues(self):
"""Testing ReviewEntry.add_comment with comment not opening an issue"""
self.request.user = self.review_request.submitter
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertFalse(entry.has_issues)
self.assertEqual(entry.issue_open_count, 0)
entry.add_comment('general_comments', GeneralComment())
self.assertFalse(entry.has_issues)
self.assertEqual(entry.issue_open_count, 0)
def test_add_comment_with_open_issues(self):
"""Testing ReviewEntry.add_comment with comment opening an issue"""
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertFalse(entry.has_issues)
self.assertEqual(entry.issue_open_count, 0)
entry.add_comment('general_comments',
GeneralComment(issue_opened=True,
issue_status=GeneralComment.OPEN))
self.assertTrue(entry.has_issues)
self.assertEqual(entry.issue_open_count, 1)
def test_add_comment_with_open_issues_and_viewer_is_owner(self):
"""Testing ReviewEntry.add_comment with comment opening an issue and
the review request owner is viewing the page
"""
self.request.user = self.review_request.submitter
entry = ReviewEntry(data=self.data,
review=self.review)
self.assertFalse(entry.has_issues)
self.assertEqual(entry.issue_open_count, 0)
entry.add_comment('general_comments',
GeneralComment(issue_opened=True,
issue_status=GeneralComment.OPEN))
self.assertTrue(entry.has_issues)
self.assertEqual(entry.issue_open_count, 1)
def test_build_entries(self):
"""Testing ReviewEntry.build_entries"""
review1 = self.create_review(
self.review_request,
timestamp=self.review.timestamp - timedelta(days=2),
public=True)
review2 = self.review
comment = self.create_general_comment(review1)
# These shouldn't show up in the results.
self.create_review(
self.review_request,
timestamp=self.review.timestamp - timedelta(days=1),
public=False)
self.create_reply(review1)
status_update_review = self.create_review(self.review_request,
public=True)
self.create_general_comment(status_update_review)
self.create_status_update(self.review_request,
review=status_update_review,
state=StatusUpdate.DONE_FAILURE)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entries = list(ReviewEntry.build_entries(self.data))
self.assertEqual(len(entries), 2)
# These will actually be in database query order (newest to oldest),
# not the order shown on the page.
entry = entries[0]
self.assertEqual(entry.review, review2)
self.assertEqual(
entry.comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [],
})
entry = entries[1]
self.assertEqual(entry.review, review1)
self.assertEqual(
entry.comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [comment],
})
class ChangeEntryTests(TestCase):
"""Unit tests for ChangeEntry."""
fixtures = ['test_users']
def setUp(self):
super(ChangeEntryTests, self).setUp()
self.request = RequestFactory().get('/r/1/')
self.request.user = AnonymousUser()
self.review_request = self.create_review_request()
self.changedesc = ChangeDescription.objects.create(
id=123,
public=True,
timestamp=datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
self.review_request.changedescs.add(self.changedesc)
self.data = ReviewRequestPageData(review_request=self.review_request,
request=self.request)
def test_added_timestamp(self):
"""Testing ChangeEntry.added_timestamp"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
self.assertEqual(entry.added_timestamp,
datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
def test_updated_timestamp(self):
"""Testing ChangeEntry.updated_timestamp"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
self.assertEqual(entry.updated_timestamp,
datetime(2017, 9, 7, 17, 0, 0, tzinfo=utc))
def test_updated_timestamp_with_status_update(self):
"""Testing ChangeEntry.updated_timestamp with status updates"""
self.create_status_update(
self.review_request,
change_description=self.changedesc,
timestamp=datetime(2017, 9, 14, 15, 40, 0, tzinfo=utc))
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
self.assertEqual(entry.updated_timestamp,
datetime(2017, 9, 14, 15, 40, 0, tzinfo=utc))
def test_get_dom_element_id(self):
"""Testing ChangeEntry.get_dom_element_id"""
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
self.assertEqual(entry.get_dom_element_id(), 'changedesc123')
def test_collapsed_with_older_than_latest_changedesc(self):
"""Testing ChangeEntry.collapsed with older than latest Change
Description
"""
self.review_request.changedescs.create(
timestamp=self.changedesc.timestamp + timedelta(days=1),
public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
self.assertTrue(entry.collapsed)
def test_collapsed_with_latest_changedesc(self):
"""Testing ChangeEntry.collapsed with older than latest Change
Description
"""
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertEqual(self.changedesc.timestamp,
self.data.latest_changedesc_timestamp)
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
self.assertFalse(entry.collapsed)
def test_collapsed_with_status_updates_and_no_reviews(self):
"""Testing ChangeEntry.collapsed with status updates and no reviews"""
self.create_status_update(self.review_request,
change_description=self.changedesc,
state=StatusUpdate.DONE_SUCCESS)
self.review_request.changedescs.create(
timestamp=self.changedesc.timestamp + timedelta(days=1),
public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
self.assertTrue(entry.collapsed)
def test_collapsed_with_status_updates_and_draft_comment_replies(self):
"""Testing ChangeEntry.collapsed with status updates containing draft
comment replies
"""
self.request.user = self.review_request.submitter
review = self.create_review(self.review_request, publish=True)
comment = self.create_general_comment(review)
self.create_status_update(self.review_request,
review=review,
change_description=self.changedesc,
state=StatusUpdate.DONE_FAILURE)
reply = self.create_reply(review, user=self.request.user)
self.create_general_comment(reply, reply_to=comment)
self.review_request.changedescs.create(
timestamp=self.changedesc.timestamp + timedelta(days=1),
public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertIn(review.pk, self.data.draft_reply_comments)
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
self.assertFalse(entry.collapsed)
def test_collapsed_with_pending_status_updates(self):
"""Testing ChangeEntry.collapsed with pending status updates"""
self.request.user = self.review_request.submitter
self.create_status_update(self.review_request,
change_description=self.changedesc,
state=StatusUpdate.PENDING)
self.review_request.changedescs.create(
timestamp=self.changedesc.timestamp + timedelta(days=1),
public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
self.assertFalse(entry.collapsed)
def test_collapsed_with_status_update_timestamp_gt_last_visited(self):
"""Testing ChangeEntry.collapsed with status update timestamp newer
than last visited
"""
self.request.user = self.review_request.submitter
self.data.last_visited = self.changedesc.timestamp + timedelta(days=1)
status_update = self.create_status_update(
self.review_request,
change_description=self.changedesc,
state=StatusUpdate.DONE_SUCCESS,
timestamp=self.data.last_visited + timedelta(days=1))
self.assertTrue(status_update.timestamp > self.data.last_visited)
self.review_request.changedescs.create(
timestamp=self.changedesc.timestamp + timedelta(days=1),
public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
self.assertFalse(entry.collapsed)
def test_collapsed_with_status_update_timestamp_lt_last_visited(self):
"""Testing ChangeEntry.collapsed with status update timestamp older
than last visited
"""
self.request.user = self.review_request.submitter
self.data.last_visited = self.changedesc.timestamp + timedelta(days=1)
status_update = self.create_status_update(
self.review_request,
change_description=self.changedesc,
state=StatusUpdate.DONE_SUCCESS,
timestamp=self.data.last_visited - timedelta(days=1))
self.assertTrue(status_update.timestamp < self.data.last_visited)
self.review_request.changedescs.create(
timestamp=self.changedesc.timestamp + timedelta(days=1),
public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
self.assertTrue(entry.collapsed)
def test_collapsed_with_status_updates_and_draft_body_top_replies(self):
"""Testing ChangeEntry.collapsed with status updates containing draft
comment replies to body_top
"""
self.request.user = self.review_request.submitter
review = self.create_review(self.review_request, publish=True)
self.create_status_update(self.review_request,
review=review,
change_description=self.changedesc,
state=StatusUpdate.DONE_FAILURE)
self.create_reply(review,
user=self.request.user,
body_top_reply_to=review)
self.review_request.changedescs.create(
timestamp=self.changedesc.timestamp + timedelta(days=1),
public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertIn(review.pk, self.data.draft_body_top_replies)
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
self.assertFalse(entry.collapsed)
def test_collapsed_with_status_updates_and_draft_body_bottom_replies(self):
"""Testing ChangeEntry.collapsed with status updates containing draft
comment replies to body_bottom
"""
self.request.user = self.review_request.submitter
review = self.create_review(self.review_request, publish=True)
self.create_status_update(self.review_request,
review=review,
change_description=self.changedesc,
state=StatusUpdate.DONE_FAILURE)
self.create_reply(review,
user=self.request.user,
body_bottom_reply_to=review)
self.review_request.changedescs.create(
timestamp=self.changedesc.timestamp + timedelta(days=1),
public=True)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
self.assertIn(review.pk, self.data.draft_body_bottom_replies)
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
self.assertFalse(entry.collapsed)
def test_get_js_model_data(self):
"""Testing ChangeEntry.get_js_model_data for standard ChangeDescription
"""
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
self.assertEqual(entry.get_js_model_data(), {
'pendingStatusUpdates': False,
})
@add_fixtures(['test_scmtools'])
def test_get_js_model_data_with_status_updates(self):
"""Testing ChangeEntry.get_js_model_data for ChangeDescription with
status updates
"""
self.review_request.repository = self.create_repository()
diffset = self.create_diffset(self.review_request)
filediff = self.create_filediff(diffset)
review = self.create_review(self.review_request,
body_top='Body top',
body_bottom='Body bottom',
ship_it=True)
comment1 = self.create_diff_comment(review, filediff)
comment2 = self.create_diff_comment(review, filediff)
review.publish()
# This is needed by the entry's add_comment(). It's normally built when
# creating the entries and their data.
comment1.review_obj = review
comment2.review_obj = review
status_update = self.create_status_update(
self.review_request,
review=review,
change_description=self.changedesc)
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
entry.add_update(status_update)
entry.add_comment('diff_comments', comment1)
entry.add_comment('diff_comments', comment2)
self.assertEqual(entry.get_js_model_data(), {
'reviewsData': [
{
'id': review.pk,
'bodyTop': 'Body top',
'bodyBottom': 'Body bottom',
'public': True,
'shipIt': True,
},
],
'diffCommentsData': [
(six.text_type(comment1.pk), six.text_type(filediff.pk)),
(six.text_type(comment2.pk), six.text_type(filediff.pk)),
],
'pendingStatusUpdates': False,
})
def test_build_entries(self):
"""Testing ChangeEntry.build_entries"""
changedesc1 = self.changedesc
changedesc2 = self.review_request.changedescs.create(
timestamp=changedesc1.timestamp + timedelta(days=1),
public=True)
review = self.create_review(self.review_request, public=True)
comment = self.create_general_comment(review)
status_update = self.create_status_update(
self.review_request,
review=review,
change_description=changedesc2)
self.data.query_data_pre_etag()
self.data.query_data_post_etag()
entries = list(ChangeEntry.build_entries(self.data))
# These will actually be in database query order (newest to oldest),
# not the order shown on the page.
entry = entries[0]
self.assertEqual(entry.changedesc, changedesc2)
self.assertFalse(entry.collapsed)
self.assertEqual(entry.status_updates, [status_update])
self.assertEqual(
entry.status_updates_by_review,
{
review.pk: status_update,
})
self.assertEqual(
entry.status_updates[0].comments,
{
'diff_comments': [],
'screenshot_comments': [],
'file_attachment_comments': [],
'general_comments': [comment],
})
entry = entries[1]
self.assertEqual(entry.changedesc, changedesc1)
self.assertTrue(entry.collapsed)
self.assertEqual(entry.status_updates, [])
def test_is_entry_new_with_timestamp(self):
"""Testing ChangeEntry.is_entry_new with timestamp"""
entry = ChangeEntry(data=self.data,
changedesc=self.changedesc)
user = User.objects.create_user(username='test-user',
email='user@example.com')
self.assertTrue(entry.is_entry_new(
last_visited=self.changedesc.timestamp - timedelta(days=1),
user=user))
self.assertFalse(entry.is_entry_new(
last_visited=self.changedesc.timestamp,
user=user))
self.assertFalse(entry.is_entry_new(
last_visited=self.changedesc.timestamp + timedelta(days=1),
user=user))
| 38.370097
| 79
| 0.62119
| 6,932
| 67,493
| 5.810877
| 0.042412
| 0.040515
| 0.0284
| 0.037139
| 0.915494
| 0.890122
| 0.850078
| 0.801668
| 0.774683
| 0.731759
| 0
| 0.009779
| 0.287882
| 67,493
| 1,758
| 80
| 38.391923
| 0.828309
| 0.106396
| 0
| 0.773144
| 0
| 0
| 0.05541
| 0.019366
| 0
| 0
| 0
| 0
| 0.140117
| 1
| 0.077565
| false
| 0
| 0.011676
| 0
| 0.096747
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
78f72ad695b7ba3f5bf5b3ace244f0b5b7b56f73
| 3,174
|
py
|
Python
|
library/migrations/0001_initial.py
|
kas2337/kas-library
|
67bf612597ae4c03433fa683b85bff7093d6ffbe
|
[
"MIT"
] | null | null | null |
library/migrations/0001_initial.py
|
kas2337/kas-library
|
67bf612597ae4c03433fa683b85bff7093d6ffbe
|
[
"MIT"
] | null | null | null |
library/migrations/0001_initial.py
|
kas2337/kas-library
|
67bf612597ae4c03433fa683b85bff7093d6ffbe
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.7 on 2021-09-24 21:15
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('accounts', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Tag',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Дата/время создания')),
('update_date', models.DateTimeField(auto_now=True, null=True, verbose_name='Дата/время изменения')),
('tag_name', models.CharField(blank=True, max_length=256, verbose_name='Ссылка на статью в интернете')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Note',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Дата/время создания')),
('update_date', models.DateTimeField(auto_now=True, null=True, verbose_name='Дата/время изменения')),
('header', models.CharField(blank=True, max_length=100, verbose_name='Заголовок')),
('body', models.CharField(blank=True, max_length=2048, verbose_name='Текст')),
('tag', models.ManyToManyField(blank=True, related_name='tag_note', related_query_name='tag', to='library.Tag', verbose_name='ссылка на тэг')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='accounts.user', verbose_name='Ссылка на Пользователя')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='Article',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_date', models.DateTimeField(auto_now_add=True, null=True, verbose_name='Дата/время создания')),
('update_date', models.DateTimeField(auto_now=True, null=True, verbose_name='Дата/время изменения')),
('header', models.CharField(blank=True, max_length=100, verbose_name='Заголовок')),
('body', models.CharField(blank=True, max_length=2048, verbose_name='Текст')),
('url', models.CharField(blank=True, max_length=256, verbose_name='Ссылка на статью в интернете')),
('tag', models.ManyToManyField(blank=True, related_name='tag_article', related_query_name='tag', to='library.Tag', verbose_name='ссылка на тэг')),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='accounts.user', verbose_name='Ссылка на Пользователя')),
],
options={
'abstract': False,
},
),
]
| 52.9
| 171
| 0.612476
| 348
| 3,174
| 5.41954
| 0.232759
| 0.110817
| 0.050901
| 0.085896
| 0.872216
| 0.872216
| 0.835101
| 0.835101
| 0.78526
| 0.78526
| 0
| 0.016291
| 0.245747
| 3,174
| 59
| 172
| 53.79661
| 0.771512
| 0.014178
| 0
| 0.634615
| 1
| 0
| 0.168852
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.038462
| 0
| 0.115385
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
600f4cd50d872ef77376839824ee4a5b93bc6d57
| 3,602
|
py
|
Python
|
core/migrations/0089_auto_20210828_0716.py
|
Nephrolog-lt/nephrolog-api
|
ccd2162aff02b2abfab0f285779e5d8457be1788
|
[
"Apache-2.0"
] | 2
|
2020-12-17T13:50:42.000Z
|
2021-01-09T07:01:07.000Z
|
core/migrations/0089_auto_20210828_0716.py
|
Nephrolog-lt/nephrolog-api
|
ccd2162aff02b2abfab0f285779e5d8457be1788
|
[
"Apache-2.0"
] | 2
|
2021-08-25T05:02:56.000Z
|
2022-01-16T18:29:49.000Z
|
core/migrations/0089_auto_20210828_0716.py
|
Nephrolog-lt/nephrolog-api
|
ccd2162aff02b2abfab0f285779e5d8457be1788
|
[
"Apache-2.0"
] | 1
|
2020-11-16T01:40:15.000Z
|
2020-11-16T01:40:15.000Z
|
# Generated by Django 3.2.6 on 2021-08-28 07:16
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0088_alter_doctorpatient_patient_user'),
]
operations = [
migrations.AlterField(
model_name='historicaluserprofile',
name='chronic_kidney_disease_age',
field=models.CharField(choices=[('Unknown', 'Nežinoma'), ('<1', 'Ne ilgiau nei metus'), ('1-5', 'Nuo 1 iki 5 metų'), ('6-10', 'Nuo 6 iki 10 metų'), ('>10', 'Daugiau nei 10 metų')], default='Unknown', max_length=16),
),
migrations.AlterField(
model_name='historicaluserprofile',
name='chronic_kidney_disease_stage',
field=models.CharField(choices=[('Unknown', 'Nežinoma'), ('Stage1', '1 stadija'), ('Stage2', '2 stadija'), ('Stage3', '3 stadija'), ('Stage4', '4 stadija'), ('Stage5', '5 stadija')], max_length=16),
),
migrations.AlterField(
model_name='historicaluserprofile',
name='diabetes_type',
field=models.CharField(choices=[('Unknown', 'Nežinoma'), ('Type1', '1 tipo'), ('Type2', '2 tipo'), ('No', 'Neserga')], default='Unknown', max_length=16),
),
migrations.AlterField(
model_name='historicaluserprofile',
name='dialysis',
field=models.CharField(choices=[('Unknown', 'Nežinoma'), ('AutomaticPeritonealDialysis', 'Automatinė peritoninė dializė'), ('ManualPeritonealDialysis', 'Ambulatorinė peritoninė dializė'), ('Hemodialysis', 'Hemodializė'), ('PostTransplant', 'Neatlieka, po inkstų transplantacijos'), ('NotPerformed', 'Neatlieka')], default='Unknown', max_length=32),
),
migrations.AlterField(
model_name='historicaluserprofile',
name='gender',
field=models.CharField(choices=[('Male', 'Vyras'), ('Female', 'Moteris')], max_length=8),
),
migrations.AlterField(
model_name='userprofile',
name='chronic_kidney_disease_age',
field=models.CharField(choices=[('Unknown', 'Nežinoma'), ('<1', 'Ne ilgiau nei metus'), ('1-5', 'Nuo 1 iki 5 metų'), ('6-10', 'Nuo 6 iki 10 metų'), ('>10', 'Daugiau nei 10 metų')], default='Unknown', max_length=16),
),
migrations.AlterField(
model_name='userprofile',
name='chronic_kidney_disease_stage',
field=models.CharField(choices=[('Unknown', 'Nežinoma'), ('Stage1', '1 stadija'), ('Stage2', '2 stadija'), ('Stage3', '3 stadija'), ('Stage4', '4 stadija'), ('Stage5', '5 stadija')], max_length=16),
),
migrations.AlterField(
model_name='userprofile',
name='diabetes_type',
field=models.CharField(choices=[('Unknown', 'Nežinoma'), ('Type1', '1 tipo'), ('Type2', '2 tipo'), ('No', 'Neserga')], default='Unknown', max_length=16),
),
migrations.AlterField(
model_name='userprofile',
name='dialysis',
field=models.CharField(choices=[('Unknown', 'Nežinoma'), ('AutomaticPeritonealDialysis', 'Automatinė peritoninė dializė'), ('ManualPeritonealDialysis', 'Ambulatorinė peritoninė dializė'), ('Hemodialysis', 'Hemodializė'), ('PostTransplant', 'Neatlieka, po inkstų transplantacijos'), ('NotPerformed', 'Neatlieka')], default='Unknown', max_length=32),
),
migrations.AlterField(
model_name='userprofile',
name='gender',
field=models.CharField(choices=[('Male', 'Vyras'), ('Female', 'Moteris')], max_length=8),
),
]
| 56.28125
| 360
| 0.606885
| 348
| 3,602
| 6.172414
| 0.264368
| 0.09311
| 0.116387
| 0.135009
| 0.925512
| 0.925512
| 0.910615
| 0.910615
| 0.910615
| 0.853818
| 0
| 0.03364
| 0.215991
| 3,602
| 63
| 361
| 57.174603
| 0.726983
| 0.012493
| 0
| 0.877193
| 1
| 0
| 0.382278
| 0.099015
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.017544
| 0
| 0.070175
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
6015cdf87f6ee5ecb15105f2e166176e66a5a24e
| 21,834
|
py
|
Python
|
core/nets/unets.py
|
liuph0119/Semantic_Segmentation_Keras
|
b595f0e2d62c471256dcc800f9539dbdf354d391
|
[
"Apache-2.0"
] | 17
|
2019-03-18T08:00:24.000Z
|
2021-03-10T06:52:18.000Z
|
core/nets/unets.py
|
123fengye741/Semantic_Segmentation_Keras
|
b595f0e2d62c471256dcc800f9539dbdf354d391
|
[
"Apache-2.0"
] | 2
|
2019-05-15T00:18:38.000Z
|
2019-05-22T03:21:11.000Z
|
core/nets/unets.py
|
123fengye741/Semantic_Segmentation_Keras
|
b595f0e2d62c471256dcc800f9539dbdf354d391
|
[
"Apache-2.0"
] | 8
|
2019-03-08T15:42:31.000Z
|
2019-12-19T02:33:18.000Z
|
from keras.engine import Input
from keras.layers.convolutional import Conv2D, Conv2DTranspose, SeparableConv2D
from keras.layers.pooling import MaxPooling2D
from keras.layers.core import Dropout, Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.merge import Concatenate, Add
from keras.models import Model
from keras.regularizers import l2
from ..utils.net_utils import conv_bn_act_block, bn_act_convtranspose
def UNet(input_shape,
n_class,
weight_decay=1e-4,
kernel_initializer="he_normal",
bn_epsilon=1e-3,
bn_momentum=0.99,
init_filters=64,
dropout=0.5):
""" Implementation of U-Net for semantic segmentation.
ref: Ronneberger O., Fischer P., Brox T. U-Net: Convolutional Networks for Biomedical Image Segmentation[J].
     arXiv preprint arXiv:1505.04597, 2015.
:param input_shape: tuple, i.e., (width, height, channel).
:param n_class: int, number of classes, at least 2.
:param weight_decay: float, default 1e-4.
:param kernel_initializer: string, default "he_normal".
:param bn_epsilon: float, default 1e-3.
:param bn_momentum: float, default 0.99.
:param init_filters: int, initial filters, default 64.
:param dropout: float, default 0.5.
:return: a Keras Model instance.
"""
input_x = Input(shape=input_shape)
x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(input_x)
conv1 = Conv2D(init_filters * 1, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)
conv1 = Dropout(dropout)(conv1)
conv1 = Conv2D(init_filters * 1, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(conv1)
pool1 = MaxPooling2D()(conv1)
conv2 = Conv2D(init_filters * 2, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(pool1)
conv2 = Dropout(dropout)(conv2)
conv2 = Conv2D(init_filters * 2, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(conv2)
pool2 = MaxPooling2D()(conv2)
conv3 = Conv2D(init_filters * 4, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(pool2)
conv3 = Dropout(dropout)(conv3)
conv3 = Conv2D(init_filters * 4, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(conv3)
pool3 = MaxPooling2D()(conv3)
conv4 = Conv2D(init_filters * 8, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(pool3)
conv4 = Dropout(dropout)(conv4)
conv4 = Conv2D(init_filters * 8, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(conv4)
pool4 = MaxPooling2D()(conv4)
conv5 = Conv2D(init_filters * 16, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(pool4)
conv5 = Dropout(dropout)(conv5)
conv5 = Conv2D(init_filters * 16, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(conv5)
up1 = Concatenate()([Conv2DTranspose(init_filters * 8, (3, 3), padding="same", strides=(2, 2),
kernel_regularizer=l2(weight_decay),
kernel_initializer=kernel_initializer)(conv5), conv4])
conv6 = Conv2D(init_filters * 8, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(up1)
conv6 = Dropout(dropout)(conv6)
conv6 = Conv2D(init_filters * 8, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(conv6)
up2 = Concatenate()([Conv2DTranspose(init_filters * 4, (3, 3), padding="same", strides=(2, 2),
kernel_regularizer=l2(weight_decay),
kernel_initializer=kernel_initializer)(conv6), conv3])
conv7 = Conv2D(init_filters * 4, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(up2)
conv7 = Dropout(dropout)(conv7)
conv7 = Conv2D(init_filters * 4, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(conv7)
up3 = Concatenate()([Conv2DTranspose(init_filters * 2, (3, 3), padding="same", strides=(2, 2),
kernel_regularizer=l2(weight_decay),
kernel_initializer=kernel_initializer)(conv7), conv2])
conv8 = Conv2D(init_filters * 2, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(up3)
conv8 = Dropout(dropout)(conv8)
conv8 = Conv2D(init_filters * 2, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(conv8)
up4 = Concatenate()([Conv2DTranspose(init_filters, (3, 3), padding="same", strides=(2, 2),
kernel_regularizer=l2(weight_decay),
kernel_initializer=kernel_initializer)(conv8), conv1])
conv9 = Conv2D(init_filters, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(up4)
conv9 = Conv2D(init_filters, (3, 3), activation='relu', padding='same',
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(conv9)
output = Conv2D(n_class, (1, 1), activation=None,
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(conv9)
output = Activation("softmax")(output)
return Model(input_x, output)
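# --- Hypothetical usage sketch (not part of the original module) ---
# A minimal example of how the UNet factory above could be instantiated and
# compiled, assuming a working Keras installation with a TensorFlow backend.
# The input shape, class count, filter count and optimizer/loss choices are
# illustrative assumptions only; ResUNet below shares the same call signature.
def _unet_usage_example():
    # Spatial dimensions must be divisible by 16 (four 2x2 poolings).
    model = UNet(input_shape=(256, 256, 3), n_class=2, init_filters=32)
    model.compile(optimizer="adam", loss="categorical_crossentropy")
    return model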
################################################ ResUNet ################
# def convolution_block(x, filters, size, strides=(1, 1), padding='same', activation=True):
# x = Conv2D(filters, size, strides=strides, padding=padding)(x)
# if activation == True:
# x = BatchNormalization()(x)
# x = Activation("relu")(x)
# return x
#
#
# def residual_block(blockInput, num_filters=16, batch_activate=False):
# x = BatchNormalization()(blockInput)
# x = Activation("relu")(x)
# x = convolution_block(x, num_filters, (3, 3))
# x = convolution_block(x, num_filters, (3, 3), activation=False)
# x = Add()([x, blockInput])
# if batch_activate:
# x = BatchNormalization()(x)
# x = Activation("relu")(x)
# return x
def convolutional_residual_block(inputs, n_filters, weight_decay=1e-4, kernel_initializer="he_normal", bn_epsilon=1e-3, bn_momentum=0.99):
x = conv_bn_act_block(inputs, n_filters, weight_decay, kernel_initializer, bn_epsilon, bn_momentum)
x = conv_bn_act_block(x, n_filters, weight_decay, kernel_initializer, bn_epsilon, bn_momentum)
x = Conv2D(n_filters, kernel_size=(3, 3), padding="same", activation=None, use_bias=False,
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)
x = Add()([inputs, x])
_x = x
x = conv_bn_act_block(_x, n_filters, weight_decay, kernel_initializer, bn_epsilon, bn_momentum)
x = conv_bn_act_block(x, n_filters, weight_decay, kernel_initializer, bn_epsilon, bn_momentum)
x = Conv2D(n_filters, kernel_size=(3, 3), padding="same", activation=None, use_bias=False,
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)
x = Add()([_x, x])
x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(x)
x = Activation("relu")
return x
def ResUNet(input_shape,
n_class,
weight_decay=1e-4,
kernel_initializer="he_normal",
bn_epsilon=1e-3,
bn_momentum=0.99,
init_filters=64,
dropout=0.5):
""" modification of U-Net.
replace the Conv+BN+Act with Residual Convolutions.
:param input_shape: tuple, i.e., (width, height, channel).
:param n_class: int, number of classes, at least 2.
:param weight_decay: float, default 1e-4.
:param kernel_initializer: string, default "he_normal".
:param bn_epsilon: float, default 1e-3.
:param bn_momentum: float, default 0.99.
:param init_filters: int, initial filters, default 64.
:param dropout: float, default 0.5.
:return: a Keras Model instance.
"""
input_x = Input(shape=input_shape)
x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(input_x)
conv1 = convolutional_residual_block(x, init_filters*1, weight_decay,
kernel_initializer, bn_epsilon, bn_momentum)
pool1 = MaxPooling2D((2, 2))(conv1)
pool1 = Dropout(dropout / 2)(pool1)
conv2 = convolutional_residual_block(pool1, init_filters*2, weight_decay,
kernel_initializer, bn_epsilon, bn_momentum)
pool2 = MaxPooling2D((2, 2))(conv2)
pool2 = Dropout(dropout)(pool2)
conv3 = convolutional_residual_block(pool2, init_filters*4, weight_decay,
kernel_initializer, bn_epsilon, bn_momentum)
pool3 = MaxPooling2D((2, 2))(conv3)
pool3 = Dropout(dropout)(pool3)
conv4 = convolutional_residual_block(pool3, init_filters*8, weight_decay,
kernel_initializer, bn_epsilon, bn_momentum)
pool4 = MaxPooling2D((2, 2))(conv4)
pool4 = Dropout(dropout)(pool4)
convm = convolutional_residual_block(pool4, init_filters*16, weight_decay,
kernel_initializer, bn_epsilon, bn_momentum)
deconv4 = Conv2DTranspose(init_filters * 8, (3, 3), strides=(2, 2), padding="same",
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(convm)
uconv4 = Concatenate()([deconv4, conv4])
uconv4 = Dropout(dropout)(uconv4)
uconv4 = convolutional_residual_block(uconv4, init_filters*8, weight_decay,
kernel_initializer, bn_epsilon, bn_momentum)
deconv3 = Conv2DTranspose(init_filters * 4, (3, 3), strides=(2, 2), padding="same",
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(uconv4)
uconv3 = Concatenate()([deconv3, conv3])
uconv3 = Dropout(dropout)(uconv3)
uconv3 = convolutional_residual_block(uconv3, init_filters*4, weight_decay,
kernel_initializer, bn_epsilon, bn_momentum)
deconv2 = Conv2DTranspose(init_filters * 2, (3, 3), strides=(2, 2), padding="same",
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(uconv3)
uconv2 = Concatenate()([deconv2, conv2])
uconv2 = Dropout(dropout)(uconv2)
uconv2 = convolutional_residual_block(uconv2, init_filters*2, weight_decay,
kernel_initializer, bn_epsilon, bn_momentum)
deconv1 = Conv2DTranspose(init_filters * 1, (3, 3), strides=(2, 2), padding="same",
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(uconv2)
uconv1 = Concatenate()([deconv1, conv1])
uconv1 = Dropout(dropout)(uconv1)
uconv1 = convolutional_residual_block(uconv1, init_filters*1, weight_decay,
kernel_initializer, bn_epsilon, bn_momentum)
output = Conv2D(n_class, (1, 1), padding="same", activation=None,
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(uconv1)
output = Activation("softmax")(output)
return Model(input_x, output)
# # # ===========================================================================================================
def DepthwiseSeparableConvBlock(inputs,
n_filters,
weight_decay=1e-4,
kernel_initializer="he_normal",
bn_epsilon=1e-3,
bn_momentum=0.99):
""" Depthwise separable convolutional block
:param inputs: 4-D tensor, shape of (batch_size, height, width, channel).
:param n_filters: int, number of filters.
:param weight_decay: float, default 1e-4.
:param kernel_initializer: string, default "he_normal".
:param bn_epsilon: float, default 1e-3.
:param bn_momentum: float, default 0.99.
:return: 4-D tensor, shape of (batch_size, height, width, channel).
"""
x = SeparableConv2D(n_filters, (3, 3), activation=None, padding="same", depth_multiplier=1,
                    kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(inputs)
x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(x)
x = Activation("relu")(x)
x = Conv2D(n_filters, (1, 1), activation=None,
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)
x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(x)
x = Activation("relu")(x)
return x
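# --- Hypothetical shape check (not part of the original module) ---
# A small sketch showing that DepthwiseSeparableConvBlock maps an arbitrary
# channel count to `n_filters` channels while preserving the spatial size,
# under the same Keras assumptions as the rest of this file. The input shape
# and filter count are illustrative.
def _depthwise_separable_block_example():
    inputs = Input(shape=(64, 64, 3))
    outputs = DepthwiseSeparableConvBlock(inputs, n_filters=32)
    # Resulting output shape: (None, 64, 64, 32).
    return Model(inputs, outputs)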
def MobileUNet(input_shape,
n_class,
weight_decay=1e-4,
kernel_initializer="he_normal",
bn_epsilon=1e-3,
bn_momentum=0.99,
preset_model="MobileUNet-Skip"):
"""
:param input_shape: 3-D tuple, i.e., (height, width, channel).
:param n_class: int, number of classes, at least 2.
:param weight_decay: float, default 1e-4.
:param kernel_initializer: string, default "he_normal".
:param bn_epsilon: float, default 1e-3.
:param bn_momentum: float, default 0.99.
:param preset_model: string, "MobileUNet-Skip" or "MobileUNet".
:return: a Keras Model instance.
"""
if preset_model == "MobileUNet":
has_skip = False
elif preset_model == "MobileUNet-Skip":
has_skip = True
else:
raise ValueError(
"Unsupported MobileUNet model '%s'. This function only supports MobileUNet and MobileUNet-Skip" % (
preset_model))
input_x = Input(shape=input_shape)
x = BatchNormalization(epsilon=bn_epsilon, momentum=bn_momentum)(input_x)
x = conv_bn_act_block(x, 64, weight_decay=weight_decay,
kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 64, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = MaxPooling2D()(x)
skip_1 = x
x = DepthwiseSeparableConvBlock(x, 128, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 128, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = MaxPooling2D()(x)
skip_2 = x
x = DepthwiseSeparableConvBlock(x, 256, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 256, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 256, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = MaxPooling2D()(x)
skip_3 = x
x = DepthwiseSeparableConvBlock(x, 512, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 512, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 512, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = MaxPooling2D()(x)
skip_4 = x
x = DepthwiseSeparableConvBlock(x, 512, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 512, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 512, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = MaxPooling2D()(x)
x = bn_act_convtranspose(x, 512, kernel_size=3, scale=2, weight_decay=weight_decay,
kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 512, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 512, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 512, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
if has_skip:
x = Add()([x, skip_4])
x = bn_act_convtranspose(x, 512, kernel_size=3, scale=2, weight_decay=weight_decay,
kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 512, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 512, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 256, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
if has_skip:
x = Add()([x, skip_3])
x = bn_act_convtranspose(x, 256, kernel_size=3, scale=2, weight_decay=weight_decay,
kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 256, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 256, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 128, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
if has_skip:
x = Add()([x, skip_2])
x = bn_act_convtranspose(x, 128, kernel_size=3, scale=2, weight_decay=weight_decay,
kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 128, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 128, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 64, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
if has_skip:
x = Add()([x, skip_1])
x = bn_act_convtranspose(x, 64, kernel_size=3, scale=2, weight_decay=weight_decay,
kernel_initializer=kernel_initializer, bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 64, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = DepthwiseSeparableConvBlock(x, 64, weight_decay=weight_decay, kernel_initializer=kernel_initializer,
bn_epsilon=bn_epsilon, bn_momentum=bn_momentum)
x = Conv2D(n_class, (1, 1), activation=None,
kernel_regularizer=l2(weight_decay), kernel_initializer=kernel_initializer)(x)
output = Activation("softmax")(x)
return Model(input_x, output)
| 55.841432
| 138
| 0.655675
| 2,492
| 21,834
| 5.477127
| 0.072231
| 0.189318
| 0.09715
| 0.160012
| 0.800718
| 0.787457
| 0.77295
| 0.768261
| 0.759689
| 0.74782
| 0
| 0.033481
| 0.236695
| 21,834
| 390
| 139
| 55.984615
| 0.785491
| 0.123706
| 0
| 0.557196
| 0
| 0
| 0.021368
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01845
| false
| 0
| 0.03321
| 0
| 0.070111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
60c3703945ab4e8a3c8c3f839c1d6e1b2287d398
| 11,887
|
py
|
Python
|
modelAE_GD.py
|
czq142857/DECOR-GAN
|
79c80fc202b8af982989a3e3bb3afe85e606b71f
|
[
"MIT"
] | 55
|
2021-03-26T01:35:20.000Z
|
2022-03-30T02:52:20.000Z
|
modelAE_GD.py
|
czq142857/DECOR-GAN
|
79c80fc202b8af982989a3e3bb3afe85e606b71f
|
[
"MIT"
] | 2
|
2021-05-15T12:56:51.000Z
|
2021-06-15T11:13:01.000Z
|
modelAE_GD.py
|
czq142857/DECOR-GAN
|
79c80fc202b8af982989a3e3bb3afe85e606b71f
|
[
"MIT"
] | 10
|
2021-04-16T07:07:52.000Z
|
2022-02-28T15:06:15.000Z
|
import torch
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
from torch.autograd import Variable
#cell = 4
#input 256
#output 120 (128-4-4)
#receptive field = 18
# 0 18
#conv 4x4 s1 4 15
#conv 3x3 s2 6 7
#conv 3x3 s1 10 5
#conv 3x3 s1 14 3
#conv 3x3 s1 18 1
#conv 1x1 s1 1 1
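# Illustrative sanity check for the receptive-field figure quoted above
# (the helper below is a sketch; its name and placement are not from the repo):
# r_out = r_in + (kernel - 1) * jump, and the jump multiplies by the stride.
def _receptive_field(layers):
    r, j = 1, 1
    for kernel, stride in layers:
        r += (kernel - 1) * j
        j *= stride
    return r

# conv_1 .. conv_6 of the discriminator below: 4/s1, 3/s2, 3/s1, 3/s1, 3/s1, 1/s1
assert _receptive_field([(4, 1), (3, 2), (3, 1), (3, 1), (3, 1), (1, 1)]) == 18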
class discriminator(nn.Module):
def __init__(self, d_dim, z_dim):
super(discriminator, self).__init__()
self.d_dim = d_dim
self.z_dim = z_dim
self.conv_1 = nn.Conv3d(1, self.d_dim, 4, stride=1, padding=0, bias=True)
self.conv_2 = nn.Conv3d(self.d_dim, self.d_dim*2, 3, stride=2, padding=0, bias=True)
self.conv_3 = nn.Conv3d(self.d_dim*2, self.d_dim*4, 3, stride=1, padding=0, bias=True)
self.conv_4 = nn.Conv3d(self.d_dim*4, self.d_dim*8, 3, stride=1, padding=0, bias=True)
self.conv_5 = nn.Conv3d(self.d_dim*8, self.d_dim*16, 3, stride=1, padding=0, bias=True)
self.conv_6 = nn.Conv3d(self.d_dim*16, self.z_dim, 1, stride=1, padding=0, bias=True)
def forward(self, voxels, is_training=False):
out = voxels
out = self.conv_1(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_2(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_3(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_4(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_5(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_6(out)
out = torch.sigmoid(out)
return out
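# Minimal shape check (the toy width d_dim=4 and the 20^3 crop are assumptions
# chosen so this runs instantly; the comments above describe the real 256 setting):
if __name__ == "__main__":
    D = discriminator(d_dim=4, z_dim=8)
    patch = torch.rand(1, 1, 20, 20, 20)   # (batch, channel, x, y, z)
    scores = D(patch)                      # per-location outputs in [0, 1]
    print(scores.shape)                    # torch.Size([1, 8, 2, 2, 2]) for a 20^3 input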
#64 -> 256
class generator(nn.Module):
def __init__(self, g_dim, prob_dim, z_dim):
super(generator, self).__init__()
self.g_dim = g_dim
self.prob_dim = prob_dim
self.z_dim = z_dim
style_codes = torch.zeros((self.prob_dim, self.z_dim))
self.style_codes = nn.Parameter(style_codes)
nn.init.constant_(self.style_codes, 0.0)
self.conv_0 = nn.Conv3d(1+self.z_dim, self.g_dim, 5, stride=1, dilation=1, padding=2, bias=True)
self.conv_1 = nn.Conv3d(self.g_dim+self.z_dim, self.g_dim*2, 5, stride=1, dilation=2, padding=4, bias=True)
self.conv_2 = nn.Conv3d(self.g_dim*2+self.z_dim, self.g_dim*4, 5, stride=1, dilation=2, padding=4, bias=True)
self.conv_3 = nn.Conv3d(self.g_dim*4+self.z_dim, self.g_dim*8, 5, stride=1, dilation=1, padding=2, bias=True)
self.conv_4 = nn.Conv3d(self.g_dim*8+self.z_dim, self.g_dim*4, 5, stride=1, dilation=1, padding=2, bias=True)
self.conv_5 = nn.ConvTranspose3d(self.g_dim*4, self.g_dim*2, 4, stride=2, padding=1, bias=True)
self.conv_6 = nn.Conv3d(self.g_dim*2, self.g_dim*2, 3, stride=1, padding=1, bias=True)
self.conv_7 = nn.ConvTranspose3d(self.g_dim*2, self.g_dim, 4, stride=2, padding=1, bias=True)
self.conv_8 = nn.Conv3d(self.g_dim, 1, 3, stride=1, padding=1, bias=True)
def forward(self, voxels, z, mask_, is_training=False):
out = voxels
mask = F.interpolate(mask_, scale_factor=4, mode='nearest')
_,_,dimx,dimy,dimz = out.size()
zs = z.repeat(1,1,dimx,dimy,dimz)
out = torch.cat([out,zs],axis=1)
out = self.conv_0(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
_,_,dimx,dimy,dimz = out.size()
zs = z.repeat(1,1,dimx,dimy,dimz)
out = torch.cat([out,zs],axis=1)
out = self.conv_1(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
_,_,dimx,dimy,dimz = out.size()
zs = z.repeat(1,1,dimx,dimy,dimz)
out = torch.cat([out,zs],axis=1)
out = self.conv_2(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
_,_,dimx,dimy,dimz = out.size()
zs = z.repeat(1,1,dimx,dimy,dimz)
out = torch.cat([out,zs],axis=1)
out = self.conv_3(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
_,_,dimx,dimy,dimz = out.size()
zs = z.repeat(1,1,dimx,dimy,dimz)
out = torch.cat([out,zs],axis=1)
out = self.conv_4(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_5(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_6(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_7(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_8(out)
#out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
#out = out.clamp(max=1.0)
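        # the next line is a "leaky clamp": identity on [0, 1], slope 0.002 outside,
        # which keeps gradients alive where a hard clamp would zero them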
out = torch.max(torch.min(out, out*0.002+0.998), out*0.002)
#out = torch.sigmoid(out)
out = out*mask
return out
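# Minimal shape check for the x4 upsampling path (the 16^3 input, channel widths
# and style-code values below are toy assumptions; in the real setting the input
# is 64^3 and the output 256^3, as the comment above notes):
if __name__ == "__main__":
    g = generator(g_dim=4, prob_dim=16, z_dim=8)
    coarse = torch.rand(1, 1, 16, 16, 16)   # coarse content voxels
    z = torch.rand(1, 8, 1, 1, 1)           # one style code, broadcast over space
    mask_ = torch.ones(1, 1, 16, 16, 16)    # coarse occupancy mask
    detailed = g(coarse, z, mask_)
    print(detailed.shape)                   # torch.Size([1, 1, 64, 64, 64]): 4x the input side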
#32 -> 128
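# same layout as `generator`, but the five front convolutions use 3x3x3 kernels
# (dilations 1, 2, 2, 1, 1) instead of 5x5x5, matching the half-resolution 32^3 input;
# the two stride-2 ConvTranspose3d layers again give x4, hence 32 -> 128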
class generator_halfsize(nn.Module):
def __init__(self, g_dim, prob_dim, z_dim):
super(generator_halfsize, self).__init__()
self.g_dim = g_dim
self.prob_dim = prob_dim
self.z_dim = z_dim
style_codes = torch.zeros((self.prob_dim, self.z_dim))
self.style_codes = nn.Parameter(style_codes)
nn.init.constant_(self.style_codes, 0.0)
self.conv_0 = nn.Conv3d(1+self.z_dim, self.g_dim, 3, stride=1, dilation=1, padding=1, bias=True)
self.conv_1 = nn.Conv3d(self.g_dim+self.z_dim, self.g_dim*2, 3, stride=1, dilation=2, padding=2, bias=True)
self.conv_2 = nn.Conv3d(self.g_dim*2+self.z_dim, self.g_dim*4, 3, stride=1, dilation=2, padding=2, bias=True)
self.conv_3 = nn.Conv3d(self.g_dim*4+self.z_dim, self.g_dim*8, 3, stride=1, dilation=1, padding=1, bias=True)
self.conv_4 = nn.Conv3d(self.g_dim*8+self.z_dim, self.g_dim*4, 3, stride=1, dilation=1, padding=1, bias=True)
self.conv_5 = nn.ConvTranspose3d(self.g_dim*4, self.g_dim*2, 4, stride=2, padding=1, bias=True)
self.conv_6 = nn.Conv3d(self.g_dim*2, self.g_dim*2, 3, stride=1, padding=1, bias=True)
self.conv_7 = nn.ConvTranspose3d(self.g_dim*2, self.g_dim, 4, stride=2, padding=1, bias=True)
self.conv_8 = nn.Conv3d(self.g_dim, 1, 3, stride=1, padding=1, bias=True)
def forward(self, voxels, z, mask_, is_training=False):
out = voxels
mask = F.interpolate(mask_, scale_factor=4, mode='nearest')
_,_,dimx,dimy,dimz = out.size()
zs = z.repeat(1,1,dimx,dimy,dimz)
out = torch.cat([out,zs],axis=1)
out = self.conv_0(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
_,_,dimx,dimy,dimz = out.size()
zs = z.repeat(1,1,dimx,dimy,dimz)
out = torch.cat([out,zs],axis=1)
out = self.conv_1(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
_,_,dimx,dimy,dimz = out.size()
zs = z.repeat(1,1,dimx,dimy,dimz)
out = torch.cat([out,zs],axis=1)
out = self.conv_2(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
_,_,dimx,dimy,dimz = out.size()
zs = z.repeat(1,1,dimx,dimy,dimz)
out = torch.cat([out,zs],axis=1)
out = self.conv_3(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
_,_,dimx,dimy,dimz = out.size()
zs = z.repeat(1,1,dimx,dimy,dimz)
out = torch.cat([out,zs],axis=1)
out = self.conv_4(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_5(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_6(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_7(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_8(out)
#out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
#out = out.clamp(max=1.0)
out = torch.max(torch.min(out, out*0.002+0.998), out*0.002)
#out = torch.sigmoid(out)
out = out*mask
return out
#32 -> 256
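# similar front end to generator_halfsize, but with a third stride-2 ConvTranspose3d
# (conv_9), so the decoder upsamples by x8 overall: 32 -> 256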
class generator_halfsize_x8(nn.Module):
def __init__(self, g_dim, prob_dim, z_dim):
super(generator_halfsize_x8, self).__init__()
self.g_dim = g_dim
self.prob_dim = prob_dim
self.z_dim = z_dim
style_codes = torch.zeros((self.prob_dim, self.z_dim))
self.style_codes = nn.Parameter(style_codes)
nn.init.constant_(self.style_codes, 0.0)
self.conv_0 = nn.Conv3d(1+self.z_dim, self.g_dim, 3, stride=1, dilation=1, padding=1, bias=True)
self.conv_1 = nn.Conv3d(self.g_dim+self.z_dim, self.g_dim*2, 3, stride=1, dilation=2, padding=2, bias=True)
self.conv_2 = nn.Conv3d(self.g_dim*2+self.z_dim, self.g_dim*4, 3, stride=1, dilation=2, padding=2, bias=True)
self.conv_3 = nn.Conv3d(self.g_dim*4+self.z_dim, self.g_dim*8, 3, stride=1, dilation=1, padding=1, bias=True)
self.conv_4 = nn.Conv3d(self.g_dim*8+self.z_dim, self.g_dim*8, 3, stride=1, dilation=1, padding=1, bias=True)
self.conv_5 = nn.ConvTranspose3d(self.g_dim*8, self.g_dim*4, 4, stride=2, padding=1, bias=True)
self.conv_6 = nn.Conv3d(self.g_dim*4, self.g_dim*4, 3, stride=1, padding=1, bias=True)
self.conv_7 = nn.ConvTranspose3d(self.g_dim*4, self.g_dim*2, 4, stride=2, padding=1, bias=True)
self.conv_8 = nn.Conv3d(self.g_dim*2, self.g_dim*2, 3, stride=1, padding=1, bias=True)
self.conv_9 = nn.ConvTranspose3d(self.g_dim*2, self.g_dim, 4, stride=2, padding=1, bias=True)
self.conv_10 = nn.Conv3d(self.g_dim, 1, 3, stride=1, padding=1, bias=True)
def forward(self, voxels, z, mask_, is_training=False):
out = voxels
mask = F.interpolate(mask_, scale_factor=4, mode='nearest')
_,_,dimx,dimy,dimz = out.size()
zs = z.repeat(1,1,dimx,dimy,dimz)
out = torch.cat([out,zs],axis=1)
out = self.conv_0(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
_,_,dimx,dimy,dimz = out.size()
zs = z.repeat(1,1,dimx,dimy,dimz)
out = torch.cat([out,zs],axis=1)
out = self.conv_1(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
_,_,dimx,dimy,dimz = out.size()
zs = z.repeat(1,1,dimx,dimy,dimz)
out = torch.cat([out,zs],axis=1)
out = self.conv_2(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
_,_,dimx,dimy,dimz = out.size()
zs = z.repeat(1,1,dimx,dimy,dimz)
out = torch.cat([out,zs],axis=1)
out = self.conv_3(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
_,_,dimx,dimy,dimz = out.size()
zs = z.repeat(1,1,dimx,dimy,dimz)
out = torch.cat([out,zs],axis=1)
out = self.conv_4(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_5(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_6(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_7(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_8(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_9(out)
out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
out = self.conv_10(out)
#out = F.leaky_relu(out, negative_slope=0.02, inplace=True)
#out = out.clamp(max=1.0)
out = torch.max(torch.min(out, out*0.002+0.998), out*0.002)
#out = torch.sigmoid(out)
out = out*mask
return out
| 40.569966
| 119
| 0.611677
| 2,030
| 11,887
| 3.408867
| 0.050739
| 0.080925
| 0.067052
| 0.05896
| 0.930347
| 0.91604
| 0.905491
| 0.904046
| 0.88974
| 0.875867
| 0
| 0.060733
| 0.238159
| 11,887
| 292
| 120
| 40.708904
| 0.703401
| 0.043409
| 0
| 0.81068
| 0
| 0
| 0.001851
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038835
| false
| 0
| 0.029126
| 0
| 0.106796
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
60c6ec4da3bb0c1ff22cc670a8cb4646b37d1f67
| 5,247
|
py
|
Python
|
pybench/With.py
|
haypo/pymicrobench
|
7c6b92deaf5cf0c3fc965fcfcbc6a78f7d0d10f4
|
[
"MIT"
] | 3
|
2018-01-17T18:45:23.000Z
|
2020-10-02T06:26:03.000Z
|
pybench/With.py
|
vstinner/pymicrobench
|
7c6b92deaf5cf0c3fc965fcfcbc6a78f7d0d10f4
|
[
"MIT"
] | null | null | null |
pybench/With.py
|
vstinner/pymicrobench
|
7c6b92deaf5cf0c3fc965fcfcbc6a78f7d0d10f4
|
[
"MIT"
] | 4
|
2018-01-17T18:45:23.000Z
|
2020-10-08T15:24:51.000Z
|
from __future__ import with_statement
import pyperf
from six.moves import xrange
from pybench import Test
class WithFinally(Test):
version = 2.0
operations = 20
inner_loops = 20
class ContextManager(object):
def __enter__(self):
pass
def __exit__(self, exc, val, tb):
pass
def test(self, loops):
cm = self.ContextManager()
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
with cm:
pass
return pyperf.perf_counter() - t0
class TryFinally(Test):
version = 2.0
operations = 20
inner_loops = 20
class ContextManager(object):
def __enter__(self):
pass
def __exit__(self):
# "Context manager" objects used just for their cleanup
# actions in finally blocks usually don't have parameters.
pass
def test(self, loops):
cm = self.ContextManager()
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
cm.__enter__()
try:
pass
finally:
cm.__exit__()
return pyperf.perf_counter() - t0
class WithRaiseExcept(Test):
version = 2.0
operations = 2 + 3 + 3
inner_loops = 8
class BlockExceptions(object):
def __enter__(self):
pass
def __exit__(self, exc, val, tb):
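            # returning a true value from __exit__ suppresses the exception raised
            # in the with-block, so each iteration measures raise-and-swallow cost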
return True
def test(self, loops):
error = ValueError
be = self.BlockExceptions()
range_it = xrange(loops)
t0 = pyperf.perf_counter()
for _ in range_it:
with be:
raise error("something")
with be:
raise error("something")
with be:
raise error("something")
with be:
raise error("something")
with be:
raise error("something")
with be:
raise error("something")
with be:
raise error("something")
with be:
raise error("something")
return pyperf.perf_counter() - t0
| 20.337209
| 70
| 0.385554
| 422
| 5,247
| 4.303318
| 0.156398
| 0.066079
| 0.110132
| 0.154185
| 0.827093
| 0.800661
| 0.767621
| 0.767621
| 0.767621
| 0.767621
| 0
| 0.010139
| 0.548885
| 5,247
| 257
| 71
| 20.416342
| 0.757076
| 0.020964
| 0
| 0.932039
| 0
| 0
| 0.014024
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043689
| false
| 0.218447
| 0.019417
| 0.004854
| 0.15534
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
60d0318ac97f51b2b71af53edd7b10ca7d9a0a95
| 48,365
|
py
|
Python
|
src/sage/groups/class_function.py
|
yzpopulation/sage
|
d2dc2f80b5a8e039701e292653e25366e3e5ec1e
|
[
"BSL-1.0"
] | 10
|
2018-06-01T21:54:53.000Z
|
2022-03-14T20:11:34.000Z
|
src/sage/groups/class_function.py
|
yzpopulation/sage
|
d2dc2f80b5a8e039701e292653e25366e3e5ec1e
|
[
"BSL-1.0"
] | 2
|
2021-04-02T20:43:29.000Z
|
2021-04-05T23:38:58.000Z
|
src/sage/groups/class_function.py
|
yzpopulation/sage
|
d2dc2f80b5a8e039701e292653e25366e3e5ec1e
|
[
"BSL-1.0"
] | 15
|
2020-07-23T10:46:25.000Z
|
2022-01-25T15:37:24.000Z
|
r"""
Class functions of groups.
This module implements a wrapper of GAP's ClassFunction function.
NOTE: The ordering of the columns of the character table of a group
corresponds to the ordering of the list. However, in general there is
no way to canonically list (or index) the conjugacy classes of a group.
Therefore the ordering of the columns of the character table of
a group is somewhat random.
AUTHORS:
- Franco Saliola (November 2008): initial version
- Volker Braun (October 2010): Bugfixes, exterior and symmetric power.
"""
#*****************************************************************************
# Copyright (C) 2008 Franco Saliola <saliola@gmail.com>
#
# Distributed under the terms of the GNU General Public License (GPL)
# https://www.gnu.org/licenses/
#*****************************************************************************
from sage.structure.sage_object import SageObject
from sage.structure.richcmp import richcmp, richcmp_method
from sage.interfaces.gap import gap
from sage.rings.all import Integer
from sage.rings.all import CyclotomicField
from sage.libs.gap.element import GapElement
from sage.libs.gap.libgap import libgap
from sage.libs.gap.element import GapElement as LibGapElement
# TODO:
#
# This module needs to be rewritten to implement the ring of class
# functions in the usual parent/element pattern. But
# http://trac.sagemath.org/14014 is already too long...
def ClassFunction(group, values):
"""
Construct a class function.
INPUT:
- ``group`` -- a group.
- ``values`` -- list/tuple/iterable of numbers. The values of the
class function on the conjugacy classes, in that order.
EXAMPLES::
sage: G = CyclicPermutationGroup(4)
sage: G.conjugacy_classes()
[Conjugacy class of () in Cyclic group of order 4 as a permutation group,
Conjugacy class of (1,2,3,4) in Cyclic group of order 4 as a permutation group,
Conjugacy class of (1,3)(2,4) in Cyclic group of order 4 as a permutation group,
Conjugacy class of (1,4,3,2) in Cyclic group of order 4 as a permutation group]
sage: values = [1, -1, 1, -1]
sage: chi = ClassFunction(G, values); chi
Character of Cyclic group of order 4 as a permutation group
"""
try:
return group.class_function(values)
except AttributeError:
pass
if isinstance(values, LibGapElement):
return ClassFunction_libgap(group, values)
return ClassFunction_gap(group, values)
#####################################################################
###
### GAP Interface-based Class Function
###
### This is old code that should be deleted once we have transitioned
### everything to using the library interface to GAP.
###
#####################################################################
@richcmp_method
class ClassFunction_gap(SageObject):
"""
A wrapper of GAP's ClassFunction function.
.. NOTE::
It is *not* checked whether the given values describe a character,
since GAP does not do this.
EXAMPLES::
sage: G = CyclicPermutationGroup(4)
sage: values = [1, -1, 1, -1]
sage: chi = ClassFunction(G, values); chi
Character of Cyclic group of order 4 as a permutation group
sage: loads(dumps(chi)) == chi
True
"""
def __init__(self, G, values):
r"""
Return the character of the group ``G`` with values given by the list
values. The order of the values must correspond to the output of
``G.conjugacy_classes_representatives()``.
EXAMPLES::
sage: G = CyclicPermutationGroup(4)
sage: values = [1, -1, 1, -1]
sage: chi = ClassFunction(G, values); chi
Character of Cyclic group of order 4 as a permutation group
"""
self._group = G
if isinstance(values, GapElement) and gap.IsClassFunction(values):
self._gap_classfunction = values
else:
self._gap_classfunction = gap.ClassFunction(G, list(values))
e = self._gap_classfunction.Conductor()
self._base_ring = CyclotomicField(e)
def _gap_init_(self):
r"""
Returns a string showing how to declare / initialize self in Gap.
Stored in the \code{self._gap_string} attribute.
EXAMPLES::
sage: G = CyclicPermutationGroup(4)
sage: values = [1, -1, 1, -1]
sage: ClassFunction(G, values)._gap_init_()
'ClassFunction( CharacterTable( Group( [ (1,2,3,4) ] ) ), [ 1, -1, 1, -1 ] )'
"""
return str(self._gap_classfunction)
def _gap_(self, *args):
r"""
Coerce self into a GAP element.
EXAMPLES::
sage: G = CyclicPermutationGroup(4)
sage: values = [1, -1, 1, -1]
sage: chi = ClassFunction(G, values); chi
Character of Cyclic group of order 4 as a permutation group
sage: type(_)
<class 'sage.groups.class_function.ClassFunction_gap'>
sage: chi._gap_()
ClassFunction( CharacterTable( Group( [ (1,2,3,4) ] ) ), [ 1, -1, 1, -1 ] )
sage: type(_)
<class 'sage.interfaces.gap.GapElement'>
"""
return self._gap_classfunction
def __repr__(self):
r"""
Return a string representation.
OUTPUT:
A string.
EXAMPLES::
sage: G = SymmetricGroup(4)
sage: values = [1, -1, 1, 1, -1]
sage: ClassFunction(G, values)
Character of Symmetric group of order 4! as a permutation group
"""
return "Character of %s" % repr(self._group)
def __iter__(self):
r"""
Iterate through the values of self evaluated on the conjugacy
classes.
EXAMPLES::
sage: xi = ClassFunction(SymmetricGroup(4), [1, -1, 1, 1, -1])
sage: list(xi)
[1, -1, 1, 1, -1]
"""
for v in self._gap_classfunction:
yield self._base_ring(v)
def __richcmp__(self, other, op):
r"""
Rich comparison for class functions.
Compares groups and then the values of the class function on the
conjugacy classes.
EXAMPLES::
sage: G = PermutationGroup([[(1,2,3),(4,5)],[(3,4)]])
sage: chi = G.character([1, 1, 1, 1, 1, 1, 1])
sage: H = PermutationGroup([[(1,2,3),(4,5)]])
sage: xi = H.character([1, 1, 1, 1, 1, 1])
sage: chi == chi
True
sage: xi == xi
True
sage: xi == chi
False
sage: chi < xi
False
sage: xi < chi
True
"""
if isinstance(other, ClassFunction_gap):
return richcmp((self._group, self.values()),
(other._group, other.values()), op)
else:
return NotImplemented
def __hash__(self):
r"""
TESTS::
sage: G = SymmetricGroup(5)
sage: chi1 = ClassFunction(G,[1,1,1,1,1,1,1])
sage: d = {chi1:'trivial'}
"""
return hash((self._group, tuple(self)))
def __reduce__(self):
r"""
Add pickle support.
EXAMPLES::
sage: G = PermutationGroup([[(1,2,3),(4,5)],[(3,4)]])
sage: chi = ClassFunction(G, [1, 1, 1, 1, 1, 1, 1])
sage: type(chi)
<class 'sage.groups.class_function.ClassFunction_gap'>
sage: loads(dumps(chi)) == chi
True
"""
return ClassFunction_gap, (self._group, self.values())
def domain(self):
r"""
Returns the domain of ``self``.
OUTPUT:
The underlying group of the class function.
EXAMPLES::
sage: ClassFunction(SymmetricGroup(4), [1,-1,1,1,-1]).domain()
Symmetric group of order 4! as a permutation group
"""
return self._group
def __call__(self, g):
"""
Evaluate the character on the group element `g`.
Raise an error if `g` is not in `G`.
EXAMPLES::
sage: G = GL(2,7)
sage: values = G.gap().CharacterTable().Irr()[2].List().sage()
sage: chi = ClassFunction(G, values)
sage: z = G([[3,0],[0,3]]); z
[3 0]
[0 3]
sage: chi(z)
zeta3
sage: G = GL(2,3)
sage: chi = G.irreducible_characters()[3]
sage: g = G.conjugacy_classes_representatives()[6]
sage: chi(g)
zeta8^3 + zeta8
sage: G = SymmetricGroup(3)
sage: h = G((2,3))
sage: triv = G.trivial_character()
sage: triv(h)
1
"""
return self._base_ring(gap(g)._operation("^", self._gap_classfunction))
def __add__(self, other):
r"""
Returns the sum of the characters self and other.
INPUT:
- ``other`` -- a :class:`ClassFunction` of the same group as
``self``.
OUTPUT:
A :class:`ClassFunction`
EXAMPLES::
sage: chi = ClassFunction(SymmetricGroup(4), [3, 1, -1, 0, -1])
sage: s = chi+chi
sage: s
Character of Symmetric group of order 4! as a permutation group
sage: s.values()
[6, 2, -2, 0, -2]
"""
if not isinstance(other, ClassFunction_gap):
raise NotImplementedError
s = self._gap_classfunction + other._gap_classfunction
return ClassFunction(self._group, s)
def __sub__(self, other):
r"""
Returns the difference of the characters ``self`` and ``other``.
INPUT:
- ``other`` -- a :class:`ClassFunction` of the same group as
``self``.
OUTPUT:
A :class:`ClassFunction`
EXAMPLES::
sage: G = SymmetricGroup(4)
sage: chi1 = ClassFunction(G, [3, 1, -1, 0, -1])
sage: chi2 = ClassFunction(G, [1, -1, 1, 1, -1])
sage: s = chi1 - chi2
sage: s
Character of Symmetric group of order 4! as a permutation group
sage: s.values()
[2, 2, -2, -1, 0]
"""
if not isinstance(other, ClassFunction_gap):
raise NotImplementedError
s = self._gap_classfunction - other._gap_classfunction
return ClassFunction(self._group, s)
def __mul__(self, other):
r"""
Return the product of the character with ``other``.
INPUT:
- ``other`` -- either a number or a :class:`ClassFunction` of
the same group as ``self``. A number can be anything that
can be converted into GAP: integers, rationals, and elements
of certain number fields.
OUTPUT:
A :class:`ClassFunction`
EXAMPLES::
sage: G = SymmetricGroup(4)
sage: chi1 = ClassFunction(G, [3, 1, -1, 0, -1])
sage: 3*chi1
Character of Symmetric group of order 4! as a permutation group
sage: 3*chi1 == chi1+chi1+chi1
True
sage: (3*chi1).values()
[9, 3, -3, 0, -3]
sage: (1/2*chi1).values()
[3/2, 1/2, -1/2, 0, -1/2]
sage: CF3 = CyclotomicField(3)
sage: CF3.inject_variables()
Defining zeta3
sage: (zeta3 * chi1).values()
[3*zeta3, zeta3, -zeta3, 0, -zeta3]
sage: chi2 = ClassFunction(G, [1, -1, 1, 1, -1])
sage: p = chi1*chi2
sage: p
Character of Symmetric group of order 4! as a permutation group
sage: p.values()
[3, -1, -1, 0, 1]
"""
if isinstance(other, ClassFunction_gap):
p = self._gap_classfunction * other._gap_classfunction
return ClassFunction(self._group, p)
else:
return ClassFunction(self._group, other * self._gap_classfunction)
def __rmul__(self, other):
r"""
Return the reverse multiplication of ``self`` and ``other``.
EXAMPLES::
sage: G = SymmetricGroup(4)
sage: chi = ClassFunction(G, [3, 1, -1, 0, -1])
sage: chi * 4 # calls chi.__mul__
Character of Symmetric group of order 4! as a permutation group
sage: 4 * chi # calls chi.__rmul__
Character of Symmetric group of order 4! as a permutation group
sage: (4 * chi).values()
[12, 4, -4, 0, -4]
"""
return self * other
def __pos__(self):
r"""
Return ``self``.
OUTPUT:
A :class:`ClassFunction`
EXAMPLES::
sage: chi = ClassFunction(SymmetricGroup(4), [3, 1, -1, 0, -1])
sage: +chi
Character of Symmetric group of order 4! as a permutation group
sage: _.values()
[3, 1, -1, 0, -1]
sage: chi.__pos__() == +chi
True
"""
return ClassFunction(self._group, self._gap_classfunction)
def __neg__(self):
r"""
Return the additive inverse of ``self``.
OUTPUT:
A :class:`ClassFunction`
EXAMPLES::
sage: chi = ClassFunction(SymmetricGroup(4), [3, 1, -1, 0, -1])
sage: -chi
Character of Symmetric group of order 4! as a permutation group
sage: _.values()
[-3, -1, 1, 0, 1]
sage: chi.__neg__() == -chi
True
"""
return ClassFunction(self._group, -self._gap_classfunction)
def __pow__(self, other):
r"""
Returns the product of ``self`` with itself ``other`` times.
EXAMPLES::
sage: chi = ClassFunction(SymmetricGroup(4), [3, 1, -1, 0, -1])
sage: p = chi**3
sage: p
Character of Symmetric group of order 4! as a permutation group
sage: p.values()
[27, 1, -1, 0, -1]
"""
if not isinstance(other, (int,Integer)):
raise NotImplementedError
return ClassFunction(self._group, self._gap_classfunction ** other)
def symmetric_power(self, n):
r"""
Returns the symmetrized product of self with itself ``n`` times.
INPUT:
- ``n`` -- a positive integer.
OUTPUT:
The ``n``-th symmetrized power of ``self`` as a
:class:`ClassFunction`.
EXAMPLES::
sage: chi = ClassFunction(SymmetricGroup(4), [3, 1, -1, 0, -1])
sage: p = chi.symmetric_power(3)
sage: p
Character of Symmetric group of order 4! as a permutation group
sage: p.values()
[10, 2, -2, 1, 0]
"""
n = Integer(n)
tbl = gap.UnderlyingCharacterTable(self)
return ClassFunction(self._group, gap.SymmetricParts(tbl,[self],n)[1])
def exterior_power(self, n):
r"""
Returns the anti-symmetrized product of self with itself ``n`` times.
INPUT:
- ``n`` -- a positive integer.
OUTPUT:
The ``n``-th anti-symmetrized power of ``self`` as a
:class:`ClassFunction`.
EXAMPLES::
sage: chi = ClassFunction(SymmetricGroup(4), [3, 1, -1, 0, -1])
sage: p = chi.exterior_power(3) # the highest anti-symmetric power for a 3-d character
sage: p
Character of Symmetric group of order 4! as a permutation group
sage: p.values()
[1, -1, 1, 1, -1]
sage: p == chi.determinant_character()
True
"""
n = Integer(n)
tbl = gap.UnderlyingCharacterTable(self)
return ClassFunction(self._group, gap.AntiSymmetricParts(tbl,[self],n)[1])
def scalar_product(self, other):
r"""
Returns the scalar product of self with other.
EXAMPLES::
sage: S4 = SymmetricGroup(4)
sage: irr = S4.irreducible_characters()
sage: [[x.scalar_product(y) for x in irr] for y in irr]
[[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]]
"""
return self._gap_classfunction.ScalarProduct(other)
def is_irreducible(self):
r"""
Returns True if self cannot be written as the sum of two nonzero
characters of self.
EXAMPLES::
sage: S4 = SymmetricGroup(4)
sage: irr = S4.irreducible_characters()
sage: [x.is_irreducible() for x in irr]
[True, True, True, True, True]
"""
return bool(self._gap_classfunction.IsIrreducible())
def degree(self):
r"""
Returns the degree of the character self.
EXAMPLES::
sage: S5 = SymmetricGroup(5)
sage: irr = S5.irreducible_characters()
sage: [x.degree() for x in irr]
[1, 4, 5, 6, 5, 4, 1]
"""
return Integer(self._gap_classfunction.DegreeOfCharacter())
def irreducible_constituents(self):
r"""
Returns a list of the characters that appear in the decomposition
of ``self``.
EXAMPLES::
sage: S5 = SymmetricGroup(5)
sage: chi = ClassFunction(S5, [22, -8, 2, 1, 1, 2, -3])
sage: irr = chi.irreducible_constituents(); irr
(Character of Symmetric group of order 5! as a permutation group,
Character of Symmetric group of order 5! as a permutation group)
sage: list(map(list, irr))
[[4, -2, 0, 1, 1, 0, -1], [5, -1, 1, -1, -1, 1, 0]]
sage: G = GL(2,3)
sage: chi = ClassFunction(G, [-1, -1, -1, -1, -1, -1, -1, -1])
sage: chi.irreducible_constituents()
(Character of General Linear Group of degree 2 over Finite Field of size 3,)
sage: chi = ClassFunction(G, [1, 1, 1, 1, 1, 1, 1, 1])
sage: chi.irreducible_constituents()
(Character of General Linear Group of degree 2 over Finite Field of size 3,)
sage: chi = ClassFunction(G, [2, 2, 2, 2, 2, 2, 2, 2])
sage: chi.irreducible_constituents()
(Character of General Linear Group of degree 2 over Finite Field of size 3,)
sage: chi = ClassFunction(G, [-1, -1, -1, -1, 3, -1, -1, 1])
sage: ic = chi.irreducible_constituents(); ic
(Character of General Linear Group of degree 2 over Finite Field of size 3,
Character of General Linear Group of degree 2 over Finite Field of size 3)
sage: list(map(list, ic))
[[2, -1, 2, -1, 2, 0, 0, 0], [3, 0, 3, 0, -1, 1, 1, -1]]
"""
L = self._gap_classfunction.ConstituentsOfCharacter()
return tuple(ClassFunction(self._group, list(l)) for l in L)
def decompose(self):
r"""
Returns a list of the characters that appear in the decomposition
of ``self``.
EXAMPLES::
sage: S5 = SymmetricGroup(5)
sage: chi = ClassFunction(S5, [22, -8, 2, 1, 1, 2, -3])
sage: chi.decompose()
((3, Character of Symmetric group of order 5! as a permutation group),
(2, Character of Symmetric group of order 5! as a permutation group))
"""
L = []
for irr in self.irreducible_constituents():
L.append((self.scalar_product(irr), irr))
return tuple(L)
def norm(self):
r"""
Returns the norm of self.
EXAMPLES::
sage: A5 = AlternatingGroup(5)
sage: [x.norm() for x in A5.irreducible_characters()]
[1, 1, 1, 1, 1]
"""
return self._gap_classfunction.Norm()
def values(self):
r"""
Return the list of values of self on the conjugacy classes.
EXAMPLES::
sage: G = GL(2,3)
sage: [x.values() for x in G.irreducible_characters()] #random
[[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, -1, -1, -1],
[2, -1, 2, -1, 2, 0, 0, 0],
[2, 1, -2, -1, 0, -zeta8^3 - zeta8, zeta8^3 + zeta8, 0],
[2, 1, -2, -1, 0, zeta8^3 + zeta8, -zeta8^3 - zeta8, 0],
[3, 0, 3, 0, -1, -1, -1, 1],
[3, 0, 3, 0, -1, 1, 1, -1],
[4, -1, -4, 1, 0, 0, 0, 0]]
TESTS::
sage: G = GL(2,3)
sage: k = CyclotomicField(8)
sage: zeta8 = k.gen()
sage: v = [tuple(x.values()) for x in G.irreducible_characters()]
sage: set(v) == set([(1, 1, 1, 1, 1, 1, 1, 1), (1, 1, 1, 1, 1, -1, -1, -1), (2, -1, 2, -1, 2, 0, 0, 0), (2, 1, -2, -1, 0, -zeta8^3 - zeta8, zeta8^3 + zeta8, 0), (2, 1, -2, -1, 0, zeta8^3 + zeta8, -zeta8^3 - zeta8, 0), (3, 0, 3, 0, -1, -1, -1, 1), (3, 0, 3, 0, -1, 1, 1, -1), (4, -1, -4, 1, 0, 0, 0, 0)])
True
"""
return list(self)
def central_character(self):
r"""
Returns the central character of self.
EXAMPLES::
sage: t = SymmetricGroup(4).trivial_character()
sage: t.central_character().values()
[1, 6, 3, 8, 6]
"""
return ClassFunction(self._group, self._gap_classfunction.CentralCharacter())
def determinant_character(self):
r"""
Returns the determinant character of self.
EXAMPLES::
sage: t = ClassFunction(SymmetricGroup(4), [1, -1, 1, 1, -1])
sage: t.determinant_character().values()
[1, -1, 1, 1, -1]
"""
return ClassFunction(self._group, self._gap_classfunction.DeterminantOfCharacter())
def tensor_product(self, other):
r"""
EXAMPLES::
sage: S3 = SymmetricGroup(3)
sage: chi1, chi2, chi3 = S3.irreducible_characters()
sage: chi1.tensor_product(chi3).values()
[1, -1, 1]
"""
return ClassFunction(self._group, gap.Tensored([self],[other])[1])
def restrict(self, H):
r"""
Return the restricted character.
INPUT:
- ``H`` -- a subgroup of the underlying group of ``self``.
OUTPUT:
A :class:`ClassFunction` of ``H`` defined by restriction.
EXAMPLES::
sage: G = SymmetricGroup(5)
sage: chi = ClassFunction(G, [3, -3, -1, 0, 0, -1, 3]); chi
Character of Symmetric group of order 5! as a permutation group
sage: H = G.subgroup([(1,2,3), (1,2), (4,5)])
sage: chi.restrict(H)
Character of Subgroup generated by [(4,5), (1,2), (1,2,3)] of (Symmetric group of order 5! as a permutation group)
sage: chi.restrict(H).values()
[3, -3, -3, -1, 0, 0]
"""
rest = self._gap_classfunction.RestrictedClassFunction(H._gap_())
return ClassFunction(H, rest)
def induct(self, G):
r"""
Return the induced character.
INPUT:
- ``G`` -- A supergroup of the underlying group of ``self``.
OUTPUT:
A :class:`ClassFunction` of ``G`` defined by
induction. Induction is the adjoint functor to restriction,
see :meth:`restrict`.
EXAMPLES::
sage: G = SymmetricGroup(5)
sage: H = G.subgroup([(1,2,3), (1,2), (4,5)])
sage: xi = H.trivial_character(); xi
Character of Subgroup generated by [(4,5), (1,2), (1,2,3)] of (Symmetric group of order 5! as a permutation group)
sage: xi.induct(G)
Character of Symmetric group of order 5! as a permutation group
sage: xi.induct(G).values()
[10, 4, 2, 1, 1, 0, 0]
"""
rest = self._gap_classfunction.InducedClassFunction(G._gap_())
return ClassFunction(G, rest)
def adams_operation(self, k):
r"""
Return the ``k``-th Adams operation on ``self``.
Let `G` be a finite group. The `k`-th Adams operation `\Psi^k`
is given by
.. MATH::
\Psi^k(\chi)(g) = \chi(g^k).
The Adams operations turn the representation ring of `G`
into a `\lambda`-ring.
EXAMPLES::
sage: G = groups.permutation.Alternating(5)
sage: chars = G.irreducible_characters()
sage: [chi.adams_operation(2).values() for chi in chars]
[[1, 1, 1, 1, 1],
[3, 3, 0, -zeta5^3 - zeta5^2, zeta5^3 + zeta5^2 + 1],
[3, 3, 0, zeta5^3 + zeta5^2 + 1, -zeta5^3 - zeta5^2],
[4, 4, 1, -1, -1],
[5, 5, -1, 0, 0]]
sage: chars[4].adams_operation(2).decompose()
((1, Character of Alternating group of order 5!/2 as a permutation group),
(-1, Character of Alternating group of order 5!/2 as a permutation group),
(-1, Character of Alternating group of order 5!/2 as a permutation group),
(2, Character of Alternating group of order 5!/2 as a permutation group))
REFERENCES:
- :wikipedia:`Adams_operation`
"""
reprs = self._group.conjugacy_classes_representatives()
return ClassFunction(self._group, [self(x**k) for x in reprs])
#####################################################################
###
### Class function using the GAP library
###
#####################################################################
@richcmp_method
class ClassFunction_libgap(SageObject):
"""
A wrapper of GAP's ``ClassFunction`` function.
.. NOTE::
It is *not* checked whether the given values describe a character,
since GAP does not do this.
EXAMPLES::
sage: G = SO(3,3)
sage: values = [1, -1, -1, 1, 2]
sage: chi = ClassFunction(G, values); chi
Character of Special Orthogonal Group of degree 3 over Finite Field of size 3
sage: loads(dumps(chi)) == chi
True
"""
def __init__(self, G, values):
r"""
Return the character of the group ``G`` with values given by the list
values. The order of the values must correspond to the output of
``G.conjugacy_classes_representatives()``.
EXAMPLES::
sage: G = CyclicPermutationGroup(4)
sage: values = [1, -1, 1, -1]
sage: chi = ClassFunction(G, values); chi
Character of Cyclic group of order 4 as a permutation group
"""
self._group = G
if isinstance(values, LibGapElement) and values.IsClassFunction():
self._gap_classfunction = values
else:
self._gap_classfunction = libgap.ClassFunction(G._libgap_(),
list(values))
e = self._gap_classfunction.Conductor().sage()
self._base_ring = CyclotomicField(e)
def gap(self):
r"""
Return the underlying LibGAP element.
EXAMPLES::
sage: G = CyclicPermutationGroup(4)
sage: values = [1, -1, 1, -1]
sage: chi = ClassFunction(G, values); chi
Character of Cyclic group of order 4 as a permutation group
sage: type(chi)
<class 'sage.groups.class_function.ClassFunction_gap'>
sage: gap(chi)
ClassFunction( CharacterTable( Group( [ (1,2,3,4) ] ) ), [ 1, -1, 1, -1 ] )
sage: type(_)
<class 'sage.interfaces.gap.GapElement'>
"""
return self._gap_classfunction
_libgap_ = _gap_ = gap
def _repr_(self):
r"""
Return a string representation.
OUTPUT:
A string.
EXAMPLES::
sage: G = SymmetricGroup(4)
sage: values = [1, -1, 1, 1, -1]
sage: ClassFunction(G, values)
Character of Symmetric group of order 4! as a permutation group
"""
return "Character of %s" % repr(self._group)
def __iter__(self):
r"""
Iterate through the values.
A class function assigns values to each conjugacy class. This
method iterates over the values, in the same order as the
conjugacy classes of the group.
EXAMPLES::
sage: xi = ClassFunction(SymmetricGroup(4), [1, -1, 1, 1, -1])
sage: list(xi)
[1, -1, 1, 1, -1]
"""
for v in self._gap_classfunction.List():
yield v.sage(ring=self._base_ring)
def __richcmp__(self, other, op):
r"""
Rich comparison for class functions.
Compares groups and then the values of the class function on the
conjugacy classes.
EXAMPLES::
sage: G = PermutationGroup([[(1,2,3),(4,5)],[(3,4)]])
sage: chi = G.character([1, 1, 1, 1, 1, 1, 1])
sage: H = PermutationGroup([[(1,2,3),(4,5)]])
sage: xi = H.character([1, 1, 1, 1, 1, 1])
sage: chi == chi
True
sage: xi == xi
True
sage: xi == chi
False
sage: chi < xi
False
sage: xi < chi
True
"""
if isinstance(other, ClassFunction_libgap):
return richcmp((self._group, self.values()),
(other._group, other.values()), op)
else:
return NotImplemented
def __reduce__(self):
r"""
Add pickle support.
EXAMPLES::
sage: G = GL(2,7)
sage: values = G.gap().CharacterTable().Irr()[2].List().sage()
sage: chi = ClassFunction(G, values)
sage: type(chi)
<class 'sage.groups.class_function.ClassFunction_libgap'>
sage: loads(dumps(chi)) == chi
True
"""
return ClassFunction_libgap, (self._group, self.values())
def domain(self):
r"""
Return the domain of ``self``.
OUTPUT:
The underlying group of the class function.
EXAMPLES::
sage: ClassFunction(SymmetricGroup(4), [1,-1,1,1,-1]).domain()
Symmetric group of order 4! as a permutation group
"""
return self._group
def __call__(self, g):
"""
Evaluate the character on the group element `g`.
Raise an error if `g` is not in `G`.
EXAMPLES::
sage: G = GL(2,7)
sage: values = G.gap().CharacterTable().Irr()[2].List().sage()
sage: chi = ClassFunction(G, values)
sage: z = G([[3,0],[0,3]]); z
[3 0]
[0 3]
sage: chi(z)
zeta3
sage: G = GL(2,3)
sage: chi = G.irreducible_characters()[3]
sage: g = G.conjugacy_classes_representatives()[6]
sage: chi(g)
zeta8^3 + zeta8
sage: G = SymmetricGroup(3)
sage: h = G((2,3))
sage: triv = G.trivial_character()
sage: triv(h)
1
"""
value = g.gap() ** self.gap()
return value.sage(self._base_ring)
def __add__(self, other):
r"""
Return the sum of the characters ``self`` and ``other``.
INPUT:
- ``other`` -- a :class:`ClassFunction` of the same group as
``self``.
OUTPUT:
A :class:`ClassFunction`
EXAMPLES::
sage: chi = ClassFunction(SymmetricGroup(4), [3, 1, -1, 0, -1])
sage: s = chi+chi
sage: s
Character of Symmetric group of order 4! as a permutation group
sage: s.values()
[6, 2, -2, 0, -2]
"""
if not isinstance(other, ClassFunction_libgap):
raise NotImplementedError
s = self._gap_classfunction + other._gap_classfunction
return ClassFunction(self._group, s)
def __sub__(self, other):
r"""
Return the difference of the characters ``self`` and ``other``.
INPUT:
- ``other`` -- a :class:`ClassFunction` of the same group as
``self``.
OUTPUT:
A :class:`ClassFunction`
EXAMPLES::
sage: G = SymmetricGroup(4)
sage: chi1 = ClassFunction(G, [3, 1, -1, 0, -1])
sage: chi2 = ClassFunction(G, [1, -1, 1, 1, -1])
sage: s = chi1 - chi2
sage: s
Character of Symmetric group of order 4! as a permutation group
sage: s.values()
[2, 2, -2, -1, 0]
"""
if not isinstance(other, ClassFunction_libgap):
raise NotImplementedError
s = self._gap_classfunction - other._gap_classfunction
return ClassFunction(self._group, s)
def __mul__(self, other):
r"""
Return the product of the character with ``other``.
INPUT:
- ``other`` -- either a number or a :class:`ClassFunction` of
the same group as ``self``. A number can be anything that
can be converted into GAP: integers, rationals, and elements
of certain number fields.
OUTPUT:
A :class:`ClassFunction`
EXAMPLES::
sage: G = SymmetricGroup(4)
sage: chi1 = ClassFunction(G, [3, 1, -1, 0, -1])
sage: 3*chi1
Character of Symmetric group of order 4! as a permutation group
sage: 3*chi1 == chi1+chi1+chi1
True
sage: (3*chi1).values()
[9, 3, -3, 0, -3]
sage: (1/2*chi1).values()
[3/2, 1/2, -1/2, 0, -1/2]
sage: CF3 = CyclotomicField(3)
sage: CF3.inject_variables()
Defining zeta3
sage: (zeta3 * chi1).values()
[3*zeta3, zeta3, -zeta3, 0, -zeta3]
sage: chi2 = ClassFunction(G, [1, -1, 1, 1, -1])
sage: p = chi1*chi2
sage: p
Character of Symmetric group of order 4! as a permutation group
sage: p.values()
[3, -1, -1, 0, 1]
"""
if isinstance(other, ClassFunction_libgap):
p = self._gap_classfunction * other._gap_classfunction
return ClassFunction(self._group, p)
else:
return ClassFunction(self._group, other * self._gap_classfunction)
def __rmul__(self, other):
r"""
Return the reverse multiplication of ``self`` and ``other``.
EXAMPLES::
sage: G = SymmetricGroup(4)
sage: chi = ClassFunction(G, [3, 1, -1, 0, -1])
sage: chi * 4 # calls chi.__mul__
Character of Symmetric group of order 4! as a permutation group
sage: 4 * chi # calls chi.__rmul__
Character of Symmetric group of order 4! as a permutation group
sage: (4 * chi).values()
[12, 4, -4, 0, -4]
"""
return self.__mul__(other)
def __pos__(self):
r"""
Return ``self``.
OUTPUT:
A :class:`ClassFunction`
EXAMPLES::
sage: chi = ClassFunction(SymmetricGroup(4), [3, 1, -1, 0, -1])
sage: +chi
Character of Symmetric group of order 4! as a permutation group
sage: _.values()
[3, 1, -1, 0, -1]
sage: chi.__pos__() == +chi
True
"""
return ClassFunction(self._group, self._gap_classfunction)
def __neg__(self):
r"""
Return the additive inverse of ``self``.
OUTPUT:
A :class:`ClassFunction`
EXAMPLES::
sage: chi = ClassFunction(SymmetricGroup(4), [3, 1, -1, 0, -1])
sage: -chi
Character of Symmetric group of order 4! as a permutation group
sage: _.values()
[-3, -1, 1, 0, 1]
sage: chi.__neg__() == -chi
True
"""
return ClassFunction(self._group, -self._gap_classfunction)
def __pow__(self, other):
r"""
Return the product of ``self`` with itself ``other`` times.
EXAMPLES::
sage: chi = ClassFunction(SymmetricGroup(4), [3, 1, -1, 0, -1])
sage: p = chi**3
sage: p
Character of Symmetric group of order 4! as a permutation group
sage: p.values()
[27, 1, -1, 0, -1]
"""
if not isinstance(other, (int,Integer)):
raise NotImplementedError
return ClassFunction(self._group, self._gap_classfunction ** other)
def symmetric_power(self, n):
r"""
Return the symmetrized product of ``self`` with itself ``n`` times.
INPUT:
- ``n`` -- a positive integer
OUTPUT:
The ``n``-th symmetrized power of ``self`` as a
:class:`ClassFunction`.
EXAMPLES::
sage: chi = ClassFunction(SymmetricGroup(4), [3, 1, -1, 0, -1])
sage: p = chi.symmetric_power(3)
sage: p
Character of Symmetric group of order 4! as a permutation group
sage: p.values()
[10, 2, -2, 1, 0]
"""
n = Integer(n)
tbl = self._gap_classfunction.UnderlyingCharacterTable(self)
return ClassFunction(self._group, tbl.SymmetricParts([self],n)[1])
def exterior_power(self, n):
r"""
Return the anti-symmetrized product of ``self`` with itself ``n`` times.
INPUT:
- ``n`` -- a positive integer
OUTPUT:
The ``n``-th anti-symmetrized power of ``self`` as a
:class:`ClassFunction`.
EXAMPLES::
sage: chi = ClassFunction(SymmetricGroup(4), [3, 1, -1, 0, -1])
sage: p = chi.exterior_power(3) # the highest anti-symmetric power for a 3-d character
sage: p
Character of Symmetric group of order 4! as a permutation group
sage: p.values()
[1, -1, 1, 1, -1]
sage: p == chi.determinant_character()
True
"""
n = Integer(n)
tbl = self._gap_classfunction.UnderlyingCharacterTable(self)
return ClassFunction(self._group, tbl.AntiSymmetricParts([self],n)[1])
def scalar_product(self, other):
r"""
Return the scalar product of ``self`` with ``other``.
EXAMPLES::
sage: S4 = SymmetricGroup(4)
sage: irr = S4.irreducible_characters()
sage: [[x.scalar_product(y) for x in irr] for y in irr]
[[1, 0, 0, 0, 0],
[0, 1, 0, 0, 0],
[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0],
[0, 0, 0, 0, 1]]
"""
return self._gap_classfunction.ScalarProduct(other).sage()
def is_irreducible(self):
r"""
Return ``True`` if ``self`` cannot be written as the sum of two nonzero
characters of ``self``.
EXAMPLES::
sage: S4 = SymmetricGroup(4)
sage: irr = S4.irreducible_characters()
sage: [x.is_irreducible() for x in irr]
[True, True, True, True, True]
"""
return self._gap_classfunction.IsIrreducible().sage()
def degree(self):
r"""
Return the degree of the character ``self``.
EXAMPLES::
sage: S5 = SymmetricGroup(5)
sage: irr = S5.irreducible_characters()
sage: [x.degree() for x in irr]
[1, 4, 5, 6, 5, 4, 1]
"""
return self._gap_classfunction.DegreeOfCharacter().sage()
def irreducible_constituents(self):
r"""
Return a list of the characters that appear in the decomposition
of ``self``.
EXAMPLES::
sage: S5 = SymmetricGroup(5)
sage: chi = ClassFunction(S5, [22, -8, 2, 1, 1, 2, -3])
sage: irr = chi.irreducible_constituents(); irr
(Character of Symmetric group of order 5! as a permutation group,
Character of Symmetric group of order 5! as a permutation group)
sage: list(map(list, irr))
[[4, -2, 0, 1, 1, 0, -1], [5, -1, 1, -1, -1, 1, 0]]
sage: G = GL(2,3)
sage: chi = ClassFunction(G, [-1, -1, -1, -1, -1, -1, -1, -1])
sage: chi.irreducible_constituents()
(Character of General Linear Group of degree 2 over Finite Field of size 3,)
sage: chi = ClassFunction(G, [1, 1, 1, 1, 1, 1, 1, 1])
sage: chi.irreducible_constituents()
(Character of General Linear Group of degree 2 over Finite Field of size 3,)
sage: chi = ClassFunction(G, [2, 2, 2, 2, 2, 2, 2, 2])
sage: chi.irreducible_constituents()
(Character of General Linear Group of degree 2 over Finite Field of size 3,)
sage: chi = ClassFunction(G, [-1, -1, -1, -1, 3, -1, -1, 1])
sage: ic = chi.irreducible_constituents(); ic
(Character of General Linear Group of degree 2 over Finite Field of size 3,
Character of General Linear Group of degree 2 over Finite Field of size 3)
sage: list(map(list, ic))
[[2, -1, 2, -1, 2, 0, 0, 0], [3, 0, 3, 0, -1, 1, 1, -1]]
"""
L = self._gap_classfunction.ConstituentsOfCharacter()
return tuple(ClassFunction_libgap(self._group, l) for l in L)
def decompose(self):
r"""
Return a list of the characters that appear in the decomposition
of ``self``.
EXAMPLES::
sage: S5 = SymmetricGroup(5)
sage: chi = ClassFunction(S5, [22, -8, 2, 1, 1, 2, -3])
sage: chi.decompose()
((3, Character of Symmetric group of order 5! as a permutation group),
(2, Character of Symmetric group of order 5! as a permutation group))
"""
L = []
for irr in self.irreducible_constituents():
L.append((self.scalar_product(irr), irr))
return tuple(L)
def norm(self):
r"""
Return the norm of ``self``.
EXAMPLES::
sage: A5 = AlternatingGroup(5)
sage: [x.norm() for x in A5.irreducible_characters()]
[1, 1, 1, 1, 1]
"""
return self._gap_classfunction.Norm().sage()
def values(self):
r"""
Return the list of values of self on the conjugacy classes.
EXAMPLES::
sage: G = GL(2,3)
sage: [x.values() for x in G.irreducible_characters()] #random
[[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, -1, -1, -1],
[2, -1, 2, -1, 2, 0, 0, 0],
[2, 1, -2, -1, 0, -zeta8^3 - zeta8, zeta8^3 + zeta8, 0],
[2, 1, -2, -1, 0, zeta8^3 + zeta8, -zeta8^3 - zeta8, 0],
[3, 0, 3, 0, -1, -1, -1, 1],
[3, 0, 3, 0, -1, 1, 1, -1],
[4, -1, -4, 1, 0, 0, 0, 0]]
TESTS::
sage: G = GL(2,3)
sage: k = CyclotomicField(8)
sage: zeta8 = k.gen()
sage: v = [tuple(x.values()) for x in G.irreducible_characters()]
sage: set(v) == set([(1, 1, 1, 1, 1, 1, 1, 1), (1, 1, 1, 1, 1, -1, -1, -1), (2, -1, 2, -1, 2, 0, 0, 0), (2, 1, -2, -1, 0, -zeta8^3 - zeta8, zeta8^3 + zeta8, 0), (2, 1, -2, -1, 0, zeta8^3 + zeta8, -zeta8^3 - zeta8, 0), (3, 0, 3, 0, -1, -1, -1, 1), (3, 0, 3, 0, -1, 1, 1, -1), (4, -1, -4, 1, 0, 0, 0, 0)])
True
"""
return list(self)
def central_character(self):
r"""
Return the central character of ``self``.
EXAMPLES::
sage: t = SymmetricGroup(4).trivial_character()
sage: t.central_character().values()
[1, 6, 3, 8, 6]
"""
return ClassFunction(self._group, self._gap_classfunction.CentralCharacter())
def determinant_character(self):
r"""
Return the determinant character of ``self``.
EXAMPLES::
sage: t = ClassFunction(SymmetricGroup(4), [1, -1, 1, 1, -1])
sage: t.determinant_character().values()
[1, -1, 1, 1, -1]
"""
return ClassFunction(self._group, self._gap_classfunction.DeterminantOfCharacter())
def tensor_product(self, other):
r"""
Return the tensor product of ``self`` and ``other``.
EXAMPLES::
sage: S3 = SymmetricGroup(3)
sage: chi1, chi2, chi3 = S3.irreducible_characters()
sage: chi1.tensor_product(chi3).values()
[1, -1, 1]
"""
product = libgap.Tensored([self], [other])
return ClassFunction(self._group, product[0])
def restrict(self, H):
r"""
Return the restricted character.
INPUT:
- ``H`` -- a subgroup of the underlying group of ``self``.
OUTPUT:
A :class:`ClassFunction` of ``H`` defined by restriction.
EXAMPLES::
sage: G = SymmetricGroup(5)
sage: chi = ClassFunction(G, [3, -3, -1, 0, 0, -1, 3]); chi
Character of Symmetric group of order 5! as a permutation group
sage: H = G.subgroup([(1,2,3), (1,2), (4,5)])
sage: chi.restrict(H)
Character of Subgroup generated by [(4,5), (1,2), (1,2,3)] of (Symmetric group of order 5! as a permutation group)
sage: chi.restrict(H).values()
[3, -3, -3, -1, 0, 0]
"""
try:
gapH = H.gap()
except AttributeError:
from sage.libs.gap.libgap import libgap
gapH = libgap(H)
rest = self._gap_classfunction.RestrictedClassFunction(gapH)
return ClassFunction(H, rest)
def induct(self, G):
r"""
Return the induced character.
INPUT:
- ``G`` -- A supergroup of the underlying group of ``self``.
OUTPUT:
A :class:`ClassFunction` of ``G`` defined by
induction. Induction is the adjoint functor to restriction,
see :meth:`restrict`.
EXAMPLES::
sage: G = SymmetricGroup(5)
sage: H = G.subgroup([(1,2,3), (1,2), (4,5)])
sage: xi = H.trivial_character(); xi
Character of Subgroup generated by [(4,5), (1,2), (1,2,3)] of (Symmetric group of order 5! as a permutation group)
sage: xi.induct(G)
Character of Symmetric group of order 5! as a permutation group
sage: xi.induct(G).values()
[10, 4, 2, 1, 1, 0, 0]
"""
try:
gapG = G.gap()
except AttributeError:
from sage.libs.gap.libgap import libgap
gapG = libgap(G)
ind = self._gap_classfunction.InducedClassFunction(gapG)
return ClassFunction(G, ind)
def adams_operation(self, k):
r"""
Return the ``k``-th Adams operation on ``self``.
Let `G` be a finite group. The `k`-th Adams operation `\Psi^k`
is given by
.. MATH::
\Psi^k(\chi)(g) = \chi(g^k).
The Adams operations turn the representation ring of `G`
into a `\lambda`-ring.
EXAMPLES::
sage: G = GL(2,3)
sage: chars = G.irreducible_characters()
sage: [chi.adams_operation(2).values() for chi in chars]
[[1, 1, 1, 1, 1, 1, 1, 1],
[1, 1, 1, 1, 1, 1, 1, 1],
[2, -1, 2, -1, 2, 2, 2, 2],
[2, -1, 2, -1, -2, 0, 0, 2],
[2, -1, 2, -1, -2, 0, 0, 2],
[3, 0, 3, 0, 3, -1, -1, 3],
[3, 0, 3, 0, 3, -1, -1, 3],
[4, 1, 4, 1, -4, 0, 0, 4]]
sage: chars[5].adams_operation(3).decompose()
((1, Character of General Linear Group of degree 2 over Finite Field of size 3),
(1, Character of General Linear Group of degree 2 over Finite Field of size 3),
(-1, Character of General Linear Group of degree 2 over Finite Field of size 3),
(1, Character of General Linear Group of degree 2 over Finite Field of size 3))
REFERENCES:
- :wikipedia:`Adams_operation`
"""
reprs = self._group.conjugacy_classes_representatives()
return ClassFunction(self._group, [self(x**k) for x in reprs])
| 31.163015
| 315
| 0.524449
| 6,008
| 48,365
| 4.146638
| 0.061418
| 0.027616
| 0.029141
| 0.028258
| 0.891984
| 0.875968
| 0.87075
| 0.856059
| 0.838277
| 0.829246
| 0
| 0.048529
| 0.343017
| 48,365
| 1,551
| 316
| 31.183108
| 0.735515
| 0.629153
| 0
| 0.547038
| 0
| 0
| 0.002934
| 0
| 0
| 0
| 0
| 0.000645
| 0
| 1
| 0.219512
| false
| 0.003484
| 0.034843
| 0
| 0.494774
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
60d8d876add57506b9904ece846dd4b83b15cbc9
| 96
|
py
|
Python
|
schematic/store/__init__.py
|
linglp/schematic
|
fd0308c43783ac8e367e8a5be0cc6e4bfbc44b29
|
[
"MIT"
] | 8
|
2020-11-06T23:38:06.000Z
|
2022-02-03T11:05:25.000Z
|
schematic/store/__init__.py
|
linglp/schematic
|
fd0308c43783ac8e367e8a5be0cc6e4bfbc44b29
|
[
"MIT"
] | 326
|
2020-09-15T20:52:59.000Z
|
2022-03-31T23:20:35.000Z
|
schematic/store/__init__.py
|
linglp/schematic
|
fd0308c43783ac8e367e8a5be0cc6e4bfbc44b29
|
[
"MIT"
] | 15
|
2020-09-16T23:12:09.000Z
|
2022-03-14T23:05:46.000Z
|
from schematic.store.base import BaseStorage
from schematic.store.synapse import SynapseStorage
| 32
| 50
| 0.875
| 12
| 96
| 7
| 0.666667
| 0.309524
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 96
| 2
| 51
| 48
| 0.954545
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
60df1252901202f3db5a30724cbcc56e56ecf7fb
| 985
|
py
|
Python
|
utils/preprocess.py
|
Pheobe-Sun/anomaly-detection-challenge-2020
|
71e34350023023a17338b7931da70af035b2454c
|
[
"MIT"
] | 1
|
2021-04-24T17:04:33.000Z
|
2021-04-24T17:04:33.000Z
|
utils/preprocess.py
|
Pheobe-Sun/anomaly-detection-challenge-2020
|
71e34350023023a17338b7931da70af035b2454c
|
[
"MIT"
] | null | null | null |
utils/preprocess.py
|
Pheobe-Sun/anomaly-detection-challenge-2020
|
71e34350023023a17338b7931da70af035b2454c
|
[
"MIT"
] | null | null | null |
import numpy as np
def window_offset(x, window_size):
'''
Note this assumes next-step label prediction with a stride of 1. It also assumes we don't use the current window.
Args
x (numpy array): Input time series
window_size (int): Sliding window size.
Return
[(n+1) x window_size] NumPy array
'''
pad_zeros = np.zeros(window_size)
x_pad = np.concatenate([pad_zeros, x])
windows = [x_pad[i:i+window_size] for i in range(len(x_pad)-(window_size))]
return np.array(windows)
def window(x, window_size):
'''
Note this assumes next-step label prediction with a stride of 1.
Args
x (numpy array): Input time series
window_size (int): Sliding window size.
Return
[len(x) x window_size] NumPy array
'''
pad_zeros = np.zeros(window_size-1)
x_pad = np.concatenate([pad_zeros, x])
windows = [x_pad[i:i+window_size] for i in range(len(x_pad)-(window_size-1))]
return np.array(windows)
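As a quick sanity check of the padding behaviour described in the two docstrings, here is a hedged sketch (the toy array and the printed values are illustrative, and it assumes the two functions defined above are in scope):
import numpy as np

x = np.arange(1, 6)                     # [1, 2, 3, 4, 5]
w = window(x, window_size=3)            # each row ends at the current step
wo = window_offset(x, window_size=3)    # each row ends one step earlier

print(w.shape, wo.shape)                # (5, 3) (5, 3)
print(w[0], wo[0])                      # [0. 0. 1.] [0. 0. 0.]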
| 29.848485
| 110
| 0.652792
| 157
| 985
| 3.949045
| 0.292994
| 0.225806
| 0.070968
| 0.048387
| 0.816129
| 0.816129
| 0.816129
| 0.816129
| 0.816129
| 0.816129
| 0
| 0.008011
| 0.239594
| 985
| 32
| 111
| 30.78125
| 0.81976
| 0.432487
| 0
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
718530bfdd45c9153ff40393dc316e36c4c368ee
| 1,714
|
py
|
Python
|
src/waldur_mastermind/marketplace_script/serializers.py
|
ahti87/waldur-mastermind
|
772268e62dfd8eadb387b2ec3789785817a6e621
|
[
"MIT"
] | null | null | null |
src/waldur_mastermind/marketplace_script/serializers.py
|
ahti87/waldur-mastermind
|
772268e62dfd8eadb387b2ec3789785817a6e621
|
[
"MIT"
] | null | null | null |
src/waldur_mastermind/marketplace_script/serializers.py
|
ahti87/waldur-mastermind
|
772268e62dfd8eadb387b2ec3789785817a6e621
|
[
"MIT"
] | null | null | null |
from rest_framework import serializers
class OrderItemSerializer(serializers.Serializer):
attributes = serializers.ReadOnlyField()
limits = serializers.ReadOnlyField()
project_uuid = serializers.ReadOnlyField(source='order.project.uuid')
project_name = serializers.ReadOnlyField(source='order.project.name')
customer_uuid = serializers.ReadOnlyField(source='order.project.customer.uuid')
customer_name = serializers.ReadOnlyField(source='order.project.customer.name')
offering_uuid = serializers.ReadOnlyField(source='offering.uuid')
offering_name = serializers.ReadOnlyField(source='offering.name')
plan_uuid = serializers.ReadOnlyField(source='plan.uuid')
plan_name = serializers.ReadOnlyField(source='plan.name')
resource_uuid = serializers.ReadOnlyField(source='resource.uuid')
resource_name = serializers.ReadOnlyField(source='resource.name')
class ResourceSerializer(serializers.Serializer):
attributes = serializers.ReadOnlyField()
limits = serializers.ReadOnlyField()
project_uuid = serializers.ReadOnlyField(source='project.uuid')
project_name = serializers.ReadOnlyField(source='project.name')
customer_uuid = serializers.ReadOnlyField(source='project.customer.uuid')
customer_name = serializers.ReadOnlyField(source='project.customer.name')
offering_uuid = serializers.ReadOnlyField(source='offering.uuid')
offering_name = serializers.ReadOnlyField(source='offering.name')
plan_uuid = serializers.ReadOnlyField(source='plan.uuid')
plan_name = serializers.ReadOnlyField(source='plan.name')
resource_uuid = serializers.ReadOnlyField(source='uuid')
resource_name = serializers.ReadOnlyField(source='name')
| 53.5625
| 83
| 0.784131
| 171
| 1,714
| 7.736842
| 0.122807
| 0.435374
| 0.453515
| 0.256992
| 0.920635
| 0.920635
| 0.835979
| 0.677249
| 0.585034
| 0.585034
| 0
| 0
| 0.105018
| 1,714
| 31
| 84
| 55.290323
| 0.862451
| 0
| 0
| 0.444444
| 0
| 0
| 0.162194
| 0.056009
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.037037
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
71ad9c74a2cd40a75b4c3ca7fda94b5b3ec1715d
| 15,004
|
py
|
Python
|
python/rz_linear/impl/RzLinearForward.py
|
Jokeren/RzLinear
|
d318d95254cd5c3dcf814774d22dc71179450aa0
|
[
"MIT"
] | null | null | null |
python/rz_linear/impl/RzLinearForward.py
|
Jokeren/RzLinear
|
d318d95254cd5c3dcf814774d22dc71179450aa0
|
[
"MIT"
] | null | null | null |
python/rz_linear/impl/RzLinearForward.py
|
Jokeren/RzLinear
|
d318d95254cd5c3dcf814774d22dc71179450aa0
|
[
"MIT"
] | null | null | null |
import torch
import triton
import triton.language as tl
def rz_linear_forward_tl(input: torch.tensor, hashed_weight: torch.tensor,
M: int, K: int, N: int, H: int,
R3: int, R2: int, R1: int, R0: int,
allow_tf32: bool = True, allow_autotune: bool = True,
BLOCK_SIZE_M: int = 64, BLOCK_SIZE_N: int = 64, BLOCK_SIZE_K: int = 32,
GROUP_SIZE: int = 4) -> torch.tensor:
'''
Compute input_tensor x hashed_weight and return an output tensor
Args:
input (Tensor): A MxK tensor
hashed_weight (Tensor): A 1xH tensor
M, K, N, H (int): Matrix dimensions
R3, R2, R1, R0 (int): Random numbers
allow_tf32 (bool): If tensor core is allowed
BLOCK_SIZE_M, BLOCK_SIZE_N, BLOCK_SIZE_K, GROUP_SIZE: Matrix tiling parameters for performance tuning
Returns:
output (Tensor): A MxN tensor
'''
# TODO(Keren): make rzlinear more general for any shape
assert (H > (BLOCK_SIZE_K * BLOCK_SIZE_N))
assert (M % 4 == 0)
assert (K % 4 == 0)
assert (N % 4 == 0)
# allocates output
output = torch.zeros((M, N), device=input.device, dtype=input.dtype)
def grid(META): return (
triton.cdiv(M, META['BLOCK_SIZE_M']) *
triton.cdiv(N, META['BLOCK_SIZE_N']),
)
if allow_tf32:
assert (K % 32 == 0)
else:
assert (K % 8 == 0)
if allow_autotune:
if allow_tf32:
rz_linear_forward_kernel_tf32[grid](
input, hashed_weight, output,
M, N, K, H,
input.stride(0), input.stride(1),
output.stride(0), output.stride(1),
R3=R3, R2=R2, R1=R1, R0=R0,
GROUP_SIZE=GROUP_SIZE
)
else:
# XXX(Keren): triton bug, cannot materialize allow_tf32
rz_linear_forward_kernel_fp32[grid](
input, hashed_weight, output,
M, N, K, H,
input.stride(0), input.stride(1),
output.stride(0), output.stride(1),
R3=R3, R2=R2, R1=R1, R0=R0,
GROUP_SIZE=GROUP_SIZE
)
else:
rz_linear_forward_kernel_notune[grid](
input, hashed_weight, output,
M, N, K, H,
input.stride(0), input.stride(1),
output.stride(0), output.stride(1),
allow_tf32=allow_tf32,
R3=R3, R2=R2, R1=R1, R0=R0,
num_stages=4,
num_warps=4,
BLOCK_SIZE_M=BLOCK_SIZE_M,
BLOCK_SIZE_N=BLOCK_SIZE_N,
BLOCK_SIZE_K=BLOCK_SIZE_K,
GROUP_SIZE=GROUP_SIZE
)
return output
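A minimal call sketch of rz_linear_forward_tl, assuming a CUDA device and that the module is importable from the path shown (the commented import path, the values of R3..R0, and the shapes are illustrative only, chosen to satisfy the divisibility asserts above):
import torch
# from rz_linear.impl.RzLinearForward import rz_linear_forward_tl   # assumed import path

M, K, N = 128, 64, 256           # all divisible by 4; K divisible by 32 for the tf32 path
H = 4096                         # hashed weight length; must exceed BLOCK_SIZE_K * BLOCK_SIZE_N
x = torch.randn(M, K, device="cuda")
w = torch.randn(H, device="cuda")    # flat view of the "1xH" hashed weight
y = rz_linear_forward_tl(x, w, M=M, K=K, N=N, H=H, R3=3, R2=5, R1=7, R0=11)
print(y.shape)                   # torch.Size([128, 256])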
@triton.autotune(
configs=[
# basic configs for compute-bound matmuls
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2),
triton.Config({'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 16,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 16,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 16,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
],
key=['M', 'N', 'K'],
)
@triton.jit
def rz_linear_forward_kernel_fp32(
# Pointers to matrices
a_ptr, b_ptr, c_ptr,
# Matrix dimensions
M, N, K, H,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension.
stride_am, stride_ak,
stride_cm, stride_cn,
# Random numbers
R3: tl.constexpr, R2: tl.constexpr, R1: tl.constexpr, R0: tl.constexpr,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE: tl.constexpr
):
rz_linear_forward_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr, M=M, N=N, K=K, H=H,
stride_am=stride_am, stride_ak=stride_ak, stride_cm=stride_cm, stride_cn=stride_cn,
allow_tf32=False, R3=R3, R2=R2, R1=R1, R0=R0,
BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
GROUP_SIZE=GROUP_SIZE)
@triton.autotune(
configs=[
# basic configs for compute-bound matmuls
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 256,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=8),
triton.Config({'BLOCK_SIZE_M': 256, 'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 256,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 32}, num_stages=4, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 32}, num_stages=5, num_warps=2),
triton.Config({'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_N': 32,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_N': 128,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 16, 'BLOCK_SIZE_N': 64,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 128, 'BLOCK_SIZE_N': 16,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 64, 'BLOCK_SIZE_N': 16,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
triton.Config({'BLOCK_SIZE_M': 32, 'BLOCK_SIZE_N': 16,
'BLOCK_SIZE_K': 32}, num_stages=3, num_warps=4),
],
key=['M', 'N', 'K'],
)
@triton.jit
def rz_linear_forward_kernel_tf32(
# Pointers to matrices
a_ptr, b_ptr, c_ptr,
# Matrix dimensions
M, N, K, H,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension.
stride_am, stride_ak,
stride_cm, stride_cn,
# Random numbers
R3: tl.constexpr, R2: tl.constexpr, R1: tl.constexpr, R0: tl.constexpr,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE: tl.constexpr
):
rz_linear_forward_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr, M=M, N=N, K=K, H=H,
stride_am=stride_am, stride_ak=stride_ak, stride_cm=stride_cm, stride_cn=stride_cn,
allow_tf32=True, R3=R3, R2=R2, R1=R1, R0=R0,
BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
GROUP_SIZE=GROUP_SIZE)
@triton.jit
def rz_linear_forward_kernel_notune(
# Pointers to matrices
a_ptr, b_ptr, c_ptr,
# Matrix dimensions
M, N, K, H,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension.
stride_am, stride_ak,
stride_cm, stride_cn,
allow_tf32: tl.constexpr,
# Random numbers
R3: tl.constexpr, R2: tl.constexpr, R1: tl.constexpr, R0: tl.constexpr,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE: tl.constexpr
):
rz_linear_forward_core(a_ptr=a_ptr, b_ptr=b_ptr, c_ptr=c_ptr, M=M, N=N, K=K, H=H,
stride_am=stride_am, stride_ak=stride_ak, stride_cm=stride_cm, stride_cn=stride_cn,
allow_tf32=allow_tf32, R3=R3, R2=R2, R1=R1, R0=R0,
BLOCK_SIZE_M=BLOCK_SIZE_M, BLOCK_SIZE_N=BLOCK_SIZE_N, BLOCK_SIZE_K=BLOCK_SIZE_K,
GROUP_SIZE=GROUP_SIZE)
@triton.jit
def rz_linear_forward_core(
# Pointers to matrices
a_ptr, b_ptr, c_ptr,
# Matrix dimensions
M, N, K, H,
# The stride variables represent how much to increase the ptr by when moving by 1
# element in a particular dimension.
stride_am, stride_ak,
stride_cm, stride_cn,
allow_tf32: tl.constexpr,
# Random numbers
R3: tl.constexpr, R2: tl.constexpr, R1: tl.constexpr, R0: tl.constexpr,
# Meta-parameters
BLOCK_SIZE_M: tl.constexpr, BLOCK_SIZE_N: tl.constexpr, BLOCK_SIZE_K: tl.constexpr,
GROUP_SIZE: tl.constexpr
):
"""Kernel for computing the matmul C = A x B.
A has shape (M, K), B has shape (K, N) and C has shape (M, N)
"""
pid = tl.program_id(axis=0)
num_pid_m = tl.cdiv(M, BLOCK_SIZE_M)
num_pid_n = tl.cdiv(N, BLOCK_SIZE_N)
num_pid_in_group = GROUP_SIZE * num_pid_n
group_id = pid // num_pid_in_group
first_pid_m = group_id * GROUP_SIZE
group_size_m = min(num_pid_m - first_pid_m, GROUP_SIZE)
pid_m = first_pid_m + (pid % group_size_m)
pid_n = (pid % num_pid_in_group) // group_size_m
# [BLOCK_SIZE_M, BLOCK_SIZE_K]
offs_am = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_k = tl.arange(0, BLOCK_SIZE_K)
a_ptrs = a_ptr + (offs_am[:, None] * stride_am +
offs_k[None, :] * stride_ak)
# Compute hash
# [H]
b_offset = b_ptr + offs_k[:, None] * \
BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)[None, :]
b_ptrs = b_offset + (0 * R3 + pid_n * R2 +
R1) % R0 % (H - BLOCK_SIZE_K * BLOCK_SIZE_N)
# [BLOCK_SIZE_M, BLOCK_SIZE_N]
c = tl.zeros((BLOCK_SIZE_M, BLOCK_SIZE_N), dtype=tl.float32)
for k in range(0, K//BLOCK_SIZE_K):
# Note that for simplicity, we don't apply a mask here.
# This means that if K is not a multiple of BLOCK_SIZE_K,
# this will access out-of-bounds memory and produce an
# error or (worse!) incorrect results.
# TODO(Keren): Add K checks
a = tl.load(a_ptrs)
b = tl.load(b_ptrs)
# We accumulate along the K dimension
c += tl.dot(a, b, allow_tf32=allow_tf32)
# Advance the ptrs to the next K block
a_ptrs += BLOCK_SIZE_K * stride_ak
b_ptrs = b_offset + ((k + 1) * R3 + pid_n * R2 +
R1) % R0 % (H - BLOCK_SIZE_K * BLOCK_SIZE_N)
# -----------------------------------------------------------
# Write back the block of the output matrix C
offs_cm = pid_m * BLOCK_SIZE_M + tl.arange(0, BLOCK_SIZE_M)
offs_cn = pid_n * BLOCK_SIZE_N + tl.arange(0, BLOCK_SIZE_N)
c_ptrs = c_ptr + stride_cm * \
offs_cm[:, None] + stride_cn * offs_cn[None, :]
c_mask = (offs_cm[:, None] < M) & (offs_cn[None, :] < N)
tl.store(c_ptrs, c, mask=c_mask)
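The grouped program-id arithmetic at the top of rz_linear_forward_core is the standard Triton "grouped ordering" trick; the following hedged, host-side sketch replays the same index math on a tiny 4x4 tile grid purely for illustration (not part of the original file):
def swizzle(pid, num_pid_m, num_pid_n, group_size):
    # Same arithmetic as in rz_linear_forward_core, evaluated on the host.
    num_pid_in_group = group_size * num_pid_n
    group_id = pid // num_pid_in_group
    first_pid_m = group_id * group_size
    group_size_m = min(num_pid_m - first_pid_m, group_size)
    pid_m = first_pid_m + (pid % group_size_m)
    pid_n = (pid % num_pid_in_group) // group_size_m
    return pid_m, pid_n

# 4x4 grid of output tiles, groups of 2 rows: consecutive pids stay within two
# rows of C, which keeps the reused A/B tiles resident in cache.
print([swizzle(p, 4, 4, 2) for p in range(16)])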
| 46.166154
| 110
| 0.585111
| 2,262
| 15,004
| 3.556145
| 0.080018
| 0.23272
| 0.088265
| 0.12009
| 0.795002
| 0.77996
| 0.76007
| 0.753729
| 0.750124
| 0.742914
| 0
| 0.052336
| 0.286857
| 15,004
| 324
| 111
| 46.308642
| 0.699439
| 0.129366
| 0
| 0.708502
| 0
| 0
| 0.130334
| 0
| 0
| 0
| 0
| 0.006173
| 0.024292
| 1
| 0.024292
| false
| 0
| 0.012146
| 0.004049
| 0.040486
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
71dc8498f0446b95d2288d377dd3adc1ba855f4a
| 4,724
|
py
|
Python
|
src/jntajis/tests/test_mj_translit.py
|
opencollector/jntajis-python
|
3e63b6901e93d1fd58623c672694caeceb815ac5
|
[
"BSD-3-Clause"
] | 10
|
2021-08-29T13:33:01.000Z
|
2022-03-03T22:20:27.000Z
|
src/jntajis/tests/test_mj_translit.py
|
opencollector/jntajis-python
|
3e63b6901e93d1fd58623c672694caeceb815ac5
|
[
"BSD-3-Clause"
] | null | null | null |
src/jntajis/tests/test_mj_translit.py
|
opencollector/jntajis-python
|
3e63b6901e93d1fd58623c672694caeceb815ac5
|
[
"BSD-3-Clause"
] | null | null | null |
import pytest
import jntajis
@pytest.mark.parametrize( ("input", "combo", "expected"),
[
# 斎
(
"\u658e",
jntajis.MJShrinkSchemeCombo.JIS_INCORPORATION_UCS_UNIFICATION_RULE,
["\u658e"],
),
(
"\u658e\U000e0102",
jntajis.MJShrinkSchemeCombo.JIS_INCORPORATION_UCS_UNIFICATION_RULE,
["\u658e"],
),
(
"\u658e",
jntajis.MJShrinkSchemeCombo.INFERENCE_BY_READING_AND_GLYPH,
["\u658e"],
),
(
"\u658e\U000e0102",
jntajis.MJShrinkSchemeCombo.INFERENCE_BY_READING_AND_GLYPH,
["\u658e"],
),
# 邉
(
"\u9089",
jntajis.MJShrinkSchemeCombo.JIS_INCORPORATION_UCS_UNIFICATION_RULE,
["\u9089"],
),
(
"\u9089\U000e0102",
jntajis.MJShrinkSchemeCombo.JIS_INCORPORATION_UCS_UNIFICATION_RULE,
["\u9089\U000e0102"],
),
(
"\u9089\U000e010f",
jntajis.MJShrinkSchemeCombo.JIS_INCORPORATION_UCS_UNIFICATION_RULE,
["\u9089"],
),
(
"\u9089\U000e0109",
jntajis.MJShrinkSchemeCombo.JIS_INCORPORATION_UCS_UNIFICATION_RULE,
["\u9089\U000e0109"],
),
(
"\u9089",
jntajis.MJShrinkSchemeCombo.INFERENCE_BY_READING_AND_GLYPH,
["\u9089"],
),
(
"\u9089\U000e0102",
jntajis.MJShrinkSchemeCombo.INFERENCE_BY_READING_AND_GLYPH,
["\u9089\U000e0102"],
),
(
"\u9089\U000e0109",
jntajis.MJShrinkSchemeCombo.INFERENCE_BY_READING_AND_GLYPH,
["\u9089\U000e0109"],
),
(
"\u9089\U000e010f",
jntajis.MJShrinkSchemeCombo.INFERENCE_BY_READING_AND_GLYPH,
["\u9089"],
),
(
"\u9089",
jntajis.MJShrinkSchemeCombo.MOJ_FAMILY_REGISTER_ACT_RELATED_NOTICE,
["\u8fba", "\u908a", "\u9089"],
),
(
"\u9089",
jntajis.MJShrinkSchemeCombo.JIS_INCORPORATION_UCS_UNIFICATION_RULE | jntajis.MJShrinkSchemeCombo.MOJ_FAMILY_REGISTER_ACT_RELATED_NOTICE,
["\u8fba", "\u908a", "\u9089"],
),
# 邊󠄏
(
"\u908a",
jntajis.MJShrinkSchemeCombo.JIS_INCORPORATION_UCS_UNIFICATION_RULE,
["\u908a"],
),
(
"\u908a\U000e0102",
jntajis.MJShrinkSchemeCombo.JIS_INCORPORATION_UCS_UNIFICATION_RULE,
["\u908a\U000e0102"],
),
(
"\u908a\U000e0108",
jntajis.MJShrinkSchemeCombo.JIS_INCORPORATION_UCS_UNIFICATION_RULE,
["\u908a"],
),
(
"\u908a\U000e0109",
jntajis.MJShrinkSchemeCombo.JIS_INCORPORATION_UCS_UNIFICATION_RULE,
["\u908a"],
),
(
"\u908a",
jntajis.MJShrinkSchemeCombo.INFERENCE_BY_READING_AND_GLYPH,
["\u908a"],
),
(
"\u908a\U000e0102",
jntajis.MJShrinkSchemeCombo.INFERENCE_BY_READING_AND_GLYPH,
["\u908a\U000e0102"],
),
(
"\u908a\U000e0108",
jntajis.MJShrinkSchemeCombo.INFERENCE_BY_READING_AND_GLYPH,
["\u908a"],
),
(
"\u908a\U000e0109",
jntajis.MJShrinkSchemeCombo.INFERENCE_BY_READING_AND_GLYPH,
["\u908a"],
),
(
"\u908a",
jntajis.MJShrinkSchemeCombo.MOJ_FAMILY_REGISTER_ACT_RELATED_NOTICE,
["\u8fba", "\u908a"],
),
(
"\u908a",
jntajis.MJShrinkSchemeCombo.JIS_INCORPORATION_UCS_UNIFICATION_RULE | jntajis.MJShrinkSchemeCombo.MOJ_FAMILY_REGISTER_ACT_RELATED_NOTICE,
["\u8fba", "\u908a"],
),
# 㑐
(
"\u3450",
jntajis.MJShrinkSchemeCombo.JIS_INCORPORATION_UCS_UNIFICATION_RULE,
["\u3450"],
),
(
"\u3450",
jntajis.MJShrinkSchemeCombo.INFERENCE_BY_READING_AND_GLYPH,
["\u3450"],
),
# あさぼらけ ("daybreak")
(
"\U0002AC2A",
jntajis.MJShrinkSchemeCombo.JIS_INCORPORATION_UCS_UNIFICATION_RULE,
["\U0002AC2A"],
),
(
"\U0002AC2A",
jntajis.MJShrinkSchemeCombo.INFERENCE_BY_READING_AND_GLYPH,
["\U0002AC2A"],
),
],
)
def test_mj_shrink_candidates(input, combo, expected):
assert jntajis.mj_shrink_candidates(input, combo) == expected
| 30.282051
| 148
| 0.529213
| 328
| 4,724
| 7.231707
| 0.143293
| 0.328836
| 0.171164
| 0.247892
| 0.903457
| 0.901349
| 0.851602
| 0.757167
| 0.508432
| 0.171164
| 0
| 0.1147
| 0.357748
| 4,724
| 155
| 149
| 30.477419
| 0.666777
| 0.002964
| 0
| 0.756757
| 0
| 0
| 0.128827
| 0
| 0
| 0
| 0
| 0
| 0.006757
| 1
| 0.006757
| false
| 0
| 0.013514
| 0
| 0.02027
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e0aab53fc604b9fc46f7177bc65f3eb136970311
| 17,531
|
py
|
Python
|
pytorch/metrics/ret_metrics.py
|
oliviaweng/imgclsmob
|
80fffbb46f986614b162c725b21f3d208597ac77
|
[
"MIT"
] | 2
|
2020-11-14T08:40:41.000Z
|
2021-11-08T09:30:41.000Z
|
pytorch/metrics/ret_metrics.py
|
ibrahim85/Sandbox-for-training-convolutional-networks-for-computer-vision
|
a1f1f52eecbb841fa878bff4d3c311b79864835d
|
[
"MIT"
] | null | null | null |
pytorch/metrics/ret_metrics.py
|
ibrahim85/Sandbox-for-training-convolutional-networks-for-computer-vision
|
a1f1f52eecbb841fa878bff4d3c311b79864835d
|
[
"MIT"
] | 2
|
2020-09-01T12:22:50.000Z
|
2020-10-24T22:02:35.000Z
|
"""
Evaluation Metrics for Image Retrieval.
"""
import numpy as np
import torch
from .metric import EvalMetric
__all__ = ['PointDetectionMatchRatio', 'PointDescriptionMatchRatio']
class PointDetectionMatchRatio(EvalMetric):
"""
Computes point detection match ratio (with mean residual).
Parameters
----------
pts_max_count : int
Maximal count of points.
axis : int, default 1
The axis that represents classes
name : str, default 'accuracy'
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self,
pts_max_count,
axis=1,
name="pt_det_ratio",
output_names=None,
label_names=None):
super(PointDetectionMatchRatio, self).__init__(
name,
axis=axis,
output_names=output_names,
label_names=label_names,
has_global_stats=True)
self.axis = axis
self.pts_max_count = pts_max_count
self.resudual_sum = 0.0
self.resudual_count = 0
def update_alt(self,
homography,
src_pts,
dst_pts,
src_confs,
dst_confs,
src_img_size,
dst_img_size):
"""
Updates the internal evaluation result.
Parameters
----------
homography : torch.Tensor
Homography (from source image to destination one).
src_pts : torch.Tensor
Detected points for the first (source) image.
dst_pts : torch.Tensor
Detected points for the second (destination) image.
src_confs : torch.Tensor
Confidences for detected points on the source image.
dst_confs : torch.Tensor
Confidences for detected points on the destination image.
src_img_size : tuple of 2 int
Size (H, W) of the source image.
dst_img_size : tuple of 2 int
Size (H, W) of the destination image.
"""
assert (src_confs.argsort(descending=True).cpu().detach().numpy() == np.arange(src_confs.shape[0])).all()
assert (dst_confs.argsort(descending=True).cpu().detach().numpy() == np.arange(dst_confs.shape[0])).all()
max_dist_sat_value = 1e5
eps = 1e-5
# print("src_img_size={}".format(src_img_size))
# print("dst_img_size={}".format(dst_img_size))
homography = homography.to(src_pts.device)
self.normalize_homography(homography)
homography_inv = self.calc_homography_inv(homography)
# print("homography={}".format(homography))
# print("homography_inv={}".format(homography_inv))
# print("src_pts={}".format(src_pts[:10, :].int()))
src_pts = src_pts.flip(dims=(1,))
dst_pts = dst_pts.flip(dims=(1,))
# print("src_pts={}".format(src_pts[:10, :].int()))
# print("src_pts.shape={}".format(src_pts.shape))
# print("dst_pts.shape={}".format(dst_pts.shape))
# print("src_pts={}".format(src_pts[:10, :].int()))
# print("dst_pts={}".format(dst_pts[:10, :].int()))
# with torch.no_grad():
src_hmg_pts = self.calc_homogeneous_coords(src_pts.float())
dst_hmg_pts = self.calc_homogeneous_coords(dst_pts.float())
# print("src_hmg_pts={}".format(src_hmg_pts[:10, :].int()))
# print("dst_hmg_pts={}".format(dst_hmg_pts[:10, :].int()))
src_hmg_pts, src_confs = self.filter_inside_points(
src_hmg_pts,
src_confs,
homography,
dst_img_size)
dst_hmg_pts, dst_confs = self.filter_inside_points(
dst_hmg_pts,
dst_confs,
homography_inv,
src_img_size)
# print("src_hmg_pts.shape={}".format(src_hmg_pts.shape))
# print("dst_hmg_pts.shape={}".format(dst_hmg_pts.shape))
#
# print("src_hmg_pts={}".format(src_hmg_pts[:10, :].int()))
# print("dst_hmg_pts={}".format(dst_hmg_pts[:10, :].int()))
src_pts_count = src_hmg_pts.shape[0]
dst_pts_count = dst_hmg_pts.shape[0]
src_pts_count2 = min(src_pts_count, self.pts_max_count)
src_hmg_pts, conf_thr = self.filter_best_points(
hmg_pts=src_hmg_pts,
confs=src_confs,
max_count=src_pts_count2,
min_conf=None)
dst_pts_count2 = min(dst_pts_count, self.pts_max_count)
dst_hmg_pts, _ = self.filter_best_points(
hmg_pts=dst_hmg_pts,
confs=dst_confs,
max_count=dst_pts_count2,
min_conf=conf_thr)
# print("src_hmg_pts.shape={}".format(src_hmg_pts.shape))
# print("dst_hmg_pts.shape={}".format(dst_hmg_pts.shape))
# print("src_hmg_pts={}".format(src_hmg_pts[:10, :].int()))
# print("dst_hmg_pts={}".format(dst_hmg_pts[:10, :].int()))
preds_dst_hmg_pts = self.transform_points(
src_hmg_pts,
homography)
# print("preds_dst_hmg_pts={}".format(preds_dst_hmg_pts[:10, :].int()))
cost = self.calc_pairwise_distances(x=preds_dst_hmg_pts, y=dst_hmg_pts).cpu().detach().numpy()
self.saturate_distance_matrix(
dist_mat=cost,
max_dist_thr=8.0,
max_dist_sat=max_dist_sat_value)
# print("cost.shape={}".format(cost.shape))
from scipy.optimize import linear_sum_assignment
row_ind, col_ind = linear_sum_assignment(cost)
# print("row_ind.shape={}".format(row_ind.shape))
# print("col_ind.shape={}".format(col_ind.shape))
resuduals = cost[row_ind, col_ind]
resuduals = resuduals[resuduals < (max_dist_sat_value - eps)]
resudual_count = len(resuduals)
self.sum_metric += resudual_count
self.global_sum_metric += resudual_count
self.num_inst += src_pts_count2
self.global_num_inst += src_pts_count2
print("ratio_resudual={}".format(float(resudual_count) / src_pts_count2))
if resudual_count != 0:
self.resudual_sum += resuduals.sum()
self.resudual_count += resudual_count
@staticmethod
def normalize_homography(homography):
homography /= homography[2, 2]
@staticmethod
def calc_homography_inv(homography):
homography_inv = homography.inverse()
PointDetectionMatchRatio.normalize_homography(homography_inv)
return homography_inv
@staticmethod
def calc_homogeneous_coords(pts):
hmg_pts = torch.cat((pts, torch.ones((pts.shape[0], 1), dtype=pts.dtype, device=pts.device)), dim=1)
return hmg_pts
@staticmethod
def calc_cartesian_coords(hmg_pts):
pts = hmg_pts[:, :2]
return pts
@staticmethod
def transform_points(src_hmg_pts,
homography):
# print("transform_points -> src_hmg_pts.shape={}".format(src_hmg_pts.shape))
# print("transform_points -> homography.shape={}".format(homography.shape))
# print("homography={}".format(homography))
# print("transform_points -> src_hmg_pts={}".format(src_hmg_pts[:10, :].int()))
dst_hmg_pts = torch.matmul(src_hmg_pts, homography.t())
# print("transform_points -> dst_hmg_pts={}".format(dst_hmg_pts[:10, :].int()))
# print("transform_points -> dst_hmg_pts.shape={}".format(dst_hmg_pts.shape))
dst_hmg_pts /= dst_hmg_pts[:, 2:]
return dst_hmg_pts
@staticmethod
def calc_inside_pts_mask(pts,
img_size):
eps = 1e-3
border_size = 1.0
border = border_size - eps
mask = (pts[:, 0] >= border) & (pts[:, 0] < img_size[0] - border) &\
(pts[:, 1] >= border) & (pts[:, 1] < img_size[1] - border)
return mask
@staticmethod
def filter_inside_points(src_hmg_pts,
src_confs,
homography,
dst_img_size):
# print("fip->src_hmg_pts.shape={}".format(src_hmg_pts.shape))
# print("fip->src_hmg_pts={}".format(src_hmg_pts[:10, :].int()))
# print("fip->src_confs.shape={}".format(src_confs.shape))
# print("fip->src_confs={}".format(src_confs[:10]))
# print("homography_inv={}".format(homography))
dst_hmg_pts = PointDetectionMatchRatio.transform_points(src_hmg_pts, homography)
# print("fip->dst_hmg_pts.shape={}".format(dst_hmg_pts.shape))
# print("fip->dst_hmg_pts={}".format(dst_hmg_pts[:10, :]))
mask = PointDetectionMatchRatio.calc_inside_pts_mask(dst_hmg_pts, dst_img_size)
# print("fip->mask={}".format(mask[:10]))
# print("fip->mask.sum()={}".format(mask.sum()))
return src_hmg_pts[mask], src_confs[mask]
@staticmethod
def filter_best_points(hmg_pts,
confs,
max_count,
min_conf=None):
if min_conf is not None:
max_ind = (confs < min_conf).nonzero()[0, 0].item()
max_count = max(max_count, max_ind)
inds = confs.argsort(descending=True)[:max_count]
return hmg_pts[inds], confs[inds][-1]
@staticmethod
def calc_pairwise_distances(x, y):
diff = x.unsqueeze(1) - y.unsqueeze(0)
return torch.sum(diff * diff, dim=-1).sqrt()
@staticmethod
def saturate_distance_matrix(dist_mat,
max_dist_thr,
max_dist_sat):
dist_mat[dist_mat > max_dist_thr] = max_dist_sat
class PointDescriptionMatchRatio(EvalMetric):
"""
Computes point description match ratio.
Parameters
----------
pts_max_count : int
Maximal count of points.
dist_ratio_thr : float, default 0.95
Distance ratio threshold for point filtering.
axis : int, default 1
The axis that represents classes
name : str, default 'accuracy'
Name of this metric instance for display.
output_names : list of str, or None, default None
Name of predictions that should be used when updating with update_dict.
By default include all predictions.
label_names : list of str, or None, default None
Name of labels that should be used when updating with update_dict.
By default include all labels.
"""
def __init__(self,
pts_max_count,
dist_ratio_thr=0.95,
axis=1,
name="pt_desc_ratio",
output_names=None,
label_names=None):
super(PointDescriptionMatchRatio, self).__init__(
name,
axis=axis,
output_names=output_names,
label_names=label_names,
has_global_stats=True)
self.axis = axis
self.pts_max_count = pts_max_count
self.dist_ratio_thr = dist_ratio_thr
self.resudual_sum = 0.0
self.resudual_count = 0
def update_alt(self,
homography,
src_pts,
dst_pts,
src_descs,
dst_descs,
src_img_size,
dst_img_size):
"""
Updates the internal evaluation result.
Parameters
----------
homography : torch.Tensor
Homography (from source image to destination one).
src_pts : torch.Tensor
Detected points for the first (source) image.
dst_pts : torch.Tensor
Detected points for the second (destination) image.
src_descs : torch.Tensor
Descriptors for detected points on the source image.
dst_descs : torch.Tensor
Descriptors for detected points on the destination image.
src_img_size : tuple of 2 int
Size (H, W) of the source image.
dst_img_size : tuple of 2 int
Size (H, W) of the destination image.
"""
# max_dist_sat_value = 1e5
# eps = 1e-5
homography = homography.to(src_pts.device)
self.normalize_homography(homography)
homography_inv = self.calc_homography_inv(homography)
src_pts = src_pts.flip(dims=(1,))
dst_pts = dst_pts.flip(dims=(1,))
src_hmg_pts = self.calc_homogeneous_coords(src_pts.float())
dst_hmg_pts = self.calc_homogeneous_coords(dst_pts.float())
src_hmg_pts = self.filter_inside_points(
src_hmg_pts,
homography,
dst_img_size)
dst_hmg_pts = self.filter_inside_points(
dst_hmg_pts,
homography_inv,
src_img_size)
src_pts_count = src_hmg_pts.shape[0]
dst_pts_count = dst_hmg_pts.shape[0]
src_pts_count2 = min(src_pts_count, self.pts_max_count * 10)
src_hmg_pts, src_descs = self.filter_best_points(
hmg_pts=src_hmg_pts,
descs=src_descs,
max_count=src_pts_count2)
dst_pts_count2 = min(dst_pts_count, self.pts_max_count * 10)
dst_hmg_pts, dst_descs = self.filter_best_points(
hmg_pts=dst_hmg_pts,
descs=dst_descs,
max_count=dst_pts_count2)
dist_mat = self.calc_pairwise_distances(x=src_descs, y=dst_descs)
vals, inds = dist_mat.topk(k=2, dim=1, largest=True, sorted=True)
inds = inds[:, 0][(vals[:, 1] / vals[:, 0]) < self.dist_ratio_thr]  # use the configured ratio threshold instead of a hard-coded 0.95
src_hmg_pts = src_hmg_pts[inds]
preds_dst_hmg_pts = self.transform_points(
src_hmg_pts,
homography)
print(preds_dst_hmg_pts)
# self.saturate_distance_matrix(
# dist_mat=cost,
# max_dist_thr=8.0,
# max_dist_sat=max_dist_sat_value)
#
# # print("cost.shape={}".format(cost.shape))
#
# from scipy.optimize import linear_sum_assignment
# row_ind, col_ind = linear_sum_assignment(cost)
#
# # print("row_ind.shape={}".format(row_ind.shape))
# # print("col_ind.shape={}".format(col_ind.shape))
#
# resuduals = cost[row_ind, col_ind]
# resuduals = resuduals[resuduals < (max_dist_sat_value - eps)]
# resudual_count = len(resuduals)
resudual_count = 1
self.sum_metric += resudual_count
self.global_sum_metric += resudual_count
self.num_inst += src_pts_count2
self.global_num_inst += src_pts_count2
print("ratio_resudual={}".format(float(resudual_count) / src_pts_count2))
@staticmethod
def normalize_homography(homography):
homography /= homography[2, 2]
@staticmethod
def calc_homography_inv(homography):
homography_inv = homography.inverse()
PointDetectionMatchRatio.normalize_homography(homography_inv)
return homography_inv
@staticmethod
def calc_homogeneous_coords(pts):
hmg_pts = torch.cat((pts, torch.ones((pts.shape[0], 1), dtype=pts.dtype, device=pts.device)), dim=1)
return hmg_pts
@staticmethod
def calc_cartesian_coords(hmg_pts):
pts = hmg_pts[:, :2]
return pts
@staticmethod
def transform_points(src_hmg_pts,
homography):
# print("transform_points -> src_hmg_pts.shape={}".format(src_hmg_pts.shape))
# print("transform_points -> homography.shape={}".format(homography.shape))
# print("homography={}".format(homography))
# print("transform_points -> src_hmg_pts={}".format(src_hmg_pts[:10, :].int()))
dst_hmg_pts = torch.matmul(src_hmg_pts, homography.t())
# print("transform_points -> dst_hmg_pts={}".format(dst_hmg_pts[:10, :].int()))
# print("transform_points -> dst_hmg_pts.shape={}".format(dst_hmg_pts.shape))
dst_hmg_pts /= dst_hmg_pts[:, 2:]
return dst_hmg_pts
@staticmethod
def calc_inside_pts_mask(pts,
img_size):
eps = 1e-3
border_size = 1.0
border = border_size - eps
mask = (pts[:, 0] >= border) & (pts[:, 0] < img_size[0] - border) &\
(pts[:, 1] >= border) & (pts[:, 1] < img_size[1] - border)
return mask
@staticmethod
def filter_inside_points(src_hmg_pts,
homography,
dst_img_size):
dst_hmg_pts = PointDetectionMatchRatio.transform_points(src_hmg_pts, homography)
mask = PointDetectionMatchRatio.calc_inside_pts_mask(dst_hmg_pts, dst_img_size)
return src_hmg_pts[mask]
@staticmethod
def filter_best_points(hmg_pts,
descs,
max_count):
return hmg_pts[:max_count], descs[:max_count]
@staticmethod
def calc_pairwise_distances(x, y):
diff = x.unsqueeze(1) - y.unsqueeze(0)
return torch.sum(diff * diff, dim=-1).sqrt()
@staticmethod
def saturate_distance_matrix(dist_mat,
max_dist_thr,
max_dist_sat):
dist_mat[dist_mat > max_dist_thr] = max_dist_sat
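Both metric classes rely on the same homogeneous-coordinate transform (calc_homogeneous_coords followed by transform_points); below is a hedged, self-contained sketch of that arithmetic on a single point and a toy translation homography (values illustrative only, not part of the original file):
import torch

homography = torch.tensor([[1.0, 0.0, 10.0],
                           [0.0, 1.0, 5.0],
                           [0.0, 0.0, 1.0]])            # pure translation by (10, 5)
pts = torch.tensor([[2.0, 3.0]])
hmg_pts = torch.cat((pts, torch.ones((1, 1))), dim=1)   # -> [[2., 3., 1.]]
dst = torch.matmul(hmg_pts, homography.t())
dst = dst / dst[:, 2:]                                  # renormalise the w coordinate
print(dst[:, :2])                                       # tensor([[12.,  8.]])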
| 35.559838
| 113
| 0.598597
| 2,163
| 17,531
| 4.526121
| 0.090153
| 0.071706
| 0.048723
| 0.02145
| 0.860163
| 0.814505
| 0.813892
| 0.803882
| 0.774259
| 0.718897
| 0
| 0.012071
| 0.291198
| 17,531
| 492
| 114
| 35.632114
| 0.775793
| 0.320233
| 0
| 0.726236
| 0
| 0
| 0.00957
| 0.00439
| 0
| 0
| 0
| 0
| 0.007605
| 1
| 0.091255
| false
| 0
| 0.015209
| 0.003802
| 0.174905
| 0.011407
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e0b15aa2f7cc426a07def48a96672a55309d08bc
| 65,808
|
py
|
Python
|
tests/casefiles/SizersSizeTests_nowrap.py
|
ardovm/wxGlade
|
a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb
|
[
"MIT"
] | 225
|
2018-03-26T11:23:22.000Z
|
2022-03-24T09:44:08.000Z
|
tests/casefiles/SizersSizeTests_nowrap.py
|
ardovm/wxGlade
|
a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb
|
[
"MIT"
] | 403
|
2018-01-03T19:47:28.000Z
|
2018-03-23T17:43:39.000Z
|
tests/casefiles/SizersSizeTests_nowrap.py
|
ardovm/wxGlade
|
a4cf8e65bcc6df5f65cf8ca5c49b9a628bf1e8eb
|
[
"MIT"
] | 47
|
2018-04-08T16:48:38.000Z
|
2021-12-21T20:08:44.000Z
|
#!/usr/bin/env python
# -*- coding: UTF-8 -*-
#
# generated by wxGlade
#
import wx
# begin wxGlade: dependencies
import gettext
# end wxGlade
# begin wxGlade: extracode
# end wxGlade
class MyFrame(wx.Frame):
def __init__(self, *args, **kwds):
# begin wxGlade: MyFrame.__init__
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.SetSize((600, 400))
self.SetTitle(_("frame"))
sizer_1 = wx.BoxSizer(wx.VERTICAL)
self.notebook_1 = wx.Notebook(self, wx.ID_ANY)
sizer_1.Add(self.notebook_1, 1, wx.EXPAND, 0)
self.notebook_1_pane_1 = wx.Panel(self.notebook_1, wx.ID_ANY)
self.notebook_1.AddPage(self.notebook_1_pane_1, _("BoxSizer"))
sizer_2 = wx.BoxSizer(wx.HORIZONTAL)
sizer_3_nosize = wx.BoxSizer(wx.VERTICAL)
sizer_2.Add(sizer_3_nosize, 1, wx.EXPAND, 0)
self._0_N_N = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
sizer_3_nosize.Add(self._0_N_N, 0, 0, 0)
self._1_N_N = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
sizer_3_nosize.Add(self._1_N_N, 1, 0, 0)
self._0_X_N = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
sizer_3_nosize.Add(self._0_X_N, 0, wx.EXPAND, 0)
self._1_X_N = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
sizer_3_nosize.Add(self._1_X_N, 1, wx.EXPAND, 0)
self._0_N_F = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
sizer_3_nosize.Add(self._0_N_F, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
sizer_3_nosize.Add(self._1_N_F, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
sizer_3_nosize.Add(self._0_X_F, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
sizer_3_nosize.Add(self._1_X_F, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
sizer_4_abs = wx.BoxSizer(wx.VERTICAL)
sizer_2.Add(sizer_4_abs, 1, wx.EXPAND, 0)
self._0_N_N_copy = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_N_N_copy.SetMinSize((100, 21))
sizer_4_abs.Add(self._0_N_N_copy, 0, 0, 0)
self._1_N_N_copy = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_N_N_copy.SetMinSize((100, 21))
sizer_4_abs.Add(self._1_N_N_copy, 1, 0, 0)
self._0_X_N_copy = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_X_N_copy.SetMinSize((100, 21))
sizer_4_abs.Add(self._0_X_N_copy, 0, wx.EXPAND, 0)
self._1_X_N_copy = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_X_N_copy.SetMinSize((100, 21))
sizer_4_abs.Add(self._1_X_N_copy, 1, wx.EXPAND, 0)
self._0_N_F_copy = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_N_F_copy.SetMinSize((100, 21))
sizer_4_abs.Add(self._0_N_F_copy, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_N_F_copy.SetMinSize((100, 21))
sizer_4_abs.Add(self._1_N_F_copy, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_X_F_copy.SetMinSize((100, 21))
sizer_4_abs.Add(self._0_X_F_copy, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_X_F_copy.SetMinSize((100, 21))
sizer_4_abs.Add(self._1_X_F_copy, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
sizer_5_dlg = wx.BoxSizer(wx.VERTICAL)
sizer_2.Add(sizer_5_dlg, 1, wx.EXPAND, 0)
self._0_N_N_copy_1 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_N_N_copy_1.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_1, (100, 21)))
sizer_5_dlg.Add(self._0_N_N_copy_1, 0, 0, 0)
self._1_N_N_copy_1 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_N_N_copy_1.SetMinSize(wx.DLG_SZE(self._1_N_N_copy_1, (100, 21)))
sizer_5_dlg.Add(self._1_N_N_copy_1, 1, 0, 0)
self._0_X_N_copy_1 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_X_N_copy_1.SetMinSize(wx.DLG_SZE(self._0_X_N_copy_1, (100, 21)))
sizer_5_dlg.Add(self._0_X_N_copy_1, 0, wx.EXPAND, 0)
self._1_X_N_copy_1 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_X_N_copy_1.SetMinSize(wx.DLG_SZE(self._1_X_N_copy_1, (100, 21)))
sizer_5_dlg.Add(self._1_X_N_copy_1, 1, wx.EXPAND, 0)
self._0_N_F_copy_1 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_N_F_copy_1.SetMinSize(wx.DLG_SZE(self._0_N_F_copy_1, (100, 21)))
sizer_5_dlg.Add(self._0_N_F_copy_1, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_1 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_N_F_copy_1.SetMinSize(wx.DLG_SZE(self._1_N_F_copy_1, (100, 21)))
sizer_5_dlg.Add(self._1_N_F_copy_1, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_1 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_X_F_copy_1.SetMinSize(wx.DLG_SZE(self._0_X_F_copy_1, (100, 21)))
sizer_5_dlg.Add(self._0_X_F_copy_1, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_1 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_X_F_copy_1.SetMinSize(wx.DLG_SZE(self._1_X_F_copy_1, (100, 21)))
sizer_5_dlg.Add(self._1_X_F_copy_1, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
sizer_6_m1abs = wx.BoxSizer(wx.VERTICAL)
sizer_2.Add(sizer_6_m1abs, 1, wx.EXPAND, 0)
self._0_N_N_copy_2 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_N_N_copy_2.SetMinSize((-1, 21))
sizer_6_m1abs.Add(self._0_N_N_copy_2, 0, 0, 0)
self._1_N_N_copy_2 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_N_N_copy_2.SetMinSize((-1, 21))
sizer_6_m1abs.Add(self._1_N_N_copy_2, 1, 0, 0)
self._0_X_N_copy_2 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_X_N_copy_2.SetMinSize((-1, 21))
sizer_6_m1abs.Add(self._0_X_N_copy_2, 0, wx.EXPAND, 0)
self._1_X_N_copy_2 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_X_N_copy_2.SetMinSize((-1, 21))
sizer_6_m1abs.Add(self._1_X_N_copy_2, 1, wx.EXPAND, 0)
self._0_N_F_copy_2 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_N_F_copy_2.SetMinSize((-1, 21))
sizer_6_m1abs.Add(self._0_N_F_copy_2, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_2 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_N_F_copy_2.SetMinSize((-1, 21))
sizer_6_m1abs.Add(self._1_N_F_copy_2, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_2 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_X_F_copy_2.SetMinSize((-1, 21))
sizer_6_m1abs.Add(self._0_X_F_copy_2, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_2 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_X_F_copy_2.SetMinSize((-1, 21))
sizer_6_m1abs.Add(self._1_X_F_copy_2, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
sizer_6_absm1 = wx.BoxSizer(wx.VERTICAL)
sizer_2.Add(sizer_6_absm1, 1, wx.EXPAND, 0)
self._0_N_N_copy_3 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_N_N_copy_3.SetMinSize((100, -1))
sizer_6_absm1.Add(self._0_N_N_copy_3, 0, 0, 0)
self._1_N_N_copy_3 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_N_N_copy_3.SetMinSize((100, -1))
sizer_6_absm1.Add(self._1_N_N_copy_3, 1, 0, 0)
self._0_X_N_copy_3 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_X_N_copy_3.SetMinSize((100, -1))
sizer_6_absm1.Add(self._0_X_N_copy_3, 0, wx.EXPAND, 0)
self._1_X_N_copy_3 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_X_N_copy_3.SetMinSize((100, -1))
sizer_6_absm1.Add(self._1_X_N_copy_3, 1, wx.EXPAND, 0)
self._0_N_F_copy_3 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_N_F_copy_3.SetMinSize((100, -1))
sizer_6_absm1.Add(self._0_N_F_copy_3, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_3 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_N_F_copy_3.SetMinSize((100, -1))
sizer_6_absm1.Add(self._1_N_F_copy_3, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_3 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_X_F_copy_3.SetMinSize((100, -1))
sizer_6_absm1.Add(self._0_X_F_copy_3, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_3 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_X_F_copy_3.SetMinSize((100, -1))
sizer_6_absm1.Add(self._1_X_F_copy_3, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
sizer_6_m1dlg = wx.BoxSizer(wx.VERTICAL)
sizer_2.Add(sizer_6_m1dlg, 1, wx.EXPAND, 0)
self._0_N_N_copy_4 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_N_N_copy_4.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_4, (-1, 100)))
sizer_6_m1dlg.Add(self._0_N_N_copy_4, 0, 0, 0)
self._1_N_N_copy_4 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_N_N_copy_4.SetMinSize(wx.DLG_SZE(self._1_N_N_copy_4, (-1, 100)))
sizer_6_m1dlg.Add(self._1_N_N_copy_4, 1, 0, 0)
self._0_X_N_copy_4 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_X_N_copy_4.SetMinSize(wx.DLG_SZE(self._0_X_N_copy_4, (-1, 100)))
sizer_6_m1dlg.Add(self._0_X_N_copy_4, 0, wx.EXPAND, 0)
self._1_X_N_copy_4 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_X_N_copy_4.SetMinSize(wx.DLG_SZE(self._1_X_N_copy_4, (-1, 100)))
sizer_6_m1dlg.Add(self._1_X_N_copy_4, 1, wx.EXPAND, 0)
self._0_N_F_copy_4 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_N_F_copy_4.SetMinSize(wx.DLG_SZE(self._0_N_F_copy_4, (-1, 100)))
sizer_6_m1dlg.Add(self._0_N_F_copy_4, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_4 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_N_F_copy_4.SetMinSize(wx.DLG_SZE(self._1_N_F_copy_4, (-1, 100)))
sizer_6_m1dlg.Add(self._1_N_F_copy_4, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_4 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
sizer_6_m1dlg.Add(self._0_X_F_copy_4, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_4 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
sizer_6_m1dlg.Add(self._1_X_F_copy_4, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
sizer_6_dlgm1 = wx.BoxSizer(wx.VERTICAL)
sizer_2.Add(sizer_6_dlgm1, 1, wx.EXPAND, 0)
self._0_N_N_copy_5 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_N_N_copy_5.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_5, (100, -1)))
sizer_6_dlgm1.Add(self._0_N_N_copy_5, 0, 0, 0)
self._1_N_N_copy_5 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_N_N_copy_5.SetMinSize(wx.DLG_SZE(self._1_N_N_copy_5, (100, -1)))
sizer_6_dlgm1.Add(self._1_N_N_copy_5, 1, 0, 0)
self._0_X_N_copy_5 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_X_N_copy_5.SetMinSize(wx.DLG_SZE(self._0_X_N_copy_5, (100, -1)))
sizer_6_dlgm1.Add(self._0_X_N_copy_5, 0, wx.EXPAND, 0)
self._1_X_N_copy_5 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_X_N_copy_5.SetMinSize(wx.DLG_SZE(self._1_X_N_copy_5, (100, -1)))
sizer_6_dlgm1.Add(self._1_X_N_copy_5, 1, wx.EXPAND, 0)
self._0_N_F_copy_5 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_N_F_copy_5.SetMinSize(wx.DLG_SZE(self._0_N_F_copy_5, (100, -1)))
sizer_6_dlgm1.Add(self._0_N_F_copy_5, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_5 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_N_F_copy_5.SetMinSize(wx.DLG_SZE(self._1_N_F_copy_5, (100, -1)))
sizer_6_dlgm1.Add(self._1_N_F_copy_5, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_5 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._0_X_F_copy_5.SetMinSize(wx.DLG_SZE(self._0_X_F_copy_5, (100, -1)))
sizer_6_dlgm1.Add(self._0_X_F_copy_5, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_5 = wx.TextCtrl(self.notebook_1_pane_1, wx.ID_ANY, "")
self._1_X_F_copy_5.SetMinSize(wx.DLG_SZE(self._1_X_F_copy_5, (100, -1)))
sizer_6_dlgm1.Add(self._1_X_F_copy_5, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self.notebook_1_StaticBoxSizer = wx.Panel(self.notebook_1, wx.ID_ANY)
self.notebook_1.AddPage(self.notebook_1_StaticBoxSizer, _("StaticBoxSizer"))
sizer_3 = wx.BoxSizer(wx.HORIZONTAL)
sizer_3_nosize_copy = wx.StaticBoxSizer(wx.StaticBox(self.notebook_1_StaticBoxSizer, wx.ID_ANY, _("sizer_3_nosize_copy")), wx.VERTICAL)
sizer_3.Add(sizer_3_nosize_copy, 1, wx.EXPAND, 0)
self._0_N_N_copy_6 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
sizer_3_nosize_copy.Add(self._0_N_N_copy_6, 0, 0, 0)
self._1_N_N_copy_6 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
sizer_3_nosize_copy.Add(self._1_N_N_copy_6, 1, 0, 0)
self._0_X_N_copy_6 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
sizer_3_nosize_copy.Add(self._0_X_N_copy_6, 0, wx.EXPAND, 0)
self._1_X_N_copy_6 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
sizer_3_nosize_copy.Add(self._1_X_N_copy_6, 1, wx.EXPAND, 0)
self._0_N_F_copy_6 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
sizer_3_nosize_copy.Add(self._0_N_F_copy_6, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_6 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
sizer_3_nosize_copy.Add(self._1_N_F_copy_6, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_6 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
sizer_3_nosize_copy.Add(self._0_X_F_copy_6, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_6 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
sizer_3_nosize_copy.Add(self._1_X_F_copy_6, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
sizer_4_abs_copy = wx.StaticBoxSizer(wx.StaticBox(self.notebook_1_StaticBoxSizer, wx.ID_ANY, _("sizer_4_abs_copy")), wx.VERTICAL)
sizer_3.Add(sizer_4_abs_copy, 1, wx.EXPAND, 0)
self._0_N_N_copy_copy = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_N_N_copy_copy.SetMinSize((100, 21))
sizer_4_abs_copy.Add(self._0_N_N_copy_copy, 0, 0, 0)
self._1_N_N_copy_copy = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_N_N_copy_copy.SetMinSize((100, 21))
sizer_4_abs_copy.Add(self._1_N_N_copy_copy, 1, 0, 0)
self._0_X_N_copy_copy = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_X_N_copy_copy.SetMinSize((100, 21))
sizer_4_abs_copy.Add(self._0_X_N_copy_copy, 0, wx.EXPAND, 0)
self._1_X_N_copy_copy = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_X_N_copy_copy.SetMinSize((100, 21))
sizer_4_abs_copy.Add(self._1_X_N_copy_copy, 1, wx.EXPAND, 0)
self._0_N_F_copy_copy = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_N_F_copy_copy.SetMinSize((100, 21))
sizer_4_abs_copy.Add(self._0_N_F_copy_copy, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_copy = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_N_F_copy_copy.SetMinSize((100, 21))
sizer_4_abs_copy.Add(self._1_N_F_copy_copy, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_copy = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_X_F_copy_copy.SetMinSize((100, 21))
sizer_4_abs_copy.Add(self._0_X_F_copy_copy, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_copy = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_X_F_copy_copy.SetMinSize((100, 21))
sizer_4_abs_copy.Add(self._1_X_F_copy_copy, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
sizer_5_dlg_copy = wx.StaticBoxSizer(wx.StaticBox(self.notebook_1_StaticBoxSizer, wx.ID_ANY, _("sizer_5_dlg_copy")), wx.VERTICAL)
sizer_3.Add(sizer_5_dlg_copy, 1, wx.EXPAND, 0)
self._0_N_N_copy_7 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_N_N_copy_7.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_7, (100, 21)))
sizer_5_dlg_copy.Add(self._0_N_N_copy_7, 0, 0, 0)
self._1_N_N_copy_7 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_N_N_copy_7.SetMinSize(wx.DLG_SZE(self._1_N_N_copy_7, (100, 21)))
sizer_5_dlg_copy.Add(self._1_N_N_copy_7, 1, 0, 0)
self._0_X_N_copy_7 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_X_N_copy_7.SetMinSize(wx.DLG_SZE(self._0_X_N_copy_7, (100, 21)))
sizer_5_dlg_copy.Add(self._0_X_N_copy_7, 0, wx.EXPAND, 0)
self._1_X_N_copy_7 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_X_N_copy_7.SetMinSize(wx.DLG_SZE(self._1_X_N_copy_7, (100, 21)))
sizer_5_dlg_copy.Add(self._1_X_N_copy_7, 1, wx.EXPAND, 0)
self._0_N_F_copy_7 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_N_F_copy_7.SetMinSize(wx.DLG_SZE(self._0_N_F_copy_7, (100, 21)))
sizer_5_dlg_copy.Add(self._0_N_F_copy_7, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_7 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_N_F_copy_7.SetMinSize(wx.DLG_SZE(self._1_N_F_copy_7, (100, 21)))
sizer_5_dlg_copy.Add(self._1_N_F_copy_7, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_7 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_X_F_copy_7.SetMinSize(wx.DLG_SZE(self._0_X_F_copy_7, (100, 21)))
sizer_5_dlg_copy.Add(self._0_X_F_copy_7, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_7 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_X_F_copy_7.SetMinSize(wx.DLG_SZE(self._1_X_F_copy_7, (100, 21)))
sizer_5_dlg_copy.Add(self._1_X_F_copy_7, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
sizer_6_m1abs_copy = wx.StaticBoxSizer(wx.StaticBox(self.notebook_1_StaticBoxSizer, wx.ID_ANY, _("sizer_6_m1abs_copy")), wx.VERTICAL)
sizer_3.Add(sizer_6_m1abs_copy, 1, wx.EXPAND, 0)
self._0_N_N_copy_8 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_N_N_copy_8.SetMinSize((-1, 21))
sizer_6_m1abs_copy.Add(self._0_N_N_copy_8, 0, 0, 0)
self._1_N_N_copy_8 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_N_N_copy_8.SetMinSize((-1, 21))
sizer_6_m1abs_copy.Add(self._1_N_N_copy_8, 1, 0, 0)
self._0_X_N_copy_8 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_X_N_copy_8.SetMinSize((-1, 21))
sizer_6_m1abs_copy.Add(self._0_X_N_copy_8, 0, wx.EXPAND, 0)
self._1_X_N_copy_8 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_X_N_copy_8.SetMinSize((-1, 21))
sizer_6_m1abs_copy.Add(self._1_X_N_copy_8, 1, wx.EXPAND, 0)
self._0_N_F_copy_8 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_N_F_copy_8.SetMinSize((-1, 21))
sizer_6_m1abs_copy.Add(self._0_N_F_copy_8, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_8 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_N_F_copy_8.SetMinSize((-1, 21))
sizer_6_m1abs_copy.Add(self._1_N_F_copy_8, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_8 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_X_F_copy_8.SetMinSize((-1, 21))
sizer_6_m1abs_copy.Add(self._0_X_F_copy_8, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_8 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_X_F_copy_8.SetMinSize((-1, 21))
sizer_6_m1abs_copy.Add(self._1_X_F_copy_8, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
sizer_6_absm1_copy = wx.StaticBoxSizer(wx.StaticBox(self.notebook_1_StaticBoxSizer, wx.ID_ANY, _("sizer_6_absm1_copy")), wx.VERTICAL)
sizer_3.Add(sizer_6_absm1_copy, 1, wx.EXPAND, 0)
self._0_N_N_copy_9 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_N_N_copy_9.SetMinSize((100, -1))
sizer_6_absm1_copy.Add(self._0_N_N_copy_9, 0, 0, 0)
self._1_N_N_copy_9 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_N_N_copy_9.SetMinSize((100, -1))
sizer_6_absm1_copy.Add(self._1_N_N_copy_9, 1, 0, 0)
self._0_X_N_copy_9 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_X_N_copy_9.SetMinSize((100, -1))
sizer_6_absm1_copy.Add(self._0_X_N_copy_9, 0, wx.EXPAND, 0)
self._1_X_N_copy_9 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_X_N_copy_9.SetMinSize((100, -1))
sizer_6_absm1_copy.Add(self._1_X_N_copy_9, 1, wx.EXPAND, 0)
self._0_N_F_copy_9 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_N_F_copy_9.SetMinSize((100, -1))
sizer_6_absm1_copy.Add(self._0_N_F_copy_9, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_9 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_N_F_copy_9.SetMinSize((100, -1))
sizer_6_absm1_copy.Add(self._1_N_F_copy_9, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_9 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_X_F_copy_9.SetMinSize((100, -1))
sizer_6_absm1_copy.Add(self._0_X_F_copy_9, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_9 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_X_F_copy_9.SetMinSize((100, -1))
sizer_6_absm1_copy.Add(self._1_X_F_copy_9, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
sizer_6_m1dlg_copy = wx.StaticBoxSizer(wx.StaticBox(self.notebook_1_StaticBoxSizer, wx.ID_ANY, _("sizer_6_m1dlg_copy")), wx.VERTICAL)
sizer_3.Add(sizer_6_m1dlg_copy, 1, wx.EXPAND, 0)
self._0_N_N_copy_10 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_N_N_copy_10.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_10, (-1, 100)))
sizer_6_m1dlg_copy.Add(self._0_N_N_copy_10, 0, 0, 0)
self._1_N_N_copy_10 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_N_N_copy_10.SetMinSize(wx.DLG_SZE(self._1_N_N_copy_10, (-1, 100)))
sizer_6_m1dlg_copy.Add(self._1_N_N_copy_10, 1, 0, 0)
self._0_X_N_copy_10 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_X_N_copy_10.SetMinSize(wx.DLG_SZE(self._0_X_N_copy_10, (-1, 100)))
sizer_6_m1dlg_copy.Add(self._0_X_N_copy_10, 0, wx.EXPAND, 0)
self._1_X_N_copy_10 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_X_N_copy_10.SetMinSize(wx.DLG_SZE(self._1_X_N_copy_10, (-1, 100)))
sizer_6_m1dlg_copy.Add(self._1_X_N_copy_10, 1, wx.EXPAND, 0)
self._0_N_F_copy_10 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_N_F_copy_10.SetMinSize(wx.DLG_SZE(self._0_N_F_copy_10, (-1, 100)))
sizer_6_m1dlg_copy.Add(self._0_N_F_copy_10, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_10 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_N_F_copy_10.SetMinSize(wx.DLG_SZE(self._1_N_F_copy_10, (-1, 100)))
sizer_6_m1dlg_copy.Add(self._1_N_F_copy_10, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_10 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
sizer_6_m1dlg_copy.Add(self._0_X_F_copy_10, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_10 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
sizer_6_m1dlg_copy.Add(self._1_X_F_copy_10, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
sizer_6_dlgm1_copy = wx.StaticBoxSizer(wx.StaticBox(self.notebook_1_StaticBoxSizer, wx.ID_ANY, _("sizer_6_dlgm1_copy")), wx.VERTICAL)
sizer_3.Add(sizer_6_dlgm1_copy, 1, wx.EXPAND, 0)
self._0_N_N_copy_11 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_N_N_copy_11.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_11, (100, -1)))
sizer_6_dlgm1_copy.Add(self._0_N_N_copy_11, 0, 0, 0)
self._1_N_N_copy_11 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_N_N_copy_11.SetMinSize(wx.DLG_SZE(self._1_N_N_copy_11, (100, -1)))
sizer_6_dlgm1_copy.Add(self._1_N_N_copy_11, 1, 0, 0)
self._0_X_N_copy_11 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_X_N_copy_11.SetMinSize(wx.DLG_SZE(self._0_X_N_copy_11, (100, -1)))
sizer_6_dlgm1_copy.Add(self._0_X_N_copy_11, 0, wx.EXPAND, 0)
self._1_X_N_copy_11 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_X_N_copy_11.SetMinSize(wx.DLG_SZE(self._1_X_N_copy_11, (100, -1)))
sizer_6_dlgm1_copy.Add(self._1_X_N_copy_11, 1, wx.EXPAND, 0)
self._0_N_F_copy_11 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_N_F_copy_11.SetMinSize(wx.DLG_SZE(self._0_N_F_copy_11, (100, -1)))
sizer_6_dlgm1_copy.Add(self._0_N_F_copy_11, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_11 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_N_F_copy_11.SetMinSize(wx.DLG_SZE(self._1_N_F_copy_11, (100, -1)))
sizer_6_dlgm1_copy.Add(self._1_N_F_copy_11, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_11 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._0_X_F_copy_11.SetMinSize(wx.DLG_SZE(self._0_X_F_copy_11, (100, -1)))
sizer_6_dlgm1_copy.Add(self._0_X_F_copy_11, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_11 = wx.TextCtrl(self.notebook_1_StaticBoxSizer, wx.ID_ANY, "")
self._1_X_F_copy_11.SetMinSize(wx.DLG_SZE(self._1_X_F_copy_11, (100, -1)))
sizer_6_dlgm1_copy.Add(self._1_X_F_copy_11, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self.notebook_1_GridSizer = wx.Panel(self.notebook_1, wx.ID_ANY)
self.notebook_1.AddPage(self.notebook_1_GridSizer, _("GridSizer"))
sizer_4 = wx.GridSizer(7, 8, 0, 0)
self._0_N_N_copy_12 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
sizer_4.Add(self._0_N_N_copy_12, 0, 0, 0)
self._1_N_N_copy_12 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
sizer_4.Add(self._1_N_N_copy_12, 1, 0, 0)
self._0_X_N_copy_12 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
sizer_4.Add(self._0_X_N_copy_12, 0, wx.EXPAND, 0)
self._1_X_N_copy_12 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
sizer_4.Add(self._1_X_N_copy_12, 1, wx.EXPAND, 0)
self._0_N_F_copy_12 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
sizer_4.Add(self._0_N_F_copy_12, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_12 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
sizer_4.Add(self._1_N_F_copy_12, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_12 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
sizer_4.Add(self._0_X_F_copy_12, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_12 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
sizer_4.Add(self._1_X_F_copy_12, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_N_N_copy_copy_copy.SetMinSize((100, 21))
sizer_4.Add(self._0_N_N_copy_copy_copy, 0, 0, 0)
self._1_N_N_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_N_N_copy_copy_copy.SetMinSize((100, 21))
sizer_4.Add(self._1_N_N_copy_copy_copy, 1, 0, 0)
self._0_X_N_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_X_N_copy_copy_copy.SetMinSize((100, 21))
sizer_4.Add(self._0_X_N_copy_copy_copy, 0, wx.EXPAND, 0)
self._1_X_N_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_X_N_copy_copy_copy.SetMinSize((100, 21))
sizer_4.Add(self._1_X_N_copy_copy_copy, 1, wx.EXPAND, 0)
self._0_N_F_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_N_F_copy_copy_copy.SetMinSize((100, 21))
sizer_4.Add(self._0_N_F_copy_copy_copy, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_N_F_copy_copy_copy.SetMinSize((100, 21))
sizer_4.Add(self._1_N_F_copy_copy_copy, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_X_F_copy_copy_copy.SetMinSize((100, 21))
sizer_4.Add(self._0_X_F_copy_copy_copy, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_X_F_copy_copy_copy.SetMinSize((100, 21))
sizer_4.Add(self._1_X_F_copy_copy_copy, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_13 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_N_N_copy_13.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_13, (100, 21)))
sizer_4.Add(self._0_N_N_copy_13, 0, 0, 0)
self._1_N_N_copy_13 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_N_N_copy_13.SetMinSize(wx.DLG_SZE(self._1_N_N_copy_13, (100, 21)))
sizer_4.Add(self._1_N_N_copy_13, 1, 0, 0)
self._0_X_N_copy_13 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_X_N_copy_13.SetMinSize(wx.DLG_SZE(self._0_X_N_copy_13, (100, 21)))
sizer_4.Add(self._0_X_N_copy_13, 0, wx.EXPAND, 0)
self._1_X_N_copy_13 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_X_N_copy_13.SetMinSize(wx.DLG_SZE(self._1_X_N_copy_13, (100, 21)))
sizer_4.Add(self._1_X_N_copy_13, 1, wx.EXPAND, 0)
self._0_N_F_copy_13 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_N_F_copy_13.SetMinSize(wx.DLG_SZE(self._0_N_F_copy_13, (100, 21)))
sizer_4.Add(self._0_N_F_copy_13, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_13 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_N_F_copy_13.SetMinSize(wx.DLG_SZE(self._1_N_F_copy_13, (100, 21)))
sizer_4.Add(self._1_N_F_copy_13, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_13 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_X_F_copy_13.SetMinSize(wx.DLG_SZE(self._0_X_F_copy_13, (100, 21)))
sizer_4.Add(self._0_X_F_copy_13, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_13 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_X_F_copy_13.SetMinSize(wx.DLG_SZE(self._1_X_F_copy_13, (100, 21)))
sizer_4.Add(self._1_X_F_copy_13, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_14 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_N_N_copy_14.SetMinSize((-1, 21))
sizer_4.Add(self._0_N_N_copy_14, 0, 0, 0)
self._1_N_N_copy_14 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_N_N_copy_14.SetMinSize((-1, 21))
sizer_4.Add(self._1_N_N_copy_14, 1, 0, 0)
self._0_X_N_copy_14 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_X_N_copy_14.SetMinSize((-1, 21))
sizer_4.Add(self._0_X_N_copy_14, 0, wx.EXPAND, 0)
self._1_X_N_copy_14 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_X_N_copy_14.SetMinSize((-1, 21))
sizer_4.Add(self._1_X_N_copy_14, 1, wx.EXPAND, 0)
self._0_N_F_copy_14 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_N_F_copy_14.SetMinSize((-1, 21))
sizer_4.Add(self._0_N_F_copy_14, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_14 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_N_F_copy_14.SetMinSize((-1, 21))
sizer_4.Add(self._1_N_F_copy_14, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_14 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_X_F_copy_14.SetMinSize((-1, 21))
sizer_4.Add(self._0_X_F_copy_14, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_14 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_X_F_copy_14.SetMinSize((-1, 21))
sizer_4.Add(self._1_X_F_copy_14, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_15 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_N_N_copy_15.SetMinSize((100, -1))
sizer_4.Add(self._0_N_N_copy_15, 0, 0, 0)
self._1_N_N_copy_15 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_N_N_copy_15.SetMinSize((100, -1))
sizer_4.Add(self._1_N_N_copy_15, 1, 0, 0)
self._0_X_N_copy_15 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_X_N_copy_15.SetMinSize((100, -1))
sizer_4.Add(self._0_X_N_copy_15, 0, wx.EXPAND, 0)
self._1_X_N_copy_15 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_X_N_copy_15.SetMinSize((100, -1))
sizer_4.Add(self._1_X_N_copy_15, 1, wx.EXPAND, 0)
self._0_N_F_copy_15 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_N_F_copy_15.SetMinSize((100, -1))
sizer_4.Add(self._0_N_F_copy_15, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_15 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_N_F_copy_15.SetMinSize((100, -1))
sizer_4.Add(self._1_N_F_copy_15, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_15 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_X_F_copy_15.SetMinSize((100, -1))
sizer_4.Add(self._0_X_F_copy_15, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_15 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_X_F_copy_15.SetMinSize((100, -1))
sizer_4.Add(self._1_X_F_copy_15, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_16 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_N_N_copy_16.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_16, (-1, 100)))
sizer_4.Add(self._0_N_N_copy_16, 0, 0, 0)
self._1_N_N_copy_16 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_N_N_copy_16.SetMinSize(wx.DLG_SZE(self._1_N_N_copy_16, (-1, 100)))
sizer_4.Add(self._1_N_N_copy_16, 1, 0, 0)
self._0_X_N_copy_16 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_X_N_copy_16.SetMinSize(wx.DLG_SZE(self._0_X_N_copy_16, (-1, 100)))
sizer_4.Add(self._0_X_N_copy_16, 0, wx.EXPAND, 0)
self._1_X_N_copy_16 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_X_N_copy_16.SetMinSize(wx.DLG_SZE(self._1_X_N_copy_16, (-1, 100)))
sizer_4.Add(self._1_X_N_copy_16, 1, wx.EXPAND, 0)
self._0_N_F_copy_16 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_N_F_copy_16.SetMinSize(wx.DLG_SZE(self._0_N_F_copy_16, (-1, 100)))
sizer_4.Add(self._0_N_F_copy_16, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_16 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_N_F_copy_16.SetMinSize(wx.DLG_SZE(self._1_N_F_copy_16, (-1, 100)))
sizer_4.Add(self._1_N_F_copy_16, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_16 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
sizer_4.Add(self._0_X_F_copy_16, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_16 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
sizer_4.Add(self._1_X_F_copy_16, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_17 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_N_N_copy_17.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_17, (100, -1)))
sizer_4.Add(self._0_N_N_copy_17, 0, 0, 0)
self._1_N_N_copy_17 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_N_N_copy_17.SetMinSize(wx.DLG_SZE(self._1_N_N_copy_17, (100, -1)))
sizer_4.Add(self._1_N_N_copy_17, 1, 0, 0)
self._0_X_N_copy_17 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_X_N_copy_17.SetMinSize(wx.DLG_SZE(self._0_X_N_copy_17, (100, -1)))
sizer_4.Add(self._0_X_N_copy_17, 0, wx.EXPAND, 0)
self._1_X_N_copy_17 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_X_N_copy_17.SetMinSize(wx.DLG_SZE(self._1_X_N_copy_17, (100, -1)))
sizer_4.Add(self._1_X_N_copy_17, 1, wx.EXPAND, 0)
self._0_N_F_copy_17 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_N_F_copy_17.SetMinSize(wx.DLG_SZE(self._0_N_F_copy_17, (100, -1)))
sizer_4.Add(self._0_N_F_copy_17, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_17 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_N_F_copy_17.SetMinSize(wx.DLG_SZE(self._1_N_F_copy_17, (100, -1)))
sizer_4.Add(self._1_N_F_copy_17, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_17 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._0_X_F_copy_17.SetMinSize(wx.DLG_SZE(self._0_X_F_copy_17, (100, -1)))
sizer_4.Add(self._0_X_F_copy_17, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_17 = wx.TextCtrl(self.notebook_1_GridSizer, wx.ID_ANY, "")
self._1_X_F_copy_17.SetMinSize(wx.DLG_SZE(self._1_X_F_copy_17, (100, -1)))
sizer_4.Add(self._1_X_F_copy_17, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self.notebook_1_FlexGridSizer = wx.Panel(self.notebook_1, wx.ID_ANY)
self.notebook_1.AddPage(self.notebook_1_FlexGridSizer, _("FlexGridSizer"))
sizer_5 = wx.FlexGridSizer(7, 8, 1, 1)
self._0_N_N_copy_18 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
sizer_5.Add(self._0_N_N_copy_18, 0, 0, 0)
self._1_N_N_copy_18 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
sizer_5.Add(self._1_N_N_copy_18, 1, 0, 0)
self._0_X_N_copy_18 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
sizer_5.Add(self._0_X_N_copy_18, 0, wx.EXPAND, 0)
self._1_X_N_copy_18 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
sizer_5.Add(self._1_X_N_copy_18, 1, wx.EXPAND, 0)
self._0_N_F_copy_18 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
sizer_5.Add(self._0_N_F_copy_18, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_18 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
sizer_5.Add(self._1_N_F_copy_18, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_18 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
sizer_5.Add(self._0_X_F_copy_18, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_18 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
sizer_5.Add(self._1_X_F_copy_18, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_N_N_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_5.Add(self._0_N_N_copy_copy_copy_copy, 0, 0, 0)
self._1_N_N_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_N_N_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_5.Add(self._1_N_N_copy_copy_copy_copy, 1, 0, 0)
self._0_X_N_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_X_N_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_5.Add(self._0_X_N_copy_copy_copy_copy, 0, wx.EXPAND, 0)
self._1_X_N_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_X_N_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_5.Add(self._1_X_N_copy_copy_copy_copy, 1, wx.EXPAND, 0)
self._0_N_F_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_N_F_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_5.Add(self._0_N_F_copy_copy_copy_copy, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_N_F_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_5.Add(self._1_N_F_copy_copy_copy_copy, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_X_F_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_5.Add(self._0_X_F_copy_copy_copy_copy, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_X_F_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_5.Add(self._1_X_F_copy_copy_copy_copy, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_19 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_N_N_copy_19.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_19, (100, 21)))
sizer_5.Add(self._0_N_N_copy_19, 0, 0, 0)
self._1_N_N_copy_19 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_N_N_copy_19.SetMinSize(wx.DLG_SZE(self._1_N_N_copy_19, (100, 21)))
sizer_5.Add(self._1_N_N_copy_19, 1, 0, 0)
self._0_X_N_copy_19 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_X_N_copy_19.SetMinSize(wx.DLG_SZE(self._0_X_N_copy_19, (100, 21)))
sizer_5.Add(self._0_X_N_copy_19, 0, wx.EXPAND, 0)
self._1_X_N_copy_19 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_X_N_copy_19.SetMinSize(wx.DLG_SZE(self._1_X_N_copy_19, (100, 21)))
sizer_5.Add(self._1_X_N_copy_19, 1, wx.EXPAND, 0)
self._0_N_F_copy_19 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_N_F_copy_19.SetMinSize(wx.DLG_SZE(self._0_N_F_copy_19, (100, 21)))
sizer_5.Add(self._0_N_F_copy_19, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_19 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_N_F_copy_19.SetMinSize(wx.DLG_SZE(self._1_N_F_copy_19, (100, 21)))
sizer_5.Add(self._1_N_F_copy_19, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_19 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_X_F_copy_19.SetMinSize(wx.DLG_SZE(self._0_X_F_copy_19, (100, 21)))
sizer_5.Add(self._0_X_F_copy_19, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_19 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_X_F_copy_19.SetMinSize(wx.DLG_SZE(self._1_X_F_copy_19, (100, 21)))
sizer_5.Add(self._1_X_F_copy_19, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_20 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_N_N_copy_20.SetMinSize((-1, 21))
sizer_5.Add(self._0_N_N_copy_20, 0, 0, 0)
self._1_N_N_copy_20 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_N_N_copy_20.SetMinSize((-1, 21))
sizer_5.Add(self._1_N_N_copy_20, 1, 0, 0)
self._0_X_N_copy_20 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_X_N_copy_20.SetMinSize((-1, 21))
sizer_5.Add(self._0_X_N_copy_20, 0, wx.EXPAND, 0)
self._1_X_N_copy_20 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_X_N_copy_20.SetMinSize((-1, 21))
sizer_5.Add(self._1_X_N_copy_20, 1, wx.EXPAND, 0)
self._0_N_F_copy_20 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_N_F_copy_20.SetMinSize((-1, 21))
sizer_5.Add(self._0_N_F_copy_20, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_20 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_N_F_copy_20.SetMinSize((-1, 21))
sizer_5.Add(self._1_N_F_copy_20, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_20 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_X_F_copy_20.SetMinSize((-1, 21))
sizer_5.Add(self._0_X_F_copy_20, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_20 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_X_F_copy_20.SetMinSize((-1, 21))
sizer_5.Add(self._1_X_F_copy_20, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_21 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_N_N_copy_21.SetMinSize((100, -1))
sizer_5.Add(self._0_N_N_copy_21, 0, 0, 0)
self._1_N_N_copy_21 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_N_N_copy_21.SetMinSize((100, -1))
sizer_5.Add(self._1_N_N_copy_21, 1, 0, 0)
self._0_X_N_copy_21 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_X_N_copy_21.SetMinSize((100, -1))
sizer_5.Add(self._0_X_N_copy_21, 0, wx.EXPAND, 0)
self._1_X_N_copy_21 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_X_N_copy_21.SetMinSize((100, -1))
sizer_5.Add(self._1_X_N_copy_21, 1, wx.EXPAND, 0)
self._0_N_F_copy_21 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_N_F_copy_21.SetMinSize((100, -1))
sizer_5.Add(self._0_N_F_copy_21, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_21 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_N_F_copy_21.SetMinSize((100, -1))
sizer_5.Add(self._1_N_F_copy_21, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_21 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_X_F_copy_21.SetMinSize((100, -1))
sizer_5.Add(self._0_X_F_copy_21, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_21 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_X_F_copy_21.SetMinSize((100, -1))
sizer_5.Add(self._1_X_F_copy_21, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_22 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_N_N_copy_22.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_22, (-1, 100)))
sizer_5.Add(self._0_N_N_copy_22, 0, 0, 0)
self._1_N_N_copy_22 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_N_N_copy_22.SetMinSize(wx.DLG_SZE(self._1_N_N_copy_22, (-1, 100)))
sizer_5.Add(self._1_N_N_copy_22, 1, 0, 0)
self._0_X_N_copy_22 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_X_N_copy_22.SetMinSize(wx.DLG_SZE(self._0_X_N_copy_22, (-1, 100)))
sizer_5.Add(self._0_X_N_copy_22, 0, wx.EXPAND, 0)
self._1_X_N_copy_22 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_X_N_copy_22.SetMinSize(wx.DLG_SZE(self._1_X_N_copy_22, (-1, 100)))
sizer_5.Add(self._1_X_N_copy_22, 1, wx.EXPAND, 0)
self._0_N_F_copy_22 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_N_F_copy_22.SetMinSize(wx.DLG_SZE(self._0_N_F_copy_22, (-1, 100)))
sizer_5.Add(self._0_N_F_copy_22, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_22 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_N_F_copy_22.SetMinSize(wx.DLG_SZE(self._1_N_F_copy_22, (-1, 100)))
sizer_5.Add(self._1_N_F_copy_22, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_22 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
sizer_5.Add(self._0_X_F_copy_22, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_22 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
sizer_5.Add(self._1_X_F_copy_22, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_23 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_N_N_copy_23.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_23, (100, -1)))
sizer_5.Add(self._0_N_N_copy_23, 0, 0, 0)
self._1_N_N_copy_23 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_N_N_copy_23.SetMinSize(wx.DLG_SZE(self._1_N_N_copy_23, (100, -1)))
sizer_5.Add(self._1_N_N_copy_23, 1, 0, 0)
self._0_X_N_copy_23 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_X_N_copy_23.SetMinSize(wx.DLG_SZE(self._0_X_N_copy_23, (100, -1)))
sizer_5.Add(self._0_X_N_copy_23, 0, wx.EXPAND, 0)
self._1_X_N_copy_23 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_X_N_copy_23.SetMinSize(wx.DLG_SZE(self._1_X_N_copy_23, (100, -1)))
sizer_5.Add(self._1_X_N_copy_23, 1, wx.EXPAND, 0)
self._0_N_F_copy_23 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_N_F_copy_23.SetMinSize(wx.DLG_SZE(self._0_N_F_copy_23, (100, -1)))
sizer_5.Add(self._0_N_F_copy_23, 0, wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_23 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_N_F_copy_23.SetMinSize(wx.DLG_SZE(self._1_N_F_copy_23, (100, -1)))
sizer_5.Add(self._1_N_F_copy_23, 1, wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_23 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._0_X_F_copy_23.SetMinSize(wx.DLG_SZE(self._0_X_F_copy_23, (100, -1)))
sizer_5.Add(self._0_X_F_copy_23, 0, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_23 = wx.TextCtrl(self.notebook_1_FlexGridSizer, wx.ID_ANY, "")
self._1_X_F_copy_23.SetMinSize(wx.DLG_SZE(self._1_X_F_copy_23, (100, -1)))
sizer_5.Add(self._1_X_F_copy_23, 1, wx.EXPAND | wx.FIXED_MINSIZE, 0)
self.notebook_1_GridBagSizer = wx.Panel(self.notebook_1, wx.ID_ANY)
self.notebook_1.AddPage(self.notebook_1_GridBagSizer, _("GridBagSizer"))
sizer_6 = wx.GridBagSizer(1, 1)
self._0_N_N_copy_24 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
sizer_6.Add(self._0_N_N_copy_24, (0, 0), (1, 1), 0, 0)
self._1_N_N_copy_24 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
sizer_6.Add(self._1_N_N_copy_24, (0, 1), (1, 1), 0, 0)
self._0_X_N_copy_24 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
sizer_6.Add(self._0_X_N_copy_24, (0, 2), (1, 1), wx.EXPAND, 0)
self._1_X_N_copy_24 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
sizer_6.Add(self._1_X_N_copy_24, (0, 3), (1, 1), wx.EXPAND, 0)
self._0_N_F_copy_24 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
sizer_6.Add(self._0_N_F_copy_24, (0, 4), (1, 1), wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_24 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
sizer_6.Add(self._1_N_F_copy_24, (0, 5), (1, 1), wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_24 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
sizer_6.Add(self._0_X_F_copy_24, (0, 6), (1, 1), wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_24 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
sizer_6.Add(self._1_X_F_copy_24, (0, 7), (1, 1), wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_N_N_copy_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_6.Add(self._0_N_N_copy_copy_copy_copy_copy, (1, 0), (1, 1), 0, 0)
self._1_N_N_copy_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_N_N_copy_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_6.Add(self._1_N_N_copy_copy_copy_copy_copy, (1, 1), (1, 1), 0, 0)
self._0_X_N_copy_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_X_N_copy_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_6.Add(self._0_X_N_copy_copy_copy_copy_copy, (1, 2), (1, 1), wx.EXPAND, 0)
self._1_X_N_copy_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_X_N_copy_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_6.Add(self._1_X_N_copy_copy_copy_copy_copy, (1, 3), (1, 1), wx.EXPAND, 0)
self._0_N_F_copy_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_N_F_copy_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_6.Add(self._0_N_F_copy_copy_copy_copy_copy, (1, 4), (1, 1), wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_N_F_copy_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_6.Add(self._1_N_F_copy_copy_copy_copy_copy, (1, 5), (1, 1), wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_X_F_copy_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_6.Add(self._0_X_F_copy_copy_copy_copy_copy, (1, 6), (1, 1), wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_copy_copy_copy_copy = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_X_F_copy_copy_copy_copy_copy.SetMinSize((100, 21))
sizer_6.Add(self._1_X_F_copy_copy_copy_copy_copy, (1, 7), (1, 1), wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_25 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_N_N_copy_25.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_25, (100, 21)))
sizer_6.Add(self._0_N_N_copy_25, (2, 0), (1, 1), 0, 0)
self._1_N_N_copy_25 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_N_N_copy_25.SetMinSize(wx.DLG_SZE(self._1_N_N_copy_25, (100, 21)))
sizer_6.Add(self._1_N_N_copy_25, (2, 1), (1, 1), 0, 0)
self._0_X_N_copy_25 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_X_N_copy_25.SetMinSize(wx.DLG_SZE(self._0_X_N_copy_25, (100, 21)))
sizer_6.Add(self._0_X_N_copy_25, (2, 2), (1, 1), wx.EXPAND, 0)
self._1_X_N_copy_25 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_X_N_copy_25.SetMinSize(wx.DLG_SZE(self._1_X_N_copy_25, (100, 21)))
sizer_6.Add(self._1_X_N_copy_25, (2, 3), (1, 1), wx.EXPAND, 0)
self._0_N_F_copy_25 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_N_F_copy_25.SetMinSize(wx.DLG_SZE(self._0_N_F_copy_25, (100, 21)))
sizer_6.Add(self._0_N_F_copy_25, (2, 4), (1, 1), wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_25 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_N_F_copy_25.SetMinSize(wx.DLG_SZE(self._1_N_F_copy_25, (100, 21)))
sizer_6.Add(self._1_N_F_copy_25, (2, 5), (1, 1), wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_25 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_X_F_copy_25.SetMinSize(wx.DLG_SZE(self._0_X_F_copy_25, (100, 21)))
sizer_6.Add(self._0_X_F_copy_25, (2, 6), (1, 1), wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_25 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_X_F_copy_25.SetMinSize(wx.DLG_SZE(self._1_X_F_copy_25, (100, 21)))
sizer_6.Add(self._1_X_F_copy_25, (2, 7), (1, 1), wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_26 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_N_N_copy_26.SetMinSize((-1, 21))
sizer_6.Add(self._0_N_N_copy_26, (3, 0), (1, 1), 0, 0)
self._1_N_N_copy_26 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_N_N_copy_26.SetMinSize((-1, 21))
sizer_6.Add(self._1_N_N_copy_26, (3, 1), (1, 1), 0, 0)
self._0_X_N_copy_26 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_X_N_copy_26.SetMinSize((-1, 21))
sizer_6.Add(self._0_X_N_copy_26, (3, 2), (1, 1), wx.EXPAND, 0)
self._1_X_N_copy_26 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_X_N_copy_26.SetMinSize((-1, 21))
sizer_6.Add(self._1_X_N_copy_26, (3, 3), (1, 1), wx.EXPAND, 0)
self._0_N_F_copy_26 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_N_F_copy_26.SetMinSize((-1, 21))
sizer_6.Add(self._0_N_F_copy_26, (3, 4), (1, 1), wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_26 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_N_F_copy_26.SetMinSize((-1, 21))
sizer_6.Add(self._1_N_F_copy_26, (3, 5), (1, 1), wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_26 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_X_F_copy_26.SetMinSize((-1, 21))
sizer_6.Add(self._0_X_F_copy_26, (3, 6), (1, 1), wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_26 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_X_F_copy_26.SetMinSize((-1, 21))
sizer_6.Add(self._1_X_F_copy_26, (3, 7), (1, 1), wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_27 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_N_N_copy_27.SetMinSize((100, -1))
sizer_6.Add(self._0_N_N_copy_27, (4, 0), (1, 1), 0, 0)
self._1_N_N_copy_27 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_N_N_copy_27.SetMinSize((100, -1))
sizer_6.Add(self._1_N_N_copy_27, (4, 1), (1, 1), 0, 0)
self._0_X_N_copy_27 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_X_N_copy_27.SetMinSize((100, -1))
sizer_6.Add(self._0_X_N_copy_27, (4, 2), (1, 1), wx.EXPAND, 0)
self._1_X_N_copy_27 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_X_N_copy_27.SetMinSize((100, -1))
sizer_6.Add(self._1_X_N_copy_27, (4, 3), (1, 1), wx.EXPAND, 0)
self._0_N_F_copy_27 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_N_F_copy_27.SetMinSize((100, -1))
sizer_6.Add(self._0_N_F_copy_27, (4, 4), (1, 1), wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_27 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_N_F_copy_27.SetMinSize((100, -1))
sizer_6.Add(self._1_N_F_copy_27, (4, 5), (1, 1), wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_27 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_X_F_copy_27.SetMinSize((100, -1))
sizer_6.Add(self._0_X_F_copy_27, (4, 6), (1, 1), wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_27 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_X_F_copy_27.SetMinSize((100, -1))
sizer_6.Add(self._1_X_F_copy_27, (4, 7), (1, 1), wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_28 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_N_N_copy_28.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_28, (-1, 100)))
sizer_6.Add(self._0_N_N_copy_28, (5, 0), (1, 1), 0, 0)
self._1_N_N_copy_28 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_N_N_copy_28.SetMinSize(wx.DLG_SZE(self._1_N_N_copy_28, (-1, 100)))
sizer_6.Add(self._1_N_N_copy_28, (5, 1), (1, 1), 0, 0)
self._0_X_N_copy_28 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_X_N_copy_28.SetMinSize(wx.DLG_SZE(self._0_X_N_copy_28, (-1, 100)))
sizer_6.Add(self._0_X_N_copy_28, (5, 2), (1, 1), wx.EXPAND, 0)
self._1_X_N_copy_28 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_X_N_copy_28.SetMinSize(wx.DLG_SZE(self._1_X_N_copy_28, (-1, 100)))
sizer_6.Add(self._1_X_N_copy_28, (5, 3), (1, 1), wx.EXPAND, 0)
self._0_N_F_copy_28 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_N_F_copy_28.SetMinSize(wx.DLG_SZE(self._0_N_F_copy_28, (-1, 100)))
sizer_6.Add(self._0_N_F_copy_28, (5, 4), (1, 1), wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_28 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_N_F_copy_28.SetMinSize(wx.DLG_SZE(self._1_N_F_copy_28, (-1, 100)))
sizer_6.Add(self._1_N_F_copy_28, (5, 5), (1, 1), wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_28 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
sizer_6.Add(self._0_X_F_copy_28, (5, 6), (1, 1), wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_28 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
sizer_6.Add(self._1_X_F_copy_28, (5, 7), (1, 1), wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._0_N_N_copy_29 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_N_N_copy_29.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_29, (100, -1)))
sizer_6.Add(self._0_N_N_copy_29, (6, 0), (1, 1), 0, 0)
self._1_N_N_copy_29 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_N_N_copy_29.SetMinSize(wx.DLG_SZE(self._1_N_N_copy_29, (100, -1)))
sizer_6.Add(self._1_N_N_copy_29, (6, 1), (1, 1), 0, 0)
self._0_X_N_copy_29 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_X_N_copy_29.SetMinSize(wx.DLG_SZE(self._0_X_N_copy_29, (100, -1)))
sizer_6.Add(self._0_X_N_copy_29, (6, 2), (1, 1), wx.EXPAND, 0)
self._1_X_N_copy_29 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_X_N_copy_29.SetMinSize(wx.DLG_SZE(self._1_X_N_copy_29, (100, -1)))
sizer_6.Add(self._1_X_N_copy_29, (6, 3), (1, 1), wx.EXPAND, 0)
self._0_N_F_copy_29 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_N_F_copy_29.SetMinSize(wx.DLG_SZE(self._0_N_F_copy_29, (100, -1)))
sizer_6.Add(self._0_N_F_copy_29, (6, 4), (1, 1), wx.FIXED_MINSIZE, 0)
self._1_N_F_copy_29 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_N_F_copy_29.SetMinSize(wx.DLG_SZE(self._1_N_F_copy_29, (100, -1)))
sizer_6.Add(self._1_N_F_copy_29, (6, 5), (1, 1), wx.FIXED_MINSIZE, 0)
self._0_X_F_copy_29 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._0_X_F_copy_29.SetMinSize(wx.DLG_SZE(self._0_X_F_copy_29, (100, -1)))
sizer_6.Add(self._0_X_F_copy_29, (6, 6), (1, 1), wx.EXPAND | wx.FIXED_MINSIZE, 0)
self._1_X_F_copy_29 = wx.TextCtrl(self.notebook_1_GridBagSizer, wx.ID_ANY, "")
self._1_X_F_copy_29.SetMinSize(wx.DLG_SZE(self._1_X_F_copy_29, (100, -1)))
sizer_6.Add(self._1_X_F_copy_29, (6, 7), (1, 1), wx.EXPAND | wx.FIXED_MINSIZE, 0)
self.notebook_1_BorderTest = wx.Panel(self.notebook_1, wx.ID_ANY)
self.notebook_1.AddPage(self.notebook_1_BorderTest, _("BorderTest"))
sizer_7 = wx.BoxSizer(wx.HORIZONTAL)
sizer_border_10_none = wx.BoxSizer(wx.VERTICAL)
sizer_7.Add(sizer_border_10_none, 1, wx.EXPAND, 10)
self._0_N_N_border_10_none = wx.TextCtrl(self.notebook_1_BorderTest, wx.ID_ANY, "")
sizer_border_10_none.Add(self._0_N_N_border_10_none, 0, 0, 10)
self._1_N_N_border_0_all = wx.TextCtrl(self.notebook_1_BorderTest, wx.ID_ANY, "")
sizer_border_10_none.Add(self._1_N_N_border_0_all, 1, wx.ALL, 0)
self._0_X_N_border_5_LEFTRIGHT = wx.TextCtrl(self.notebook_1_BorderTest, wx.ID_ANY, "")
sizer_border_10_none.Add(self._0_X_N_border_5_LEFTRIGHT, 0, wx.EXPAND | wx.LEFT | wx.RIGHT, 5)
self._1_X_N_border_15_BOTTOM = wx.TextCtrl(self.notebook_1_BorderTest, wx.ID_ANY, "")
sizer_border_10_none.Add(self._1_X_N_border_15_BOTTOM, 1, wx.BOTTOM | wx.EXPAND, 15)
sizer_border_0_ALL = wx.BoxSizer(wx.VERTICAL)
sizer_7.Add(sizer_border_0_ALL, 1, wx.ALL | wx.EXPAND, 0)
self._0_N_N_copy_copy_1 = wx.TextCtrl(self.notebook_1_BorderTest, wx.ID_ANY, "")
self._0_N_N_copy_copy_1.SetMinSize((100, 21))
sizer_border_0_ALL.Add(self._0_N_N_copy_copy_1, 0, 0, 0)
sizer_border_5_LEFTRIGHT = wx.BoxSizer(wx.VERTICAL)
sizer_7.Add(sizer_border_5_LEFTRIGHT, 1, wx.LEFT | wx.RIGHT, 5)
self._0_N_N_copy_31 = wx.TextCtrl(self.notebook_1_BorderTest, wx.ID_ANY, "")
self._0_N_N_copy_31.SetMinSize(wx.DLG_SZE(self._0_N_N_copy_31, (100, 21)))
sizer_border_5_LEFTRIGHT.Add(self._0_N_N_copy_31, 0, 0, 0)
sizer_border_15_BOTTOM = wx.BoxSizer(wx.VERTICAL)
sizer_7.Add(sizer_border_15_BOTTOM, 1, 0, 15)
self._0_N_N_copy_32 = wx.TextCtrl(self.notebook_1_BorderTest, wx.ID_ANY, "")
self._0_N_N_copy_32.SetMinSize((-1, 21))
sizer_border_15_BOTTOM.Add(self._0_N_N_copy_32, 0, 0, 0)
self.notebook_1_BorderTest.SetSizer(sizer_7)
sizer_6.AddGrowableRow(2)
sizer_6.AddGrowableRow(5)
sizer_6.AddGrowableCol(1)
sizer_6.AddGrowableCol(7)
self.notebook_1_GridBagSizer.SetSizer(sizer_6)
sizer_5.AddGrowableRow(2)
sizer_5.AddGrowableRow(5)
sizer_5.AddGrowableCol(1)
sizer_5.AddGrowableCol(7)
self.notebook_1_FlexGridSizer.SetSizer(sizer_5)
self.notebook_1_GridSizer.SetSizer(sizer_4)
self.notebook_1_StaticBoxSizer.SetSizer(sizer_3)
self.notebook_1_pane_1.SetSizer(sizer_2)
self.SetSizer(sizer_1)
self.Layout()
# end wxGlade
# end of class MyFrame
class App(wx.App):
def OnInit(self):
self.frame = MyFrame(None, wx.ID_ANY, "")
self.SetTopWindow(self.frame)
self.frame.Show()
return True
# end of class App
if __name__ == "__main__":
    gettext.install("App")  # replace with the appropriate catalog name
    app = App(0)
    app.MainLoop()
| 52.688551
| 143
| 0.693426
| 12,655
| 65,808
| 3.084394
| 0.009957
| 0.059437
| 0.108575
| 0.161761
| 0.96421
| 0.956678
| 0.939282
| 0.922501
| 0.901494
| 0.794687
| 0
| 0.090732
| 0.171484
| 65,808
| 1,248
| 144
| 52.730769
| 0.625167
| 0.004012
| 0
| 0
| 1
| 0
| 0.003281
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002227
| false
| 0
| 0.002227
| 0
| 0.007795
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
1c9de3984e9adbd11397e3bea8a1029148744454
| 1,356
|
py
|
Python
|
usr/gre/sensehat/06_environment.py
|
Bugnon/oc-2018
|
7961de5ba9923512bd50c579c37f1dadf070b692
|
[
"MIT"
] | 3
|
2018-09-20T12:16:48.000Z
|
2019-06-21T08:32:17.000Z
|
usr/gre/sensehat/06_environment.py
|
Bugnon/oc-2018
|
7961de5ba9923512bd50c579c37f1dadf070b692
|
[
"MIT"
] | null | null | null |
usr/gre/sensehat/06_environment.py
|
Bugnon/oc-2018
|
7961de5ba9923512bd50c579c37f1dadf070b692
|
[
"MIT"
] | 2
|
2018-09-20T11:55:05.000Z
|
2019-09-01T19:40:13.000Z
|
# File: 06_environment.py
# Author: Raphael Holzer
# Date: 26. 11. 2018
from sense_hat import SenseHat
from time import sleep
sense = SenseHat()
red = (255, 0, 0)
blue = (0, 0, 255)
# Read the sensors, scroll the values across the LED matrix, print them to
# the console, then wait before taking the next measurement.
while True:
    h = int(sense.get_humidity())
    t = int(sense.get_temperature())
    p = int(sense.get_pressure())
    sense.show_message('t=' + str(t), text_colour=red)
    sense.show_message('p=' + str(p))
    sense.show_message('h=' + str(h), text_colour=blue)
    print('>>> New values <<<\n')
    print('humidity =', h)
    print('pressure =', p)
    print('temperature =', t)
    print('temp from pressure =', sense.get_temperature_from_pressure())
    print('temp from humidity =', sense.get_temperature_from_humidity(), '\n---------------------\n')
    sleep(20)
| 28.851064
| 105
| 0.634218
| 186
| 1,356
| 4.462366
| 0.258065
| 0.096386
| 0.079518
| 0.110843
| 0.715663
| 0.715663
| 0.715663
| 0.715663
| 0.715663
| 0.715663
| 0
| 0.019315
| 0.16003
| 1,356
| 46
| 106
| 29.478261
| 0.709394
| 0.047935
| 0
| 0.72973
| 0
| 0
| 0.162393
| 0.017871
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.054054
| null | null | 0.27027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1c0f626e6348e1d3e2034b3f74f179789c691bd6
| 9,583
|
py
|
Python
|
dxlclient/test/test_callback_manager.py
|
freedai/opendxl-client-python
|
b6a5b216b4b9ba815a1b03e07755db563880881f
|
[
"Apache-2.0"
] | 110
|
2016-11-03T17:49:24.000Z
|
2021-03-25T20:53:42.000Z
|
dxlclient/test/test_callback_manager.py
|
freedai/opendxl-client-python
|
b6a5b216b4b9ba815a1b03e07755db563880881f
|
[
"Apache-2.0"
] | 23
|
2016-11-04T17:08:13.000Z
|
2021-12-03T19:30:53.000Z
|
dxlclient/test/test_callback_manager.py
|
freedai/opendxl-client-python
|
b6a5b216b4b9ba815a1b03e07755db563880881f
|
[
"Apache-2.0"
] | 63
|
2016-11-03T17:49:38.000Z
|
2022-03-15T12:02:49.000Z
|
# -*- coding: utf-8 -*-
################################################################################
# Copyright (c) 2018 McAfee LLC - All Rights Reserved.
################################################################################
"""
Test cases for the CallbackManager class
"""
# Run with python -m unittest dxlclient.test.test_callback_manager
from __future__ import absolute_import
import unittest
from dxlclient import callbacks
import dxlclient._callback_manager as callback_manager
# pylint: disable=missing-docstring
class MockRequestCallback(callbacks.RequestCallback):
def on_request(self, request):
pass
class MockResponseCallback(callbacks.ResponseCallback):
def on_response(self, response):
pass
class MockEventCallback(callbacks.EventCallback):
def on_event(self, event):
pass
class CallbackManagerTest(unittest.TestCase):
def setUp(self):
pass
def test_request_callback_manager_with_valid_callback(self):
cbm = callback_manager._RequestCallbackManager()
cbm.add_callback("/test", MockRequestCallback)
self.assertEqual(1, len(cbm.callbacks_by_channel.get("/test")))
self.assertEqual(1, len(cbm.callbacks_by_channel))
cbm.add_callback(callback=MockRequestCallback)
self.assertEqual(1, len(cbm.callbacks_by_channel.get("")))
self.assertEqual(2, len(cbm.callbacks_by_channel))
cbm.remove_callback("/test", MockRequestCallback)
self.assertEqual(None, cbm.callbacks_by_channel.get("/test"))
self.assertEqual(1, len(cbm.callbacks_by_channel))
cbm.remove_callback(callback=MockRequestCallback)
self.assertEqual(None, cbm.callbacks_by_channel.get(""))
self.assertEqual(0, len(cbm.callbacks_by_channel))
def test_request_callback_manager_with_invalid_callback(self):
cbm = callback_manager._RequestCallbackManager()
with self.assertRaises(ValueError):
cbm.add_callback("/test", MockResponseCallback)
self.assertEqual(None, cbm.callbacks_by_channel.get("/test"))
self.assertEqual(0, len(cbm.callbacks_by_channel))
def test_request_callback_manager_with_double_registration(self):
cbm = callback_manager._RequestCallbackManager()
cbm.add_callback("/test", MockRequestCallback)
cbm.add_callback("/test", MockRequestCallback)
self.assertEqual(1, len(cbm.callbacks_by_channel))
cbm.remove_callback("/test", MockRequestCallback)
self.assertEqual(0, len(cbm.callbacks_by_channel))
def test_request_callback_manager_with_valid_callback_instance(self):
cbm = callback_manager._RequestCallbackManager()
callback = MockRequestCallback()
cbm.add_callback("/test", callback)
self.assertEqual(1, len(cbm.callbacks_by_channel.get("/test")))
self.assertEqual(1, len(cbm.callbacks_by_channel))
cbm.add_callback(callback=callback)
self.assertEqual(1, len(cbm.callbacks_by_channel.get("")))
self.assertEqual(2, len(cbm.callbacks_by_channel))
cbm.remove_callback("/test", callback)
self.assertEqual(None, cbm.callbacks_by_channel.get("/test"))
self.assertEqual(1, len(cbm.callbacks_by_channel))
cbm.remove_callback(callback=callback)
self.assertEqual(None, cbm.callbacks_by_channel.get(""))
self.assertEqual(0, len(cbm.callbacks_by_channel))
def test_request_callback_manager_with_invalid_callback_instance(self):
cbm = callback_manager._RequestCallbackManager()
callback = MockResponseCallback()
with self.assertRaises(ValueError):
cbm.add_callback("/test", callback)
self.assertEqual(None, cbm.callbacks_by_channel.get("/test"))
self.assertEqual(0, len(cbm.callbacks_by_channel))
def test_response_callback_manager_with_valid_callback(self):
cbm = callback_manager._ResponseCallbackManager()
cbm.add_callback("/test", MockResponseCallback)
self.assertEqual(1, len(cbm.callbacks_by_channel.get("/test")))
self.assertEqual(1, len(cbm.callbacks_by_channel))
cbm.add_callback(callback=MockResponseCallback)
self.assertEqual(1, len(cbm.callbacks_by_channel.get("")))
self.assertEqual(2, len(cbm.callbacks_by_channel))
cbm.remove_callback("/test", MockResponseCallback)
self.assertEqual(None, cbm.callbacks_by_channel.get("/test"))
self.assertEqual(1, len(cbm.callbacks_by_channel))
cbm.remove_callback(callback=MockResponseCallback)
self.assertEqual(None, cbm.callbacks_by_channel.get(""))
self.assertEqual(0, len(cbm.callbacks_by_channel))
def test_response_callback_manager_with_invalid_callback(self):
cbm = callback_manager._ResponseCallbackManager()
with self.assertRaises(ValueError):
cbm.add_callback("/test", MockEventCallback)
self.assertEqual(None, cbm.callbacks_by_channel.get("/test"))
self.assertEqual(0, len(cbm.callbacks_by_channel))
def test_response_callback_manager_with_double_registration(self):
cbm = callback_manager._ResponseCallbackManager()
cbm.add_callback("/test", MockResponseCallback)
cbm.add_callback("/test", MockResponseCallback)
self.assertEqual(1, len(cbm.callbacks_by_channel))
cbm.remove_callback("/test", MockResponseCallback)
self.assertEqual(0, len(cbm.callbacks_by_channel))
def test_response_callback_manager_with_valid_callback_instance(self):
cbm = callback_manager._ResponseCallbackManager()
callback = MockResponseCallback()
cbm.add_callback("/test", callback)
self.assertEqual(1, len(cbm.callbacks_by_channel.get("/test")))
self.assertEqual(1, len(cbm.callbacks_by_channel))
cbm.add_callback(callback=callback)
self.assertEqual(1, len(cbm.callbacks_by_channel.get("")))
self.assertEqual(2, len(cbm.callbacks_by_channel))
cbm.remove_callback("/test", callback)
self.assertEqual(None, cbm.callbacks_by_channel.get("/test"))
self.assertEqual(1, len(cbm.callbacks_by_channel))
cbm.remove_callback(callback=callback)
self.assertEqual(None, cbm.callbacks_by_channel.get(""))
self.assertEqual(0, len(cbm.callbacks_by_channel))
def test_response_callback_manager_with_invalid_callback_instance(self):
cbm = callback_manager._ResponseCallbackManager()
callback = MockEventCallback()
with self.assertRaises(ValueError):
cbm.add_callback("/test", callback)
self.assertEqual(None, cbm.callbacks_by_channel.get("/test"))
self.assertEqual(0, len(cbm.callbacks_by_channel))
def test_event_callback_manager_with_valid_callback(self):
cbm = callback_manager._EventCallbackManager()
cbm.add_callback("/test", MockEventCallback)
self.assertEqual(1, len(cbm.callbacks_by_channel.get("/test")))
self.assertEqual(1, len(cbm.callbacks_by_channel))
cbm.add_callback(callback=MockEventCallback)
self.assertEqual(1, len(cbm.callbacks_by_channel.get("")))
self.assertEqual(2, len(cbm.callbacks_by_channel))
cbm.remove_callback("/test", MockEventCallback)
self.assertEqual(None, cbm.callbacks_by_channel.get("/test"))
self.assertEqual(1, len(cbm.callbacks_by_channel))
cbm.remove_callback(callback=MockEventCallback)
self.assertEqual(None, cbm.callbacks_by_channel.get(""))
self.assertEqual(0, len(cbm.callbacks_by_channel))
def test_event_callback_manager_with_invalid_callback(self):
cbm = callback_manager._EventCallbackManager()
with self.assertRaises(ValueError):
cbm.add_callback("/test", MockRequestCallback)
self.assertEqual(None, cbm.callbacks_by_channel.get("/test"))
self.assertEqual(0, len(cbm.callbacks_by_channel))
def test_event_callback_manager_with_double_registration(self):
cbm = callback_manager._EventCallbackManager()
cbm.add_callback("/test", MockEventCallback)
cbm.add_callback("/test", MockEventCallback)
self.assertEqual(1, len(cbm.callbacks_by_channel))
cbm.remove_callback("/test", MockEventCallback)
self.assertEqual(0, len(cbm.callbacks_by_channel))
def test_event_callback_manager_with_valid_callback_instance(self):
cbm = callback_manager._EventCallbackManager()
callback = MockEventCallback()
cbm.add_callback("/test", callback)
self.assertEqual(1, len(cbm.callbacks_by_channel.get("/test")))
self.assertEqual(1, len(cbm.callbacks_by_channel))
cbm.add_callback(callback=callback)
self.assertEqual(1, len(cbm.callbacks_by_channel.get("")))
self.assertEqual(2, len(cbm.callbacks_by_channel))
cbm.remove_callback("/test", callback)
self.assertEqual(None, cbm.callbacks_by_channel.get("/test"))
self.assertEqual(1, len(cbm.callbacks_by_channel))
cbm.remove_callback(callback=callback)
self.assertEqual(None, cbm.callbacks_by_channel.get(""))
self.assertEqual(0, len(cbm.callbacks_by_channel))
def test_event_callback_manager_with_invalid_callback_instance(self):
cbm = callback_manager._EventCallbackManager()
callback = MockRequestCallback()
with self.assertRaises(ValueError):
cbm.add_callback("/test", callback)
self.assertEqual(None, cbm.callbacks_by_channel.get("/test"))
self.assertEqual(0, len(cbm.callbacks_by_channel))
| 47.676617
| 80
| 0.708964
| 1,077
| 9,583
| 6.020427
| 0.067781
| 0.152684
| 0.142505
| 0.213757
| 0.90438
| 0.895743
| 0.885719
| 0.885719
| 0.839451
| 0.807372
| 0
| 0.006632
| 0.166023
| 9,583
| 200
| 81
| 47.915
| 0.80468
| 0.022436
| 0
| 0.797546
| 0
| 0
| 0.024465
| 0
| 0
| 0
| 0
| 0
| 0.441718
| 1
| 0.116564
| false
| 0.02454
| 0.02454
| 0
| 0.165644
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
98ce8311b794b4ec9367117a45046c5556d9ec99
| 89,601
|
py
|
Python
|
src/yiheng_findfeatures/dialog_acts_extractor.py
|
s-akanksha/DialoGraph_ICLR21
|
d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc
|
[
"Apache-2.0"
] | null | null | null |
src/yiheng_findfeatures/dialog_acts_extractor.py
|
s-akanksha/DialoGraph_ICLR21
|
d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc
|
[
"Apache-2.0"
] | null | null | null |
src/yiheng_findfeatures/dialog_acts_extractor.py
|
s-akanksha/DialoGraph_ICLR21
|
d5bbc10b2623c9f84d21a99a5e54e7dcfdfb1bcc
|
[
"Apache-2.0"
] | null | null | null |
import json
from operator import add
from operator import sub
from sklearn.model_selection import cross_val_score
import random
import numpy as np
#import liwc_result_parser
import nltk
nltk.download('stopwords')
import re
from sklearn.svm import LinearSVC
from nltk.tokenize import word_tokenize
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
#import sentiment_from_liu
from . import read_dominance_arousal_valence
from nltk.util import ngrams
from nltk import pos_tag
from . import LIWC_Mapping
from sklearn.ensemble import RandomForestRegressor
from sklearn.ensemble import RandomForestClassifier
import math
from sklearn import datasets, linear_model
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.neural_network import MLPClassifier
from scipy.stats.stats import pearsonr
from sklearn.svm import SVR
from sklearn.neural_network import MLPRegressor
import sys
#sys.path.insert(0, '/projects/tir1/users/yihengz1/negotiation/evaluation/auto_labeling/multeval-0.5.1/')
#from calculate import feature_extractor
#from convert_sentence_to_parse_tree import string_to_phrases
import os
import sys
#from sklearn.externals import joblib
from collections import Counter
from importlib import reload
reload(sys)
import os
curr_file_path = os.path.dirname(os.path.abspath(__file__)) + '/'
#sys.setdefaultencoding('utf8')
lemmatizer = WordNetLemmatizer()
stopWords = stopwords.words('english')
feature_size = 47
recommendation_template = dict()
pos, neg = LIWC_Mapping.sentiment()
liwc_personal_concern = LIWC_Mapping.personal_concern()
liwc_family = LIWC_Mapping.family()
liwc_friend = LIWC_Mapping.friend()
liwc_i = LIWC_Mapping.i()
liwc_informal_dic = LIWC_Mapping.informal()
liwc_certain = LIWC_Mapping.certain()
lexicon_diff_dic_pos = ["years", "shape", "including", "yes", "apartment", "we've", "had", "good", "they", "got", "always", "works", "antique", "right", "some", "bluetooth", "tires", "are", "out", "even", "everything", "new", "we", "recently", "screen", "never", "free", "put", "months", "color", "quality", "from", "would", "it's", "there", "two", "been", "few", "too", "was", "selling", "that", "brand", "sound", "this", "car", "up", "need", "any", "i've", "amazing", "you", "nice", "used", "kept", "clean", "time"]
lexicon_diff_dic_neg = ["less", "excellent", "actually", "condition", "like", "tear", "miles", "year", "does", "newly", "come", "about", "many", "comes", "warranty", "features", "table", "your", "unit", "use", "area", "wood", "lot", "but", "scratches", "solid", "will", "piece", "almost", "as", "normal", "light", "well", "so", "original"]
hedges_word = {'claim': 11, 'presumably': 67, 'unclear': 60, 'often': 34, 'indicated': 26, 'feel': 20, 'seems': 48, 'mainly': 29, 'doubtful': 16, 'plausible': 37, 'argued': 74, 'likely': 28, 'unlikely': 62, "couldn't": 14, 'claimed': 12, 'estimated': 19, 'apparently': 3, 'supposes': 79, 'appeared': 5, 'relatively': 46, 'postulates': 81, 'guess': 24, 'appear': 4, 'would': 64, 'indicate': 25, 'perhaps': 36, 'assumed': 10, 'generally': 23, 'approximately': 7, 'should': 49, 'argues': 73, 'almost': 1, 'doubt': 15, 'suspect': 55, 'presumable': 43, 'indicates': 77, 'postulated': 42, 'probably': 45, 'postulate': 41, 'might': 32, 'ought': 35, 'supposed': 78, 'fairly': 69, 'apparent': 2, 'around': 8, 'mostly': 33, 'may': 30, 'plausibly': 38, 'felt': 21, 'essentially': 17, 'possible': 39, 'unclearly': 61, 'possibly': 40, 'feels': 76, 'somewhat': 51, 'frequently': 22, 'estimate': 18, 'quite': 70, 'appears': 6, 'probable': 44, 'suggested': 53, 'about': 0, 'uncertain': 58, 'suspects': 80, 'largely': 27, 'assume': 9, 'maybe': 31, 'could': 13, 'sometimes': 50, 'rather': 71, 'roughly': 47, 'suggests': 68, 'tended to': 66, 'uncertainly': 59, 'suppose': 54, 'broadly': 65, 'suggest': 52, 'usually': 63, 'claims': 75, 'argue': 72, 'typically': 57, 'typical': 56}
hedge_LIWC_word = {'think': 0, 'guesses': 28, 'consider': 6, 'basically': 73, 'understand': 12, 'generally': 57, 'estimates': 31, 'somehow': 77, 'speculates': 34, 'somebody': 83, 'understood': 14, 'likely': 45, 'guessed': 29, 'unlikely': 50, 'read': 53, 'speculated': 35, 'says': 55, 'seem': 21, 'estimated': 32, 'usually': 64, 'thinks': 1, 'seemed': 23, 'guess': 27, 'speculate': 33, 'appear': 18, 'suggests': 37, 'perhaps': 47, 'assumed': 11, 'apparently': 71, 'seems': 22, 'approximately': 74, 'find': 15, 'rarely': 59, 'appeared': 20, 'occasionally': 62, 'surely': 43, 'about': 70, 'probably': 44, 'several': 66, 'might': 42, 'something': 81, 'assumes': 10, 'virtually': 72, 'partially': 78, 'almost': 68, 'actually': 79, 'unsure': 48, 'somewhere': 84, 'may': 39, 'some': 67, 'supposes': 25, 'possible': 52, 'often': 58, 'possibly': 51, 'considers': 7, 'seldom': 63, 'somewhat': 76, 'practically': 69, 'frequently': 61, 'estimate': 30, 'believe': 3, 'appears': 19, 'probable': 49, 'suggested': 38, 'understands': 13, 'like': 80, 'largely': 56, 'considered': 8, 'should': 41, 'could': 40, 'sometimes': 60, 'say': 54, 'believes': 5, 'thought': 2, 'assume': 9, 'someone': 82, 'suppose': 24, 'supposed': 26, 'suggest': 36, 'believed': 4, 'found': 16, 'roughly': 75, 'finds': 17, 'maybe': 46, 'most': 65}
hedges_phrase = ["certain amount","certain extent","certain level", "from our perspective", "in general","in most cases","in most instances", "in our view", "on the whole", "from this perspective","from my perspective","in my view","in this view","in my opinion","in our opinion","to my knowledge", "tend to", "tends to"]
assertive = {'claim': 19, 'hypothesize': 28, 'presume': 63, 'figure': 8, 'predict': 36, 'hint': 27, 'prophesy': 37, 'insist': 31, 'testify': 46, 'imply': 29, 'vow': 49, 'expect': 3, 'deduce': 60, 'seem': 6, 'allege': 12, 'guarantee': 26, 'contend': 20, 'guess': 5, 'point out': 35, 'appear': 7, 'acknowledge': 9, 'suggest': 44, 'explain': 24, 'certify': 17, 'divulge': 22, 'write': 50, 'indicate': 30, 'charge': 18, 'swear': 45, 'suspect': 65, 'emphasize': 23, 'certain': 53, 'answer': 13, 'reply': 40, 'postulate': 38, 'surmise': 64, 'hope': 62, 'sure': 54, 'intimate': 32, 'agree': 51, 'assert': 15, 'mention': 34, 'state': 43, 'decide': 59, 'imagine': 4, 'report': 41, 'estimate': 61, 'believe': 1, 'calculate': 58, 'remark': 39, 'theorize': 47, 'evident': 57, 'affirm': 11, 'obvious': 56, 'clear': 55, 'grant': 25, 'say': 42, 'think': 0, 'afraid': 52, 'assure': 16, 'admit': 10, 'maintain': 33, 'suppose': 2, 'verify': 48, 'argue': 14, 'declare': 21}
factive = {'relevant': 22, 'regret': 2, 'discover': 4, 'see': 13, 'odd': 19, 'forget': 3, 'interesting': 21, 'suffice': 16, 'note': 6, 'strange': 20, 'sorry': 23, 'notice': 7, 'perceive': 9, 'resent': 14, 'observe': 8, 'know': 0, 'exciting': 24, 'realize': 1, 'care': 18, 'reveal': 12, 'remember': 11, 'recall': 10, 'bother': 17, 'learn': 5, 'amuse': 15}
factive_phrase = ["find out", "make sense", "found out", "makes sense", "made sense", "finds out"]
propose_keywords = {"$": 0, ".":1,"?":2,"could":3,"middle":4,"meet":5,"go":6,"deal":7,"come":8,"would":9,"ask":10,"will":11,"throw":12,"pick":13}
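# NOTE: the word -> integer dicts above (hedges_word, hedge_LIWC_word, assertive,
# factive, propose_keywords) are used as lexicons; the extraction code below only
# tests key membership (e.g. `word in hedges_word`), so the integer values are not
# consulted in this part of the file.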
dominance, valence, arousal = read_dominance_arousal_valence.get_dominance_valence_arousal()
greetings = ["greetings", "hi", "hello", "yo", "hey", "howdy", "sup", "hiya", "how's it going", "how are you", "what's up", "how's everything", "how's your day", "nice to meet you", "good morning", "good afternoon", "good evening"]
apology = ["apologize", "apology", "my bad", "my fault", "my mistake", "my apologies"]
gratitute = ["thank", "grateful", "thankful", "thanks", "appreciate"]
first_person_singular = ["i", "me", "mine", "my"]
first_person_plural = ["we", "our", "us", "ours"]
third_person_singular = ["he","she","it","his","her","him"]
third_person_plural = ["them","they","their"]
def extract_acts(dialog):
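# Extracts rule-based negotiation strategies from a single dialog.
# Returns (fine_intents, bag_of_strategies):
#   fine_intents      - per-utterance lists of strategy tags (the first entry is ["<start>"])
#   bag_of_strategies - one binary strategy-indicator vector per utterance,
#                       indexed via recommendation_feature_mapping below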
strategies = list()
lexicon_list = list()
total_dialogss = 0
positive_text = ""
negative_text = ""
strategy_embedding_text = ""
dialog_index = 0
lemmatizer = WordNetLemmatizer()
positive = 0
negative = 0
total_uterance = 0
pre_complex_features_index = 0
example_arousal = list()
example_arousal_score = list()
propose_hedge = 0
propose_count = 0
hedge_count = 0
liwc_authenticity_text = list()
#automatically label complex features
# complex_features = list()
#rule-based recommendation system
majority_rules = dict()
#complex feature calculator
pre_complex_features = list()
# des_classfier = joblib.load('/projects/tir1/users/rjoshi2/negotiation/yiheng_negotiation/evaluation/Classifier_With_Auto_Labeling/models/des.pkl')
# infer_classfier = joblib.load('/projects/tir1/users/rjoshi2/negotiation/yiheng_negotiation/evaluation/Classifier_With_Auto_Labeling/models/des.pkl')
# pata_classfier = joblib.load('/projects/tir1/users/rjoshi2/negotiation/yiheng_negotiation/evaluation/Classifier_With_Auto_Labeling/models/des.pkl')
# propose_classfier = joblib.load('/projects/tir1/users/rjoshi2/negotiation/yiheng_negotiation/evaluation/Classifier_With_Auto_Labeling/models/des.pkl')
# des_classfier = joblib.load(curr_file_path + 'des.pkl')
# infer_classfier = joblib.load(curr_file_path + 'infer.pkl')
# pata_classfier = joblib.load(curr_file_path + 'pata.pkl')
# propose_classfier = joblib.load(curr_file_path + 'propose.pkl')
categories = Counter()
uter_index_overall = 0
variance_examples_labels = {"seller_neg_sentiment":list(),"seller_pos_sentiment":list(),"first_person_plural_count_seller":list(),"first_person_singular_count_seller":list(),"third_person_singular_seller":list(),"third_person_plural_seller":list(),"seller_propose":list(),"hedge_count_seller":list(),"factive_count_seller":list(),"who_propose":list(),"seller_trade_in":list(),"sg_concern":list(),"liwc_certainty":list(),"liwc_informal":list(),"politeness_seller_please":list(),"politeness_seller_gratitude":list(),"politeness_seller_please_s":list(),"ap_des":list(),"ap_pata":list(),"ap_infer":list(),"family":list(),"friend":list(),"politeness_seller_greet":list()}
variance_examples = list()
#recommendation system: each feature set corresponds to one utterance
recommendation_data = list()
recommendation_feature_mapping = {"seller_neg_sentiment":0,"seller_pos_sentiment":1,"buyer_neg_sentiment":2,"buyer_pos_sentiment":3,"first_person_plural_count_seller":4,"first_person_singular_count_seller":5,"first_person_plural_count_buyer":6,"first_person_singular_count_buyer":7,"third_person_singular_seller":8,"third_person_plural_seller":9,"third_person_singular_buyer":10,"third_person_plural_buyer":11,"number_of_diff_dic_pos":12,"number_of_diff_dic_neg":13,"buyer_propose":14,"seller_propose":15,"hedge_count_seller":16,"hedge_count_buyer":17,"assertive_count_seller":18,"assertive_count_buyer":19,"factive_count_seller":20,"factive_count_buyer":21,"who_propose":22,"seller_trade_in":23,"personal_concern_seller":24,"sg_concern":25,"liwc_certainty":26,"liwc_informal":27,"politeness_seller_please":28,"politeness_seller_gratitude":29,"politeness_seller_please_s":30,"ap_des":31,"ap_pata":32,"ap_infer":33,"family":34,"friend":35,"politeness_buyer_please":36,"politeness_buyer_gratitude":37,"politeness_buyer_please_s":38,"politeness_seller_greet":39,"politeness_buyer_greet":40}
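# recommendation_feature_mapping gives the position of each strategy in the
# per-utterance binary vector (recommendation_data_uter) built inside the loop below.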
dialog_length = list()
recommendation_raw_utterance = list()
recommendation_product_description = list()
sequence_of_strategy = list()
#ngram = ""
ngram_dic = json.load(open(curr_file_path + "ngram_dic_cata"))
fine_intents = list()
total_dialogss += 1
# if "<selle>" not in dialog:
# continue
# if "<noise>" in dialog:
# continue
# if "<accept>" not in dialog:
# continue
#recommendation system
recommendation_raw_utterance_tmp = list()
strategy_sequences = list()
price = dialog["scenario"]["kbs"][1]["personal"]["Target"]
target = dialog["scenario"]["kbs"][0]["personal"]["Target"]
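# kbs[1] corresponds to agent 1, which is tagged "<selle>" below, so `price` is the
# seller-side target price and `target` is the buyer-side target price.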
complex_features_tmp = list()
tmp = list()
tmp_complex = [0,0,0,0,0]
#ngram_features = [0]*len(ngram_dic)
first_person_plural_count_buyer = 0
first_person_singular_count_buyer = 0
first_person_plural_count_seller = 0
first_person_singular_count_seller = 0
third_person_plural_buyer = 0
third_person_singular_buyer = 0
third_person_singular_seller = 0
third_person_plural_seller = 0
dominance_avg_seller = 0.0
dominance_count_seller = 0
dominance_avg_buyer = 0.0
dominance_count_buyer = 0
valence_avg_buyer = 0.0
arousal_avg_buyer = 0.0
valence_avg_seller = 0.0
arousal_avg_seller = 0.0
example_arousal_tmp = list()
number_of_diff_dic_pos = 0
number_of_diff_dic_neg = 0
total_words_seller = 0
total_words_buyer = 0
total_uterance_seller = 0
total_uterance_buyer = 0
final = 0
buyer_pos_sentiment = 0
buyer_neg_sentiment = 0
seller_pos_sentiment = 0
seller_neg_sentiment = 0
greetings_seller = 0
sg_concern = 0
politeness_seller_gratitude = 0.0
politeness_seller_please = 0.0
politeness_seller_apology = 0.0
politeness_seller_greetings = 0.0
politeness_seller_please_s = 0.0
politeness_buyer_gratitude = 0.0
politeness_buyer_please = 0.0
politeness_buyer_apology = 0.0
politeness_buyer_greetings = 0.0
politeness_buyer_please_s = 0.0
politeness_buyer = 0.0
social_distance_seller = 0.0
social_distance_count = 0.0
social_distance_buyer = 0.0
social_distance_count_buyer = 0.0
personal_concern_seller = 0
personal_concern_buyer = 0
greetings_buyer = 0
factive_count_seller = 0.0
factive_count_buyer = 0.0
hedge_count_seller = 0.0
hedge_count_buyer = 0.0
assertive_count_seller = 0.0
assertive_count_buyer = 0.0
buyer_first_price = 0.0
seller_first_price = 0.0
first_price = True
_first_price = True
buyer_propose = 0
seller_propose = 0
who_propose = 0
who_propose_visit = True
seller_trade_in = 0
seller_deliver = 0
buyer_trade_in = 0
buyer_ask_trade_in = 0
buyer_reject = 0
stat_tmp = list()
liwc_authenticity = 0.0
liwc_informal = 0.0
liwc_certainty = 0
propose_hedge_tmp = 0
propose_count_tmp = 0
past_tense = 0
uters = list()
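# Flatten the event stream into tagged utterance strings: free-text messages keep
# their content, while accept/reject/offer/quit events become special tokens,
# each prefixed with the speaker tag.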
for event in dialog["events"]:
if event["agent"] == 1:
agent = "<selle>"
else:
agent = "<buyer>"
if event["action"] == "message":
uters.append(agent + " " + event["data"])
elif event["action"] == "accept":
uters.append(agent + " " +"<accept>")
elif event["action"] == "reject":
uters.append(agent + " " +"<reject>")
elif event["action"] == "offer":
uters.append(agent + " " +"<offer " + str(event["data"]["price"]) + " >")
elif event["action"] == "quit":
uters.append(agent + " " +"<quit>")
#get rid of noise posts (the short-dialog filter below is currently commented out)
number_of_uter = len(uters)
# if number_of_uter <= 3:
# continue
uter_index = 0
portion_index = 1
buyer_propose_visit = True
seller_propose_visit = True
previous = ""
vocab_tmp = list()
tmp_strategies = list()
recommendation_data_uter_cumu = [0.0]*len(recommendation_feature_mapping)
strategy_embedding_text_dialog = ""
tmp_strategies_embedding_text = ""
fine_intents = list()
fine_intents.append(["<start>"])
bag_of_strategies = []
for u_index in range(len(uters)):
fine_intents.append([])
uter = uters[u_index]
keywords = dict()
tmp_strategy_sequences = list()
o_propose_visit = False
recommendation_data_uter = [0]*len(recommendation_feature_mapping)
previous_strategies_embedding = tmp_strategies_embedding_text
tmp_strategies_embedding_text = uter
tmp_strategies.append([uter])
if "<buyer>" in uter and "<offer " not in uter and "<accept>" not in uter:
#tmp_strategy_sequences.append("<buyer>")
if ("pick it up" in uter or "pick up" in uter):
buyer_trade_in += 1
fine_intents[-1].append("<buyer_trade_in>")
if ("throw in" in uter or "throwing in" in uter) and ("?" in uter or "if" in uter):
buyer_ask_trade_in = 1
fine_intents[-1].append("<buyer_trade_in>")
buyer_propose_visit = True
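# Heuristic for a buyer price proposal: any number in the utterance within
# 0.7x-1.2x of the buyer target that differs from the seller-side target `price`
# and is closer to the buyer target than the previously recorded first offer.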
if len(re.findall(r"\d+", uter)) > 0 or len(re.findall(r"[0-9]+,[0-9]+", uter)) > 0:
for possible_price in re.findall(r"\d+", uter) + re.findall(r"[0-9]+,[0-9]+", uter):
possible_price = possible_price.replace(",", "")
if 1.2 > float(possible_price)/float(target) > 0.7 and float(possible_price) != float(price) and abs(float(possible_price) - float(target)) < abs(float(buyer_first_price) - float(target)):
if who_propose_visit:
who_propose = 0
tmp_strategies[-1].append("<Wait_For_Buyer_Propose>")
who_propose_visit = False
if buyer_propose_visit:
buyer_propose += 1
tmp_strategy_sequences.append("<buyer_propose>")
fine_intents[-1].append("<buyer_propose>")
buyer_propose_visit = False
recommendation_data_uter[recommendation_feature_mapping["buyer_propose"]] = 1
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(possible_price, " <buyer_propose> ")
if first_price:
buyer_first_price = float(possible_price)
first_price = False
if buyer_first_price == 0.0:
first_price = True
if buyer_propose_visit and (("lowest" in uter and (("?" in uter) or ("what" in uter))) or ("price" in uter and "high" in uter) or ("price" in uter and "lower" in uter)):
buyer_reject += 1
previous_word = ""
word_tokenized = word_tokenize(uter)
#utterance-wise analysis
for greet_i in range(len(greetings)):
if greet_i <= 7:
if greetings[greet_i] in word_tokenized:
politeness_buyer_greetings += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_buyer_greet"]] = 1
tmp_strategy_sequences.append("<politeness_buyer_greet>")
fine_intents[-1].append("<politeness_buyer_greet>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + greetings[greet_i], " <politeness_buyer_greet> ")
else:
if greetings[greet_i] in uter:
politeness_buyer_greetings += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_buyer_greet"]] = 1
tmp_strategy_sequences.append("<politeness_buyer_greet>")
fine_intents[-1].append("<politeness_buyer_greet>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + greetings[greet_i], " <politeness_buyer_greet> ")
for grad_i in range(len(gratitute)):
if gratitute[grad_i] in word_tokenized:
politeness_buyer_gratitude += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_buyer_gratitude"]] = 1
tmp_strategy_sequences.append("<politeness_buyer_gratitude>")
fine_intents[-1].append("<politeness_buyer_gratitude>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + gratitute[grad_i], " <politeness_buyer_gratitude> ")
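# "please"/"pls" detection: if it starts the utterance or a sentence (previous token
# is ">", ".", "?" or "!") it counts as the _please_s variant, otherwise as _please.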
please_index = -1
for word_i in range(len(word_tokenized)):
if word_tokenized[word_i] == "please" or word_tokenized[word_i] == "pls":
please_index = word_i
break
if please_index != -1:
if word_tokenized[please_index-1] != ">" and word_tokenized[please_index-1] != "." and word_tokenized[please_index-1] != "?" and word_tokenized[please_index-1] != "!":
politeness_buyer_please += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_buyer_please"]] = 1
tmp_strategy_sequences.append("<politeness_buyer_please>")
fine_intents[-1].append("<politeness_buyer_please>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" please", " <politeness_buyer_please> ").replace("pls", "<politeness_buyer_please>")
else:
politeness_buyer_please_s += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_buyer_please_s"]] = 1
tmp_strategy_sequences.append("<politeness_buyer_please_s>")
fine_intents[-1].append("<politeness_buyer_please>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" please", " <politeness_buyer_please_s> ").replace("pls", "<politeness_buyer_please_s>")
for word in word_tokenized:
word = lemmatizer.lemmatize(word)
if word in liwc_friend and (previous_word != "your" and previous_word != "ur"):
social_distance_buyer += 1.0
social_distance_count_buyer += 1.0
if word in liwc_family and (previous_word != "your" and previous_word != "ur"):
social_distance_buyer += 0.0
social_distance_count_buyer += 1.0
if word in factive:
factive_count_buyer += 1
recommendation_data_uter[recommendation_feature_mapping["factive_count_buyer"]] = 1
tmp_strategy_sequences.append("<factive_count_buyer>")
fine_intents[-1].append("<factive_count_buyer>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <factive_count_buyer> ")
if word in first_person_singular:
first_person_singular_count_buyer += 1
recommendation_data_uter[recommendation_feature_mapping["first_person_singular_count_buyer"]] = 1
tmp_strategy_sequences.append("<first_person_singular_count_buyer>")
fine_intents[-1].append("<first_person_singular_count_buyer>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word +" ", " <first_person_singular_count_buyer> ")
elif word in first_person_plural:
first_person_plural_count_buyer += 1
recommendation_data_uter[recommendation_feature_mapping["first_person_plural_count_buyer"]] = 1
tmp_strategy_sequences.append("<first_person_plural_count_buyer>")
fine_intents[-1].append("<first_person_plural_count_buyer>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <first_person_plural_count_buyer> ")
elif word in third_person_plural:
third_person_plural_buyer += 1
recommendation_data_uter[recommendation_feature_mapping["third_person_plural_buyer"]] = 1
tmp_strategy_sequences.append("<third_person_plural_buyer>")
fine_intents[-1].append("<third_person_plural_buyer>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <third_person_plural_buyer> ")
elif word in third_person_singular:
third_person_singular_buyer += 1
recommendation_data_uter[recommendation_feature_mapping["third_person_singular_buyer"]] = 1
tmp_strategy_sequences.append("<third_person_singular_buyer>")
fine_intents[-1].append("<third_person_singular_buyer>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <third_person_singular_buyer> ")
if word in assertive:
assertive_count_buyer += 1
recommendation_data_uter[recommendation_feature_mapping["assertive_count_buyer"]] = 1
tmp_strategy_sequences.append("<assertive_count_buyer>")
fine_intents[-1].append("<assertive_count_buyer>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <assertive_count_buyer> ")
if word in hedges_word:
if _first_price and first_price:
example_arousal_tmp.append(word + "," + "N/A" + "," + uter.replace(",", ";"))
hedge_count_buyer += 1
recommendation_data_uter[recommendation_feature_mapping["hedge_count_buyer"]] = 1
tmp_strategy_sequences.append("<hedge_count_buyer>")
fine_intents[-1].append("<hedge_count_buyer>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <hedge_count_buyer> ")
if word in pos:
buyer_pos_sentiment += 1
recommendation_data_uter[recommendation_feature_mapping["buyer_pos_sentiment"]] = 1
tmp_strategy_sequences.append("<buyer_pos_sentiment>")
fine_intents[-1].append("<buyer_pos_sentiment>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <buyer_pos_sentiment> ")
if word in neg:
buyer_neg_sentiment += 1
recommendation_data_uter[recommendation_feature_mapping["buyer_neg_sentiment"]] = 1
tmp_strategy_sequences.append("<buyer_neg_sentiment>")
fine_intents[-1].append("<buyer_neg_sentiment>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <buyer_neg_sentiment> ")
if word in dominance:
dominance_count_buyer += 1
dominance_avg_buyer += dominance[word]
valence_avg_buyer += valence[word]
arousal_avg_buyer += arousal[word]
total_words_buyer += 1
stat_tmp.append(word)
vocab_tmp.append(word)
previous_word = word
total_uterance_buyer += 1
recommendation_data_uter_cumu = [a + b for a, b in zip(recommendation_data_uter_cumu, recommendation_data_uter[:-2])]
recommendation_raw_utterance_tmp.append(tmp_strategies_embedding_text)
strategy_sequences.append(tmp_strategy_sequences)
if "<selle>" in uter and "<offer " not in uter and "<accept>" not in uter:
variance_examples.append(uter)
for key in variance_examples_labels:
variance_examples_labels[key].append(0)
#tmp_strategy_sequences.append("<selle>")
if "throw in" in uter or "throwing in" in uter:
seller_trade_in = 1
recommendation_data_uter[recommendation_feature_mapping["seller_trade_in"]] = 1
tmp_strategy_sequences.append("<seller_trade_in>")
fine_intents[-1].append("<seller_trade_in>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace("throw in", "<seller_trade_in>").replace("throwing in", "<seller_trade_in>")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Trade_In>")
else:
tmp_strategies[-1].append("<2_Trade_In>")
variance_examples_labels["seller_trade_in"][-1] = 1
if "deliver" in uter:
recommendation_data_uter[recommendation_feature_mapping["seller_trade_in"]] = 1
fine_intents[-1].append("<seller_trade_in>")
seller_deliver += 1
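# Heuristic for a seller price proposal: a number between 0.7x and 1.0x of the
# seller target that is closer to that target than the buyer's first offer;
# who_propose is flipped to 1 when the seller is the first to name such a price.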
if len(re.findall(r"\d+", uter)) > 0:
#seller_propose_visit = True
for possible_price in re.findall(r"\d+", uter):
if 1 > float(possible_price)/float(price) > 0.7 and abs(float(possible_price) - float(price)) < abs(float(buyer_first_price) - float(price)):
if seller_propose_visit:
seller_propose += 1
tmp_strategy_sequences.append("<seller_propose>")
fine_intents[-1].append("<seller_propose>")
recommendation_data_uter[recommendation_feature_mapping["seller_propose"]] = 1
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(possible_price, "<seller_propose>")
tmp_strategies[-1].append("<Propose_New_Price>")
seller_propose_visit = False
variance_examples_labels["seller_propose"][-1] = 1
if _first_price:
seller_first_price = float(possible_price)
if 1 > float(possible_price)/float(price) > 0.5:
if who_propose_visit:
who_propose = 1
recommendation_data_uter[recommendation_feature_mapping["who_propose"]] = 1
who_propose_visit = False
variance_examples_labels["who_propose"][-1] = 1
_first_price = False
if seller_first_price == 0.0:
_first_price = True
word_tokenized = word_tokenize(uter)
# TODO (INSERT CODE HERE FOR CLASSIFIER BASED STRATEGIES)
#utterance-wise analysis
for greet_i in range(len(greetings)):
if greet_i <= 7:
if greetings[greet_i] in word_tokenized:
politeness_seller_greetings += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_seller_greet"]] = 1
tmp_strategy_sequences.append("<politeness_seller_greet>")
fine_intents[-1].append("<politeness_seller_greet>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + greetings[greet_i], " <politeness_seller_greet> ")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Greetings>")
else:
tmp_strategies[-1].append("<2_Greetings>")
variance_examples_labels["politeness_seller_greet"][-1] = 1
else:
if greetings[greet_i] in uter:
politeness_seller_greetings += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_seller_greet"]] = 1
tmp_strategy_sequences.append("<politeness_seller_greet>")
fine_intents[-1].append("<politeness_seller_greet>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + greetings[greet_i], " <politeness_seller_greet> ")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Greetings>")
else:
tmp_strategies[-1].append("<2_Greetings>")
variance_examples_labels["politeness_seller_greet"][-1] = 1
for sorry_i in range(len(apology)):
if sorry_i <= 1:
if apology[sorry_i] in word_tokenized:
politeness_seller_apology += 1
if first_price and _first_price:
tmp_strategies[-1].append("<1_Apology>")
else:
tmp_strategies[-1].append("<2_Apology>")
else:
if apology[sorry_i] in uter:
politeness_seller_apology += 1
if first_price and _first_price:
tmp_strategies[-1].append("<1_Apology>")
else:
tmp_strategies[-1].append("<2_Apology>")
for grad_i in range(len(gratitute)):
if gratitute[grad_i] in word_tokenized:
politeness_seller_gratitude += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_seller_gratitude"]] = 1
tmp_strategy_sequences.append("<politeness_seller_gratitude>")
fine_intents[-1].append("<politeness_seller_gratitude>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + gratitute[grad_i], " <politeness_seller_gratitude> ")
if first_price and _first_price:
tmp_strategies[-1].append("<1_gratitude>")
else:
tmp_strategies[-1].append("<2_gratitude>")
variance_examples_labels["politeness_seller_gratitude"][-1] = 1
please_index = -1
for word_i in range(len(word_tokenized)):
if word_tokenized[word_i] == "please" or word_tokenized[word_i] == "pls":
please_index = word_i
break
if please_index != -1:
if word_tokenized[please_index-1] != ">" and word_tokenized[please_index-1] != "." and word_tokenized[please_index-1] != "?" and word_tokenized[please_index-1] != "!":
politeness_seller_please += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_seller_please"]] = 1
tmp_strategy_sequences.append("<politeness_seller_please>")
fine_intents[-1].append("<politeness_seller_please>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace("please", "<politeness_seller_please>").replace("pls", "<politeness_seller_please>")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Please>")
else:
tmp_strategies[-1].append("<2_Please>")
variance_examples_labels["politeness_seller_please"][-1] = 1
else:
politeness_seller_please_s += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_seller_please_s"]] = 1
tmp_strategy_sequences.append("<politeness_seller_please_s>")
fine_intents[-1].append("<politeness_seller_please_s>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace("please", "<politeness_seller_please_s>").replace("pls", "<politeness_seller_please_s>")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Please_Start>")
else:
tmp_strategies[-1].append("<2_Please_Start>")
variance_examples_labels["politeness_seller_please_s"][-1] = 1
previous_word = ""
for word in word_tokenized:
word = lemmatizer.lemmatize(word)
if word in liwc_informal_dic and word != "ha" and word != "yes" and word != "like" and word != "absolutely" and word != "agree" and word != "ok":
if word != "well":
liwc_informal += 1
recommendation_data_uter[recommendation_feature_mapping["liwc_informal"]] = 1
tmp_strategy_sequences.append("<liwc_informal>")
fine_intents[-1].append("<liwc_informal>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <liwc_informal> ")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Informal_Word>")
else:
tmp_strategies[-1].append("<2_Informal_Word>")
variance_examples_labels["liwc_informal"][-1] = 1
else:
if previous_word == ">":
if first_price and _first_price:
tmp_strategies[-1].append("<1_Informal_Word>")
else:
tmp_strategies[-1].append("<2_Informal_Word>")
liwc_informal += 1
recommendation_data_uter[recommendation_feature_mapping["liwc_informal"]] = 1
tmp_strategy_sequences.append("<liwc_informal>")
fine_intents[-1].append("<liwc_informal>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <liwc_informal> ")
variance_examples_labels["liwc_informal"][-1] = 1
if word in liwc_certain and word != "certain":
if word == "sure":
if previous_word != "not" and previous_word != ">" and "?" not in uter:
liwc_certainty += 1
recommendation_data_uter[recommendation_feature_mapping["liwc_certainty"]] = 1
tmp_strategy_sequences.append("<liwc_certainty>")
fine_intents[-1].append("<liwc_certainty>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <liwc_certainty> ")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Certain_Word>")
else:
tmp_strategies[-1].append("<2_Certain_Word>")
variance_examples_labels["liwc_certainty"][-1] = 1
# if not (first_price and _first_price):
# print word + "," + uter.replace(",","")
else:
liwc_certainty += 1
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <liwc_certainty> ")
recommendation_data_uter[recommendation_feature_mapping["liwc_certainty"]] = 1
tmp_strategy_sequences.append("<liwc_certainty>")
fine_intents[-1].append("<liwc_certainty>")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Certain_Word>")
else:
tmp_strategies[-1].append("<2_Certain_Word>")
variance_examples_labels["liwc_certainty"][-1] = 1
# if not (first_price and _first_price):
# print word + "," + uter.replace(",","")
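# Social distance: friend words add 1.0 and family words add 0.0 to
# social_distance_seller, while both increment social_distance_count, so the later
# ratio social_distance_seller/social_distance_count acts as a friend-vs-family
# social-distance feature (2.0 is appended when no such words occur).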
if word in liwc_friend and (previous_word != "your" and previous_word != "ur") and not word.startswith("bud"):
social_distance_seller += 1.0
social_distance_count += 1.0
recommendation_data_uter[recommendation_feature_mapping["friend"]] = 1
tmp_strategy_sequences.append("<friend>")
fine_intents[-1].append("<friend>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <friend> ")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Friend_Word>")
else:
tmp_strategies[-1].append("<2_Friend_Word>")
variance_examples_labels["friend"][-1] = 1
if word in liwc_family and (previous_word != "your" and previous_word != "ur"):
if word == "family":
if previous_word == "my":
social_distance_seller += 0.0
recommendation_data_uter[recommendation_feature_mapping["family"]] = 1
tmp_strategy_sequences.append("<family>")
fine_intents[-1].append("<family>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <family> ")
social_distance_count += 1.0
if first_price and _first_price:
tmp_strategies[-1].append("<1_Family>")
else:
tmp_strategies[-1].append("<2_Family>")
variance_examples_labels["family"][-1] = 1
else:
social_distance_seller += 0.0
recommendation_data_uter[recommendation_feature_mapping["family"]] = 1
tmp_strategy_sequences.append("<family>")
fine_intents[-1].append("<family>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <family> ")
social_distance_count += 1.0
if first_price and _first_price:
tmp_strategies[-1].append("<1_Family>")
else:
tmp_strategies[-1].append("<2_Family>")
variance_examples_labels["family"][-1] = 1
if word in liwc_personal_concern:
personal_concern_seller += 1
recommendation_data_uter[recommendation_feature_mapping["personal_concern_seller"]] = 1
tmp_strategy_sequences.append("<personal_concern_seller>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <personal_concern_seller> ")
if word in factive:
keywords["factive_count_seller"] = word
factive_count_seller += 1
recommendation_data_uter[recommendation_feature_mapping["factive_count_seller"]] = 1
tmp_strategy_sequences.append("<factive_count_seller>")
fine_intents[-1].append("<factive_count_seller>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <factive_count_seller> ")
variance_examples_labels["factive_count_seller"][-1] = 1
if word in first_person_singular:
first_person_singular_count_seller += 1
recommendation_data_uter[recommendation_feature_mapping["first_person_singular_count_seller"]] = 1
tmp_strategy_sequences.append("<first_person_singular_count_seller>")
fine_intents[-1].append("<first_person_singular_count_seller>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word +" ", " <first_person_singular_count_seller> ")
variance_examples_labels["first_person_singular_count_seller"][-1] = 1
elif word in first_person_plural:
first_person_plural_count_seller += 1
recommendation_data_uter[recommendation_feature_mapping["first_person_plural_count_seller"]] = 1
tmp_strategy_sequences.append("<first_person_plural_count_seller>")
fine_intents[-1].append("<first_person_plural_count_seller>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <first_person_plural_count_seller> ")
variance_examples_labels["first_person_plural_count_seller"][-1] = 1
elif word in third_person_plural:
third_person_plural_seller += 1
recommendation_data_uter[recommendation_feature_mapping["third_person_plural_seller"]] = 1
tmp_strategy_sequences.append("<third_person_plural_seller>")
fine_intents[-1].append("<third_person_plural_seller>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <third_person_plural_seller> ")
variance_examples_labels["third_person_plural_seller"][-1] = 1
elif word in third_person_singular:
third_person_singular_seller += 1
recommendation_data_uter[recommendation_feature_mapping["third_person_singular_seller"]] = 1
tmp_strategy_sequences.append("<third_person_singular_seller>")
fine_intents[-1].append("<third_person_singular_seller>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <third_person_singular_seller> ")
variance_examples_labels["third_person_singular_seller"][-1] = 1
if word in dominance:
dominance_count_seller += 1
dominance_avg_seller += dominance[word]
valence_avg_seller += valence[word]
arousal_avg_seller += arousal[word]
if word in assertive:
keywords["assertive_count_seller"] = word
assertive_count_seller += 1
recommendation_data_uter[recommendation_feature_mapping["assertive_count_seller"]] = 1
tmp_strategy_sequences.append("<assertive_count_seller>")
fine_intents[-1].append("<assertive_count_seller>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <assertive_count_seller> ")
if word in hedges_word:
#print word + "," + uter.replace(",","")
keywords["hedge_count_seller"] = word
recommendation_data_uter[recommendation_feature_mapping["hedge_count_seller"]] = 1
tmp_strategy_sequences.append("<hedge_count_seller>")
fine_intents[-1].append("<hedge_count_seller>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <hedge_count_seller> ")
hedge_count_seller += 1
if o_propose_visit:
propose_hedge_tmp += 1
variance_examples_labels["hedge_count_seller"][-1] = 1
if word in pos:
seller_pos_sentiment += 1
recommendation_data_uter[recommendation_feature_mapping["seller_pos_sentiment"]] = 1
tmp_strategy_sequences.append("<seller_pos_sentiment>")
fine_intents[-1].append("<seller_pos_sentiment>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <seller_pos_sentiment> ")
variance_examples_labels["seller_pos_sentiment"][-1] = 1
if word in neg:
seller_neg_sentiment += 1
recommendation_data_uter[recommendation_feature_mapping["seller_neg_sentiment"]] = 1
tmp_strategy_sequences.append("<seller_neg_sentiment>")
fine_intents[-1].append("<seller_neg_sentiment>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <seller_neg_sentiment> ")
variance_examples_labels["seller_neg_sentiment"][-1] = 1
if word in lexicon_diff_dic_pos:
number_of_diff_dic_pos += 1
recommendation_data_uter[recommendation_feature_mapping["number_of_diff_dic_pos"]] = 1
tmp_strategy_sequences.append("<number_of_diff_dic_pos>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <number_of_diff_dic_pos> ")
if word in lexicon_diff_dic_neg:
number_of_diff_dic_neg += 1
recommendation_data_uter[recommendation_feature_mapping["number_of_diff_dic_neg"]] = 1
tmp_strategy_sequences.append("<number_of_diff_dic_neg>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <number_of_diff_dic_neg> ")
total_words_seller += 1
stat_tmp.append(word)
vocab_tmp.append(word)
previous_word = word
total_uterance_seller += 1
recommendation_data_uter_cumu = [a + b for a, b in zip(recommendation_data_uter_cumu, recommendation_data_uter[:-2])]
recommendation_raw_utterance_tmp.append(tmp_strategies_embedding_text)
strategy_sequences.append(tmp_strategy_sequences)
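# majority_rules counts, for each observed strategy set, which strategy set follows
# it in the next utterance (a simple bigram table for the rule-based recommender),
# and recommendation_template (defined outside this snippet) keeps one example
# utterance plus its trigger keywords per unique binary strategy pattern.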
if len(strategy_sequences) > 1:
if ",".join(strategy_sequences[-2]) not in majority_rules:
majority_rules[",".join(strategy_sequences[-2])] = Counter()
majority_rules[",".join(strategy_sequences[-2])][",".join(strategy_sequences[-1])]+=1
else:
majority_rules[",".join(strategy_sequences[-2])][",".join(strategy_sequences[-1])]+=1
recommendation_template_tmp = [1,0] + recommendation_data_uter[:-2]
key = "".join([str(int(a)) for a in recommendation_template_tmp])
if key not in recommendation_template and keywords:
recommendation_template[key] = [uter, keywords]
uter_index += 1
bag_of_strategies.append(recommendation_data_uter)
strategy_embedding_text_dialog += previous_strategies_embedding + " " + tmp_strategies_embedding_text + "\n"
fine_intents[-1].append("<end>")
# if "<offer " in uter:
# final = re.findall(r'\d+', uter)[0]
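# Once at least one side has named a first price (first portion) or the dialog has
# ended, dump the accumulated counts into the flat feature vector `tmp`; if this is
# not the final utterance, the per-portion counters are reset further below.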
if (not (first_price and _first_price) and portion_index == 1) or (uter_index == number_of_uter): # #uter_index >= portion_index*number_of_uter/4.0 and portion_index <= 4:
if len(tmp) == 0 and uter_index == number_of_uter:
tmp = [0.0]*feature_size
tmp[-10] = who_propose
# if portion_index == 1:
# stage_1_stat += vocab_tmp
# vocab_tmp = list()
# else:
# stage_2_stat += vocab_tmp
portion_index += 1
tmp += tmp_complex
#sentiment features
tmp.append(seller_neg_sentiment)
tmp.append(seller_pos_sentiment)
tmp.append(buyer_neg_sentiment)
tmp.append(buyer_pos_sentiment)
#stubborn features
if (dominance_count_buyer != 0):
tmp.append(dominance_avg_buyer/dominance_count_buyer)
tmp.append(arousal_avg_buyer/dominance_count_buyer)
else:
tmp.append(0)
tmp.append(0)
if (dominance_count_seller != 0):
tmp.append(dominance_avg_seller/dominance_count_seller)
tmp.append(arousal_avg_seller/dominance_count_seller)
else:
tmp.append(0)
tmp.append(0)
tmp.append(first_person_plural_count_seller)
tmp.append(first_person_singular_count_seller)
tmp.append(first_person_plural_count_buyer)
tmp.append(first_person_singular_count_buyer)
tmp.append(third_person_singular_seller)
tmp.append(third_person_plural_seller)
tmp.append(third_person_singular_buyer)
tmp.append(third_person_plural_buyer)
tmp.append(number_of_diff_dic_pos)
tmp.append(number_of_diff_dic_neg)
#most informative ones
if total_uterance_seller == 0:
tmp.append(0)
else:
tmp.append(total_words_seller/total_uterance_seller)
if total_uterance_buyer == 0:
tmp.append(0)
else:
tmp.append(total_words_buyer/total_uterance_buyer)
tmp.append(buyer_propose)
tmp.append(seller_propose)
tmp.append(float(buyer_first_price))
tmp.append(float(seller_first_price))
tmp.append(float(price))
tmp.append((float(buyer_first_price) - float(price))/float(price))
tmp.append(hedge_count_seller)
tmp.append(hedge_count_buyer)
tmp.append(assertive_count_seller)
tmp.append(assertive_count_buyer)
tmp.append(factive_count_seller)
tmp.append(factive_count_buyer)
tmp.append(who_propose)
tmp.append(seller_trade_in)
tmp.append(personal_concern_seller)
tmp.append(sg_concern)
if social_distance_count != 0:
tmp.append(social_distance_seller/social_distance_count)
else:
tmp.append(2.0)
tmp.append(liwc_certainty)
tmp.append(liwc_informal)
#tmp.append(politeness_seller_apology)
#tmp.append(politeness_seller_greetings)
tmp.append(politeness_seller_please)
tmp.append(politeness_seller_gratitude)
tmp.append(politeness_seller_please_s)
if uter_index != number_of_uter:
tmp_complex = [0,0,0,0,0]
seller_neg_sentiment = 0.0
seller_pos_sentiment = 0.0
buyer_neg_sentiment = 0.0
buyer_pos_sentiment = 0.0
dominance_avg_buyer = 0.0
dominance_count_buyer = 0.0
arousal_avg_buyer = 0.0
dominance_count_buyer = 0.0
dominance_avg_seller = 0.0
dominance_count_seller = 0.0
arousal_avg_seller = 0.0
dominance_count_seller = 0.0
first_person_plural_count_seller = 0.0
first_person_singular_count_seller = 0.0
first_person_plural_count_buyer = 0.0
first_person_singular_count_buyer = 0.0
third_person_singular_seller = 0.0
third_person_plural_seller = 0.0
third_person_singular_buyer = 0.0
third_person_plural_buyer = 0.0
number_of_diff_dic_pos = 0.0
number_of_diff_dic_neg = 0.0
total_uterance_seller = 0.0
total_words_seller = 0.0
total_words_buyer = 0.0
total_uterance_buyer = 0.0
buyer_propose = 0.0
seller_propose = 0.0
hedge_count_seller = 0.0
hedge_count_buyer = 0.0
assertive_count_seller = 0.0
assertive_count_buyer = 0.0
factive_count_seller = 0.0
factive_count_buyer = 0.0
seller_trade_in = 0
personal_concern_seller = 0
sg_concern = 0
social_distance_seller = 0.0
social_distance_count = 0.0
liwc_certainty = 0.0
liwc_informal = 0.0
#politeness_seller_apology = 0.0
#politeness_seller_greetings = 0.0
politeness_seller_please = 0.0
politeness_seller_gratitude = 0.0
politeness_seller_please_s = 0.0
previous = uter
uter_index_overall += 1
return fine_intents, bag_of_strategies
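# Minimal usage sketch (hedged: the file name and outer list structure are
# hypothetical, assuming CraigslistBargain-style dialogs with "scenario"/"kbs"
# targets and an "events" list as used above):
#
#     with open("craigslist_dev.json") as f:      # hypothetical path
#         dialogs = json.load(f)
#     fine_intents, strategy_vectors = extract_acts(dialogs[0])
#     # fine_intents[0] is ["<start>"]; entry i+1 describes utterance i
#     for tags, vec in zip(fine_intents[1:], strategy_vectors):
#         print(tags, sum(vec))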
def extract_seq_acts(dialog):
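# Variant of extract_acts that additionally builds `extracted_seqs`: for each
# utterance a token-aligned list (initialised to -1 per whitespace token) in which
# positions that triggered a strategy are overwritten with the corresponding tag,
# giving a word-level strategy alignment.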
strategies = list()
extracted_seqs = list()
lexicon_list = list()
total_dialogss = 0
positive_text = ""
negative_text = ""
strategy_embedding_text = ""
dialog_index = 0
lemmatizer = WordNetLemmatizer()
positive = 0
negative = 0
total_uterance = 0
pre_complex_features_index = 0
example_arousal = list()
example_arousal_score = list()
propose_hedge = 0
propose_count = 0
hedge_count = 0
liwc_authenticity_text = list()
#automatically label complex features
# complex_features = list()
#rule-based recommendation system
majority_rules = dict()
#complex feature calculator
pre_complex_features = list()
categories = Counter()
uter_index_overall = 0
variance_examples_labels = {"seller_neg_sentiment":list(),"seller_pos_sentiment":list(),"first_person_plural_count_seller":list(),"first_person_singular_count_seller":list(),"third_person_singular_seller":list(),"third_person_plural_seller":list(),"seller_propose":list(),"hedge_count_seller":list(),"factive_count_seller":list(),"who_propose":list(),"seller_trade_in":list(),"sg_concern":list(),"liwc_certainty":list(),"liwc_informal":list(),"politeness_seller_please":list(),"politeness_seller_gratitude":list(),"politeness_seller_please_s":list(),"ap_des":list(),"ap_pata":list(),"ap_infer":list(),"family":list(),"friend":list(),"politeness_seller_greet":list()}
variance_examples = list()
#recommendation system: each feature set corresponds to one utterance
recommendation_data = list()
recommendation_feature_mapping = {"seller_neg_sentiment":0,"seller_pos_sentiment":1,"buyer_neg_sentiment":2,"buyer_pos_sentiment":3,"first_person_plural_count_seller":4,"first_person_singular_count_seller":5,"first_person_plural_count_buyer":6,"first_person_singular_count_buyer":7,"third_person_singular_seller":8,"third_person_plural_seller":9,"third_person_singular_buyer":10,"third_person_plural_buyer":11,"number_of_diff_dic_pos":12,"number_of_diff_dic_neg":13,"buyer_propose":14,"seller_propose":15,"hedge_count_seller":16,"hedge_count_buyer":17,"assertive_count_seller":18,"assertive_count_buyer":19,"factive_count_seller":20,"factive_count_buyer":21,"who_propose":22,"seller_trade_in":23,"personal_concern_seller":24,"sg_concern":25,"liwc_certainty":26,"liwc_informal":27,"politeness_seller_please":28,"politeness_seller_gratitude":29,"politeness_seller_please_s":30,"ap_des":31,"ap_pata":32,"ap_infer":33,"family":34,"friend":35,"politeness_buyer_please":36,"politeness_buyer_gratitude":37,"politeness_buyer_please_s":38,"politeness_seller_greet":39,"politeness_buyer_greet":40}
dialog_length = list()
recommendation_raw_utterance = list()
recommendation_product_description = list()
sequence_of_strategy = list()
#ngram = ""
ngram_dic = json.load(open(curr_file_path + "ngram_dic_cata"))
fine_intents = list()
total_dialogss += 1
# if "<selle>" not in dialog:
# continue
# if "<noise>" in dialog:
# continue
# if "<accept>" not in dialog:
# continue
#recommendation system
recommendation_raw_utterance_tmp = list()
strategy_sequences = list()
price = dialog["scenario"]["kbs"][1]["personal"]["Target"]
target = dialog["scenario"]["kbs"][0]["personal"]["Target"]
complex_features_tmp = list()
tmp = list()
tmp_complex = [0,0,0,0,0]
#ngram_features = [0]*len(ngram_dic)
first_person_plural_count_buyer = 0
first_person_singular_count_buyer = 0
first_person_plural_count_seller = 0
first_person_singular_count_seller = 0
third_person_plural_buyer = 0
third_person_singular_buyer = 0
third_person_singular_seller = 0
third_person_plural_seller = 0
dominance_avg_seller = 0.0
dominance_count_seller = 0
dominance_avg_buyer = 0.0
dominance_count_buyer = 0
valence_avg_buyer = 0.0
arousal_avg_buyer = 0.0
valence_avg_seller = 0.0
arousal_avg_seller = 0.0
example_arousal_tmp = list()
number_of_diff_dic_pos = 0
number_of_diff_dic_neg = 0
total_words_seller = 0
total_words_buyer = 0
total_uterance_seller = 0
total_uterance_buyer = 0
final = 0
buyer_pos_sentiment = 0
buyer_neg_sentiment = 0
seller_pos_sentiment = 0
seller_neg_sentiment = 0
greetings_seller = 0
sg_concern = 0
politeness_seller_gratitude = 0.0
politeness_seller_please = 0.0
politeness_seller_apology = 0.0
politeness_seller_greetings = 0.0
politeness_seller_please_s = 0.0
politeness_buyer_gratitude = 0.0
politeness_buyer_please = 0.0
politeness_buyer_apology = 0.0
politeness_buyer_greetings = 0.0
politeness_buyer_please_s = 0.0
politeness_buyer = 0.0
social_distance_seller = 0.0
social_distance_count = 0.0
social_distance_buyer = 0.0
social_distance_count_buyer = 0.0
personal_concern_seller = 0
personal_concern_buyer = 0
greetings_buyer = 0
factive_count_seller = 0.0
factive_count_buyer = 0.0
hedge_count_seller = 0.0
hedge_count_buyer = 0.0
assertive_count_seller = 0.0
assertive_count_buyer = 0.0
buyer_first_price = 0.0
seller_first_price = 0.0
first_price = True
_first_price = True
buyer_propose = 0
seller_propose = 0
who_propose = 0
who_propose_visit = True
seller_trade_in = 0
seller_deliver = 0
buyer_trade_in = 0
buyer_ask_trade_in = 0
buyer_reject = 0
stat_tmp = list()
liwc_authenticity = 0.0
liwc_informal = 0.0
liwc_certainty = 0
propose_hedge_tmp = 0
propose_count_tmp = 0
past_tense = 0
uters = list()
for event in dialog["events"]:
if event["agent"] == 1:
agent = "<selle>"
else:
agent = "<buyer>"
if event["action"] == "message":
uters.append(agent + " " + event["data"])
elif event["action"] == "accept":
uters.append(agent + " " +"<accept>")
elif event["action"] == "reject":
uters.append(agent + " " +"<reject>")
elif event["action"] == "offer":
uters.append(agent + " " +"<offer " + str(event["data"]["price"]) + " >")
elif event["action"] == "quit":
uters.append(agent + " " +"<quit>")
#get rid of noise posts (the short-dialog filter below is currently commented out)
number_of_uter = len(uters)
# if number_of_uter <= 3:
# continue
uter_index = 0
portion_index = 1
buyer_propose_visit = True
seller_propose_visit = True
previous = ""
vocab_tmp = list()
tmp_strategies = list()
recommendation_data_uter_cumu = [0.0]*len(recommendation_feature_mapping)
strategy_embedding_text_dialog = ""
tmp_strategies_embedding_text = ""
fine_intents = list()
fine_intents.append(["<start>"])
bag_of_strategies = []
for u_index in range(len(uters)):
fine_intents.append([])
uter = uters[u_index]
uter_split = uter.split(" ")
extracted_seqs.append([-1] * len(uter_split))
keywords = dict()
tmp_strategy_sequences = list()
word_tokenized = uter_split
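# Unlike extract_acts, tokens come from a plain whitespace split so that the
# indices written into extracted_seqs line up with positions in uter_split.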
o_propose_visit = False
recommendation_data_uter = [0]*len(recommendation_feature_mapping)
previous_strategies_embedding = tmp_strategies_embedding_text
tmp_strategies_embedding_text = uter
tmp_strategies.append([uter])
if "<buyer>" in uter and "<offer " not in uter and "<accept>" not in uter:
#tmp_strategy_sequences.append("<buyer>")
if ("pick it up" in uter or "pick up" in uter):
buyer_trade_in += 1
fine_intents[-1].append("<buyer_trade_in>")
if "pick" in uter_split:
extracted_seqs[-1][uter_split.index("pick")] = "<buyer_trade_in>"
if ("throw in" in uter or "throwing in" in uter) and ("?" in uter or "if" in uter):
buyer_ask_trade_in = 1
fine_intents[-1].append("<buyer_trade_in>")
if "throw" in uter_split:
extracted_seqs[-1][uter_split.index("throw")] = "<buyer_trade_in>"
elif "throwing" in uter_split:
extracted_seqs[-1][uter_split.index("throwing")] = "<buyer_trade_in>"
buyer_propose_visit = True
if len(re.findall(r"\d+", uter)) > 0 or len(re.findall(r"[0-9]+,[0-9]+", uter)) > 0:
for possible_price in re.findall(r"\d+", uter) + re.findall(r"[0-9]+,[0-9]+", uter):
possible_price = possible_price.replace(",", "")
if 1.2 > float(possible_price)/float(target) > 0.7 and float(possible_price) != float(price) and abs(float(possible_price) - float(target)) < abs(float(buyer_first_price) - float(target)):
if who_propose_visit:
who_propose = 0
tmp_strategies[-1].append("<Wait_For_Buyer_Propose>")
who_propose_visit = False
if buyer_propose_visit:
buyer_propose += 1
tmp_strategy_sequences.append("<buyer_propose>")
fine_intents[-1].append("<buyer_propose>")
if str(possible_price) in uter_split:
extracted_seqs[-1][uter_split.index(str(possible_price))] = "<buyer_propose>"
buyer_propose_visit = False
recommendation_data_uter[recommendation_feature_mapping["buyer_propose"]] = 1
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(possible_price, " <buyer_propose> ")
if first_price:
buyer_first_price = float(possible_price)
first_price = False
if buyer_first_price == 0.0:
first_price = True
if buyer_propose_visit and (("lowest" in uter and (("?" in uter) or ("what" in uter))) or ("price" in uter and "high" in uter) or ("price" in uter and "lower" in uter)):
buyer_reject += 1
previous_word = ""
#utterance-wise analysis
for greet_i in range(len(greetings)):
if greet_i <= 7:
if greetings[greet_i] in word_tokenized:
politeness_buyer_greetings += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_buyer_greet"]] = 1
tmp_strategy_sequences.append("<politeness_buyer_greet>")
fine_intents[-1].append("<politeness_buyer_greet>")
extracted_seqs[-1][uter_split.index(greetings[greet_i])] = "<politeness_buyer_greet>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + greetings[greet_i], " <politeness_buyer_greet> ")
else:
if greetings[greet_i] in uter:
politeness_buyer_greetings += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_buyer_greet"]] = 1
tmp_strategy_sequences.append("<politeness_buyer_greet>")
fine_intents[-1].append("<politeness_buyer_greet>")
extracted_seqs[-1][uter_split.index(greetings[greet_i].split()[0])] = "<politeness_buyer_greet>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + greetings[greet_i], " <politeness_buyer_greet> ")
for grad_i in range(len(gratitute)):
if gratitute[grad_i] in word_tokenized:
politeness_buyer_gratitude += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_buyer_gratitude"]] = 1
tmp_strategy_sequences.append("<politeness_buyer_gratitude>")
fine_intents[-1].append("<politeness_buyer_gratitude>")
extracted_seqs[-1][uter_split.index(gratitute[grad_i])] = "<politeness_buyer_gratitude>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + gratitute[grad_i], " <politeness_buyer_gratitude> ")
please_index = -1
for word_i in range(len(word_tokenized)):
if word_tokenized[word_i] == "please" or word_tokenized[word_i] == "pls":
please_index = word_i
break
if please_index != -1:
if word_tokenized[please_index-1] != ">" and word_tokenized[please_index-1] != "." and word_tokenized[please_index-1] != "?" and word_tokenized[please_index-1] != "!":
politeness_buyer_please += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_buyer_please"]] = 1
tmp_strategy_sequences.append("<politeness_buyer_please>")
fine_intents[-1].append("<politeness_buyer_please>")
extracted_seqs[-1][please_index] = "<politeness_buyer_please>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" please", " <politeness_buyer_please> ").replace("pls", "<politeness_buyer_please>")
else:
politeness_buyer_please_s += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_buyer_please_s"]] = 1
tmp_strategy_sequences.append("<politeness_buyer_please_s>")
fine_intents[-1].append("<politeness_buyer_please_s>")
extracted_seqs[-1][please_index] = "<politeness_buyer_please_s>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" please", " <politeness_buyer_please_s> ").replace("pls", "<politeness_buyer_please_s>")
for word_i, word in enumerate(word_tokenized):
word = lemmatizer.lemmatize(word)
if word in liwc_friend and (previous_word != "your" and previous_word != "ur"):
social_distance_buyer += 1.0
social_distance_count_buyer += 1.0
if word in liwc_family and (previous_word != "your" and previous_word != "ur"):
social_distance_buyer += 0.0
social_distance_count_buyer += 1.0
if word in factive:
factive_count_buyer += 1
recommendation_data_uter[recommendation_feature_mapping["factive_count_buyer"]] = 1
tmp_strategy_sequences.append("<factive_count_buyer>")
fine_intents[-1].append("<factive_count_buyer>")
extracted_seqs[-1][word_i] = "<factive_count_buyer>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <factive_count_buyer> ")
if word in first_person_singular:
first_person_singular_count_buyer += 1
recommendation_data_uter[recommendation_feature_mapping["first_person_singular_count_buyer"]] = 1
tmp_strategy_sequences.append("<first_person_singular_count_buyer>")
fine_intents[-1].append("<first_person_singular_count_buyer>")
extracted_seqs[-1][word_i] = "<first_person_singular_count_buyer>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word +" ", " <first_person_singular_count_buyer> ")
elif word in first_person_plural:
first_person_plural_count_buyer += 1
recommendation_data_uter[recommendation_feature_mapping["first_person_plural_count_buyer"]] = 1
tmp_strategy_sequences.append("<first_person_plural_count_buyer>")
fine_intents[-1].append("<first_person_plural_count_buyer>")
extracted_seqs[-1][word_i] = "<first_person_plural_count_buyer>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <first_person_plural_count_buyer> ")
elif word in third_person_plural:
third_person_plural_buyer += 1
recommendation_data_uter[recommendation_feature_mapping["third_person_plural_buyer"]] = 1
tmp_strategy_sequences.append("<third_person_plural_buyer>")
fine_intents[-1].append("<third_person_plural_buyer>")
extracted_seqs[-1][word_i] = "<third_person_plural_buyer>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <third_person_plural_buyer> ")
elif word in third_person_singular:
third_person_singular_buyer += 1
recommendation_data_uter[recommendation_feature_mapping["third_person_singular_buyer"]] = 1
tmp_strategy_sequences.append("<third_person_singular_buyer>")
fine_intents[-1].append("<third_person_singular_buyer>")
extracted_seqs[-1][word_i] = "<third_person_singular_buyer>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <third_person_singular_buyer> ")
if word in assertive:
assertive_count_buyer += 1
recommendation_data_uter[recommendation_feature_mapping["assertive_count_buyer"]] = 1
tmp_strategy_sequences.append("<assertive_count_buyer>")
fine_intents[-1].append("<assertive_count_buyer>")
extracted_seqs[-1][word_i] = "<assertive_count_buyer>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <assertive_count_buyer> ")
if word in hedges_word:
if _first_price and first_price:
example_arousal_tmp.append(word + "," + "N/A" + "," + uter.replace(",", ";"))
hedge_count_buyer += 1
recommendation_data_uter[recommendation_feature_mapping["hedge_count_buyer"]] = 1
tmp_strategy_sequences.append("<hedge_count_buyer>")
fine_intents[-1].append("<hedge_count_buyer>")
extracted_seqs[-1][word_i] = "<hedge_count_buyer>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <hedge_count_buyer> ")
if word in pos:
buyer_pos_sentiment += 1
recommendation_data_uter[recommendation_feature_mapping["buyer_pos_sentiment"]] = 1
tmp_strategy_sequences.append("<buyer_pos_sentiment>")
fine_intents[-1].append("<buyer_pos_sentiment>")
extracted_seqs[-1][word_i] = "<buyer_pos_sentiment>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <buyer_pos_sentiment> ")
if word in neg:
buyer_neg_sentiment += 1
recommendation_data_uter[recommendation_feature_mapping["buyer_neg_sentiment"]] = 1
tmp_strategy_sequences.append("<buyer_neg_sentiment>")
fine_intents[-1].append("<buyer_neg_sentiment>")
extracted_seqs[-1][word_i] = "<buyer_neg_sentiment>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <buyer_neg_sentiment> ")
if word in dominance:
dominance_count_buyer += 1
dominance_avg_buyer += dominance[word]
valence_avg_buyer += valence[word]
arousal_avg_buyer += arousal[word]
total_words_buyer += 1
stat_tmp.append(word)
vocab_tmp.append(word)
previous_word = word
total_uterance_buyer += 1
recommendation_data_uter_cumu = [a + b for a, b in zip(recommendation_data_uter_cumu, recommendation_data_uter[:-2])]
recommendation_raw_utterance_tmp.append(tmp_strategies_embedding_text)
strategy_sequences.append(tmp_strategy_sequences)
if "<selle>" in uter and "<offer " not in uter and "<accept>" not in uter:
variance_examples.append(uter)
for key in variance_examples_labels:
variance_examples_labels[key].append(0)
#tmp_strategy_sequences.append("<selle>")
if "throw in" in uter or "throwing in" in uter:
seller_trade_in = 1
recommendation_data_uter[recommendation_feature_mapping["seller_trade_in"]] = 1
tmp_strategy_sequences.append("<seller_trade_in>")
fine_intents[-1].append("<seller_trade_in>")
if "throw" in uter_split:
extracted_seqs[-1][uter_split.index("throw")] = "<seller_trade_in>"
else:
extracted_seqs[-1][uter_split.index("throwing")] = "<seller_trade_in>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace("throw in", "<seller_trade_in>").replace("throwing in", "<seller_trade_in>")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Trade_In>")
else:
tmp_strategies[-1].append("<2_Trade_In>")
variance_examples_labels["seller_trade_in"][-1] = 1
if "deliver" in uter:
recommendation_data_uter[recommendation_feature_mapping["seller_trade_in"]] = 1
fine_intents[-1].append("<seller_trade_in>")
if "deliver" in uter_split:
extracted_seqs[-1][uter_split.index("deliver")] = "<seller_trade_in>"
elif "delivery" in uter_split:
extracted_seqs[-1][uter_split.index("delivery")] = "<seller_trade_in>"
seller_deliver += 1
if len(re.findall(r"\d+", uter)) > 0:
#seller_propose_visit = True
for possible_price in re.findall(r"\d+", uter):
if 1 > float(possible_price)/float(price) > 0.7 and abs(float(possible_price) - float(price)) < abs(float(buyer_first_price) - float(price)):
if seller_propose_visit:
seller_propose += 1
tmp_strategy_sequences.append("<seller_propose>")
fine_intents[-1].append("<seller_propose>")
if str(possible_price) in uter_split:
extracted_seqs[-1][uter_split.index(str(possible_price))] = "<seller_propose>"
recommendation_data_uter[recommendation_feature_mapping["seller_propose"]] = 1
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(possible_price, "<seller_propose>")
tmp_strategies[-1].append("<Propose_New_Price>")
seller_propose_visit = False
variance_examples_labels["seller_propose"][-1] = 1
if _first_price:
seller_first_price = float(possible_price)
if 1 > float(possible_price)/float(price) > 0.5:
if who_propose_visit:
who_propose = 1
recommendation_data_uter[recommendation_feature_mapping["who_propose"]] = 1
who_propose_visit = False
variance_examples_labels["who_propose"][-1] = 1
_first_price = False
if seller_first_price == 0.0:
_first_price = True
#utterance-wise analysis
## TODO (INSERT CODE HERE FOR CLASSIFIER BASED STRATEGIES) # 530 in regression.py
for greet_i in range(len(greetings)):
if greet_i <= 7:
if greetings[greet_i] in word_tokenized:
politeness_seller_greetings += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_seller_greet"]] = 1
tmp_strategy_sequences.append("<politeness_seller_greet>")
fine_intents[-1].append("<politeness_seller_greet>")
extracted_seqs[-1][uter_split.index(greetings[greet_i])] = "<politeness_seller_greet>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + greetings[greet_i], " <politeness_seller_greet> ")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Greetings>")
else:
tmp_strategies[-1].append("<2_Greetings>")
variance_examples_labels["politeness_seller_greet"][-1] = 1
else:
if greetings[greet_i] in uter:
politeness_seller_greetings += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_seller_greet"]] = 1
tmp_strategy_sequences.append("<politeness_seller_greet>")
fine_intents[-1].append("<politeness_seller_greet>")
extracted_seqs[-1][uter_split.index(greetings[greet_i].split()[0])] = "<politeness_seller_greet>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + greetings[greet_i], " <politeness_seller_greet> ")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Greetings>")
else:
tmp_strategies[-1].append("<2_Greetings>")
variance_examples_labels["politeness_seller_greet"][-1] = 1
for sorry_i in range(len(apology)):
if sorry_i <= 1:
if apology[sorry_i] in word_tokenized:
politeness_seller_apology += 1
if first_price and _first_price:
tmp_strategies[-1].append("<1_Apology>")
else:
tmp_strategies[-1].append("<2_Apology>")
else:
if apology[sorry_i] in uter:
politeness_seller_apology += 1
if first_price and _first_price:
tmp_strategies[-1].append("<1_Apology>")
else:
tmp_strategies[-1].append("<2_Apology>")
for grad_i in range(len(gratitute)):
if gratitute[grad_i] in word_tokenized:
politeness_seller_gratitude += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_seller_gratitude"]] = 1
tmp_strategy_sequences.append("<politeness_seller_gratitude>")
fine_intents[-1].append("<politeness_seller_gratitude>")
extracted_seqs[-1][uter_split.index(gratitute[grad_i])] = "<politeness_seller_gratitude>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + gratitute[grad_i], " <politeness_seller_gratitude> ")
if first_price and _first_price:
tmp_strategies[-1].append("<1_gratitude>")
else:
tmp_strategies[-1].append("<2_gratitude>")
variance_examples_labels["politeness_seller_gratitude"][-1] = 1
please_index = -1
for word_i in range(len(word_tokenized)):
if word_tokenized[word_i] == "please" or word_tokenized[word_i] == "pls":
please_index = word_i
break
if please_index != -1:
if word_tokenized[please_index-1] != ">" and word_tokenized[please_index-1] != "." and word_tokenized[please_index-1] != "?" and word_tokenized[please_index-1] != "!":
politeness_seller_please += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_seller_please"]] = 1
tmp_strategy_sequences.append("<politeness_seller_please>")
fine_intents[-1].append("<politeness_seller_please>")
extracted_seqs[-1][please_index] = "<politeness_seller_please>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace("please", "<politeness_seller_please>").replace("pls", "<politeness_seller_please>")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Please>")
else:
tmp_strategies[-1].append("<2_Please>")
variance_examples_labels["politeness_seller_please"][-1] = 1
else:
politeness_seller_please_s += 1
recommendation_data_uter[recommendation_feature_mapping["politeness_seller_please_s"]] = 1
tmp_strategy_sequences.append("<politeness_seller_please_s>")
fine_intents[-1].append("<politeness_seller_please_s>")
extracted_seqs[-1][please_index] = "<politeness_seller_please_s>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace("please", "<politeness_seller_please_s>").replace("pls", "<politeness_seller_please_s>")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Please_Start>")
else:
tmp_strategies[-1].append("<2_Please_Start>")
variance_examples_labels["politeness_seller_please_s"][-1] = 1
previous_word = ""
for word_i, word in enumerate(word_tokenized):
word = lemmatizer.lemmatize(word)
if word in liwc_informal_dic and word != "ha" and word != "yes" and word != "like" and word != "absolutely" and word != "agree" and word != "ok":
if word != "well":
liwc_informal += 1
recommendation_data_uter[recommendation_feature_mapping["liwc_informal"]] = 1
tmp_strategy_sequences.append("<liwc_informal>")
fine_intents[-1].append("<liwc_informal>")
extracted_seqs[-1][word_i] = "<liwc_informal>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <liwc_informal> ")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Informal_Word>")
else:
tmp_strategies[-1].append("<2_Informal_Word>")
variance_examples_labels["liwc_informal"][-1] = 1
else:
if previous_word == ">":
if first_price and _first_price:
tmp_strategies[-1].append("<1_Informal_Word>")
else:
tmp_strategies[-1].append("<2_Informal_Word>")
liwc_informal += 1
recommendation_data_uter[recommendation_feature_mapping["liwc_informal"]] = 1
tmp_strategy_sequences.append("<liwc_informal>")
fine_intents[-1].append("<liwc_informal>")
extracted_seqs[-1][word_i] = "<liwc_informal>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <liwc_informal> ")
variance_examples_labels["liwc_informal"][-1] = 1
if word in liwc_certain and word != "certain":
if word == "sure":
if previous_word != "not" and previous_word != ">" and "?" not in uter:
liwc_certainty += 1
recommendation_data_uter[recommendation_feature_mapping["liwc_certainty"]] = 1
tmp_strategy_sequences.append("<liwc_certainty>")
fine_intents[-1].append("<liwc_certainty>")
extracted_seqs[-1][word_i] = "<liwc_certainty>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <liwc_certainty> ")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Certain_Word>")
else:
tmp_strategies[-1].append("<2_Certain_Word>")
variance_examples_labels["liwc_certainty"][-1] = 1
# if not (first_price and _first_price):
# print word + "," + uter.replace(",","")
else:
liwc_certainty += 1
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <liwc_certainty> ")
recommendation_data_uter[recommendation_feature_mapping["liwc_certainty"]] = 1
tmp_strategy_sequences.append("<liwc_certainty>")
fine_intents[-1].append("<liwc_certainty>")
extracted_seqs[-1][word_i] = "<liwc_certainty>"
if first_price and _first_price:
tmp_strategies[-1].append("<1_Certain_Word>")
else:
tmp_strategies[-1].append("<2_Certain_Word>")
variance_examples_labels["liwc_certainty"][-1] = 1
# if not (first_price and _first_price):
# print word + "," + uter.replace(",","")
if word in liwc_friend and (previous_word != "your" and previous_word != "ur") and not word.startswith("bud"):
social_distance_seller += 1.0
social_distance_count += 1.0
recommendation_data_uter[recommendation_feature_mapping["friend"]] = 1
tmp_strategy_sequences.append("<friend>")
fine_intents[-1].append("<friend>")
extracted_seqs[-1][word_i] = "<friend>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <friend> ")
if first_price and _first_price:
tmp_strategies[-1].append("<1_Friend_Word>")
else:
tmp_strategies[-1].append("<2_Friend_Word>")
variance_examples_labels["friend"][-1] = 1
if word in liwc_family and (previous_word != "your" and previous_word != "ur"):
if word == "family":
if previous_word == "my":
social_distance_seller += 0.0
recommendation_data_uter[recommendation_feature_mapping["family"]] = 1
tmp_strategy_sequences.append("<family>")
fine_intents[-1].append("<family>")
extracted_seqs[-1][word_i] = "<family>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <family> ")
social_distance_count += 1.0
if first_price and _first_price:
tmp_strategies[-1].append("<1_Family>")
else:
tmp_strategies[-1].append("<2_Family>")
variance_examples_labels["family"][-1] = 1
else:
social_distance_seller += 0.0
recommendation_data_uter[recommendation_feature_mapping["family"]] = 1
tmp_strategy_sequences.append("<family>")
fine_intents[-1].append("<family>")
extracted_seqs[-1][word_i] = "<family>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <family> ")
social_distance_count += 1.0
if first_price and _first_price:
tmp_strategies[-1].append("<1_Family>")
else:
tmp_strategies[-1].append("<2_Family>")
variance_examples_labels["family"][-1] = 1
if word in liwc_personal_concern:
personal_concern_seller += 1
recommendation_data_uter[recommendation_feature_mapping["personal_concern_seller"]] = 1
tmp_strategy_sequences.append("<personal_concern_seller>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <personal_concern_seller> ")
if word in factive:
keywords["factive_count_seller"] = word
factive_count_seller += 1
recommendation_data_uter[recommendation_feature_mapping["factive_count_seller"]] = 1
tmp_strategy_sequences.append("<factive_count_seller>")
fine_intents[-1].append("<factive_count_seller>")
extracted_seqs[-1][word_i] = "<factive_count_seller>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <factive_count_seller> ")
variance_examples_labels["factive_count_seller"][-1] = 1
if word in first_person_singular:
first_person_singular_count_seller += 1
recommendation_data_uter[recommendation_feature_mapping["first_person_singular_count_seller"]] = 1
tmp_strategy_sequences.append("<first_person_singular_count_seller>")
fine_intents[-1].append("<first_person_singular_count_seller>")
extracted_seqs[-1][word_i] = "<first_person_singular_count_seller>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word +" ", " <first_person_singular_count_seller> ")
variance_examples_labels["first_person_singular_count_seller"][-1] = 1
elif word in first_person_plural:
first_person_plural_count_seller += 1
recommendation_data_uter[recommendation_feature_mapping["first_person_plural_count_seller"]] = 1
tmp_strategy_sequences.append("<first_person_plural_count_seller>")
fine_intents[-1].append("<first_person_plural_count_seller>")
extracted_seqs[-1][word_i] = "<first_person_plural_count_seller>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <first_person_plural_count_seller> ")
variance_examples_labels["first_person_plural_count_seller"][-1] = 1
elif word in third_person_plural:
third_person_plural_seller += 1
recommendation_data_uter[recommendation_feature_mapping["third_person_plural_seller"]] = 1
tmp_strategy_sequences.append("<third_person_plural_seller>")
fine_intents[-1].append("<third_person_plural_seller>")
extracted_seqs[-1][word_i] = "<third_person_plural_seller>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <third_person_plural_seller> ")
variance_examples_labels["third_person_plural_seller"][-1] = 1
elif word in third_person_singular:
third_person_singular_seller += 1
recommendation_data_uter[recommendation_feature_mapping["third_person_singular_seller"]] = 1
tmp_strategy_sequences.append("<third_person_singular_seller>")
fine_intents[-1].append("<third_person_singular_seller>")
extracted_seqs[-1][word_i] = "<third_person_singular_seller>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <third_person_singular_seller> ")
variance_examples_labels["third_person_singular_seller"][-1] = 1
if word in dominance:
dominance_count_seller += 1
dominance_avg_seller += dominance[word]
valence_avg_seller += valence[word]
arousal_avg_seller += arousal[word]
if word in assertive:
keywords["assertive_count_seller"] = word
assertive_count_seller += 1
recommendation_data_uter[recommendation_feature_mapping["assertive_count_seller"]] = 1
tmp_strategy_sequences.append("<assertive_count_seller>")
fine_intents[-1].append("<assertive_count_seller>")
extracted_seqs[-1][word_i] = "<assertive_count_seller>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <assertive_count_seller> ")
if word in hedges_word:
#print word + "," + uter.replace(",","")
keywords["hedge_count_seller"] = word
recommendation_data_uter[recommendation_feature_mapping["hedge_count_seller"]] = 1
tmp_strategy_sequences.append("<hedge_count_seller>")
fine_intents[-1].append("<hedge_count_seller>")
extracted_seqs[-1][word_i] = "<hedge_count_seller>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <hedge_count_seller> ")
hedge_count_seller += 1
if o_propose_visit:
propose_hedge_tmp += 1
variance_examples_labels["hedge_count_seller"][-1] = 1
if word in pos:
seller_pos_sentiment += 1
recommendation_data_uter[recommendation_feature_mapping["seller_pos_sentiment"]] = 1
tmp_strategy_sequences.append("<seller_pos_sentiment>")
fine_intents[-1].append("<seller_pos_sentiment>")
extracted_seqs[-1][word_i] = "<seller_pos_sentiment>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <seller_pos_sentiment> ")
variance_examples_labels["seller_pos_sentiment"][-1] = 1
if word in neg:
seller_neg_sentiment += 1
recommendation_data_uter[recommendation_feature_mapping["seller_neg_sentiment"]] = 1
tmp_strategy_sequences.append("<seller_neg_sentiment>")
fine_intents[-1].append("<seller_neg_sentiment>")
extracted_seqs[-1][word_i] = "<seller_neg_sentiment>"
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <seller_neg_sentiment> ")
variance_examples_labels["seller_neg_sentiment"][-1] = 1
if word in lexicon_diff_dic_pos:
number_of_diff_dic_pos += 1
recommendation_data_uter[recommendation_feature_mapping["number_of_diff_dic_pos"]] = 1
tmp_strategy_sequences.append("<number_of_diff_dic_pos>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <number_of_diff_dic_pos> ")
if word in lexicon_diff_dic_neg:
number_of_diff_dic_neg += 1
recommendation_data_uter[recommendation_feature_mapping["number_of_diff_dic_neg"]] = 1
tmp_strategy_sequences.append("<number_of_diff_dic_neg>")
tmp_strategies_embedding_text = tmp_strategies_embedding_text.replace(" " + word, " <number_of_diff_dic_neg> ")
total_words_seller += 1
stat_tmp.append(word)
vocab_tmp.append(word)
previous_word = word
total_uterance_seller += 1
recommendation_data_uter_cumu = [a + b for a, b in zip(recommendation_data_uter_cumu, recommendation_data_uter[:-2])]
recommendation_raw_utterance_tmp.append(tmp_strategies_embedding_text)
strategy_sequences.append(tmp_strategy_sequences)
if len(strategy_sequences) > 1:
if ",".join(strategy_sequences[-2]) not in majority_rules:
majority_rules[",".join(strategy_sequences[-2])] = Counter()
majority_rules[",".join(strategy_sequences[-2])][",".join(strategy_sequences[-1])]+=1
else:
majority_rules[",".join(strategy_sequences[-2])][",".join(strategy_sequences[-1])]+=1
recommendation_template_tmp = [1,0] + recommendation_data_uter[:-2]
key = "".join([str(int(a)) for a in recommendation_template_tmp])
if key not in recommendation_template and keywords:
recommendation_template[key] = [uter, keywords]
uter_index += 1
extracted_seqs[-1] = extracted_seqs[-1][1:]
bag_of_strategies.append(recommendation_data_uter)
strategy_embedding_text_dialog += previous_strategies_embedding + " " + tmp_strategies_embedding_text + "\n"
fine_intents[-1].append("<end>")
# if "<offer " in uter:
# final = re.findall(r'\d+', uter)[0]
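# Snapshot the accumulated counts into the stage-level feature vector `tmp`: once when a
# first price has been put on the table (while still in portion 1) and again at the final
# utterance of the dialogue.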
if (not (first_price and _first_price) and portion_index == 1) or (uter_index == number_of_uter): # #uter_index >= portion_index*number_of_uter/4.0 and portion_index <= 4:
if len(tmp) == 0 and uter_index == number_of_uter:
tmp = [0.0]*feature_size
tmp[-10] = who_propose
# if portion_index == 1:
# stage_1_stat += vocab_tmp
# vocab_tmp = list()
# else:
# stage_2_stat += vocab_tmp
portion_index += 1
tmp += tmp_complex
#sentiment features
tmp.append(seller_neg_sentiment)
tmp.append(seller_pos_sentiment)
tmp.append(buyer_neg_sentiment)
tmp.append(buyer_pos_sentiment)
#stubborn features
if (dominance_count_buyer != 0):
tmp.append(dominance_avg_buyer/dominance_count_buyer)
tmp.append(arousal_avg_buyer/dominance_count_buyer)
else:
tmp.append(0)
tmp.append(0)
if (dominance_count_seller != 0):
tmp.append(dominance_avg_seller/dominance_count_seller)
tmp.append(arousal_avg_seller/dominance_count_seller)
else:
tmp.append(0)
tmp.append(0)
tmp.append(first_person_plural_count_seller)
tmp.append(first_person_singular_count_seller)
tmp.append(first_person_plural_count_buyer)
tmp.append(first_person_singular_count_buyer)
tmp.append(third_person_singular_seller)
tmp.append(third_person_plural_seller)
tmp.append(third_person_singular_buyer)
tmp.append(third_person_plural_buyer)
tmp.append(number_of_diff_dic_pos)
tmp.append(number_of_diff_dic_neg)
#most informative ones
if total_uterance_seller == 0:
tmp.append(0)
else:
tmp.append(total_words_seller/total_uterance_seller)
if total_uterance_buyer == 0:
tmp.append(0)
else:
tmp.append(total_words_buyer/total_uterance_buyer)
tmp.append(buyer_propose)
tmp.append(seller_propose)
tmp.append(float(buyer_first_price))
tmp.append(float(seller_first_price))
tmp.append(float(price))
tmp.append((float(buyer_first_price) - float(price))/float(price))
tmp.append(hedge_count_seller)
tmp.append(hedge_count_buyer)
tmp.append(assertive_count_seller)
tmp.append(assertive_count_buyer)
tmp.append(factive_count_seller)
tmp.append(factive_count_buyer)
tmp.append(who_propose)
tmp.append(seller_trade_in)
tmp.append(personal_concern_seller)
tmp.append(sg_concern)
if social_distance_count != 0:
tmp.append(social_distance_seller/social_distance_count)
else:
tmp.append(2.0)
tmp.append(liwc_certainty)
tmp.append(liwc_informal)
#tmp.append(politeness_seller_apology)
#tmp.append(politeness_seller_greetings)
tmp.append(politeness_seller_please)
tmp.append(politeness_seller_gratitude)
tmp.append(politeness_seller_please_s)
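# Unless this was the final utterance, zero the per-stage counters so the second stage
# starts from a clean slate.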
if uter_index != number_of_uter:
tmp_complex = [0,0,0,0,0]
seller_neg_sentiment = 0.0
seller_pos_sentiment = 0.0
buyer_neg_sentiment = 0.0
buyer_pos_sentiment = 0.0
dominance_avg_buyer = 0.0
dominance_count_buyer = 0.0
arousal_avg_buyer = 0.0
dominance_count_buyer = 0.0
dominance_avg_seller = 0.0
dominance_count_seller = 0.0
arousal_avg_seller = 0.0
dominance_count_seller = 0.0
first_person_plural_count_seller = 0.0
first_person_singular_count_seller = 0.0
first_person_plural_count_buyer = 0.0
first_person_singular_count_buyer = 0.0
third_person_singular_seller = 0.0
third_person_plural_seller = 0.0
third_person_singular_buyer = 0.0
third_person_plural_buyer = 0.0
number_of_diff_dic_pos = 0.0
number_of_diff_dic_neg = 0.0
total_uterance_seller = 0.0
total_words_seller = 0.0
total_words_buyer = 0.0
total_uterance_buyer = 0.0
buyer_propose = 0.0
seller_propose = 0.0
hedge_count_seller = 0.0
hedge_count_buyer = 0.0
assertive_count_seller = 0.0
assertive_count_buyer = 0.0
factive_count_seller = 0.0
factive_count_buyer = 0.0
seller_trade_in = 0
personal_concern_seller = 0
sg_concern = 0
social_distance_seller = 0.0
social_distance_count = 0.0
liwc_certainty = 0.0
liwc_informal = 0.0
#politeness_seller_apology = 0.0
#politeness_seller_greetings = 0.0
politeness_seller_please = 0.0
politeness_seller_gratitude = 0.0
politeness_seller_please_s = 0.0
previous = uter
uter_index_overall += 1
return fine_intents, bag_of_strategies, extracted_seqs
def calculate_ngram_features(ngram_dic, features, uter):
tmp = list()
for i in word_tokenize(uter):
if i not in stopWords:
tmp.append(lemmatizer.lemmatize(i))
for word in word_grams(tmp):
if word in ngram_dic:
features[ngram_dic[word]] += 1
return features
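# Illustrative sketch only (not part of the original script); assumes `ngram_dic` maps
# n-gram strings to feature indices and `features` starts as a zero count vector:
#   ngram_dic = {"great": 0, "great deal": 1}
#   features = calculate_ngram_features(ngram_dic, [0] * len(ngram_dic), "that is a great deal")
#   # -> [1, 1], provided the stop-word list removes "that", "is" and "a"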
def word_grams(words, min=1, max=4):
s = []
for n in range(min, max):
for ngram in ngrams(words, n):
s.append(' '.join(str(i) for i in ngram))
return s
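# e.g. word_grams(["great", "deal", "today"]) returns the uni-, bi- and tri-grams
# ["great", "deal", "today", "great deal", "deal today", "great deal today"] (max=4 is exclusive).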
if __name__ == "__main__": main()
| 47.458157 | 1,303 | 0.727838 | 11,926 | 89,601 | 5.074878 | 0.062469 | 0.05241 | 0.063976 | 0.075608 | 0.907308 | 0.904582 | 0.901079 | 0.897213 | 0.892487 | 0.888654 | 0 | 0.024237 | 0.147186 | 89,601 | 1,887 | 1,304 | 47.483307 | 0.767814 | 0.041149 | 0 | 0.923077 | 0 | 0 | 0.200247 | 0.100234 | 0 | 0 | 0 | 0.00053 | 0.026675 | 1 | 0.002481 | false | 0 | 0.019231 | 0 | 0.024194 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
98d79bf1cb7671371b36b7d35544279e64d0d748 | 1,975 | py | Python | src/news/migrations/0005_auto_20160119_1016.py | Busaka/excellence | 1cd19770285584d61aeddd77d6c1dd83e2fd04ba | ["MIT"] | null | null | null | src/news/migrations/0005_auto_20160119_1016.py | Busaka/excellence | 1cd19770285584d61aeddd77d6c1dd83e2fd04ba | ["MIT"] | null | null | null | src/news/migrations/0005_auto_20160119_1016.py | Busaka/excellence | 1cd19770285584d61aeddd77d6c1dd83e2fd04ba | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.9 on 2016-01-19 10:16
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('news', '0004_auto_20160111_1457'),
]
operations = [
migrations.AlterField(
model_name='new',
name='file_five',
field=models.FileField(upload_to='news/news_files'),
),
migrations.AlterField(
model_name='new',
name='file_four',
field=models.FileField(upload_to='news/news_files'),
),
migrations.AlterField(
model_name='new',
name='file_one',
field=models.FileField(upload_to='news/news_files'),
),
migrations.AlterField(
model_name='new',
name='file_three',
field=models.FileField(upload_to='news/news_files'),
),
migrations.AlterField(
model_name='new',
name='file_two',
field=models.FileField(upload_to='news/news_files'),
),
migrations.AlterField(
model_name='new',
name='image_five',
field=models.ImageField(upload_to='news/news_photos'),
),
migrations.AlterField(
model_name='new',
name='image_four',
field=models.ImageField(upload_to='news/news_photos'),
),
migrations.AlterField(
model_name='new',
name='image_one',
field=models.ImageField(upload_to='news/news_photos'),
),
migrations.AlterField(
model_name='new',
name='image_three',
field=models.ImageField(upload_to='news/news_photos'),
),
migrations.AlterField(
model_name='new',
name='image_two',
field=models.ImageField(upload_to='news/news_photos'),
),
]
| 29.924242 | 66 | 0.555949 | 196 | 1,975 | 5.357143 | 0.25 | 0.190476 | 0.238095 | 0.27619 | 0.785714 | 0.785714 | 0.785714 | 0.742857 | 0.701905 | 0.701905 | 0 | 0.0231 | 0.320506 | 1,975 | 65 | 67 | 30.384615 | 0.759314 | 0.032911 | 0 | 0.689655 | 1 | 0 | 0.159937 | 0.012061 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.034483 | 0 | 0.086207 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
c7358607862275e38da92d5785341130a135a1cb | 27,211 | py | Python | sdk/python/pulumi_aws/mq/configuration.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | ["ECL-2.0", "Apache-2.0"] | null | null | null | sdk/python/pulumi_aws/mq/configuration.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | ["ECL-2.0", "Apache-2.0"] | null | null | null | sdk/python/pulumi_aws/mq/configuration.py | alexbowers/pulumi-aws | 7dbdb03b1e4f7c0d51d5b5d17233ff4465c3eff5 | ["ECL-2.0", "Apache-2.0"] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = ['ConfigurationArgs', 'Configuration']
@pulumi.input_type
class ConfigurationArgs:
def __init__(__self__, *,
data: pulumi.Input[str],
engine_type: pulumi.Input[str],
engine_version: pulumi.Input[str],
authentication_strategy: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
The set of arguments for constructing a Configuration resource.
:param pulumi.Input[str] data: Broker configuration in XML format. See [official docs](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/amazon-mq-broker-configuration-parameters.html) for supported parameters and format of the XML.
:param pulumi.Input[str] engine_type: Type of broker engine. Valid values are `ActiveMQ` and `RabbitMQ`.
:param pulumi.Input[str] engine_version: Version of the broker engine.
:param pulumi.Input[str] authentication_strategy: Authentication strategy associated with the configuration. Valid values are `simple` and `ldap`. `ldap` is not supported for `engine_type` `RabbitMQ`.
:param pulumi.Input[str] description: Description of the configuration.
:param pulumi.Input[str] name: Name of the configuration.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
"""
pulumi.set(__self__, "data", data)
pulumi.set(__self__, "engine_type", engine_type)
pulumi.set(__self__, "engine_version", engine_version)
if authentication_strategy is not None:
pulumi.set(__self__, "authentication_strategy", authentication_strategy)
if description is not None:
pulumi.set(__self__, "description", description)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
@property
@pulumi.getter
def data(self) -> pulumi.Input[str]:
"""
Broker configuration in XML format. See [official docs](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/amazon-mq-broker-configuration-parameters.html) for supported parameters and format of the XML.
"""
return pulumi.get(self, "data")
@data.setter
def data(self, value: pulumi.Input[str]):
pulumi.set(self, "data", value)
@property
@pulumi.getter(name="engineType")
def engine_type(self) -> pulumi.Input[str]:
"""
Type of broker engine. Valid values are `ActiveMQ` and `RabbitMQ`.
"""
return pulumi.get(self, "engine_type")
@engine_type.setter
def engine_type(self, value: pulumi.Input[str]):
pulumi.set(self, "engine_type", value)
@property
@pulumi.getter(name="engineVersion")
def engine_version(self) -> pulumi.Input[str]:
"""
Version of the broker engine.
"""
return pulumi.get(self, "engine_version")
@engine_version.setter
def engine_version(self, value: pulumi.Input[str]):
pulumi.set(self, "engine_version", value)
@property
@pulumi.getter(name="authenticationStrategy")
def authentication_strategy(self) -> Optional[pulumi.Input[str]]:
"""
Authentication strategy associated with the configuration. Valid values are `simple` and `ldap`. `ldap` is not supported for `engine_type` `RabbitMQ`.
"""
return pulumi.get(self, "authentication_strategy")
@authentication_strategy.setter
def authentication_strategy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authentication_strategy", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the configuration.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the configuration.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags assigned to the resource, including those inherited from the provider.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
@pulumi.input_type
class _ConfigurationState:
def __init__(__self__, *,
arn: Optional[pulumi.Input[str]] = None,
authentication_strategy: Optional[pulumi.Input[str]] = None,
data: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
engine_type: Optional[pulumi.Input[str]] = None,
engine_version: Optional[pulumi.Input[str]] = None,
latest_revision: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering Configuration resources.
:param pulumi.Input[str] arn: ARN of the configuration.
:param pulumi.Input[str] authentication_strategy: Authentication strategy associated with the configuration. Valid values are `simple` and `ldap`. `ldap` is not supported for `engine_type` `RabbitMQ`.
:param pulumi.Input[str] data: Broker configuration in XML format. See [official docs](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/amazon-mq-broker-configuration-parameters.html) for supported parameters and format of the XML.
:param pulumi.Input[str] description: Description of the configuration.
:param pulumi.Input[str] engine_type: Type of broker engine. Valid values are `ActiveMQ` and `RabbitMQ`.
:param pulumi.Input[str] engine_version: Version of the broker engine.
:param pulumi.Input[int] latest_revision: Latest revision of the configuration.
:param pulumi.Input[str] name: Name of the configuration.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
"""
if arn is not None:
pulumi.set(__self__, "arn", arn)
if authentication_strategy is not None:
pulumi.set(__self__, "authentication_strategy", authentication_strategy)
if data is not None:
pulumi.set(__self__, "data", data)
if description is not None:
pulumi.set(__self__, "description", description)
if engine_type is not None:
pulumi.set(__self__, "engine_type", engine_type)
if engine_version is not None:
pulumi.set(__self__, "engine_version", engine_version)
if latest_revision is not None:
pulumi.set(__self__, "latest_revision", latest_revision)
if name is not None:
pulumi.set(__self__, "name", name)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if tags_all is not None:
pulumi.set(__self__, "tags_all", tags_all)
@property
@pulumi.getter
def arn(self) -> Optional[pulumi.Input[str]]:
"""
ARN of the configuration.
"""
return pulumi.get(self, "arn")
@arn.setter
def arn(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "arn", value)
@property
@pulumi.getter(name="authenticationStrategy")
def authentication_strategy(self) -> Optional[pulumi.Input[str]]:
"""
Authentication strategy associated with the configuration. Valid values are `simple` and `ldap`. `ldap` is not supported for `engine_type` `RabbitMQ`.
"""
return pulumi.get(self, "authentication_strategy")
@authentication_strategy.setter
def authentication_strategy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "authentication_strategy", value)
@property
@pulumi.getter
def data(self) -> Optional[pulumi.Input[str]]:
"""
Broker configuration in XML format. See [official docs](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/amazon-mq-broker-configuration-parameters.html) for supported parameters and format of the XML.
"""
return pulumi.get(self, "data")
@data.setter
def data(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "data", value)
@property
@pulumi.getter
def description(self) -> Optional[pulumi.Input[str]]:
"""
Description of the configuration.
"""
return pulumi.get(self, "description")
@description.setter
def description(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "description", value)
@property
@pulumi.getter(name="engineType")
def engine_type(self) -> Optional[pulumi.Input[str]]:
"""
Type of broker engine. Valid values are `ActiveMQ` and `RabbitMQ`.
"""
return pulumi.get(self, "engine_type")
@engine_type.setter
def engine_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "engine_type", value)
@property
@pulumi.getter(name="engineVersion")
def engine_version(self) -> Optional[pulumi.Input[str]]:
"""
Version of the broker engine.
"""
return pulumi.get(self, "engine_version")
@engine_version.setter
def engine_version(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "engine_version", value)
@property
@pulumi.getter(name="latestRevision")
def latest_revision(self) -> Optional[pulumi.Input[int]]:
"""
Latest revision of the configuration.
"""
return pulumi.get(self, "latest_revision")
@latest_revision.setter
def latest_revision(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "latest_revision", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
Name of the configuration.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider level.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A map of tags assigned to the resource, including those inherited from the provider.
"""
return pulumi.get(self, "tags_all")
@tags_all.setter
def tags_all(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags_all", value)
class Configuration(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authentication_strategy: Optional[pulumi.Input[str]] = None,
data: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
engine_type: Optional[pulumi.Input[str]] = None,
engine_version: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Provides an MQ Configuration Resource.
For more information on Amazon MQ, see [Amazon MQ documentation](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/welcome.html).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.mq.Configuration("example",
data=\"\"\"<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<broker xmlns="http://activemq.apache.org/schema/core">
<plugins>
<forcePersistencyModeBrokerPlugin persistenceFlag="true"/>
<statisticsBrokerPlugin/>
<timeStampingBrokerPlugin ttlCeiling="86400000" zeroExpirationOverride="86400000"/>
</plugins>
</broker>
\"\"\",
description="Example Configuration",
engine_type="ActiveMQ",
engine_version="5.15.0")
```
## Import
MQ Configurations can be imported using the configuration ID, e.g.
```sh
$ pulumi import aws:mq/configuration:Configuration example c-0187d1eb-88c8-475a-9b79-16ef5a10c94f
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] authentication_strategy: Authentication strategy associated with the configuration. Valid values are `simple` and `ldap`. `ldap` is not supported for `engine_type` `RabbitMQ`.
:param pulumi.Input[str] data: Broker configuration in XML format. See [official docs](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/amazon-mq-broker-configuration-parameters.html) for supported parameters and format of the XML.
:param pulumi.Input[str] description: Description of the configuration.
:param pulumi.Input[str] engine_type: Type of broker engine. Valid values are `ActiveMQ` and `RabbitMQ`.
:param pulumi.Input[str] engine_version: Version of the broker engine.
:param pulumi.Input[str] name: Name of the configuration.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ConfigurationArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides an MQ Configuration Resource.
For more information on Amazon MQ, see [Amazon MQ documentation](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/welcome.html).
## Example Usage
```python
import pulumi
import pulumi_aws as aws
example = aws.mq.Configuration("example",
data=\"\"\"<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<broker xmlns="http://activemq.apache.org/schema/core">
<plugins>
<forcePersistencyModeBrokerPlugin persistenceFlag="true"/>
<statisticsBrokerPlugin/>
<timeStampingBrokerPlugin ttlCeiling="86400000" zeroExpirationOverride="86400000"/>
</plugins>
</broker>
\"\"\",
description="Example Configuration",
engine_type="ActiveMQ",
engine_version="5.15.0")
```
## Import
MQ Configurations can be imported using the configuration ID, e.g.
```sh
$ pulumi import aws:mq/configuration:Configuration example c-0187d1eb-88c8-475a-9b79-16ef5a10c94f
```
:param str resource_name: The name of the resource.
:param ConfigurationArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ConfigurationArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
authentication_strategy: Optional[pulumi.Input[str]] = None,
data: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
engine_type: Optional[pulumi.Input[str]] = None,
engine_version: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ConfigurationArgs.__new__(ConfigurationArgs)
__props__.__dict__["authentication_strategy"] = authentication_strategy
if data is None and not opts.urn:
raise TypeError("Missing required property 'data'")
__props__.__dict__["data"] = data
__props__.__dict__["description"] = description
if engine_type is None and not opts.urn:
raise TypeError("Missing required property 'engine_type'")
__props__.__dict__["engine_type"] = engine_type
if engine_version is None and not opts.urn:
raise TypeError("Missing required property 'engine_version'")
__props__.__dict__["engine_version"] = engine_version
__props__.__dict__["name"] = name
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
__props__.__dict__["arn"] = None
__props__.__dict__["latest_revision"] = None
super(Configuration, __self__).__init__(
'aws:mq/configuration:Configuration',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
arn: Optional[pulumi.Input[str]] = None,
authentication_strategy: Optional[pulumi.Input[str]] = None,
data: Optional[pulumi.Input[str]] = None,
description: Optional[pulumi.Input[str]] = None,
engine_type: Optional[pulumi.Input[str]] = None,
engine_version: Optional[pulumi.Input[str]] = None,
latest_revision: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
tags_all: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'Configuration':
"""
Get an existing Configuration resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] arn: ARN of the configuration.
:param pulumi.Input[str] authentication_strategy: Authentication strategy associated with the configuration. Valid values are `simple` and `ldap`. `ldap` is not supported for `engine_type` `RabbitMQ`.
:param pulumi.Input[str] data: Broker configuration in XML format. See [official docs](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/amazon-mq-broker-configuration-parameters.html) for supported parameters and format of the XML.
:param pulumi.Input[str] description: Description of the configuration.
:param pulumi.Input[str] engine_type: Type of broker engine. Valid values are `ActiveMQ` and `RabbitMQ`.
:param pulumi.Input[str] engine_version: Version of the broker engine.
:param pulumi.Input[int] latest_revision: Latest revision of the configuration.
:param pulumi.Input[str] name: Name of the configuration.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: Map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider level.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags_all: A map of tags assigned to the resource, including those inherited from the provider.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ConfigurationState.__new__(_ConfigurationState)
__props__.__dict__["arn"] = arn
__props__.__dict__["authentication_strategy"] = authentication_strategy
__props__.__dict__["data"] = data
__props__.__dict__["description"] = description
__props__.__dict__["engine_type"] = engine_type
__props__.__dict__["engine_version"] = engine_version
__props__.__dict__["latest_revision"] = latest_revision
__props__.__dict__["name"] = name
__props__.__dict__["tags"] = tags
__props__.__dict__["tags_all"] = tags_all
return Configuration(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter
def arn(self) -> pulumi.Output[str]:
"""
ARN of the configuration.
"""
return pulumi.get(self, "arn")
@property
@pulumi.getter(name="authenticationStrategy")
def authentication_strategy(self) -> pulumi.Output[str]:
"""
Authentication strategy associated with the configuration. Valid values are `simple` and `ldap`. `ldap` is not supported for `engine_type` `RabbitMQ`.
"""
return pulumi.get(self, "authentication_strategy")
@property
@pulumi.getter
def data(self) -> pulumi.Output[str]:
"""
Broker configuration in XML format. See [official docs](https://docs.aws.amazon.com/amazon-mq/latest/developer-guide/amazon-mq-broker-configuration-parameters.html) for supported parameters and format of the XML.
"""
return pulumi.get(self, "data")
@property
@pulumi.getter
def description(self) -> pulumi.Output[Optional[str]]:
"""
Description of the configuration.
"""
return pulumi.get(self, "description")
@property
@pulumi.getter(name="engineType")
def engine_type(self) -> pulumi.Output[str]:
"""
Type of broker engine. Valid values are `ActiveMQ` and `RabbitMQ`.
"""
return pulumi.get(self, "engine_type")
@property
@pulumi.getter(name="engineVersion")
def engine_version(self) -> pulumi.Output[str]:
"""
Version of the broker engine.
"""
return pulumi.get(self, "engine_version")
@property
@pulumi.getter(name="latestRevision")
def latest_revision(self) -> pulumi.Output[int]:
"""
Latest revision of the configuration.
"""
return pulumi.get(self, "latest_revision")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Name of the configuration.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
Map of tags to assign to the resource. If configured with a provider `default_tags` configuration block present, tags with matching keys will overwrite those defined at the provider level.
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter(name="tagsAll")
def tags_all(self) -> pulumi.Output[Mapping[str, str]]:
"""
A map of tags assigned to the resource, including those inherited from the provider.
"""
return pulumi.get(self, "tags_all")
| 45.351667 | 256 | 0.650399 | 3,161 | 27,211 | 5.428029 | 0.069915 | 0.093601 | 0.091386 | 0.062828 | 0.888623 | 0.865544 | 0.843921 | 0.828535 | 0.806504 | 0.791001 | 0 | 0.004297 | 0.238911 | 27,211 | 599 | 257 | 45.427379 | 0.824191 | 0.375914 | 0 | 0.723926 | 1 | 0 | 0.087305 | 0.019854 | 0 | 0 | 0 | 0 | 0 | 1 | 0.162577 | false | 0.003067 | 0.015337 | 0 | 0.276074 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |