hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
88d39322c3041524215f4b9a9cc3d781740f5bc4 | 9,642 | py | Python | formy.py | JITENDRAMINDA/sssnnn | e612f120458b1c3edb3f2738c04e0b7a3cbcdae7 | [
"MIT"
] | null | null | null | formy.py | JITENDRAMINDA/sssnnn | e612f120458b1c3edb3f2738c04e0b7a3cbcdae7 | [
"MIT"
] | null | null | null | formy.py | JITENDRAMINDA/sssnnn | e612f120458b1c3edb3f2738c04e0b7a3cbcdae7 | [
"MIT"
] | null | null | null | from pyrogram import Client, Filters
TOKAN = "639957559:AAFbwAStH_GXBgUVFxC93CCsbBM5MSA-Piw"
app = Client( TOKAN ,605563,"7f2c2d12880400b88764b9b304e14e0b")
@app.on_message(Filters.command("delete"))
def main(client, message):
for message.message_id in range(int(message.text.split(' ')[1]), int(message.text.split(' ')[2])):
try:
client.delete_messages(message.chat.id,message.message_id)
except:
continue
@app.on_message(Filters.channel & ~ Filters.edited)
def main(client, message):
file = open("bullets.txt" , "r")
s = file.readlines()
file.close()
for d in s:
if message.chat.id == int(d):
file = open("bulleti.txt" , "r")
lines = file.readlines()
file.close()
for line in lines:
p = line.split()
for x in p:
try:
mes = client.send_message( int(x), "**" + message.text + "**" )
fille = open(str(x)+".txt","r")
n = fille.readlines()
fille.close()
for t in n:
fie = open(str(x)+".txt","w")
fie.write(t +" " + str(message.message_id) + " " + str(mes.message_id))
fie.close()
except:
continue
fil = open("ferraris.txt" , "r")
q = fil.readlines()
fil.close()
for e in q:
if message.chat.id == int(e):
file = open("ferrarii.txt" , "r")
lines = file.readlines()
file.close()
for line in lines:
p = line.split()
for x in p:
try:
mes = client.send_message( int(x), "**" + message.text + "**" )
fille = open(str(x)+".txt","r")
n = fille.readlines()
fille.close()
for t in n:
fie = open(str(x)+".txt","w")
fie.write(t +" " + str(message.message_id) + " " + str(mes.message_id))
fie.close()
except:
continue
fl = open("sessionss.txt" , "r")
h = fl.readlines()
fl.close()
for e in h:
if message.chat.id == int(e):
file = open("sessionsi.txt" , "r")
lines = file.readlines()
file.close()
for line in lines:
p = line.split()
for x in p:
try:
mes = client.send_message( int(x), "**" + message.text + "**" )
fille = open(str(x)+".txt","r")
n = fille.readlines()
fille.close()
for t in n:
fie = open(str(x)+".txt","w")
fie.write(t +" " + str(message.message_id) + " " + str(mes.message_id))
fie.close()
except:
continue
@app.on_message(Filters.channel & Filters.edited)
def main(client, message):
file = open("ferraris.txt" , "r")
s = file.readlines()
file.close()
for d in s:
if message.chat.id == int(d):
filer = open("update.txt" , "r")
m = filer.readlines()
filer.close()
for l in m:
if l == "on":
file = open("ferrarii.txt" , "r")
lines = file.readlines()
file.close()
for line in lines:
p = line.split()
for o in p:
files = open(str(o)+".txt" , "r")
d = files.readlines()
files.close()
for c in d:
x = c.split()
id = str(message.message_id)
if id in x:
try:
client.edit_message_text(int(o),int(x[x.index(id)+1]), "**" + message.text + "**" )
except:
continue
fil = open("bullets.txt" , "r")
f = fil.readlines()
fil.close()
for d in f:
if message.chat.id == int(d):
filer = open("update.txt" , "r")
m = filer.readlines()
filer.close()
for l in m:
if l == "on":
file = open("bulleti.txt" , "r")
lines = file.readlines()
file.close()
for line in lines:
p = line.split()
for o in p:
files = open(str(o)+".txt" , "r")
d = files.readlines()
files.close()
for c in d:
x = c.split()
id = str(message.message_id)
if id in x:
try:
client.edit_message_text(int(o),int(x[x.index(id)+1]), "**" + message.text + "**" )
except:
continue
fl = open("sessionss.txt" , "r")
f = fl.readlines()
fl.close()
for d in f:
if message.chat.id == int(d):
filer = open("update.txt" , "r")
m = filer.readlines()
filer.close()
for l in m:
if l == "on":
file = open("sessionsi.txt" , "r")
lines = file.readlines()
file.close()
for line in lines:
p = line.split()
for o in p:
files = open(str(o)+".txt" , "r")
d = files.readlines()
files.close()
for c in d:
x = c.split()
id = str(message.message_id)
if id in x:
try:
client.edit_message_text(int(o),int(x[x.index(id)+1]), "**" + message.text + "**" )
except:
continue
@app.on_message(Filters.command('add') & Filters.user(491634139) )
def forward(client, message):
if len(message.text.split(' ')) > 2:
if len(message.text.split(' ')[1]) == 14:
with open(message.text.split(" ")[2] + ".txt" , "r") as file:
lines = file.readlines()
file.close()
for line in lines:
files = open(message.text.split(" ")[2] + ".txt" , "w")
files.write(line + " " + message.text.split(' ')[1])
files.close()
with open(message.text.split(' ')[1]+".txt" , "w") as g:
g.write("001 002")
g.close()
message.reply("πΎ Done, The chat_id ```" + message.text.split(' ')[1] +"```π has been added to my database. β
β
")
else:
message.reply("πΌ Please write a valid chat id. β
β
")
else:
message.reply("πΌ Please write a valid chat id. β
β
")
@app.on_message(Filters.command('remove') & Filters.user(491634139))
def forward(client, message):
if len(message.text.split(' ')) > 2:
if len(message.text.split(' ')[1]) == 14:
file = open(message.text.split(" ")[2] + ".txt" , "r")
u = file.readlines()
file.close()
for v in u:
lines = v.split()
del lines[lines.index(message.text.split(' ')[1])]
y = " ".join(str(x) for x in lines)
files = open(message.text.split(" ")[2] + ".txt" , "w")
files.write(y)
files.close()
message.reply("πΎ Done, The chat_id ```" + message.text.split(' ')[1] +"```π has been removed to my database. β
β
")
else:
message.reply("πΌ Please write a valid chat id. β
β
")
else:
message.reply("πΌ Please write a valid chat id. β
β
")
@app.on_message(Filters.command('clear') & Filters.user(491634139))
def forward(client, message):
file = open(message.text.split(" ")[1] + ".txt" , "r")
lines = file.readlines()
file.close()
for line in lines:
p = line.split()
for x in p:
fie = open(str(x)+".txt","w")
fie.write("001 002")
fie.close()
message.reply("β’οΈ Done, Editing data cleared β
β
")
@app.on_message(Filters.command('list') & Filters.user(491634139))
def forward(client, message):
file = open(message.text.split(" ")[1] + ".txt" , "r")
u = file.readlines()
file.close()
for v in u :
message.reply("ποΈ List of Chat_ids in my database are ```" + str(v) + "```. Its can be change. β
β
")
@app.on_message(Filters.command('sets') & Filters.user(491634139) )
def forward(client, message):
if len(message.text.split(' ')) > 2:
if len(message.text.split(' ')[1]) == 14:
with open(message.text.split(' ')[2] + '.txt', 'w') as file:
file.write(message.text.split(' ')[1])
file.close()
message.reply("π Done, Now my source chat is ```" + message.text.split(' ')[1] + "```. I will try to forward messages from this chat. β
β
")
else:
message.reply("πΌ Please write a valid chat id. β
β
")
else:
message.reply("πΌ Please write a valid chat id. β
β
")
@app.on_message(Filters.command('setupdate') & Filters.user(491634139) )
def forward(client, message):
if len(message.text.split(' ')) > 1:
with open('update.txt', 'w') as file:
file.write(message.text.split(' ')[1])
file.close()
message.reply("π Done,Now my message update status is ```" + message.text.split(' ')[1] + "```.β
β
")
else:
message.reply("πΌ Please write a valid chat id. β
β
")
@app.on_message(Filters.command('source') & Filters.user(491634139) )
def forward(client, message):
with open(message.text.split(" ")[1] + '.txt', 'r') as file:
x = file.readlines()
file.close()
for y in x:
message.reply("π My source chat is ```" + y + "```. I am trying to forward messages from this chat. β
β
")
@app.on_message(Filters.command('get') & Filters.user(491634139) )
def forward(client, message):
if len(message.text.split(' ')) > 1:
if len(message.text.split(' ')[1]) == 14:
x = client.get_chat(int(message.text.split(' ')[1])).title
message.reply("πΆ This chat name is - "+str(x)+" β
")
else:
message.reply("πΌ Please write a valid chat id. β
β
")
else:
message.reply("πΌ Please write a valid chat id. β
β
")
@app.on_message(Filters.command('update') & Filters.user(491634139) )
def forward(client, message):
with open('update.txt', 'r') as file:
x = file.readlines()
file.close()
for y in x:
message.reply("π My current message update status is ```" + y + "```. β
β
")
@app.on_message(Filters.command("start"))
def forward(client, message):
if message.from_user.id == 491634139:
message.reply("β»οΈ Welcome to your LineBot . β
β
")
else:
message.reply("β»οΈ You need admins permission to use my functions. β
β
")
@app.on_message(Filters.private)
def forward(client, message):
if not message.from_user.id == 491634139:
message.reply("β»οΈ You need admins permission to use my functions. β
β
")
app.run()
| 30.609524 | 142 | 0.553827 | 1,353 | 9,642 | 3.966741 | 0.116778 | 0.077883 | 0.086454 | 0.06335 | 0.84945 | 0.80231 | 0.778275 | 0.743059 | 0.719396 | 0.694056 | 0 | 0.026978 | 0.261875 | 9,642 | 314 | 143 | 30.707006 | 0.717437 | 0 | 0 | 0.788321 | 0 | 0 | 0.15391 | 0.007986 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051095 | false | 0 | 0.00365 | 0 | 0.054745 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
cc39e7f213ab75d908dc1d45461c1e49625759cd | 4,717 | py | Python | tests/polys/test_conversions.py | hussinhassan80/galois | 5553d0f17d5f4dcf105f92028fcde8f6afd53b6d | [
"MIT"
] | 65 | 2021-02-20T04:07:59.000Z | 2022-03-13T10:14:58.000Z | tests/polys/test_conversions.py | hussinhassan80/galois | 5553d0f17d5f4dcf105f92028fcde8f6afd53b6d | [
"MIT"
] | 303 | 2021-02-22T19:36:25.000Z | 2022-03-31T14:48:15.000Z | tests/polys/test_conversions.py | hussinhassan80/galois | 5553d0f17d5f4dcf105f92028fcde8f6afd53b6d | [
"MIT"
] | 9 | 2021-03-11T07:40:51.000Z | 2022-03-06T20:13:17.000Z | """
A pytest module to test the polynomial conversion functions.
"""
import random
import pytest
import galois
def test_integer_to_poly():
assert galois._poly_conversion.integer_to_poly(0, 2) == [0]
assert galois._poly_conversion.integer_to_poly(5, 2) == [1, 0, 1]
assert galois._poly_conversion.integer_to_poly(5, 2, degree=3) == [0, 1, 0, 1]
assert galois._poly_conversion.integer_to_poly(3**5, 3) == [1, 0, 0, 0, 0, 0]
for _ in range(5):
order = random.randint(2, 1_000_000_000)
assert galois._poly_conversion.integer_to_poly(order**5 - 1, order) == [order - 1,]*5
assert galois._poly_conversion.integer_to_poly(order**5, order) == [1, 0, 0, 0, 0, 0]
assert galois._poly_conversion.integer_to_poly(order**5 + 1, order) == [1, 0, 0, 0, 0, 1]
def test_poly_to_integer():
assert galois._poly_conversion.poly_to_integer([0], 2) == 0
assert galois._poly_conversion.poly_to_integer([1, 0, 1], 2) == 5
assert galois._poly_conversion.poly_to_integer([0, 1, 0, 1], 2) == 5
assert galois._poly_conversion.poly_to_integer([1, 0, 0, 0, 0, 0], 3) == 3**5
for _ in range(5):
order = random.randint(2, 1_000_000_000)
assert galois._poly_conversion.poly_to_integer([order - 1,]*5, order) == order**5 - 1
assert galois._poly_conversion.poly_to_integer([1, 0, 0, 0, 0, 0], order) == order**5
assert galois._poly_conversion.poly_to_integer([1, 0, 0, 0, 0, 1], order) == order**5 + 1
def test_sparse_poly_to_integer():
assert galois._poly_conversion.sparse_poly_to_integer([0], [0], 2) == 0
assert galois._poly_conversion.sparse_poly_to_integer([2, 0], [1, 1], 2) == 5
def test_poly_to_str():
assert galois._poly_conversion.poly_to_str([0]) == "0"
assert galois._poly_conversion.poly_to_str([1, 0, 1, 1]) == "x^3 + x + 1"
assert galois._poly_conversion.poly_to_str([0, 1, 0, 1, 1]) == "x^3 + x + 1"
assert galois._poly_conversion.poly_to_str([0], poly_var="y") == "0"
assert galois._poly_conversion.poly_to_str([1, 0, 1, 1], poly_var="y") == "y^3 + y + 1"
assert galois._poly_conversion.poly_to_str([0, 1, 0, 1, 1], poly_var="y") == "y^3 + y + 1"
def test_sparse_poly_to_str():
assert galois._poly_conversion.sparse_poly_to_str([0], [0]) == "0"
assert galois._poly_conversion.sparse_poly_to_str([3, 1, 0], [1, 1, 1]) == "x^3 + x + 1"
assert galois._poly_conversion.sparse_poly_to_str([0], [0], poly_var="y") == "0"
assert galois._poly_conversion.sparse_poly_to_str([3, 1, 0], [1, 1, 1], poly_var="y") == "y^3 + y + 1"
GF = galois.GF(2**8)
with GF.display("poly"):
assert galois._poly_conversion.sparse_poly_to_str([0], GF([0])) == "0"
assert galois._poly_conversion.sparse_poly_to_str([3, 1, 0], GF([1, 2, 3])) == "x^3 + (Ξ±)x + (Ξ± + 1)"
assert galois._poly_conversion.sparse_poly_to_str([0], GF([0]), poly_var="y") == "0"
assert galois._poly_conversion.sparse_poly_to_str([3, 1, 0], GF([1, 2, 3]), poly_var="y") == "y^3 + (Ξ±)y + (Ξ± + 1)"
def test_str_to_sparse_poly():
# Over GF(2)
assert galois._poly_conversion.str_to_sparse_poly("x^2 + 1") == ([2, 0], [1, 1])
assert galois._poly_conversion.str_to_sparse_poly("1 - x^2") == ([0, 2], [1, -1])
assert galois._poly_conversion.str_to_sparse_poly("x**2 + 1") == ([2, 0], [1, 1])
assert galois._poly_conversion.str_to_sparse_poly("y^2 + y + 1") == ([2, 1, 0], [1, 1, 1])
assert galois._poly_conversion.str_to_sparse_poly("y**2 + y**1 + 1*y**0") == ([2, 1, 0], [1, 1, 1])
# Over GF(3)
assert galois._poly_conversion.str_to_sparse_poly("2*x^2 + 2") == ([2, 0], [2, 2])
assert galois._poly_conversion.str_to_sparse_poly("2*x^2 - 1") == ([2, 0], [2, -1])
# Over GF(2)
with pytest.raises(ValueError):
galois._poly_conversion.str_to_sparse_poly("x^2 + y + 1")
with pytest.raises(ValueError):
galois._poly_conversion.str_to_sparse_poly("x^2 + x^-1 + 1")
def test_str_to_integer():
GF = galois.GF2
assert galois._poly_conversion.str_to_integer("x^2 + 1", GF) == 5
assert galois._poly_conversion.str_to_integer("x**2 + 1", GF) == 5
assert galois._poly_conversion.str_to_integer("y^2 + y + 1", GF) == 7
assert galois._poly_conversion.str_to_integer("y**2 + y**1 + 1*y**0", GF) == 7
GF = galois.GF(3)
assert galois._poly_conversion.str_to_integer("2*x^2 + 2", GF) == 2*3**2 + 2
assert galois._poly_conversion.str_to_integer("2*x^2 - 1", GF) == 2*3**2 + 2
GF = galois.GF2
with pytest.raises(ValueError):
galois._poly_conversion.str_to_integer("x^2 + y + 1", GF)
with pytest.raises(ValueError):
galois._poly_conversion.str_to_integer("x^2 + x^-1 + 1", GF)
| 44.92381 | 123 | 0.641933 | 817 | 4,717 | 3.405141 | 0.052632 | 0.168943 | 0.337886 | 0.401869 | 0.890007 | 0.867362 | 0.846513 | 0.806614 | 0.742272 | 0.688713 | 0 | 0.074523 | 0.177867 | 4,717 | 104 | 124 | 45.355769 | 0.642857 | 0.019928 | 0 | 0.142857 | 0 | 0 | 0.066768 | 0 | 0 | 0 | 0 | 0 | 0.614286 | 1 | 0.1 | false | 0 | 0.042857 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
cc6ec93718a6d8a9b7831aa00fe4079f3f6e0aed | 170 | py | Python | seahub/base/database_storage/__init__.py | MJochim/seahub | 66fcc6772511d43346a2980613576c5fdb4c4945 | [
"Apache-2.0"
] | 420 | 2015-01-03T11:34:46.000Z | 2022-03-10T07:15:41.000Z | seahub/base/database_storage/__init__.py | MJochim/seahub | 66fcc6772511d43346a2980613576c5fdb4c4945 | [
"Apache-2.0"
] | 735 | 2015-01-04T21:22:51.000Z | 2022-03-31T09:26:07.000Z | seahub/base/database_storage/__init__.py | MJochim/seahub | 66fcc6772511d43346a2980613576c5fdb4c4945 | [
"Apache-2.0"
] | 379 | 2015-01-05T17:08:03.000Z | 2022-03-06T00:11:50.000Z | # Copyright (c) 2012-2016 Seafile Ltd.
# Allow users to: from database_storage import DatabaseStorage
# (reduce redundancy a little bit)
from .database_storage import *
| 28.333333 | 62 | 0.782353 | 23 | 170 | 5.695652 | 0.826087 | 0.183206 | 0.290076 | 0.381679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055172 | 0.147059 | 170 | 5 | 63 | 34 | 0.848276 | 0.764706 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
cc781c71fabb708c5d519e25c8f3203b258f2e47 | 8,207 | py | Python | FP2122P2_public_tests.py | afonsoazaruja/proj-fp-prado | b0f68b9c80bc24f96988084d38a1c82851531479 | [
"MIT"
] | null | null | null | FP2122P2_public_tests.py | afonsoazaruja/proj-fp-prado | b0f68b9c80bc24f96988084d38a1c82851531479 | [
"MIT"
] | null | null | null | FP2122P2_public_tests.py | afonsoazaruja/proj-fp-prado | b0f68b9c80bc24f96988084d38a1c82851531479 | [
"MIT"
] | null | null | null | def teste211():
total_score = 0
fun_name = TAD_posicao_public
p1 = cria_posicao(-1, 2)
# cria_posicao: argumentos invalidos
p1 = cria_posicao(2, 3)
p2 = cria_posicao(7, 0)
posicoes_iguais(p1, p2)
# False
p1 = cria_posicao(2, 3)
posicao_para_str(p1) == '(2, 3)'
# True
p2 = cria_posicao(7, 0)
t = obter_posicoes_adjacentes(p2)
tuple(posicao_para_str(p) for p in t)
# ('(8, 0)', '(7, 1)', '(6, 0)')
p2 = cria_posicao(7, 0)
t = obter_posicoes_adjacentes(p2)
tuple(posicao_para_str(p) for p in ordenar_posicoes(t))
# ('(6, 0)', '(8, 0)', '(7, 1)')
return
def teste212():
total_score = 0
fun_name = TAD_animal_public
cria_animal('rabbit', -5, 0)
# cria_animal: argumentos invalidos
r1 = cria_animal('rabbit', 5, 0)
animal_para_str(r1)
# rabbit [0/5]
f1 = cria_animal('fox', 20, 10)
animal_para_str(f1)
# fox [0/20;0/10]
r1 = cria_animal('rabbit', 5, 0)
animal_para_char(r1)
# r
f1 = cria_animal('fox', 20, 10)
animal_para_char(f1)
# F
f1 = cria_animal('fox', 20, 10)
f2 = cria_copia_animal(f1)
f2 = aumenta_idade(aumenta_idade(f2))
f2 = aumenta_fome(f2)
animal_para_str(f1)
# fox [0/20;0/10]
f1 = cria_animal('fox', 20, 10)
f2 = cria_copia_animal(f1)
f2 = aumenta_idade(aumenta_idade(f2))
f2 = aumenta_fome(f2)
animal_para_str(f2)
# fox [2/20;1/10]
f1 = cria_animal('fox', 20, 10)
f2 = cria_copia_animal(f1)
f2 = aumenta_idade(aumenta_idade(f2))
f2 = aumenta_fome(f2)
animais_iguais(f1, f2)
# False
f1 = cria_animal('fox', 20, 10)
f2 = cria_copia_animal(f1)
f2 = aumenta_idade(aumenta_idade(f2))
f2 = aumenta_fome(f2)
f3 = reproduz_animal(f2)
animal_para_str(f2)
# fox [0/20;1/10]
f1 = cria_animal('fox', 20, 10)
f2 = cria_copia_animal(f1)
f2 = aumenta_idade(aumenta_idade(f2))
f2 = aumenta_fome(f2)
f3 = reproduz_animal(f2)
animal_para_str(f3)
# fox [0/20;0/10]
return
def teste213():
total_score = 0
fun_name = TAD_prado_public
dim = cria_posicao(11, 4)
obs = (cria_posicao(4, 2), cria_posicao(5, 2))
an1 = tuple(cria_animal('rabbit', 5, 0) for i in range(3))
an2 = (cria_animal('lynx', 20, 15),)
pos = tuple(cria_posicao(p[0], p[1]) for p in ((5, 1), (7, 2), (10, 1), (6, 1)))
prado = cria_prado(dim, obs, an1 + an2, pos)
obter_tamanho_x(prado), obter_tamanho_y(prado)
# (12, 5)
dim = cria_posicao(11, 4)
obs = (cria_posicao(4, 2), cria_posicao(5, 2))
an1 = tuple(cria_animal('rabbit', 5, 0) for i in range(3))
an2 = (cria_animal('lynx', 20, 15),)
pos = tuple(cria_posicao(p[0], p[1]) for p in ((5, 1), (7, 2), (10, 1), (6, 1)))
prado = cria_prado(dim, obs, an1 + an2, pos)
prado_para_str(prado)
# +----------+\\n|....rL...r|\\n|...@@.r...|\\n|..........|\\n+----------+
dim = cria_posicao(11, 4)
obs = (cria_posicao(4, 2), cria_posicao(5, 2))
an1 = tuple(cria_animal('rabbit', 5, 0) for i in range(3))
an2 = (cria_animal('lynx', 20, 15),)
pos = tuple(cria_posicao(p[0], p[1]) for p in ((5, 1), (7, 2), (10, 1), (6, 1)))
prado = cria_prado(dim, obs, an1 + an2, pos)
p1 = cria_posicao(7, 2)
p2 = cria_posicao(9, 3)
prado = mover_animal(prado, p1, p2)
prado_para_str(prado)
# +----------+\\n|....rL...r|\\n|...@@.....|\\n|........r.|\\n+----------+
dim = cria_posicao(11, 4)
obs = (cria_posicao(4, 2), cria_posicao(5, 2))
an1 = tuple(cria_animal('rabbit', 5, 0) for i in range(3))
an2 = (cria_animal('lynx', 20, 15),)
pos = tuple(cria_posicao(p[0], p[1]) for p in ((5, 1), (7, 2), (10, 1), (6, 1)))
prado = cria_prado(dim, obs, an1 + an2, pos)
obter_valor_numerico(prado, cria_posicao(9, 3))
# 45
dim = cria_posicao(11, 4)
obs = (cria_posicao(4, 2), cria_posicao(5, 2))
an1 = tuple(cria_animal('rabbit', 5, 0) for i in range(3))
an2 = (cria_animal('lynx', 20, 15),)
pos = tuple(cria_posicao(p[0], p[1]) for p in ((5, 1), (9, 3), (10, 1), (6, 1)))
prado = cria_prado(dim, obs, an1 + an2, pos)
posicao_para_str(obter_movimento(prado, cria_posicao(5, 1)))
# (4, 1)
dim = cria_posicao(11, 4)
obs = (cria_posicao(4, 2), cria_posicao(5, 2))
an1 = tuple(cria_animal('rabbit', 5, 0) for i in range(3))
an2 = (cria_animal('lynx', 20, 15),)
pos = tuple(cria_posicao(p[0], p[1]) for p in ((5, 1), (9, 3), (10, 1), (6, 1)))
prado = cria_prado(dim, obs, an1 + an2, pos)
posicao_para_str(obter_movimento(prado, cria_posicao(6, 1)))
# (5, 1)
dim = cria_posicao(11, 4)
obs = (cria_posicao(4, 2), cria_posicao(5, 2))
an1 = tuple(cria_animal('rabbit', 5, 0) for i in range(3))
an2 = (cria_animal('lynx', 20, 15),)
pos = tuple(cria_posicao(p[0], p[1]) for p in ((5, 1), (9, 3), (10, 1), (6, 1)))
prado = cria_prado(dim, obs, an1 + an2, pos)
posicao_para_str(obter_movimento(prado, cria_posicao(10, 1)))
# (10, 2)
return
def teste221():
total_score = 0
fun_name = geracao_public
dim = cria_posicao(11, 4)
obs = (cria_posicao(4, 2), cria_posicao(5, 2))
an1 = tuple(cria_animal('sheep', 2, 0) for i in range(3))
an2 = (cria_animal('wolf', 10, 3),)
pos = tuple(cria_posicao(p[0], p[1]) for p in ((2, 2), (4, 3), (10, 2), (3, 2)))
prado = cria_prado(dim, obs, an1 + an2, pos)
prado_para_str(prado)
# +----------+\\n|..........|\\n|.sW@@....s|\\n|...s......|\\n+----------+
dim = cria_posicao(11, 4)
obs = (cria_posicao(4, 2), cria_posicao(5, 2))
an1 = tuple(cria_animal('sheep', 2, 0) for i in range(3))
an2 = (cria_animal('wolf', 10, 3),)
pos = tuple(cria_posicao(p[0], p[1]) for p in ((2, 2), (4, 3), (10, 2), (3, 2)))
prado = cria_prado(dim, obs, an1 + an2, pos)
prado_para_str(geracao(prado))
# +----------+\\n|..W.......|\\n|s..@@.....|\\n|....s....s|\\n+----------+
dim = cria_posicao(11, 4)
obs = (cria_posicao(4, 2), cria_posicao(5, 2))
an1 = tuple(cria_animal('sheep', 2, 0) for i in range(3))
an2 = (cria_animal('wolf', 10, 3),)
pos = tuple(cria_posicao(p[0], p[1]) for p in ((2, 2), (4, 3), (10, 2), (3, 2)))
prado = cria_prado(dim, obs, an1 + an2, pos)
prado_para_str(geracao(geracao(prado)))
# +----------+\\n|...W......|\\n|ss.@@....s|\\n|...ss....s|\\n+----------+
dim = cria_posicao(11, 4)
obs = (cria_posicao(4, 2), cria_posicao(5, 2))
an1 = tuple(cria_animal('sheep', 2, 0) for i in range(3))
an2 = (cria_animal('wolf', 10, 3),)
pos = tuple(cria_posicao(p[0], p[1]) for p in ((2, 2), (4, 3), (10, 2), (3, 2)))
prado = cria_prado(dim, obs, an1 + an2, pos)
prado_para_str(geracao(geracao(geracao(prado))))
# +----------+\\n|.........s|\\n|...@@....s|\\n|ssss......|\\n+----------+
return
def teste222():
total_score = 0
fun_name = simula_ecossistema_public
path = '/home/fpshak/data/contests/FP2122P2/'
simula_ecossistema(path + 'public_test_config.txt', 20, False)
# Predadores: 1 vs Presas: 3 (Gen. 0)\n+----------+\n|..........|\n|.mL@@....m|\n|...m......|\n+----------+\nPredadores: 0 vs Presas: 28 (Gen. 20)\n+----------+\n|mmmmmmmmmm|\n|mmm@@mmmmm|\n|mmmmmmmmmm|\n+----------+\n(0, 28)
path = '/home/fpshak/data/contests/FP2122P2/'
simula_ecossistema(path + 'public_test_config.txt', 20, True)
# Predadores: 1 vs Presas: 3 (Gen. 0)\n+----------+\n|..........|\n|.mL@@....m|\n|...m......|\n+----------+\nPredadores: 1 vs Presas: 6 (Gen. 2)\n+----------+\n|...L......|\n|mm.@@....m|\n|...mm....m|\n+----------+\nPredadores: 0 vs Presas: 6 (Gen. 3)\n+----------+\n|.........m|\n|...@@....m|\n|mmmm......|\n+----------+\nPredadores: 0 vs Presas: 12 (Gen. 4)\n+----------+\n|........mm|\n|mmm@@....m|\n|mmmmm....m|\n+----------+\nPredadores: 0 vs Presas: 18 (Gen. 6)\n+----------+\n|mmm....mmm|\n|mmm@@..mmm|\n|mmmmm....m|\n+----------+\nPredadores: 0 vs Presas: 20 (Gen. 7)\n+----------+\n|mmmm..mmmm|\n|mmm@@..mmm|\n|mmmm.m.m..|\n+----------+\nPredadores: 0 vs Presas: 28 (Gen. 8)\n+----------+\n|mmmmmmmmmm|\n|mmm@@mmmmm|\n|mmmmmmmmmm|\n+----------+\n(0, 28)
return
| 36.314159 | 766 | 0.543195 | 1,334 | 8,207 | 3.184408 | 0.087706 | 0.147599 | 0.018362 | 0.041431 | 0.850282 | 0.816149 | 0.793785 | 0.793785 | 0.739407 | 0.731638 | 0 | 0.087401 | 0.20117 | 8,207 | 225 | 767 | 36.475556 | 0.560555 | 0.20714 | 0 | 0.776316 | 0 | 0 | 0.041216 | 0.017907 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032895 | false | 0 | 0 | 0 | 0.065789 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
ccc2c6267b1bc8745b5da604e834b0b06112c865 | 129 | py | Python | congregation/dag/nodes/__init__.py | CCD-HRI/congregation | a552856b03a64a4295792184107c4e529ca3f4ae | [
"MIT"
] | 3 | 2020-10-05T16:30:15.000Z | 2021-01-22T13:38:02.000Z | congregation/dag/nodes/__init__.py | CCD-HRI/congregation | a552856b03a64a4295792184107c4e529ca3f4ae | [
"MIT"
] | null | null | null | congregation/dag/nodes/__init__.py | CCD-HRI/congregation | a552856b03a64a4295792184107c4e529ca3f4ae | [
"MIT"
] | 1 | 2021-02-19T12:40:57.000Z | 2021-02-19T12:40:57.000Z | from congregation.dag.nodes.unary import *
from congregation.dag.nodes.binary import *
from congregation.dag.nodes.nary import *
| 32.25 | 43 | 0.813953 | 18 | 129 | 5.833333 | 0.444444 | 0.457143 | 0.542857 | 0.685714 | 0.571429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093023 | 129 | 3 | 44 | 43 | 0.897436 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
4e2f244b4ac10bdabdb51972c50a331705ddc5d3 | 494 | py | Python | pyrotation/__init__.py | ChristofDubs/pyrotation | 81b9f41e1874fe7a647c362611363cc923d6fbde | [
"MIT"
] | 2 | 2018-07-19T14:42:27.000Z | 2018-07-21T04:29:30.000Z | pyrotation/__init__.py | ChristofDubs/pyrotation | 81b9f41e1874fe7a647c362611363cc923d6fbde | [
"MIT"
] | 2 | 2018-12-18T14:58:32.000Z | 2018-12-30T17:05:56.000Z | pyrotation/__init__.py | ChristofDubs/pyrotation | 81b9f41e1874fe7a647c362611363cc923d6fbde | [
"MIT"
] | null | null | null | from .rotation import Quaternion, quat_from_angle_axis, quat_from_angle_vector, quat_from_roll_pitch_yaw, rot_from_angle_axis, rot_from_angle_vector, rot_from_roll_pitch_yaw, rot_x, rot_y, rot_z, rot_to_roll_pitch_yaw
# Names re-exported on `from pyrotation import *`.
__all__ = [
    'Quaternion',
    # quaternion constructors
    'quat_from_angle_axis',
    'quat_from_angle_vector',
    'quat_from_roll_pitch_yaw',
    # rotation-matrix constructors
    'rot_from_angle_axis',
    'rot_from_angle_vector',
    'rot_from_roll_pitch_yaw',
    # elementary rotations about a single axis
    'rot_x',
    'rot_y',
    'rot_z',
    # rotation matrix -> Euler angles
    'rot_to_roll_pitch_yaw',
]
| 32.933333 | 217 | 0.759109 | 80 | 494 | 3.9625 | 0.2125 | 0.227129 | 0.227129 | 0.201893 | 0.933754 | 0.933754 | 0.933754 | 0.933754 | 0.933754 | 0.933754 | 0 | 0 | 0.147773 | 494 | 14 | 218 | 35.285714 | 0.752969 | 0 | 0 | 0 | 0 | 0 | 0.354251 | 0.224696 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
9d7e679abf489840e6c9fa664459a68833d9a9c4 | 2,976 | py | Python | Program.py | IrvanDimetrio/Calculator-Fraction | ac4cfe5ed53416c12a4e6e34cae941b010bfaad2 | [
"MIT"
] | null | null | null | Program.py | IrvanDimetrio/Calculator-Fraction | ac4cfe5ed53416c12a4e6e34cae941b010bfaad2 | [
"MIT"
] | null | null | null | Program.py | IrvanDimetrio/Calculator-Fraction | ac4cfe5ed53416c12a4e6e34cae941b010bfaad2 | [
"MIT"
] | null | null | null | from fractions import Fraction as P
# Interactive mixed-number fraction calculator (menu-driven).
#
# Ported from Python 2 to Python 3: `print` statements became calls and the
# unsafe Py2 `input()` (which eval'd user text — arbitrary code execution) is
# replaced by `int(input(...))`.  The four near-identical menu branches are
# collapsed into one code path driven by a dispatch table; the printed
# transcript (prompts, separators, worked layout) is unchanged.

# Menu choice -> (symbol shown in the worked layout, Fraction operation).
_OPERASI = {
    1: ('+', lambda x, y: x + y),  # Penjumlahan
    2: ('-', lambda x, y: x - y),  # Pengurangan
    3: ('X', lambda x, y: x * y),  # Perkalian
    4: (':', lambda x, y: x / y),  # Pembagian
}


def hitung_pecahan(pilih, a, b, c, d, e, f):
    """Apply menu operation `pilih` to two mixed numbers and return a Fraction.

    Each mixed number is (whole, numerator, denominator): the first is
    (a, b, c), the second (d, e, f).  Raises KeyError for an unknown
    `pilih` and ZeroDivisionError when a denominator is 0 (same as the
    original script's behaviour with Fraction).
    """
    bill1 = (a * c) + b  # improper-fraction numerator of the first operand
    bill4 = (d * f) + e  # improper-fraction numerator of the second operand
    _, op = _OPERASI[pilih]
    return op(P(bill1, c), P(bill4, f))


def main():
    """Show the menu, read both mixed numbers, print the worked layout and result."""
    print("- PROGRAM MENGHITUNG BILANGAN PECAHAN")
    print(" ")
    print("1. Penjumlahan")
    print("2. Pengurangan")
    print("3. Perkalian")
    print("4. Pembagian")
    print(" ")
    pilih = int(input("Masukkan Pilihan : "))
    if pilih not in _OPERASI:
        print(" ")
        print(" Pilihan Tidak Ada")
        return
    a = int(input("Bulat 1 : "))
    b = int(input("Pembilang 1 : "))
    c = int(input("Penyebut 1 : "))
    d = int(input("Bulat 2 : "))
    e = int(input("Pembilang 2 : "))
    f = int(input("Penyebut 2 : "))
    simbol, _ = _OPERASI[pilih]
    # Worked layout: mixed numbers side by side ('-' is the fraction bar,
    # `simbol` the operation), then the improper fractions, then the result.
    print("-----------------------------------")
    print(" ", b, ' ', ' ', e, ' ')
    print(a, '-', simbol, d, '-', '=')
    print(" ", c, ' ', ' ', f, ' ')
    print('----------------------------------- ')
    bill1 = (a * c) + b
    bill4 = (d * f) + e
    print(bill1, " ", "", bill4, " ")
    print("--", simbol, "--", "=")
    print(c, " ", "", f)
    print('----------------------------------- ')
    print(hitung_pecahan(pilih, a, b, c, d, e, f))


if __name__ == "__main__":
    main()
| 31.326316 | 49 | 0.326277 | 281 | 2,976 | 3.455516 | 0.142349 | 0.133883 | 0.057673 | 0.098867 | 0.785788 | 0.785788 | 0.785788 | 0.785788 | 0.785788 | 0.785788 | 0 | 0.027145 | 0.306788 | 2,976 | 94 | 50 | 31.659574 | 0.443529 | 0 | 0 | 0.715909 | 0 | 0 | 0.314852 | 0.141129 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.011364 | null | null | 0.556818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 9 |
9dbfd46dd86009025c58dba5249095f6ebb0d5cf | 37,081 | py | Python | tests/elichika_typing/pytorch/fast_neural_style_test.py | shinh/chainer-compiler | 2a5e0bfd3f1abd7258a4cbffcfab79bc1d28f9e9 | [
"MIT"
] | 116 | 2019-01-25T03:54:44.000Z | 2022-03-08T00:11:14.000Z | tests/elichika_typing/pytorch/fast_neural_style_test.py | shinh/chainer-compiler | 2a5e0bfd3f1abd7258a4cbffcfab79bc1d28f9e9 | [
"MIT"
] | 431 | 2019-01-25T10:18:44.000Z | 2020-06-17T05:28:55.000Z | tests/elichika_typing/pytorch/fast_neural_style_test.py | momohatt/chainer-compiler | 26782cd29a5becf8e2badf268b47d98b3a6aea1d | [
"MIT"
] | 26 | 2019-01-25T07:21:09.000Z | 2021-11-26T04:24:35.000Z | import torch
import torch.nn as nn
import unittest
from chainer_compiler.elichika.testtools import generate_id2type_from_forward
from chainer_compiler.elichika.testtools import type_inference_tools
from testcases.pytorch.fast_neural_style import gen_TransformerNet_model
class TestTransformerNet(unittest.TestCase):
def test_TransformerNet(self):
type_inference_tools.reset_state()
model, forward_args = gen_TransformerNet_model()
id2type = generate_id2type_from_forward(model, forward_args)
# === BEGIN ASSERTIONS for TransformerNet ===
# === function forward ===
self.assertEqual(str(id2type[8]), "torch.Tensor(float32, (5, 32, 16, 16))") # Name y (line 2)
self.assertEqual(str(id2type[10]), "torch.Tensor(float32, (5, 32, 16, 16))") # Call self.relu(self.in1(self.conv1(X))) (line 2)
self.assertEqual(str(id2type[12]), "class TransformerNet") # Name self (line 2)
self.assertEqual(str(id2type[15]), "torch.Tensor(float32, (5, 32, 16, 16))") # Call self.in1(self.conv1(X)) (line 2)
self.assertEqual(str(id2type[17]), "class TransformerNet") # Name self (line 2)
self.assertEqual(str(id2type[20]), "torch.Tensor(float32, (5, 32, 16, 16))") # Call self.conv1(X) (line 2)
self.assertEqual(str(id2type[22]), "class TransformerNet") # Name self (line 2)
self.assertEqual(str(id2type[25]), "torch.Tensor(float32, (5, 3, 16, 16))") # Name X (line 2)
self.assertEqual(str(id2type[28]), "torch.Tensor(float32, (5, 64, 8, 8))") # Name y (line 3)
self.assertEqual(str(id2type[30]), "torch.Tensor(float32, (5, 64, 8, 8))") # Call self.relu(self.in2(self.conv2(y))) (line 3)
self.assertEqual(str(id2type[32]), "class TransformerNet") # Name self (line 3)
self.assertEqual(str(id2type[35]), "torch.Tensor(float32, (5, 64, 8, 8))") # Call self.in2(self.conv2(y)) (line 3)
self.assertEqual(str(id2type[37]), "class TransformerNet") # Name self (line 3)
self.assertEqual(str(id2type[40]), "torch.Tensor(float32, (5, 64, 8, 8))") # Call self.conv2(y) (line 3)
self.assertEqual(str(id2type[42]), "class TransformerNet") # Name self (line 3)
self.assertEqual(str(id2type[45]), "torch.Tensor(float32, (5, 32, 16, 16))") # Name y (line 3)
self.assertEqual(str(id2type[48]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name y (line 4)
self.assertEqual(str(id2type[50]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.relu(self.in3(self.conv3(y))) (line 4)
self.assertEqual(str(id2type[52]), "class TransformerNet") # Name self (line 4)
self.assertEqual(str(id2type[55]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.in3(self.conv3(y)) (line 4)
self.assertEqual(str(id2type[57]), "class TransformerNet") # Name self (line 4)
self.assertEqual(str(id2type[60]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv3(y) (line 4)
self.assertEqual(str(id2type[62]), "class TransformerNet") # Name self (line 4)
self.assertEqual(str(id2type[65]), "torch.Tensor(float32, (5, 64, 8, 8))") # Name y (line 4)
self.assertEqual(str(id2type[68]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name y (line 5)
self.assertEqual(str(id2type[70]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.res1(y) (line 5)
self.assertEqual(str(id2type[72]), "class TransformerNet") # Name self (line 5)
self.assertEqual(str(id2type[75]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name y (line 5)
self.assertEqual(str(id2type[78]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name y (line 6)
self.assertEqual(str(id2type[80]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.res2(y) (line 6)
self.assertEqual(str(id2type[82]), "class TransformerNet") # Name self (line 6)
self.assertEqual(str(id2type[85]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name y (line 6)
self.assertEqual(str(id2type[88]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name y (line 7)
self.assertEqual(str(id2type[90]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.res3(y) (line 7)
self.assertEqual(str(id2type[92]), "class TransformerNet") # Name self (line 7)
self.assertEqual(str(id2type[95]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name y (line 7)
self.assertEqual(str(id2type[98]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name y (line 8)
self.assertEqual(str(id2type[100]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.res4(y) (line 8)
self.assertEqual(str(id2type[102]), "class TransformerNet") # Name self (line 8)
self.assertEqual(str(id2type[105]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name y (line 8)
self.assertEqual(str(id2type[108]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name y (line 9)
self.assertEqual(str(id2type[110]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.res5(y) (line 9)
self.assertEqual(str(id2type[112]), "class TransformerNet") # Name self (line 9)
self.assertEqual(str(id2type[115]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name y (line 9)
self.assertEqual(str(id2type[118]), "torch.Tensor(float32, (5, 64, None, None))") # Name y (line 10)
self.assertEqual(str(id2type[120]), "torch.Tensor(float32, (5, 64, None, None))") # Call self.relu(self.in4(self.deconv1(y))) (line 10)
self.assertEqual(str(id2type[122]), "class TransformerNet") # Name self (line 10)
self.assertEqual(str(id2type[125]), "torch.Tensor(float32, (5, 64, None, None))") # Call self.in4(self.deconv1(y)) (line 10)
self.assertEqual(str(id2type[127]), "class TransformerNet") # Name self (line 10)
self.assertEqual(str(id2type[130]), "torch.Tensor(float32, (5, 64, None, None))") # Call self.deconv1(y) (line 10)
self.assertEqual(str(id2type[132]), "class TransformerNet") # Name self (line 10)
self.assertEqual(str(id2type[135]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name y (line 10)
self.assertEqual(str(id2type[138]), "torch.Tensor(float32, (5, 32, None, None))") # Name y (line 11)
self.assertEqual(str(id2type[140]), "torch.Tensor(float32, (5, 32, None, None))") # Call self.relu(self.in5(self.deconv2(y))) (line 11)
self.assertEqual(str(id2type[142]), "class TransformerNet") # Name self (line 11)
self.assertEqual(str(id2type[145]), "torch.Tensor(float32, (5, 32, None, None))") # Call self.in5(self.deconv2(y)) (line 11)
self.assertEqual(str(id2type[147]), "class TransformerNet") # Name self (line 11)
self.assertEqual(str(id2type[150]), "torch.Tensor(float32, (5, 32, None, None))") # Call self.deconv2(y) (line 11)
self.assertEqual(str(id2type[152]), "class TransformerNet") # Name self (line 11)
self.assertEqual(str(id2type[155]), "torch.Tensor(float32, (5, 64, None, None))") # Name y (line 11)
self.assertEqual(str(id2type[158]), "torch.Tensor(float32, (5, 3, None, None))") # Name y (line 12)
self.assertEqual(str(id2type[160]), "torch.Tensor(float32, (5, 3, None, None))") # Call self.deconv3(y) (line 12)
self.assertEqual(str(id2type[162]), "class TransformerNet") # Name self (line 12)
self.assertEqual(str(id2type[165]), "torch.Tensor(float32, (5, 32, None, None))") # Name y (line 12)
self.assertEqual(str(id2type[168]), "torch.Tensor(float32, (5, 3, None, None))") # Name y (line 13)
# === function forward ===
self.assertEqual(str(id2type[177]), "torch.Tensor(float32, (5, 3, 24, 24))") # Name out (line 2)
self.assertEqual(str(id2type[179]), "torch.Tensor(float32, (5, 3, 24, 24))") # Call self.reflection_pad(x) (line 2)
self.assertEqual(str(id2type[181]), "class ConvLayer") # Name self (line 2)
self.assertEqual(str(id2type[184]), "torch.Tensor(float32, (5, 3, 16, 16))") # Name x (line 2)
self.assertEqual(str(id2type[187]), "torch.Tensor(float32, (5, 32, 16, 16))") # Name out (line 3)
self.assertEqual(str(id2type[189]), "torch.Tensor(float32, (5, 32, 16, 16))") # Call self.conv2d(out) (line 3)
self.assertEqual(str(id2type[191]), "class ConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[194]), "torch.Tensor(float32, (5, 3, 24, 24))") # Name out (line 3)
self.assertEqual(str(id2type[197]), "torch.Tensor(float32, (5, 32, 16, 16))") # Name out (line 4)
# === function forward ===
self.assertEqual(str(id2type[206]), "torch.Tensor(float32, (5, 32, 18, 18))") # Name out (line 2)
self.assertEqual(str(id2type[208]), "torch.Tensor(float32, (5, 32, 18, 18))") # Call self.reflection_pad(x) (line 2)
self.assertEqual(str(id2type[210]), "class ConvLayer") # Name self (line 2)
self.assertEqual(str(id2type[213]), "torch.Tensor(float32, (5, 32, 16, 16))") # Name x (line 2)
self.assertEqual(str(id2type[216]), "torch.Tensor(float32, (5, 64, 8, 8))") # Name out (line 3)
self.assertEqual(str(id2type[218]), "torch.Tensor(float32, (5, 64, 8, 8))") # Call self.conv2d(out) (line 3)
self.assertEqual(str(id2type[220]), "class ConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[223]), "torch.Tensor(float32, (5, 32, 18, 18))") # Name out (line 3)
self.assertEqual(str(id2type[226]), "torch.Tensor(float32, (5, 64, 8, 8))") # Name out (line 4)
# === function forward ===
self.assertEqual(str(id2type[235]), "torch.Tensor(float32, (5, 64, 10, 10))") # Name out (line 2)
self.assertEqual(str(id2type[237]), "torch.Tensor(float32, (5, 64, 10, 10))") # Call self.reflection_pad(x) (line 2)
self.assertEqual(str(id2type[239]), "class ConvLayer") # Name self (line 2)
self.assertEqual(str(id2type[242]), "torch.Tensor(float32, (5, 64, 8, 8))") # Name x (line 2)
self.assertEqual(str(id2type[245]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[247]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2d(out) (line 3)
self.assertEqual(str(id2type[249]), "class ConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[252]), "torch.Tensor(float32, (5, 64, 10, 10))") # Name out (line 3)
self.assertEqual(str(id2type[255]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
# === function forward ===
self.assertEqual(str(id2type[264]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name residual (line 2)
self.assertEqual(str(id2type[266]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[269]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[271]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.relu(self.in1(self.conv1(x))) (line 3)
self.assertEqual(str(id2type[273]), "class ResidualBlock") # Name self (line 3)
self.assertEqual(str(id2type[276]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.in1(self.conv1(x)) (line 3)
self.assertEqual(str(id2type[278]), "class ResidualBlock") # Name self (line 3)
self.assertEqual(str(id2type[281]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv1(x) (line 3)
self.assertEqual(str(id2type[283]), "class ResidualBlock") # Name self (line 3)
self.assertEqual(str(id2type[286]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 3)
self.assertEqual(str(id2type[289]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
self.assertEqual(str(id2type[291]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.in2(self.conv2(out)) (line 4)
self.assertEqual(str(id2type[293]), "class ResidualBlock") # Name self (line 4)
self.assertEqual(str(id2type[296]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2(out) (line 4)
self.assertEqual(str(id2type[298]), "class ResidualBlock") # Name self (line 4)
self.assertEqual(str(id2type[301]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
self.assertEqual(str(id2type[304]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 5)
self.assertEqual(str(id2type[306]), "torch.Tensor(float32, (5, 128, 4, 4))") # BinOp out + residual (line 5)
self.assertEqual(str(id2type[307]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 5)
self.assertEqual(str(id2type[310]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name residual (line 5)
self.assertEqual(str(id2type[313]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 6)
# === function forward ===
self.assertEqual(str(id2type[322]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 2)
self.assertEqual(str(id2type[324]), "torch.Tensor(float32, (5, 128, 6, 6))") # Call self.reflection_pad(x) (line 2)
self.assertEqual(str(id2type[326]), "class ConvLayer") # Name self (line 2)
self.assertEqual(str(id2type[329]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[332]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[334]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2d(out) (line 3)
self.assertEqual(str(id2type[336]), "class ConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[339]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 3)
self.assertEqual(str(id2type[342]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
# === function forward ===
self.assertEqual(str(id2type[351]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 2)
self.assertEqual(str(id2type[353]), "torch.Tensor(float32, (5, 128, 6, 6))") # Call self.reflection_pad(x) (line 2)
self.assertEqual(str(id2type[355]), "class ConvLayer") # Name self (line 2)
self.assertEqual(str(id2type[358]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[361]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[363]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2d(out) (line 3)
self.assertEqual(str(id2type[365]), "class ConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[368]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 3)
self.assertEqual(str(id2type[371]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
# === function forward ===
self.assertEqual(str(id2type[380]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name residual (line 2)
self.assertEqual(str(id2type[382]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[385]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[387]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.relu(self.in1(self.conv1(x))) (line 3)
self.assertEqual(str(id2type[389]), "class ResidualBlock") # Name self (line 3)
self.assertEqual(str(id2type[392]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.in1(self.conv1(x)) (line 3)
self.assertEqual(str(id2type[394]), "class ResidualBlock") # Name self (line 3)
self.assertEqual(str(id2type[397]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv1(x) (line 3)
self.assertEqual(str(id2type[399]), "class ResidualBlock") # Name self (line 3)
self.assertEqual(str(id2type[402]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 3)
self.assertEqual(str(id2type[405]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
self.assertEqual(str(id2type[407]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.in2(self.conv2(out)) (line 4)
self.assertEqual(str(id2type[409]), "class ResidualBlock") # Name self (line 4)
self.assertEqual(str(id2type[412]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2(out) (line 4)
self.assertEqual(str(id2type[414]), "class ResidualBlock") # Name self (line 4)
self.assertEqual(str(id2type[417]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
self.assertEqual(str(id2type[420]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 5)
self.assertEqual(str(id2type[422]), "torch.Tensor(float32, (5, 128, 4, 4))") # BinOp out + residual (line 5)
self.assertEqual(str(id2type[423]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 5)
self.assertEqual(str(id2type[426]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name residual (line 5)
self.assertEqual(str(id2type[429]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 6)
# === function forward ===
self.assertEqual(str(id2type[438]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 2)
self.assertEqual(str(id2type[440]), "torch.Tensor(float32, (5, 128, 6, 6))") # Call self.reflection_pad(x) (line 2)
self.assertEqual(str(id2type[442]), "class ConvLayer") # Name self (line 2)
self.assertEqual(str(id2type[445]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[448]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[450]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2d(out) (line 3)
self.assertEqual(str(id2type[452]), "class ConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[455]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 3)
self.assertEqual(str(id2type[458]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
# === function forward ===
self.assertEqual(str(id2type[467]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 2)
self.assertEqual(str(id2type[469]), "torch.Tensor(float32, (5, 128, 6, 6))") # Call self.reflection_pad(x) (line 2)
self.assertEqual(str(id2type[471]), "class ConvLayer") # Name self (line 2)
self.assertEqual(str(id2type[474]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[477]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[479]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2d(out) (line 3)
self.assertEqual(str(id2type[481]), "class ConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[484]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 3)
self.assertEqual(str(id2type[487]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
# === function forward ===
self.assertEqual(str(id2type[496]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name residual (line 2)
self.assertEqual(str(id2type[498]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[501]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[503]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.relu(self.in1(self.conv1(x))) (line 3)
self.assertEqual(str(id2type[505]), "class ResidualBlock") # Name self (line 3)
self.assertEqual(str(id2type[508]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.in1(self.conv1(x)) (line 3)
self.assertEqual(str(id2type[510]), "class ResidualBlock") # Name self (line 3)
self.assertEqual(str(id2type[513]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv1(x) (line 3)
self.assertEqual(str(id2type[515]), "class ResidualBlock") # Name self (line 3)
self.assertEqual(str(id2type[518]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 3)
self.assertEqual(str(id2type[521]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
self.assertEqual(str(id2type[523]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.in2(self.conv2(out)) (line 4)
self.assertEqual(str(id2type[525]), "class ResidualBlock") # Name self (line 4)
self.assertEqual(str(id2type[528]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2(out) (line 4)
self.assertEqual(str(id2type[530]), "class ResidualBlock") # Name self (line 4)
self.assertEqual(str(id2type[533]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
self.assertEqual(str(id2type[536]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 5)
self.assertEqual(str(id2type[538]), "torch.Tensor(float32, (5, 128, 4, 4))") # BinOp out + residual (line 5)
self.assertEqual(str(id2type[539]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 5)
self.assertEqual(str(id2type[542]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name residual (line 5)
self.assertEqual(str(id2type[545]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 6)
# === function forward ===
self.assertEqual(str(id2type[554]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 2)
self.assertEqual(str(id2type[556]), "torch.Tensor(float32, (5, 128, 6, 6))") # Call self.reflection_pad(x) (line 2)
self.assertEqual(str(id2type[558]), "class ConvLayer") # Name self (line 2)
self.assertEqual(str(id2type[561]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[564]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[566]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2d(out) (line 3)
self.assertEqual(str(id2type[568]), "class ConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[571]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 3)
self.assertEqual(str(id2type[574]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
# === function forward ===
self.assertEqual(str(id2type[583]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 2)
self.assertEqual(str(id2type[585]), "torch.Tensor(float32, (5, 128, 6, 6))") # Call self.reflection_pad(x) (line 2)
self.assertEqual(str(id2type[587]), "class ConvLayer") # Name self (line 2)
self.assertEqual(str(id2type[590]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[593]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[595]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2d(out) (line 3)
self.assertEqual(str(id2type[597]), "class ConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[600]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 3)
self.assertEqual(str(id2type[603]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
# === function forward ===
self.assertEqual(str(id2type[612]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name residual (line 2)
self.assertEqual(str(id2type[614]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[617]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[619]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.relu(self.in1(self.conv1(x))) (line 3)
self.assertEqual(str(id2type[621]), "class ResidualBlock") # Name self (line 3)
self.assertEqual(str(id2type[624]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.in1(self.conv1(x)) (line 3)
self.assertEqual(str(id2type[626]), "class ResidualBlock") # Name self (line 3)
self.assertEqual(str(id2type[629]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv1(x) (line 3)
self.assertEqual(str(id2type[631]), "class ResidualBlock") # Name self (line 3)
self.assertEqual(str(id2type[634]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 3)
self.assertEqual(str(id2type[637]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
self.assertEqual(str(id2type[639]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.in2(self.conv2(out)) (line 4)
self.assertEqual(str(id2type[641]), "class ResidualBlock") # Name self (line 4)
self.assertEqual(str(id2type[644]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2(out) (line 4)
self.assertEqual(str(id2type[646]), "class ResidualBlock") # Name self (line 4)
self.assertEqual(str(id2type[649]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
self.assertEqual(str(id2type[652]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 5)
self.assertEqual(str(id2type[654]), "torch.Tensor(float32, (5, 128, 4, 4))") # BinOp out + residual (line 5)
self.assertEqual(str(id2type[655]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 5)
self.assertEqual(str(id2type[658]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name residual (line 5)
self.assertEqual(str(id2type[661]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 6)
# === function forward ===
self.assertEqual(str(id2type[670]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 2)
self.assertEqual(str(id2type[672]), "torch.Tensor(float32, (5, 128, 6, 6))") # Call self.reflection_pad(x) (line 2)
self.assertEqual(str(id2type[674]), "class ConvLayer") # Name self (line 2)
self.assertEqual(str(id2type[677]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[680]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[682]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2d(out) (line 3)
self.assertEqual(str(id2type[684]), "class ConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[687]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 3)
self.assertEqual(str(id2type[690]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
# === function forward ===
self.assertEqual(str(id2type[699]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 2)
self.assertEqual(str(id2type[701]), "torch.Tensor(float32, (5, 128, 6, 6))") # Call self.reflection_pad(x) (line 2)
self.assertEqual(str(id2type[703]), "class ConvLayer") # Name self (line 2)
self.assertEqual(str(id2type[706]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[709]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[711]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2d(out) (line 3)
self.assertEqual(str(id2type[713]), "class ConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[716]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 3)
self.assertEqual(str(id2type[719]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
# === function forward ===
self.assertEqual(str(id2type[728]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name residual (line 2)
self.assertEqual(str(id2type[730]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[733]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[735]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.relu(self.in1(self.conv1(x))) (line 3)
self.assertEqual(str(id2type[737]), "class ResidualBlock") # Name self (line 3)
self.assertEqual(str(id2type[740]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.in1(self.conv1(x)) (line 3)
self.assertEqual(str(id2type[742]), "class ResidualBlock") # Name self (line 3)
self.assertEqual(str(id2type[745]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv1(x) (line 3)
self.assertEqual(str(id2type[747]), "class ResidualBlock") # Name self (line 3)
self.assertEqual(str(id2type[750]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 3)
self.assertEqual(str(id2type[753]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
self.assertEqual(str(id2type[755]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.in2(self.conv2(out)) (line 4)
self.assertEqual(str(id2type[757]), "class ResidualBlock") # Name self (line 4)
self.assertEqual(str(id2type[760]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2(out) (line 4)
self.assertEqual(str(id2type[762]), "class ResidualBlock") # Name self (line 4)
self.assertEqual(str(id2type[765]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
self.assertEqual(str(id2type[768]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 5)
self.assertEqual(str(id2type[770]), "torch.Tensor(float32, (5, 128, 4, 4))") # BinOp out + residual (line 5)
self.assertEqual(str(id2type[771]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 5)
self.assertEqual(str(id2type[774]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name residual (line 5)
self.assertEqual(str(id2type[777]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 6)
# === function forward ===
self.assertEqual(str(id2type[786]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 2)
self.assertEqual(str(id2type[788]), "torch.Tensor(float32, (5, 128, 6, 6))") # Call self.reflection_pad(x) (line 2)
self.assertEqual(str(id2type[790]), "class ConvLayer") # Name self (line 2)
self.assertEqual(str(id2type[793]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[796]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[798]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2d(out) (line 3)
self.assertEqual(str(id2type[800]), "class ConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[803]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 3)
self.assertEqual(str(id2type[806]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
# === function forward ===
self.assertEqual(str(id2type[815]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 2)
self.assertEqual(str(id2type[817]), "torch.Tensor(float32, (5, 128, 6, 6))") # Call self.reflection_pad(x) (line 2)
self.assertEqual(str(id2type[819]), "class ConvLayer") # Name self (line 2)
self.assertEqual(str(id2type[822]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[825]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 3)
self.assertEqual(str(id2type[827]), "torch.Tensor(float32, (5, 128, 4, 4))") # Call self.conv2d(out) (line 3)
self.assertEqual(str(id2type[829]), "class ConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[832]), "torch.Tensor(float32, (5, 128, 6, 6))") # Name out (line 3)
self.assertEqual(str(id2type[835]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name out (line 4)
# === function forward ===
self.assertEqual(str(id2type[844]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x_in (line 2)
self.assertEqual(str(id2type[846]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x (line 2)
self.assertEqual(str(id2type[849]), "int") # Attribute self.upsample (line 3)
self.assertEqual(str(id2type[850]), "class UpsampleConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[854]), "torch.Tensor(float32, (5, 128, 8, 8))") # Name x_in (line 4)
self.assertEqual(str(id2type[856]), "torch.Tensor(float32, (5, 128, 8, 8))") # Call torch.nn.functional.interpolate(x_in, mode='nearest', scale_factor=self.upsample) (line 4)
self.assertEqual(str(id2type[858]), "class module") # Attribute torch.nn.functional (line 4)
self.assertEqual(str(id2type[859]), "class module") # Attribute torch.nn (line 4)
self.assertEqual(str(id2type[865]), "torch.Tensor(float32, (5, 128, 4, 4))") # Name x_in (line 4)
self.assertEqual(str(id2type[868]), "string") # Constant 'nearest' (line 4)
self.assertEqual(str(id2type[870]), "int") # Attribute self.upsample (line 4)
self.assertEqual(str(id2type[871]), "class UpsampleConvLayer") # Name self (line 4)
self.assertEqual(str(id2type[875]), "torch.Tensor(float32, (5, 128, None, None))") # Name out (line 5)
self.assertEqual(str(id2type[877]), "torch.Tensor(float32, (5, 128, None, None))") # Call self.reflection_pad(x_in) (line 5)
self.assertEqual(str(id2type[879]), "class UpsampleConvLayer") # Name self (line 5)
self.assertEqual(str(id2type[882]), "torch.Tensor(float32, (5, 128, None, None))") # Name x_in (line 5)
self.assertEqual(str(id2type[885]), "torch.Tensor(float32, (5, 64, None, None))") # Name out (line 6)
self.assertEqual(str(id2type[887]), "torch.Tensor(float32, (5, 64, None, None))") # Call self.conv2d(out) (line 6)
self.assertEqual(str(id2type[889]), "class UpsampleConvLayer") # Name self (line 6)
self.assertEqual(str(id2type[892]), "torch.Tensor(float32, (5, 128, None, None))") # Name out (line 6)
self.assertEqual(str(id2type[895]), "torch.Tensor(float32, (5, 64, None, None))") # Name out (line 7)
# === function forward ===
self.assertEqual(str(id2type[904]), "torch.Tensor(float32, (5, 64, None, None))") # Name x_in (line 2)
self.assertEqual(str(id2type[906]), "torch.Tensor(float32, (5, 64, None, None))") # Name x (line 2)
self.assertEqual(str(id2type[909]), "int") # Attribute self.upsample (line 3)
self.assertEqual(str(id2type[910]), "class UpsampleConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[914]), "torch.Tensor(float32, (5, 64, None, None))") # Name x_in (line 4)
self.assertEqual(str(id2type[916]), "torch.Tensor(float32, (5, 64, None, None))") # Call torch.nn.functional.interpolate(x_in, mode='nearest', scale_factor=self.upsample) (line 4)
self.assertEqual(str(id2type[918]), "class module") # Attribute torch.nn.functional (line 4)
self.assertEqual(str(id2type[919]), "class module") # Attribute torch.nn (line 4)
self.assertEqual(str(id2type[925]), "torch.Tensor(float32, (5, 64, None, None))") # Name x_in (line 4)
self.assertEqual(str(id2type[928]), "string") # Constant 'nearest' (line 4)
self.assertEqual(str(id2type[930]), "int") # Attribute self.upsample (line 4)
self.assertEqual(str(id2type[931]), "class UpsampleConvLayer") # Name self (line 4)
self.assertEqual(str(id2type[935]), "torch.Tensor(float32, (5, 64, None, None))") # Name out (line 5)
self.assertEqual(str(id2type[937]), "torch.Tensor(float32, (5, 64, None, None))") # Call self.reflection_pad(x_in) (line 5)
self.assertEqual(str(id2type[939]), "class UpsampleConvLayer") # Name self (line 5)
self.assertEqual(str(id2type[942]), "torch.Tensor(float32, (5, 64, None, None))") # Name x_in (line 5)
self.assertEqual(str(id2type[945]), "torch.Tensor(float32, (5, 32, None, None))") # Name out (line 6)
self.assertEqual(str(id2type[947]), "torch.Tensor(float32, (5, 32, None, None))") # Call self.conv2d(out) (line 6)
self.assertEqual(str(id2type[949]), "class UpsampleConvLayer") # Name self (line 6)
self.assertEqual(str(id2type[952]), "torch.Tensor(float32, (5, 64, None, None))") # Name out (line 6)
self.assertEqual(str(id2type[955]), "torch.Tensor(float32, (5, 32, None, None))") # Name out (line 7)
# === function forward ===
self.assertEqual(str(id2type[964]), "torch.Tensor(float32, (5, 32, None, None))") # Name out (line 2)
self.assertEqual(str(id2type[966]), "torch.Tensor(float32, (5, 32, None, None))") # Call self.reflection_pad(x) (line 2)
self.assertEqual(str(id2type[968]), "class ConvLayer") # Name self (line 2)
self.assertEqual(str(id2type[971]), "torch.Tensor(float32, (5, 32, None, None))") # Name x (line 2)
self.assertEqual(str(id2type[974]), "torch.Tensor(float32, (5, 3, None, None))") # Name out (line 3)
self.assertEqual(str(id2type[976]), "torch.Tensor(float32, (5, 3, None, None))") # Call self.conv2d(out) (line 3)
self.assertEqual(str(id2type[978]), "class ConvLayer") # Name self (line 3)
self.assertEqual(str(id2type[981]), "torch.Tensor(float32, (5, 32, None, None))") # Name out (line 3)
self.assertEqual(str(id2type[984]), "torch.Tensor(float32, (5, 3, None, None))") # Name out (line 4)
# === END ASSERTIONS for TransformerNet ===
def main():
    """Entry point: hand control to the unittest test runner."""
    unittest.main()
# Run the test suite only when this file is executed directly.
if __name__ == '__main__':
    main()
| 95.816537 | 187 | 0.629136 | 5,356 | 37,081 | 4.344473 | 0.082338 | 0.217886 | 0.261464 | 0.363144 | 0.938416 | 0.932141 | 0.918432 | 0.904207 | 0.877605 | 0.823886 | 0 | 0.118526 | 0.176802 | 37,081 | 386 | 188 | 96.064767 | 0.643767 | 0.229417 | 0 | 0 | 1 | 0 | 0.385665 | 0.18312 | 0 | 0 | 0 | 0 | 0.957507 | 1 | 0.005666 | false | 0 | 0.016997 | 0 | 0.025496 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 |
d1c6405ebda23be34ecb6a4cce8d42b156f8a1b0 | 403,003 | py | Python | clearml/backend_api/services/v2_13/tasks.py | patzm/clearml | 63c3e446a3f0101d187734749f987ab1a33d7f22 | [
"Apache-2.0"
] | 1,118 | 2020-12-23T09:28:43.000Z | 2022-03-31T14:22:31.000Z | clearml/backend_api/services/v2_13/tasks.py | patzm/clearml | 63c3e446a3f0101d187734749f987ab1a33d7f22 | [
"Apache-2.0"
] | 347 | 2020-12-23T22:38:48.000Z | 2022-03-31T20:01:06.000Z | clearml/backend_api/services/v2_13/tasks.py | patzm/clearml | 63c3e446a3f0101d187734749f987ab1a33d7f22 | [
"Apache-2.0"
] | 228 | 2020-12-23T14:44:51.000Z | 2022-03-27T08:56:48.000Z | """
tasks service
Provides a management API for tasks in the system.
"""
import enum
from datetime import datetime
import six
from clearml.backend_api.session import (
Request,
BatchRequest,
Response,
NonStrictDataModel,
schema_property,
StringEnum,
)
from dateutil.parser import parse as parse_datetime
class MultiFieldPatternData(NonStrictDataModel):
    """
    Pattern matched against one or more named fields.

    :param pattern: Pattern string (regex)
    :type pattern: str
    :param fields: List of field names
    :type fields: Sequence[str]
    """

    _schema = {
        "properties": {
            "fields": {
                "description": "List of field names",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
            "pattern": {
                "description": "Pattern string (regex)",
                "type": ["string", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, pattern=None, fields=None, **kwargs):
        super(MultiFieldPatternData, self).__init__(**kwargs)
        self.pattern = pattern
        self.fields = fields

    @schema_property("pattern")
    def pattern(self):
        return self._property_pattern

    @pattern.setter
    def pattern(self, value):
        # None clears the field; anything else must be a string.
        if value is not None:
            self.assert_isinstance(value, "pattern", six.string_types)
        self._property_pattern = value

    @schema_property("fields")
    def fields(self):
        return self._property_fields

    @fields.setter
    def fields(self, value):
        # None clears the field; otherwise a sequence whose items are strings.
        if value is not None:
            self.assert_isinstance(value, "fields", (list, tuple))
            self.assert_isinstance(value, "fields", six.string_types, is_array=True)
        self._property_fields = value
class ModelTypeEnum(StringEnum):
    """String enum with the two model-direction values: "input" and "output"."""
    input = "input"
    output = "output"
class TaskModelItem(NonStrictDataModel):
    """
    A named reference to a model attached to a task.

    :param name: The task model name
    :type name: str
    :param model: The model ID
    :type model: str
    """

    _schema = {
        "properties": {
            "model": {"description": "The model ID", "type": "string"},
            "name": {"description": "The task model name", "type": "string"},
        },
        "required": ["name", "model"],
        "type": "object",
    }

    def __init__(self, name, model, **kwargs):
        super(TaskModelItem, self).__init__(**kwargs)
        self.name = name
        self.model = model

    @schema_property("name")
    def name(self):
        return self._property_name

    @name.setter
    def name(self, value):
        # None clears the field; anything else must be a string.
        if value is not None:
            self.assert_isinstance(value, "name", six.string_types)
        self._property_name = value

    @schema_property("model")
    def model(self):
        return self._property_model

    @model.setter
    def model(self, value):
        # None clears the field; anything else must be a string.
        if value is not None:
            self.assert_isinstance(value, "model", six.string_types)
        self._property_model = value
class Script(NonStrictDataModel):
    """
    Source-control and execution details of a task's script.

    :param binary: Binary to use when running the script
    :type binary: str
    :param repository: Name of the repository where the script is located
    :type repository: str
    :param tag: Repository tag
    :type tag: str
    :param branch: Repository branch id If not provided and tag not provided,
        default repository branch is used.
    :type branch: str
    :param version_num: Version (changeset) number. Optional (default is head
        version) Unused if tag is provided.
    :type version_num: str
    :param entry_point: Path to execute within the repository
    :type entry_point: str
    :param working_dir: Path to the folder from which to run the script Default -
        root folder of repository
    :type working_dir: str
    :param requirements: A JSON object containing requirements strings by key
    :type requirements: dict
    :param diff: Uncommitted changes found in the repository when task was run
    :type diff: str
    """

    _schema = {
        "properties": {
            "binary": {
                "default": "python",
                "description": "Binary to use when running the script",
                "type": ["string", "null"],
            },
            "branch": {
                "description": "Repository branch id If not provided and tag not provided, default repository branch is used.",
                "type": ["string", "null"],
            },
            "diff": {
                "description": "Uncommitted changes found in the repository when task was run",
                "type": ["string", "null"],
            },
            "entry_point": {
                "description": "Path to execute within the repository",
                "type": ["string", "null"],
            },
            "repository": {
                "description": "Name of the repository where the script is located",
                "type": ["string", "null"],
            },
            "requirements": {
                "description": "A JSON object containing requirements strings by key",
                "type": ["object", "null"],
            },
            "tag": {"description": "Repository tag", "type": ["string", "null"]},
            "version_num": {
                "description": "Version (changeset) number. Optional (default is head version) Unused if tag is provided.",
                "type": ["string", "null"],
            },
            "working_dir": {
                "description": "Path to the folder from which to run the script Default - root folder of repository",
                "type": ["string", "null"],
            },
        },
        "type": "object",
    }

    def __init__(
        self,
        binary="python",
        repository=None,
        tag=None,
        branch=None,
        version_num=None,
        entry_point=None,
        working_dir=None,
        requirements=None,
        diff=None,
        **kwargs
    ):
        super(Script, self).__init__(**kwargs)
        self.binary = binary
        self.repository = repository
        self.tag = tag
        self.branch = branch
        self.version_num = version_num
        self.entry_point = entry_point
        self.working_dir = working_dir
        self.requirements = requirements
        self.diff = diff

    @schema_property("binary")
    def binary(self):
        return self._property_binary

    @binary.setter
    def binary(self, value):
        # None clears the field; anything else must be a string.
        if value is not None:
            self.assert_isinstance(value, "binary", six.string_types)
        self._property_binary = value

    @schema_property("repository")
    def repository(self):
        return self._property_repository

    @repository.setter
    def repository(self, value):
        if value is not None:
            self.assert_isinstance(value, "repository", six.string_types)
        self._property_repository = value

    @schema_property("tag")
    def tag(self):
        return self._property_tag

    @tag.setter
    def tag(self, value):
        if value is not None:
            self.assert_isinstance(value, "tag", six.string_types)
        self._property_tag = value

    @schema_property("branch")
    def branch(self):
        return self._property_branch

    @branch.setter
    def branch(self, value):
        if value is not None:
            self.assert_isinstance(value, "branch", six.string_types)
        self._property_branch = value

    @schema_property("version_num")
    def version_num(self):
        return self._property_version_num

    @version_num.setter
    def version_num(self, value):
        if value is not None:
            self.assert_isinstance(value, "version_num", six.string_types)
        self._property_version_num = value

    @schema_property("entry_point")
    def entry_point(self):
        return self._property_entry_point

    @entry_point.setter
    def entry_point(self, value):
        if value is not None:
            self.assert_isinstance(value, "entry_point", six.string_types)
        self._property_entry_point = value

    @schema_property("working_dir")
    def working_dir(self):
        return self._property_working_dir

    @working_dir.setter
    def working_dir(self, value):
        if value is not None:
            self.assert_isinstance(value, "working_dir", six.string_types)
        self._property_working_dir = value

    @schema_property("requirements")
    def requirements(self):
        return self._property_requirements

    @requirements.setter
    def requirements(self, value):
        # Requirements are keyed strings (e.g. pip section), carried as a dict.
        if value is not None:
            self.assert_isinstance(value, "requirements", (dict,))
        self._property_requirements = value

    @schema_property("diff")
    def diff(self):
        return self._property_diff

    @diff.setter
    def diff(self, value):
        if value is not None:
            self.assert_isinstance(value, "diff", six.string_types)
        self._property_diff = value
class Output(NonStrictDataModel):
    """
    Task output information.

    :param destination: Storage id. This is where output files will be stored.
    :type destination: str
    :param model: Model id.
    :type model: str
    :param result: Task result. Values: 'success', 'failure'
    :type result: str
    :param error: Last error text
    :type error: str
    """

    _schema = {
        "properties": {
            "destination": {
                "description": "Storage id. This is where output files will be stored.",
                "type": ["string", "null"],
            },
            "error": {"description": "Last error text", "type": ["string", "null"]},
            "model": {"description": "Model id.", "type": ["string", "null"]},
            "result": {
                "description": "Task result. Values: 'success', 'failure'",
                "type": ["string", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, destination=None, model=None, result=None, error=None, **kwargs):
        super(Output, self).__init__(**kwargs)
        self.destination = destination
        self.model = model
        self.result = result
        self.error = error

    @schema_property("destination")
    def destination(self):
        return self._property_destination

    @destination.setter
    def destination(self, value):
        # None clears the field; anything else must be a string.
        if value is not None:
            self.assert_isinstance(value, "destination", six.string_types)
        self._property_destination = value

    @schema_property("model")
    def model(self):
        return self._property_model

    @model.setter
    def model(self, value):
        if value is not None:
            self.assert_isinstance(value, "model", six.string_types)
        self._property_model = value

    @schema_property("result")
    def result(self):
        return self._property_result

    @result.setter
    def result(self, value):
        if value is not None:
            self.assert_isinstance(value, "result", six.string_types)
        self._property_result = value

    @schema_property("error")
    def error(self):
        return self._property_error

    @error.setter
    def error(self, value):
        if value is not None:
            self.assert_isinstance(value, "error", six.string_types)
        self._property_error = value
class ArtifactTypeData(NonStrictDataModel):
    """
    System-defined metadata describing an artifact's raw data.

    :param preview: Description or textual data
    :type preview: str
    :param content_type: System defined raw data content type
    :type content_type: str
    :param data_hash: Hash of raw data, without any headers or descriptive parts
    :type data_hash: str
    """

    _schema = {
        "properties": {
            "content_type": {
                "description": "System defined raw data content type",
                "type": ["string", "null"],
            },
            "data_hash": {
                "description": "Hash of raw data, without any headers or descriptive parts",
                "type": ["string", "null"],
            },
            "preview": {
                "description": "Description or textual data",
                "type": ["string", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, preview=None, content_type=None, data_hash=None, **kwargs):
        super(ArtifactTypeData, self).__init__(**kwargs)
        self.preview = preview
        self.content_type = content_type
        self.data_hash = data_hash

    @schema_property("preview")
    def preview(self):
        return self._property_preview

    @preview.setter
    def preview(self, value):
        # None clears the field; anything else must be a string.
        if value is not None:
            self.assert_isinstance(value, "preview", six.string_types)
        self._property_preview = value

    @schema_property("content_type")
    def content_type(self):
        return self._property_content_type

    @content_type.setter
    def content_type(self, value):
        if value is not None:
            self.assert_isinstance(value, "content_type", six.string_types)
        self._property_content_type = value

    @schema_property("data_hash")
    def data_hash(self):
        return self._property_data_hash

    @data_hash.setter
    def data_hash(self, value):
        if value is not None:
            self.assert_isinstance(value, "data_hash", six.string_types)
        self._property_data_hash = value
class ArtifactModeEnum(StringEnum):
    """String enum with the two artifact-direction values: "input" and "output"."""
    input = "input"
    output = "output"
class Artifact(NonStrictDataModel):
    """
    A single task artifact entry.

    :param key: Entry key
    :type key: str
    :param type: System defined type
    :type type: str
    :param mode: System defined input/output indication
    :type mode: ArtifactModeEnum
    :param uri: Raw data location
    :type uri: str
    :param content_size: Raw data length in bytes
    :type content_size: int
    :param hash: Hash of entire raw data
    :type hash: str
    :param timestamp: Epoch time when artifact was created
    :type timestamp: int
    :param type_data: Additional fields defined by the system
    :type type_data: ArtifactTypeData
    :param display_data: User-defined list of key/value pairs, sorted
    :type display_data: Sequence[Sequence[str]]
    """

    _schema = {
        "properties": {
            "content_size": {
                "description": "Raw data length in bytes",
                "type": "integer",
            },
            "display_data": {
                "description": "User-defined list of key/value pairs, sorted",
                "items": {"items": {"type": "string"}, "type": "array"},
                "type": "array",
            },
            "hash": {"description": "Hash of entire raw data", "type": "string"},
            "key": {"description": "Entry key", "type": "string"},
            "mode": {
                "$ref": "#/definitions/artifact_mode_enum",
                "description": "System defined input/output indication",
            },
            "timestamp": {
                "description": "Epoch time when artifact was created",
                "type": "integer",
            },
            "type": {"description": "System defined type", "type": "string"},
            "type_data": {
                "$ref": "#/definitions/artifact_type_data",
                "description": "Additional fields defined by the system",
            },
            "uri": {"description": "Raw data location", "type": "string"},
        },
        "required": ["key", "type"],
        "type": "object",
    }

    def __init__(
        self,
        key,
        type,
        mode=None,
        uri=None,
        content_size=None,
        hash=None,
        timestamp=None,
        type_data=None,
        display_data=None,
        **kwargs
    ):
        super(Artifact, self).__init__(**kwargs)
        self.key = key
        self.type = type
        self.mode = mode
        self.uri = uri
        self.content_size = content_size
        self.hash = hash
        self.timestamp = timestamp
        self.type_data = type_data
        self.display_data = display_data

    @schema_property("key")
    def key(self):
        return self._property_key

    @key.setter
    def key(self, value):
        # None clears the field; anything else must be a string.
        if value is not None:
            self.assert_isinstance(value, "key", six.string_types)
        self._property_key = value

    @schema_property("type")
    def type(self):
        return self._property_type

    @type.setter
    def type(self, value):
        if value is not None:
            self.assert_isinstance(value, "type", six.string_types)
        self._property_type = value

    @schema_property("mode")
    def mode(self):
        return self._property_mode

    @mode.setter
    def mode(self, value):
        # Strings are coerced to ArtifactModeEnum when possible (unknown
        # strings are stored as-is); non-strings must already be an Enum.
        if value is not None:
            if isinstance(value, six.string_types):
                try:
                    value = ArtifactModeEnum(value)
                except ValueError:
                    pass
            else:
                self.assert_isinstance(value, "mode", enum.Enum)
        self._property_mode = value

    @schema_property("uri")
    def uri(self):
        return self._property_uri

    @uri.setter
    def uri(self, value):
        if value is not None:
            self.assert_isinstance(value, "uri", six.string_types)
        self._property_uri = value

    @schema_property("content_size")
    def content_size(self):
        return self._property_content_size

    @content_size.setter
    def content_size(self, value):
        # Whole-number floats are accepted and converted to int.
        if value is not None:
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "content_size", six.integer_types)
        self._property_content_size = value

    @schema_property("hash")
    def hash(self):
        return self._property_hash

    @hash.setter
    def hash(self, value):
        if value is not None:
            self.assert_isinstance(value, "hash", six.string_types)
        self._property_hash = value

    @schema_property("timestamp")
    def timestamp(self):
        return self._property_timestamp

    @timestamp.setter
    def timestamp(self, value):
        # Whole-number floats are accepted and converted to int.
        if value is not None:
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "timestamp", six.integer_types)
        self._property_timestamp = value

    @schema_property("type_data")
    def type_data(self):
        return self._property_type_data

    @type_data.setter
    def type_data(self, value):
        # Dicts are deserialized into ArtifactTypeData; otherwise the value
        # must already be an ArtifactTypeData instance.
        if value is not None:
            if isinstance(value, dict):
                value = ArtifactTypeData.from_dict(value)
            else:
                self.assert_isinstance(value, "type_data", ArtifactTypeData)
        self._property_type_data = value

    @schema_property("display_data")
    def display_data(self):
        return self._property_display_data

    @display_data.setter
    def display_data(self, value):
        # A sequence of sequences (key/value pairs).
        if value is not None:
            self.assert_isinstance(value, "display_data", (list, tuple))
            self.assert_isinstance(value, "display_data", (list, tuple), is_array=True)
        self._property_display_data = value
class ArtifactId(NonStrictDataModel):
    """
    Identifies an artifact by key and direction.

    :param key: Entry key
    :type key: str
    :param mode: System defined input/output indication
    :type mode: ArtifactModeEnum
    """

    _schema = {
        "properties": {
            "key": {"description": "Entry key", "type": "string"},
            "mode": {
                "$ref": "#/definitions/artifact_mode_enum",
                "description": "System defined input/output indication",
            },
        },
        "required": ["key"],
        "type": "object",
    }

    def __init__(self, key, mode=None, **kwargs):
        super(ArtifactId, self).__init__(**kwargs)
        self.key = key
        self.mode = mode

    @schema_property("key")
    def key(self):
        return self._property_key

    @key.setter
    def key(self, value):
        # None clears the field; anything else must be a string.
        if value is not None:
            self.assert_isinstance(value, "key", six.string_types)
        self._property_key = value

    @schema_property("mode")
    def mode(self):
        return self._property_mode

    @mode.setter
    def mode(self, value):
        # Strings are coerced to ArtifactModeEnum when possible (unknown
        # strings are stored as-is); non-strings must already be an Enum.
        if value is not None:
            if isinstance(value, six.string_types):
                try:
                    value = ArtifactModeEnum(value)
                except ValueError:
                    pass
            else:
                self.assert_isinstance(value, "mode", enum.Enum)
        self._property_mode = value
class TaskModels(NonStrictDataModel):
    """
    The input and output model lists of a task.

    :param input: The list of task input models
    :type input: Sequence[TaskModelItem]
    :param output: The list of task output models
    :type output: Sequence[TaskModelItem]
    """

    _schema = {
        "properties": {
            "input": {
                "description": "The list of task input models",
                "items": {"$ref": "#/definitions/task_model_item"},
                "type": ["array", "null"],
            },
            "output": {
                "description": "The list of task output models",
                "items": {"$ref": "#/definitions/task_model_item"},
                "type": ["array", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, input=None, output=None, **kwargs):
        super(TaskModels, self).__init__(**kwargs)
        self.input = input
        self.output = output

    @schema_property("input")
    def input(self):
        return self._property_input

    @input.setter
    def input(self, value):
        # Accept a sequence of TaskModelItem, or dicts to be deserialized.
        if value is not None:
            self.assert_isinstance(value, "input", (list, tuple))
            if any(isinstance(v, dict) for v in value):
                value = [
                    TaskModelItem.from_dict(v) if isinstance(v, dict) else v
                    for v in value
                ]
            else:
                self.assert_isinstance(value, "input", TaskModelItem, is_array=True)
        self._property_input = value

    @schema_property("output")
    def output(self):
        return self._property_output

    @output.setter
    def output(self, value):
        # Accept a sequence of TaskModelItem, or dicts to be deserialized.
        if value is not None:
            self.assert_isinstance(value, "output", (list, tuple))
            if any(isinstance(v, dict) for v in value):
                value = [
                    TaskModelItem.from_dict(v) if isinstance(v, dict) else v
                    for v in value
                ]
            else:
                self.assert_isinstance(value, "output", TaskModelItem, is_array=True)
        self._property_output = value
class Execution(NonStrictDataModel):
    """
    Task execution parameters.

    :param queue: Queue ID where task was queued.
    :type queue: str
    :param parameters: Json object containing the Task parameters
    :type parameters: dict
    :param model_desc: Json object representing the Model descriptors
    :type model_desc: dict
    :param model_labels: Json object representing the ids of the labels in the
        model. The keys are the layers' names and the values are the IDs. Not
        applicable for Register (Import) tasks. Mandatory for Training tasks
    :type model_labels: dict
    :param framework: Framework related to the task. Case insensitive. Mandatory
        for Training tasks.
    :type framework: str
    :param artifacts: Task artifacts
    :type artifacts: Sequence[Artifact]
    """

    _schema = {
        "properties": {
            "artifacts": {
                "description": "Task artifacts",
                "items": {"$ref": "#/definitions/artifact"},
                "type": ["array", "null"],
            },
            "framework": {
                "description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ",
                "type": ["string", "null"],
            },
            "model_desc": {
                "additionalProperties": True,
                "description": "Json object representing the Model descriptors",
                "type": ["object", "null"],
            },
            "model_labels": {
                "additionalProperties": {"type": "integer"},
                "description": "Json object representing the ids of the labels in the model.\n                The keys are the layers' names and the values are the IDs.\n                Not applicable for Register (Import) tasks.\n                Mandatory for Training tasks",
                "type": ["object", "null"],
            },
            "parameters": {
                "additionalProperties": True,
                "description": "Json object containing the Task parameters",
                "type": ["object", "null"],
            },
            "queue": {
                "description": "Queue ID where task was queued.",
                "type": ["string", "null"],
            },
        },
        "type": "object",
    }

    def __init__(
        self,
        queue=None,
        parameters=None,
        model_desc=None,
        model_labels=None,
        framework=None,
        artifacts=None,
        **kwargs
    ):
        super(Execution, self).__init__(**kwargs)
        self.queue = queue
        self.parameters = parameters
        self.model_desc = model_desc
        self.model_labels = model_labels
        self.framework = framework
        self.artifacts = artifacts

    @schema_property("queue")
    def queue(self):
        return self._property_queue

    @queue.setter
    def queue(self, value):
        # None clears the field; anything else must be a string.
        if value is not None:
            self.assert_isinstance(value, "queue", six.string_types)
        self._property_queue = value

    @schema_property("parameters")
    def parameters(self):
        return self._property_parameters

    @parameters.setter
    def parameters(self, value):
        if value is not None:
            self.assert_isinstance(value, "parameters", (dict,))
        self._property_parameters = value

    @schema_property("model_desc")
    def model_desc(self):
        return self._property_model_desc

    @model_desc.setter
    def model_desc(self, value):
        if value is not None:
            self.assert_isinstance(value, "model_desc", (dict,))
        self._property_model_desc = value

    @schema_property("model_labels")
    def model_labels(self):
        return self._property_model_labels

    @model_labels.setter
    def model_labels(self, value):
        if value is not None:
            self.assert_isinstance(value, "model_labels", (dict,))
        self._property_model_labels = value

    @schema_property("framework")
    def framework(self):
        return self._property_framework

    @framework.setter
    def framework(self, value):
        if value is not None:
            self.assert_isinstance(value, "framework", six.string_types)
        self._property_framework = value

    @schema_property("artifacts")
    def artifacts(self):
        return self._property_artifacts

    @artifacts.setter
    def artifacts(self, value):
        # Accept a sequence of Artifact, or dicts to be deserialized.
        if value is not None:
            self.assert_isinstance(value, "artifacts", (list, tuple))
            if any(isinstance(v, dict) for v in value):
                value = [
                    Artifact.from_dict(v) if isinstance(v, dict) else v for v in value
                ]
            else:
                self.assert_isinstance(value, "artifacts", Artifact, is_array=True)
        self._property_artifacts = value
class TaskStatusEnum(StringEnum):
    """Lifecycle states a task can be in (string-valued enum)."""
    created = "created"
    queued = "queued"
    in_progress = "in_progress"
    stopped = "stopped"
    published = "published"
    publishing = "publishing"
    closed = "closed"
    failed = "failed"
    completed = "completed"
    unknown = "unknown"
class TaskTypeEnum(StringEnum):
    """Kinds of tasks the backend distinguishes (string-valued enum)."""
    training = "training"
    testing = "testing"
    inference = "inference"
    data_processing = "data_processing"
    application = "application"
    monitor = "monitor"
    controller = "controller"
    optimizer = "optimizer"
    service = "service"
    qc = "qc"
    custom = "custom"
class LastMetricsEvent(NonStrictDataModel):
    """
    Summary of the latest reported event for one metric variant.

    :param metric: Metric name
    :type metric: str
    :param variant: Variant name
    :type variant: str
    :param value: Last value reported
    :type value: float
    :param min_value: Minimum value reported
    :type min_value: float
    :param max_value: Maximum value reported
    :type max_value: float
    """
    _schema = {
        "properties": {
            "max_value": {
                "description": "Maximum value reported",
                "type": ["number", "null"],
            },
            "metric": {"description": "Metric name", "type": ["string", "null"]},
            "min_value": {
                "description": "Minimum value reported",
                "type": ["number", "null"],
            },
            "value": {"description": "Last value reported", "type": ["number", "null"]},
            "variant": {"description": "Variant name", "type": ["string", "null"]},
        },
        "type": "object",
    }

    def __init__(
        self,
        metric=None,
        variant=None,
        value=None,
        min_value=None,
        max_value=None,
        **kwargs
    ):
        super(LastMetricsEvent, self).__init__(**kwargs)
        self.metric = metric
        self.variant = variant
        self.value = value
        self.min_value = min_value
        self.max_value = max_value

    @schema_property("metric")
    def metric(self):
        """Metric name (string) or None."""
        return self._property_metric

    @metric.setter
    def metric(self, new_value):
        # None clears the field; anything else must be a string.
        if new_value is not None:
            self.assert_isinstance(new_value, "metric", six.string_types)
        self._property_metric = new_value

    @schema_property("variant")
    def variant(self):
        """Variant name (string) or None."""
        return self._property_variant

    @variant.setter
    def variant(self, new_value):
        if new_value is not None:
            self.assert_isinstance(new_value, "variant", six.string_types)
        self._property_variant = new_value

    @schema_property("value")
    def value(self):
        """Last reported value (number) or None."""
        return self._property_value

    @value.setter
    def value(self, new_value):
        # Accept any int or float (or None, to clear).
        if new_value is not None:
            self.assert_isinstance(new_value, "value", six.integer_types + (float,))
        self._property_value = new_value

    @schema_property("min_value")
    def min_value(self):
        """Minimum reported value (number) or None."""
        return self._property_min_value

    @min_value.setter
    def min_value(self, new_value):
        if new_value is not None:
            self.assert_isinstance(new_value, "min_value", six.integer_types + (float,))
        self._property_min_value = new_value

    @schema_property("max_value")
    def max_value(self):
        """Maximum reported value (number) or None."""
        return self._property_max_value

    @max_value.setter
    def max_value(self, new_value):
        if new_value is not None:
            self.assert_isinstance(new_value, "max_value", six.integer_types + (float,))
        self._property_max_value = new_value
class LastMetricsVariants(NonStrictDataModel):
    """
    Last metric events, one for each variant hash

    Free-form mapping: each key is presumably a variant hash and each value
    should conform to the ``last_metrics_event`` schema definition.
    """
    # No explicit properties here; validation is driven entirely by _schema.
    _schema = {
        "additionalProperties": {"$ref": "#/definitions/last_metrics_event"},
        "description": "Last metric events, one for each variant hash",
        "type": "object",
    }
class ParamsItem(NonStrictDataModel):
    """
    A single task parameter, addressed by (section, name).

    :param section: Section that the parameter belongs to
    :type section: str
    :param name: Name of the parameter. The combination of section and name should
        be unique
    :type name: str
    :param value: Value of the parameter
    :type value: str
    :param type: Type of the parameter. Optional
    :type type: str
    :param description: The parameter description. Optional
    :type description: str
    """
    _schema = {
        "properties": {
            "description": {
                "description": "The parameter description. Optional",
                "type": ["string", "null"],
            },
            "name": {
                "description": "Name of the parameter. The combination of section and name should be unique",
                "type": ["string", "null"],
            },
            "section": {
                "description": "Section that the parameter belongs to",
                "type": ["string", "null"],
            },
            "type": {
                "description": "Type of the parameter. Optional",
                "type": ["string", "null"],
            },
            "value": {
                "description": "Value of the parameter",
                "type": ["string", "null"],
            },
        },
        "type": "object",
    }

    def __init__(
        self, section=None, name=None, value=None, type=None, description=None, **kwargs
    ):
        super(ParamsItem, self).__init__(**kwargs)
        self.section = section
        self.name = name
        self.value = value
        self.type = type
        self.description = description

    @schema_property("section")
    def section(self):
        """Owning section name (string) or None."""
        return self._property_section

    @section.setter
    def section(self, new_value):
        # None clears the field; anything else must be a string.
        if new_value is not None:
            self.assert_isinstance(new_value, "section", six.string_types)
        self._property_section = new_value

    @schema_property("name")
    def name(self):
        """Parameter name (string) or None."""
        return self._property_name

    @name.setter
    def name(self, new_value):
        if new_value is not None:
            self.assert_isinstance(new_value, "name", six.string_types)
        self._property_name = new_value

    @schema_property("value")
    def value(self):
        """Parameter value (string) or None."""
        return self._property_value

    @value.setter
    def value(self, new_value):
        if new_value is not None:
            self.assert_isinstance(new_value, "value", six.string_types)
        self._property_value = new_value

    @schema_property("type")
    def type(self):
        """Optional parameter type (string) or None."""
        return self._property_type

    @type.setter
    def type(self, new_value):
        if new_value is not None:
            self.assert_isinstance(new_value, "type", six.string_types)
        self._property_type = new_value

    @schema_property("description")
    def description(self):
        """Optional free-text description (string) or None."""
        return self._property_description

    @description.setter
    def description(self, new_value):
        if new_value is not None:
            self.assert_isinstance(new_value, "description", six.string_types)
        self._property_description = new_value
class ConfigurationItem(NonStrictDataModel):
    """
    A single named task configuration entry.

    :param name: Name of the parameter. Should be unique
    :type name: str
    :param value: Value of the parameter
    :type value: str
    :param type: Type of the parameter. Optional
    :type type: str
    :param description: The parameter description. Optional
    :type description: str
    """
    _schema = {
        "properties": {
            "description": {
                "description": "The parameter description. Optional",
                "type": ["string", "null"],
            },
            "name": {
                "description": "Name of the parameter. Should be unique",
                "type": ["string", "null"],
            },
            "type": {
                "description": "Type of the parameter. Optional",
                "type": ["string", "null"],
            },
            "value": {
                "description": "Value of the parameter",
                "type": ["string", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, name=None, value=None, type=None, description=None, **kwargs):
        super(ConfigurationItem, self).__init__(**kwargs)
        self.name = name
        self.value = value
        self.type = type
        self.description = description

    @schema_property("name")
    def name(self):
        """Configuration entry name (string) or None."""
        return self._property_name

    @name.setter
    def name(self, new_value):
        # None clears the field; anything else must be a string.
        if new_value is not None:
            self.assert_isinstance(new_value, "name", six.string_types)
        self._property_name = new_value

    @schema_property("value")
    def value(self):
        """Configuration entry value (string) or None."""
        return self._property_value

    @value.setter
    def value(self, new_value):
        if new_value is not None:
            self.assert_isinstance(new_value, "value", six.string_types)
        self._property_value = new_value

    @schema_property("type")
    def type(self):
        """Optional entry type (string) or None."""
        return self._property_type

    @type.setter
    def type(self, new_value):
        if new_value is not None:
            self.assert_isinstance(new_value, "type", six.string_types)
        self._property_type = new_value

    @schema_property("description")
    def description(self):
        """Optional free-text description (string) or None."""
        return self._property_description

    @description.setter
    def description(self, new_value):
        if new_value is not None:
            self.assert_isinstance(new_value, "description", six.string_types)
        self._property_description = new_value
class ParamKey(NonStrictDataModel):
    """
    Key addressing a parameter (or a whole section, when name is omitted).

    :param section: Section that the parameter belongs to
    :type section: str
    :param name: Name of the parameter. If the name is ommitted then the
        corresponding operation is performed on the whole section
    :type name: str
    """
    _schema = {
        "properties": {
            "name": {
                "description": "Name of the parameter. If the name is ommitted then the corresponding operation is performed on the whole section",
                "type": ["string", "null"],
            },
            "section": {
                "description": "Section that the parameter belongs to",
                "type": ["string", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, section=None, name=None, **kwargs):
        super(ParamKey, self).__init__(**kwargs)
        self.section = section
        self.name = name

    @schema_property("section")
    def section(self):
        """Owning section name (string) or None."""
        return self._property_section

    @section.setter
    def section(self, new_value):
        # None clears the field; anything else must be a string.
        if new_value is not None:
            self.assert_isinstance(new_value, "section", six.string_types)
        self._property_section = new_value

    @schema_property("name")
    def name(self):
        """Parameter name (string), or None to address the whole section."""
        return self._property_name

    @name.setter
    def name(self, new_value):
        if new_value is not None:
            self.assert_isinstance(new_value, "name", six.string_types)
        self._property_name = new_value
class SectionParams(dict, NonStrictDataModel):
    """
    Task section params
    """
    _schema = {
        # 'additionalProperties': {'$ref': '#/definitions/params_item'},
        "additionalProperties": True,
        "description": "Task section params",
        "type": "object",
    }

    def __init__(self, *args, **kwargs):
        """
        Build section params from positional dicts and/or keyword arguments.

        :param args: zero or more dicts mapping param name -> ParamsItem/dict;
            merged over ``kwargs``, later dicts winning on duplicate keys
        :param kwargs: param name -> ParamsItem/dict entries
        :raises: whatever ``assert_isinstance`` raises on invalid input
        """
        # Each positional argument must itself be a dict.
        self.assert_isinstance(args, "section_params", dict, is_array=True)
        # BUG FIX: the original `kwargs.update(args)` handed the *tuple of
        # dicts* to dict.update, which treats each dict as a single (key,
        # value) pair -- merging its keys, or raising ValueError unless the
        # dict has exactly two keys. Merge each positional dict's items
        # explicitly instead (preserving the original precedence where
        # positional dicts override keyword arguments).
        for mapping in args:
            kwargs.update(mapping)
        self.assert_isinstance(
            kwargs.values(), "params", (ParamsItem, dict), is_array=True
        )
        # Promote plain dict values to ParamsItem instances.
        for k, v in kwargs.items():
            if isinstance(v, dict):
                kwargs[k] = ParamsItem(**v)
        super(SectionParams, self).__init__(**kwargs)
class ReplaceHyperparamsEnum(StringEnum):
    """Scope of hyperparameter replacement: nothing, one section, or all."""
    none = "none"
    section = "section"
    all = "all"
class Task(NonStrictDataModel):
    """
    :param id: Task id
    :type id: str
    :param name: Task Name
    :type name: str
    :param user: Associated user id
    :type user: str
    :param company: Company ID
    :type company: str
    :param type: Type of task. Values: 'training', 'testing'
    :type type: TaskTypeEnum
    :param status:
    :type status: TaskStatusEnum
    :param comment: Free text comment
    :type comment: str
    :param created: Task creation time (UTC)
    :type created: datetime.datetime
    :param started: Task start time (UTC)
    :type started: datetime.datetime
    :param completed: Task end time (UTC)
    :type completed: datetime.datetime
    :param active_duration: Task duration time (seconds)
    :type active_duration: int
    :param parent: Parent task id
    :type parent: str
    :param project: Project ID of the project to which this task is assigned
    :type project: str
    :param output: Task output params
    :type output: Output
    :param execution: Task execution params
    :type execution: Execution
    :param container: Docker container parameters
    :type container: dict
    :param models: Task models
    :type models: TaskModels
    :param script: Script info
    :type script: Script
    :param tags: User-defined tags list
    :type tags: Sequence[str]
    :param system_tags: System tags list. This field is reserved for system use,
        please don't use it.
    :type system_tags: Sequence[str]
    :param status_changed: Last status change time
    :type status_changed: datetime.datetime
    :param status_message: free text string representing info about the status
    :type status_message: str
    :param status_reason: Reason for last status change
    :type status_reason: str
    :param published: Last status change time
    :type published: datetime.datetime
    :param last_worker: ID of last worker that handled the task
    :type last_worker: str
    :param last_worker_report: Last time a worker reported while working on this
        task
    :type last_worker_report: datetime.datetime
    :param last_update: Last time this task was created, edited, changed or events
        for this task were reported
    :type last_update: datetime.datetime
    :param last_change: Last time any update was done to the task
    :type last_change: datetime.datetime
    :param last_iteration: Last iteration reported for this task
    :type last_iteration: int
    :param last_metrics: Last metric variants (hash to events), one for each metric
        hash
    :type last_metrics: dict
    :param hyperparams: Task hyper params per section
    :type hyperparams: dict
    :param configuration: Task configuration params
    :type configuration: dict
    :param runtime: Task runtime mapping
    :type runtime: dict
    """
    # JSON schema the (non-strict) model validates against; property setters
    # below enforce the same constraints at assignment time.
    _schema = {
        "properties": {
            "active_duration": {
                "description": "Task duration time (seconds)",
                "type": ["integer", "null"],
            },
            "comment": {"description": "Free text comment", "type": ["string", "null"]},
            "company": {"description": "Company ID", "type": ["string", "null"]},
            "completed": {
                "description": "Task end time (UTC)",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "configuration": {
                "additionalProperties": {"$ref": "#/definitions/configuration_item"},
                "description": "Task configuration params",
                "type": ["object", "null"],
            },
            "container": {
                "type": "object",
                "description": "Docker container parameters",
                "additionalProperties": {"type": "string"},
            },
            "created": {
                "description": "Task creation time (UTC) ",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "execution": {
                "description": "Task execution params",
                "oneOf": [{"$ref": "#/definitions/execution"}, {"type": "null"}],
            },
            "hyperparams": {
                "additionalProperties": {"$ref": "#/definitions/section_params"},
                "description": "Task hyper params per section",
                "type": ["object", "null"],
            },
            "id": {"description": "Task id", "type": ["string", "null"]},
            "last_change": {
                "description": "Last time any update was done to the task",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "last_iteration": {
                "description": "Last iteration reported for this task",
                "type": ["integer", "null"],
            },
            "last_metrics": {
                "additionalProperties": {"$ref": "#/definitions/last_metrics_variants"},
                "description": "Last metric variants (hash to events), one for each metric hash",
                "type": ["object", "null"],
            },
            "last_update": {
                "description": "Last time this task was created, edited, changed or events for this task were reported",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "last_worker": {
                "description": "ID of last worker that handled the task",
                "type": ["string", "null"],
            },
            "last_worker_report": {
                "description": "Last time a worker reported while working on this task",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "models": {
                "description": "Task models",
                "oneOf": [{"$ref": "#/definitions/task_models"}, {"type": "null"}],
            },
            "name": {"description": "Task Name", "type": ["string", "null"]},
            "output": {
                "description": "Task output params",
                "oneOf": [{"$ref": "#/definitions/output"}, {"type": "null"}],
            },
            "parent": {"description": "Parent task id", "type": ["string", "null"]},
            "project": {
                "description": "Project ID of the project to which this task is assigned",
                "type": ["string", "null"],
            },
            "published": {
                "description": "Last status change time",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "runtime": {
                "description": "Task runtime mapping",
                "type": ["object", "null"],
                "additionalProperties": True,
            },
            "script": {
                "description": "Script info",
                "oneOf": [{"$ref": "#/definitions/script"}, {"type": "null"}],
            },
            "started": {
                "description": "Task start time (UTC)",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "status": {
                "description": "",
                "oneOf": [{"$ref": "#/definitions/task_status_enum"}, {"type": "null"}],
            },
            "status_changed": {
                "description": "Last status change time",
                "format": "date-time",
                "type": ["string", "null"],
            },
            "status_message": {
                "description": "free text string representing info about the status",
                "type": ["string", "null"],
            },
            "status_reason": {
                "description": "Reason for last status change",
                "type": ["string", "null"],
            },
            "system_tags": {
                "description": "System tags list. This field is reserved for system use, please don't use it.",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
            "tags": {
                "description": "User-defined tags list",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
            "type": {
                "description": "Type of task. Values: 'training', 'testing'",
                "oneOf": [{"$ref": "#/definitions/task_type_enum"}, {"type": "null"}],
            },
            "user": {"description": "Associated user id", "type": ["string", "null"]},
        },
        "type": "object",
    }

    def __init__(
        self,
        id=None,
        name=None,
        user=None,
        company=None,
        type=None,
        status=None,
        comment=None,
        created=None,
        started=None,
        completed=None,
        active_duration=None,
        parent=None,
        project=None,
        output=None,
        execution=None,
        container=None,
        models=None,
        script=None,
        tags=None,
        system_tags=None,
        status_changed=None,
        status_message=None,
        status_reason=None,
        published=None,
        last_worker=None,
        last_worker_report=None,
        last_update=None,
        last_change=None,
        last_iteration=None,
        last_metrics=None,
        hyperparams=None,
        configuration=None,
        runtime=None,
        **kwargs
    ):
        # Every assignment below goes through the validating property setter.
        super(Task, self).__init__(**kwargs)
        self.id = id
        self.name = name
        self.user = user
        self.company = company
        self.type = type
        self.status = status
        self.comment = comment
        self.created = created
        self.started = started
        self.completed = completed
        self.active_duration = active_duration
        self.parent = parent
        self.project = project
        self.output = output
        self.execution = execution
        self.container = container
        self.models = models
        self.script = script
        self.tags = tags
        self.system_tags = system_tags
        self.status_changed = status_changed
        self.status_message = status_message
        self.status_reason = status_reason
        self.published = published
        self.last_worker = last_worker
        self.last_worker_report = last_worker_report
        self.last_update = last_update
        self.last_change = last_change
        self.last_iteration = last_iteration
        self.last_metrics = last_metrics
        self.hyperparams = hyperparams
        self.configuration = configuration
        self.runtime = runtime

    @schema_property("id")
    def id(self):
        return self._property_id

    @id.setter
    def id(self, value):
        if value is None:
            self._property_id = None
            return
        self.assert_isinstance(value, "id", six.string_types)
        self._property_id = value

    @schema_property("name")
    def name(self):
        return self._property_name

    @name.setter
    def name(self, value):
        if value is None:
            self._property_name = None
            return
        self.assert_isinstance(value, "name", six.string_types)
        self._property_name = value

    @schema_property("user")
    def user(self):
        return self._property_user

    @user.setter
    def user(self, value):
        if value is None:
            self._property_user = None
            return
        self.assert_isinstance(value, "user", six.string_types)
        self._property_user = value

    @schema_property("company")
    def company(self):
        return self._property_company

    @company.setter
    def company(self, value):
        if value is None:
            self._property_company = None
            return
        self.assert_isinstance(value, "company", six.string_types)
        self._property_company = value

    @schema_property("type")
    def type(self):
        return self._property_type

    @type.setter
    def type(self, value):
        if value is None:
            self._property_type = None
            return
        # Coerce known strings to TaskTypeEnum; unknown strings are stored
        # as-is (model is non-strict). Non-strings must already be enums.
        if isinstance(value, six.string_types):
            try:
                value = TaskTypeEnum(value)
            except ValueError:
                pass
        else:
            self.assert_isinstance(value, "type", enum.Enum)
        self._property_type = value

    @schema_property("status")
    def status(self):
        return self._property_status

    @status.setter
    def status(self, value):
        if value is None:
            self._property_status = None
            return
        # Same coercion scheme as the `type` setter, using TaskStatusEnum.
        if isinstance(value, six.string_types):
            try:
                value = TaskStatusEnum(value)
            except ValueError:
                pass
        else:
            self.assert_isinstance(value, "status", enum.Enum)
        self._property_status = value

    @schema_property("comment")
    def comment(self):
        return self._property_comment

    @comment.setter
    def comment(self, value):
        if value is None:
            self._property_comment = None
            return
        self.assert_isinstance(value, "comment", six.string_types)
        self._property_comment = value

    @schema_property("created")
    def created(self):
        return self._property_created

    @created.setter
    def created(self, value):
        if value is None:
            self._property_created = None
            return
        # Strings are parsed to datetime; datetime objects pass through.
        self.assert_isinstance(value, "created", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_created = value

    @schema_property("started")
    def started(self):
        return self._property_started

    @started.setter
    def started(self, value):
        if value is None:
            self._property_started = None
            return
        self.assert_isinstance(value, "started", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_started = value

    @schema_property("completed")
    def completed(self):
        return self._property_completed

    @completed.setter
    def completed(self, value):
        if value is None:
            self._property_completed = None
            return
        self.assert_isinstance(value, "completed", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_completed = value

    @schema_property("active_duration")
    def active_duration(self):
        return self._property_active_duration

    @active_duration.setter
    def active_duration(self, value):
        if value is None:
            self._property_active_duration = None
            return
        # Whole-number floats (e.g. from JSON) are accepted and narrowed to int.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "active_duration", six.integer_types)
        self._property_active_duration = value

    @schema_property("parent")
    def parent(self):
        return self._property_parent

    @parent.setter
    def parent(self, value):
        if value is None:
            self._property_parent = None
            return
        self.assert_isinstance(value, "parent", six.string_types)
        self._property_parent = value

    @schema_property("project")
    def project(self):
        return self._property_project

    @project.setter
    def project(self, value):
        if value is None:
            self._property_project = None
            return
        self.assert_isinstance(value, "project", six.string_types)
        self._property_project = value

    @schema_property("output")
    def output(self):
        return self._property_output

    @output.setter
    def output(self, value):
        if value is None:
            self._property_output = None
            return
        # Plain dicts are promoted to Output model instances.
        if isinstance(value, dict):
            value = Output.from_dict(value)
        else:
            self.assert_isinstance(value, "output", Output)
        self._property_output = value

    @schema_property("execution")
    def execution(self):
        return self._property_execution

    @execution.setter
    def execution(self, value):
        if value is None:
            self._property_execution = None
            return
        if isinstance(value, dict):
            value = Execution.from_dict(value)
        else:
            self.assert_isinstance(value, "execution", Execution)
        self._property_execution = value

    @schema_property("container")
    def container(self):
        return self._property_container

    @container.setter
    def container(self, value):
        if value is None:
            self._property_container = None
            return
        self.assert_isinstance(value, "container", dict)
        self._property_container = value

    @schema_property("models")
    def models(self):
        return self._property_models

    @models.setter
    def models(self, value):
        if value is None:
            self._property_models = None
            return
        if isinstance(value, dict):
            value = TaskModels.from_dict(value)
        else:
            self.assert_isinstance(value, "models", TaskModels)
        self._property_models = value

    @schema_property("script")
    def script(self):
        return self._property_script

    @script.setter
    def script(self, value):
        if value is None:
            self._property_script = None
            return
        if isinstance(value, dict):
            value = Script.from_dict(value)
        else:
            self.assert_isinstance(value, "script", Script)
        self._property_script = value

    @schema_property("tags")
    def tags(self):
        return self._property_tags

    @tags.setter
    def tags(self, value):
        if value is None:
            self._property_tags = None
            return
        self.assert_isinstance(value, "tags", (list, tuple))
        self.assert_isinstance(value, "tags", six.string_types, is_array=True)
        self._property_tags = value

    @schema_property("system_tags")
    def system_tags(self):
        return self._property_system_tags

    @system_tags.setter
    def system_tags(self, value):
        if value is None:
            self._property_system_tags = None
            return
        self.assert_isinstance(value, "system_tags", (list, tuple))
        self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
        self._property_system_tags = value

    @schema_property("status_changed")
    def status_changed(self):
        return self._property_status_changed

    @status_changed.setter
    def status_changed(self, value):
        if value is None:
            self._property_status_changed = None
            return
        self.assert_isinstance(value, "status_changed", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_status_changed = value

    @schema_property("status_message")
    def status_message(self):
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        if value is None:
            self._property_status_message = None
            return
        self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value

    @schema_property("status_reason")
    def status_reason(self):
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        if value is None:
            self._property_status_reason = None
            return
        self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("published")
    def published(self):
        return self._property_published

    @published.setter
    def published(self, value):
        if value is None:
            self._property_published = None
            return
        self.assert_isinstance(value, "published", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_published = value

    @schema_property("last_worker")
    def last_worker(self):
        return self._property_last_worker

    @last_worker.setter
    def last_worker(self, value):
        if value is None:
            self._property_last_worker = None
            return
        self.assert_isinstance(value, "last_worker", six.string_types)
        self._property_last_worker = value

    @schema_property("last_worker_report")
    def last_worker_report(self):
        return self._property_last_worker_report

    @last_worker_report.setter
    def last_worker_report(self, value):
        if value is None:
            self._property_last_worker_report = None
            return
        self.assert_isinstance(
            value, "last_worker_report", six.string_types + (datetime,)
        )
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_last_worker_report = value

    @schema_property("last_update")
    def last_update(self):
        return self._property_last_update

    @last_update.setter
    def last_update(self, value):
        if value is None:
            self._property_last_update = None
            return
        self.assert_isinstance(value, "last_update", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_last_update = value

    @schema_property("last_change")
    def last_change(self):
        return self._property_last_change

    @last_change.setter
    def last_change(self, value):
        if value is None:
            self._property_last_change = None
            return
        self.assert_isinstance(value, "last_change", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_last_change = value

    @schema_property("last_iteration")
    def last_iteration(self):
        return self._property_last_iteration

    @last_iteration.setter
    def last_iteration(self, value):
        if value is None:
            self._property_last_iteration = None
            return
        # Whole-number floats (e.g. from JSON) are accepted and narrowed to int.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "last_iteration", six.integer_types)
        self._property_last_iteration = value

    @schema_property("last_metrics")
    def last_metrics(self):
        return self._property_last_metrics

    @last_metrics.setter
    def last_metrics(self, value):
        if value is None:
            self._property_last_metrics = None
            return
        self.assert_isinstance(value, "last_metrics", (dict,))
        self._property_last_metrics = value

    @schema_property("hyperparams")
    def hyperparams(self):
        return self._property_hyperparams

    @hyperparams.setter
    def hyperparams(self, value):
        if value is None:
            self._property_hyperparams = None
            return
        self.assert_isinstance(value, "hyperparams", dict)
        self.assert_isinstance(
            value.keys(), "hyperparams_keys", six.string_types, is_array=True
        )
        self.assert_isinstance(
            value.values(), "hyperparams_values", (SectionParams, dict), is_array=True
        )
        # Promote plain dict values to SectionParams instances.
        value = dict(
            (k, SectionParams(**v) if isinstance(v, dict) else v)
            for k, v in value.items()
        )
        self._property_hyperparams = value

    @schema_property("configuration")
    def configuration(self):
        return self._property_configuration

    @configuration.setter
    def configuration(self, value):
        if value is None:
            self._property_configuration = None
            return
        self.assert_isinstance(value, "configuration", dict)
        self.assert_isinstance(
            value.keys(), "configuration_keys", six.string_types, is_array=True
        )
        self.assert_isinstance(
            value.values(),
            "configuration_values",
            (ConfigurationItem, dict),
            is_array=True,
        )
        # Promote plain dict values to ConfigurationItem instances.
        value = dict(
            (k, ConfigurationItem(**v) if isinstance(v, dict) else v)
            for k, v in value.items()
        )
        self._property_configuration = value

    @schema_property("runtime")
    def runtime(self):
        return self._property_runtime

    @runtime.setter
    def runtime(self, value):
        if value is None:
            self._property_runtime = None
            return
        self.assert_isinstance(value, "runtime", dict)
        self._property_runtime = value
class TaskUrls(NonStrictDataModel):
    """
    Groups of URLs associated with a task.

    :param model_urls: List of URL strings
    :type model_urls: Sequence[str]
    :param event_urls: List of URL strings
    :type event_urls: Sequence[str]
    :param artifact_urls: List of URL strings
    :type artifact_urls: Sequence[str]
    """
    _schema = {
        "properties": {
            "artifact_urls": {"items": {"type": "string"}, "type": ["array", "null"]},
            "event_urls": {"items": {"type": "string"}, "type": ["array", "null"]},
            "model_urls": {"items": {"type": "string"}, "type": ["array", "null"]},
        },
        "type": "object",
    }

    def __init__(self, model_urls=None, event_urls=None, artifact_urls=None, **kwargs):
        super(TaskUrls, self).__init__(**kwargs)
        self.model_urls = model_urls
        self.event_urls = event_urls
        self.artifact_urls = artifact_urls

    @schema_property("model_urls")
    def model_urls(self):
        """List/tuple of strings or None."""
        return self._property_model_urls

    @model_urls.setter
    def model_urls(self, new_value):
        # None clears the field; otherwise a list/tuple of strings is required.
        if new_value is not None:
            self.assert_isinstance(new_value, "model_urls", (list, tuple))
            self.assert_isinstance(new_value, "model_urls", six.string_types, is_array=True)
        self._property_model_urls = new_value

    @schema_property("event_urls")
    def event_urls(self):
        """List/tuple of strings or None."""
        return self._property_event_urls

    @event_urls.setter
    def event_urls(self, new_value):
        if new_value is not None:
            self.assert_isinstance(new_value, "event_urls", (list, tuple))
            self.assert_isinstance(new_value, "event_urls", six.string_types, is_array=True)
        self._property_event_urls = new_value

    @schema_property("artifact_urls")
    def artifact_urls(self):
        """List/tuple of strings or None."""
        return self._property_artifact_urls

    @artifact_urls.setter
    def artifact_urls(self, new_value):
        if new_value is not None:
            self.assert_isinstance(new_value, "artifact_urls", (list, tuple))
            self.assert_isinstance(new_value, "artifact_urls", six.string_types, is_array=True)
        self._property_artifact_urls = new_value
class AddOrUpdateArtifactsRequest(Request):
"""
Update existing artifacts (search by key/mode) and add new ones
:param task: Task ID
:type task: str
:param artifacts: Artifacts to add or update
:type artifacts: Sequence[Artifact]
:param force: If set to True then both new and running task artifacts can be
edited. Otherwise only the new task ones. Default is False
:type force: bool
"""
_service = "tasks"
_action = "add_or_update_artifacts"
_version = "2.13"
_schema = {
"definitions": {
"artifact": {
"properties": {
"content_size": {
"description": "Raw data length in bytes",
"type": "integer",
},
"display_data": {
"description": "User-defined list of key/value pairs, sorted",
"items": {"items": {"type": "string"}, "type": "array"},
"type": "array",
},
"hash": {
"description": "Hash of entire raw data",
"type": "string",
},
"key": {"description": "Entry key", "type": "string"},
"mode": {
"$ref": "#/definitions/artifact_mode_enum",
"description": "System defined input/output indication",
},
"timestamp": {
"description": "Epoch time when artifact was created",
"type": "integer",
},
"type": {
"description": "System defined type",
"type": "string",
},
"type_data": {
"$ref": "#/definitions/artifact_type_data",
"description": "Additional fields defined by the system",
},
"uri": {"description": "Raw data location", "type": "string"},
},
"required": ["key", "type"],
"type": "object",
},
"artifact_mode_enum": {
"default": "output",
"enum": ["input", "output"],
"type": "string",
},
"artifact_type_data": {
"properties": {
"content_type": {
"description": "System defined raw data content type",
"type": ["string", "null"],
},
"data_hash": {
"description": "Hash of raw data, without any headers or descriptive parts",
"type": ["string", "null"],
},
"preview": {
"description": "Description or textual data",
"type": ["string", "null"],
},
},
"type": "object",
},
},
"properties": {
"artifacts": {
"description": "Artifacts to add or update",
"items": {"$ref": "#/definitions/artifact"},
"type": "array",
},
"force": {
"description": "If set to True then both new and running task artifacts can be edited. Otherwise only the new task ones. Default is False",
"type": "boolean",
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task", "artifacts"],
"type": "object",
}
    def __init__(self, task, artifacts, force=None, **kwargs):
        # task: ID of the task the artifacts belong to (required).
        # artifacts: list of artifact definitions to add or update (required).
        # force: when True, artifacts of running tasks may be edited as well.
        super(AddOrUpdateArtifactsRequest, self).__init__(**kwargs)
        self.task = task
        self.artifacts = artifacts
        self.force = force
    @schema_property("task")
    def task(self):
        # Task ID (str), or None when unset.
        return self._property_task

    @task.setter
    def task(self, value):
        # None clears the field; otherwise the value must be a string.
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value
    @schema_property("artifacts")
    def artifacts(self):
        # List of Artifact objects, or None when unset.
        return self._property_artifacts

    @artifacts.setter
    def artifacts(self, value):
        # Accepts None, a list/tuple of Artifact instances, or a list/tuple of
        # dicts — dict entries are converted to Artifact via Artifact.from_dict.
        if value is None:
            self._property_artifacts = None
            return
        self.assert_isinstance(value, "artifacts", (list, tuple))
        if any(isinstance(v, dict) for v in value):
            # Mixed lists are allowed: only the dict entries are converted.
            value = [Artifact.from_dict(v) if isinstance(v, dict) else v for v in value]
        else:
            self.assert_isinstance(value, "artifacts", Artifact, is_array=True)
        self._property_artifacts = value
    @schema_property("force")
    def force(self):
        # Bool flag allowing edits on running-task artifacts, or None when unset.
        return self._property_force

    @force.setter
    def force(self, value):
        # None clears the field; otherwise the value must be a bool.
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value
class AddOrUpdateArtifactsResponse(Response):
    """
    Response of tasks.add_or_update_artifacts endpoint.

    :param updated: Indicates if the task was updated successfully
    :type updated: int
    """

    # Endpoint routing metadata.
    _service = "tasks"
    _action = "add_or_update_artifacts"
    _version = "2.13"
    # JSON schema of the response payload.
    _schema = {
        "definitions": {},
        "properties": {
            "updated": {
                "description": "Indicates if the task was updated successfully",
                "type": ["integer", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, updated=None, **kwargs):
        super(AddOrUpdateArtifactsResponse, self).__init__(**kwargs)
        self.updated = updated

    @schema_property("updated")
    def updated(self):
        # Update indicator as an int, or None when absent from the response.
        return self._property_updated

    @updated.setter
    def updated(self, value):
        # Whole-valued floats coming off the wire are coerced to int before
        # validation; None simply clears the field.
        if value is not None:
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value
class AddOrUpdateModelRequest(Request):
    """
    Add or update task model

    :param task: ID of the task
    :type task: str
    :param name: The task model name
    :type name: str
    :param model: The model ID
    :type model: str
    :param type: The task model type
    :type type: ModelTypeEnum
    :param iteration: Iteration (used to update task statistics)
    :type iteration: int
    """

    # Endpoint routing metadata.
    _service = "tasks"
    _action = "add_or_update_model"
    _version = "2.13"
    # JSON schema of the request payload.
    _schema = {
        "definitions": {
            "model_type_enum": {"enum": ["input", "output"], "type": "string"}
        },
        "properties": {
            "iteration": {
                "description": "Iteration (used to update task statistics)",
                "type": "integer",
            },
            "model": {"description": "The model ID", "type": "string"},
            "name": {"description": "The task model name", "type": "string"},
            "task": {"description": "ID of the task", "type": "string"},
            "type": {
                "$ref": "#/definitions/model_type_enum",
                "description": "The task model type",
            },
        },
        "required": ["task", "name", "model", "type"],
        "type": "object",
    }

    def __init__(self, task, name, model, type, iteration=None, **kwargs):
        super(AddOrUpdateModelRequest, self).__init__(**kwargs)
        self.task = task
        self.name = name
        self.model = model
        self.type = type
        self.iteration = iteration

    @schema_property("task")
    def task(self):
        # Task ID (str), or None when unset.
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("name")
    def name(self):
        # Task model name (str), or None when unset.
        return self._property_name

    @name.setter
    def name(self, value):
        if value is None:
            self._property_name = None
            return
        self.assert_isinstance(value, "name", six.string_types)
        self._property_name = value

    @schema_property("model")
    def model(self):
        # Model ID (str), or None when unset.
        return self._property_model

    @model.setter
    def model(self, value):
        if value is None:
            self._property_model = None
            return
        self.assert_isinstance(value, "model", six.string_types)
        self._property_model = value

    @schema_property("type")
    def type(self):
        # Model type as a ModelTypeEnum member (or the raw string when the
        # value did not match the enum — see the setter).
        return self._property_type

    @type.setter
    def type(self, value):
        if value is None:
            self._property_type = None
            return
        if isinstance(value, six.string_types):
            try:
                value = ModelTypeEnum(value)
            except ValueError:
                # Unknown strings are stored as-is — presumably validated
                # server-side; TODO(review): confirm this is intentional.
                pass
        else:
            self.assert_isinstance(value, "type", enum.Enum)
        self._property_type = value

    @schema_property("iteration")
    def iteration(self):
        # Iteration number (int), or None when unset.
        return self._property_iteration

    @iteration.setter
    def iteration(self, value):
        if value is None:
            self._property_iteration = None
            return
        # Whole-valued floats are coerced to int before validation.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "iteration", six.integer_types)
        self._property_iteration = value
class AddOrUpdateModelResponse(Response):
    """
    Response of tasks.add_or_update_model endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    """

    # Endpoint routing metadata.
    _service = "tasks"
    _action = "add_or_update_model"
    _version = "2.13"
    # JSON schema of the response payload.
    _schema = {
        "definitions": {},
        "properties": {
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, updated=None, **kwargs):
        super(AddOrUpdateModelResponse, self).__init__(**kwargs)
        self.updated = updated

    @schema_property("updated")
    def updated(self):
        # Count of updated tasks (0 or 1), or None when absent.
        return self._property_updated

    @updated.setter
    def updated(self, value):
        # Whole-valued floats are coerced to int before validation; None
        # simply clears the field.
        if value is not None:
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value
class ArchiveRequest(Request):
    """
    Archive tasks.
    If a task is queued it will first be dequeued and then archived.

    :param tasks: List of task ids
    :type tasks: Sequence[str]
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    """

    # Endpoint routing metadata.
    _service = "tasks"
    _action = "archive"
    _version = "2.13"
    # JSON schema of the request payload.
    _schema = {
        "definitions": {},
        "properties": {
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
            "tasks": {
                "description": "List of task ids",
                "items": {"type": "string"},
                "type": "array",
            },
        },
        "required": ["tasks"],
        "type": "object",
    }

    def __init__(self, tasks, status_reason=None, status_message=None, **kwargs):
        super(ArchiveRequest, self).__init__(**kwargs)
        self.tasks = tasks
        self.status_reason = status_reason
        self.status_message = status_message

    @schema_property("tasks")
    def tasks(self):
        # List of task IDs to archive, or None when unset.
        return self._property_tasks

    @tasks.setter
    def tasks(self, value):
        # None clears the field; otherwise a list/tuple of strings is required.
        if value is not None:
            self.assert_isinstance(value, "tasks", (list, tuple))
            self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
        self._property_tasks = value

    @schema_property("status_reason")
    def status_reason(self):
        # Free-text reason recorded with the status change, or None.
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        if value is not None:
            self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        # Extra information recorded with the status change, or None.
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        if value is not None:
            self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value
class ArchiveResponse(Response):
    """
    Response of tasks.archive endpoint.

    :param archived: Indicates number of archived tasks
    :type archived: int
    """

    # Endpoint routing metadata.
    _service = "tasks"
    _action = "archive"
    _version = "2.13"
    # JSON schema of the response payload.
    _schema = {
        "definitions": {},
        "properties": {
            "archived": {
                "description": "Indicates number of archived tasks",
                "type": ["integer", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, archived=None, **kwargs):
        super(ArchiveResponse, self).__init__(**kwargs)
        self.archived = archived

    @schema_property("archived")
    def archived(self):
        # Number of archived tasks, or None when absent from the response.
        return self._property_archived

    @archived.setter
    def archived(self, value):
        # Whole-valued floats are coerced to int before validation; None
        # simply clears the field.
        if value is not None:
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "archived", six.integer_types)
        self._property_archived = value
class ArchiveManyRequest(Request):
    """
    Archive tasks

    :param ids: Entities to move
    :type ids: Sequence[str]
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    """

    # Endpoint routing metadata.
    _service = "tasks"
    _action = "archive_many"
    _version = "2.13"
    # JSON schema of the request payload.
    _schema = {
        "definitions": {},
        "properties": {
            "ids": {
                "description": "Entities to move",
                "items": {"type": "string"},
                "type": "array",
            },
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
        },
        "required": ["ids"],
        "type": "object",
    }

    def __init__(self, ids, status_reason=None, status_message=None, **kwargs):
        super(ArchiveManyRequest, self).__init__(**kwargs)
        self.ids = ids
        self.status_reason = status_reason
        self.status_message = status_message

    @schema_property("ids")
    def ids(self):
        # List of entity IDs to archive, or None when unset.
        return self._property_ids

    @ids.setter
    def ids(self, value):
        # None clears the field; otherwise a list/tuple of strings is required.
        if value is not None:
            self.assert_isinstance(value, "ids", (list, tuple))
            self.assert_isinstance(value, "ids", six.string_types, is_array=True)
        self._property_ids = value

    @schema_property("status_reason")
    def status_reason(self):
        # Free-text reason recorded with the status change, or None.
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        if value is not None:
            self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        # Extra information recorded with the status change, or None.
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        if value is not None:
            self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value
class ArchiveManyResponse(Response):
    """
    Response of tasks.archive_many endpoint.

    :param archived: Number of tasks archived
    :type archived: int
    """

    # Endpoint routing metadata.
    _service = "tasks"
    _action = "archive_many"
    _version = "2.13"
    # JSON schema of the response payload.
    # Fixed relative to the original: "failures" was sitting at the top level
    # of the schema instead of under "properties", used the invalid keyword
    # "item" instead of "items", did not wrap the failed-entity fields in a
    # "properties" object, and the top-level "type": "object" (present in all
    # sibling schemas) was missing.
    _schema = {
        "definitions": {},
        "properties": {
            "archived": {
                "description": "Number of tasks archived",
                "type": ["integer", "null"],
            },
            "failures": {
                "items": {
                    "properties": {
                        "error": {
                            "description": "Error info",
                            "properties": {
                                "codes": {"items": {"type": "integer"}, "type": "array"},
                                "data": {"additionalProperties": True, "type": "object"},
                                "msg": {"type": "string"},
                            },
                            "type": "object",
                        },
                        "id": {
                            "description": "ID of the failed entity",
                            "type": "string",
                        },
                    },
                    "type": "object",
                },
                "type": "array",
            },
        },
        "type": "object",
    }

    def __init__(self, archived=None, **kwargs):
        super(ArchiveManyResponse, self).__init__(**kwargs)
        self.archived = archived

    @schema_property("archived")
    def archived(self):
        # Number of archived tasks, or None when absent from the response.
        return self._property_archived

    @archived.setter
    def archived(self, value):
        # None clears the field; whole-valued floats are coerced to int
        # before validation.
        if value is None:
            self._property_archived = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "archived", six.integer_types)
        self._property_archived = value
class CloneRequest(Request):
    """
    Clone an existing task

    :param task: ID of the task
    :type task: str
    :param new_task_name: The name of the cloned task. If not provided then taken
        from the original task
    :type new_task_name: str
    :param new_task_comment: The comment of the cloned task. If not provided then
        taken from the original task
    :type new_task_comment: str
    :param new_task_tags: The user-defined tags of the cloned task. If not provided
        then taken from the original task
    :type new_task_tags: Sequence[str]
    :param new_task_system_tags: The system tags of the cloned task. If not
        provided then empty
    :type new_task_system_tags: Sequence[str]
    :param new_task_parent: The parent of the cloned task. If not provided then
        taken from the original task
    :type new_task_parent: str
    :param new_task_project: The project of the cloned task. If not provided then
        taken from the original task
    :type new_task_project: str
    :param new_task_hyperparams: The hyper params for the new task. If not provided
        then taken from the original task
    :type new_task_hyperparams: dict
    :param new_task_configuration: The configuration for the new task. If not
        provided then taken from the original task
    :type new_task_configuration: dict
    :param execution_overrides: The execution params for the cloned task. The
        params not specified are taken from the original task
    :type execution_overrides: Execution
    :param validate_references: If set to 'false' then the task fields that are
        copied from the original task are not validated. The default is false.
    :type validate_references: bool
    :param new_project_name: Clone task to a new project by this name (only if
        `new_task_project` is not provided). If a project by this name already exists,
        task will be cloned to existing project.
    :type new_project_name: str
    :param new_task_input_models: The list of input models for the cloned task. If
        not specifed then copied from the original task
    :type new_task_input_models: Sequence[TaskModelItem]
    :param new_task_container: The docker container properties for the new task. If
        not provided then taken from the original task
    :type new_task_container: dict
    """

    # Endpoint routing metadata.
    _service = "tasks"
    _action = "clone"
    _version = "2.13"
    # JSON schema of the request payload (used for validation / docs).
    _schema = {
        "definitions": {
            "artifact": {
                "properties": {
                    "content_size": {
                        "description": "Raw data length in bytes",
                        "type": "integer",
                    },
                    "display_data": {
                        "description": "User-defined list of key/value pairs, sorted",
                        "items": {"items": {"type": "string"}, "type": "array"},
                        "type": "array",
                    },
                    "hash": {
                        "description": "Hash of entire raw data",
                        "type": "string",
                    },
                    "key": {"description": "Entry key", "type": "string"},
                    "mode": {
                        "$ref": "#/definitions/artifact_mode_enum",
                        "description": "System defined input/output indication",
                    },
                    "timestamp": {
                        "description": "Epoch time when artifact was created",
                        "type": "integer",
                    },
                    "type": {
                        "description": "System defined type",
                        "type": "string",
                    },
                    "type_data": {
                        "$ref": "#/definitions/artifact_type_data",
                        "description": "Additional fields defined by the system",
                    },
                    "uri": {"description": "Raw data location", "type": "string"},
                },
                "required": ["key", "type"],
                "type": "object",
            },
            "artifact_mode_enum": {
                "default": "output",
                "enum": ["input", "output"],
                "type": "string",
            },
            "artifact_type_data": {
                "properties": {
                    "content_type": {
                        "description": "System defined raw data content type",
                        "type": ["string", "null"],
                    },
                    "data_hash": {
                        "description": "Hash of raw data, without any headers or descriptive parts",
                        "type": ["string", "null"],
                    },
                    "preview": {
                        "description": "Description or textual data",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "configuration_item": {
                "properties": {
                    "description": {
                        "description": "The parameter description. Optional",
                        "type": ["string", "null"],
                    },
                    "name": {
                        "description": "Name of the parameter. Should be unique",
                        "type": ["string", "null"],
                    },
                    "type": {
                        "description": "Type of the parameter. Optional",
                        "type": ["string", "null"],
                    },
                    "value": {
                        "description": "Value of the parameter",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "execution": {
                "properties": {
                    "artifacts": {
                        "description": "Task artifacts",
                        "items": {"$ref": "#/definitions/artifact"},
                        "type": ["array", "null"],
                    },
                    "framework": {
                        "description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ",
                        "type": ["string", "null"],
                    },
                    "model_desc": {
                        "additionalProperties": True,
                        "description": "Json object representing the Model descriptors",
                        "type": ["object", "null"],
                    },
                    "model_labels": {
                        "additionalProperties": {"type": "integer"},
                        "description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks",
                        "type": ["object", "null"],
                    },
                    "parameters": {
                        "additionalProperties": True,
                        "description": "Json object containing the Task parameters",
                        "type": ["object", "null"],
                    },
                    "queue": {
                        "description": "Queue ID where task was queued.",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "params_item": {
                "properties": {
                    "description": {
                        "description": "The parameter description. Optional",
                        "type": ["string", "null"],
                    },
                    "name": {
                        "description": "Name of the parameter. The combination of section and name should be unique",
                        "type": ["string", "null"],
                    },
                    "section": {
                        "description": "Section that the parameter belongs to",
                        "type": ["string", "null"],
                    },
                    "type": {
                        "description": "Type of the parameter. Optional",
                        "type": ["string", "null"],
                    },
                    "value": {
                        "description": "Value of the parameter",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "section_params": {
                "additionalProperties": {"$ref": "#/definitions/params_item"},
                "description": "Task section params",
                "type": "object",
            },
            "task_model_item": {
                "properties": {
                    "model": {"description": "The model ID", "type": "string"},
                    "name": {
                        "description": "The task model name",
                        "type": "string",
                    },
                },
                "required": ["name", "model"],
                "type": "object",
            },
        },
        "properties": {
            "execution_overrides": {
                "$ref": "#/definitions/execution",
                "description": "The execution params for the cloned task. The params not specified are taken from the original task",
            },
            "new_project_name": {
                "description": "Clone task to a new project by this name (only if `new_task_project` is not provided). If a project by this name already exists, task will be cloned to existing project.",
                "type": "string",
            },
            "new_task_comment": {
                "description": "The comment of the cloned task. If not provided then taken from the original task",
                "type": "string",
            },
            "new_task_configuration": {
                "additionalProperties": {"$ref": "#/definitions/configuration_item"},
                "description": "The configuration for the new task. If not provided then taken from the original task",
                "type": "object",
            },
            "new_task_container": {
                "additionalProperties": {"type": "string"},
                "description": "The docker container properties for the new task. If not provided then taken from the original task",
                "type": "object",
            },
            "new_task_hyperparams": {
                "additionalProperties": {"$ref": "#/definitions/section_params"},
                "description": "The hyper params for the new task. If not provided then taken from the original task",
                "type": "object",
            },
            "new_task_input_models": {
                "description": "The list of input models for the cloned task. If not specifed then copied from the original task",
                "items": {"$ref": "#/definitions/task_model_item"},
                "type": "array",
            },
            "new_task_name": {
                "description": "The name of the cloned task. If not provided then taken from the original task",
                "type": "string",
            },
            "new_task_parent": {
                "description": "The parent of the cloned task. If not provided then taken from the original task",
                "type": "string",
            },
            "new_task_project": {
                "description": "The project of the cloned task. If not provided then taken from the original task",
                "type": "string",
            },
            "new_task_system_tags": {
                "description": "The system tags of the cloned task. If not provided then empty",
                "items": {"type": "string"},
                "type": "array",
            },
            "new_task_tags": {
                "description": "The user-defined tags of the cloned task. If not provided then taken from the original task",
                "items": {"type": "string"},
                "type": "array",
            },
            "task": {"description": "ID of the task", "type": "string"},
            "validate_references": {
                "description": "If set to 'false' then the task fields that are copied from the original task are not validated. The default is false.",
                "type": "boolean",
            },
        },
        "required": ["task"],
        "type": "object",
    }

    def __init__(
        self,
        task,
        new_task_name=None,
        new_task_comment=None,
        new_task_tags=None,
        new_task_system_tags=None,
        new_task_parent=None,
        new_task_project=None,
        new_task_hyperparams=None,
        new_task_configuration=None,
        execution_overrides=None,
        validate_references=None,
        new_project_name=None,
        new_task_input_models=None,
        new_task_container=None,
        **kwargs
    ):
        # All assignments below go through the schema_property setters, which
        # validate types (and convert dicts to model objects where relevant).
        super(CloneRequest, self).__init__(**kwargs)
        self.task = task
        self.new_task_name = new_task_name
        self.new_task_comment = new_task_comment
        self.new_task_tags = new_task_tags
        self.new_task_system_tags = new_task_system_tags
        self.new_task_parent = new_task_parent
        self.new_task_project = new_task_project
        self.new_task_hyperparams = new_task_hyperparams
        self.new_task_configuration = new_task_configuration
        self.execution_overrides = execution_overrides
        self.validate_references = validate_references
        self.new_project_name = new_project_name
        self.new_task_input_models = new_task_input_models
        self.new_task_container = new_task_container

    @schema_property("task")
    def task(self):
        # ID of the task to clone (str), or None when unset.
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("new_task_name")
    def new_task_name(self):
        # Name for the cloned task (str), or None when unset.
        return self._property_new_task_name

    @new_task_name.setter
    def new_task_name(self, value):
        if value is None:
            self._property_new_task_name = None
            return
        self.assert_isinstance(value, "new_task_name", six.string_types)
        self._property_new_task_name = value

    @schema_property("new_task_comment")
    def new_task_comment(self):
        # Comment for the cloned task (str), or None when unset.
        return self._property_new_task_comment

    @new_task_comment.setter
    def new_task_comment(self, value):
        if value is None:
            self._property_new_task_comment = None
            return
        self.assert_isinstance(value, "new_task_comment", six.string_types)
        self._property_new_task_comment = value

    @schema_property("new_task_tags")
    def new_task_tags(self):
        # User-defined tags for the cloned task (list of str), or None.
        return self._property_new_task_tags

    @new_task_tags.setter
    def new_task_tags(self, value):
        if value is None:
            self._property_new_task_tags = None
            return
        self.assert_isinstance(value, "new_task_tags", (list, tuple))
        self.assert_isinstance(value, "new_task_tags", six.string_types, is_array=True)
        self._property_new_task_tags = value

    @schema_property("new_task_system_tags")
    def new_task_system_tags(self):
        # System tags for the cloned task (list of str), or None.
        return self._property_new_task_system_tags

    @new_task_system_tags.setter
    def new_task_system_tags(self, value):
        if value is None:
            self._property_new_task_system_tags = None
            return
        self.assert_isinstance(value, "new_task_system_tags", (list, tuple))
        self.assert_isinstance(
            value, "new_task_system_tags", six.string_types, is_array=True
        )
        self._property_new_task_system_tags = value

    @schema_property("new_task_parent")
    def new_task_parent(self):
        # Parent task ID for the cloned task (str), or None.
        return self._property_new_task_parent

    @new_task_parent.setter
    def new_task_parent(self, value):
        if value is None:
            self._property_new_task_parent = None
            return
        self.assert_isinstance(value, "new_task_parent", six.string_types)
        self._property_new_task_parent = value

    @schema_property("new_task_project")
    def new_task_project(self):
        # Project ID for the cloned task (str), or None.
        return self._property_new_task_project

    @new_task_project.setter
    def new_task_project(self, value):
        if value is None:
            self._property_new_task_project = None
            return
        self.assert_isinstance(value, "new_task_project", six.string_types)
        self._property_new_task_project = value

    @schema_property("new_task_hyperparams")
    def new_task_hyperparams(self):
        # Hyper-params dict for the cloned task, or None.
        return self._property_new_task_hyperparams

    @new_task_hyperparams.setter
    def new_task_hyperparams(self, value):
        if value is None:
            self._property_new_task_hyperparams = None
            return
        self.assert_isinstance(value, "new_task_hyperparams", (dict,))
        self._property_new_task_hyperparams = value

    @schema_property("new_task_configuration")
    def new_task_configuration(self):
        # Configuration dict for the cloned task, or None.
        return self._property_new_task_configuration

    @new_task_configuration.setter
    def new_task_configuration(self, value):
        if value is None:
            self._property_new_task_configuration = None
            return
        self.assert_isinstance(value, "new_task_configuration", (dict,))
        self._property_new_task_configuration = value

    @schema_property("execution_overrides")
    def execution_overrides(self):
        # Execution object overriding the source task's execution params, or None.
        return self._property_execution_overrides

    @execution_overrides.setter
    def execution_overrides(self, value):
        if value is None:
            self._property_execution_overrides = None
            return
        # A plain dict is converted into an Execution model instance.
        if isinstance(value, dict):
            value = Execution.from_dict(value)
        else:
            self.assert_isinstance(value, "execution_overrides", Execution)
        self._property_execution_overrides = value

    @schema_property("validate_references")
    def validate_references(self):
        # Bool flag enabling validation of fields copied from the source task.
        return self._property_validate_references

    @validate_references.setter
    def validate_references(self, value):
        if value is None:
            self._property_validate_references = None
            return
        self.assert_isinstance(value, "validate_references", (bool,))
        self._property_validate_references = value

    @schema_property("new_project_name")
    def new_project_name(self):
        # Name of a (possibly new) project to clone into, or None.
        return self._property_new_project_name

    @new_project_name.setter
    def new_project_name(self, value):
        if value is None:
            self._property_new_project_name = None
            return
        self.assert_isinstance(value, "new_project_name", six.string_types)
        self._property_new_project_name = value

    @schema_property("new_task_input_models")
    def new_task_input_models(self):
        # List of TaskModelItem input models for the cloned task, or None.
        return self._property_new_task_input_models

    @new_task_input_models.setter
    def new_task_input_models(self, value):
        if value is None:
            self._property_new_task_input_models = None
            return
        self.assert_isinstance(value, "new_task_input_models", (list, tuple))
        # Mixed lists are allowed: dict entries are converted to TaskModelItem.
        if any(isinstance(v, dict) for v in value):
            value = [
                TaskModelItem.from_dict(v) if isinstance(v, dict) else v for v in value
            ]
        else:
            self.assert_isinstance(
                value, "new_task_input_models", TaskModelItem, is_array=True
            )
        self._property_new_task_input_models = value

    @schema_property("new_task_container")
    def new_task_container(self):
        # Docker container properties dict for the cloned task, or None.
        return self._property_new_task_container

    @new_task_container.setter
    def new_task_container(self, value):
        if value is None:
            self._property_new_task_container = None
            return
        self.assert_isinstance(value, "new_task_container", (dict,))
        self._property_new_task_container = value
class CloneResponse(Response):
    """
    Response of tasks.clone endpoint.

    :param id: ID of the new task
    :type id: str
    :param new_project: In case the new_project_name was specified returns the
        target project details
    :type new_project: dict
    """

    # Endpoint routing metadata.
    _service = "tasks"
    _action = "clone"
    _version = "2.13"
    # JSON schema of the response payload.
    _schema = {
        "definitions": {},
        "properties": {
            "id": {"description": "ID of the new task", "type": ["string", "null"]},
            "new_project": {
                "description": "In case the new_project_name was specified returns the target project details",
                "properties": {
                    "id": {
                        "description": "The ID of the target project",
                        "type": "string",
                    },
                    "name": {
                        "description": "The name of the target project",
                        "type": "string",
                    },
                },
                "type": ["object", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, id=None, new_project=None, **kwargs):
        super(CloneResponse, self).__init__(**kwargs)
        self.id = id
        self.new_project = new_project

    @schema_property("id")
    def id(self):
        # ID of the newly created task (str), or None.
        return self._property_id

    @id.setter
    def id(self, value):
        # None clears the field; otherwise the value must be a string.
        if value is not None:
            self.assert_isinstance(value, "id", six.string_types)
        self._property_id = value

    @schema_property("new_project")
    def new_project(self):
        # Target project details dict (id/name), or None when not returned.
        return self._property_new_project

    @new_project.setter
    def new_project(self, value):
        if value is not None:
            self.assert_isinstance(value, "new_project", (dict,))
        self._property_new_project = value
class CloseRequest(Request):
    """
    Indicates that task is closed

    :param force: Allows forcing state change even if transition is not supported
    :type force: bool
    :param task: Task ID
    :type task: str
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    """

    # Endpoint routing metadata.
    _service = "tasks"
    _action = "close"
    _version = "2.13"
    # JSON schema of the request payload.
    _schema = {
        "definitions": {},
        "properties": {
            "force": {
                "default": False,
                "description": "Allows forcing state change even if transition is not supported",
                "type": ["boolean", "null"],
            },
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task"],
        "type": "object",
    }

    def __init__(
        self, task, force=False, status_reason=None, status_message=None, **kwargs
    ):
        super(CloseRequest, self).__init__(**kwargs)
        self.force = force
        self.task = task
        self.status_reason = status_reason
        self.status_message = status_message

    @schema_property("force")
    def force(self):
        # Bool flag forcing the state change, or None when unset.
        return self._property_force

    @force.setter
    def force(self, value):
        # None clears the field; otherwise the value must be a bool.
        if value is not None:
            self.assert_isinstance(value, "force", (bool,))
        self._property_force = value

    @schema_property("task")
    def task(self):
        # Task ID (str), or None when unset.
        return self._property_task

    @task.setter
    def task(self, value):
        if value is not None:
            self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("status_reason")
    def status_reason(self):
        # Free-text reason recorded with the status change, or None.
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        if value is not None:
            self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        # Extra information recorded with the status change, or None.
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        if value is not None:
            self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value
class CloseResponse(Response):
    """
    Response of tasks.close endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    """

    # Endpoint routing metadata.
    _service = "tasks"
    _action = "close"
    _version = "2.13"
    # JSON schema of the response payload.
    _schema = {
        "definitions": {},
        "properties": {
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, fields=None, **kwargs):
        super(CloseResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields

    @schema_property("updated")
    def updated(self):
        # Count of updated tasks (0 or 1), or None when absent.
        return self._property_updated

    @updated.setter
    def updated(self, value):
        # Whole-valued floats are coerced to int before validation; None
        # simply clears the field.
        if value is not None:
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value

    @schema_property("fields")
    def fields(self):
        # Dict of updated field names/values, or None when absent.
        return self._property_fields

    @fields.setter
    def fields(self, value):
        if value is not None:
            self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value
class CompletedRequest(Request):
    """
    Signal a task has completed

    :param force: If not true, call fails if the task status is not
        in_progress/stopped
    :type force: bool
    :param task: Task ID
    :type task: str
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    """

    # Endpoint routing metadata.
    _service = "tasks"
    _action = "completed"
    _version = "2.13"
    # JSON schema of the request payload.
    _schema = {
        "definitions": {},
        "properties": {
            "force": {
                "default": False,
                "description": "If not true, call fails if the task status is not in_progress/stopped",
                "type": ["boolean", "null"],
            },
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task"],
        "type": "object",
    }

    def __init__(
        self, task, force=False, status_reason=None, status_message=None, **kwargs
    ):
        super(CompletedRequest, self).__init__(**kwargs)
        self.force = force
        self.task = task
        self.status_reason = status_reason
        self.status_message = status_message

    @schema_property("force")
    def force(self):
        # Bool flag bypassing the status precondition, or None when unset.
        return self._property_force

    @force.setter
    def force(self, value):
        # None clears the field; otherwise the value must be a bool.
        if value is not None:
            self.assert_isinstance(value, "force", (bool,))
        self._property_force = value

    @schema_property("task")
    def task(self):
        # Task ID (str), or None when unset.
        return self._property_task

    @task.setter
    def task(self, value):
        if value is not None:
            self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("status_reason")
    def status_reason(self):
        # Free-text reason recorded with the status change, or None.
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        if value is not None:
            self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        # Extra information recorded with the status change, or None.
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        if value is not None:
            self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value
class CompletedResponse(Response):
    """
    Response of tasks.completed endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    """

    _service = "tasks"
    _action = "completed"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, fields=None, **kwargs):
        super(CompletedResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        if value is None:
            self._property_updated = None
        else:
            # Whole-valued floats (e.g. JSON-decoded numbers) are accepted as ints.
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
            self._property_updated = value

    @schema_property("fields")
    def fields(self):
        return self._property_fields

    @fields.setter
    def fields(self, value):
        # Only a dict (or None to clear) is accepted.
        if value is not None:
            self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value
class CreateRequest(Request):
    """
    Create a new task

    :param name: Task name. Unique within the company.
    :type name: str
    :param tags: User-defined tags list
    :type tags: Sequence[str]
    :param system_tags: System tags list. This field is reserved for system use,
        please don't use it.
    :type system_tags: Sequence[str]
    :param type: Type of task
    :type type: TaskTypeEnum
    :param comment: Free text comment
    :type comment: str
    :param parent: Parent task id Must be a completed task.
    :type parent: str
    :param project: Project ID of the project to which this task is assigned Must
        exist[ab]
    :type project: str
    :param output_dest: Output storage id Must be a reference to an existing
        storage.
    :type output_dest: str
    :param execution: Task execution params
    :type execution: Execution
    :param script: Script info
    :type script: Script
    :param hyperparams: Task hyper params per section
    :type hyperparams: dict
    :param configuration: Task configuration params
    :type configuration: dict
    :param models: Task models
    :type models: TaskModels
    :param container: Docker container parameters
    :type container: dict
    :param input: Accepted and stored as-is, without validation; absent from the
        request schema. NOTE(review): looks like a legacy field -- confirm.
    """

    # Endpoint routing metadata (service/action/version of the API call).
    _service = "tasks"
    _action = "create"
    _version = "2.13"
    # JSON-schema description of the request payload and its shared definitions.
    _schema = {
        "definitions": {
            "artifact": {
                "properties": {
                    "content_size": {
                        "description": "Raw data length in bytes",
                        "type": "integer",
                    },
                    "display_data": {
                        "description": "User-defined list of key/value pairs, sorted",
                        "items": {"items": {"type": "string"}, "type": "array"},
                        "type": "array",
                    },
                    "hash": {
                        "description": "Hash of entire raw data",
                        "type": "string",
                    },
                    "key": {"description": "Entry key", "type": "string"},
                    "mode": {
                        "$ref": "#/definitions/artifact_mode_enum",
                        "description": "System defined input/output indication",
                    },
                    "timestamp": {
                        "description": "Epoch time when artifact was created",
                        "type": "integer",
                    },
                    "type": {
                        "description": "System defined type",
                        "type": "string",
                    },
                    "type_data": {
                        "$ref": "#/definitions/artifact_type_data",
                        "description": "Additional fields defined by the system",
                    },
                    "uri": {"description": "Raw data location", "type": "string"},
                },
                "required": ["key", "type"],
                "type": "object",
            },
            "artifact_mode_enum": {
                "default": "output",
                "enum": ["input", "output"],
                "type": "string",
            },
            "artifact_type_data": {
                "properties": {
                    "content_type": {
                        "description": "System defined raw data content type",
                        "type": ["string", "null"],
                    },
                    "data_hash": {
                        "description": "Hash of raw data, without any headers or descriptive parts",
                        "type": ["string", "null"],
                    },
                    "preview": {
                        "description": "Description or textual data",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "configuration_item": {
                "properties": {
                    "description": {
                        "description": "The parameter description. Optional",
                        "type": ["string", "null"],
                    },
                    "name": {
                        "description": "Name of the parameter. Should be unique",
                        "type": ["string", "null"],
                    },
                    "type": {
                        "description": "Type of the parameter. Optional",
                        "type": ["string", "null"],
                    },
                    "value": {
                        "description": "Value of the parameter",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "execution": {
                "properties": {
                    "artifacts": {
                        "description": "Task artifacts",
                        "items": {"$ref": "#/definitions/artifact"},
                        "type": ["array", "null"],
                    },
                    "framework": {
                        "description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ",
                        "type": ["string", "null"],
                    },
                    "model_desc": {
                        "additionalProperties": True,
                        "description": "Json object representing the Model descriptors",
                        "type": ["object", "null"],
                    },
                    "model_labels": {
                        "additionalProperties": {"type": "integer"},
                        "description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks",
                        "type": ["object", "null"],
                    },
                    "parameters": {
                        "additionalProperties": True,
                        "description": "Json object containing the Task parameters",
                        "type": ["object", "null"],
                    },
                    "queue": {
                        "description": "Queue ID where task was queued.",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "params_item": {
                "properties": {
                    "description": {
                        "description": "The parameter description. Optional",
                        "type": ["string", "null"],
                    },
                    "name": {
                        "description": "Name of the parameter. The combination of section and name should be unique",
                        "type": ["string", "null"],
                    },
                    "section": {
                        "description": "Section that the parameter belongs to",
                        "type": ["string", "null"],
                    },
                    "type": {
                        "description": "Type of the parameter. Optional",
                        "type": ["string", "null"],
                    },
                    "value": {
                        "description": "Value of the parameter",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "script": {
                "properties": {
                    "binary": {
                        "default": "python",
                        "description": "Binary to use when running the script",
                        "type": ["string", "null"],
                    },
                    "branch": {
                        "description": "Repository branch id If not provided and tag not provided, default repository branch is used.",
                        "type": ["string", "null"],
                    },
                    "diff": {
                        "description": "Uncommitted changes found in the repository when task was run",
                        "type": ["string", "null"],
                    },
                    "entry_point": {
                        "description": "Path to execute within the repository",
                        "type": ["string", "null"],
                    },
                    "repository": {
                        "description": "Name of the repository where the script is located",
                        "type": ["string", "null"],
                    },
                    "requirements": {
                        "description": "A JSON object containing requirements strings by key",
                        "type": ["object", "null"],
                    },
                    "tag": {
                        "description": "Repository tag",
                        "type": ["string", "null"],
                    },
                    "version_num": {
                        "description": "Version (changeset) number. Optional (default is head version) Unused if tag is provided.",
                        "type": ["string", "null"],
                    },
                    "working_dir": {
                        "description": "Path to the folder from which to run the script Default - root folder of repository",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "section_params": {
                "additionalProperties": {"$ref": "#/definitions/params_item"},
                "description": "Task section params",
                "type": "object",
            },
            "task_model_item": {
                "properties": {
                    "model": {"description": "The model ID", "type": "string"},
                    "name": {
                        "description": "The task model name",
                        "type": "string",
                    },
                },
                "required": ["name", "model"],
                "type": "object",
            },
            "task_models": {
                "properties": {
                    "input": {
                        "description": "The list of task input models",
                        "items": {"$ref": "#/definitions/task_model_item"},
                        "type": ["array", "null"],
                    },
                    "output": {
                        "description": "The list of task output models",
                        "items": {"$ref": "#/definitions/task_model_item"},
                        "type": ["array", "null"],
                    },
                },
                "type": "object",
            },
            "task_type_enum": {
                "enum": [
                    "training",
                    "testing",
                    "inference",
                    "data_processing",
                    "application",
                    "monitor",
                    "controller",
                    "optimizer",
                    "service",
                    "qc",
                    "custom",
                ],
                "type": "string",
            },
        },
        "properties": {
            "comment": {"description": "Free text comment ", "type": "string"},
            "configuration": {
                "additionalProperties": {"$ref": "#/definitions/configuration_item"},
                "description": "Task configuration params",
                "type": "object",
            },
            "container": {
                "type": "object",
                "description": "Docker container parameters",
                "additionalProperties": {"type": "string"},
            },
            "execution": {
                "$ref": "#/definitions/execution",
                "description": "Task execution params",
            },
            "hyperparams": {
                "additionalProperties": {"$ref": "#/definitions/section_params"},
                "description": "Task hyper params per section",
                "type": "object",
            },
            "models": {
                "$ref": "#/definitions/task_models",
                "description": "Task models",
            },
            "name": {
                "description": "Task name. Unique within the company.",
                "type": "string",
            },
            "output_dest": {
                "description": "Output storage id Must be a reference to an existing storage.",
                "type": "string",
            },
            "parent": {
                "description": "Parent task id Must be a completed task.",
                "type": "string",
            },
            "project": {
                "description": "Project ID of the project to which this task is assigned Must exist[ab]",
                "type": "string",
            },
            "script": {"$ref": "#/definitions/script", "description": "Script info"},
            "system_tags": {
                "description": "System tags list. This field is reserved for system use, please don't use it.",
                "items": {"type": "string"},
                "type": "array",
            },
            "tags": {
                "description": "User-defined tags list",
                "items": {"type": "string"},
                "type": "array",
            },
            "type": {
                "$ref": "#/definitions/task_type_enum",
                "description": "Type of task",
            },
        },
        "required": ["name", "type"],
        "type": "object",
    }

    def __init__(
        self,
        name,
        type,
        tags=None,
        system_tags=None,
        comment=None,
        parent=None,
        project=None,
        input=None,
        output_dest=None,
        execution=None,
        script=None,
        hyperparams=None,
        configuration=None,
        models=None,
        container=None,
        **kwargs
    ):
        super(CreateRequest, self).__init__(**kwargs)
        # All assignments below go through the validating property setters.
        self.name = name
        self.tags = tags
        self.system_tags = system_tags
        self.type = type
        self.comment = comment
        self.parent = parent
        self.project = project
        self.input = input
        self.output_dest = output_dest
        self.execution = execution
        self.script = script
        self.hyperparams = hyperparams
        self.configuration = configuration
        self.models = models
        self.container = container

    @schema_property("name")
    def name(self):
        return self._property_name

    @name.setter
    def name(self, value):
        if value is None:
            self._property_name = None
            return
        self.assert_isinstance(value, "name", six.string_types)
        self._property_name = value

    @schema_property("tags")
    def tags(self):
        return self._property_tags

    @tags.setter
    def tags(self, value):
        if value is None:
            self._property_tags = None
            return
        # Must be a list/tuple of strings.
        self.assert_isinstance(value, "tags", (list, tuple))
        self.assert_isinstance(value, "tags", six.string_types, is_array=True)
        self._property_tags = value

    @schema_property("system_tags")
    def system_tags(self):
        return self._property_system_tags

    @system_tags.setter
    def system_tags(self, value):
        if value is None:
            self._property_system_tags = None
            return
        self.assert_isinstance(value, "system_tags", (list, tuple))
        self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
        self._property_system_tags = value

    @schema_property("type")
    def type(self):
        return self._property_type

    @type.setter
    def type(self, value):
        if value is None:
            self._property_type = None
            return
        if isinstance(value, six.string_types):
            try:
                value = TaskTypeEnum(value)
            except ValueError:
                # Strings not matching a known TaskTypeEnum value are kept as-is.
                pass
        else:
            # Non-string values must already be an enum member.
            self.assert_isinstance(value, "type", enum.Enum)
        self._property_type = value

    @schema_property("comment")
    def comment(self):
        return self._property_comment

    @comment.setter
    def comment(self, value):
        if value is None:
            self._property_comment = None
            return
        self.assert_isinstance(value, "comment", six.string_types)
        self._property_comment = value

    @schema_property("parent")
    def parent(self):
        return self._property_parent

    @parent.setter
    def parent(self, value):
        if value is None:
            self._property_parent = None
            return
        self.assert_isinstance(value, "parent", six.string_types)
        self._property_parent = value

    @schema_property("project")
    def project(self):
        return self._property_project

    @project.setter
    def project(self, value):
        if value is None:
            self._property_project = None
            return
        self.assert_isinstance(value, "project", six.string_types)
        self._property_project = value

    @schema_property("input")
    def input(self):
        return self._property_input

    @input.setter
    def input(self, value):
        # NOTE(review): stored without any validation and absent from _schema --
        # looks like a legacy field; confirm whether it is still needed.
        self._property_input = value

    @schema_property("output_dest")
    def output_dest(self):
        return self._property_output_dest

    @output_dest.setter
    def output_dest(self, value):
        if value is None:
            self._property_output_dest = None
            return
        self.assert_isinstance(value, "output_dest", six.string_types)
        self._property_output_dest = value

    @schema_property("execution")
    def execution(self):
        return self._property_execution

    @execution.setter
    def execution(self, value):
        if value is None:
            self._property_execution = None
            return
        # A plain dict is converted into an Execution object.
        if isinstance(value, dict):
            value = Execution.from_dict(value)
        else:
            self.assert_isinstance(value, "execution", Execution)
        self._property_execution = value

    @schema_property("script")
    def script(self):
        return self._property_script

    @script.setter
    def script(self, value):
        if value is None:
            self._property_script = None
            return
        # A plain dict is converted into a Script object.
        if isinstance(value, dict):
            value = Script.from_dict(value)
        else:
            self.assert_isinstance(value, "script", Script)
        self._property_script = value

    @schema_property("hyperparams")
    def hyperparams(self):
        return self._property_hyperparams

    @hyperparams.setter
    def hyperparams(self, value):
        if value is None:
            self._property_hyperparams = None
            return
        # Expects a dict of section name -> SectionParams (or dict convertible to it).
        self.assert_isinstance(value, "hyperparams", dict)
        self.assert_isinstance(
            value.keys(), "hyperparams_keys", six.string_types, is_array=True
        )
        self.assert_isinstance(
            value.values(), "hyperparams_values", (SectionParams, dict), is_array=True
        )
        # Dict values are expanded into SectionParams objects; existing objects pass through.
        value = dict(
            (k, SectionParams(**v) if isinstance(v, dict) else v)
            for k, v in value.items()
        )
        self._property_hyperparams = value

    @schema_property("configuration")
    def configuration(self):
        return self._property_configuration

    @configuration.setter
    def configuration(self, value):
        if value is None:
            self._property_configuration = None
            return
        # Expects a dict of item name -> ConfigurationItem (or dict convertible to it).
        self.assert_isinstance(value, "configuration", dict)
        self.assert_isinstance(
            value.keys(), "configuration_keys", six.string_types, is_array=True
        )
        self.assert_isinstance(
            value.values(),
            "configuration_values",
            (ConfigurationItem, dict),
            is_array=True,
        )
        value = dict(
            (k, ConfigurationItem(**v) if isinstance(v, dict) else v)
            for k, v in value.items()
        )
        self._property_configuration = value

    @schema_property("models")
    def models(self):
        return self._property_models

    @models.setter
    def models(self, value):
        if value is None:
            self._property_models = None
            return
        # A plain dict is converted into a TaskModels object.
        if isinstance(value, dict):
            value = TaskModels.from_dict(value)
        else:
            self.assert_isinstance(value, "models", TaskModels)
        self._property_models = value

    @schema_property("container")
    def container(self):
        return self._property_container

    @container.setter
    def container(self, value):
        if value is None:
            self._property_container = None
            return
        self.assert_isinstance(value, "container", dict)
        self._property_container = value
class CreateResponse(Response):
    """
    Response of tasks.create endpoint.

    :param id: ID of the task
    :type id: str
    """

    _service = "tasks"
    _action = "create"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "id": {"description": "ID of the task", "type": ["string", "null"]}
        },
        "type": "object",
    }

    def __init__(self, id=None, **kwargs):
        super(CreateResponse, self).__init__(**kwargs)
        self.id = id

    @schema_property("id")
    def id(self):
        return self._property_id

    @id.setter
    def id(self, value):
        # Only a string task ID (or None to clear) is accepted.
        if value is not None:
            self.assert_isinstance(value, "id", six.string_types)
        self._property_id = value
class DeleteRequest(Request):
    """
    Delete a task along with any information stored for it (statistics, frame updates etc.)
    Unless Force flag is provided, operation will fail if task has objects associated with it - i.e. children tasks and projects.
    Models that refer to the deleted task will be updated with a task ID indicating a deleted task.

    :param move_to_trash: Move task to trash instead of deleting it. For internal
        use only, tasks in the trash are not visible from the API and cannot be
        restored!
    :type move_to_trash: bool
    :param force: If not true, call fails if the task status is 'in_progress'
    :type force: bool
    :param task: Task ID
    :type task: str
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    :param return_file_urls: If set to 'true' then return the urls of the files
        that were uploaded by this task. Default value is 'false'
    :type return_file_urls: bool
    :param delete_output_models: If set to 'true' then delete output models of this
        task that are not referenced by other tasks. Default value is 'true'
    :type delete_output_models: bool
    """

    # Endpoint routing metadata (service/action/version of the API call).
    _service = "tasks"
    _action = "delete"
    _version = "2.13"
    # JSON-schema description of the request payload.
    _schema = {
        "definitions": {},
        "properties": {
            "delete_output_models": {
                "description": "If set to 'true' then delete output models of this task that are not referenced by other tasks. Default value is 'true'",
                "type": "boolean",
            },
            "force": {
                "default": False,
                "description": "If not true, call fails if the task status is 'in_progress'",
                "type": ["boolean", "null"],
            },
            "move_to_trash": {
                "default": False,
                "description": "Move task to trash instead of deleting it. For internal use only, tasks in the trash are not visible from the API and cannot be restored!",
                "type": ["boolean", "null"],
            },
            "return_file_urls": {
                "description": "If set to 'true' then return the urls of the files that were uploaded by this task. Default value is 'false'",
                "type": "boolean",
            },
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task"],
        "type": "object",
    }

    def __init__(
        self,
        task,
        move_to_trash=False,
        force=False,
        status_reason=None,
        status_message=None,
        return_file_urls=None,
        delete_output_models=None,
        **kwargs
    ):
        super(DeleteRequest, self).__init__(**kwargs)
        # All assignments below go through the validating property setters.
        self.move_to_trash = move_to_trash
        self.force = force
        self.task = task
        self.status_reason = status_reason
        self.status_message = status_message
        self.return_file_urls = return_file_urls
        self.delete_output_models = delete_output_models

    @schema_property("move_to_trash")
    def move_to_trash(self):
        return self._property_move_to_trash

    @move_to_trash.setter
    def move_to_trash(self, value):
        # None clears the field; otherwise only booleans are accepted.
        if value is None:
            self._property_move_to_trash = None
            return
        self.assert_isinstance(value, "move_to_trash", (bool,))
        self._property_move_to_trash = value

    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("status_reason")
    def status_reason(self):
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        if value is None:
            self._property_status_reason = None
            return
        self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        if value is None:
            self._property_status_message = None
            return
        self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value

    @schema_property("return_file_urls")
    def return_file_urls(self):
        return self._property_return_file_urls

    @return_file_urls.setter
    def return_file_urls(self, value):
        if value is None:
            self._property_return_file_urls = None
            return
        self.assert_isinstance(value, "return_file_urls", (bool,))
        self._property_return_file_urls = value

    @schema_property("delete_output_models")
    def delete_output_models(self):
        return self._property_delete_output_models

    @delete_output_models.setter
    def delete_output_models(self, value):
        if value is None:
            self._property_delete_output_models = None
            return
        self.assert_isinstance(value, "delete_output_models", (bool,))
        self._property_delete_output_models = value
class DeleteResponse(Response):
    """
    Response of tasks.delete endpoint.

    :param deleted: Indicates whether the task was deleted
    :type deleted: bool
    :param updated_children: Number of child tasks whose parent property was
        updated
    :type updated_children: int
    :param updated_models: Number of models whose task property was updated
    :type updated_models: int
    :param frames: Response from frames.rollback
    :type frames: dict
    :param events: Response from events.delete_for_task
    :type events: dict
    :param urls: The urls of the files that were uploaded by this task. Returned if
        the 'return_file_urls' was set to 'true'
    :type urls: TaskUrls
    """

    # Endpoint routing metadata (service/action/version of the API call).
    _service = "tasks"
    _action = "delete"
    _version = "2.13"
    # JSON-schema description of the response payload.
    _schema = {
        "definitions": {
            "task_urls": {
                "properties": {
                    "artifact_urls": {
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                    "event_urls": {
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                    "model_urls": {
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                },
                "type": "object",
            }
        },
        "properties": {
            "deleted": {
                "description": "Indicates whether the task was deleted",
                "type": ["boolean", "null"],
            },
            "events": {
                "additionalProperties": True,
                "description": "Response from events.delete_for_task",
                "type": ["object", "null"],
            },
            "frames": {
                "additionalProperties": True,
                "description": "Response from frames.rollback",
                "type": ["object", "null"],
            },
            "updated_children": {
                "description": "Number of child tasks whose parent property was updated",
                "type": ["integer", "null"],
            },
            "updated_models": {
                "description": "Number of models whose task property was updated",
                "type": ["integer", "null"],
            },
            "urls": {
                "description": "The urls of the files that were uploaded by this task. Returned if the 'return_file_urls' was set to 'true'",
                "oneOf": [{"$ref": "#/definitions/task_urls"}, {"type": "null"}],
            },
        },
        "type": "object",
    }

    def __init__(
        self,
        deleted=None,
        updated_children=None,
        updated_models=None,
        frames=None,
        events=None,
        urls=None,
        **kwargs
    ):
        super(DeleteResponse, self).__init__(**kwargs)
        # All assignments below go through the validating property setters.
        self.deleted = deleted
        self.updated_children = updated_children
        self.updated_models = updated_models
        self.frames = frames
        self.events = events
        self.urls = urls

    @schema_property("deleted")
    def deleted(self):
        return self._property_deleted

    @deleted.setter
    def deleted(self, value):
        if value is None:
            self._property_deleted = None
            return
        self.assert_isinstance(value, "deleted", (bool,))
        self._property_deleted = value

    @schema_property("updated_children")
    def updated_children(self):
        return self._property_updated_children

    @updated_children.setter
    def updated_children(self, value):
        if value is None:
            self._property_updated_children = None
            return
        # JSON-decoded numbers may arrive as floats; accept whole-valued floats as ints.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated_children", six.integer_types)
        self._property_updated_children = value

    @schema_property("updated_models")
    def updated_models(self):
        return self._property_updated_models

    @updated_models.setter
    def updated_models(self, value):
        if value is None:
            self._property_updated_models = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated_models", six.integer_types)
        self._property_updated_models = value

    @schema_property("frames")
    def frames(self):
        return self._property_frames

    @frames.setter
    def frames(self, value):
        if value is None:
            self._property_frames = None
            return
        self.assert_isinstance(value, "frames", (dict,))
        self._property_frames = value

    @schema_property("events")
    def events(self):
        return self._property_events

    @events.setter
    def events(self, value):
        if value is None:
            self._property_events = None
            return
        self.assert_isinstance(value, "events", (dict,))
        self._property_events = value

    @schema_property("urls")
    def urls(self):
        return self._property_urls

    @urls.setter
    def urls(self, value):
        if value is None:
            self._property_urls = None
            return
        # A plain dict is converted into a TaskUrls object.
        if isinstance(value, dict):
            value = TaskUrls.from_dict(value)
        else:
            self.assert_isinstance(value, "urls", TaskUrls)
        self._property_urls = value
class DeleteArtifactsRequest(Request):
    """
    Delete existing artifacts (search by key/mode)

    :param task: Task ID
    :type task: str
    :param artifacts: Artifacts to delete
    :type artifacts: Sequence[ArtifactId]
    :param force: If set to True then both new and running task artifacts can be
        deleted. Otherwise only the new task ones. Default is False
    :type force: bool
    """

    # Endpoint routing metadata (service/action/version of the API call).
    _service = "tasks"
    _action = "delete_artifacts"
    _version = "2.13"
    # JSON-schema description of the request payload.
    _schema = {
        "definitions": {
            "artifact_id": {
                "properties": {
                    "key": {"description": "Entry key", "type": "string"},
                    "mode": {
                        "$ref": "#/definitions/artifact_mode_enum",
                        "description": "System defined input/output indication",
                    },
                },
                "required": ["key"],
                "type": "object",
            },
            "artifact_mode_enum": {
                "default": "output",
                "enum": ["input", "output"],
                "type": "string",
            },
        },
        "properties": {
            "artifacts": {
                "description": "Artifacts to delete",
                "items": {"$ref": "#/definitions/artifact_id"},
                "type": "array",
            },
            "force": {
                "description": "If set to True then both new and running task artifacts can be deleted. Otherwise only the new task ones. Default is False",
                "type": "boolean",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task", "artifacts"],
        "type": "object",
    }

    def __init__(self, task, artifacts, force=None, **kwargs):
        super(DeleteArtifactsRequest, self).__init__(**kwargs)
        # All assignments below go through the validating property setters.
        self.task = task
        self.artifacts = artifacts
        self.force = force

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("artifacts")
    def artifacts(self):
        return self._property_artifacts

    @artifacts.setter
    def artifacts(self, value):
        if value is None:
            self._property_artifacts = None
            return
        self.assert_isinstance(value, "artifacts", (list, tuple))
        if any(isinstance(v, dict) for v in value):
            # Dict entries are converted to ArtifactId objects.
            # NOTE(review): in a mixed list, non-dict entries are passed through
            # without type-checking -- the is_array assertion below only runs when
            # no entry is a dict.
            value = [
                ArtifactId.from_dict(v) if isinstance(v, dict) else v for v in value
            ]
        else:
            self.assert_isinstance(value, "artifacts", ArtifactId, is_array=True)
        self._property_artifacts = value

    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value
class DeleteArtifactsResponse(Response):
    """
    Response of tasks.delete_artifacts endpoint.

    :param deleted: Indicates if the task was updated successfully
    :type deleted: int
    """

    _service = "tasks"
    _action = "delete_artifacts"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "deleted": {
                "description": "Indicates if the task was updated successfully",
                "type": ["integer", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, deleted=None, **kwargs):
        super(DeleteArtifactsResponse, self).__init__(**kwargs)
        self.deleted = deleted

    @schema_property("deleted")
    def deleted(self):
        return self._property_deleted

    @deleted.setter
    def deleted(self, value):
        if value is None:
            self._property_deleted = None
        else:
            # Whole-valued floats (e.g. JSON-decoded numbers) are accepted as ints.
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "deleted", six.integer_types)
            self._property_deleted = value
class DeleteConfigurationRequest(Request):
    """
    Delete task configuration items

    :param task: Task ID
    :type task: str
    :param configuration: List of configuration items to delete
    :type configuration: Sequence[str]
    :param force: If set to True then both new and running task configuration can
        be deleted. Otherwise only the new task ones. Default is False
    :type force: bool
    """

    # Endpoint routing metadata (service/action/version of the API call).
    _service = "tasks"
    _action = "delete_configuration"
    _version = "2.13"
    # JSON-schema description of the request payload: 'configuration' is an
    # array of item names (strings) to delete.
    _schema = {
        "definitions": {},
        "properties": {
            "configuration": {
                "description": "List of configuration itemss to delete",
                "items": {"type": "string"},
                "type": "array",
            },
            "force": {
                "description": "If set to True then both new and running task configuration can be deleted. Otherwise only the new task ones. Default is False",
                "type": "boolean",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task", "configuration"],
        "type": "object",
    }

    def __init__(self, task, configuration, force=None, **kwargs):
        super(DeleteConfigurationRequest, self).__init__(**kwargs)
        # All assignments below go through the validating property setters.
        self.task = task
        self.configuration = configuration
        self.force = force

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("configuration")
    def configuration(self):
        return self._property_configuration

    @configuration.setter
    def configuration(self, value):
        # BUGFIX: this setter previously validated a dict of ConfigurationItem
        # objects (copy-pasted from the create/edit request), which contradicted
        # both the docstring (Sequence[str]) and _schema ("type": "array",
        # "items": {"type": "string"}) and rejected every valid payload.
        # It now validates a list/tuple of configuration item names.
        if value is None:
            self._property_configuration = None
            return
        self.assert_isinstance(value, "configuration", (list, tuple))
        self.assert_isinstance(
            value, "configuration", six.string_types, is_array=True
        )
        self._property_configuration = value

    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value
class DeleteConfigurationResponse(Response):
    """
    Response of tasks.delete_configuration endpoint.

    :param deleted: Indicates if the task was updated successfully
    :type deleted: int
    """

    _service = "tasks"
    _action = "delete_configuration"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "deleted": {
                "description": "Indicates if the task was updated successfully",
                "type": ["integer", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, deleted=None, **kwargs):
        super(DeleteConfigurationResponse, self).__init__(**kwargs)
        self.deleted = deleted

    @schema_property("deleted")
    def deleted(self):
        return self._property_deleted

    @deleted.setter
    def deleted(self, value):
        if value is None:
            self._property_deleted = None
        else:
            # Whole-valued floats (e.g. JSON-decoded numbers) are accepted as ints.
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "deleted", six.integer_types)
            self._property_deleted = value
class DeleteHyperParamsRequest(Request):
    """
    Delete task hyper parameters.

    :param task: Task ID
    :type task: str
    :param hyperparams: List of hyper parameters to delete. In case a parameter
        with an empty name is passed all the section will be deleted
    :type hyperparams: Sequence[ParamKey]
    :param force: If set to True then both new and running task hyper params can be
        deleted. Otherwise only the new task ones. Default is False
    :type force: bool
    """

    # Endpoint routing metadata used by the transport layer.
    _service = "tasks"
    _action = "delete_hyper_params"
    _version = "2.13"
    _schema = {
        "definitions": {
            "param_key": {
                "properties": {
                    "name": {
                        "description": "Name of the parameter. If the name is ommitted then the corresponding operation is performed on the whole section",
                        "type": ["string", "null"],
                    },
                    "section": {
                        "description": "Section that the parameter belongs to",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            }
        },
        "properties": {
            "force": {
                "description": "If set to True then both new and running task hyper params can be deleted. Otherwise only the new task ones. Default is False",
                "type": "boolean",
            },
            "hyperparams": {
                "description": "List of hyper parameters to delete. In case a parameter with an empty name is passed all the section will be deleted",
                "items": {"$ref": "#/definitions/param_key"},
                "type": "array",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task", "hyperparams"],
        "type": "object",
    }

    def __init__(self, task, hyperparams, force=None, **kwargs):
        super(DeleteHyperParamsRequest, self).__init__(**kwargs)
        self.task = task
        self.hyperparams = hyperparams
        self.force = force

    @schema_property("task")
    def task(self):
        """Return the target task ID."""
        return self._property_task

    @task.setter
    def task(self, value):
        """Store the task ID after checking it is a string; None clears it."""
        if value is not None:
            self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("hyperparams")
    def hyperparams(self):
        """Return the list of ParamKey entries to delete."""
        return self._property_hyperparams

    @hyperparams.setter
    def hyperparams(self, value):
        """Store the parameter keys, converting plain dicts to ParamKey objects."""
        if value is None:
            self._property_hyperparams = None
            return
        self.assert_isinstance(value, "hyperparams", (ParamKey, dict), is_array=True)
        self._property_hyperparams = [
            ParamKey(**entry) if isinstance(entry, dict) else entry
            for entry in value
        ]

    @schema_property("force")
    def force(self):
        """Return the force flag."""
        return self._property_force

    @force.setter
    def force(self, value):
        """Store the force flag after checking it is a bool; None clears it."""
        if value is not None:
            self.assert_isinstance(value, "force", (bool,))
        self._property_force = value
class DeleteHyperParamsResponse(Response):
    """
    Response of tasks.delete_hyper_params endpoint.

    :param deleted: Indicates if the task was updated successfully
    :type deleted: int
    """

    # Endpoint routing metadata used by the transport layer.
    _service = "tasks"
    _action = "delete_hyper_params"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "deleted": {
                "description": "Indicates if the task was updated successfully",
                "type": ["integer", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, deleted=None, **kwargs):
        super(DeleteHyperParamsResponse, self).__init__(**kwargs)
        self.deleted = deleted

    @schema_property("deleted")
    def deleted(self):
        """Return the deletion indicator (None if not present)."""
        return self._property_deleted

    @deleted.setter
    def deleted(self, value):
        """Store the deletion count; accepts int, integral float, or None."""
        if value is None:
            self._property_deleted = None
        else:
            # JSON decoding may yield floats for integer fields; normalize.
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "deleted", six.integer_types)
            self._property_deleted = value
class DeleteManyRequest(Request):
    """
    Delete tasks
    :param ids: Entities to move
    :type ids: Sequence[str]
    :param move_to_trash: Move task to trash instead of deleting it. For internal
        use only, tasks in the trash are not visible from the API and cannot be
        restored!
    :type move_to_trash: bool
    :param force: If not true, call fails if the task status is 'in_progress'
    :type force: bool
    :param return_file_urls: If set to 'true' then return the urls of the files
        that were uploaded by the tasks. Default value is 'false'
    :type return_file_urls: bool
    :param delete_output_models: If set to 'true' then delete output models of the
        tasks that are not referenced by other tasks. Default value is 'true'
    :type delete_output_models: bool
    """

    # Endpoint routing metadata used by the transport layer.
    _service = "tasks"
    _action = "delete_many"
    _version = "2.13"
    # NOTE(review): the "ids" description reads "Entities to move" — this looks
    # copied from a move endpoint in the server-side schema; presumably these
    # are the IDs of the tasks to delete. Confirm against the server schema.
    _schema = {
        "definitions": {},
        "properties": {
            "delete_output_models": {
                "description": "If set to 'true' then delete output models of the tasks that are not referenced by other tasks. Default value is 'true'",
                "type": "boolean",
            },
            "force": {
                "default": False,
                "description": "If not true, call fails if the task status is 'in_progress'",
                "type": "boolean",
            },
            "ids": {
                "description": "Entities to move",
                "items": {"type": "string"},
                "type": "array",
            },
            "move_to_trash": {
                "default": False,
                "description": "Move task to trash instead of deleting it. For internal use only, tasks in the trash are not visible from the API and cannot be restored!",
                "type": "boolean",
            },
            "return_file_urls": {
                "description": "If set to 'true' then return the urls of the files that were uploaded by the tasks. Default value is 'false'",
                "type": "boolean",
            },
        },
        "required": ["ids"],
        "type": "object",
    }

    def __init__(
        self,
        ids,
        move_to_trash=False,
        force=False,
        return_file_urls=None,
        delete_output_models=None,
        **kwargs
    ):
        # Assignments below route through the schema_property setters, which
        # validate each value before storing it.
        super(DeleteManyRequest, self).__init__(**kwargs)
        self.ids = ids
        self.move_to_trash = move_to_trash
        self.force = force
        self.return_file_urls = return_file_urls
        self.delete_output_models = delete_output_models

    # --- ids: list of task IDs this request operates on (required) ---
    @schema_property("ids")
    def ids(self):
        return self._property_ids

    @ids.setter
    def ids(self, value):
        if value is None:
            self._property_ids = None
            return
        # Must be a sequence of strings; both container and elements checked.
        self.assert_isinstance(value, "ids", (list, tuple))
        self.assert_isinstance(value, "ids", six.string_types, is_array=True)
        self._property_ids = value

    # --- move_to_trash: soft-delete flag (internal use only) ---
    @schema_property("move_to_trash")
    def move_to_trash(self):
        return self._property_move_to_trash

    @move_to_trash.setter
    def move_to_trash(self, value):
        if value is None:
            self._property_move_to_trash = None
            return
        self.assert_isinstance(value, "move_to_trash", (bool,))
        self._property_move_to_trash = value

    # --- force: allow deleting tasks whose status is 'in_progress' ---
    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value

    # --- return_file_urls: request the uploaded-file URLs in the response ---
    @schema_property("return_file_urls")
    def return_file_urls(self):
        return self._property_return_file_urls

    @return_file_urls.setter
    def return_file_urls(self, value):
        if value is None:
            self._property_return_file_urls = None
            return
        self.assert_isinstance(value, "return_file_urls", (bool,))
        self._property_return_file_urls = value

    # --- delete_output_models: also delete unreferenced output models ---
    @schema_property("delete_output_models")
    def delete_output_models(self):
        return self._property_delete_output_models

    @delete_output_models.setter
    def delete_output_models(self, value):
        if value is None:
            self._property_delete_output_models = None
            return
        self.assert_isinstance(value, "delete_output_models", (bool,))
        self._property_delete_output_models = value
class DeleteManyResponse(Response):
    """
    Response of tasks.delete_many endpoint.
    :param deleted: Number of tasks deleted
    :type deleted: int
    :param updated_children: Number of child tasks whose parent property was
        updated
    :type updated_children: int
    :param updated_models: Number of models whose task property was updated
    :type updated_models: int
    :param deleted_models: Number of deleted output models
    :type deleted_models: int
    :param urls: The urls of the files that were uploaded by the tasks. Returned if
        the 'return_file_urls' was set to 'true'
    :type urls: TaskUrls
    """

    # Endpoint routing metadata used by the transport layer.
    _service = "tasks"
    _action = "delete_many"
    _version = "2.13"
    # NOTE(review): unlike sibling response schemas, "failures" here sits at the
    # top level of the schema dict (not under "properties") and the top-level
    # "type": "object" key is absent — likely a generator artifact mirroring the
    # server schema. Confirm before relying on this schema for validation.
    _schema = {
        "definitions": {
            "task_urls": {
                "properties": {
                    "artifact_urls": {
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                    "event_urls": {
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                    "model_urls": {
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                },
                "type": "object",
            }
        },
        "failures": {
            "item": {
                "error": {
                    "description": "Error info",
                    "properties": {
                        "codes": {"item": {"type": "integer"}, "type": "array"},
                        "data": {"additionalProperties": True, "type": "object"},
                        "msg": {"type": "string"},
                    },
                    "type": "object",
                },
                "id": {"description": "ID of the failed entity", "type": "string"},
                "type": "object",
            },
            "type": "array",
        },
        "properties": {
            "deleted": {
                "description": "Number of tasks deleted",
                "type": ["integer", "null"],
            },
            "deleted_models": {
                "description": "Number of deleted output models",
                "type": ["integer", "null"],
            },
            "updated_children": {
                "description": "Number of child tasks whose parent property was updated",
                "type": ["integer", "null"],
            },
            "updated_models": {
                "description": "Number of models whose task property was updated",
                "type": ["integer", "null"],
            },
            "urls": {
                "description": "The urls of the files that were uploaded by the tasks. Returned if the 'return_file_urls' was set to 'true'",
                "oneOf": [{"$ref": "#/definitions/task_urls"}, {"type": "null"}],
            },
        },
    }

    def __init__(
        self,
        deleted=None,
        updated_children=None,
        updated_models=None,
        deleted_models=None,
        urls=None,
        **kwargs
    ):
        # Assignments route through the schema_property setters for validation.
        super(DeleteManyResponse, self).__init__(**kwargs)
        self.deleted = deleted
        self.updated_children = updated_children
        self.updated_models = updated_models
        self.deleted_models = deleted_models
        self.urls = urls

    # --- deleted: number of tasks that were deleted ---
    @schema_property("deleted")
    def deleted(self):
        return self._property_deleted

    @deleted.setter
    def deleted(self, value):
        if value is None:
            self._property_deleted = None
            return
        # JSON decoding may yield floats for integer fields; normalize.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "deleted", six.integer_types)
        self._property_deleted = value

    # --- updated_children: child tasks whose parent pointer was re-written ---
    @schema_property("updated_children")
    def updated_children(self):
        return self._property_updated_children

    @updated_children.setter
    def updated_children(self, value):
        if value is None:
            self._property_updated_children = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated_children", six.integer_types)
        self._property_updated_children = value

    # --- updated_models: models whose task reference was re-written ---
    @schema_property("updated_models")
    def updated_models(self):
        return self._property_updated_models

    @updated_models.setter
    def updated_models(self, value):
        if value is None:
            self._property_updated_models = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated_models", six.integer_types)
        self._property_updated_models = value

    # --- deleted_models: number of output models removed ---
    @schema_property("deleted_models")
    def deleted_models(self):
        return self._property_deleted_models

    @deleted_models.setter
    def deleted_models(self, value):
        if value is None:
            self._property_deleted_models = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "deleted_models", six.integer_types)
        self._property_deleted_models = value

    # --- urls: uploaded-file URLs, present only when requested ---
    @schema_property("urls")
    def urls(self):
        return self._property_urls

    @urls.setter
    def urls(self, value):
        if value is None:
            self._property_urls = None
            return
        # Accept either a raw dict (decoded JSON) or a TaskUrls instance.
        if isinstance(value, dict):
            value = TaskUrls.from_dict(value)
        else:
            self.assert_isinstance(value, "urls", TaskUrls)
        self._property_urls = value
class DeleteModelsRequest(Request):
    """
    Delete models from task.

    :param task: ID of the task
    :type task: str
    :param models: The list of models to delete
    :type models: Sequence[dict]
    """

    # Endpoint routing metadata used by the transport layer.
    _service = "tasks"
    _action = "delete_models"
    _version = "2.13"
    _schema = {
        "definitions": {
            "model_type_enum": {"enum": ["input", "output"], "type": "string"}
        },
        "properties": {
            "models": {
                "description": "The list of models to delete",
                "items": {
                    "properties": {
                        "name": {
                            "description": "The task model name",
                            "type": "string",
                        },
                        "type": {
                            "$ref": "#/definitions/model_type_enum",
                            "description": "The task model type",
                        },
                    },
                    "required": ["name", "type"],
                    "type": "object",
                },
                "type": "array",
            },
            "task": {"description": "ID of the task", "type": "string"},
        },
        "required": ["task", "models"],
        "type": "object",
    }

    def __init__(self, task, models, **kwargs):
        super(DeleteModelsRequest, self).__init__(**kwargs)
        self.task = task
        self.models = models

    @schema_property("task")
    def task(self):
        """Return the target task ID."""
        return self._property_task

    @task.setter
    def task(self, value):
        """Store the task ID after checking it is a string; None clears it."""
        if value is not None:
            self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("models")
    def models(self):
        """Return the list of model descriptor dicts to delete."""
        return self._property_models

    @models.setter
    def models(self, value):
        """Store the model descriptors; must be a sequence of dicts."""
        if value is None:
            self._property_models = None
            return
        self.assert_isinstance(value, "models", (list, tuple))
        self.assert_isinstance(value, "models", (dict,), is_array=True)
        self._property_models = value
class DeleteModelsResponse(Response):
    """
    Response of tasks.delete_models endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    """

    # Endpoint routing metadata used by the transport layer.
    _service = "tasks"
    _action = "delete_models"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, updated=None, **kwargs):
        super(DeleteModelsResponse, self).__init__(**kwargs)
        self.updated = updated

    @schema_property("updated")
    def updated(self):
        """Return the update count (None if not present)."""
        return self._property_updated

    @updated.setter
    def updated(self, value):
        """Store the update count; accepts int, integral float, or None."""
        if value is None:
            self._property_updated = None
        else:
            # JSON decoding may yield floats for integer fields; normalize.
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
            self._property_updated = value
class DequeueRequest(Request):
    """
    Remove a task from its queue.
    Fails if task status is not queued.

    :param task: Task ID
    :type task: str
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    """

    # Endpoint routing metadata used by the transport layer.
    _service = "tasks"
    _action = "dequeue"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task"],
        "type": "object",
    }

    def __init__(self, task, status_reason=None, status_message=None, **kwargs):
        super(DequeueRequest, self).__init__(**kwargs)
        self.task = task
        self.status_reason = status_reason
        self.status_message = status_message

    @schema_property("task")
    def task(self):
        """Return the ID of the task to dequeue."""
        return self._property_task

    @task.setter
    def task(self, value):
        """Store the task ID after checking it is a string; None clears it."""
        if value is not None:
            self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("status_reason")
    def status_reason(self):
        """Return the reason recorded for the status change."""
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        """Store the status-change reason; must be a string or None."""
        if value is not None:
            self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        """Return the extra status-change message."""
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        """Store the status-change message; must be a string or None."""
        if value is not None:
            self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value
class DequeueResponse(Response):
    """
    Response of tasks.dequeue endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    :param dequeued: Number of tasks dequeued (0 or 1)
    :type dequeued: int
    """

    # Endpoint routing metadata used by the transport layer.
    _service = "tasks"
    _action = "dequeue"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "dequeued": {
                "description": "Number of tasks dequeued (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, fields=None, dequeued=None, **kwargs):
        super(DequeueResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields
        self.dequeued = dequeued

    @schema_property("updated")
    def updated(self):
        """Return the update count (None if not present)."""
        return self._property_updated

    @updated.setter
    def updated(self, value):
        """Store the update count; accepts int, integral float, or None."""
        if value is None:
            self._property_updated = None
        else:
            # JSON decoding may yield floats for integer fields; normalize.
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
            self._property_updated = value

    @schema_property("fields")
    def fields(self):
        """Return the mapping of updated field names to values."""
        return self._property_fields

    @fields.setter
    def fields(self, value):
        """Store the updated-fields mapping; must be a dict or None."""
        if value is not None:
            self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value

    @schema_property("dequeued")
    def dequeued(self):
        """Return the dequeue count (None if not present)."""
        return self._property_dequeued

    @dequeued.setter
    def dequeued(self, value):
        """Store the dequeue count; accepts int, integral float, or None."""
        if value is None:
            self._property_dequeued = None
        else:
            # JSON decoding may yield floats for integer fields; normalize.
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "dequeued", six.integer_types)
            self._property_dequeued = value
class EditRequest(Request):
    """
    Edit task's details.
    :param task: ID of the task
    :type task: str
    :param force: If not true, call fails if the task status is not 'created'
    :type force: bool
    :param name: Task name Unique within the company.
    :type name: str
    :param tags: User-defined tags list
    :type tags: Sequence[str]
    :param system_tags: System tags list. This field is reserved for system use,
        please don't use it.
    :type system_tags: Sequence[str]
    :param type: Type of task
    :type type: TaskTypeEnum
    :param comment: Free text comment
    :type comment: str
    :param parent: Parent task id Must be a completed task.
    :type parent: str
    :param project: Project ID of the project to which this task is assigned Must
        exist[ab]
    :type project: str
    :param output_dest: Output storage id Must be a reference to an existing
        storage.
    :type output_dest: str
    :param execution: Task execution params
    :type execution: Execution
    :param script: Script info
    :type script: Script
    :param hyperparams: Task hyper params per section
    :type hyperparams: dict
    :param configuration: Task configuration params
    :type configuration: dict
    :param models: Task models
    :type models: TaskModels
    :param container: Docker container parameters
    :type container: dict
    :param runtime: Task runtime mapping
    :type runtime: dict
    """

    # Endpoint routing metadata used by the transport layer.
    _service = "tasks"
    _action = "edit"
    _version = "2.13"
    # JSON schema mirroring the server-side endpoint definition; the
    # "definitions" section describes the nested object types referenced
    # via "$ref" from the properties below.
    _schema = {
        "definitions": {
            "artifact": {
                "properties": {
                    "content_size": {
                        "description": "Raw data length in bytes",
                        "type": "integer",
                    },
                    "display_data": {
                        "description": "User-defined list of key/value pairs, sorted",
                        "items": {"items": {"type": "string"}, "type": "array"},
                        "type": "array",
                    },
                    "hash": {
                        "description": "Hash of entire raw data",
                        "type": "string",
                    },
                    "key": {"description": "Entry key", "type": "string"},
                    "mode": {
                        "$ref": "#/definitions/artifact_mode_enum",
                        "description": "System defined input/output indication",
                    },
                    "timestamp": {
                        "description": "Epoch time when artifact was created",
                        "type": "integer",
                    },
                    "type": {
                        "description": "System defined type",
                        "type": "string",
                    },
                    "type_data": {
                        "$ref": "#/definitions/artifact_type_data",
                        "description": "Additional fields defined by the system",
                    },
                    "uri": {"description": "Raw data location", "type": "string"},
                },
                "required": ["key", "type"],
                "type": "object",
            },
            "artifact_mode_enum": {
                "default": "output",
                "enum": ["input", "output"],
                "type": "string",
            },
            "artifact_type_data": {
                "properties": {
                    "content_type": {
                        "description": "System defined raw data content type",
                        "type": ["string", "null"],
                    },
                    "data_hash": {
                        "description": "Hash of raw data, without any headers or descriptive parts",
                        "type": ["string", "null"],
                    },
                    "preview": {
                        "description": "Description or textual data",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "configuration_item": {
                "properties": {
                    "description": {
                        "description": "The parameter description. Optional",
                        "type": ["string", "null"],
                    },
                    "name": {
                        "description": "Name of the parameter. Should be unique",
                        "type": ["string", "null"],
                    },
                    "type": {
                        "description": "Type of the parameter. Optional",
                        "type": ["string", "null"],
                    },
                    "value": {
                        "description": "Value of the parameter",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "execution": {
                "properties": {
                    "artifacts": {
                        "description": "Task artifacts",
                        "items": {"$ref": "#/definitions/artifact"},
                        "type": ["array", "null"],
                    },
                    "framework": {
                        "description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ",
                        "type": ["string", "null"],
                    },
                    "model_desc": {
                        "additionalProperties": True,
                        "description": "Json object representing the Model descriptors",
                        "type": ["object", "null"],
                    },
                    "model_labels": {
                        "additionalProperties": {"type": "integer"},
                        "description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks",
                        "type": ["object", "null"],
                    },
                    "parameters": {
                        "additionalProperties": True,
                        "description": "Json object containing the Task parameters",
                        "type": ["object", "null"],
                    },
                    "queue": {
                        "description": "Queue ID where task was queued.",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "params_item": {
                "properties": {
                    "description": {
                        "description": "The parameter description. Optional",
                        "type": ["string", "null"],
                    },
                    "name": {
                        "description": "Name of the parameter. The combination of section and name should be unique",
                        "type": ["string", "null"],
                    },
                    "section": {
                        "description": "Section that the parameter belongs to",
                        "type": ["string", "null"],
                    },
                    "type": {
                        "description": "Type of the parameter. Optional",
                        "type": ["string", "null"],
                    },
                    "value": {
                        "description": "Value of the parameter",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "script": {
                "properties": {
                    "binary": {
                        "default": "python",
                        "description": "Binary to use when running the script",
                        "type": ["string", "null"],
                    },
                    "branch": {
                        "description": "Repository branch id If not provided and tag not provided, default repository branch is used.",
                        "type": ["string", "null"],
                    },
                    "diff": {
                        "description": "Uncommitted changes found in the repository when task was run",
                        "type": ["string", "null"],
                    },
                    "entry_point": {
                        "description": "Path to execute within the repository",
                        "type": ["string", "null"],
                    },
                    "repository": {
                        "description": "Name of the repository where the script is located",
                        "type": ["string", "null"],
                    },
                    "requirements": {
                        "description": "A JSON object containing requirements strings by key",
                        "type": ["object", "null"],
                    },
                    "tag": {
                        "description": "Repository tag",
                        "type": ["string", "null"],
                    },
                    "version_num": {
                        "description": "Version (changeset) number. Optional (default is head version) Unused if tag is provided.",
                        "type": ["string", "null"],
                    },
                    "working_dir": {
                        "description": "Path to the folder from which to run the script Default - root folder of repository",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "section_params": {
                "additionalProperties": {"$ref": "#/definitions/params_item"},
                "description": "Task section params",
                "type": "object",
            },
            "task_model_item": {
                "properties": {
                    "model": {"description": "The model ID", "type": "string"},
                    "name": {
                        "description": "The task model name",
                        "type": "string",
                    },
                },
                "required": ["name", "model"],
                "type": "object",
            },
            "task_models": {
                "properties": {
                    "input": {
                        "description": "The list of task input models",
                        "items": {"$ref": "#/definitions/task_model_item"},
                        "type": ["array", "null"],
                    },
                    "output": {
                        "description": "The list of task output models",
                        "items": {"$ref": "#/definitions/task_model_item"},
                        "type": ["array", "null"],
                    },
                },
                "type": "object",
            },
            "task_type_enum": {
                "enum": [
                    "training",
                    "testing",
                    "inference",
                    "data_processing",
                    "application",
                    "monitor",
                    "controller",
                    "optimizer",
                    "service",
                    "qc",
                    "custom",
                ],
                "type": "string",
            },
        },
        "properties": {
            "comment": {"description": "Free text comment ", "type": "string"},
            "configuration": {
                "additionalProperties": {"$ref": "#/definitions/configuration_item"},
                "description": "Task configuration params",
                "type": "object",
            },
            "container": {
                "type": "object",
                "description": "Docker container parameters",
                "additionalProperties": {"type": "string"},
            },
            "execution": {
                "$ref": "#/definitions/execution",
                "description": "Task execution params",
            },
            "force": {
                "default": False,
                "description": "If not true, call fails if the task status is not 'created'",
                "type": "boolean",
            },
            "hyperparams": {
                "additionalProperties": {"$ref": "#/definitions/section_params"},
                "description": "Task hyper params per section",
                "type": "object",
            },
            "models": {
                "$ref": "#/definitions/task_models",
                "description": "Task models",
            },
            "name": {
                "description": "Task name Unique within the company.",
                "type": "string",
            },
            "output_dest": {
                "description": "Output storage id Must be a reference to an existing storage.",
                "type": "string",
            },
            "parent": {
                "description": "Parent task id Must be a completed task.",
                "type": "string",
            },
            "project": {
                "description": "Project ID of the project to which this task is assigned Must exist[ab]",
                "type": "string",
            },
            "runtime": {
                "description": "Task runtime mapping",
                "type": ["object", "null"],
                "additionalProperties": True,
            },
            "script": {"$ref": "#/definitions/script", "description": "Script info"},
            "system_tags": {
                "description": "System tags list. This field is reserved for system use, please don't use it.",
                "items": {"type": "string"},
                "type": "array",
            },
            "tags": {
                "description": "User-defined tags list",
                "items": {"type": "string"},
                "type": "array",
            },
            "task": {"description": "ID of the task", "type": "string"},
            "type": {
                "$ref": "#/definitions/task_type_enum",
                "description": "Type of task",
            },
        },
        "required": ["task"],
        "type": "object",
    }

    def __init__(
        self,
        task,
        force=False,
        name=None,
        tags=None,
        system_tags=None,
        type=None,
        comment=None,
        parent=None,
        project=None,
        output_dest=None,
        execution=None,
        script=None,
        hyperparams=None,
        configuration=None,
        models=None,
        container=None,
        runtime=None,
        **kwargs
    ):
        # Note: 'type' shadows the builtin here; the name is fixed by the
        # endpoint's field name and cannot be changed without breaking callers.
        # Every assignment below routes through the matching schema_property
        # setter, which validates (and where needed converts) the value.
        super(EditRequest, self).__init__(**kwargs)
        self.task = task
        self.force = force
        self.name = name
        self.tags = tags
        self.system_tags = system_tags
        self.type = type
        self.comment = comment
        self.parent = parent
        self.project = project
        self.output_dest = output_dest
        self.execution = execution
        self.script = script
        self.hyperparams = hyperparams
        self.configuration = configuration
        self.models = models
        self.container = container
        self.runtime = runtime

    # --- task: ID of the task to edit (required) ---
    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    # --- force: allow editing a task whose status is not 'created' ---
    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value

    # --- name: task name ---
    @schema_property("name")
    def name(self):
        return self._property_name

    @name.setter
    def name(self, value):
        if value is None:
            self._property_name = None
            return
        self.assert_isinstance(value, "name", six.string_types)
        self._property_name = value

    # --- tags: user-defined tag strings ---
    @schema_property("tags")
    def tags(self):
        return self._property_tags

    @tags.setter
    def tags(self, value):
        if value is None:
            self._property_tags = None
            return
        # Container and element types are checked separately.
        self.assert_isinstance(value, "tags", (list, tuple))
        self.assert_isinstance(value, "tags", six.string_types, is_array=True)
        self._property_tags = value

    # --- system_tags: reserved for system use ---
    @schema_property("system_tags")
    def system_tags(self):
        return self._property_system_tags

    @system_tags.setter
    def system_tags(self, value):
        if value is None:
            self._property_system_tags = None
            return
        self.assert_isinstance(value, "system_tags", (list, tuple))
        self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
        self._property_system_tags = value

    # --- type: task type (TaskTypeEnum or its string value) ---
    @schema_property("type")
    def type(self):
        return self._property_type

    @type.setter
    def type(self, value):
        if value is None:
            self._property_type = None
            return
        if isinstance(value, six.string_types):
            try:
                value = TaskTypeEnum(value)
            except ValueError:
                # Unknown enum value: keep the raw string rather than failing.
                pass
        else:
            self.assert_isinstance(value, "type", enum.Enum)
        self._property_type = value

    # --- comment: free-text comment ---
    @schema_property("comment")
    def comment(self):
        return self._property_comment

    @comment.setter
    def comment(self, value):
        if value is None:
            self._property_comment = None
            return
        self.assert_isinstance(value, "comment", six.string_types)
        self._property_comment = value

    # --- parent: parent task ID ---
    @schema_property("parent")
    def parent(self):
        return self._property_parent

    @parent.setter
    def parent(self, value):
        if value is None:
            self._property_parent = None
            return
        self.assert_isinstance(value, "parent", six.string_types)
        self._property_parent = value

    # --- project: project ID the task is assigned to ---
    @schema_property("project")
    def project(self):
        return self._property_project

    @project.setter
    def project(self, value):
        if value is None:
            self._property_project = None
            return
        self.assert_isinstance(value, "project", six.string_types)
        self._property_project = value

    # --- output_dest: output storage ID ---
    @schema_property("output_dest")
    def output_dest(self):
        return self._property_output_dest

    @output_dest.setter
    def output_dest(self, value):
        if value is None:
            self._property_output_dest = None
            return
        self.assert_isinstance(value, "output_dest", six.string_types)
        self._property_output_dest = value

    # --- execution: Execution object (dicts are converted) ---
    @schema_property("execution")
    def execution(self):
        return self._property_execution

    @execution.setter
    def execution(self, value):
        if value is None:
            self._property_execution = None
            return
        if isinstance(value, dict):
            value = Execution.from_dict(value)
        else:
            self.assert_isinstance(value, "execution", Execution)
        self._property_execution = value

    # --- hyperparams: section name -> SectionParams (dicts are converted) ---
    @schema_property("hyperparams")
    def hyperparams(self):
        return self._property_hyperparams

    @hyperparams.setter
    def hyperparams(self, value):
        if value is None:
            self._property_hyperparams = None
            return
        self.assert_isinstance(value, "hyperparams", dict)
        self.assert_isinstance(
            value.keys(), "hyperparams_keys", six.string_types, is_array=True
        )
        self.assert_isinstance(
            value.values(), "hyperparams_values", (SectionParams, dict), is_array=True
        )
        # Normalize: plain dict values become SectionParams instances.
        value = dict(
            (k, SectionParams(**v) if isinstance(v, dict) else v)
            for k, v in value.items()
        )
        self._property_hyperparams = value

    # --- configuration: name -> ConfigurationItem (dicts are converted) ---
    @schema_property("configuration")
    def configuration(self):
        return self._property_configuration

    @configuration.setter
    def configuration(self, value):
        if value is None:
            self._property_configuration = None
            return
        self.assert_isinstance(value, "configuration", dict)
        self.assert_isinstance(
            value.keys(), "configuration_keys", six.string_types, is_array=True
        )
        self.assert_isinstance(
            value.values(),
            "configuration_values",
            (ConfigurationItem, dict),
            is_array=True,
        )
        # Normalize: plain dict values become ConfigurationItem instances.
        value = dict(
            (k, ConfigurationItem(**v) if isinstance(v, dict) else v)
            for k, v in value.items()
        )
        self._property_configuration = value

    # --- script: Script object (dicts are converted) ---
    @schema_property("script")
    def script(self):
        return self._property_script

    @script.setter
    def script(self, value):
        if value is None:
            self._property_script = None
            return
        if isinstance(value, dict):
            value = Script.from_dict(value)
        else:
            self.assert_isinstance(value, "script", Script)
        self._property_script = value

    # --- models: TaskModels object (dicts are converted) ---
    @schema_property("models")
    def models(self):
        return self._property_models

    @models.setter
    def models(self, value):
        if value is None:
            self._property_models = None
            return
        if isinstance(value, dict):
            value = TaskModels.from_dict(value)
        else:
            self.assert_isinstance(value, "models", TaskModels)
        self._property_models = value

    # --- container: docker container parameter mapping ---
    @schema_property("container")
    def container(self):
        return self._property_container

    @container.setter
    def container(self, value):
        if value is None:
            self._property_container = None
            return
        self.assert_isinstance(value, "container", dict)
        self._property_container = value

    # --- runtime: free-form task runtime mapping ---
    @schema_property("runtime")
    def runtime(self):
        return self._property_runtime

    @runtime.setter
    def runtime(self, value):
        if value is None:
            self._property_runtime = None
            return
        self.assert_isinstance(value, "runtime", dict)
        self._property_runtime = value
class EditResponse(Response):
    """
    Response of tasks.edit endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    """

    _service = "tasks"
    _action = "edit"
    _version = "2.13"
    # JSON schema describing this response's payload (used for validation)
    _schema = {
        "definitions": {},
        "properties": {
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, fields=None, **kwargs):
        super(EditResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        if value is None:
            self._property_updated = None
            return
        # Whole-number floats (common after JSON decoding) are coerced to int
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value

    @schema_property("fields")
    def fields(self):
        return self._property_fields

    @fields.setter
    def fields(self, value):
        if value is None:
            self._property_fields = None
            return
        self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value
class EditConfigurationRequest(Request):
    """
    Add or update task configuration

    :param task: Task ID
    :type task: str
    :param configuration: Task configuration items. The new ones will be added and
        the already existing ones will be updated
    :type configuration: Sequence[ConfigurationItem]
    :param replace_configuration: If set then the all the configuration items will
        be replaced with the provided ones. Otherwise only the provided configuration
        items will be updated or added
    :type replace_configuration: bool
    :param force: If set to True then both new and running task configuration can
        be edited. Otherwise only the new task ones. Default is False
    :type force: bool
    """

    _service = "tasks"
    _action = "edit_configuration"
    _version = "2.13"
    # JSON schema describing this request's payload (used for validation)
    _schema = {
        "definitions": {
            "configuration_item": {
                "properties": {
                    "description": {
                        "description": "The parameter description. Optional",
                        "type": ["string", "null"],
                    },
                    "name": {
                        "description": "Name of the parameter. Should be unique",
                        "type": ["string", "null"],
                    },
                    "type": {
                        "description": "Type of the parameter. Optional",
                        "type": ["string", "null"],
                    },
                    "value": {
                        "description": "Value of the parameter",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            }
        },
        "properties": {
            "configuration": {
                "description": "Task configuration items. The new ones will be added and the already existing ones will be updated",
                "items": {"$ref": "#/definitions/configuration_item"},
                "type": "array",
            },
            "force": {
                "description": "If set to True then both new and running task configuration can be edited. Otherwise only the new task ones. Default is False",
                "type": "boolean",
            },
            "replace_configuration": {
                "description": "If set then the all the configuration items will be replaced with the provided ones. Otherwise only the provided configuration items will be updated or added",
                "type": "boolean",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task", "configuration"],
        "type": "object",
    }

    def __init__(
        self, task, configuration, replace_configuration=None, force=None, **kwargs
    ):
        super(EditConfigurationRequest, self).__init__(**kwargs)
        self.task = task
        self.configuration = configuration
        self.replace_configuration = replace_configuration
        self.force = force

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("configuration")
    def configuration(self):
        return self._property_configuration

    @configuration.setter
    def configuration(self, value):
        if value is None:
            self._property_configuration = None
            return
        self.assert_isinstance(
            value, "configuration", (dict, ConfigurationItem), is_array=True
        )
        # Coerce plain dicts into ConfigurationItem instances
        value = [(ConfigurationItem(**v) if isinstance(v, dict) else v) for v in value]
        self._property_configuration = value

    @schema_property("replace_configuration")
    def replace_configuration(self):
        return self._property_replace_configuration

    @replace_configuration.setter
    def replace_configuration(self, value):
        if value is None:
            self._property_replace_configuration = None
            return
        self.assert_isinstance(value, "replace_configuration", (bool,))
        self._property_replace_configuration = value

    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value
class EditConfigurationResponse(Response):
    """
    Response of tasks.edit_configuration endpoint.

    :param updated: Indicates if the task was updated successfully
    :type updated: int
    """

    _service = "tasks"
    _action = "edit_configuration"
    _version = "2.13"
    # JSON schema describing this response's payload
    _schema = {
        "definitions": {},
        "properties": {
            "updated": {
                "description": "Indicates if the task was updated successfully",
                "type": ["integer", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, updated=None, **kwargs):
        super(EditConfigurationResponse, self).__init__(**kwargs)
        self.updated = updated

    @schema_property("updated")
    def updated(self):
        """Update indicator returned by the server (integer or None)."""
        return self._property_updated

    @updated.setter
    def updated(self, value):
        if value is not None:
            # Whole-number floats (e.g. from JSON decoding) are normalized to int
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value
class EditHyperParamsRequest(Request):
    """
    Add or update task hyper parameters

    :param task: Task ID
    :type task: str
    :param hyperparams: Task hyper parameters. The new ones will be added and the
        already existing ones will be updated
    :type hyperparams: Sequence[ParamsItem]
    :param replace_hyperparams: Can be set to one of the following: 'all' - all the
        hyper parameters will be replaced with the provided ones 'section' - the
        sections that present in the new parameters will be replaced with the provided
        parameters 'none' (the default value) - only the specific parameters will be
        updated or added
    :type replace_hyperparams: ReplaceHyperparamsEnum
    :param force: If set to True then both new and running task hyper params can be
        edited. Otherwise only the new task ones. Default is False
    :type force: bool
    """

    _service = "tasks"
    _action = "edit_hyper_params"
    _version = "2.13"
    # JSON schema describing this request's payload (used for validation)
    _schema = {
        "definitions": {
            "params_item": {
                "properties": {
                    "description": {
                        "description": "The parameter description. Optional",
                        "type": ["string", "null"],
                    },
                    "name": {
                        "description": "Name of the parameter. The combination of section and name should be unique",
                        "type": ["string", "null"],
                    },
                    "section": {
                        "description": "Section that the parameter belongs to",
                        "type": ["string", "null"],
                    },
                    "type": {
                        "description": "Type of the parameter. Optional",
                        "type": ["string", "null"],
                    },
                    "value": {
                        "description": "Value of the parameter",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "replace_hyperparams_enum": {
                "enum": ["none", "section", "all"],
                "type": "string",
            },
        },
        "properties": {
            "force": {
                "description": "If set to True then both new and running task hyper params can be edited. Otherwise only the new task ones. Default is False",
                "type": "boolean",
            },
            "hyperparams": {
                "description": "Task hyper parameters. The new ones will be added and the already existing ones will be updated",
                "items": {"$ref": "#/definitions/params_item"},
                "type": "array",
            },
            "replace_hyperparams": {
                "$ref": "#/definitions/replace_hyperparams_enum",
                "description": "Can be set to one of the following:\n 'all' - all the hyper parameters will be replaced with the provided ones\n 'section' - the sections that present in the new parameters will be replaced with the provided parameters\n 'none' (the default value) - only the specific parameters will be updated or added",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task", "hyperparams"],
        "type": "object",
    }

    def __init__(
        self, task, hyperparams, replace_hyperparams=None, force=None, **kwargs
    ):
        super(EditHyperParamsRequest, self).__init__(**kwargs)
        self.task = task
        self.hyperparams = hyperparams
        self.replace_hyperparams = replace_hyperparams
        self.force = force

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("hyperparams")
    def hyperparams(self):
        return self._property_hyperparams

    @hyperparams.setter
    def hyperparams(self, value):
        if value is None:
            self._property_hyperparams = None
            return
        self.assert_isinstance(value, "hyperparams", (dict, ParamsItem), is_array=True)
        # Coerce plain dicts into ParamsItem instances
        value = [(ParamsItem(**v) if isinstance(v, dict) else v) for v in value]
        self._property_hyperparams = value

    @schema_property("replace_hyperparams")
    def replace_hyperparams(self):
        return self._property_replace_hyperparams

    @replace_hyperparams.setter
    def replace_hyperparams(self, value):
        if value is None:
            self._property_replace_hyperparams = None
            return
        if isinstance(value, six.string_types):
            try:
                value = ReplaceHyperparamsEnum(value)
            except ValueError:
                # NOTE(review): an unrecognized string is kept as-is rather than
                # rejected — consistent with the generated-code pattern in this file
                pass
        else:
            self.assert_isinstance(value, "replace_hyperparams", enum.Enum)
        self._property_replace_hyperparams = value

    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value
class EditHyperParamsResponse(Response):
    """
    Response of tasks.edit_hyper_params endpoint.

    :param updated: Indicates if the task was updated successfully
    :type updated: int
    """

    _service = "tasks"
    _action = "edit_hyper_params"
    _version = "2.13"
    # JSON schema describing this response's payload
    _schema = {
        "definitions": {},
        "properties": {
            "updated": {
                "description": "Indicates if the task was updated successfully",
                "type": ["integer", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, updated=None, **kwargs):
        super(EditHyperParamsResponse, self).__init__(**kwargs)
        self.updated = updated

    @schema_property("updated")
    def updated(self):
        """Update indicator returned by the server (integer or None)."""
        return self._property_updated

    @updated.setter
    def updated(self, value):
        if value is not None:
            # Whole-number floats (e.g. from JSON decoding) are normalized to int
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value
class EnqueueRequest(Request):
    """
    Adds a task into a queue.

    Fails if task state is not 'created'.
    Fails if the following parameters in the task were not filled:
    * execution.script.repository
    * execution.script.entrypoint

    :param queue: Queue id. If not provided, task is added to the default queue.
    :type queue: str
    :param task: Task ID
    :type task: str
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    """

    _service = "tasks"
    _action = "enqueue"
    _version = "2.13"
    # JSON schema describing this request's payload (used for validation)
    _schema = {
        "definitions": {},
        "properties": {
            "queue": {
                "description": "Queue id. If not provided, task is added to the default queue.",
                "type": ["string", "null"],
            },
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task"],
        "type": "object",
    }

    def __init__(
        self, task, queue=None, status_reason=None, status_message=None, **kwargs
    ):
        super(EnqueueRequest, self).__init__(**kwargs)
        self.queue = queue
        self.task = task
        self.status_reason = status_reason
        self.status_message = status_message

    @schema_property("queue")
    def queue(self):
        return self._property_queue

    @queue.setter
    def queue(self, value):
        if value is None:
            self._property_queue = None
            return
        self.assert_isinstance(value, "queue", six.string_types)
        self._property_queue = value

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("status_reason")
    def status_reason(self):
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        if value is None:
            self._property_status_reason = None
            return
        self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        if value is None:
            self._property_status_message = None
            return
        self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value
class EnqueueResponse(Response):
    """
    Response of tasks.enqueue endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    :param queued: Number of tasks queued (0 or 1)
    :type queued: int
    """

    _service = "tasks"
    _action = "enqueue"
    _version = "2.13"
    # JSON schema describing this response's payload
    _schema = {
        "definitions": {},
        "properties": {
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "queued": {
                "description": "Number of tasks queued (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, fields=None, queued=None, **kwargs):
        super(EnqueueResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields
        self.queued = queued

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        if value is None:
            self._property_updated = None
            return
        # Whole-number floats (common after JSON decoding) are coerced to int
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value

    @schema_property("fields")
    def fields(self):
        return self._property_fields

    @fields.setter
    def fields(self, value):
        if value is None:
            self._property_fields = None
            return
        self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value

    @schema_property("queued")
    def queued(self):
        return self._property_queued

    @queued.setter
    def queued(self, value):
        if value is None:
            self._property_queued = None
            return
        # Whole-number floats (common after JSON decoding) are coerced to int
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "queued", six.integer_types)
        self._property_queued = value
class EnqueueManyRequest(Request):
    """
    Enqueue tasks

    :param ids: Entities to move
    :type ids: Sequence[str]
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    """

    _service = "tasks"
    _action = "enqueue_many"
    _version = "2.13"
    # JSON schema describing this request's payload (used for validation)
    _schema = {
        "definitions": {},
        "properties": {
            "ids": {
                "description": "Entities to move",
                "items": {"type": "string"},
                "type": "array",
            },
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
        },
        "required": ["ids"],
        "type": "object",
    }

    def __init__(self, ids, status_reason=None, status_message=None, **kwargs):
        super(EnqueueManyRequest, self).__init__(**kwargs)
        self.ids = ids
        self.status_reason = status_reason
        self.status_message = status_message

    @schema_property("ids")
    def ids(self):
        return self._property_ids

    @ids.setter
    def ids(self, value):
        if value is None:
            self._property_ids = None
            return
        # Must be a sequence of strings
        self.assert_isinstance(value, "ids", (list, tuple))
        self.assert_isinstance(value, "ids", six.string_types, is_array=True)
        self._property_ids = value

    @schema_property("status_reason")
    def status_reason(self):
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        if value is None:
            self._property_status_reason = None
            return
        self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        if value is None:
            self._property_status_message = None
            return
        self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value
class EnqueueManyResponse(Response):
    """
    Response of tasks.enqueue_many endpoint.

    :param enqueued: Number of tasks enqueued
    :type enqueued: int
    """

    _service = "tasks"
    _action = "enqueue_many"
    _version = "2.13"
    # Server-generated schema; NOTE(review): "failures" sits outside "properties"
    # and uses "item" rather than "items" — reproduced as generated.
    _schema = {
        "definitions": {},
        "failures": {
            "item": {
                "error": {
                    "description": "Error info",
                    "properties": {
                        "codes": {"item": {"type": "integer"}, "type": "array"},
                        "data": {"additionalProperties": True, "type": "object"},
                        "msg": {"type": "string"},
                    },
                    "type": "object",
                },
                "id": {"description": "ID of the failed entity", "type": "string"},
                "type": "object",
            },
            "type": "array",
        },
        "properties": {
            "enqueued": {
                "description": "Number of tasks enqueued",
                "type": ["integer", "null"],
            }
        },
    }

    def __init__(self, enqueued=None, **kwargs):
        super(EnqueueManyResponse, self).__init__(**kwargs)
        self.enqueued = enqueued

    @schema_property("enqueued")
    def enqueued(self):
        """Number of tasks that were enqueued (integer or None)."""
        return self._property_enqueued

    @enqueued.setter
    def enqueued(self, value):
        if value is not None:
            # Whole-number floats (e.g. from JSON decoding) are normalized to int
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "enqueued", six.integer_types)
        self._property_enqueued = value
class FailedRequest(Request):
    """
    Indicates that task has failed

    :param force: Allows forcing state change even if transition is not supported
    :type force: bool
    :param task: Task ID
    :type task: str
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    """

    _service = "tasks"
    _action = "failed"
    _version = "2.13"
    # JSON schema describing this request's payload (used for validation)
    _schema = {
        "definitions": {},
        "properties": {
            "force": {
                "default": False,
                "description": "Allows forcing state change even if transition is not supported",
                "type": ["boolean", "null"],
            },
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task"],
        "type": "object",
    }

    def __init__(
        self, task, force=False, status_reason=None, status_message=None, **kwargs
    ):
        super(FailedRequest, self).__init__(**kwargs)
        self.force = force
        self.task = task
        self.status_reason = status_reason
        self.status_message = status_message

    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("status_reason")
    def status_reason(self):
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        if value is None:
            self._property_status_reason = None
            return
        self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        if value is None:
            self._property_status_message = None
            return
        self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value
class FailedResponse(Response):
    """
    Response of tasks.failed endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    """

    _service = "tasks"
    _action = "failed"
    _version = "2.13"
    # JSON schema describing this response's payload
    _schema = {
        "definitions": {},
        "properties": {
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, fields=None, **kwargs):
        super(FailedResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        if value is None:
            self._property_updated = None
            return
        # Whole-number floats (common after JSON decoding) are coerced to int
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value

    @schema_property("fields")
    def fields(self):
        return self._property_fields

    @fields.setter
    def fields(self, value):
        if value is None:
            self._property_fields = None
            return
        self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value
class GetAllRequest(Request):
    """
    Get all the company's tasks and all public tasks

    :param id: List of IDs to filter by
    :type id: Sequence[str]
    :param name: Get only tasks whose name matches this pattern (python regular
        expression syntax)
    :type name: str
    :param user: List of user IDs used to filter results by the task's creating
        user
    :type user: Sequence[str]
    :param project: List of project IDs
    :type project: Sequence[str]
    :param page: Page number, returns a specific page out of the resulting list of
        tasks
    :type page: int
    :param page_size: Page size, specifies the number of results returned in each
        page (last page may contain fewer results)
    :type page_size: int
    :param order_by: List of field names to order by. When search_text is used,
        '@text_score' can be used as a field representing the text score of returned
        documents. Use '-' prefix to specify descending order. Optional, recommended
        when using page
    :type order_by: Sequence[str]
    :param type: List of task types. One or more of: 'training', 'testing',
        'inference', 'data_processing', 'application', 'monitor', 'controller',
        'optimizer', 'service', 'qc' or 'custom' (case insensitive)
    :type type: Sequence[str]
    :param tags: List of task user-defined tags. Use '-' prefix to exclude tags
    :type tags: Sequence[str]
    :param system_tags: List of task system tags. Use '-' prefix to exclude system
        tags
    :type system_tags: Sequence[str]
    :param status: List of task status.
    :type status: Sequence[TaskStatusEnum]
    :param only_fields: List of task field names (nesting is supported using '.',
        e.g. execution.model_labels). If provided, this list defines the query's
        projection (only these fields will be returned for each result entry)
    :type only_fields: Sequence[str]
    :param parent: Parent ID
    :type parent: str
    :param status_changed: List of status changed constraint strings (utcformat,
        epoch) with an optional prefix modifier (>, >=, <, <=)
    :type status_changed: Sequence[str]
    :param search_text: Free text search query
    :type search_text: str
    :param _all_: Multi-field pattern condition (all fields match pattern)
    :type _all_: MultiFieldPatternData
    :param _any_: Multi-field pattern condition (any field matches pattern)
    :type _any_: MultiFieldPatternData
    """

    _service = "tasks"
    _action = "get_all"
    _version = "2.13"
    # JSON schema describing this request's payload (used for validation)
    _schema = {
        "definitions": {
            "multi_field_pattern_data": {
                "properties": {
                    "fields": {
                        "description": "List of field names",
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                    "pattern": {
                        "description": "Pattern string (regex)",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "task_status_enum": {
                "enum": [
                    "created",
                    "queued",
                    "in_progress",
                    "stopped",
                    "published",
                    "publishing",
                    "closed",
                    "failed",
                    "completed",
                    "unknown",
                ],
                "type": "string",
            },
        },
        # "page" is only valid when "page_size" is also provided
        "dependencies": {"page": ["page_size"]},
        "properties": {
            "_all_": {
                "description": "Multi-field pattern condition (all fields match pattern)",
                "oneOf": [
                    {"$ref": "#/definitions/multi_field_pattern_data"},
                    {"type": "null"},
                ],
            },
            "_any_": {
                "description": "Multi-field pattern condition (any field matches pattern)",
                "oneOf": [
                    {"$ref": "#/definitions/multi_field_pattern_data"},
                    {"type": "null"},
                ],
            },
            "id": {
                "description": "List of IDs to filter by",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
            "name": {
                "description": "Get only tasks whose name matches this pattern (python regular expression syntax)",
                "type": ["string", "null"],
            },
            "only_fields": {
                "description": "List of task field names (nesting is supported using '.', e.g. execution.model_labels). If provided, this list defines the query's projection (only these fields will be returned for each result entry)",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
            "order_by": {
                "description": "List of field names to order by. When search_text is used, '@text_score' can be used as a field representing the text score of returned documents. Use '-' prefix to specify descending order. Optional, recommended when using page",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
            "page": {
                "description": "Page number, returns a specific page out of the resulting list of tasks",
                "minimum": 0,
                "type": ["integer", "null"],
            },
            "page_size": {
                "description": "Page size, specifies the number of results returned in each page (last page may contain fewer results)",
                "minimum": 1,
                "type": ["integer", "null"],
            },
            "parent": {"description": "Parent ID", "type": ["string", "null"]},
            "project": {
                "description": "List of project IDs",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
            "search_text": {
                "description": "Free text search query",
                "type": ["string", "null"],
            },
            "status": {
                "description": "List of task status.",
                "items": {"$ref": "#/definitions/task_status_enum"},
                "type": ["array", "null"],
            },
            "status_changed": {
                "description": "List of status changed constraint strings (utcformat, epoch) with an optional prefix modifier (>, >=, <, <=)",
                "items": {"pattern": "^(>=|>|<=|<)?.*$", "type": "string"},
                "type": ["array", "null"],
            },
            "system_tags": {
                "description": "List of task system tags. Use '-' prefix to exclude system tags",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
            "tags": {
                "description": "List of task user-defined tags. Use '-' prefix to exclude tags",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
            "type": {
                "description": "List of task types. One or more of: 'training', 'testing', 'inference', 'data_processing', 'application', 'monitor', 'controller', 'optimizer', 'service', 'qc' or 'custom' (case insensitive)",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
            "user": {
                "description": "List of user IDs used to filter results by the task's creating user",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
        },
        "type": "object",
    }

    def __init__(
        self,
        id=None,
        name=None,
        user=None,
        project=None,
        page=None,
        page_size=None,
        order_by=None,
        type=None,
        tags=None,
        system_tags=None,
        status=None,
        only_fields=None,
        parent=None,
        status_changed=None,
        search_text=None,
        _all_=None,
        _any_=None,
        **kwargs
    ):
        super(GetAllRequest, self).__init__(**kwargs)
        self.id = id
        self.name = name
        self.user = user
        self.project = project
        self.page = page
        self.page_size = page_size
        self.order_by = order_by
        self.type = type
        self.tags = tags
        self.system_tags = system_tags
        self.status = status
        self.only_fields = only_fields
        self.parent = parent
        self.status_changed = status_changed
        self.search_text = search_text
        self._all_ = _all_
        self._any_ = _any_

    @schema_property("id")
    def id(self):
        return self._property_id

    @id.setter
    def id(self, value):
        if value is None:
            self._property_id = None
            return
        self.assert_isinstance(value, "id", (list, tuple))
        self.assert_isinstance(value, "id", six.string_types, is_array=True)
        self._property_id = value

    @schema_property("name")
    def name(self):
        return self._property_name

    @name.setter
    def name(self, value):
        if value is None:
            self._property_name = None
            return
        self.assert_isinstance(value, "name", six.string_types)
        self._property_name = value

    @schema_property("user")
    def user(self):
        return self._property_user

    @user.setter
    def user(self, value):
        if value is None:
            self._property_user = None
            return
        self.assert_isinstance(value, "user", (list, tuple))
        self.assert_isinstance(value, "user", six.string_types, is_array=True)
        self._property_user = value

    @schema_property("project")
    def project(self):
        return self._property_project

    @project.setter
    def project(self, value):
        if value is None:
            self._property_project = None
            return
        self.assert_isinstance(value, "project", (list, tuple))
        self.assert_isinstance(value, "project", six.string_types, is_array=True)
        self._property_project = value

    @schema_property("page")
    def page(self):
        return self._property_page

    @page.setter
    def page(self, value):
        if value is None:
            self._property_page = None
            return
        # Whole-number floats (common after JSON decoding) are coerced to int
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "page", six.integer_types)
        self._property_page = value

    @schema_property("page_size")
    def page_size(self):
        return self._property_page_size

    @page_size.setter
    def page_size(self, value):
        if value is None:
            self._property_page_size = None
            return
        # Whole-number floats (common after JSON decoding) are coerced to int
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "page_size", six.integer_types)
        self._property_page_size = value

    @schema_property("order_by")
    def order_by(self):
        return self._property_order_by

    @order_by.setter
    def order_by(self, value):
        if value is None:
            self._property_order_by = None
            return
        self.assert_isinstance(value, "order_by", (list, tuple))
        self.assert_isinstance(value, "order_by", six.string_types, is_array=True)
        self._property_order_by = value

    @schema_property("type")
    def type(self):
        return self._property_type

    @type.setter
    def type(self, value):
        if value is None:
            self._property_type = None
            return
        self.assert_isinstance(value, "type", (list, tuple))
        self.assert_isinstance(value, "type", six.string_types, is_array=True)
        self._property_type = value

    @schema_property("tags")
    def tags(self):
        return self._property_tags

    @tags.setter
    def tags(self, value):
        if value is None:
            self._property_tags = None
            return
        self.assert_isinstance(value, "tags", (list, tuple))
        self.assert_isinstance(value, "tags", six.string_types, is_array=True)
        self._property_tags = value

    @schema_property("system_tags")
    def system_tags(self):
        return self._property_system_tags

    @system_tags.setter
    def system_tags(self, value):
        if value is None:
            self._property_system_tags = None
            return
        self.assert_isinstance(value, "system_tags", (list, tuple))
        self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
        self._property_system_tags = value

    @schema_property("status")
    def status(self):
        return self._property_status

    @status.setter
    def status(self, value):
        if value is None:
            self._property_status = None
            return
        self.assert_isinstance(value, "status", (list, tuple))
        # Strings are converted to TaskStatusEnum members; a mixed list is
        # converted element-wise, otherwise all elements must already be enums
        if any(isinstance(v, six.string_types) for v in value):
            value = [
                TaskStatusEnum(v) if isinstance(v, six.string_types) else v
                for v in value
            ]
        else:
            self.assert_isinstance(value, "status", TaskStatusEnum, is_array=True)
        self._property_status = value

    @schema_property("only_fields")
    def only_fields(self):
        return self._property_only_fields

    @only_fields.setter
    def only_fields(self, value):
        if value is None:
            self._property_only_fields = None
            return
        self.assert_isinstance(value, "only_fields", (list, tuple))
        self.assert_isinstance(value, "only_fields", six.string_types, is_array=True)
        self._property_only_fields = value

    @schema_property("parent")
    def parent(self):
        return self._property_parent

    @parent.setter
    def parent(self, value):
        if value is None:
            self._property_parent = None
            return
        self.assert_isinstance(value, "parent", six.string_types)
        self._property_parent = value

    @schema_property("status_changed")
    def status_changed(self):
        return self._property_status_changed

    @status_changed.setter
    def status_changed(self, value):
        if value is None:
            self._property_status_changed = None
            return
        self.assert_isinstance(value, "status_changed", (list, tuple))
        self.assert_isinstance(value, "status_changed", six.string_types, is_array=True)
        self._property_status_changed = value

    @schema_property("search_text")
    def search_text(self):
        return self._property_search_text

    @search_text.setter
    def search_text(self, value):
        if value is None:
            self._property_search_text = None
            return
        self.assert_isinstance(value, "search_text", six.string_types)
        self._property_search_text = value

    @schema_property("_all_")
    def _all_(self):
        return self._property__all_

    @_all_.setter
    def _all_(self, value):
        if value is None:
            self._property__all_ = None
            return
        # A plain dict is deserialized into a MultiFieldPatternData instance
        if isinstance(value, dict):
            value = MultiFieldPatternData.from_dict(value)
        else:
            self.assert_isinstance(value, "_all_", MultiFieldPatternData)
        self._property__all_ = value

    @schema_property("_any_")
    def _any_(self):
        return self._property__any_

    @_any_.setter
    def _any_(self, value):
        if value is None:
            self._property__any_ = None
            return
        # A plain dict is deserialized into a MultiFieldPatternData instance
        if isinstance(value, dict):
            value = MultiFieldPatternData.from_dict(value)
        else:
            self.assert_isinstance(value, "_any_", MultiFieldPatternData)
        self._property__any_ = value
class GetAllResponse(Response):
    """
    Response of tasks.get_all endpoint.

    :param tasks: List of tasks
    :type tasks: Sequence[Task]
    """

    # Endpoint routing metadata used by the API client machinery.
    _service = "tasks"
    _action = "get_all"
    _version = "2.13"
    # Auto-generated JSON schema describing the response payload.
    _schema = {
        "definitions": {
            "artifact": {
                "properties": {
                    "content_size": {
                        "description": "Raw data length in bytes",
                        "type": "integer",
                    },
                    "display_data": {
                        "description": "User-defined list of key/value pairs, sorted",
                        "items": {"items": {"type": "string"}, "type": "array"},
                        "type": "array",
                    },
                    "hash": {
                        "description": "Hash of entire raw data",
                        "type": "string",
                    },
                    "key": {"description": "Entry key", "type": "string"},
                    "mode": {
                        "$ref": "#/definitions/artifact_mode_enum",
                        "description": "System defined input/output indication",
                    },
                    "timestamp": {
                        "description": "Epoch time when artifact was created",
                        "type": "integer",
                    },
                    "type": {
                        "description": "System defined type",
                        "type": "string",
                    },
                    "type_data": {
                        "$ref": "#/definitions/artifact_type_data",
                        "description": "Additional fields defined by the system",
                    },
                    "uri": {"description": "Raw data location", "type": "string"},
                },
                "required": ["key", "type"],
                "type": "object",
            },
            "artifact_mode_enum": {
                "default": "output",
                "enum": ["input", "output"],
                "type": "string",
            },
            "artifact_type_data": {
                "properties": {
                    "content_type": {
                        "description": "System defined raw data content type",
                        "type": ["string", "null"],
                    },
                    "data_hash": {
                        "description": "Hash of raw data, without any headers or descriptive parts",
                        "type": ["string", "null"],
                    },
                    "preview": {
                        "description": "Description or textual data",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "configuration_item": {
                "properties": {
                    "description": {
                        "description": "The parameter description. Optional",
                        "type": ["string", "null"],
                    },
                    "name": {
                        "description": "Name of the parameter. Should be unique",
                        "type": ["string", "null"],
                    },
                    "type": {
                        "description": "Type of the parameter. Optional",
                        "type": ["string", "null"],
                    },
                    "value": {
                        "description": "Value of the parameter",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "execution": {
                "properties": {
                    "artifacts": {
                        "description": "Task artifacts",
                        "items": {"$ref": "#/definitions/artifact"},
                        "type": ["array", "null"],
                    },
                    "framework": {
                        "description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ",
                        "type": ["string", "null"],
                    },
                    "model_desc": {
                        "additionalProperties": True,
                        "description": "Json object representing the Model descriptors",
                        "type": ["object", "null"],
                    },
                    "model_labels": {
                        "additionalProperties": {"type": "integer"},
                        "description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks",
                        "type": ["object", "null"],
                    },
                    "parameters": {
                        "additionalProperties": True,
                        "description": "Json object containing the Task parameters",
                        "type": ["object", "null"],
                    },
                    "queue": {
                        "description": "Queue ID where task was queued.",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "last_metrics_event": {
                "properties": {
                    "max_value": {
                        "description": "Maximum value reported",
                        "type": ["number", "null"],
                    },
                    "metric": {
                        "description": "Metric name",
                        "type": ["string", "null"],
                    },
                    "min_value": {
                        "description": "Minimum value reported",
                        "type": ["number", "null"],
                    },
                    "value": {
                        "description": "Last value reported",
                        "type": ["number", "null"],
                    },
                    "variant": {
                        "description": "Variant name",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "last_metrics_variants": {
                "additionalProperties": {
                    "$ref": "#/definitions/last_metrics_event",
                },
                "description": "Last metric events, one for each variant hash",
                "type": "object",
            },
            "output": {
                "properties": {
                    "destination": {
                        "description": "Storage id. This is where output files will be stored.",
                        "type": ["string", "null"],
                    },
                    "error": {
                        "description": "Last error text",
                        "type": ["string", "null"],
                    },
                    "model": {"description": "Model id.", "type": ["string", "null"]},
                    "result": {
                        "description": "Task result. Values: 'success', 'failure'",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "params_item": {
                "properties": {
                    "description": {
                        "description": "The parameter description. Optional",
                        "type": ["string", "null"],
                    },
                    "name": {
                        "description": "Name of the parameter. The combination of section and name should be unique",
                        "type": ["string", "null"],
                    },
                    "section": {
                        "description": "Section that the parameter belongs to",
                        "type": ["string", "null"],
                    },
                    "type": {
                        "description": "Type of the parameter. Optional",
                        "type": ["string", "null"],
                    },
                    "value": {
                        "description": "Value of the parameter",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "script": {
                "properties": {
                    "binary": {
                        "default": "python",
                        "description": "Binary to use when running the script",
                        "type": ["string", "null"],
                    },
                    "branch": {
                        "description": "Repository branch id If not provided and tag not provided, default repository branch is used.",
                        "type": ["string", "null"],
                    },
                    "diff": {
                        "description": "Uncommitted changes found in the repository when task was run",
                        "type": ["string", "null"],
                    },
                    "entry_point": {
                        "description": "Path to execute within the repository",
                        "type": ["string", "null"],
                    },
                    "repository": {
                        "description": "Name of the repository where the script is located",
                        "type": ["string", "null"],
                    },
                    "requirements": {
                        "description": "A JSON object containing requirements strings by key",
                        "type": ["object", "null"],
                    },
                    "tag": {
                        "description": "Repository tag",
                        "type": ["string", "null"],
                    },
                    "version_num": {
                        "description": "Version (changeset) number. Optional (default is head version) Unused if tag is provided.",
                        "type": ["string", "null"],
                    },
                    "working_dir": {
                        "description": "Path to the folder from which to run the script Default - root folder of repository",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "section_params": {
                "additionalProperties": {"$ref": "#/definitions/params_item"},
                "description": "Task section params",
                "type": "object",
            },
            "task": {
                "properties": {
                    "active_duration": {
                        "description": "Task duration time (seconds)",
                        "type": ["integer", "null"],
                    },
                    "comment": {
                        "description": "Free text comment",
                        "type": ["string", "null"],
                    },
                    "company": {
                        "description": "Company ID",
                        "type": ["string", "null"],
                    },
                    "completed": {
                        "description": "Task end time (UTC)",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "configuration": {
                        "additionalProperties": {
                            "$ref": "#/definitions/configuration_item"
                        },
                        "description": "Task configuration params",
                        "type": ["object", "null"],
                    },
                    "container": {
                        "type": "object",
                        "description": "Docker container parameters",
                        "additionalProperties": {"type": "string"},
                    },
                    "created": {
                        "description": "Task creation time (UTC) ",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "execution": {
                        "description": "Task execution params",
                        "oneOf": [
                            {"$ref": "#/definitions/execution"},
                            {"type": "null"},
                        ],
                    },
                    "hyperparams": {
                        "additionalProperties": {
                            "$ref": "#/definitions/section_params"
                        },
                        "description": "Task hyper params per section",
                        "type": ["object", "null"],
                    },
                    "id": {"description": "Task id", "type": ["string", "null"]},
                    "last_change": {
                        "description": "Last time any update was done to the task",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "last_iteration": {
                        "description": "Last iteration reported for this task",
                        "type": ["integer", "null"],
                    },
                    "last_metrics": {
                        "additionalProperties": {
                            "$ref": "#/definitions/last_metrics_variants"
                        },
                        "description": "Last metric variants (hash to events), one for each metric hash",
                        "type": ["object", "null"],
                    },
                    "last_update": {
                        "description": "Last time this task was created, edited, changed or events for this task were reported",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "last_worker": {
                        "description": "ID of last worker that handled the task",
                        "type": ["string", "null"],
                    },
                    "last_worker_report": {
                        "description": "Last time a worker reported while working on this task",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "models": {
                        "description": "Task models",
                        "oneOf": [
                            {"$ref": "#/definitions/task_models"},
                            {"type": "null"},
                        ],
                    },
                    "name": {"description": "Task Name", "type": ["string", "null"]},
                    "output": {
                        "description": "Task output params",
                        "oneOf": [{"$ref": "#/definitions/output"}, {"type": "null"}],
                    },
                    "parent": {
                        "description": "Parent task id",
                        "type": ["string", "null"],
                    },
                    "project": {
                        "description": "Project ID of the project to which this task is assigned",
                        "type": ["string", "null"],
                    },
                    "published": {
                        "description": "Last status change time",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "script": {
                        "description": "Script info",
                        "oneOf": [{"$ref": "#/definitions/script"}, {"type": "null"}],
                    },
                    "started": {
                        "description": "Task start time (UTC)",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "status": {
                        "description": "",
                        "oneOf": [
                            {"$ref": "#/definitions/task_status_enum"},
                            {"type": "null"},
                        ],
                    },
                    "status_changed": {
                        "description": "Last status change time",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "status_message": {
                        "description": "free text string representing info about the status",
                        "type": ["string", "null"],
                    },
                    "status_reason": {
                        "description": "Reason for last status change",
                        "type": ["string", "null"],
                    },
                    "system_tags": {
                        "description": "System tags list. This field is reserved for system use, please don't use it.",
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                    "tags": {
                        "description": "User-defined tags list",
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                    "type": {
                        "description": "Type of task. Values: 'training', 'testing'",
                        "oneOf": [
                            {"$ref": "#/definitions/task_type_enum"},
                            {"type": "null"},
                        ],
                    },
                    "user": {
                        "description": "Associated user id",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "task_model_item": {
                "properties": {
                    "model": {"description": "The model ID", "type": "string"},
                    "name": {
                        "description": "The task model name",
                        "type": "string",
                    },
                },
                "required": ["name", "model"],
                "type": "object",
            },
            "task_models": {
                "properties": {
                    "input": {
                        "description": "The list of task input models",
                        "items": {"$ref": "#/definitions/task_model_item"},
                        "type": ["array", "null"],
                    },
                    "output": {
                        "description": "The list of task output models",
                        "items": {"$ref": "#/definitions/task_model_item"},
                        "type": ["array", "null"],
                    },
                },
                "type": "object",
            },
            "task_status_enum": {
                "enum": [
                    "created",
                    "queued",
                    "in_progress",
                    "stopped",
                    "published",
                    "publishing",
                    "closed",
                    "failed",
                    "completed",
                    "unknown",
                ],
                "type": "string",
            },
            "task_type_enum": {
                "enum": [
                    "training",
                    "testing",
                    "inference",
                    "data_processing",
                    "application",
                    "monitor",
                    "controller",
                    "optimizer",
                    "service",
                    "qc",
                    "custom",
                ],
                "type": "string",
            },
        },
        "properties": {
            "tasks": {
                "description": "List of tasks",
                "items": {"$ref": "#/definitions/task"},
                "type": ["array", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, tasks=None, **kwargs):
        super(GetAllResponse, self).__init__(**kwargs)
        self.tasks = tasks

    @schema_property("tasks")
    def tasks(self):
        # List of Task objects (or None when not set).
        return self._property_tasks

    @tasks.setter
    def tasks(self, value):
        if value is None:
            self._property_tasks = None
            return
        self.assert_isinstance(value, "tasks", (list, tuple))
        if any(isinstance(v, dict) for v in value):
            # Deserialize raw dict entries into Task objects; non-dict entries
            # are passed through as-is (assumed to already be Task instances).
            value = [Task.from_dict(v) if isinstance(v, dict) else v for v in value]
        else:
            self.assert_isinstance(value, "tasks", Task, is_array=True)
        self._property_tasks = value
class GetByIdRequest(Request):
    """
    Gets task information

    :param task: Task ID
    :type task: str
    """

    # Endpoint routing metadata used by the API client machinery.
    _service = "tasks"
    _action = "get_by_id"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {"task": {"description": "Task ID", "type": "string"}},
        "required": ["task"],
        "type": "object",
    }

    def __init__(self, task, **kwargs):
        super(GetByIdRequest, self).__init__(**kwargs)
        self.task = task

    @schema_property("task")
    def task(self):
        # The requested task ID (string) or None.
        return self._property_task

    @task.setter
    def task(self, value):
        # None clears the value; otherwise the task ID must be a string.
        if value is not None:
            self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value
class GetByIdResponse(Response):
    """
    Response of tasks.get_by_id endpoint.

    :param task: Task info
    :type task: Task
    """

    # Endpoint routing metadata used by the API client machinery.
    _service = "tasks"
    _action = "get_by_id"
    _version = "2.13"
    # Auto-generated JSON schema describing the response payload.
    _schema = {
        "definitions": {
            "artifact": {
                "properties": {
                    "content_size": {
                        "description": "Raw data length in bytes",
                        "type": "integer",
                    },
                    "display_data": {
                        "description": "User-defined list of key/value pairs, sorted",
                        "items": {"items": {"type": "string"}, "type": "array"},
                        "type": "array",
                    },
                    "hash": {
                        "description": "Hash of entire raw data",
                        "type": "string",
                    },
                    "key": {"description": "Entry key", "type": "string"},
                    "mode": {
                        "$ref": "#/definitions/artifact_mode_enum",
                        "description": "System defined input/output indication",
                    },
                    "timestamp": {
                        "description": "Epoch time when artifact was created",
                        "type": "integer",
                    },
                    "type": {
                        "description": "System defined type",
                        "type": "string",
                    },
                    "type_data": {
                        "$ref": "#/definitions/artifact_type_data",
                        "description": "Additional fields defined by the system",
                    },
                    "uri": {"description": "Raw data location", "type": "string"},
                },
                "required": ["key", "type"],
                "type": "object",
            },
            "artifact_mode_enum": {
                "default": "output",
                "enum": ["input", "output"],
                "type": "string",
            },
            "artifact_type_data": {
                "properties": {
                    "content_type": {
                        "description": "System defined raw data content type",
                        "type": ["string", "null"],
                    },
                    "data_hash": {
                        "description": "Hash of raw data, without any headers or descriptive parts",
                        "type": ["string", "null"],
                    },
                    "preview": {
                        "description": "Description or textual data",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "configuration_item": {
                "properties": {
                    "description": {
                        "description": "The parameter description. Optional",
                        "type": ["string", "null"],
                    },
                    "name": {
                        "description": "Name of the parameter. Should be unique",
                        "type": ["string", "null"],
                    },
                    "type": {
                        "description": "Type of the parameter. Optional",
                        "type": ["string", "null"],
                    },
                    "value": {
                        "description": "Value of the parameter",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "execution": {
                "properties": {
                    "artifacts": {
                        "description": "Task artifacts",
                        "items": {"$ref": "#/definitions/artifact"},
                        "type": ["array", "null"],
                    },
                    "framework": {
                        "description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ",
                        "type": ["string", "null"],
                    },
                    "model_desc": {
                        "additionalProperties": True,
                        "description": "Json object representing the Model descriptors",
                        "type": ["object", "null"],
                    },
                    "model_labels": {
                        "additionalProperties": {"type": "integer"},
                        "description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks",
                        "type": ["object", "null"],
                    },
                    "parameters": {
                        "additionalProperties": True,
                        "description": "Json object containing the Task parameters",
                        "type": ["object", "null"],
                    },
                    "queue": {
                        "description": "Queue ID where task was queued.",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "last_metrics_event": {
                "properties": {
                    "max_value": {
                        "description": "Maximum value reported",
                        "type": ["number", "null"],
                    },
                    "metric": {
                        "description": "Metric name",
                        "type": ["string", "null"],
                    },
                    "min_value": {
                        "description": "Minimum value reported",
                        "type": ["number", "null"],
                    },
                    "value": {
                        "description": "Last value reported",
                        "type": ["number", "null"],
                    },
                    "variant": {
                        "description": "Variant name",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "last_metrics_variants": {
                "additionalProperties": {
                    "$ref": "#/definitions/last_metrics_event",
                },
                "description": "Last metric events, one for each variant hash",
                "type": "object",
            },
            "output": {
                "properties": {
                    "destination": {
                        "description": "Storage id. This is where output files will be stored.",
                        "type": ["string", "null"],
                    },
                    "error": {
                        "description": "Last error text",
                        "type": ["string", "null"],
                    },
                    "model": {"description": "Model id.", "type": ["string", "null"]},
                    "result": {
                        "description": "Task result. Values: 'success', 'failure'",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "params_item": {
                "properties": {
                    "description": {
                        "description": "The parameter description. Optional",
                        "type": ["string", "null"],
                    },
                    "name": {
                        "description": "Name of the parameter. The combination of section and name should be unique",
                        "type": ["string", "null"],
                    },
                    "section": {
                        "description": "Section that the parameter belongs to",
                        "type": ["string", "null"],
                    },
                    "type": {
                        "description": "Type of the parameter. Optional",
                        "type": ["string", "null"],
                    },
                    "value": {
                        "description": "Value of the parameter",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "script": {
                "properties": {
                    "binary": {
                        "default": "python",
                        "description": "Binary to use when running the script",
                        "type": ["string", "null"],
                    },
                    "branch": {
                        "description": "Repository branch id If not provided and tag not provided, default repository branch is used.",
                        "type": ["string", "null"],
                    },
                    "diff": {
                        "description": "Uncommitted changes found in the repository when task was run",
                        "type": ["string", "null"],
                    },
                    "entry_point": {
                        "description": "Path to execute within the repository",
                        "type": ["string", "null"],
                    },
                    "repository": {
                        "description": "Name of the repository where the script is located",
                        "type": ["string", "null"],
                    },
                    "requirements": {
                        "description": "A JSON object containing requirements strings by key",
                        "type": ["object", "null"],
                    },
                    "tag": {
                        "description": "Repository tag",
                        "type": ["string", "null"],
                    },
                    "version_num": {
                        "description": "Version (changeset) number. Optional (default is head version) Unused if tag is provided.",
                        "type": ["string", "null"],
                    },
                    "working_dir": {
                        "description": "Path to the folder from which to run the script Default - root folder of repository",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "section_params": {
                "additionalProperties": {"$ref": "#/definitions/params_item"},
                "description": "Task section params",
                "type": "object",
            },
            "task": {
                "properties": {
                    "active_duration": {
                        "description": "Task duration time (seconds)",
                        "type": ["integer", "null"],
                    },
                    "comment": {
                        "description": "Free text comment",
                        "type": ["string", "null"],
                    },
                    "company": {
                        "description": "Company ID",
                        "type": ["string", "null"],
                    },
                    "completed": {
                        "description": "Task end time (UTC)",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "configuration": {
                        "additionalProperties": {
                            "$ref": "#/definitions/configuration_item"
                        },
                        "description": "Task configuration params",
                        "type": ["object", "null"],
                    },
                    "container": {
                        "type": "object",
                        "description": "Docker container parameters",
                        "additionalProperties": {"type": "string"},
                    },
                    "created": {
                        "description": "Task creation time (UTC) ",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "execution": {
                        "description": "Task execution params",
                        "oneOf": [
                            {"$ref": "#/definitions/execution"},
                            {"type": "null"},
                        ],
                    },
                    "hyperparams": {
                        "additionalProperties": {
                            "$ref": "#/definitions/section_params"
                        },
                        "description": "Task hyper params per section",
                        "type": ["object", "null"],
                    },
                    "id": {"description": "Task id", "type": ["string", "null"]},
                    "last_change": {
                        "description": "Last time any update was done to the task",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "last_iteration": {
                        "description": "Last iteration reported for this task",
                        "type": ["integer", "null"],
                    },
                    "last_metrics": {
                        "additionalProperties": {
                            "$ref": "#/definitions/last_metrics_variants"
                        },
                        "description": "Last metric variants (hash to events), one for each metric hash",
                        "type": ["object", "null"],
                    },
                    "last_update": {
                        "description": "Last time this task was created, edited, changed or events for this task were reported",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "last_worker": {
                        "description": "ID of last worker that handled the task",
                        "type": ["string", "null"],
                    },
                    "last_worker_report": {
                        "description": "Last time a worker reported while working on this task",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "models": {
                        "description": "Task models",
                        "oneOf": [
                            {"$ref": "#/definitions/task_models"},
                            {"type": "null"},
                        ],
                    },
                    "name": {"description": "Task Name", "type": ["string", "null"]},
                    "output": {
                        "description": "Task output params",
                        "oneOf": [{"$ref": "#/definitions/output"}, {"type": "null"}],
                    },
                    "parent": {
                        "description": "Parent task id",
                        "type": ["string", "null"],
                    },
                    "project": {
                        "description": "Project ID of the project to which this task is assigned",
                        "type": ["string", "null"],
                    },
                    "published": {
                        "description": "Last status change time",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "script": {
                        "description": "Script info",
                        "oneOf": [{"$ref": "#/definitions/script"}, {"type": "null"}],
                    },
                    "started": {
                        "description": "Task start time (UTC)",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "status": {
                        "description": "",
                        "oneOf": [
                            {"$ref": "#/definitions/task_status_enum"},
                            {"type": "null"},
                        ],
                    },
                    "status_changed": {
                        "description": "Last status change time",
                        "format": "date-time",
                        "type": ["string", "null"],
                    },
                    "status_message": {
                        "description": "free text string representing info about the status",
                        "type": ["string", "null"],
                    },
                    "status_reason": {
                        "description": "Reason for last status change",
                        "type": ["string", "null"],
                    },
                    "system_tags": {
                        "description": "System tags list. This field is reserved for system use, please don't use it.",
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                    "tags": {
                        "description": "User-defined tags list",
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                    "type": {
                        "description": "Type of task. Values: 'training', 'testing'",
                        "oneOf": [
                            {"$ref": "#/definitions/task_type_enum"},
                            {"type": "null"},
                        ],
                    },
                    "user": {
                        "description": "Associated user id",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            },
            "task_model_item": {
                "properties": {
                    "model": {"description": "The model ID", "type": "string"},
                    "name": {
                        "description": "The task model name",
                        "type": "string",
                    },
                },
                "required": ["name", "model"],
                "type": "object",
            },
            "task_models": {
                "properties": {
                    "input": {
                        "description": "The list of task input models",
                        "items": {"$ref": "#/definitions/task_model_item"},
                        "type": ["array", "null"],
                    },
                    "output": {
                        "description": "The list of task output models",
                        "items": {"$ref": "#/definitions/task_model_item"},
                        "type": ["array", "null"],
                    },
                },
                "type": "object",
            },
            "task_status_enum": {
                "enum": [
                    "created",
                    "queued",
                    "in_progress",
                    "stopped",
                    "published",
                    "publishing",
                    "closed",
                    "failed",
                    "completed",
                    "unknown",
                ],
                "type": "string",
            },
            "task_type_enum": {
                "enum": [
                    "training",
                    "testing",
                    "inference",
                    "data_processing",
                    "application",
                    "monitor",
                    "controller",
                    "optimizer",
                    "service",
                    "qc",
                    "custom",
                ],
                "type": "string",
            },
        },
        "properties": {
            "task": {
                "description": "Task info",
                "oneOf": [{"$ref": "#/definitions/task"}, {"type": "null"}],
            }
        },
        "type": "object",
    }

    def __init__(self, task=None, **kwargs):
        super(GetByIdResponse, self).__init__(**kwargs)
        self.task = task

    @schema_property("task")
    def task(self):
        # The returned Task object (or None when not set).
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        # Raw dicts are deserialized into a Task; anything else must already
        # be a Task instance.
        if isinstance(value, dict):
            value = Task.from_dict(value)
        else:
            self.assert_isinstance(value, "task", Task)
        self._property_task = value
class GetConfigurationNamesRequest(Request):
    """
    Get the list of task configuration items names

    :param tasks: Task IDs
    :type tasks: Sequence[str]
    :param skip_empty: If set to 'true' then the names for configurations with
        missing values are not returned
    :type skip_empty: bool
    """

    # Endpoint routing metadata used by the API client machinery.
    _service = "tasks"
    _action = "get_configuration_names"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "skip_empty": {
                "default": True,
                "description": "If set to 'true' then the names for configurations with missing values are not returned",
                "type": "boolean",
            },
            "tasks": {
                "description": "Task IDs",
                "items": {"type": "string"},
                "type": "array",
            },
        },
        "required": ["tasks"],
        "type": "object",
    }

    def __init__(self, tasks, skip_empty=True, **kwargs):
        super(GetConfigurationNamesRequest, self).__init__(**kwargs)
        self.tasks = tasks
        self.skip_empty = skip_empty

    @schema_property("tasks")
    def tasks(self):
        # Task IDs to query (list of strings) or None.
        return self._property_tasks

    @tasks.setter
    def tasks(self, value):
        # None clears the value; otherwise require a list/tuple of strings.
        if value is not None:
            self.assert_isinstance(value, "tasks", (list, tuple))
            self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
        self._property_tasks = value

    @schema_property("skip_empty")
    def skip_empty(self):
        # Whether to omit names of configurations with missing values.
        return self._property_skip_empty

    @skip_empty.setter
    def skip_empty(self, value):
        # None clears the value; otherwise a boolean is required.
        if value is not None:
            self.assert_isinstance(value, "skip_empty", (bool,))
        self._property_skip_empty = value
class GetConfigurationNamesResponse(Response):
    """
    Response of tasks.get_configuration_names endpoint.

    :param configurations: Names of task configuration items (keyed by task ID)
    :type configurations: dict
    """

    # Endpoint routing metadata used by the API client machinery.
    _service = "tasks"
    _action = "get_configuration_names"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "configurations": {
                "description": "Names of task configuration items (keyed by task ID)",
                "properties": {
                    "names": {
                        "description": "Configuration names",
                        "items": {"type": "string"},
                        "type": "array",
                    },
                    "task": {"description": "Task ID", "type": "string"},
                },
                "type": ["object", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, configurations=None, **kwargs):
        super(GetConfigurationNamesResponse, self).__init__(**kwargs)
        self.configurations = configurations

    @schema_property("configurations")
    def configurations(self):
        # Mapping of configuration names keyed by task ID, or None.
        return self._property_configurations

    @configurations.setter
    def configurations(self, value):
        # None clears the value; otherwise a dict is required.
        if value is not None:
            self.assert_isinstance(value, "configurations", (dict,))
        self._property_configurations = value
class GetConfigurationsRequest(Request):
    """
    Get the list of task configurations

    :param tasks: Task IDs
    :type tasks: Sequence[str]
    :param names: Names of the configuration items to retrieve. If not passed
        or empty then all the configurations will be retrieved.
    :type names: Sequence[str]
    """

    # Endpoint routing metadata used by the API client machinery.
    _service = "tasks"
    _action = "get_configurations"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "names": {
                "description": "Names of the configuration items to retreive. If not passed or empty then all the configurations will be retreived.",
                "items": {"type": "string"},
                "type": "array",
            },
            "tasks": {
                "description": "Task IDs",
                "items": {"type": "string"},
                "type": "array",
            },
        },
        "required": ["tasks"],
        "type": "object",
    }

    def __init__(self, tasks, names=None, **kwargs):
        super(GetConfigurationsRequest, self).__init__(**kwargs)
        self.tasks = tasks
        self.names = names

    @schema_property("tasks")
    def tasks(self):
        # Task IDs to query (list of strings) or None.
        return self._property_tasks

    @tasks.setter
    def tasks(self, value):
        # None clears the value; otherwise require a list/tuple of strings.
        if value is not None:
            self.assert_isinstance(value, "tasks", (list, tuple))
            self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
        self._property_tasks = value

    @schema_property("names")
    def names(self):
        # Configuration item names to fetch (list of strings) or None.
        return self._property_names

    @names.setter
    def names(self, value):
        # None clears the value; otherwise require a list/tuple of strings.
        if value is not None:
            self.assert_isinstance(value, "names", (list, tuple))
            self.assert_isinstance(value, "names", six.string_types, is_array=True)
        self._property_names = value
class GetConfigurationsResponse(Response):
    """
    Response of tasks.get_configurations endpoint.

    :param configurations: Configurations (keyed by task ID)
    :type configurations: Sequence[dict]
    """

    # Endpoint routing metadata used by the API client machinery.
    _service = "tasks"
    _action = "get_configurations"
    _version = "2.13"
    _schema = {
        "definitions": {
            "configuration_item": {
                "properties": {
                    "description": {
                        "description": "The parameter description. Optional",
                        "type": ["string", "null"],
                    },
                    "name": {
                        "description": "Name of the parameter. Should be unique",
                        "type": ["string", "null"],
                    },
                    "type": {
                        "description": "Type of the parameter. Optional",
                        "type": ["string", "null"],
                    },
                    "value": {
                        "description": "Value of the parameter",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            }
        },
        "properties": {
            "configurations": {
                "description": "Configurations (keyed by task ID)",
                "items": {
                    "properties": {
                        "configuration": {
                            "description": "Configuration list",
                            "items": {"$ref": "#/definitions/configuration_item"},
                            "type": "array",
                        },
                        "task": {"description": "Task ID", "type": "string"},
                    },
                    "type": "object",
                },
                "type": ["array", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, configurations=None, **kwargs):
        super(GetConfigurationsResponse, self).__init__(**kwargs)
        self.configurations = configurations

    @schema_property("configurations")
    def configurations(self):
        # Per-task configuration entries (list of dicts) or None.
        return self._property_configurations

    @configurations.setter
    def configurations(self, value):
        # None clears the value; otherwise require a list/tuple of dicts.
        if value is not None:
            self.assert_isinstance(value, "configurations", (list, tuple))
            self.assert_isinstance(value, "configurations", (dict,), is_array=True)
        self._property_configurations = value
class GetHyperParamsRequest(Request):
    """
    Get the list of task hyper parameters

    :param tasks: Task IDs
    :type tasks: Sequence[str]
    """

    # Endpoint routing metadata used by the API client machinery.
    _service = "tasks"
    _action = "get_hyper_params"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "tasks": {
                "description": "Task IDs",
                "items": {"type": "string"},
                "type": "array",
            }
        },
        "required": ["tasks"],
        "type": "object",
    }

    def __init__(self, tasks, **kwargs):
        super(GetHyperParamsRequest, self).__init__(**kwargs)
        self.tasks = tasks

    @schema_property("tasks")
    def tasks(self):
        # Task IDs to query (list of strings) or None.
        return self._property_tasks

    @tasks.setter
    def tasks(self, value):
        # None clears the value; otherwise require a list/tuple of strings.
        if value is not None:
            self.assert_isinstance(value, "tasks", (list, tuple))
            self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
        self._property_tasks = value
class GetHyperParamsResponse(Response):
    """
    Response of tasks.get_hyper_params endpoint.

    :param params: Hyper parameters (keyed by task ID)
    :type params: Sequence[dict]
    """

    # Endpoint routing metadata used by the API client machinery.
    _service = "tasks"
    _action = "get_hyper_params"
    _version = "2.13"
    _schema = {
        "definitions": {
            "params_item": {
                "properties": {
                    "description": {
                        "description": "The parameter description. Optional",
                        "type": ["string", "null"],
                    },
                    "name": {
                        "description": "Name of the parameter. The combination of section and name should be unique",
                        "type": ["string", "null"],
                    },
                    "section": {
                        "description": "Section that the parameter belongs to",
                        "type": ["string", "null"],
                    },
                    "type": {
                        "description": "Type of the parameter. Optional",
                        "type": ["string", "null"],
                    },
                    "value": {
                        "description": "Value of the parameter",
                        "type": ["string", "null"],
                    },
                },
                "type": "object",
            }
        },
        "properties": {
            "params": {
                "description": "Hyper parameters (keyed by task ID)",
                "items": {
                    "properties": {
                        "hyperparams": {
                            "description": "Hyper parameters",
                            "items": {"$ref": "#/definitions/params_item"},
                            "type": "array",
                        },
                        "task": {"description": "Task ID", "type": "string"},
                    },
                    "type": "object",
                },
                "type": ["array", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, params=None, **kwargs):
        super(GetHyperParamsResponse, self).__init__(**kwargs)
        self.params = params

    @schema_property("params")
    def params(self):
        # Per-task hyper-parameter entries (list of dicts) or None.
        return self._property_params

    @params.setter
    def params(self, value):
        # None clears the value; otherwise require a list/tuple of dicts.
        if value is not None:
            self.assert_isinstance(value, "params", (list, tuple))
            self.assert_isinstance(value, "params", (dict,), is_array=True)
        self._property_params = value
class GetTypesRequest(Request):
    """
    Retrieve the task types used in the specified projects.

    :param projects: The list of projects which tasks will be analyzed. If not
        passed or empty then all the company and public tasks will be analyzed
    :type projects: Sequence[str]
    """

    _service = "tasks"
    _action = "get_types"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "projects": {
                "description": "The list of projects which tasks will be analyzed. If not passed or empty then all the company and public tasks will be analyzed",
                "items": {"type": "string"},
                "type": ["array", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, projects=None, **kwargs):
        super(GetTypesRequest, self).__init__(**kwargs)
        self.projects = projects

    @schema_property("projects")
    def projects(self):
        return self._property_projects

    @projects.setter
    def projects(self, value):
        # Non-None values must be a list/tuple whose items are all strings.
        if value is not None:
            self.assert_isinstance(value, "projects", (list, tuple))
            self.assert_isinstance(value, "projects", six.string_types, is_array=True)
        self._property_projects = value
class GetTypesResponse(Response):
    """
    Response of tasks.get_types endpoint.

    :param types: Unique list of the task types used in the requested projects
    :type types: Sequence[str]
    """

    _service = "tasks"
    _action = "get_types"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "types": {
                "description": "Unique list of the task types used in the requested projects",
                "items": {"type": "string"},
                "type": ["array", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, types=None, **kwargs):
        super(GetTypesResponse, self).__init__(**kwargs)
        self.types = types

    @schema_property("types")
    def types(self):
        return self._property_types

    @types.setter
    def types(self, value):
        # Non-None values must be a list/tuple whose items are all strings.
        if value is not None:
            self.assert_isinstance(value, "types", (list, tuple))
            self.assert_isinstance(value, "types", six.string_types, is_array=True)
        self._property_types = value
class MakePrivateRequest(Request):
    """
    Convert public tasks to private.

    :param ids: Ids of the tasks to convert. Only the tasks originated by the
        company can be converted
    :type ids: Sequence[str]
    """

    _service = "tasks"
    _action = "make_private"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "ids": {
                "description": "Ids of the tasks to convert. Only the tasks originated by the company can be converted",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, ids=None, **kwargs):
        super(MakePrivateRequest, self).__init__(**kwargs)
        self.ids = ids

    @schema_property("ids")
    def ids(self):
        return self._property_ids

    @ids.setter
    def ids(self, value):
        # Non-None values must be a list/tuple whose items are all strings.
        if value is not None:
            self.assert_isinstance(value, "ids", (list, tuple))
            self.assert_isinstance(value, "ids", six.string_types, is_array=True)
        self._property_ids = value
class MakePrivateResponse(Response):
    """
    Response of tasks.make_private endpoint.

    :param updated: Number of tasks updated
    :type updated: int
    """

    _service = "tasks"
    _action = "make_private"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "updated": {
                "description": "Number of tasks updated",
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, **kwargs):
        super(MakePrivateResponse, self).__init__(**kwargs)
        self.updated = updated

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        # Accept whole-valued floats (e.g. from JSON decoding) as integers.
        if value is not None:
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value
class MakePublicRequest(Request):
    """
    Convert company tasks to public.

    :param ids: Ids of the tasks to convert
    :type ids: Sequence[str]
    """

    _service = "tasks"
    _action = "make_public"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "ids": {
                "description": "Ids of the tasks to convert",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, ids=None, **kwargs):
        super(MakePublicRequest, self).__init__(**kwargs)
        self.ids = ids

    @schema_property("ids")
    def ids(self):
        return self._property_ids

    @ids.setter
    def ids(self, value):
        # Non-None values must be a list/tuple whose items are all strings.
        if value is not None:
            self.assert_isinstance(value, "ids", (list, tuple))
            self.assert_isinstance(value, "ids", six.string_types, is_array=True)
        self._property_ids = value
class MakePublicResponse(Response):
    """
    Response of tasks.make_public endpoint.

    :param updated: Number of tasks updated
    :type updated: int
    """

    _service = "tasks"
    _action = "make_public"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "updated": {
                "description": "Number of tasks updated",
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, **kwargs):
        super(MakePublicResponse, self).__init__(**kwargs)
        self.updated = updated

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        # Accept whole-valued floats (e.g. from JSON decoding) as integers.
        if value is not None:
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value
class MoveRequest(Request):
    """
    Move tasks to a project.

    :param ids: Tasks to move
    :type ids: Sequence[str]
    :param project: Target project ID. If not provided, `project_name` must be
        provided.
    :type project: str
    :param project_name: Target project name. If provided and a project with this
        name does not exist, a new project will be created. If not provided, `project`
        must be provided.
    :type project_name: str
    """

    _service = "tasks"
    _action = "move"
    _version = "2.13"
    # JSON schema for request validation; only `ids` is required here —
    # the project/project_name either-or rule is stated in the descriptions.
    _schema = {
        "definitions": {},
        "properties": {
            "ids": {
                "description": "Tasks to move",
                "items": {"type": "string"},
                "type": "array",
            },
            "project": {
                "description": "Target project ID. If not provided, `project_name` must be provided.",
                "type": "string",
            },
            "project_name": {
                "description": "Target project name. If provided and a project with this name does not exist, a new project will be created. If not provided, `project` must be provided.",
                "type": "string",
            },
        },
        "required": ["ids"],
        "type": "object",
    }

    def __init__(self, ids, project=None, project_name=None, **kwargs):
        super(MoveRequest, self).__init__(**kwargs)
        self.ids = ids
        self.project = project
        self.project_name = project_name

    @schema_property("ids")
    def ids(self):
        return self._property_ids

    @ids.setter
    def ids(self, value):
        # None clears the property; otherwise require a list/tuple of strings.
        if value is None:
            self._property_ids = None
            return
        self.assert_isinstance(value, "ids", (list, tuple))
        self.assert_isinstance(value, "ids", six.string_types, is_array=True)
        self._property_ids = value

    @schema_property("project")
    def project(self):
        return self._property_project

    @project.setter
    def project(self, value):
        # None clears the property; otherwise require a string.
        if value is None:
            self._property_project = None
            return
        self.assert_isinstance(value, "project", six.string_types)
        self._property_project = value

    @schema_property("project_name")
    def project_name(self):
        return self._property_project_name

    @project_name.setter
    def project_name(self, value):
        # None clears the property; otherwise require a string.
        if value is None:
            self._property_project_name = None
            return
        self.assert_isinstance(value, "project_name", six.string_types)
        self._property_project_name = value
class MoveResponse(Response):
    """
    Response of tasks.move endpoint.

    Carries no fixed fields; arbitrary response properties are allowed.
    """

    _service = "tasks"
    _action = "move"
    _version = "2.13"
    _schema = {"additionalProperties": True, "definitions": {}, "type": "object"}
class PingRequest(Request):
    """
    Refresh the task's last update time.

    :param task: Task ID
    :type task: str
    """

    _service = "tasks"
    _action = "ping"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {"task": {"description": "Task ID", "type": "string"}},
        "required": ["task"],
        "type": "object",
    }

    def __init__(self, task, **kwargs):
        super(PingRequest, self).__init__(**kwargs)
        self.task = task

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        # Non-None values must be strings.
        if value is not None:
            self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value
class PingResponse(Response):
    """
    Response of tasks.ping endpoint.

    Carries no fields; extra response properties are rejected by the schema.
    """

    _service = "tasks"
    _action = "ping"
    _version = "2.13"
    _schema = {"additionalProperties": False, "definitions": {}, "type": "object"}
class PublishRequest(Request):
    """
    Mark a task status as published. If a model was created, it should be set to ready.

    :param force: If not true, call fails if the task status is not 'stopped'
    :type force: bool
    :param publish_model: Indicates that the task output model (if exists) should
        be published. Optional, the default value is True.
    :type publish_model: bool
    :param task: Task ID
    :type task: str
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    """

    _service = "tasks"
    _action = "publish"
    _version = "2.13"
    # JSON schema for request validation; only `task` is required.
    _schema = {
        "definitions": {},
        "properties": {
            "force": {
                "default": False,
                "description": "If not true, call fails if the task status is not 'stopped'",
                "type": ["boolean", "null"],
            },
            "publish_model": {
                "description": "Indicates that the task output model (if exists) should be published. Optional, the default value is True.",
                "type": ["boolean", "null"],
            },
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task"],
        "type": "object",
    }

    def __init__(
        self,
        task,
        force=False,
        publish_model=None,
        status_reason=None,
        status_message=None,
        **kwargs
    ):
        super(PublishRequest, self).__init__(**kwargs)
        self.force = force
        self.publish_model = publish_model
        self.task = task
        self.status_reason = status_reason
        self.status_message = status_message

    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        # None clears the property; otherwise require a bool.
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value

    @schema_property("publish_model")
    def publish_model(self):
        return self._property_publish_model

    @publish_model.setter
    def publish_model(self, value):
        # None clears the property; otherwise require a bool.
        if value is None:
            self._property_publish_model = None
            return
        self.assert_isinstance(value, "publish_model", (bool,))
        self._property_publish_model = value

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        # None clears the property; otherwise require a string.
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("status_reason")
    def status_reason(self):
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        # None clears the property; otherwise require a string.
        if value is None:
            self._property_status_reason = None
            return
        self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        # None clears the property; otherwise require a string.
        if value is None:
            self._property_status_message = None
            return
        self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value
class PublishResponse(Response):
    """
    Response of tasks.publish endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    """

    _service = "tasks"
    _action = "publish"
    _version = "2.13"
    _schema = {
        "definitions": {},
        "properties": {
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, fields=None, **kwargs):
        super(PublishResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        # Accept whole-valued floats (e.g. from JSON decoding) as integers.
        if value is not None:
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value

    @schema_property("fields")
    def fields(self):
        return self._property_fields

    @fields.setter
    def fields(self, value):
        # Non-None values must be dicts.
        if value is not None:
            self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value
class PublishManyRequest(Request):
    """
    Publish tasks.

    :param ids: Entities to move
    :type ids: Sequence[str]
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    :param force: If not true, call fails if the task status is not 'stopped'
    :type force: bool
    :param publish_model: Indicates that the task output model (if exists) should
        be published. Optional, the default value is True.
    :type publish_model: bool
    """

    _service = "tasks"
    _action = "publish_many"
    _version = "2.13"
    # JSON schema for request validation; only `ids` is required.
    _schema = {
        "definitions": {},
        "properties": {
            "force": {
                "default": False,
                "description": "If not true, call fails if the task status is not 'stopped'",
                "type": "boolean",
            },
            "ids": {
                "description": "Entities to move",
                "items": {"type": "string"},
                "type": "array",
            },
            "publish_model": {
                "description": "Indicates that the task output model (if exists) should be published. Optional, the default value is True.",
                "type": "boolean",
            },
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
        },
        "required": ["ids"],
        "type": "object",
    }

    def __init__(
        self,
        ids,
        status_reason=None,
        status_message=None,
        force=False,
        publish_model=None,
        **kwargs
    ):
        super(PublishManyRequest, self).__init__(**kwargs)
        self.ids = ids
        self.status_reason = status_reason
        self.status_message = status_message
        self.force = force
        self.publish_model = publish_model

    @schema_property("ids")
    def ids(self):
        return self._property_ids

    @ids.setter
    def ids(self, value):
        # None clears the property; otherwise require a list/tuple of strings.
        if value is None:
            self._property_ids = None
            return
        self.assert_isinstance(value, "ids", (list, tuple))
        self.assert_isinstance(value, "ids", six.string_types, is_array=True)
        self._property_ids = value

    @schema_property("status_reason")
    def status_reason(self):
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        # None clears the property; otherwise require a string.
        if value is None:
            self._property_status_reason = None
            return
        self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        # None clears the property; otherwise require a string.
        if value is None:
            self._property_status_message = None
            return
        self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value

    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        # None clears the property; otherwise require a bool.
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value

    @schema_property("publish_model")
    def publish_model(self):
        return self._property_publish_model

    @publish_model.setter
    def publish_model(self, value):
        # None clears the property; otherwise require a bool.
        if value is None:
            self._property_publish_model = None
            return
        self.assert_isinstance(value, "publish_model", (bool,))
        self._property_publish_model = value
class PublishManyResponse(Response):
    """
    Response of tasks.publish_many endpoint.

    :param published: Number of tasks published
    :type published: int
    """

    _service = "tasks"
    _action = "publish_many"
    _version = "2.13"
    # NOTE(review): "failures" sits at the top level of the schema rather than
    # under "properties" — this mirrors the generated service schema; confirm
    # against the server-side definition before changing.
    _schema = {
        "definitions": {},
        "failures": {
            "item": {
                "error": {
                    "description": "Error info",
                    "properties": {
                        "codes": {"item": {"type": "integer"}, "type": "array"},
                        "data": {"additionalProperties": True, "type": "object"},
                        "msg": {"type": "string"},
                    },
                    "type": "object",
                },
                "id": {"description": "ID of the failed entity", "type": "string"},
                "type": "object",
            },
            "type": "array",
        },
        "properties": {
            "published": {
                "description": "Number of tasks published",
                "type": ["integer", "null"],
            },
        },
    }

    def __init__(self, published=None, **kwargs):
        super(PublishManyResponse, self).__init__(**kwargs)
        self.published = published

    @schema_property("published")
    def published(self):
        return self._property_published

    @published.setter
    def published(self, value):
        # None clears the property; whole-valued floats are coerced to int.
        if value is None:
            self._property_published = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "published", six.integer_types)
        self._property_published = value
class ResetRequest(Request):
    """
    Reset a task to its initial state, along with any information stored for it (statistics, frame updates etc.).

    :param force: If not true, call fails if the task status is 'completed'
    :type force: bool
    :param clear_all: Clear script and execution sections completely
    :type clear_all: bool
    :param task: Task ID
    :type task: str
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    :param return_file_urls: If set to 'true' then return the urls of the files
        that were uploaded by this task. Default value is 'false'
    :type return_file_urls: bool
    :param delete_output_models: If set to 'true' then delete output models of this
        task that are not referenced by other tasks. Default value is 'true'
    :type delete_output_models: bool
    """

    _service = "tasks"
    _action = "reset"
    _version = "2.13"
    # JSON schema for request validation; only `task` is required.
    _schema = {
        "definitions": {},
        "properties": {
            "clear_all": {
                "default": False,
                "description": "Clear script and execution sections completely",
                "type": ["boolean", "null"],
            },
            "delete_output_models": {
                "description": "If set to 'true' then delete output models of this task that are not referenced by other tasks. Default value is 'true'",
                "type": "boolean",
            },
            "force": {
                "default": False,
                "description": "If not true, call fails if the task status is 'completed'",
                "type": ["boolean", "null"],
            },
            "return_file_urls": {
                "description": "If set to 'true' then return the urls of the files that were uploaded by this task. Default value is 'false'",
                "type": "boolean",
            },
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task"],
        "type": "object",
    }

    def __init__(
        self,
        task,
        force=False,
        clear_all=False,
        status_reason=None,
        status_message=None,
        return_file_urls=None,
        delete_output_models=None,
        **kwargs
    ):
        super(ResetRequest, self).__init__(**kwargs)
        self.force = force
        self.clear_all = clear_all
        self.task = task
        self.status_reason = status_reason
        self.status_message = status_message
        self.return_file_urls = return_file_urls
        self.delete_output_models = delete_output_models

    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        # None clears the property; otherwise require a bool.
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value

    @schema_property("clear_all")
    def clear_all(self):
        return self._property_clear_all

    @clear_all.setter
    def clear_all(self, value):
        # None clears the property; otherwise require a bool.
        if value is None:
            self._property_clear_all = None
            return
        self.assert_isinstance(value, "clear_all", (bool,))
        self._property_clear_all = value

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        # None clears the property; otherwise require a string.
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("status_reason")
    def status_reason(self):
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        # None clears the property; otherwise require a string.
        if value is None:
            self._property_status_reason = None
            return
        self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        # None clears the property; otherwise require a string.
        if value is None:
            self._property_status_message = None
            return
        self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value

    @schema_property("return_file_urls")
    def return_file_urls(self):
        return self._property_return_file_urls

    @return_file_urls.setter
    def return_file_urls(self, value):
        # None clears the property; otherwise require a bool.
        if value is None:
            self._property_return_file_urls = None
            return
        self.assert_isinstance(value, "return_file_urls", (bool,))
        self._property_return_file_urls = value

    @schema_property("delete_output_models")
    def delete_output_models(self):
        return self._property_delete_output_models

    @delete_output_models.setter
    def delete_output_models(self, value):
        # None clears the property; otherwise require a bool.
        if value is None:
            self._property_delete_output_models = None
            return
        self.assert_isinstance(value, "delete_output_models", (bool,))
        self._property_delete_output_models = value
class ResetResponse(Response):
    """
    Response of tasks.reset endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    :param deleted_indices: List of deleted ES indices that were removed as part of
        the reset process
    :type deleted_indices: Sequence[str]
    :param dequeued: Response from queues.remove_task
    :type dequeued: dict
    :param frames: Response from frames.rollback
    :type frames: dict
    :param events: Response from events.delete_for_task
    :type events: dict
    :param deleted_models: Number of output models deleted by the reset
    :type deleted_models: int
    :param urls: The urls of the files that were uploaded by this task. Returned if
        the 'return_file_urls' was set to True
    :type urls: TaskUrls
    """

    _service = "tasks"
    _action = "reset"
    _version = "2.13"
    # JSON schema describing the response payload; `urls` references the
    # `task_urls` definition (materialized as a TaskUrls object by the setter).
    _schema = {
        "definitions": {
            "task_urls": {
                "properties": {
                    "artifact_urls": {
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                    "event_urls": {
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                    "model_urls": {
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                },
                "type": "object",
            }
        },
        "properties": {
            "deleted_indices": {
                "description": "List of deleted ES indices that were removed as part of the reset process",
                "items": {"type": "string"},
                "type": ["array", "null"],
            },
            "deleted_models": {
                "description": "Number of output models deleted by the reset",
                "type": ["integer", "null"],
            },
            "dequeued": {
                "additionalProperties": True,
                "description": "Response from queues.remove_task",
                "type": ["object", "null"],
            },
            "events": {
                "additionalProperties": True,
                "description": "Response from events.delete_for_task",
                "type": ["object", "null"],
            },
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "frames": {
                "additionalProperties": True,
                "description": "Response from frames.rollback",
                "type": ["object", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
            "urls": {
                "description": "The urls of the files that were uploaded by this task. Returned if the 'return_file_urls' was set to True",
                "oneOf": [{"$ref": "#/definitions/task_urls"}, {"type": "null"}],
            },
        },
        "type": "object",
    }

    def __init__(
        self,
        updated=None,
        fields=None,
        deleted_indices=None,
        dequeued=None,
        frames=None,
        events=None,
        deleted_models=None,
        urls=None,
        **kwargs
    ):
        super(ResetResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields
        self.deleted_indices = deleted_indices
        self.dequeued = dequeued
        self.frames = frames
        self.events = events
        self.deleted_models = deleted_models
        self.urls = urls

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        # None clears the property; whole-valued floats are coerced to int.
        if value is None:
            self._property_updated = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value

    @schema_property("fields")
    def fields(self):
        return self._property_fields

    @fields.setter
    def fields(self, value):
        # None clears the property; otherwise require a dict.
        if value is None:
            self._property_fields = None
            return
        self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value

    @schema_property("deleted_indices")
    def deleted_indices(self):
        return self._property_deleted_indices

    @deleted_indices.setter
    def deleted_indices(self, value):
        # None clears the property; otherwise require a list/tuple of strings.
        if value is None:
            self._property_deleted_indices = None
            return
        self.assert_isinstance(value, "deleted_indices", (list, tuple))
        self.assert_isinstance(
            value, "deleted_indices", six.string_types, is_array=True
        )
        self._property_deleted_indices = value

    @schema_property("dequeued")
    def dequeued(self):
        return self._property_dequeued

    @dequeued.setter
    def dequeued(self, value):
        # None clears the property; otherwise require a dict.
        if value is None:
            self._property_dequeued = None
            return
        self.assert_isinstance(value, "dequeued", (dict,))
        self._property_dequeued = value

    @schema_property("frames")
    def frames(self):
        return self._property_frames

    @frames.setter
    def frames(self, value):
        # None clears the property; otherwise require a dict.
        if value is None:
            self._property_frames = None
            return
        self.assert_isinstance(value, "frames", (dict,))
        self._property_frames = value

    @schema_property("events")
    def events(self):
        return self._property_events

    @events.setter
    def events(self, value):
        # None clears the property; otherwise require a dict.
        if value is None:
            self._property_events = None
            return
        self.assert_isinstance(value, "events", (dict,))
        self._property_events = value

    @schema_property("deleted_models")
    def deleted_models(self):
        return self._property_deleted_models

    @deleted_models.setter
    def deleted_models(self, value):
        # None clears the property; whole-valued floats are coerced to int.
        if value is None:
            self._property_deleted_models = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "deleted_models", six.integer_types)
        self._property_deleted_models = value

    @schema_property("urls")
    def urls(self):
        return self._property_urls

    @urls.setter
    def urls(self, value):
        # Dicts are promoted to TaskUrls; anything else must already be one.
        if value is None:
            self._property_urls = None
            return
        if isinstance(value, dict):
            value = TaskUrls.from_dict(value)
        else:
            self.assert_isinstance(value, "urls", TaskUrls)
        self._property_urls = value
class ResetManyRequest(Request):
    """
    Reset tasks.

    :param ids: Entities to move
    :type ids: Sequence[str]
    :param force: If not true, call fails if the task status is 'completed'
    :type force: bool
    :param clear_all: Clear script and execution sections completely
    :type clear_all: bool
    :param return_file_urls: If set to 'true' then return the urls of the files
        that were uploaded by the tasks. Default value is 'false'
    :type return_file_urls: bool
    :param delete_output_models: If set to 'true' then delete output models of the
        tasks that are not referenced by other tasks. Default value is 'true'
    :type delete_output_models: bool
    """

    _service = "tasks"
    _action = "reset_many"
    _version = "2.13"
    # JSON schema for request validation; only `ids` is required.
    _schema = {
        "definitions": {},
        "properties": {
            "clear_all": {
                "default": False,
                "description": "Clear script and execution sections completely",
                "type": "boolean",
            },
            "delete_output_models": {
                "description": "If set to 'true' then delete output models of the tasks that are not referenced by other tasks. Default value is 'true'",
                "type": "boolean",
            },
            "force": {
                "default": False,
                "description": "If not true, call fails if the task status is 'completed'",
                "type": "boolean",
            },
            "ids": {
                "description": "Entities to move",
                "items": {"type": "string"},
                "type": "array",
            },
            "return_file_urls": {
                "description": "If set to 'true' then return the urls of the files that were uploaded by the tasks. Default value is 'false'",
                "type": "boolean",
            },
        },
        "required": ["ids"],
        "type": "object",
    }

    def __init__(
        self,
        ids,
        force=False,
        clear_all=False,
        return_file_urls=None,
        delete_output_models=None,
        **kwargs
    ):
        super(ResetManyRequest, self).__init__(**kwargs)
        self.ids = ids
        self.force = force
        self.clear_all = clear_all
        self.return_file_urls = return_file_urls
        self.delete_output_models = delete_output_models

    @schema_property("ids")
    def ids(self):
        return self._property_ids

    @ids.setter
    def ids(self, value):
        # None clears the property; otherwise require a list/tuple of strings.
        if value is None:
            self._property_ids = None
            return
        self.assert_isinstance(value, "ids", (list, tuple))
        self.assert_isinstance(value, "ids", six.string_types, is_array=True)
        self._property_ids = value

    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        # None clears the property; otherwise require a bool.
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value

    @schema_property("clear_all")
    def clear_all(self):
        return self._property_clear_all

    @clear_all.setter
    def clear_all(self, value):
        # None clears the property; otherwise require a bool.
        if value is None:
            self._property_clear_all = None
            return
        self.assert_isinstance(value, "clear_all", (bool,))
        self._property_clear_all = value

    @schema_property("return_file_urls")
    def return_file_urls(self):
        return self._property_return_file_urls

    @return_file_urls.setter
    def return_file_urls(self, value):
        # None clears the property; otherwise require a bool.
        if value is None:
            self._property_return_file_urls = None
            return
        self.assert_isinstance(value, "return_file_urls", (bool,))
        self._property_return_file_urls = value

    @schema_property("delete_output_models")
    def delete_output_models(self):
        return self._property_delete_output_models

    @delete_output_models.setter
    def delete_output_models(self, value):
        # None clears the property; otherwise require a bool.
        if value is None:
            self._property_delete_output_models = None
            return
        self.assert_isinstance(value, "delete_output_models", (bool,))
        self._property_delete_output_models = value
class ResetManyResponse(Response):
    """
    Response of tasks.reset_many endpoint.

    :param reset: Number of tasks reset
    :type reset: int
    :param dequeued: Number of tasks dequeued
    :type dequeued: dict
    :param deleted_models: Number of output models deleted by the reset
    :type deleted_models: int
    :param urls: The urls of the files that were uploaded by the tasks. Returned if
        the 'return_file_urls' was set to 'true'
    :type urls: TaskUrls
    """

    _service = "tasks"
    _action = "reset_many"
    _version = "2.13"
    # NOTE(review): "failures" sits at the top level of the schema rather than
    # under "properties" — this mirrors the generated service schema; confirm
    # against the server-side definition before changing.
    _schema = {
        "definitions": {
            "task_urls": {
                "properties": {
                    "artifact_urls": {
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                    "event_urls": {
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                    "model_urls": {
                        "items": {"type": "string"},
                        "type": ["array", "null"],
                    },
                },
                "type": "object",
            }
        },
        "failures": {
            "item": {
                "error": {
                    "description": "Error info",
                    "properties": {
                        "codes": {"item": {"type": "integer"}, "type": "array"},
                        "data": {"additionalProperties": True, "type": "object"},
                        "msg": {"type": "string"},
                    },
                    "type": "object",
                },
                "id": {"description": "ID of the failed entity", "type": "string"},
                "type": "object",
            },
            "type": "array",
        },
        "properties": {
            "deleted_models": {
                "description": "Number of output models deleted by the reset",
                "type": ["integer", "null"],
            },
            "dequeued": {
                "additionalProperties": True,
                "description": "Number of tasks dequeued",
                "type": ["object", "null"],
            },
            "reset": {
                "description": "Number of tasks reset",
                "type": ["integer", "null"],
            },
            "urls": {
                "description": "The urls of the files that were uploaded by the tasks. Returned if the 'return_file_urls' was set to 'true'",
                "oneOf": [{"$ref": "#/definitions/task_urls"}, {"type": "null"}],
            },
        },
    }

    def __init__(
        self,
        reset=None,
        dequeued=None,
        deleted_models=None,
        urls=None,
        **kwargs
    ):
        super(ResetManyResponse, self).__init__(**kwargs)
        self.reset = reset
        self.dequeued = dequeued
        self.deleted_models = deleted_models
        self.urls = urls

    @schema_property("reset")
    def reset(self):
        return self._property_reset

    @reset.setter
    def reset(self, value):
        # None clears the property; whole-valued floats are coerced to int.
        if value is None:
            self._property_reset = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "reset", six.integer_types)
        self._property_reset = value

    @schema_property("dequeued")
    def dequeued(self):
        return self._property_dequeued

    @dequeued.setter
    def dequeued(self, value):
        # None clears the property; otherwise require a dict.
        if value is None:
            self._property_dequeued = None
            return
        self.assert_isinstance(value, "dequeued", (dict,))
        self._property_dequeued = value

    @schema_property("deleted_models")
    def deleted_models(self):
        return self._property_deleted_models

    @deleted_models.setter
    def deleted_models(self, value):
        # None clears the property; whole-valued floats are coerced to int.
        if value is None:
            self._property_deleted_models = None
            return
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "deleted_models", six.integer_types)
        self._property_deleted_models = value

    @schema_property("urls")
    def urls(self):
        return self._property_urls

    @urls.setter
    def urls(self, value):
        # Dicts are promoted to TaskUrls; anything else must already be one.
        if value is None:
            self._property_urls = None
            return
        if isinstance(value, dict):
            value = TaskUrls.from_dict(value)
        else:
            self.assert_isinstance(value, "urls", TaskUrls)
        self._property_urls = value
class SetRequirementsRequest(Request):
    """
    Set the script requirements for a task.

    :param task: Task ID
    :type task: str
    :param requirements: A JSON object containing requirements strings by key
    :type requirements: dict
    """

    # Endpoint routing metadata consumed by the API client machinery.
    _service = "tasks"
    _action = "set_requirements"
    _version = "2.13"
    # JSON schema of the request payload.
    _schema = {
        "definitions": {},
        "properties": {
            "requirements": {
                "description": "A JSON object containing requirements strings by key",
                "type": "object",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task", "requirements"],
        "type": "object",
    }

    def __init__(self, task, requirements, **kwargs):
        super(SetRequirementsRequest, self).__init__(**kwargs)
        self.task = task
        self.requirements = requirements

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        # None clears the field; any other value must be a string.
        if value is not None:
            self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("requirements")
    def requirements(self):
        return self._property_requirements

    @requirements.setter
    def requirements(self, value):
        # None clears the field; any other value must be a dict.
        if value is not None:
            self.assert_isinstance(value, "requirements", (dict,))
        self._property_requirements = value
class SetRequirementsResponse(Response):
    """
    Response of tasks.set_requirements endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    """

    # Endpoint routing metadata consumed by the API client machinery.
    _service = "tasks"
    _action = "set_requirements"
    _version = "2.13"
    # JSON schema of the response payload.
    _schema = {
        "definitions": {},
        "properties": {
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, fields=None, **kwargs):
        super(SetRequirementsResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        # None clears the field; integral floats (JSON numbers) become ints.
        if value is not None:
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value

    @schema_property("fields")
    def fields(self):
        return self._property_fields

    @fields.setter
    def fields(self, value):
        # None clears the field; any other value must be a dict.
        if value is not None:
            self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value
class StartedRequest(Request):
    """
    Mark a task status as in_progress. Optionally allows to set the task's execution progress.

    :param force: If not true, call fails if the task status is not 'not_started'
    :type force: bool
    :param task: Task ID
    :type task: str
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    """

    # Endpoint routing metadata consumed by the API client machinery.
    _service = "tasks"
    _action = "started"
    _version = "2.13"
    # JSON schema of the request payload.
    _schema = {
        "definitions": {},
        "properties": {
            "force": {
                "default": False,
                "description": "If not true, call fails if the task status is not 'not_started'",
                "type": ["boolean", "null"],
            },
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task"],
        "type": "object",
    }

    def __init__(
        self, task, force=False, status_reason=None, status_message=None, **kwargs
    ):
        super(StartedRequest, self).__init__(**kwargs)
        self.force = force
        self.task = task
        self.status_reason = status_reason
        self.status_message = status_message

    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        # None clears the field; otherwise only a real bool is accepted.
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("status_reason")
    def status_reason(self):
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        if value is None:
            self._property_status_reason = None
            return
        self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        if value is None:
            self._property_status_message = None
            return
        self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value
class StartedResponse(Response):
    """
    Response of tasks.started endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    :param started: Number of tasks started (0 or 1)
    :type started: int
    """

    # Endpoint routing metadata consumed by the API client machinery.
    _service = "tasks"
    _action = "started"
    _version = "2.13"
    # JSON schema of the response payload.
    _schema = {
        "definitions": {},
        "properties": {
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "started": {
                "description": "Number of tasks started (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, fields=None, started=None, **kwargs):
        super(StartedResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields
        self.started = started

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        if value is None:
            self._property_updated = None
            return
        # Accept integral floats (e.g. 1.0 from JSON decoding) as ints.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value

    @schema_property("fields")
    def fields(self):
        return self._property_fields

    @fields.setter
    def fields(self, value):
        if value is None:
            self._property_fields = None
            return
        self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value

    @schema_property("started")
    def started(self):
        return self._property_started

    @started.setter
    def started(self, value):
        if value is None:
            self._property_started = None
            return
        # Accept integral floats (JSON numbers) as ints.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "started", six.integer_types)
        self._property_started = value
class StopRequest(Request):
    """
    Request to stop a running task.

    :param force: If not true, call fails if the task status is not 'in_progress'
    :type force: bool
    :param task: Task ID
    :type task: str
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    """

    # Endpoint routing metadata consumed by the API client machinery.
    _service = "tasks"
    _action = "stop"
    _version = "2.13"
    # JSON schema of the request payload.
    _schema = {
        "definitions": {},
        "properties": {
            "force": {
                "default": False,
                "description": "If not true, call fails if the task status is not 'in_progress'",
                "type": ["boolean", "null"],
            },
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task"],
        "type": "object",
    }

    def __init__(
        self, task, force=False, status_reason=None, status_message=None, **kwargs
    ):
        super(StopRequest, self).__init__(**kwargs)
        self.force = force
        self.task = task
        self.status_reason = status_reason
        self.status_message = status_message

    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        # None clears the field; otherwise only a real bool is accepted.
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("status_reason")
    def status_reason(self):
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        if value is None:
            self._property_status_reason = None
            return
        self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        if value is None:
            self._property_status_message = None
            return
        self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value
class StopResponse(Response):
    """
    Response of tasks.stop endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    """

    # Endpoint routing metadata consumed by the API client machinery.
    _service = "tasks"
    _action = "stop"
    _version = "2.13"
    # JSON schema of the response payload.
    _schema = {
        "definitions": {},
        "properties": {
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, fields=None, **kwargs):
        super(StopResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        # None clears the field; integral floats (JSON numbers) become ints.
        if value is not None:
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value

    @schema_property("fields")
    def fields(self):
        return self._property_fields

    @fields.setter
    def fields(self, value):
        # None clears the field; any other value must be a dict.
        if value is not None:
            self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value
class StopManyRequest(Request):
    """
    Request to stop running tasks.

    :param ids: Entities to move
    :type ids: Sequence[str]
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    :param force: If not true, call fails if the task status is not 'in_progress'
    :type force: bool
    """

    # Endpoint routing metadata consumed by the API client machinery.
    _service = "tasks"
    _action = "stop_many"
    _version = "2.13"
    # JSON schema of the request payload.
    # NOTE(review): the "ids" description reads "Entities to move" — looks like
    # a copy-paste artifact in the upstream service schema; kept verbatim since
    # the schema dict is runtime data.
    _schema = {
        "definitions": {},
        "properties": {
            "force": {
                "default": False,
                "description": "If not true, call fails if the task status is not 'in_progress'",
                "type": "boolean",
            },
            "ids": {
                "description": "Entities to move",
                "items": {"type": "string"},
                "type": "array",
            },
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
        },
        "required": ["ids"],
        "type": "object",
    }

    def __init__(
        self, ids, status_reason=None, status_message=None, force=False, **kwargs
    ):
        super(StopManyRequest, self).__init__(**kwargs)
        self.ids = ids
        self.status_reason = status_reason
        self.status_message = status_message
        self.force = force

    @schema_property("ids")
    def ids(self):
        return self._property_ids

    @ids.setter
    def ids(self, value):
        if value is None:
            self._property_ids = None
            return
        # Must be a sequence of strings: the container and each element are checked.
        self.assert_isinstance(value, "ids", (list, tuple))
        self.assert_isinstance(value, "ids", six.string_types, is_array=True)
        self._property_ids = value

    @schema_property("status_reason")
    def status_reason(self):
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        if value is None:
            self._property_status_reason = None
            return
        self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        if value is None:
            self._property_status_message = None
            return
        self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value

    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value
class StopManyResponse(Response):
    """
    Response of tasks.stop_many endpoint.

    :param stopped: Number of tasks stopped
    :type stopped: int
    """

    # Endpoint routing metadata consumed by the API client machinery.
    _service = "tasks"
    _action = "stop_many"
    _version = "2.13"
    # JSON schema of the response payload (the generated schema places
    # "failures" beside "definitions"/"properties"; kept verbatim).
    _schema = {
        "definitions": {},
        "failures": {
            "item": {
                "error": {
                    "description": "Error info",
                    "properties": {
                        "codes": {"item": {"type": "integer"}, "type": "array"},
                        "data": {"additionalProperties": True, "type": "object"},
                        "msg": {"type": "string"},
                    },
                    "type": "object",
                },
                "id": {"description": "ID of the failed entity", "type": "string"},
                "type": "object",
            },
            "type": "array",
        },
        "properties": {
            "stopped": {
                "description": "Number of tasks stopped",
                "type": ["integer", "null"],
            }
        },
    }

    def __init__(self, stopped=None, **kwargs):
        super(StopManyResponse, self).__init__(**kwargs)
        self.stopped = stopped

    @schema_property("stopped")
    def stopped(self):
        return self._property_stopped

    @stopped.setter
    def stopped(self, value):
        # None clears the field; integral floats (JSON numbers) become ints.
        if value is not None:
            if isinstance(value, float) and value.is_integer():
                value = int(value)
            self.assert_isinstance(value, "stopped", six.integer_types)
        self._property_stopped = value
class StoppedRequest(Request):
    """
    Signal a task has stopped.

    :param force: If not true, call fails if the task status is not 'stopped'
    :type force: bool
    :param task: Task ID
    :type task: str
    :param status_reason: Reason for status change
    :type status_reason: str
    :param status_message: Extra information regarding status change
    :type status_message: str
    """

    # Endpoint routing metadata consumed by the API client machinery.
    _service = "tasks"
    _action = "stopped"
    _version = "2.13"
    # JSON schema of the request payload.
    _schema = {
        "definitions": {},
        "properties": {
            "force": {
                "default": False,
                "description": "If not true, call fails if the task status is not 'stopped'",
                "type": ["boolean", "null"],
            },
            "status_message": {
                "description": "Extra information regarding status change",
                "type": "string",
            },
            "status_reason": {
                "description": "Reason for status change",
                "type": "string",
            },
            "task": {"description": "Task ID", "type": "string"},
        },
        "required": ["task"],
        "type": "object",
    }

    def __init__(
        self, task, force=False, status_reason=None, status_message=None, **kwargs
    ):
        super(StoppedRequest, self).__init__(**kwargs)
        self.force = force
        self.task = task
        self.status_reason = status_reason
        self.status_message = status_message

    @schema_property("force")
    def force(self):
        return self._property_force

    @force.setter
    def force(self, value):
        # None clears the field; otherwise only a real bool is accepted.
        if value is None:
            self._property_force = None
            return
        self.assert_isinstance(value, "force", (bool,))
        self._property_force = value

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("status_reason")
    def status_reason(self):
        return self._property_status_reason

    @status_reason.setter
    def status_reason(self, value):
        if value is None:
            self._property_status_reason = None
            return
        self.assert_isinstance(value, "status_reason", six.string_types)
        self._property_status_reason = value

    @schema_property("status_message")
    def status_message(self):
        return self._property_status_message

    @status_message.setter
    def status_message(self, value):
        if value is None:
            self._property_status_message = None
            return
        self.assert_isinstance(value, "status_message", six.string_types)
        self._property_status_message = value
class StoppedResponse(Response):
    """
    Response of tasks.stopped endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    """

    # Endpoint routing metadata consumed by the API client machinery.
    _service = "tasks"
    _action = "stopped"
    _version = "2.13"
    # JSON schema of the response payload.
    _schema = {
        "definitions": {},
        "properties": {
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, fields=None, **kwargs):
        super(StoppedResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        if value is None:
            self._property_updated = None
            return
        # Accept integral floats (JSON numbers) as ints.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value

    @schema_property("fields")
    def fields(self):
        return self._property_fields

    @fields.setter
    def fields(self, value):
        if value is None:
            self._property_fields = None
            return
        self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value
class UpdateRequest(Request):
    """
    Update task's runtime parameters.

    :param task: ID of the task
    :type task: str
    :param name: Task name Unique within the company.
    :type name: str
    :param tags: User-defined tags list
    :type tags: Sequence[str]
    :param system_tags: System tags list. This field is reserved for system use,
        please don't use it.
    :type system_tags: Sequence[str]
    :param comment: Free text comment
    :type comment: str
    :param project: Project ID of the project to which this task is assigned
    :type project: str
    :param output__error: Free text error
    :type output__error: str
    :param created: Task creation time (UTC)
    :type created: datetime.datetime
    """

    # Endpoint routing metadata consumed by the API client machinery.
    _service = "tasks"
    _action = "update"
    _version = "2.13"
    # JSON schema of the request payload.
    _schema = {
        "definitions": {},
        "properties": {
            "comment": {"description": "Free text comment ", "type": "string"},
            "created": {
                "description": "Task creation time (UTC) ",
                "format": "date-time",
                "type": "string",
            },
            "name": {
                "description": "Task name Unique within the company.",
                "type": "string",
            },
            "output__error": {"description": "Free text error", "type": "string"},
            "project": {
                "description": "Project ID of the project to which this task is assigned",
                "type": "string",
            },
            "system_tags": {
                "description": "System tags list. This field is reserved for system use, please don't use it.",
                "items": {"type": "string"},
                "type": "array",
            },
            "tags": {
                "description": "User-defined tags list",
                "items": {"type": "string"},
                "type": "array",
            },
            "task": {"description": "ID of the task", "type": "string"},
        },
        "required": ["task"],
        "type": "object",
    }

    def __init__(
        self,
        task,
        name=None,
        tags=None,
        system_tags=None,
        comment=None,
        project=None,
        output__error=None,
        created=None,
        **kwargs
    ):
        super(UpdateRequest, self).__init__(**kwargs)
        self.task = task
        self.name = name
        self.tags = tags
        self.system_tags = system_tags
        self.comment = comment
        self.project = project
        self.output__error = output__error
        self.created = created

    @schema_property("task")
    def task(self):
        return self._property_task

    @task.setter
    def task(self, value):
        if value is None:
            self._property_task = None
            return
        self.assert_isinstance(value, "task", six.string_types)
        self._property_task = value

    @schema_property("name")
    def name(self):
        return self._property_name

    @name.setter
    def name(self, value):
        if value is None:
            self._property_name = None
            return
        self.assert_isinstance(value, "name", six.string_types)
        self._property_name = value

    @schema_property("tags")
    def tags(self):
        return self._property_tags

    @tags.setter
    def tags(self, value):
        if value is None:
            self._property_tags = None
            return
        # Must be a sequence of strings: the container and each element are checked.
        self.assert_isinstance(value, "tags", (list, tuple))
        self.assert_isinstance(value, "tags", six.string_types, is_array=True)
        self._property_tags = value

    @schema_property("system_tags")
    def system_tags(self):
        return self._property_system_tags

    @system_tags.setter
    def system_tags(self, value):
        if value is None:
            self._property_system_tags = None
            return
        self.assert_isinstance(value, "system_tags", (list, tuple))
        self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
        self._property_system_tags = value

    @schema_property("comment")
    def comment(self):
        return self._property_comment

    @comment.setter
    def comment(self, value):
        if value is None:
            self._property_comment = None
            return
        self.assert_isinstance(value, "comment", six.string_types)
        self._property_comment = value

    @schema_property("project")
    def project(self):
        return self._property_project

    @project.setter
    def project(self, value):
        if value is None:
            self._property_project = None
            return
        self.assert_isinstance(value, "project", six.string_types)
        self._property_project = value

    @schema_property("output__error")
    def output__error(self):
        return self._property_output__error

    @output__error.setter
    def output__error(self, value):
        if value is None:
            self._property_output__error = None
            return
        self.assert_isinstance(value, "output__error", six.string_types)
        self._property_output__error = value

    @schema_property("created")
    def created(self):
        return self._property_created

    @created.setter
    def created(self, value):
        if value is None:
            self._property_created = None
            return
        # Accept either a datetime or a parseable date string; strings are
        # converted so the stored value is always a datetime (or None).
        self.assert_isinstance(value, "created", six.string_types + (datetime,))
        if not isinstance(value, datetime):
            value = parse_datetime(value)
        self._property_created = value
class UpdateResponse(Response):
    """
    Response of tasks.update endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    :param fields: Updated fields names and values
    :type fields: dict
    """

    # Endpoint routing metadata consumed by the API client machinery.
    _service = "tasks"
    _action = "update"
    _version = "2.13"
    # JSON schema of the response payload.
    _schema = {
        "definitions": {},
        "properties": {
            "fields": {
                "additionalProperties": True,
                "description": "Updated fields names and values",
                "type": ["object", "null"],
            },
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            },
        },
        "type": "object",
    }

    def __init__(self, updated=None, fields=None, **kwargs):
        super(UpdateResponse, self).__init__(**kwargs)
        self.updated = updated
        self.fields = fields

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        if value is None:
            self._property_updated = None
            return
        # Accept integral floats (JSON numbers) as ints.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value

    @schema_property("fields")
    def fields(self):
        return self._property_fields

    @fields.setter
    def fields(self, value):
        if value is None:
            self._property_fields = None
            return
        self.assert_isinstance(value, "fields", (dict,))
        self._property_fields = value
class UpdateBatchRequest(BatchRequest):
    """
    Updates a batch of tasks.

    Headers
    Content type should be 'application/json-lines'.
    """

    # Endpoint routing metadata consumed by the API client machinery.
    _service = "tasks"
    _action = "update_batch"
    _version = "2.13"
    # Each json-lines entry in the batch body is an individual UpdateRequest.
    _batched_request_cls = UpdateRequest
class UpdateBatchResponse(Response):
    """
    Response of tasks.update_batch endpoint.

    :param updated: Number of tasks updated (0 or 1)
    :type updated: int
    """

    # Endpoint routing metadata consumed by the API client machinery.
    _service = "tasks"
    _action = "update_batch"
    _version = "2.13"
    # JSON schema of the response payload.
    _schema = {
        "definitions": {},
        "properties": {
            "updated": {
                "description": "Number of tasks updated (0 or 1)",
                "enum": [0, 1],
                "type": ["integer", "null"],
            }
        },
        "type": "object",
    }

    def __init__(self, updated=None, **kwargs):
        super(UpdateBatchResponse, self).__init__(**kwargs)
        self.updated = updated

    @schema_property("updated")
    def updated(self):
        return self._property_updated

    @updated.setter
    def updated(self, value):
        if value is None:
            self._property_updated = None
            return
        # Accept integral floats (JSON numbers) as ints.
        if isinstance(value, float) and value.is_integer():
            value = int(value)
        self.assert_isinstance(value, "updated", six.integer_types)
        self._property_updated = value
class ValidateRequest(Request):
"""
Validate task properties (before create)
:param name: Task name. Unique within the company.
:type name: str
:param tags: User-defined tags list
:type tags: Sequence[str]
:param system_tags: System tags list. This field is reserved for system use,
please don't use it.
:type system_tags: Sequence[str]
:param type: Type of task
:type type: TaskTypeEnum
:param comment: Free text comment
:type comment: str
:param parent: Parent task id Must be a completed task.
:type parent: str
:param project: Project ID of the project to which this task is assigned Must
exist[ab]
:type project: str
:param output_dest: Output storage id Must be a reference to an existing
storage.
:type output_dest: str
:param execution: Task execution params
:type execution: Execution
:param script: Script info
:type script: Script
:param hyperparams: Task hyper params per section
:type hyperparams: dict
:param configuration: Task configuration params
:type configuration: dict
:param models: Task models
:type models: TaskModels
:param container: Docker container parameters
:type container: dict
"""
_service = "tasks"
_action = "validate"
_version = "2.13"
_schema = {
"definitions": {
"artifact": {
"properties": {
"content_size": {
"description": "Raw data length in bytes",
"type": "integer",
},
"display_data": {
"description": "User-defined list of key/value pairs, sorted",
"items": {"items": {"type": "string"}, "type": "array"},
"type": "array",
},
"hash": {
"description": "Hash of entire raw data",
"type": "string",
},
"key": {"description": "Entry key", "type": "string"},
"mode": {
"$ref": "#/definitions/artifact_mode_enum",
"description": "System defined input/output indication",
},
"timestamp": {
"description": "Epoch time when artifact was created",
"type": "integer",
},
"type": {
"description": "System defined type",
"type": "string",
},
"type_data": {
"$ref": "#/definitions/artifact_type_data",
"description": "Additional fields defined by the system",
},
"uri": {"description": "Raw data location", "type": "string"},
},
"required": ["key", "type"],
"type": "object",
},
"artifact_mode_enum": {
"default": "output",
"enum": ["input", "output"],
"type": "string",
},
"artifact_type_data": {
"properties": {
"content_type": {
"description": "System defined raw data content type",
"type": ["string", "null"],
},
"data_hash": {
"description": "Hash of raw data, without any headers or descriptive parts",
"type": ["string", "null"],
},
"preview": {
"description": "Description or textual data",
"type": ["string", "null"],
},
},
"type": "object",
},
"configuration_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. Should be unique",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"execution": {
"properties": {
"artifacts": {
"description": "Task artifacts",
"items": {"$ref": "#/definitions/artifact"},
"type": ["array", "null"],
},
"framework": {
"description": "Framework related to the task. Case insensitive. Mandatory for Training tasks. ",
"type": ["string", "null"],
},
"model_desc": {
"additionalProperties": True,
"description": "Json object representing the Model descriptors",
"type": ["object", "null"],
},
"model_labels": {
"additionalProperties": {"type": "integer"},
"description": "Json object representing the ids of the labels in the model.\n The keys are the layers' names and the values are the IDs.\n Not applicable for Register (Import) tasks.\n Mandatory for Training tasks",
"type": ["object", "null"],
},
"parameters": {
"additionalProperties": True,
"description": "Json object containing the Task parameters",
"type": ["object", "null"],
},
"queue": {
"description": "Queue ID where task was queued.",
"type": ["string", "null"],
},
},
"type": "object",
},
"params_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. The combination of section and name should be unique",
"type": ["string", "null"],
},
"section": {
"description": "Section that the parameter belongs to",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"script": {
"properties": {
"binary": {
"default": "python",
"description": "Binary to use when running the script",
"type": ["string", "null"],
},
"branch": {
"description": "Repository branch id If not provided and tag not provided, default repository branch is used.",
"type": ["string", "null"],
},
"diff": {
"description": "Uncommitted changes found in the repository when task was run",
"type": ["string", "null"],
},
"entry_point": {
"description": "Path to execute within the repository",
"type": ["string", "null"],
},
"repository": {
"description": "Name of the repository where the script is located",
"type": ["string", "null"],
},
"requirements": {
"description": "A JSON object containing requirements strings by key",
"type": ["object", "null"],
},
"tag": {
"description": "Repository tag",
"type": ["string", "null"],
},
"version_num": {
"description": "Version (changeset) number. Optional (default is head version) Unused if tag is provided.",
"type": ["string", "null"],
},
"working_dir": {
"description": "Path to the folder from which to run the script Default - root folder of repository",
"type": ["string", "null"],
},
},
"type": "object",
},
"section_params": {
"additionalProperties": {"$ref": "#/definitions/params_item"},
"description": "Task section params",
"type": "object",
},
"task_model_item": {
"properties": {
"model": {"description": "The model ID", "type": "string"},
"name": {
"description": "The task model name",
"type": "string",
},
},
"required": ["name", "model"],
"type": "object",
},
"task_models": {
"properties": {
"input": {
"description": "The list of task input models",
"items": {"$ref": "#/definitions/task_model_item"},
"type": ["array", "null"],
},
"output": {
"description": "The list of task output models",
"items": {"$ref": "#/definitions/task_model_item"},
"type": ["array", "null"],
},
},
"type": "object",
},
"task_type_enum": {
"enum": [
"training",
"testing",
"inference",
"data_processing",
"application",
"monitor",
"controller",
"optimizer",
"service",
"qc",
"custom",
],
"type": "string",
},
},
"properties": {
"comment": {"description": "Free text comment ", "type": "string"},
"configuration": {
"additionalProperties": {"$ref": "#/definitions/configuration_item"},
"description": "Task configuration params",
"type": "object",
},
"container": {
"type": "object",
"description": "Docker container parameters",
"additionalProperties": {"type": "string"},
},
"execution": {
"$ref": "#/definitions/execution",
"description": "Task execution params",
},
"hyperparams": {
"additionalProperties": {"$ref": "#/definitions/section_params"},
"description": "Task hyper params per section",
"type": "object",
},
"models": {
"$ref": "#/definitions/task_models",
"description": "Task models",
},
"name": {
"description": "Task name. Unique within the company.",
"type": "string",
},
"output_dest": {
"description": "Output storage id Must be a reference to an existing storage.",
"type": "string",
},
"parent": {
"description": "Parent task id Must be a completed task.",
"type": "string",
},
"project": {
"description": "Project ID of the project to which this task is assigned Must exist[ab]",
"type": "string",
},
"script": {"$ref": "#/definitions/script", "description": "Script info"},
"system_tags": {
"description": "System tags list. This field is reserved for system use, please don't use it.",
"items": {"type": "string"},
"type": "array",
},
"tags": {
"description": "User-defined tags list",
"items": {"type": "string"},
"type": "array",
},
"type": {
"$ref": "#/definitions/task_type_enum",
"description": "Type of task",
},
},
"required": ["name", "type"],
"type": "object",
}
    def __init__(
        self,
        name,
        type,
        tags=None,
        system_tags=None,
        comment=None,
        parent=None,
        project=None,
        output_dest=None,
        execution=None,
        script=None,
        hyperparams=None,
        configuration=None,
        models=None,
        container=None,
        **kwargs
    ):
        """Build a tasks.validate request.

        :param name: Task name (required; unique within the company).
        :param type: Task type (required; see the task_type_enum schema values).
        :param tags: User-defined tags list.
        :param system_tags: System tags list (reserved for system use).
        :param comment: Free text comment.
        :param parent: Parent task id; must be a completed task.
        :param project: Project ID of the project this task is assigned to.
        :param output_dest: Output storage id; must reference an existing storage.
        :param execution: Task execution params (Execution object or dict).
        :param script: Script info (Script object or dict).
        :param hyperparams: Task hyper params per section (SectionParams or dict values).
        :param configuration: Task configuration params (ConfigurationItem or dict values).
        :param models: Task models (TaskModels object or dict).
        :param container: Docker container parameters (dict of strings).
        :param kwargs: Forwarded to the base request class.
        """
        super(ValidateRequest, self).__init__(**kwargs)
        # Each assignment goes through the corresponding schema_property
        # setter, which validates and coerces the value.
        self.name = name
        self.tags = tags
        self.system_tags = system_tags
        self.type = type
        self.comment = comment
        self.parent = parent
        self.project = project
        self.output_dest = output_dest
        self.execution = execution
        self.script = script
        self.hyperparams = hyperparams
        self.configuration = configuration
        self.models = models
        self.container = container
@schema_property("name")
def name(self):
return self._property_name
@name.setter
def name(self, value):
if value is None:
self._property_name = None
return
self.assert_isinstance(value, "name", six.string_types)
self._property_name = value
@schema_property("tags")
def tags(self):
return self._property_tags
@tags.setter
def tags(self, value):
if value is None:
self._property_tags = None
return
self.assert_isinstance(value, "tags", (list, tuple))
self.assert_isinstance(value, "tags", six.string_types, is_array=True)
self._property_tags = value
@schema_property("system_tags")
def system_tags(self):
return self._property_system_tags
@system_tags.setter
def system_tags(self, value):
if value is None:
self._property_system_tags = None
return
self.assert_isinstance(value, "system_tags", (list, tuple))
self.assert_isinstance(value, "system_tags", six.string_types, is_array=True)
self._property_system_tags = value
@schema_property("type")
def type(self):
return self._property_type
@type.setter
def type(self, value):
if value is None:
self._property_type = None
return
if isinstance(value, six.string_types):
try:
value = TaskTypeEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "type", enum.Enum)
self._property_type = value
@schema_property("comment")
def comment(self):
return self._property_comment
@comment.setter
def comment(self, value):
if value is None:
self._property_comment = None
return
self.assert_isinstance(value, "comment", six.string_types)
self._property_comment = value
@schema_property("parent")
def parent(self):
return self._property_parent
@parent.setter
def parent(self, value):
if value is None:
self._property_parent = None
return
self.assert_isinstance(value, "parent", six.string_types)
self._property_parent = value
@schema_property("project")
def project(self):
return self._property_project
@project.setter
def project(self, value):
if value is None:
self._property_project = None
return
self.assert_isinstance(value, "project", six.string_types)
self._property_project = value
@schema_property("output_dest")
def output_dest(self):
return self._property_output_dest
@output_dest.setter
def output_dest(self, value):
if value is None:
self._property_output_dest = None
return
self.assert_isinstance(value, "output_dest", six.string_types)
self._property_output_dest = value
@schema_property("execution")
def execution(self):
return self._property_execution
@execution.setter
def execution(self, value):
if value is None:
self._property_execution = None
return
if isinstance(value, dict):
value = Execution.from_dict(value)
else:
self.assert_isinstance(value, "execution", Execution)
self._property_execution = value
@schema_property("hyperparams")
def hyperparams(self):
return self._property_hyperparams
@hyperparams.setter
def hyperparams(self, value):
if value is None:
self._property_hyperparams = None
return
self.assert_isinstance(value, "hyperparams", dict)
self.assert_isinstance(
value.keys(), "hyperparams_keys", six.string_types, is_array=True
)
self.assert_isinstance(
value.values(), "hyperparams_values", (SectionParams, dict), is_array=True
)
value = dict(
(k, SectionParams(**v) if isinstance(v, dict) else v)
for k, v in value.items()
)
self._property_hyperparams = value
@schema_property("configuration")
def configuration(self):
return self._property_configuration
@configuration.setter
def configuration(self, value):
if value is None:
self._property_configuration = None
return
self.assert_isinstance(value, "configuration", dict)
self.assert_isinstance(
value.keys(), "configuration_keys", six.string_types, is_array=True
)
self.assert_isinstance(
value.values(),
"configuration_values",
(ConfigurationItem, dict),
is_array=True,
)
value = dict(
(k, ConfigurationItem(**v) if isinstance(v, dict) else v)
for k, v in value.items()
)
self._property_configuration = value
@schema_property("script")
def script(self):
return self._property_script
@script.setter
def script(self, value):
if value is None:
self._property_script = None
return
if isinstance(value, dict):
value = Script.from_dict(value)
else:
self.assert_isinstance(value, "script", Script)
self._property_script = value
@schema_property("models")
def models(self):
return self._property_models
@models.setter
def models(self, value):
if value is None:
self._property_models = None
return
if isinstance(value, dict):
value = TaskModels.from_dict(value)
else:
self.assert_isinstance(value, "models", TaskModels)
self._property_models = value
@schema_property("container")
def container(self):
return self._property_container
@container.setter
def container(self, value):
if value is None:
self._property_container = None
return
self.assert_isinstance(value, "container", dict)
self._property_container = value
class ValidateResponse(Response):
    """
    Response of tasks.validate endpoint.
    """

    _service = "tasks"  # API service this response belongs to
    _action = "validate"  # API action this response belongs to
    _version = "2.13"  # API schema version
    # The validate endpoint returns no payload fields, hence the empty-object schema.
    _schema = {"additionalProperties": False, "definitions": {}, "type": "object"}
# Maps every tasks-service request class to its corresponding response class,
# so a caller (or transport layer) can deserialize a server reply into the
# correct Response type for the request it sent.
response_mapping = {
    GetByIdRequest: GetByIdResponse,
    GetAllRequest: GetAllResponse,
    GetTypesRequest: GetTypesResponse,
    CloneRequest: CloneResponse,
    AddOrUpdateModelRequest: AddOrUpdateModelResponse,
    DeleteModelsRequest: DeleteModelsResponse,
    CreateRequest: CreateResponse,
    ValidateRequest: ValidateResponse,
    UpdateRequest: UpdateResponse,
    UpdateBatchRequest: UpdateBatchResponse,
    EditRequest: EditResponse,
    ResetRequest: ResetResponse,
    ResetManyRequest: ResetManyResponse,
    DeleteManyRequest: DeleteManyResponse,
    DeleteRequest: DeleteResponse,
    ArchiveRequest: ArchiveResponse,
    ArchiveManyRequest: ArchiveManyResponse,
    StartedRequest: StartedResponse,
    StopRequest: StopResponse,
    StopManyRequest: StopManyResponse,
    StoppedRequest: StoppedResponse,
    FailedRequest: FailedResponse,
    CloseRequest: CloseResponse,
    PublishRequest: PublishResponse,
    PublishManyRequest: PublishManyResponse,
    EnqueueRequest: EnqueueResponse,
    EnqueueManyRequest: EnqueueManyResponse,
    DequeueRequest: DequeueResponse,
    SetRequirementsRequest: SetRequirementsResponse,
    CompletedRequest: CompletedResponse,
    PingRequest: PingResponse,
    AddOrUpdateArtifactsRequest: AddOrUpdateArtifactsResponse,
    MakePublicRequest: MakePublicResponse,
    MakePrivateRequest: MakePrivateResponse,
    DeleteArtifactsRequest: DeleteArtifactsResponse,
    GetHyperParamsRequest: GetHyperParamsResponse,
    EditHyperParamsRequest: EditHyperParamsResponse,
    DeleteHyperParamsRequest: DeleteHyperParamsResponse,
    GetConfigurationsRequest: GetConfigurationsResponse,
    GetConfigurationNamesRequest: GetConfigurationNamesResponse,
    EditConfigurationRequest: EditConfigurationResponse,
    DeleteConfigurationRequest: DeleteConfigurationResponse,
    MoveRequest: MoveResponse,
}
| 32.096448 | 397 | 0.528056 | 38,072 | 403,003 | 5.401818 | 0.015891 | 0.064884 | 0.043178 | 0.05373 | 0.878891 | 0.853937 | 0.827096 | 0.809232 | 0.788678 | 0.774237 | 0 | 0.001468 | 0.362786 | 403,003 | 12,555 | 398 | 32.099004 | 0.799384 | 0.090166 | 0 | 0.750643 | 0 | 0.004117 | 0.216576 | 0.009364 | 0 | 0 | 0 | 0 | 0.045693 | 1 | 0.086446 | false | 0.001235 | 0.001235 | 0.038181 | 0.215087 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
ae4d3ee2620dfde02d2d3d8c2b56dbc8c372aa6d | 58,259 | py | Python | thirdweb/abi/t_w_registry.py | nftlabs/nftlabs-sdk-python | ea533142dc0881872b347cd8ce635dc0bfff3153 | [
"Apache-2.0"
] | 30 | 2021-10-31T13:17:58.000Z | 2022-02-04T13:41:13.000Z | thirdweb/abi/t_w_registry.py | nftlabs/nftlabs-sdk-python | ea533142dc0881872b347cd8ce635dc0bfff3153 | [
"Apache-2.0"
] | 36 | 2021-11-03T20:30:38.000Z | 2022-02-14T10:15:40.000Z | thirdweb/abi/t_w_registry.py | nftlabs/nftlabs-sdk-python | ea533142dc0881872b347cd8ce635dc0bfff3153 | [
"Apache-2.0"
] | 10 | 2021-11-10T19:59:41.000Z | 2022-01-21T21:26:55.000Z | """Generated wrapper for TWRegistry Solidity contract."""
# pylint: disable=too-many-arguments
import json
from typing import ( # pylint: disable=unused-import
Any,
List,
Optional,
Tuple,
Union,
)
from eth_utils import to_checksum_address
from mypy_extensions import TypedDict # pylint: disable=unused-import
from hexbytes import HexBytes
from web3 import Web3
from web3.contract import ContractFunction
from web3.datastructures import AttributeDict
from web3.providers.base import BaseProvider
from zero_ex.contract_wrappers.bases import ContractMethod, Validator
from zero_ex.contract_wrappers.tx_params import TxParams
# Try to import a custom validator class definition; if there isn't one,
# declare one that we can instantiate for the default argument to the
# constructor for TWRegistry below.
try:
# both mypy and pylint complain about what we're doing here, but this
# works just fine, so their messages have been disabled here.
from . import ( # type: ignore # pylint: disable=import-self
TWRegistryValidator,
)
except ImportError:
class TWRegistryValidator(Validator): # type: ignore
"""No-op input validator."""
try:
from .middleware import MIDDLEWARE # type: ignore
except ImportError:
pass
class DefaultAdminRoleMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the DEFAULT_ADMIN_ROLE method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> Union[bytes, str]:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method().call(tx_params.as_dict())
        # BUGFIX: the generated code did `return Union[bytes, str](returned)`,
        # but a typing.Union special form is not callable, so every call
        # raised TypeError at runtime.  Return the web3-decoded value
        # unchanged instead (consistent with OperatorRoleMethod.call).
        return returned

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().transact(tx_params.as_dict())

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().buildTransaction(tx_params.as_dict())

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method().estimateGas(tx_params.as_dict())
class OperatorRoleMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the OPERATOR_ROLE method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address)
        self._underlying_method = contract_function

    def call(self, tx_params: Optional[TxParams] = None) -> Union[bytes, str]:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().call(params)

    def send_transaction(
        self, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().transact(params)

    def build_transaction(self, tx_params: Optional[TxParams] = None) -> dict:
        """Construct calldata to be used as input to the method."""
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().buildTransaction(params)

    def estimate_gas(self, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method().estimateGas(params)
class AddMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the add method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, deployer: str, deployment: str):
        """Validate the inputs to the add method."""
        self.validator.assert_valid(
            method_name="add", parameter_name="_deployer", argument_value=deployer
        )
        deployer = self.validate_and_checksum_address(deployer)
        self.validator.assert_valid(
            method_name="add", parameter_name="_deployment", argument_value=deployment
        )
        deployment = self.validate_and_checksum_address(deployment)
        return deployer, deployment

    def call(
        self, deployer: str, deployment: str, tx_params: Optional[TxParams] = None
    ) -> None:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        deployer, deployment = self.validate_and_normalize_inputs(deployer, deployment)
        params = super().normalize_tx_params(tx_params).as_dict()
        self._underlying_method(deployer, deployment).call(params)

    def send_transaction(
        self, deployer: str, deployment: str, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        deployer, deployment = self.validate_and_normalize_inputs(deployer, deployment)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(deployer, deployment).transact(params)

    def build_transaction(
        self, deployer: str, deployment: str, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        deployer, deployment = self.validate_and_normalize_inputs(deployer, deployment)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(deployer, deployment).buildTransaction(params)

    def estimate_gas(
        self, deployer: str, deployment: str, tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        deployer, deployment = self.validate_and_normalize_inputs(deployer, deployment)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(deployer, deployment).estimateGas(params)
class CountMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the count method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, deployer: str):
        """Validate the inputs to the count method."""
        self.validator.assert_valid(
            method_name="count", parameter_name="_deployer", argument_value=deployer
        )
        return self.validate_and_checksum_address(deployer)

    def call(self, deployer: str, tx_params: Optional[TxParams] = None) -> int:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method, as an int.
        """
        deployer = self.validate_and_normalize_inputs(deployer)
        params = super().normalize_tx_params(tx_params).as_dict()
        return int(self._underlying_method(deployer).call(params))

    def send_transaction(
        self, deployer: str, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        deployer = self.validate_and_normalize_inputs(deployer)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(deployer).transact(params)

    def build_transaction(
        self, deployer: str, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        deployer = self.validate_and_normalize_inputs(deployer)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(deployer).buildTransaction(params)

    def estimate_gas(self, deployer: str, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        deployer = self.validate_and_normalize_inputs(deployer)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(deployer).estimateGas(params)
class GetAllMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the getAll method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, deployer: str):
        """Validate the inputs to the getAll method."""
        self.validator.assert_valid(
            method_name="getAll", parameter_name="_deployer", argument_value=deployer
        )
        return self.validate_and_checksum_address(deployer)

    def call(self, deployer: str, tx_params: Optional[TxParams] = None) -> List[str]:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method, each element
            converted to str.
        """
        deployer = self.validate_and_normalize_inputs(deployer)
        params = super().normalize_tx_params(tx_params).as_dict()
        returned = self._underlying_method(deployer).call(params)
        return [str(item) for item in returned]

    def send_transaction(
        self, deployer: str, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        deployer = self.validate_and_normalize_inputs(deployer)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(deployer).transact(params)

    def build_transaction(
        self, deployer: str, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        deployer = self.validate_and_normalize_inputs(deployer)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(deployer).buildTransaction(params)

    def estimate_gas(self, deployer: str, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        deployer = self.validate_and_normalize_inputs(deployer)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(deployer).estimateGas(params)
class GetRoleAdminMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the getRoleAdmin method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, role: Union[bytes, str]):
        """Validate the inputs to the getRoleAdmin method."""
        self.validator.assert_valid(
            method_name="getRoleAdmin",
            parameter_name="role",
            argument_value=role,
        )
        return role

    def call(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> Union[bytes, str]:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        (role) = self.validate_and_normalize_inputs(role)
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(role).call(tx_params.as_dict())
        # BUGFIX: the generated code did `return Union[bytes, str](returned)`,
        # but a typing.Union special form is not callable, so every call
        # raised TypeError at runtime.  Return the web3-decoded value
        # unchanged instead (consistent with OperatorRoleMethod.call).
        return returned

    def send_transaction(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        (role) = self.validate_and_normalize_inputs(role)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role).transact(tx_params.as_dict())

    def build_transaction(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        (role) = self.validate_and_normalize_inputs(role)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role).buildTransaction(tx_params.as_dict())

    def estimate_gas(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        (role) = self.validate_and_normalize_inputs(role)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(role).estimateGas(tx_params.as_dict())
class GetRoleMemberMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the getRoleMember method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, role: Union[bytes, str], index: int):
        """Validate the inputs to the getRoleMember method."""
        self.validator.assert_valid(
            method_name="getRoleMember", parameter_name="role", argument_value=role
        )
        self.validator.assert_valid(
            method_name="getRoleMember", parameter_name="index", argument_value=index
        )
        # Truncate fractional inputs so the contract receives an integer index.
        return role, int(index)

    def call(
        self, role: Union[bytes, str], index: int, tx_params: Optional[TxParams] = None
    ) -> str:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method, as a string.
        """
        role, index = self.validate_and_normalize_inputs(role, index)
        params = super().normalize_tx_params(tx_params).as_dict()
        return str(self._underlying_method(role, index).call(params))

    def send_transaction(
        self, role: Union[bytes, str], index: int, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        role, index = self.validate_and_normalize_inputs(role, index)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, index).transact(params)

    def build_transaction(
        self, role: Union[bytes, str], index: int, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        role, index = self.validate_and_normalize_inputs(role, index)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, index).buildTransaction(params)

    def estimate_gas(
        self, role: Union[bytes, str], index: int, tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        role, index = self.validate_and_normalize_inputs(role, index)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, index).estimateGas(params)
class GetRoleMemberCountMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the getRoleMemberCount method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, role: Union[bytes, str]):
        """Validate the inputs to the getRoleMemberCount method."""
        self.validator.assert_valid(
            method_name="getRoleMemberCount",
            parameter_name="role",
            argument_value=role,
        )
        return role

    def call(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> int:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method, as an int.
        """
        role = self.validate_and_normalize_inputs(role)
        params = super().normalize_tx_params(tx_params).as_dict()
        return int(self._underlying_method(role).call(params))

    def send_transaction(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        role = self.validate_and_normalize_inputs(role)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role).transact(params)

    def build_transaction(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        role = self.validate_and_normalize_inputs(role)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role).buildTransaction(params)

    def estimate_gas(
        self, role: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        role = self.validate_and_normalize_inputs(role)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role).estimateGas(params)
class GrantRoleMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the grantRole method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, role: Union[bytes, str], account: str):
        """Validate the inputs to the grantRole method."""
        self.validator.assert_valid(
            method_name="grantRole", parameter_name="role", argument_value=role
        )
        self.validator.assert_valid(
            method_name="grantRole", parameter_name="account", argument_value=account
        )
        return role, self.validate_and_checksum_address(account)

    def call(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        self._underlying_method(role, account).call(params)

    def send_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).transact(params)

    def build_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).buildTransaction(params)

    def estimate_gas(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).estimateGas(params)
class HasRoleMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the hasRole method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, role: Union[bytes, str], account: str):
        """Validate the inputs to the hasRole method."""
        self.validator.assert_valid(
            method_name="hasRole", parameter_name="role", argument_value=role
        )
        self.validator.assert_valid(
            method_name="hasRole", parameter_name="account", argument_value=account
        )
        return role, self.validate_and_checksum_address(account)

    def call(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> bool:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method, as a bool.
        """
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        return bool(self._underlying_method(role, account).call(params))

    def send_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).transact(params)

    def build_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).buildTransaction(params)

    def estimate_gas(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        role, account = self.validate_and_normalize_inputs(role, account)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).estimateGas(params)
class IsTrustedForwarderMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the isTrustedForwarder method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, forwarder: str):
        """Validate the inputs to the isTrustedForwarder method."""
        self.validator.assert_valid(
            method_name="isTrustedForwarder",
            parameter_name="forwarder",
            argument_value=forwarder,
        )
        return self.validate_and_checksum_address(forwarder)

    def call(self, forwarder: str, tx_params: Optional[TxParams] = None) -> bool:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method, as a bool.
        """
        forwarder = self.validate_and_normalize_inputs(forwarder)
        params = super().normalize_tx_params(tx_params).as_dict()
        return bool(self._underlying_method(forwarder).call(params))

    def send_transaction(
        self, forwarder: str, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        forwarder = self.validate_and_normalize_inputs(forwarder)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(forwarder).transact(params)

    def build_transaction(
        self, forwarder: str, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        forwarder = self.validate_and_normalize_inputs(forwarder)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(forwarder).buildTransaction(params)

    def estimate_gas(self, forwarder: str, tx_params: Optional[TxParams] = None) -> int:
        """Estimate gas consumption of method call."""
        forwarder = self.validate_and_normalize_inputs(forwarder)
        params = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(forwarder).estimateGas(params)
class MulticallMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the multicall method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data.

        :param web3_or_provider: web3 instance or provider backing the calls.
        :param contract_address: address of the deployed contract.
        :param contract_function: bound ``multicall`` contract function.
        :param validator: validator for method inputs (optional).
        """
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, data: List[Union[bytes, str]]):
        """Validate the inputs to the multicall method."""
        self.validator.assert_valid(
            method_name="multicall",
            parameter_name="data",
            argument_value=data,
        )
        return data

    def call(
        self, data: List[Union[bytes, str]], tx_params: Optional[TxParams] = None
    ) -> List[Union[bytes, str]]:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method: one bytes blob
            per input call.
        """
        data = self.validate_and_normalize_inputs(data)
        tx_params = super().normalize_tx_params(tx_params)
        returned = self._underlying_method(data).call(tx_params.as_dict())
        # BUG FIX: the generated code did `Union[bytes, str](element)`, which
        # raises TypeError because typing.Union is not instantiable.  web3
        # decodes each bytes[] element as bytes, so normalize explicitly.
        return [bytes(element) for element in returned]

    def send_transaction(
        self, data: List[Union[bytes, str]], tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        data = self.validate_and_normalize_inputs(data)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(data).transact(tx_params.as_dict())

    def build_transaction(
        self, data: List[Union[bytes, str]], tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        data = self.validate_and_normalize_inputs(data)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(data).buildTransaction(tx_params.as_dict())

    def estimate_gas(
        self, data: List[Union[bytes, str]], tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        data = self.validate_and_normalize_inputs(data)
        tx_params = super().normalize_tx_params(tx_params)
        return self._underlying_method(data).estimateGas(tx_params.as_dict())
class RemoveMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the remove method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, deployer: str, deployment: str):
        """Validate the inputs to the remove method."""
        # Validation order matters for error reporting: each argument is
        # asserted and checksummed before the next is examined.
        self.validator.assert_valid(
            method_name="remove",
            parameter_name="_deployer",
            argument_value=deployer,
        )
        deployer = self.validate_and_checksum_address(deployer)
        self.validator.assert_valid(
            method_name="remove",
            parameter_name="_deployment",
            argument_value=deployment,
        )
        deployment = self.validate_and_checksum_address(deployment)
        return (deployer, deployment)

    def call(
        self, deployer: str, deployment: str, tx_params: Optional[TxParams] = None
    ) -> None:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        deployer, deployment = self.validate_and_normalize_inputs(deployer, deployment)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        self._underlying_method(deployer, deployment).call(call_args)

    def send_transaction(
        self, deployer: str, deployment: str, tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        deployer, deployment = self.validate_and_normalize_inputs(deployer, deployment)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(deployer, deployment).transact(call_args)

    def build_transaction(
        self, deployer: str, deployment: str, tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        deployer, deployment = self.validate_and_normalize_inputs(deployer, deployment)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(deployer, deployment).buildTransaction(call_args)

    def estimate_gas(
        self, deployer: str, deployment: str, tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        deployer, deployment = self.validate_and_normalize_inputs(deployer, deployment)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(deployer, deployment).estimateGas(call_args)
class RenounceRoleMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the renounceRole method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, role: Union[bytes, str], account: str):
        """Validate the inputs to the renounceRole method."""
        self.validator.assert_valid(
            method_name="renounceRole",
            parameter_name="role",
            argument_value=role,
        )
        self.validator.assert_valid(
            method_name="renounceRole",
            parameter_name="account",
            argument_value=account,
        )
        # Only the account is an address; role is an opaque bytes32 value.
        account = self.validate_and_checksum_address(account)
        return (role, account)

    def call(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        role, account = self.validate_and_normalize_inputs(role, account)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        self._underlying_method(role, account).call(call_args)

    def send_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        role, account = self.validate_and_normalize_inputs(role, account)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).transact(call_args)

    def build_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        role, account = self.validate_and_normalize_inputs(role, account)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).buildTransaction(call_args)

    def estimate_gas(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        role, account = self.validate_and_normalize_inputs(role, account)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).estimateGas(call_args)
class RevokeRoleMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the revokeRole method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, role: Union[bytes, str], account: str):
        """Validate the inputs to the revokeRole method."""
        self.validator.assert_valid(
            method_name="revokeRole",
            parameter_name="role",
            argument_value=role,
        )
        self.validator.assert_valid(
            method_name="revokeRole",
            parameter_name="account",
            argument_value=account,
        )
        # Only the account is an address; role is an opaque bytes32 value.
        account = self.validate_and_checksum_address(account)
        return (role, account)

    def call(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> None:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method.
        """
        role, account = self.validate_and_normalize_inputs(role, account)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        self._underlying_method(role, account).call(call_args)

    def send_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        role, account = self.validate_and_normalize_inputs(role, account)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).transact(call_args)

    def build_transaction(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        role, account = self.validate_and_normalize_inputs(role, account)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).buildTransaction(call_args)

    def estimate_gas(
        self,
        role: Union[bytes, str],
        account: str,
        tx_params: Optional[TxParams] = None,
    ) -> int:
        """Estimate gas consumption of method call."""
        role, account = self.validate_and_normalize_inputs(role, account)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(role, account).estimateGas(call_args)
class SupportsInterfaceMethod(ContractMethod):  # pylint: disable=invalid-name
    """Various interfaces to the supportsInterface method."""

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        contract_function: ContractFunction,
        validator: Validator = None,
    ):
        """Persist instance data."""
        super().__init__(web3_or_provider, contract_address, validator)
        self._underlying_method = contract_function

    def validate_and_normalize_inputs(self, interface_id: Union[bytes, str]):
        """Validate the inputs to the supportsInterface method."""
        self.validator.assert_valid(
            method_name="supportsInterface",
            parameter_name="interfaceId",
            argument_value=interface_id,
        )
        return interface_id

    def call(
        self, interface_id: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> bool:
        """Execute underlying contract method via eth_call.

        :param tx_params: transaction parameters
        :returns: the return value of the underlying method, coerced to bool.
        """
        interface_id = self.validate_and_normalize_inputs(interface_id)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        return bool(self._underlying_method(interface_id).call(call_args))

    def send_transaction(
        self, interface_id: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> Union[HexBytes, bytes]:
        """Execute underlying contract method via eth_sendTransaction.

        :param tx_params: transaction parameters
        """
        interface_id = self.validate_and_normalize_inputs(interface_id)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(interface_id).transact(call_args)

    def build_transaction(
        self, interface_id: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> dict:
        """Construct calldata to be used as input to the method."""
        interface_id = self.validate_and_normalize_inputs(interface_id)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(interface_id).buildTransaction(call_args)

    def estimate_gas(
        self, interface_id: Union[bytes, str], tx_params: Optional[TxParams] = None
    ) -> int:
        """Estimate gas consumption of method call."""
        interface_id = self.validate_and_normalize_inputs(interface_id)
        call_args = super().normalize_tx_params(tx_params).as_dict()
        return self._underlying_method(interface_id).estimateGas(call_args)
# pylint: disable=too-many-public-methods,too-many-instance-attributes
class TWRegistry:
    """Wrapper class for TWRegistry Solidity contract.

    Exposes one constructor-initialized *Method object per contract
    function, plus event-log accessors and the contract ABI.
    """

    default_admin_role: DefaultAdminRoleMethod
    """Constructor-initialized instance of
    :class:`DefaultAdminRoleMethod`.
    """

    operator_role: OperatorRoleMethod
    """Constructor-initialized instance of
    :class:`OperatorRoleMethod`.
    """

    add: AddMethod
    """Constructor-initialized instance of
    :class:`AddMethod`.
    """

    count: CountMethod
    """Constructor-initialized instance of
    :class:`CountMethod`.
    """

    get_all: GetAllMethod
    """Constructor-initialized instance of
    :class:`GetAllMethod`.
    """

    get_role_admin: GetRoleAdminMethod
    """Constructor-initialized instance of
    :class:`GetRoleAdminMethod`.
    """

    get_role_member: GetRoleMemberMethod
    """Constructor-initialized instance of
    :class:`GetRoleMemberMethod`.
    """

    get_role_member_count: GetRoleMemberCountMethod
    """Constructor-initialized instance of
    :class:`GetRoleMemberCountMethod`.
    """

    grant_role: GrantRoleMethod
    """Constructor-initialized instance of
    :class:`GrantRoleMethod`.
    """

    has_role: HasRoleMethod
    """Constructor-initialized instance of
    :class:`HasRoleMethod`.
    """

    is_trusted_forwarder: IsTrustedForwarderMethod
    """Constructor-initialized instance of
    :class:`IsTrustedForwarderMethod`.
    """

    multicall: MulticallMethod
    """Constructor-initialized instance of
    :class:`MulticallMethod`.
    """

    remove: RemoveMethod
    """Constructor-initialized instance of
    :class:`RemoveMethod`.
    """

    renounce_role: RenounceRoleMethod
    """Constructor-initialized instance of
    :class:`RenounceRoleMethod`.
    """

    revoke_role: RevokeRoleMethod
    """Constructor-initialized instance of
    :class:`RevokeRoleMethod`.
    """

    supports_interface: SupportsInterfaceMethod
    """Constructor-initialized instance of
    :class:`SupportsInterfaceMethod`.
    """

    def __init__(
        self,
        web3_or_provider: Union[Web3, BaseProvider],
        contract_address: str,
        validator: TWRegistryValidator = None,
    ):
        """Get an instance of wrapper for smart contract.

        :param web3_or_provider: Either an instance of `web3.Web3`:code: or
            `web3.providers.base.BaseProvider`:code:
        :param contract_address: where the contract has been deployed
        :param validator: for validation of method inputs.
        """
        # pylint: disable=too-many-statements
        self.contract_address = contract_address

        # Fall back to the default validator when none was supplied.
        if not validator:
            validator = TWRegistryValidator(web3_or_provider, contract_address)

        web3 = None
        if isinstance(web3_or_provider, BaseProvider):
            web3 = Web3(web3_or_provider)
        elif isinstance(web3_or_provider, Web3):
            web3 = web3_or_provider
        else:
            raise TypeError(
                "Expected parameter 'web3_or_provider' to be an instance of either"
                + " Web3 or BaseProvider"
            )

        # if any middleware was imported, inject it
        try:
            MIDDLEWARE
        except NameError:
            # MIDDLEWARE is an optional module-level name; absence is fine.
            pass
        else:
            try:
                for middleware in MIDDLEWARE:
                    web3.middleware_onion.inject(
                        middleware["function"],
                        layer=middleware["layer"],
                    )
            except ValueError as value_error:
                # Re-injecting an already-registered middleware is harmless.
                if value_error.args == (
                    "You can't add the same un-named instance twice",
                ):
                    pass

        self._web3_eth = web3.eth

        # Bind one *Method wrapper per contract function.
        functions = self._web3_eth.contract(
            address=to_checksum_address(contract_address), abi=TWRegistry.abi()
        ).functions

        self.default_admin_role = DefaultAdminRoleMethod(
            web3_or_provider, contract_address, functions.DEFAULT_ADMIN_ROLE
        )

        self.operator_role = OperatorRoleMethod(
            web3_or_provider, contract_address, functions.OPERATOR_ROLE
        )

        self.add = AddMethod(
            web3_or_provider, contract_address, functions.add, validator
        )

        self.count = CountMethod(
            web3_or_provider, contract_address, functions.count, validator
        )

        self.get_all = GetAllMethod(
            web3_or_provider, contract_address, functions.getAll, validator
        )

        self.get_role_admin = GetRoleAdminMethod(
            web3_or_provider, contract_address, functions.getRoleAdmin, validator
        )

        self.get_role_member = GetRoleMemberMethod(
            web3_or_provider, contract_address, functions.getRoleMember, validator
        )

        self.get_role_member_count = GetRoleMemberCountMethod(
            web3_or_provider, contract_address, functions.getRoleMemberCount, validator
        )

        self.grant_role = GrantRoleMethod(
            web3_or_provider, contract_address, functions.grantRole, validator
        )

        self.has_role = HasRoleMethod(
            web3_or_provider, contract_address, functions.hasRole, validator
        )

        self.is_trusted_forwarder = IsTrustedForwarderMethod(
            web3_or_provider, contract_address, functions.isTrustedForwarder, validator
        )

        self.multicall = MulticallMethod(
            web3_or_provider, contract_address, functions.multicall, validator
        )

        self.remove = RemoveMethod(
            web3_or_provider, contract_address, functions.remove, validator
        )

        self.renounce_role = RenounceRoleMethod(
            web3_or_provider, contract_address, functions.renounceRole, validator
        )

        self.revoke_role = RevokeRoleMethod(
            web3_or_provider, contract_address, functions.revokeRole, validator
        )

        self.supports_interface = SupportsInterfaceMethod(
            web3_or_provider, contract_address, functions.supportsInterface, validator
        )

    # NOTE(review): the event getters below use web3.py v5-style camelCase
    # APIs (getTransactionReceipt / processReceipt); confirm the pinned
    # web3 version before upgrading.

    def get_added_event(self, tx_hash: Union[HexBytes, bytes]) -> Tuple[AttributeDict]:
        """Get log entry for Added event.

        :param tx_hash: hash of transaction emitting Added event
        :returns: tuple of decoded event logs from the receipt.
        """
        tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
        return (
            self._web3_eth.contract(
                address=to_checksum_address(self.contract_address), abi=TWRegistry.abi()
            )
            .events.Added()
            .processReceipt(tx_receipt)
        )

    def get_deleted_event(
        self, tx_hash: Union[HexBytes, bytes]
    ) -> Tuple[AttributeDict]:
        """Get log entry for Deleted event.

        :param tx_hash: hash of transaction emitting Deleted event
        :returns: tuple of decoded event logs from the receipt.
        """
        tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
        return (
            self._web3_eth.contract(
                address=to_checksum_address(self.contract_address), abi=TWRegistry.abi()
            )
            .events.Deleted()
            .processReceipt(tx_receipt)
        )

    def get_role_admin_changed_event(
        self, tx_hash: Union[HexBytes, bytes]
    ) -> Tuple[AttributeDict]:
        """Get log entry for RoleAdminChanged event.

        :param tx_hash: hash of transaction emitting RoleAdminChanged event
        :returns: tuple of decoded event logs from the receipt.
        """
        tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
        return (
            self._web3_eth.contract(
                address=to_checksum_address(self.contract_address), abi=TWRegistry.abi()
            )
            .events.RoleAdminChanged()
            .processReceipt(tx_receipt)
        )

    def get_role_granted_event(
        self, tx_hash: Union[HexBytes, bytes]
    ) -> Tuple[AttributeDict]:
        """Get log entry for RoleGranted event.

        :param tx_hash: hash of transaction emitting RoleGranted event
        :returns: tuple of decoded event logs from the receipt.
        """
        tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
        return (
            self._web3_eth.contract(
                address=to_checksum_address(self.contract_address), abi=TWRegistry.abi()
            )
            .events.RoleGranted()
            .processReceipt(tx_receipt)
        )

    def get_role_revoked_event(
        self, tx_hash: Union[HexBytes, bytes]
    ) -> Tuple[AttributeDict]:
        """Get log entry for RoleRevoked event.

        :param tx_hash: hash of transaction emitting RoleRevoked event
        :returns: tuple of decoded event logs from the receipt.
        """
        tx_receipt = self._web3_eth.getTransactionReceipt(tx_hash)
        return (
            self._web3_eth.contract(
                address=to_checksum_address(self.contract_address), abi=TWRegistry.abi()
            )
            .events.RoleRevoked()
            .processReceipt(tx_receipt)
        )

    @staticmethod
    def abi():
        """Return the ABI to the underlying contract."""
        return json.loads(
            '[{"inputs":[{"internalType":"address","name":"_trustedForwarder","type":"address"}],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"deployer","type":"address"},{"indexed":true,"internalType":"address","name":"deployment","type":"address"}],"name":"Added","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"deployer","type":"address"},{"indexed":true,"internalType":"address","name":"deployment","type":"address"}],"name":"Deleted","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"role","type":"bytes32"},{"indexed":true,"internalType":"bytes32","name":"previousAdminRole","type":"bytes32"},{"indexed":true,"internalType":"bytes32","name":"newAdminRole","type":"bytes32"}],"name":"RoleAdminChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"role","type":"bytes32"},{"indexed":true,"internalType":"address","name":"account","type":"address"},{"indexed":true,"internalType":"address","name":"sender","type":"address"}],"name":"RoleGranted","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"bytes32","name":"role","type":"bytes32"},{"indexed":true,"internalType":"address","name":"account","type":"address"},{"indexed":true,"internalType":"address","name":"sender","type":"address"}],"name":"RoleRevoked","type":"event"},{"inputs":[],"name":"DEFAULT_ADMIN_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"OPERATOR_ROLE","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_deployer","type":"address"},{"internalType":"address","name":"_deployment","type":"address"}],"name":"add","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_deployer","type":"address"}],"name":"count","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_deployer","type":"address"}],"name":"getAll","outputs":[{"internalType":"address[]","name":"","type":"address[]"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"}],"name":"getRoleAdmin","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"uint256","name":"index","type":"uint256"}],"name":"getRoleMember","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"}],"name":"getRoleMemberCount","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"grantRole","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"hasRole","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"forwarder","type":"address"}],"name":"isTrustedForwarder","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"bytes[]","name":"data","type":"bytes[]"}],"name":"multicall","outputs":[{"internalType":"bytes[]","name":"results","type":"bytes[]"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"_deployer","type":"address"},{"internalType":"address","name":"_deployment","type":"address"}],"name":"remove","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"renounceRole","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes32","name":"role","type":"bytes32"},{"internalType":"address","name":"account","type":"address"}],"name":"revokeRole","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"bytes4","name":"interfaceId","type":"bytes4"}],"name":"supportsInterface","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"}]'  # noqa: E501 (line-too-long)
        )
# pylint: disable=too-many-lines
| 40.457639 | 4,840 | 0.658937 | 6,213 | 58,259 | 5.929986 | 0.045228 | 0.07665 | 0.043428 | 0.049399 | 0.862579 | 0.835627 | 0.8128 | 0.799392 | 0.789621 | 0.745542 | 0 | 0.004079 | 0.225716 | 58,259 | 1,439 | 4,841 | 40.485754 | 0.812676 | 0.159855 | 0 | 0.735949 | 1 | 0.00106 | 0.11431 | 0.103655 | 0 | 0 | 0 | 0 | 0.022269 | 1 | 0.107105 | false | 0.003181 | 0.015907 | 0 | 0.242842 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
ae77cf777bf1ef0d2319c8295248e7d049bd2a15 | 12,326 | py | Python | old_tests/test_bonded.py | schmolly/timemachine | 7d13a0406dc2d09ac67892988641ba4965bfb206 | [
"Apache-2.0"
] | 3 | 2020-01-28T21:19:54.000Z | 2020-01-29T16:03:44.000Z | old_tests/test_bonded.py | schmolly/timemachine | 7d13a0406dc2d09ac67892988641ba4965bfb206 | [
"Apache-2.0"
] | null | null | null | old_tests/test_bonded.py | schmolly/timemachine | 7d13a0406dc2d09ac67892988641ba4965bfb206 | [
"Apache-2.0"
] | null | null | null | import unittest
import numpy as np
import functools
from jax.config import config; config.update("jax_enable_x64", True)
from jax.test_util import check_grads
from tests.invariances import assert_potential_invariance
from timemachine.potentials import bonded
class TestAngles(unittest.TestCase):
    """Finite-difference gradient checks for bonded.harmonic_angle."""

    def test_jax_harmonic_angle_4d(self):
        """Check 1st/2nd-order gradients on 4D coordinates, with and without cos_angles."""
        # methane-like geometry with an extra 4th coordinate per atom
        x0 = np.array([
            [ 0.0637,   0.0126,   0.2203,  0.5], # C
            [ 1.0573,  -0.2011,   1.2864,  0.3], # H
            [ 2.3928,   1.2209,  -0.2230, -0.2], # H
            [-0.6891,   1.6983,   0.0780,  0.4], # H
            [-0.6312,  -1.6261,  -0.2601,  0.9]  # H
        ], dtype=np.float64)

        params = np.array([75, 1.91, 0.45], dtype=np.float64)
        angle_idxs = np.array([[1,0,2],[1,0,3],[1,0,4],[2,0,3],[2,0,4],[3,0,4]])
        param_idxs = np.array([[0,1],[0,1],[0,2],[0,1],[0,1],[0,2]])

        # enable cos angles
        energy_fn = functools.partial(bonded.harmonic_angle,
            angle_idxs=angle_idxs,
            param_idxs=param_idxs,
            cos_angles=True)

        check_grads(energy_fn, (x0, params, None), order=1, eps=1e-5)
        check_grads(energy_fn, (x0, params, None), order=2, eps=1e-7)

        # disable cos angles
        energy_fn = functools.partial(bonded.harmonic_angle,
            angle_idxs=angle_idxs,
            param_idxs=param_idxs,
            cos_angles=False)

        check_grads(energy_fn, (x0, params, None), order=1, eps=1e-5)
        check_grads(energy_fn, (x0, params, None), order=2, eps=1e-7)

    def test_jax_harmonic_angle(self):
        """Check potential invariances on 3D coordinates, with and without cos_angles."""
        x0 = np.array([
            [ 0.0637,   0.0126,   0.2203], # C
            [ 1.0573,  -0.2011,   1.2864], # H
            [ 2.3928,   1.2209,  -0.2230], # H
            [-0.6891,   1.6983,   0.0780], # H
            [-0.6312,  -1.6261,  -0.2601], # H
        ], dtype=np.float64)

        params = np.array([75, 1.91, 0.45], dtype=np.float64)
        angle_idxs = np.array([[1,0,2],[1,0,3],[1,0,4],[2,0,3],[2,0,4],[3,0,4]])
        param_idxs = np.array([[0,1],[0,1],[0,2],[0,1],[0,1],[0,2]])

        # enable cos angles
        energy_fn = functools.partial(bonded.harmonic_angle,
            angle_idxs=angle_idxs,
            param_idxs=param_idxs,
            cos_angles=True)

        box = np.array([
            [2.0, 0.5, 0.6],
            [0.6, 1.6, 0.3],
            [0.4, 0.7, 1.1]
        ], dtype=np.float64)

        assert_potential_invariance(energy_fn, x0, params, box)

        # disable cos angles
        energy_fn = functools.partial(bonded.harmonic_angle,
            angle_idxs=angle_idxs,
            param_idxs=param_idxs,
            cos_angles=False)

        assert_potential_invariance(energy_fn, x0, params, box)
class TestBonded(unittest.TestCase):
    """Gradient and invariance checks for bonded.harmonic_bond."""

    def test_jax_harmonic_bond_4d(self):
        """First- and second-order gradient checks with 4D coordinates."""
        coords = np.array([
            [1.0, 0.2, 3.3, 1.0],    # H
            [-0.5, -1.1, -0.9, 0.5], # C
            [3.4, 5.5, 0.2, -0.3],   # H
        ], dtype=np.float64)

        bond_params = np.array([10.0, 3.0, 5.5], dtype=np.float64)
        param_idxs = np.array([[0, 1], [1, 2]], dtype=np.int32)
        bond_idxs = np.array([[0, 1], [1, 2]], dtype=np.int32)

        energy_fn = functools.partial(
            bonded.harmonic_bond, param_idxs=param_idxs, bond_idxs=bond_idxs
        )

        check_grads(energy_fn, (coords, bond_params, None), order=1, eps=1e-5)
        check_grads(energy_fn, (coords, bond_params, None), order=2, eps=1e-7)

    def test_jax_harmonic_bond(self):
        """Potential-invariance check with 3D coordinates and a triclinic box."""
        coords = np.array([
            [1.0, 0.2, 3.3],    # H
            [-0.5, -1.1, -0.9], # C
            [3.4, 5.5, 0.2],    # H
        ], dtype=np.float64)

        bond_params = np.array([10.0, 3.0, 5.5], dtype=np.float64)
        param_idxs = np.array([[0, 1], [1, 2]], dtype=np.int32)
        bond_idxs = np.array([[0, 1], [1, 2]], dtype=np.int32)

        energy_fn = functools.partial(
            bonded.harmonic_bond, param_idxs=param_idxs, bond_idxs=bond_idxs
        )

        box = np.array([
            [2.0, 0.5, 0.6],
            [0.6, 1.6, 0.3],
            [0.4, 0.7, 1.1],
        ], dtype=np.float64)

        assert_potential_invariance(energy_fn, coords, bond_params, box)
class TestPeriodicTorsion(unittest.TestCase):
    """Gradient and invariance checks for bonded.periodic_torsion."""

    def setUp(self):
        # Pre-generated butane-like conformers; the *nan* variants are
        # geometries whose finite-difference gradients are ill-conditioned.
        self.conformers = np.array([
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901],
             [ 0.561317027011325 , 0.2066950040043141, 0.3670430960815993],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652],
             [ 0.9399773448903637,-0.6888774474110431, 0.2104211949995816]],
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901],
             [ 0.5613170270113252, 0.2066950040043142, 0.3670430960815993],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652],
             [ 1.283345455745044 ,-0.0356257425880843,-0.2573923896494185]],
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901],
             [ 0.561317027011325 , 0.2066950040043142, 0.3670430960815992],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652],
             [ 1.263820400176392 , 0.7964992122869241, 0.0084568741589791]],
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901],
             [ 0.5613170270113252, 0.2066950040043142, 0.3670430960815992],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652],
             [ 0.8993534242298198, 1.042445571242743 , 0.7635483993060286]],
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901],
             [ 0.5613170270113255, 0.2066950040043142, 0.3670430960815993],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652],
             [ 0.5250337847650304, 0.476091386095139 , 1.3136545198545133]],
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901],
             [ 0.5613170270113255, 0.2066950040043141, 0.3670430960815993],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652],
             [ 0.485009232042489 ,-0.3818599172073237, 1.1530102055165103]],
        ], dtype=np.float64)

        self.nan_conformers = np.array([
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901],
             [ 0.5613170270113252, 0.2066950040043142, 0.3670430960815993],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652],
             [ 1.2278668040866427, 0.8805184219394547, 0.099391329616366 ]],
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901],
             [ 0.561317027011325 , 0.206695004004314 , 0.3670430960815994],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652],
             [ 0.5494071252089705,-0.5626592973923106, 0.9817919758125693]],
        ], dtype=np.float64)

        self.conformers_4d = np.array([
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901, 0.4],
             [ 0.561317027011325 , 0.2066950040043141, 0.3670430960815993, 0.3],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652, 0.1],
             [ 0.9399773448903637,-0.6888774474110431, 0.2104211949995816, 0.9]],
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901,-0.5],
             [ 0.5613170270113252, 0.2066950040043142, 0.3670430960815993, 0.4],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652, 0.2],
             [ 1.283345455745044 ,-0.0356257425880843,-0.2573923896494185, 0.3]],
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901, 0.4],
             [ 0.561317027011325 , 0.2066950040043142, 0.3670430960815992, 0.1],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652, 0.7],
             [ 1.263820400176392 , 0.7964992122869241, 0.0084568741589791, 0.8]],
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901, 0.1],
             [ 0.5613170270113252, 0.2066950040043142, 0.3670430960815992, 0.1],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652, 0.4],
             [ 0.8993534242298198, 1.042445571242743 , 0.7635483993060286,-0.3]],
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901, 0.5],
             [ 0.5613170270113255, 0.2066950040043142, 0.3670430960815993, 0.6],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652, 0.9],
             [ 0.5250337847650304, 0.476091386095139 , 1.3136545198545133, 0.2]],
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901, 0.0],
             [ 0.5613170270113255, 0.2066950040043141, 0.3670430960815993, 0.1],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652, 0.4],
             [ 0.485009232042489 ,-0.3818599172073237, 1.1530102055165103, 0.3]],
        ], dtype=np.float64)

        self.nan_conformers_4d = np.array([
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901, 0.5],
             [ 0.5613170270113252, 0.2066950040043142, 0.3670430960815993, 0.2],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652, 0.3],
             [ 1.2278668040866427, 0.8805184219394547, 0.099391329616366 , 0.6]],
            [[-0.6000563454193615, 0.376172954382274 ,-0.2487295756125901, 0.1],
             [ 0.561317027011325 , 0.206695004004314 , 0.3670430960815994,-0.2],
             [-1.187055522272264 ,-0.3415864358441354, 0.0871382207830652, 0.3],
             [ 0.5494071252089705,-0.5626592973923106, 0.9817919758125693, 0.9]],
        ], dtype=np.float64)

    def _make_torsion_energy_fn(self):
        """Build the shared torsion setup used by both tests.

        Returns a partially-applied periodic_torsion energy function and
        its flat parameter array (k0..k2, t0..t2, n0..n2).
        """
        torsion_idxs = np.array([
            [0, 1, 2, 3],
            [0, 1, 2, 3],
            [0, 1, 2, 3],
        ], dtype=np.int32)

        params = np.array([
            2.3, # k0
            5.4, # k1
            9.0, # k2
            0.0, # t0
            3.0, # t1
            5.8, # t2
            1.0, # n0
            2.0, # n1
            3.0  # n2
        ])

        param_idxs = np.array([
            [0, 3, 6],
            [1, 4, 7],
            [2, 5, 8]
        ], dtype=np.int32)

        energy_fn = functools.partial(
            bonded.periodic_torsion,
            param_idxs=param_idxs,
            torsion_idxs=torsion_idxs)

        return energy_fn, params

    def test_jax_torsions_4d(self):
        """
        Test agreement of torsions with OpenMM's implementation of torsion terms.
        """
        energy_fn, params = self._make_torsion_energy_fn()

        # there's no good finite difference tests that we can do for the
        # nan_conformers, so instead we compare against the OpenMM
        # implementation later on
        for conf in self.conformers_4d:
            check_grads(energy_fn, (conf, params, None), order=1, eps=1e-5)
            check_grads(energy_fn, (conf, params, None), order=2, eps=1e-7)

    def test_jax_torsions(self):
        """
        Test agreement of torsions with OpenMM's implementation of torsion terms.
        """
        energy_fn, params = self._make_torsion_energy_fn()

        box = np.array([
            [2.0, 0.5, 0.6],
            [0.6, 1.6, 0.3],
            [0.4, 0.7, 1.1]
        ], dtype=np.float64)

        # there's no good finite difference tests that we can do for the
        # nan_conformers, so instead we compare against the OpenMM
        # implementation later on
        for conf in self.conformers:
            assert_potential_invariance(energy_fn, conf, params, box)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| 39.254777 | 103 | 0.562794 | 1,490 | 12,326 | 4.56443 | 0.11745 | 0.030878 | 0.018821 | 0.077636 | 0.937509 | 0.929128 | 0.849875 | 0.701073 | 0.69725 | 0.623732 | 0 | 0.441906 | 0.293364 | 12,326 | 313 | 104 | 39.380192 | 0.338921 | 0.047866 | 0 | 0.668033 | 0 | 0 | 0.001889 | 0 | 0 | 0 | 0 | 0 | 0.020492 | 1 | 0.028689 | false | 0 | 0.028689 | 0 | 0.069672 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
ae8d03601121b48037efe80015073951d21238d1 | 361 | py | Python | tests/internal/usage_classes/test_usage_classes_spot_auto.py | frolovv/aws.ec2.compare | 582805823492f833d65c0441c4a14dce697c12aa | [
"Apache-2.0"
] | null | null | null | tests/internal/usage_classes/test_usage_classes_spot_auto.py | frolovv/aws.ec2.compare | 582805823492f833d65c0441c4a14dce697c12aa | [
"Apache-2.0"
] | null | null | null | tests/internal/usage_classes/test_usage_classes_spot_auto.py | frolovv/aws.ec2.compare | 582805823492f833d65c0441c4a14dce697c12aa | [
"Apache-2.0"
] | 1 | 2021-12-15T11:58:22.000Z | 2021-12-15T11:58:22.000Z |
# Testing module usage_classes.spot
import pytest
import ec2_compare.internal.usage_classes.spot
def test_get_internal_data_usage_classes_spot_get_instances_list():
    """The spot usage-class module exposes a non-empty instance list."""
    instances = ec2_compare.internal.usage_classes.spot.get_instances_list()
    assert len(instances) > 0
def test_get_internal_data_usage_classes_spot_get():
    """The spot usage-class module exposes non-empty `get` data."""
    spot_data = ec2_compare.internal.usage_classes.spot.get
    assert len(spot_data) > 0
| 36.1 | 78 | 0.853186 | 56 | 361 | 5.053571 | 0.339286 | 0.254417 | 0.339223 | 0.268551 | 0.826855 | 0.826855 | 0.614841 | 0.614841 | 0.614841 | 0 | 0 | 0.014925 | 0.072022 | 361 | 9 | 79 | 40.111111 | 0.829851 | 0.091413 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.333333 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 10 |
882b9ab187e01e3540cdb22d7836651066274ccb | 145 | py | Python | Lib/Scripts/glyphs/transform/skew.py | gferreira/hTools2 | a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c | [
"BSD-3-Clause"
] | 11 | 2015-01-06T15:43:56.000Z | 2019-07-27T00:35:20.000Z | hTools2.roboFontExt/lib/Scripts/selected glyphs/transform/skew.py | gferreira/hTools2_extension | 9e5150082a0a39847c1078aac3dc38d914a44f83 | [
"BSD-3-Clause"
] | 2 | 2017-08-08T21:02:17.000Z | 2019-12-18T15:55:48.000Z | Lib/Scripts/glyphs/transform/skew.py | gferreira/hTools2 | a75a671b81a0f4ce5c82b2ad3e2f971ca3e3d98c | [
"BSD-3-Clause"
] | 4 | 2015-01-10T13:58:50.000Z | 2019-12-18T15:40:14.000Z | # [h] skew glyphs dialog
# [h] skew glyphs dialog

# `reload` stopped being a builtin in Python 3; fall back to importlib so the
# script keeps working on both interpreter generations.
try:
    reload
except NameError:
    from importlib import reload

import hTools2.dialogs.glyphs.skew

# Re-import so edits to the module are picked up, then open the dialog.
reload(hTools2.dialogs.glyphs.skew)
hTools2.dialogs.glyphs.skew.skewGlyphsDialog()
| 20.714286 | 46 | 0.806897 | 19 | 145 | 6.157895 | 0.473684 | 0.358974 | 0.512821 | 0.615385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.022388 | 0.075862 | 145 | 6 | 47 | 24.166667 | 0.850746 | 0.151724 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.333333 | 0 | 0.333333 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
888113e2ddb7a2bfd267851c16b7fe6a6e9c55ea | 13,149 | py | Python | tests/test_csaf_core_rules_mandatory.py | sthagen/turvallisuusneuvonta | 503e6026463e475b02cb75fd23ae3faeb5f25b11 | [
"MIT"
] | 1 | 2021-12-08T11:12:30.000Z | 2021-12-08T11:12:30.000Z | tests/test_csaf_core_rules_mandatory.py | sthagen/turvallisuusneuvonta | 503e6026463e475b02cb75fd23ae3faeb5f25b11 | [
"MIT"
] | 2 | 2022-02-14T15:43:08.000Z | 2022-02-14T21:22:13.000Z | tests/test_csaf_core_rules_mandatory.py | sthagen/turvallisuusneuvonta | 503e6026463e475b02cb75fd23ae3faeb5f25b11 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# pylint: disable=line-too-long,missing-docstring,reimported,unused-import,unused-variable
import json
import pathlib
import string
import pytest
from hypothesis import given, strategies as st
import turvallisuusneuvonta.csaf.core.rules.mandatory.mandatory as mandatory
# Encoding used when reading the JSON fixture files.
ENCODING = 'utf-8'
# Letters used to build category strings for the hypothesis strategies;
# presumably chosen so strings built from them never collide with a profile
# name — TODO confirm against mandatory.val_cat_nam.PROFILES.
PROFILE_SAFE_LETTERS = ('k', 'j', 'q', 'b', 'g', 'h', 'w', 'z')
# Length of the longest known profile name (size bound for generated text).
PROFILE_MAX_LEN = max(len(profile) for profile in mandatory.val_cat_nam.PROFILES)
# Lowercase ASCII letters as a tuple, usable as a hypothesis alphabet.
LOWER_ASCII = tuple(list(string.ascii_lowercase))
def test_mandatory_exists_single_claim_single_path():
    """A single claim over one truthy path yields one positive result."""
    document = {'exists': 'truthy'}
    claims = {'sartre': ['exists']}
    expected = (('sartre', 'exists', True),)
    assert mandatory.exists(document, claims) == expected
def test_mandatory_exists_not_single_claim_single_path():
    """A single claim over one falsy path yields one negative result."""
    document = {'exists': ''}
    claims = {'sartre': ['exists']}
    expected = (('sartre', 'exists', False),)
    assert mandatory.exists(document, claims) == expected
def test_mandatory_exists_single_claim_multiple_paths():
    """One claim over two truthy paths yields two positive results."""
    document = {'exists': 'truthy', 'also': True}
    claims = {'sartre': ['exists', 'also']}
    expected = (
        ('sartre', 'exists', True),
        ('sartre', 'also', True),
    )
    assert mandatory.exists(document, claims) == expected
def test_mandatory_exists_single_claim_multiple_paths_mixed_results():
    """One claim over a truthy and a falsy path yields mixed results."""
    document = {'exists': 'truthy', 'also': False}
    claims = {'sartre': ['exists', 'also']}
    expected = (
        ('sartre', 'exists', True),
        ('sartre', 'also', False),
    )
    assert mandatory.exists(document, claims) == expected
def test_mandatory_exists_multiple_claims_single_paths():
    """Two claims over one truthy path each yield one result apiece."""
    document = {'exists': 'truthy', 'also': True}
    claims = {'sartre': ['exists'], 'nirvana': ['also']}
    expected = (
        ('sartre', 'exists', True),
        ('nirvana', 'also', True),
    )
    assert mandatory.exists(document, claims) == expected
def test_mandatory_valid_category_name_exempt():
    """An exact profile name at the path must be skipped."""
    profiles = mandatory.val_cat_nam.PROFILES
    document = {'name': profiles[0]}
    path = 'name'
    assert mandatory.must_skip(document, path, profiles) == (document['name'], path, True)
def test_mandatory_valid_category_name_not_exempt():
    """A string merely containing profile names must not be skipped."""
    profiles = mandatory.val_cat_nam.PROFILES
    document = {'name': '=x='.join(profiles)}
    path = 'name'
    assert mandatory.must_skip(document, path, profiles) == (document['name'], path, False)
@given(st.text(min_size=1, max_size=2))
def test_mandatory_valid_ok_too_short_for_profile(category):
    """Categories shorter than any profile leave validation incomplete."""
    document = {'document': {'category': category}}
    assert mandatory.is_valid(document) is NotImplemented
@given(st.text(min_size=3, max_size=PROFILE_MAX_LEN))
def test_mandatory_valid_ok_at_least_one_char_separate_from_profile(category):
    """Forcing one profile-safe char keeps the category off every profile."""
    category = PROFILE_SAFE_LETTERS[0] + category[1:]
    document = {'document': {'category': category}}
    assert mandatory.is_valid(document) is NotImplemented
@given(st.text(alphabet=PROFILE_SAFE_LETTERS, min_size=3, max_size=PROFILE_MAX_LEN))
def test_mandatory_valid_ok_profile_safe_alphabet(category_part):
    """Categories built only from profile-safe letters stay incomplete."""
    category = category_part + PROFILE_SAFE_LETTERS[0]
    document = {'document': {'category': category}}
    assert mandatory.is_valid(document) is NotImplemented
@given(st.text(alphabet=LOWER_ASCII, min_size=PROFILE_MAX_LEN + 1, max_size=PROFILE_MAX_LEN + 3))
def test_mandatory_valid_ok_too_long_for_profile(category):
    """Categories longer than every profile leave validation incomplete."""
    document = {'document': {'category': category}}
    assert mandatory.is_valid(document) is NotImplemented
@pytest.mark.parametrize('category, status', [(' ', True), ('-', True), ('_', True), ('1', True), ('9', True)])
def test_mandatory_valid_ok_irrelevant_and_digits(category, status):
    """Irrelevant characters and digits leave validation incomplete."""
    expected = NotImplemented if status is True else False
    assert mandatory.is_valid({'document': {'category': category}}) is expected
@pytest.mark.parametrize('category, status', [(w.upper(), False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_nok_uppercase_profiles(category, status):
    """Uppercased profile names fail overall validation."""
    expected = NotImplemented if status is True else False
    assert mandatory.is_valid({'document': {'category': category}}) is expected
@pytest.mark.parametrize('category, status', [(' ', True), ('-', True), ('_', True), ('1', True), ('9', True)])
def test_mandatory_valid_category_ok_irrelevant_and_digits(category, status):
    document = {'document': {'category': category}}
    assert mandatory.is_valid_category(document) is status
@pytest.mark.parametrize('category, status', [(f'abc{w}xyz', True) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_ok_no_profiles(category, status):
    document = {'document': {'category': category}}
    assert mandatory.is_valid_category(document) is status
@pytest.mark.parametrize('category, status', [(w, True) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_ok_profiles(category, status):
    document = {'document': {'category': category}}
    assert mandatory.is_valid_category(document) is status
@pytest.mark.parametrize('category, status', [(w.upper(), False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_nok_uppercase_profiles(category, status):
    document = {'document': {'category': category}}
    assert mandatory.is_valid_category(document) is status
@pytest.mark.parametrize('category, status', [(w.title(), False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_nok_titlecase_profiles(category, status):
    document = {'document': {'category': category}}
    assert mandatory.is_valid_category(document) is status
@pytest.mark.parametrize('category, status', [(f' {w}', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_nok_leading_space_profiles(category, status):
    document = {'document': {'category': category}}
    assert mandatory.is_valid_category(document) is status
@pytest.mark.parametrize('category, status', [(f'{w} ', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_nok_trailing_space_profiles(category, status):
    document = {'document': {'category': category}}
    assert mandatory.is_valid_category(document) is status
@pytest.mark.parametrize('category, status', [(f'-{w}', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_nok_leading_dash_profiles(category, status):
    document = {'document': {'category': category}}
    assert mandatory.is_valid_category(document) is status
@pytest.mark.parametrize('category, status', [(f'{w}-', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_nok_trailing_dash_profiles(category, status):
    document = {'document': {'category': category}}
    assert mandatory.is_valid_category(document) is status
@pytest.mark.parametrize('category, status', [(f'_{w}', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_nok_leading_underscore_profiles(category, status):
    document = {'document': {'category': category}}
    assert mandatory.is_valid_category(document) is status
@pytest.mark.parametrize('category, status', [(f'{w}_', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_nok_trailing_underscore_profiles(category, status):
    document = {'document': {'category': category}}
    assert mandatory.is_valid_category(document) is status
@pytest.mark.parametrize('category, status', [(f'- _{w}', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_nok_leading_irrelevant_profiles(category, status):
    document = {'document': {'category': category}}
    assert mandatory.is_valid_category(document) is status
@pytest.mark.parametrize('category, status', [(f'{w}__ -- _', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_nok_trailing_irrelevant_profiles(category, status):
    document = {'document': {'category': category}}
    assert mandatory.is_valid_category(document) is status
@pytest.mark.parametrize('category, status', [(' ', True), ('-', True), ('_', True), ('1', True), ('9', True)])
def test_mandatory_valid_category_name_ok_irrelevant_and_digits(category, status):
    result = mandatory.val_cat_nam.is_valid(category)
    assert result is status
@pytest.mark.parametrize('category, status', [(f'abc{w}xyz', True) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_name_ok_no_profiles(category, status):
    result = mandatory.val_cat_nam.is_valid(category)
    assert result is status
@pytest.mark.parametrize('category, status', [(w, True) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_name_ok_profiles(category, status):
    result = mandatory.val_cat_nam.is_valid(category)
    assert result is status
@pytest.mark.parametrize('category, status', [(w.upper(), False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_name_nok_uppercase_profiles(category, status):
    result = mandatory.val_cat_nam.is_valid(category)
    assert result is status
@pytest.mark.parametrize('category, status', [(w.title(), False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_name_nok_titlecase_profiles(category, status):
    result = mandatory.val_cat_nam.is_valid(category)
    assert result is status
@pytest.mark.parametrize('category, status', [(f' {w}', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_name_nok_leading_space_profiles(category, status):
    result = mandatory.val_cat_nam.is_valid(category)
    assert result is status
@pytest.mark.parametrize('category, status', [(f'{w} ', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_name_nok_trailing_space_profiles(category, status):
    result = mandatory.val_cat_nam.is_valid(category)
    assert result is status
@pytest.mark.parametrize('category, status', [(f'-{w}', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_name_nok_leading_dash_profiles(category, status):
    result = mandatory.val_cat_nam.is_valid(category)
    assert result is status
@pytest.mark.parametrize('category, status', [(f'{w}-', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_name_nok_trailing_dash_profiles(category, status):
    result = mandatory.val_cat_nam.is_valid(category)
    assert result is status
@pytest.mark.parametrize('category, status', [(f'_{w}', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_name_nok_leading_underscore_profiles(category, status):
    result = mandatory.val_cat_nam.is_valid(category)
    assert result is status
@pytest.mark.parametrize('category, status', [(f'{w}_', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_name_nok_trailing_underscore_profiles(category, status):
    result = mandatory.val_cat_nam.is_valid(category)
    assert result is status
@pytest.mark.parametrize('category, status', [(f'- _{w}', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_name_nok_leading_irrelevant_profiles(category, status):
    result = mandatory.val_cat_nam.is_valid(category)
    assert result is status
@pytest.mark.parametrize('category, status', [(f'{w}__ -- _', False) for w in mandatory.val_cat_nam.PROFILES])
def test_mandatory_valid_category_name_nok_trailing_irrelevant_profiles(category, status):
    result = mandatory.val_cat_nam.is_valid(category)
    assert result is status
def test_mandatory_valid_category_nok_spec_example():
    """Spec example 6-1-26-01 must fail overall validation."""
    path = pathlib.Path('tests/fixtures/rules/invalid/upstream/6-1-26-01.json')
    document = json.loads(path.read_text(encoding=ENCODING))
    assert mandatory.is_valid(document) is False
def test_mandatory_valid_translator_nok_spec_example():
    """Spec example 6-1-15-01 must fail overall validation."""
    path = pathlib.Path('tests/fixtures/rules/invalid/upstream/6-1-15-01.json')
    document = json.loads(path.read_text(encoding=ENCODING))
    assert mandatory.is_valid(document) is False
def test_mandatory_defined_product_id_nok_spec_example():
    """Spec example 6-1-01-01 must fail overall validation."""
    path = pathlib.Path('tests/fixtures/rules/invalid/upstream/6-1-01-01.json')
    document = json.loads(path.read_text(encoding=ENCODING))
    assert mandatory.is_valid(document) is False
def test_mandatory_defined_group_id_nok_spec_example():
    """Spec example 6-1-04-01 must fail overall validation."""
    path = pathlib.Path('tests/fixtures/rules/invalid/upstream/6-1-04-01.json')
    document = json.loads(path.read_text(encoding=ENCODING))
    assert mandatory.is_valid(document) is False
def test_mandatory_unique_product_id_nok_spec_example():
    """Spec example 6-1-02-01 must fail overall validation."""
    path = pathlib.Path('tests/fixtures/rules/invalid/upstream/6-1-02-01.json')
    document = json.loads(path.read_text(encoding=ENCODING))
    assert mandatory.is_valid(document) is False
def test_mandatory_is_valid_unique_product_ids_nok_spec_example():
    """Spec example 6-1-02-01 must fail the unique-product-ids check."""
    path = pathlib.Path('tests/fixtures/rules/invalid/upstream/6-1-02-01.json')
    document = json.loads(path.read_text(encoding=ENCODING))
    assert mandatory.is_valid_unique_product_ids(document) is False
def test_mandatory_unique_group_id_nok_spec_example():
    """Spec example 6-1-05-01 must fail overall validation."""
    path = pathlib.Path('tests/fixtures/rules/invalid/upstream/6-1-05-01.json')
    document = json.loads(path.read_text(encoding=ENCODING))
    assert mandatory.is_valid(document) is False
def test_mandatory_valid_ok_grow_me():
    """A partially filled translator document stays incomplete."""
    document = {
        'document': {
            'category': ' ',
            'publisher': {'category': 'translator'},
            'source_lang': 'fr',
        }
    }
    assert mandatory.is_valid(document) is NotImplemented
| 45.341379 | 113 | 0.755723 | 1,780 | 13,149 | 5.293258 | 0.087079 | 0.08321 | 0.079813 | 0.082148 | 0.909892 | 0.891424 | 0.863511 | 0.840692 | 0.826576 | 0.811823 | 0 | 0.005132 | 0.110883 | 13,149 | 289 | 114 | 45.49827 | 0.800787 | 0.008366 | 0 | 0.542105 | 0 | 0 | 0.121203 | 0.027923 | 0 | 0 | 0 | 0 | 0.247368 | 1 | 0.247368 | false | 0 | 0.031579 | 0 | 0.278947 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
ee4b9fbf1b0b3467d78585ad905b8150c066471b | 43,261 | py | Python | testcases/generated/cdn_test.py | Tanc009/jdcloud-cli | 4e11de77c68501f44e7026c0ad1c24e5d043197e | [
"Apache-2.0"
] | null | null | null | testcases/generated/cdn_test.py | Tanc009/jdcloud-cli | 4e11de77c68501f44e7026c0ad1c24e5d043197e | [
"Apache-2.0"
] | null | null | null | testcases/generated/cdn_test.py | Tanc009/jdcloud-cli | 4e11de77c68501f44e7026c0ad1c24e5d043197e | [
"Apache-2.0"
] | null | null | null | # coding=utf8
# Copyright 2018 JDCLOUD.COM
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# NOTE: This class is auto generated by the jdcloud code generator program.
import unittest
import os
import json
class CdnTest(unittest.TestCase):
def test_query_online_billing_type(self):
cmd = """python ../../main.py cdn query-online-billing-type """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_online_billing_type(self):
cmd = """python ../../main.py cdn set-online-billing-type """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_band(self):
cmd = """python ../../main.py cdn query-band """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_band_with_area(self):
cmd = """python ../../main.py cdn query-band-with-area """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_domain_config_status(self):
cmd = """python ../../main.py cdn query-domain-config-status --task-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_area_isp_list(self):
cmd = """python ../../main.py cdn query-area-isp-list """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_preview_certificate(self):
cmd = """python ../../main.py cdn preview-certificate --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_default_http_header_key(self):
cmd = """python ../../main.py cdn query-default-http-header-key """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_ip_black_list(self):
cmd = """python ../../main.py cdn query-ip-black-list --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_ip_black_list(self):
cmd = """python ../../main.py cdn set-ip-black-list --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_operate_ip_black_list(self):
cmd = """python ../../main.py cdn operate-ip-black-list --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_cache_rule(self):
cmd = """python ../../main.py cdn create-cache-rule --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_update_cache_rule(self):
cmd = """python ../../main.py cdn update-cache-rule --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_cache_rule(self):
cmd = """python ../../main.py cdn delete-cache-rule --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_http_header(self):
cmd = """python ../../main.py cdn query-http-header --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_http_header(self):
cmd = """python ../../main.py cdn set-http-header --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_http_header(self):
cmd = """python ../../main.py cdn delete-http-header --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_video_draft(self):
cmd = """python ../../main.py cdn set-video-draft --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_range(self):
cmd = """python ../../main.py cdn set-range --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_ignore_query_string(self):
cmd = """python ../../main.py cdn set-ignore-query-string --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_user_agent(self):
cmd = """python ../../main.py cdn query-user-agent --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_user_agent_config(self):
cmd = """python ../../main.py cdn set-user-agent-config --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_accesskey_config(self):
cmd = """python ../../main.py cdn query-accesskey-config --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_accesskey_config(self):
cmd = """python ../../main.py cdn set-accesskey-config --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_refer(self):
cmd = """python ../../main.py cdn set-refer --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_monitor(self):
cmd = """python ../../main.py cdn query-monitor --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_monitor(self):
cmd = """python ../../main.py cdn set-monitor --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_stop_monitor(self):
cmd = """python ../../main.py cdn stop-monitor --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_source(self):
cmd = """python ../../main.py cdn set-source --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_operate_share_cache(self):
cmd = """python ../../main.py cdn operate-share-cache --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_http_type(self):
cmd = """python ../../main.py cdn set-http-type --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_follow_redirect(self):
cmd = """python ../../main.py cdn query-follow-redirect --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_follow_redirect(self):
cmd = """python ../../main.py cdn set-follow-redirect --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_follow_source_protocol(self):
cmd = """python ../../main.py cdn query-follow-source-protocol --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_follow_source_protocol(self):
cmd = """python ../../main.py cdn set-follow-source-protocol --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_domain_config(self):
cmd = """python ../../main.py cdn set-domain-config --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_domain_group_list(self):
cmd = """python ../../main.py cdn query-domain-group-list """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_domain_group_detail(self):
cmd = """python ../../main.py cdn query-domain-group-detail --id '5'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_domains_not_in_group(self):
cmd = """python ../../main.py cdn query-domains-not-in-group """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_update_domain_group(self):
cmd = """python ../../main.py cdn update-domain-group --id '5'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_domain_group(self):
cmd = """python ../../main.py cdn create-domain-group """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_batch_delete_domain_group(self):
cmd = """python ../../main.py cdn batch-delete-domain-group --ids '[5]'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_get_domain_list(self):
cmd = """python ../../main.py cdn get-domain-list """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_get_domain_list_by_filter(self):
cmd = """python ../../main.py cdn get-domain-list-by-filter """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_get_domain_detail(self):
cmd = """python ../../main.py cdn get-domain-detail --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_domain(self):
cmd = """python ../../main.py cdn create-domain --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_domain(self):
cmd = """python ../../main.py cdn delete-domain --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_start_domain(self):
cmd = """python ../../main.py cdn start-domain --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_stop_domain(self):
cmd = """python ../../main.py cdn stop-domain --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_oss_buckets(self):
cmd = """python ../../main.py cdn query-oss-buckets """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_batch_create(self):
cmd = """python ../../main.py cdn batch-create """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_domain_config(self):
cmd = """python ../../main.py cdn query-domain-config --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_check_whether_ip_belong_to_jcloud(self):
cmd = """python ../../main.py cdn check-whether-ip-belong-to-jcloud """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_live_domain_back_source(self):
cmd = """python ../../main.py cdn set-live-domain-back-source --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_live_domain_ip_black_list(self):
cmd = """python ../../main.py cdn set-live-domain-ip-black-list --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_live_domain_refer(self):
cmd = """python ../../main.py cdn set-live-domain-refer --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_operate_live_domain_ip_black_list(self):
cmd = """python ../../main.py cdn operate-live-domain-ip-black-list --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_live_domain_back_source_host(self):
cmd = """python ../../main.py cdn set-live-domain-back-source-host --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_live_domain_access_key(self):
cmd = """python ../../main.py cdn set-live-domain-access-key --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_protocol_convert(self):
cmd = """python ../../main.py cdn set-protocol-convert --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_forbidden_stream(self):
cmd = """python ../../main.py cdn delete-forbidden-stream --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_push_domain_orapp_or_stream(self):
cmd = """python ../../main.py cdn query-push-domain-orapp-or-stream --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_operate_live_domain_ignore_query_string(self):
cmd = """python ../../main.py cdn operate-live-domain-ignore-query-string --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_push_ip_white_list(self):
cmd = """python ../../main.py cdn set-push-ip-white-list --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_publish_normal_timeout(self):
cmd = """python ../../main.py cdn set-publish-normal-timeout --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_stream_notify_info(self):
cmd = """python ../../main.py cdn set-stream-notify-info --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_forward_authentication(self):
cmd = """python ../../main.py cdn set-forward-authentication --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_origin_authentication(self):
cmd = """python ../../main.py cdn set-origin-authentication --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_batch_create_live_domain(self):
cmd = """python ../../main.py cdn batch-create-live-domain """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_live_domain(self):
cmd = """python ../../main.py cdn create-live-domain """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_bind_publish_domain(self):
cmd = """python ../../main.py cdn bind-publish-domain --domain 'xxx' --publish-domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_modify_live_domain_forward_custom_vhost(self):
cmd = """python ../../main.py cdn modify-live-domain-forward-custom-vhost --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_live_domain_detail(self):
cmd = """python ../../main.py cdn query-live-domain-detail --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_live_domain_detail_v2(self):
cmd = """python ../../main.py cdn query-live-domain-detail-v2 --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describe_publish_domains(self):
cmd = """python ../../main.py cdn describe-publish-domains """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_describebinded_domains(self):
cmd = """python ../../main.py cdn describebinded-domains --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_live_domain_apps(self):
cmd = """python ../../main.py cdn query-live-domain-apps --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_live_domain_prefecth_task(self):
cmd = """python ../../main.py cdn create-live-domain-prefecth-task """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_live_prefetch_task(self):
cmd = """python ../../main.py cdn query-live-prefetch-task """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_refresh_task_by_ids(self):
cmd = """python ../../main.py cdn query-refresh-task-by-ids """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_refresh_task_by_id(self):
cmd = """python ../../main.py cdn query-refresh-task-by-id --task-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_refresh_task_for_callback(self):
cmd = """python ../../main.py cdn create-refresh-task-for-callback """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_refresh_task_for_callback_v2(self):
cmd = """python ../../main.py cdn create-refresh-task-for-callback-v2 """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_refresh_task(self):
cmd = """python ../../main.py cdn query-refresh-task """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_refresh_task(self):
cmd = """python ../../main.py cdn create-refresh-task """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_net_protection_rules(self):
cmd = """python ../../main.py cdn query-net-protection-rules """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_net_protection_rules(self):
cmd = """python ../../main.py cdn set-net-protection-rules """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_net_protection_rules_switch(self):
cmd = """python ../../main.py cdn query-net-protection-rules-switch """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_net_protection_rules_switch(self):
cmd = """python ../../main.py cdn set-net-protection-rules-switch """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_geo_areas(self):
cmd = """python ../../main.py cdn query-geo-areas """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_attack_type_count(self):
cmd = """python ../../main.py cdn query-attack-type-count """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_ddos_graph(self):
cmd = """python ../../main.py cdn query-ddos-graph """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_search_attack_log(self):
cmd = """python ../../main.py cdn search-attack-log """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_get_all_upper_node_ip_list(self):
cmd = """python ../../main.py cdn get-all-upper-node-ip-list """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_get_ssl_cert_list(self):
cmd = """python ../../main.py cdn get-ssl-cert-list """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_get_ssl_cert_detail(self):
cmd = """python ../../main.py cdn get-ssl-cert-detail --ssl-cert-id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_upload_cert(self):
cmd = """python ../../main.py cdn upload-cert --cert-name 'xxx' --key-file 'xxx' --cert-file 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_mix_statistics_data(self):
cmd = """python ../../main.py cdn query-mix-statistics-data """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_mix_statistics_with_area_data(self):
cmd = """python ../../main.py cdn query-mix-statistics-with-area-data """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_mix_traffic_group_sum(self):
cmd = """python ../../main.py cdn query-mix-traffic-group-sum """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_statistics_data(self):
cmd = """python ../../main.py cdn query-statistics-data """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_statistics_data_group_by_area(self):
cmd = """python ../../main.py cdn query-statistics-data-group-by-area """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_statistics_data_group_sum(self):
cmd = """python ../../main.py cdn query-statistics-data-group-sum """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_live_statistics_data(self):
cmd = """python ../../main.py cdn query-live-statistics-data """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_live_statistics_area_data_group_by(self):
cmd = """python ../../main.py cdn query-live-statistics-area-data-group-by """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_live_traffic_group_sum(self):
cmd = """python ../../main.py cdn query-live-traffic-group-sum """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_statistics_top_ip(self):
cmd = """python ../../main.py cdn query-statistics-top-ip """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_domains_log(self):
cmd = """python ../../main.py cdn query-domains-log """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_domain_log(self):
cmd = """python ../../main.py cdn query-domain-log --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_statistics_top_url(self):
cmd = """python ../../main.py cdn query-statistics-top-url """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_waf_switch(self):
cmd = """python ../../main.py cdn query-waf-switch --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_waf_switch(self):
cmd = """python ../../main.py cdn set-waf-switch --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_waf_white_rule_switch(self):
cmd = """python ../../main.py cdn query-waf-white-rule-switch --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_waf_white_rule_switch(self):
cmd = """python ../../main.py cdn set-waf-white-rule-switch --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_querywaf_white_rules(self):
cmd = """python ../../main.py cdn querywaf-white-rules --domain 'xxx' --rule-type 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_waf_white_rule(self):
cmd = """python ../../main.py cdn create-waf-white-rule --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_update_waf_white_rule(self):
cmd = """python ../../main.py cdn update-waf-white-rule --domain 'xxx' --id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_enable_waf_white_rules(self):
cmd = """python ../../main.py cdn enable-waf-white-rules --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_disable_waf_white_rules(self):
cmd = """python ../../main.py cdn disable-waf-white-rules --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_waf_white_rules(self):
cmd = """python ../../main.py cdn delete-waf-white-rules --domain 'xxx' --ids 'xxx' --rule-type 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_waf_black_rule_switch(self):
cmd = """python ../../main.py cdn query-waf-black-rule-switch --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_waf_black_rule_switch(self):
cmd = """python ../../main.py cdn set-waf-black-rule-switch --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_querywaf_black_rules(self):
cmd = """python ../../main.py cdn querywaf-black-rules --domain 'xxx' --rule-type 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_waf_black_rule(self):
cmd = """python ../../main.py cdn create-waf-black-rule --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_update_waf_black_rule(self):
cmd = """python ../../main.py cdn update-waf-black-rule --domain 'xxx' --id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_enable_waf_black_rules(self):
cmd = """python ../../main.py cdn enable-waf-black-rules --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_disable_waf_black_rules(self):
cmd = """python ../../main.py cdn disable-waf-black-rules --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_waf_black_rules(self):
cmd = """python ../../main.py cdn delete-waf-black-rules --domain 'xxx' --ids 'xxx' --rule-type 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_ccprotect_switch(self):
cmd = """python ../../main.py cdn query-ccprotect-switch --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_ccprotect_switch(self):
cmd = """python ../../main.py cdn set-ccprotect-switch --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_ccprotect_rules(self):
cmd = """python ../../main.py cdn query-ccprotect-rules --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_create_ccprotect_rule(self):
cmd = """python ../../main.py cdn create-ccprotect-rule --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_update_ccprotect_rule(self):
cmd = """python ../../main.py cdn update-ccprotect-rule --domain 'xxx' --id 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_enable_ccprotect_rule(self):
cmd = """python ../../main.py cdn enable-ccprotect-rule --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_disable_ccprotect_rule(self):
cmd = """python ../../main.py cdn disable-ccprotect-rule --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_delete_ccprotect_rule(self):
cmd = """python ../../main.py cdn delete-ccprotect-rule --domain 'xxx' --ids 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_web_protect_switch(self):
cmd = """python ../../main.py cdn query-web-protect-switch --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_set_web_protect_switch(self):
cmd = """python ../../main.py cdn set-web-protect-switch --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_web_protect_settings(self):
cmd = """python ../../main.py cdn query-web-protect-settings --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_update_web_protect_settings(self):
cmd = """python ../../main.py cdn update-web-protect-settings --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_waf_regions(self):
cmd = """python ../../main.py cdn query-waf-regions --skip-type 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_query_ip_black_setting_status(self):
cmd = """python ../../main.py cdn query-ip-black-setting-status --domain 'xxx'"""
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_waf_query_pv_for_area_and_ip(self):
cmd = """python ../../main.py cdn waf-query-pv-for-area-and-ip """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_waf_query_pv(self):
cmd = """python ../../main.py cdn waf-query-pv """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
def test_waf_query_attack_details(self):
cmd = """python ../../main.py cdn waf-query-attack-details """
with os.popen(cmd) as f:
content = f.read()
print(content)
result = json.loads(content)
self.assertIsInstance(result, dict)
| 32.50263 | 113 | 0.585701 | 5,358 | 43,261 | 4.62654 | 0.04162 | 0.040946 | 0.076042 | 0.099439 | 0.946065 | 0.938884 | 0.926621 | 0.894147 | 0.83178 | 0.760862 | 0 | 0.000511 | 0.276115 | 43,261 | 1,330 | 114 | 32.527068 | 0.791065 | 0.014655 | 0 | 0.711482 | 0 | 0.017664 | 0.201648 | 0.053721 | 0 | 0 | 0 | 0 | 0.142296 | 1 | 0.142296 | false | 0 | 0.002944 | 0 | 0.146222 | 0.142296 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
c9e6b88302a0cfa2e3bc99d08172cfb5ab43d720 | 1,881 | py | Python | vision_imag_pro.py | SentioProberDev/Examples-Python | fefe3a2f1c2600203fcb2782c055f648f5f3eeac | [
"BSD-2-Clause"
] | null | null | null | vision_imag_pro.py | SentioProberDev/Examples-Python | fefe3a2f1c2600203fcb2782c055f648f5f3eeac | [
"BSD-2-Clause"
] | null | null | null | vision_imag_pro.py | SentioProberDev/Examples-Python | fefe3a2f1c2600203fcb2782c055f648f5f3eeac | [
"BSD-2-Clause"
] | null | null | null | from sentio_prober_control.Sentio.ProberSentio import *
from sentio_prober_control.Communication.CommunicatorTcpIp import CommunicatorTcpIp
def main():
# Setup TCPIP Communication
prober = SentioProber(CommunicatorTcpIp.create("127.0.0.1:35555"))
# Zero reference test
prober.vision.imagpro.move_z(IMagProZReference.Center, 0)
z_init_pos: float = prober.vision.imagpro.get_z(IMagProZReference.Zero)
prober.vision.imagpro.move_z(IMagProZReference.Zero, z_init_pos + 100)
z_pos = prober.vision.imagpro.get_z(IMagProZReference.Zero)
prober.vision.imagpro.move_z(IMagProZReference.Zero, z_init_pos - 100)
z_pos = prober.vision.imagpro.get_z(IMagProZReference.Zero)
# Relative reference test
prober.vision.imagpro.move_z(IMagProZReference.Center, 0)
z_init_pos = prober.vision.imagpro.get_z(IMagProZReference.Relative)
prober.vision.imagpro.move_z(IMagProZReference.Relative, z_init_pos + 100)
z_pos = prober.vision.imagpro.get_z(IMagProZReference.Relative)
prober.vision.imagpro.move_z(IMagProZReference.Relative, z_init_pos - 100)
z_pos = prober.vision.imagpro.get_z(IMagProZReference.Relative)
# Center reference test
prober.vision.imagpro.move_z(IMagProZReference.Center, 0)
z_init_pos = prober.vision.imagpro.get_z(IMagProZReference.Center)
prober.vision.imagpro.move_z(IMagProZReference.Center, z_init_pos + 100)
z_pos = prober.vision.imagpro.get_z(IMagProZReference.Center)
prober.vision.imagpro.move_z(IMagProZReference.Center, z_init_pos - 100)
z_pos = prober.vision.imagpro.get_z(IMagProZReference.Center)
if __name__ == "__main__":
try:
main()
except Exception as e:
print("\n#### Error ##################################")
print("{0}".format(e)) | 48.230769 | 83 | 0.709197 | 229 | 1,881 | 5.58952 | 0.19214 | 0.16875 | 0.267188 | 0.161719 | 0.775781 | 0.775781 | 0.775781 | 0.775781 | 0.775781 | 0.775781 | 0 | 0.021263 | 0.174907 | 1,881 | 39 | 84 | 48.230769 | 0.803479 | 0.048379 | 0 | 0.321429 | 0 | 0 | 0.041011 | 0.019101 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035714 | false | 0 | 0.071429 | 0 | 0.107143 | 0.071429 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
4e6b0ac6f4adb3d41e26aa7f5fb927036d72c393 | 1,471 | py | Python | Python/Problem_008.py | mtakamichi/ProjectEuler | 083fafe849868f035e677ced7adfa9170d41e719 | [
"Unlicense"
] | null | null | null | Python/Problem_008.py | mtakamichi/ProjectEuler | 083fafe849868f035e677ced7adfa9170d41e719 | [
"Unlicense"
] | null | null | null | Python/Problem_008.py | mtakamichi/ProjectEuler | 083fafe849868f035e677ced7adfa9170d41e719 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
Problem 8
The four adjacent digits in the 1000-digit number that have the greatest product are 9 Γ 9 Γ 8 Γ 9 = 5832.
Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product?
'''
import numpy as np
sl="7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"
nl = [float(s) for s in sl]
nl = np.log10(np.array(nl))
nlc=np.convolve(nl, np.ones((13,)), mode='valid')
print(int(10**max(nlc)))
| 91.9375 | 1,006 | 0.89259 | 87 | 1,471 | 15.126437 | 0.609195 | 0.021277 | 0.024316 | 0.028875 | 0.091185 | 0.091185 | 0.091185 | 0.091185 | 0.091185 | 0.091185 | 0 | 0.742754 | 0.061863 | 1,471 | 15 | 1,007 | 98.066667 | 0.208696 | 0.194426 | 0 | 0 | 0 | 0 | 0.865633 | 0.861326 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.166667 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
4ec94d357fd1ce77cdf8bf867291138ed413fdc9 | 131 | py | Python | bellini/api/tests/test_numpyro_api.py | choderalab/bellin | b6c03b900d34f8a5570c51af22ef2d589da2a050 | [
"MIT"
] | 3 | 2021-06-07T20:05:14.000Z | 2021-07-03T04:56:32.000Z | bellini/api/tests/test_numpyro_api.py | choderalab/bellin | b6c03b900d34f8a5570c51af22ef2d589da2a050 | [
"MIT"
] | 4 | 2021-07-19T21:05:14.000Z | 2021-09-14T15:42:04.000Z | bellini/api/tests/test_numpyro_api.py | choderalab/bellin | b6c03b900d34f8a5570c51af22ef2d589da2a050 | [
"MIT"
] | 1 | 2021-03-24T07:21:21.000Z | 2021-03-24T07:21:21.000Z | import pytest
# TODO: write test that generates graph from group operations,
# compiles to numpyro model, then performs inference
| 26.2 | 62 | 0.801527 | 18 | 131 | 5.833333 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.160305 | 131 | 4 | 63 | 32.75 | 0.954545 | 0.847328 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
14e14e312dacd576ca251dc3161e3ceed1206d91 | 92 | py | Python | __init__.py | mastertech/moip-sdk-python | 15c28b8643dfb63242c0cd55f1a2cbee04d9fbaa | [
"MIT"
] | 2 | 2020-06-26T16:27:43.000Z | 2021-06-01T20:21:04.000Z | __init__.py | mastertech/moip-sdk-python | 15c28b8643dfb63242c0cd55f1a2cbee04d9fbaa | [
"MIT"
] | null | null | null | __init__.py | mastertech/moip-sdk-python | 15c28b8643dfb63242c0cd55f1a2cbee04d9fbaa | [
"MIT"
] | 1 | 2020-06-26T16:28:57.000Z | 2020-06-26T16:28:57.000Z | from moip_sdk.customer import *
from moip_sdk.order import *
from moip_sdk.payment import *
| 23 | 31 | 0.804348 | 15 | 92 | 4.733333 | 0.466667 | 0.338028 | 0.464789 | 0.478873 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.130435 | 92 | 3 | 32 | 30.666667 | 0.8875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
14e33ed489aa483688a8c4da8d152ac60d3b8ed2 | 38,123 | py | Python | data_utils/MyDataLoader.py | Abdulah-Fawaz/Benchmarking-Surface-DL | 9693379f26d57f9aabf28b973f40a9f6f627d26f | [
"MIT"
] | 2 | 2021-12-04T07:04:56.000Z | 2021-12-13T16:28:50.000Z | data_utils/MyDataLoader.py | Abdulah-Fawaz/Benchmarking-Surface-DL | 9693379f26d57f9aabf28b973f40a9f6f627d26f | [
"MIT"
] | 1 | 2021-12-21T09:36:11.000Z | 2022-01-25T10:26:43.000Z | data_utils/MyDataLoader.py | Abdulah-Fawaz/Benchmarking-Surface-DL | 9693379f26d57f9aabf28b973f40a9f6f627d26f | [
"MIT"
] | 1 | 2022-02-27T17:38:19.000Z | 2022-02-27T17:38:19.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 28 13:38:48 2020
@author: fa19
"""
import nibabel as nb
import numpy as np
import torch
import random
from scipy.interpolate import griddata
import os
# Per-task, per-channel normalisation statistics (mean / std) for the dHCP
# surface data. Each 4-element tensor holds one value per input channel;
# channel meaning is presumably the four dHCP surface metrics in the order
# used by the preprocessing pipeline -- TODO confirm against that pipeline.
means_birth_age = torch.Tensor([1.18443463, 0.0348339 , 1.02189593, 0.12738451])
stds_birth_age = torch.Tensor([0.39520042, 0.19205919, 0.37749157, 4.16265044])

# The confounded birth-age task reuses the plain birth-age statistics.
means_birth_age_confounded = means_birth_age
stds_birth_age_confounded = stds_birth_age

means_scan_age = torch.Tensor([1.16332048, 0.03618059, 1.01341462, 0.09550486])
stds_scan_age = torch.Tensor([0.39418309, 0.18946538, 0.37818974, 4.04483381])

# The Bayley outcome task has nine targets, hence nine statistics per tensor.
means_bayley = torch.Tensor([0.03561912, 0.1779468, 1.02368241, 1.30365072, 1.42005161, 1.80373678, 1.0485854, 1.44855442, 0.74604417])
stds_bayley = torch.Tensor([0.19094736, 4.11706815, 0.37789417, 4.61303946, 5.08495779, 4.94774891, 4.72248912, 4.22112396, 4.48455344])

# Default statistics used when no task-specific set is selected.
# (The original redundant self-assignments `means = means` / `stds = stds`
# were removed: they were pure no-ops.)
means = torch.Tensor([1.1267, 0.0345, 1.0176, 0.0556])
stds = torch.Tensor([0.3522, 0.1906, 0.3844, 4.0476])
# Precomputed index arrays loaded at import time from the local `data/` folder.
# Rotation index arrays for the icosphere vertices; used for the rotation
# augmentation advertised by My_dHCP_Data (`rotations=True`).
rotation_arr = np.load('data/rotations_array.npy').astype(int)
# Index array that mirrors the sphere -- presumably used for the parity
# (left/right) handling; confirm against where it is applied below.
reversing_arr = np.load('data/reversing_arr.npy')
# Additional rotations kept separate from `rotation_arr` (e.g. for test time).
test_rotation_arr = np.load('data/remaining_rotations_array.npy').astype(int)
# 2D equirectangular coordinates of the ico-6 sphere vertices, in [0, 1).
# The +0.1 longitude shift (mod 1) presumably moves the wrap-around seam
# away from the data -- TODO confirm.
xy_points = np.load('data/equirectangular_ico_6_points.npy')
xy_points[:,0] = (xy_points[:,0] + 0.1)%1
# Target grid for projecting the sphere onto a 170x170 square image.
# Note: the loaded file only supplies the array object/shape -- its first two
# columns are immediately overwritten with a uniform meshgrid over
# [0.02, 0.98] below.
grid = np.load('data/grid_170_square.npy')
grid_x, grid_y = np.meshgrid(np.linspace(0.02, 0.98, 170), np.linspace(0.02, 0.98, 170))
grid[:,0] = grid_x.flatten()
grid[:,1] = grid_y.flatten()
from scipy.interpolate import griddata
from torch_geometric.data import Data
class My_dHCP_Data(torch.utils.data.Dataset):

    def __init__(self, input_arr, warped_files_directory, unwarped_files_directory, rotations = False,
                 number_of_warps = 0, parity_choice = 'left', smoothing = False, normalisation = None, projected =False, sample_only = True, output_as_torch = True, *args):
        """
        A full Dataset for the dHCP data. Can include warps, rotations and parity flips.

        Filename style:
            in the array: only 'sub-X-ses-Y'
            on disk:
                Left  = 'sub-X_ses-Y_L'
                Right = 'sub-X_ses-Y_R'
                if warped: 'sub-X_ses-Y_L_W1'

        INPUT ARGS:
        1. input_arr: numpy array of size Nx2 (or Nx3).
           FIRST column MUST be the filename (excluding directory AND the L/R
           suffix) of the MERGED nibabel files; LAST column must be the
           (float) label.  An optional middle column holds confounding
           metadata (also float, e.g. scan age when predicting birth age).
        2. rotations - bool: whether to apply a random precomputed rotation.
        3. number_of_warps - int: warp number X is encoded in the filename as
           filename_WX; cannot exceed the number of warps present on disk.
        4. parity_choice (IMPORTANT!) - defines left/right-ness:
           'left'     - only left hemispheres
           'both'     - dataset length doubles; the second half are right hemispheres
           'combined' - left and right stacked, read as twice the input channels
        5. smoothing - bool: clip extremal values using the smoothing arrays.
        6. normalisation - 'std' (mean 0, std 1), 'range' (-1 to 1) or None.
        7. output_as_torch - bool: convert outputs to torch Tensors (usually yes).
        """
        self.input_arr = input_arr
        self.image_files = input_arr[:,0]  # per-sample base filenames
        self.label = input_arr[:,-1]       # per-sample targets
        self.rotations = rotations
        self.projected = projected
        self.number_of_warps = number_of_warps
        self.parity = parity_choice
        self.smoothing = smoothing
        self.normalisation = normalisation
        self.sample_only = sample_only
        self.output_as_torch = output_as_torch
        # Warped and unwarped files live in different directories.
        if self.number_of_warps != 0 and self.number_of_warps != None:
            self.directory = warped_files_directory
        else:
            self.directory = unwarped_files_directory

    def __len__(self):
        # Base length, multiplied by the number of warps when every warp is
        # enumerated (sample_only False) and doubled for parity == 'both'.
        L = len(self.input_arr)
        if self.number_of_warps !=0:
            if self.sample_only == False:
                L = L*self.number_of_warps
        if self.parity == 'both':
            L = 2* L
        return L

    def __test_input_params__(self):
        # Sanity checks on constructor arguments (not called automatically).
        # NOTE(review): self.number_of_rotations and self.rotation_arr are
        # never set by this class, and `self.rotations == bool` compares a
        # value against the type object — these checks would fail if invoked.
        assert self.input_arr.shape[1] >=2, 'check your input array is a nunpy array of files and labels'
        assert type(self.number_of_warps) == int, "number of warps must be an in integer (can be 0)"
        assert self.parity in ['left', 'both', 'combined'], "parity choice must be either left, combined or both"
        if self.number_of_rotations != 0:
            assert self.rotation_arr != None,'Must specify a rotation file containing rotation vertex ids if rotations are non-zero'
        assert self.rotations == bool, 'rotations must be boolean'
        assert self.normalisation in [None, 'none', 'std', 'range'], 'Normalisation must be either std or range'

    def __genfilename__(self,idx, right):
        """
        Build the on-disk filename(s) for sample `idx`, applying PARITY and a
        randomly chosen WARP suffix.  Always returns a list (length 2 when
        parity == 'combined': left first, then right).
        """
        raw_filename = self.image_files[idx]
        filename = []
        # Hemisphere suffix: a single file unless 'combined'.
        if self.parity != 'combined':
            if right == True:
                filename.append(raw_filename + '_R')
            else:
                filename.append(raw_filename + '_L')
        if self.parity == 'combined':
            filename.append(raw_filename + '_L')
            filename.append(raw_filename+'_R')
        # Randomly pick one warp; choice '0' means the unwarped original
        # (no suffix is appended).
        if self.number_of_warps != 0:
            warp_choice = str(random.randint(0,self.number_of_warps))
            if warp_choice !='0':
                filename = [s + '_W'+warp_choice for s in filename ]
        return filename

    def __getitem__(self, idx):
        """
        Load the image(s) as numpy arrays, then the label, then the metadata
        (which may be None), applying rotation / parity flip / smoothing /
        normalisation / tensor conversion / projection as configured.
        """
        # For 'both', the first half of the index range maps to left
        # hemispheres and the second half to right.
        if self.parity == 'both':
            T = self.__len__()//2
            idx, right = idx % T, idx // T
            filename = self.__genfilename__(idx, right)
        else:
            right = False
            filename = self.__genfilename__(idx, right)
        image_gifti = [nb.load(self.directory + '/'+individual_filename+'.shape.gii').darrays for individual_filename in filename]
        image = []
        # Optionally re-index every channel with a random precomputed
        # rotation (index 0 is treated as the identity and skipped).
        if self.rotations == True:
            rotation_choice = random.randint(0, len(rotation_arr)-1)
            if rotation_choice !=0:
                for file in image_gifti:
                    image.extend(item.data[rotation_arr[rotation_choice]] for item in file)
            else:
                for file in image_gifti:
                    image.extend(item.data for item in file)
        else:
            for file in image_gifti:
                image.extend(item.data for item in file)
        # Right hemispheres are mirrored onto the left template.
        if right == True:
            image = [item[reversing_arr] for item in image]
        label = self.label[idx]
        # Middle columns (if any) are confound metadata.
        # NOTE(review): assigning self.metadata inside __getitem__ mutates the
        # dataset on every access; a local variable would suffice.
        if self.input_arr.shape[1] > 2:
            self.metadata = self.input_arr[:,1:-1]
            metadata = self.metadata[idx]
        else:
            metadata = None
        # NOTE(review): lower_bound/upper_bound (and minima/maxima below) are
        # not defined anywhere in this module's visible scope — the smoothing
        # and 'range' paths would raise NameError unless supplied elsewhere.
        if self.smoothing != False:
            for i in range(len(image)):
                image[i] = np.clip(image[i], lower_bound[i%len(lower_bound)].item(), upper_bound[i%len(upper_bound)].item())
        # Per-channel normalisation, cycling through the module-level stats.
        if self.normalisation != None:
            if self.normalisation == 'std':
                for i in range(len(image)):
                    image[i] = ( image[i] - means[i%len(means)].item( )) / stds[i%len(stds)].item()
            elif self.normalisation == 'range':
                for i in range(len(image)):
                    image[i] = image[i] - minima[i%len(minima)].item()
                    image[i] = image[i] / (maxima[i%len(maxima)].item()- minima[i%len(minima)].item())
        # Torchify if required.
        if self.output_as_torch:
            image = torch.Tensor( image )
            label = torch.Tensor( [label] )
            if isinstance(metadata,np.ndarray):
                metadata = torch.Tensor( [metadata] ).squeeze(1)
        # Optional equirectangular projection onto a 170x170 grid, output as
        # (channels, H, W) with 4 channels assumed.
        if self.projected == True:
            image = griddata(xy_points, image.T, grid, 'nearest')
            image = torch.Tensor(image.reshape(170,170,4)).permute(2,0,1)
        # Metadata is included only when present (tensors/arrays have .shape).
        if hasattr(metadata,'shape'):
            sample = {'image': image, 'metadata' : metadata, 'label': label}
        else:
            sample = {'image': image,'label': label}
        return sample
class My_dHCP_Data_Test_Rot(torch.utils.data.Dataset):
    # Identical to My_dHCP_Data except that random rotations are drawn from
    # the held-out `test_rotation_arr` instead of the training rotations.

    def __init__(self, input_arr, warped_files_directory, unwarped_files_directory, rotations = False,
                 number_of_warps = 0, parity_choice = 'left', smoothing = False, normalisation = None, projected =False, sample_only = True, output_as_torch = True, *args):
        """
        A full Dataset for the dHCP data. Can include warps, rotations and parity flips.

        Filename style:
            in the array: only 'sub-X-ses-Y'
            on disk:
                Left  = 'sub-X_ses-Y_L'
                Right = 'sub-X_ses-Y_R'
                if warped: 'sub-X_ses-Y_L_W1'

        INPUT ARGS:
        1. input_arr: numpy array of size Nx2 (or Nx3).
           FIRST column MUST be the filename (excluding directory AND the L/R
           suffix) of the MERGED nibabel files; LAST column must be the
           (float) label.  An optional middle column holds confounding
           metadata (also float, e.g. scan age when predicting birth age).
        2. rotations - bool: whether to apply a random held-out test rotation.
        3. number_of_warps - int: warp number X is encoded in the filename as
           filename_WX; cannot exceed the number of warps present on disk.
        4. parity_choice (IMPORTANT!) - defines left/right-ness:
           'left'     - only left hemispheres
           'both'     - dataset length doubles; the second half are right hemispheres
           'combined' - left and right stacked, read as twice the input channels
        5. smoothing - bool: clip extremal values using the smoothing arrays.
        6. normalisation - 'std' (mean 0, std 1), 'range' (-1 to 1) or None.
        7. output_as_torch - bool: convert outputs to torch Tensors (usually yes).
        """
        self.input_arr = input_arr
        self.image_files = input_arr[:,0]  # per-sample base filenames
        self.label = input_arr[:,-1]       # per-sample targets
        self.rotations = rotations
        self.projected = projected
        self.number_of_warps = number_of_warps
        self.parity = parity_choice
        self.smoothing = smoothing
        self.normalisation = normalisation
        self.sample_only = sample_only
        self.output_as_torch = output_as_torch
        # Warped and unwarped files live in different directories.
        if self.number_of_warps != 0 and self.number_of_warps != None:
            self.directory = warped_files_directory
        else:
            self.directory = unwarped_files_directory

    def __len__(self):
        # Base length, multiplied by the number of warps when every warp is
        # enumerated (sample_only False) and doubled for parity == 'both'.
        L = len(self.input_arr)
        if self.number_of_warps !=0:
            if self.sample_only == False:
                L = L*self.number_of_warps
        if self.parity == 'both':
            L = 2* L
        return L

    def __test_input_params__(self):
        # Sanity checks on constructor arguments (not called automatically).
        # NOTE(review): self.number_of_rotations and self.rotation_arr are
        # never set by this class, and `self.rotations == bool` compares a
        # value against the type object — these checks would fail if invoked.
        assert self.input_arr.shape[1] >=2, 'check your input array is a nunpy array of files and labels'
        assert type(self.number_of_warps) == int, "number of warps must be an in integer (can be 0)"
        assert self.parity in ['left', 'both', 'combined'], "parity choice must be either left, combined or both"
        if self.number_of_rotations != 0:
            assert self.rotation_arr != None,'Must specify a rotation file containing rotation vertex ids if rotations are non-zero'
        assert self.rotations == bool, 'rotations must be boolean'
        assert self.normalisation in [None, 'none', 'std', 'range'], 'Normalisation must be either std or range'

    def __genfilename__(self,idx, right):
        """
        Build the on-disk filename(s) for sample `idx`, applying PARITY and a
        randomly chosen WARP suffix.  Always returns a list (length 2 when
        parity == 'combined': left first, then right).
        """
        raw_filename = self.image_files[idx]
        filename = []
        # Hemisphere suffix: a single file unless 'combined'.
        if self.parity != 'combined':
            if right == True:
                filename.append(raw_filename + '_R')
            else:
                filename.append(raw_filename + '_L')
        if self.parity == 'combined':
            filename.append(raw_filename + '_L')
            filename.append(raw_filename+'_R')
        # Randomly pick one warp; choice '0' means the unwarped original
        # (no suffix is appended).
        if self.number_of_warps != 0:
            warp_choice = str(random.randint(0,self.number_of_warps))
            if warp_choice !='0':
                filename = [s + '_W'+warp_choice for s in filename ]
        return filename

    def __getitem__(self, idx):
        """
        Load the image(s) as numpy arrays, then the label, then the metadata
        (which may be None), applying test rotation / parity flip / smoothing /
        normalisation / tensor conversion / projection as configured.
        """
        # For 'both', the first half of the index range maps to left
        # hemispheres and the second half to right.
        if self.parity == 'both':
            T = self.__len__()//2
            idx, right = idx % T, idx // T
            filename = self.__genfilename__(idx, right)
        else:
            right = False
            filename = self.__genfilename__(idx, right)
        image_gifti = [nb.load(self.directory + '/'+individual_filename+'.shape.gii').darrays for individual_filename in filename]
        image = []
        # Optionally re-index every channel with a random held-out test
        # rotation (index 0 is treated as the identity and skipped).
        if self.rotations == True:
            rotation_choice = random.randint(0, len(test_rotation_arr)-1)
            if rotation_choice !=0:
                for file in image_gifti:
                    image.extend(item.data[test_rotation_arr[rotation_choice]] for item in file)
            else:
                for file in image_gifti:
                    image.extend(item.data for item in file)
        else:
            for file in image_gifti:
                image.extend(item.data for item in file)
        # Right hemispheres are mirrored onto the left template.
        if right == True:
            image = [item[reversing_arr] for item in image]
        label = self.label[idx]
        # Middle columns (if any) are confound metadata.
        # NOTE(review): assigning self.metadata inside __getitem__ mutates the
        # dataset on every access; a local variable would suffice.
        if self.input_arr.shape[1] > 2:
            self.metadata = self.input_arr[:,1:-1]
            metadata = self.metadata[idx]
        else:
            metadata = None
        # NOTE(review): lower_bound/upper_bound (and minima/maxima below) are
        # not defined anywhere in this module's visible scope — the smoothing
        # and 'range' paths would raise NameError unless supplied elsewhere.
        if self.smoothing != False:
            for i in range(len(image)):
                image[i] = np.clip(image[i], lower_bound[i%len(lower_bound)].item(), upper_bound[i%len(upper_bound)].item())
        # Per-channel normalisation, cycling through the module-level stats.
        if self.normalisation != None:
            if self.normalisation == 'std':
                for i in range(len(image)):
                    image[i] = ( image[i] - means[i%len(means)].item( )) / stds[i%len(stds)].item()
            elif self.normalisation == 'range':
                for i in range(len(image)):
                    image[i] = image[i] - minima[i%len(minima)].item()
                    image[i] = image[i] / (maxima[i%len(maxima)].item()- minima[i%len(minima)].item())
        # Torchify if required.
        if self.output_as_torch:
            image = torch.Tensor( image )
            label = torch.Tensor( [label] )
            if isinstance(metadata,np.ndarray):
                metadata = torch.Tensor( [metadata] ).squeeze(1)
        # Optional equirectangular projection onto a 170x170 grid, output as
        # (channels, H, W) with 4 channels assumed.
        if self.projected == True:
            image = griddata(xy_points, image.T, grid, 'nearest')
            image = torch.Tensor(image.reshape(170,170,4)).permute(2,0,1)
        # Metadata is included only when present (tensors/arrays have .shape).
        if hasattr(metadata,'shape'):
            sample = {'image': image, 'metadata' : metadata, 'label': label}
        else:
            sample = {'image': image,'label': label}
        return sample
class My_dHCP_Data_Graph(torch.utils.data.Dataset):
    # Graph variant of My_dHCP_Data: takes an `edges` connectivity tensor and
    # yields torch_geometric Data objects (x = vertices-by-channels) instead
    # of dicts.

    def __init__(self, input_arr, warped_files_directory, unwarped_files_directory, edges, rotations = False,
                 number_of_warps = 0, parity_choice = 'left', smoothing = False, normalisation = None, projected =False,
                 sample_only = True, output_as_torch = True):
        """
        A full Dataset for the dHCP data. Can include warps, rotations and parity flips.

        Filename style:
            in the array: only 'sub-X-ses-Y'
            on disk:
                Left  = 'sub-X_ses-Y_L'
                Right = 'sub-X_ses-Y_R'
                if warped: 'sub-X_ses-Y_L_W1'

        INPUT ARGS:
        1. input_arr: numpy array of size Nx2 (or Nx3).
           FIRST column MUST be the filename (excluding directory AND the L/R
           suffix) of the MERGED nibabel files; LAST column must be the
           (float) label.  An optional middle column holds confounding
           metadata (also float, e.g. scan age when predicting birth age).
        2. rotations - bool: whether to apply a random precomputed rotation.
        3. number_of_warps - int: warp number X is encoded in the filename as
           filename_WX; cannot exceed the number of warps present on disk.
        4. parity_choice (IMPORTANT!) - defines left/right-ness:
           'left'     - only left hemispheres
           'both'     - dataset length doubles; the second half are right hemispheres
           'combined' - left and right stacked, read as twice the input channels
        5. smoothing - bool: clip extremal values using the smoothing arrays.
        6. normalisation - 'std' (mean 0, std 1), 'range' (-1 to 1) or None.
        7. output_as_torch - bool: convert outputs to torch Tensors (usually yes).
        """
        self.input_arr = input_arr
        self.image_files = input_arr[:,0]  # per-sample base filenames
        self.label = input_arr[:,-1]       # per-sample targets
        self.edges = edges                 # shared graph connectivity (edge_index)
        self.rotations = rotations
        # NOTE(review): the `projected` argument is ignored here — projection
        # is hard-disabled for the graph output format.
        self.projected = False
        self.number_of_warps = number_of_warps
        self.parity = parity_choice
        self.smoothing = smoothing
        self.normalisation = normalisation
        self.sample_only = sample_only
        self.output_as_torch = output_as_torch
        # Warped and unwarped files live in different directories.
        if self.number_of_warps != 0 and self.number_of_warps != None:
            self.directory = warped_files_directory
        else:
            self.directory = unwarped_files_directory

    def __len__(self):
        # Base length, multiplied by the number of warps when every warp is
        # enumerated (sample_only False) and doubled for parity == 'both'.
        L = len(self.input_arr)
        if self.number_of_warps !=0:
            if self.sample_only == False:
                L = L*self.number_of_warps
        if self.parity == 'both':
            L = 2* L
        return L

    def __test_input_params__(self):
        # Sanity checks on constructor arguments (not called automatically).
        # NOTE(review): self.number_of_rotations and self.rotation_arr are
        # never set by this class, and `self.rotations == bool` compares a
        # value against the type object — these checks would fail if invoked.
        assert self.input_arr.shape[1] >=2, 'check your input array is a nunpy array of files and labels'
        assert type(self.number_of_warps) == int, "number of warps must be an in integer (can be 0)"
        assert self.parity in ['left', 'both', 'combined'], "parity choice must be either left, combined or both"
        if self.number_of_rotations != 0:
            assert self.rotation_arr != None,'Must specify a rotation file containing rotation vertex ids if rotations are non-zero'
        assert self.rotations == bool, 'rotations must be boolean'
        assert self.normalisation in [None, 'none', 'std', 'range'], 'Normalisation must be either std or range'

    def __genfilename__(self,idx, right):
        """
        Build the on-disk filename(s) for sample `idx`, applying PARITY and a
        randomly chosen WARP suffix.  Always returns a list (length 2 when
        parity == 'combined': left first, then right).
        """
        raw_filename = self.image_files[idx]
        filename = []
        # Hemisphere suffix: a single file unless 'combined'.
        if self.parity != 'combined':
            if right == True:
                filename.append(raw_filename + '_R')
            else:
                filename.append(raw_filename + '_L')
        if self.parity == 'combined':
            filename.append(raw_filename + '_L')
            filename.append(raw_filename+'_R')
        # Randomly pick one warp; choice '0' means the unwarped original
        # (no suffix is appended).
        if self.number_of_warps != 0:
            warp_choice = str(random.randint(0,self.number_of_warps))
            if warp_choice !='0':
                filename = [s + '_W'+warp_choice for s in filename ]
        return filename

    def __getitem__(self, idx):
        """
        Load the image(s) as numpy arrays, then the label, then the metadata
        (which may be None), and pack everything into a torch_geometric Data
        object with the shared edge_index.
        """
        # For 'both', the first half of the index range maps to left
        # hemispheres and the second half to right.
        if self.parity == 'both':
            T = self.__len__()//2
            idx, right = idx % T, idx // T
            filename = self.__genfilename__(idx, right)
        else:
            right = False
            filename = self.__genfilename__(idx, right)
        image_gifti = [nb.load(self.directory + '/'+individual_filename+'.shape.gii').darrays for individual_filename in filename]
        image = []
        # Optionally re-index every channel with a random precomputed
        # rotation (index 0 is treated as the identity and skipped).
        if self.rotations == True:
            rotation_choice = random.randint(0, len(rotation_arr)-1)
            if rotation_choice !=0:
                for file in image_gifti:
                    image.extend(item.data[rotation_arr[rotation_choice]] for item in file)
            else:
                for file in image_gifti:
                    image.extend(item.data for item in file)
        else:
            for file in image_gifti:
                image.extend(item.data for item in file)
        # Right hemispheres are mirrored onto the left template.
        if right == True:
            image = [item[reversing_arr] for item in image]
        label = self.label[idx]
        # Middle columns (if any) are confound metadata.
        # NOTE(review): assigning self.metadata inside __getitem__ mutates the
        # dataset on every access; a local variable would suffice.
        if self.input_arr.shape[1] > 2:
            self.metadata = self.input_arr[:,1:-1]
            metadata = self.metadata[idx]
        else:
            metadata = None
        # NOTE(review): lower_bound/upper_bound (and minima/maxima below) are
        # not defined anywhere in this module's visible scope — the smoothing
        # and 'range' paths would raise NameError unless supplied elsewhere.
        if self.smoothing != False:
            for i in range(len(image)):
                image[i] = np.clip(image[i], lower_bound[i%len(lower_bound)].item(), upper_bound[i%len(upper_bound)].item())
        # Per-channel normalisation, cycling through the module-level stats.
        if self.normalisation != None:
            if self.normalisation == 'std':
                for i in range(len(image)):
                    image[i] = ( image[i] - means[i%len(means)].item()) / stds[i%len(stds)].item()
            elif self.normalisation == 'range':
                for i in range(len(image)):
                    image[i] = image[i] - minima[i%len(minima)].item()
                    image[i] = image[i] / (maxima[i%len(maxima)].item()- minima[i%len(minima)].item())
        # Torchify if required.
        if self.output_as_torch:
            image = torch.Tensor( image )
            label = torch.Tensor( [label] )
            if isinstance(metadata,np.ndarray):
                metadata = torch.Tensor( [metadata] )#.squeeze(1)
        # x is (num_vertices, num_channels); metadata attached only when present.
        if hasattr(metadata,'shape'):
            sample = Data(x = image.permute(1,0), metadata = metadata, edge_index = self.edges,
                          y = label)
        else:
            sample = Data(x = image.permute(1,0), edge_index = self.edges,
                          y = label)
        return sample
class My_dHCP_Data_Graph_Test_Rot(torch.utils.data.Dataset):
    # Graph variant drawing from the held-out `test_rotation_arr`; otherwise
    # identical to My_dHCP_Data_Graph.

    def __init__(self, input_arr, warped_files_directory, unwarped_files_directory, edges, rotations = False,
                 number_of_warps = 0, parity_choice = 'left', smoothing = False, normalisation = None, projected =False,
                 sample_only = True, output_as_torch = True):
        """
        A full Dataset for the dHCP data. Can include warps, rotations and parity flips.

        Filename style:
            in the array: only 'sub-X-ses-Y'
            on disk:
                Left  = 'sub-X_ses-Y_L'
                Right = 'sub-X_ses-Y_R'
                if warped: 'sub-X_ses-Y_L_W1'

        INPUT ARGS:
        1. input_arr: numpy array of size Nx2 (or Nx3).
           FIRST column MUST be the filename (excluding directory AND the L/R
           suffix) of the MERGED nibabel files; LAST column must be the
           (float) label.  An optional middle column holds confounding
           metadata (also float, e.g. scan age when predicting birth age).
        2. rotations - bool: whether to apply a random held-out test rotation.
        3. number_of_warps - int: warp number X is encoded in the filename as
           filename_WX; cannot exceed the number of warps present on disk.
        4. parity_choice (IMPORTANT!) - defines left/right-ness:
           'left'     - only left hemispheres
           'both'     - dataset length doubles; the second half are right hemispheres
           'combined' - left and right stacked, read as twice the input channels
        5. smoothing - bool: clip extremal values using the smoothing arrays.
        6. normalisation - 'std' (mean 0, std 1), 'range' (-1 to 1) or None.
        7. output_as_torch - bool: convert outputs to torch Tensors (usually yes).
        """
        self.input_arr = input_arr
        self.image_files = input_arr[:,0]  # per-sample base filenames
        self.label = input_arr[:,-1]       # per-sample targets
        self.edges = edges                 # shared graph connectivity (edge_index)
        self.rotations = rotations
        # NOTE(review): the `projected` argument is ignored here — projection
        # is hard-disabled for the graph output format.
        self.projected = False
        self.number_of_warps = number_of_warps
        self.parity = parity_choice
        self.smoothing = smoothing
        self.normalisation = normalisation
        self.sample_only = sample_only
        self.output_as_torch = output_as_torch
        # Warped and unwarped files live in different directories.
        if self.number_of_warps != 0 and self.number_of_warps != None:
            self.directory = warped_files_directory
        else:
            self.directory = unwarped_files_directory

    def __len__(self):
        # Base length, multiplied by the number of warps when every warp is
        # enumerated (sample_only False) and doubled for parity == 'both'.
        L = len(self.input_arr)
        if self.number_of_warps !=0:
            if self.sample_only == False:
                L = L*self.number_of_warps
        if self.parity == 'both':
            L = 2* L
        return L

    def __test_input_params__(self):
        # Sanity checks on constructor arguments (not called automatically).
        # NOTE(review): self.number_of_rotations and self.rotation_arr are
        # never set by this class, and `self.rotations == bool` compares a
        # value against the type object — these checks would fail if invoked.
        assert self.input_arr.shape[1] >=2, 'check your input array is a nunpy array of files and labels'
        assert type(self.number_of_warps) == int, "number of warps must be an in integer (can be 0)"
        assert self.parity in ['left', 'both', 'combined'], "parity choice must be either left, combined or both"
        if self.number_of_rotations != 0:
            assert self.rotation_arr != None,'Must specify a rotation file containing rotation vertex ids if rotations are non-zero'
        assert self.rotations == bool, 'rotations must be boolean'
        assert self.normalisation in [None, 'none', 'std', 'range'], 'Normalisation must be either std or range'

    def __genfilename__(self,idx, right):
        """
        Build the on-disk filename(s) for sample `idx`, applying PARITY and a
        randomly chosen WARP suffix.  Always returns a list (length 2 when
        parity == 'combined': left first, then right).
        """
        raw_filename = self.image_files[idx]
        filename = []
        # Hemisphere suffix: a single file unless 'combined'.
        if self.parity != 'combined':
            if right == True:
                filename.append(raw_filename + '_R')
            else:
                filename.append(raw_filename + '_L')
        if self.parity == 'combined':
            filename.append(raw_filename + '_L')
            filename.append(raw_filename+'_R')
        # Randomly pick one warp; choice '0' means the unwarped original
        # (no suffix is appended).
        if self.number_of_warps != 0:
            warp_choice = str(random.randint(0,self.number_of_warps))
            if warp_choice !='0':
                filename = [s + '_W'+warp_choice for s in filename ]
        return filename

    def __getitem__(self, idx):
        """
        Load the image(s) as numpy arrays, then the label, then the metadata
        (which may be None), and pack everything into a torch_geometric Data
        object with the shared edge_index.
        """
        # For 'both', the first half of the index range maps to left
        # hemispheres and the second half to right.
        if self.parity == 'both':
            T = self.__len__()//2
            idx, right = idx % T, idx // T
            filename = self.__genfilename__(idx, right)
        else:
            right = False
            filename = self.__genfilename__(idx, right)
        image_gifti = [nb.load(self.directory + '/'+individual_filename+'.shape.gii').darrays for individual_filename in filename]
        image = []
        # Optionally re-index every channel with a random held-out test
        # rotation (index 0 is treated as the identity and skipped).
        if self.rotations == True:
            rotation_choice = random.randint(0, len(test_rotation_arr)-1)
            if rotation_choice !=0:
                for file in image_gifti:
                    image.extend(item.data[test_rotation_arr[rotation_choice]] for item in file)
            else:
                for file in image_gifti:
                    image.extend(item.data for item in file)
        else:
            for file in image_gifti:
                image.extend(item.data for item in file)
        # Right hemispheres are mirrored onto the left template.
        if right == True:
            image = [item[reversing_arr] for item in image]
        label = self.label[idx]
        # Middle columns (if any) are confound metadata.
        # NOTE(review): assigning self.metadata inside __getitem__ mutates the
        # dataset on every access; a local variable would suffice.
        if self.input_arr.shape[1] > 2:
            self.metadata = self.input_arr[:,1:-1]
            metadata = self.metadata[idx]
        else:
            metadata = None
        # NOTE(review): lower_bound/upper_bound (and minima/maxima below) are
        # not defined anywhere in this module's visible scope — the smoothing
        # and 'range' paths would raise NameError unless supplied elsewhere.
        if self.smoothing != False:
            for i in range(len(image)):
                image[i] = np.clip(image[i], lower_bound[i%len(lower_bound)].item(), upper_bound[i%len(upper_bound)].item())
        # Per-channel normalisation, cycling through the module-level stats.
        if self.normalisation != None:
            if self.normalisation == 'std':
                for i in range(len(image)):
                    image[i] = ( image[i] - means[i%len(means)].item()) / stds[i%len(stds)].item()
            elif self.normalisation == 'range':
                for i in range(len(image)):
                    image[i] = image[i] - minima[i%len(minima)].item()
                    image[i] = image[i] / (maxima[i%len(maxima)].item()- minima[i%len(minima)].item())
        # Torchify if required.
        if self.output_as_torch:
            image = torch.Tensor( image )
            label = torch.Tensor( [label] )
            if isinstance(metadata,np.ndarray):
                metadata = torch.Tensor( [metadata] )#.squeeze(1)
        # x is (num_vertices, num_channels); metadata attached only when present.
        if hasattr(metadata,'shape'):
            sample = Data(x = image.permute(1,0), metadata = metadata, edge_index = self.edges,
                          y = label)
        else:
            sample = Data(x = image.permute(1,0), edge_index = self.edges,
                          y = label)
        return sample
0934aed9516b80a837f5a69e792a6412a48475d6 | 2,407 | py | Python | test/test_envdel.py | jwodder/morecontext | e2f75ab5797e66410b87984f93d88028015f8df3 | [
"MIT"
] | 1 | 2021-11-30T03:54:02.000Z | 2021-11-30T03:54:02.000Z | test/test_envdel.py | jwodder/morecontext | e2f75ab5797e66410b87984f93d88028015f8df3 | [
"MIT"
] | null | null | null | test/test_envdel.py | jwodder/morecontext | e2f75ab5797e66410b87984f93d88028015f8df3 | [
"MIT"
] | null | null | null | import os
import pytest
from morecontext import envdel
# Name of the environment variable manipulated by every test in this module.
ENVVAR = "MORECONTEXT_FOO"
def test_envdel(monkeypatch: pytest.MonkeyPatch) -> None:
    """A set variable disappears inside envdel and is restored afterwards."""
    monkeypatch.setenv(ENVVAR, "foo")
    with envdel(ENVVAR):
        assert os.environ.get(ENVVAR) is None
    assert os.environ[ENVVAR] == "foo"
def test_envdel_error(monkeypatch: pytest.MonkeyPatch) -> None:
    """The saved value is restored even when the body raises."""
    monkeypatch.setenv(ENVVAR, "foo")
    with pytest.raises(RuntimeError, match="Catch this!"), envdel(ENVVAR):
        assert os.environ.get(ENVVAR) is None
        raise RuntimeError("Catch this!")
    assert os.environ[ENVVAR] == "foo"
def test_envdel_modified(monkeypatch: pytest.MonkeyPatch) -> None:
    """A value set inside the block is overwritten by the restored original."""
    monkeypatch.setenv(ENVVAR, "foo")
    with envdel(ENVVAR):
        assert os.environ.get(ENVVAR) is None
        os.environ[ENVVAR] = "quux"
    assert os.environ[ENVVAR] == "foo"
def test_envdel_modified_error(monkeypatch: pytest.MonkeyPatch) -> None:
    """Restoration wins over an in-block modification even when the body raises."""
    monkeypatch.setenv(ENVVAR, "foo")
    with pytest.raises(RuntimeError, match="Catch this!"), envdel(ENVVAR):
        assert os.environ.get(ENVVAR) is None
        os.environ[ENVVAR] = "quux"
        raise RuntimeError("Catch this!")
    assert os.environ[ENVVAR] == "foo"
def test_envdel_unset(monkeypatch: pytest.MonkeyPatch) -> None:
    """An already-unset variable stays unset inside and after the block."""
    monkeypatch.delenv(ENVVAR, raising=False)
    with envdel(ENVVAR):
        assert os.environ.get(ENVVAR) is None
    assert os.environ.get(ENVVAR) is None
def test_envdel_unset_error(monkeypatch: pytest.MonkeyPatch) -> None:
    """An already-unset variable stays unset even when the body raises."""
    monkeypatch.delenv(ENVVAR, raising=False)
    with pytest.raises(RuntimeError, match="Catch this!"), envdel(ENVVAR):
        assert os.environ.get(ENVVAR) is None
        raise RuntimeError("Catch this!")
    assert os.environ.get(ENVVAR) is None
def test_envdel_unset_modified(monkeypatch: pytest.MonkeyPatch) -> None:
    """A value set inside the block is deleted again on exit when the variable started unset."""
    monkeypatch.delenv(ENVVAR, raising=False)
    with envdel(ENVVAR):
        assert os.environ.get(ENVVAR) is None
        os.environ[ENVVAR] = "quux"
    assert os.environ.get(ENVVAR) is None
def test_envdel_unset_modified_error(monkeypatch: pytest.MonkeyPatch) -> None:
    """An in-block assignment is rolled back on exception when the variable started unset."""
    monkeypatch.delenv(ENVVAR, raising=False)
    with pytest.raises(RuntimeError, match="Catch this!"), envdel(ENVVAR):
        assert os.environ.get(ENVVAR) is None
        os.environ[ENVVAR] = "quux"
        raise RuntimeError("Catch this!")
    assert os.environ.get(ENVVAR) is None
| 32.527027 | 78 | 0.68467 | 291 | 2,407 | 5.591065 | 0.106529 | 0.110633 | 0.110633 | 0.125384 | 0.947757 | 0.947757 | 0.947757 | 0.947757 | 0.928703 | 0.899816 | 0 | 0 | 0.212713 | 2,407 | 73 | 79 | 32.972603 | 0.858575 | 0 | 0 | 0.785714 | 0 | 0 | 0.05941 | 0 | 0 | 0 | 0 | 0 | 0.285714 | 1 | 0.142857 | false | 0 | 0.053571 | 0 | 0.196429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
118067d024b7d64fbe25b85014cb67952bed5726 | 4,716 | py | Python | piptui/actionForms.py | UltraStudioLTD/PipTUI | 62f5707faa004ba6d330288656ba192f37516921 | [
"MIT"
] | 33 | 2019-07-11T13:19:29.000Z | 2022-02-23T12:42:22.000Z | piptui/actionForms.py | UltraStudioLTD/PipTUI | 62f5707faa004ba6d330288656ba192f37516921 | [
"MIT"
] | 6 | 2019-07-20T21:21:51.000Z | 2021-09-24T16:24:34.000Z | piptui/actionForms.py | UltraStudioLTD/PipTUI | 62f5707faa004ba6d330288656ba192f37516921 | [
"MIT"
] | 8 | 2019-07-21T07:38:08.000Z | 2021-11-11T13:30:45.000Z | import curses
from npyscreen import ActionForm
class UninstallForm(ActionForm):
    """Confirmation popup shown before uninstalling the selected package."""

    CANCEL_BUTTON_BR_OFFSET = (2, 45)
    OK_BUTTON_BR_OFFSET = (2, 5)
    CANCEL_BUTTON_TEXT = 'Cancel'
    OK_BUTTON_TEXT = 'Uninstall'

    def create(self):
        """Centre the popup on the main form and bind the quit keys."""
        height, width = self.parentApp.MainForm.useable_space()
        self.show_atx = width // 2 - 10
        self.show_aty = height // 2 - 5
        self.name = "Uninstall Package?"
        # ^Q, key code 155 and ESC all terminate the whole application.
        quit_app = lambda: exit(0)
        self.add_handlers({'^Q': quit_app, 155: quit_app, curses.ascii.ESC: quit_app})
        self.display()

    def on_ok(self):
        """Uninstall the package currently highlighted in the package list."""
        pkg_box = self.parentApp.MainForm.PkgBoxObj
        selection = pkg_box.value
        pkg_name = pkg_box.values[selection].split()[0]
        self.parentApp.MainForm.LogBoxObj.uninstall_pkg(pkg_name, selection)
        self.parentApp.switchForm('MAIN')

    def on_cancel(self):
        """Dismiss the dialog without touching the package."""
        self.parentApp.switchForm('MAIN')
class UpdateForm(ActionForm):
    """Confirmation popup shown before updating the selected package."""

    CANCEL_BUTTON_BR_OFFSET = (2, 45)
    OK_BUTTON_BR_OFFSET = (2, 5)
    CANCEL_BUTTON_TEXT = 'Cancel'
    OK_BUTTON_TEXT = 'Update'

    def create(self):
        """Centre the popup on the main form and bind the quit keys."""
        height, width = self.parentApp.MainForm.useable_space()
        self.show_atx = width // 2 - 10
        self.show_aty = height // 2 - 5
        self.name = "Update Package?"
        # ^Q, key code 155 and ESC all terminate the whole application.
        quit_app = lambda: exit(0)
        self.add_handlers({'^Q': quit_app, 155: quit_app, curses.ascii.ESC: quit_app})
        self.display()

    def on_ok(self):
        """Update the package currently highlighted in the package list."""
        pkg_box = self.parentApp.MainForm.PkgBoxObj
        selection = pkg_box.value
        pkg_name = pkg_box.values[selection].split()[0]
        self.parentApp.MainForm.LogBoxObj.update_pkg(pkg_name, selection)
        self.parentApp.switchForm('MAIN')

    def on_cancel(self):
        """Dismiss the dialog without touching the package."""
        self.parentApp.switchForm('MAIN')
class InstallForm(ActionForm):
    """Confirmation popup shown before installing the selected package."""

    CANCEL_BUTTON_BR_OFFSET = (2, 45)
    OK_BUTTON_BR_OFFSET = (2, 5)
    CANCEL_BUTTON_TEXT = 'Cancel'
    OK_BUTTON_TEXT = 'Install'

    def create(self):
        """Centre the popup on the main form and bind the quit keys."""
        height, width = self.parentApp.MainForm.useable_space()
        self.show_atx = width // 2 - 10
        self.show_aty = height // 2 - 5
        self.name = "Install Release?"
        # ^Q, key code 155 and ESC all terminate the whole application.
        quit_app = lambda: exit(0)
        self.add_handlers({'^Q': quit_app, 155: quit_app, curses.ascii.ESC: quit_app})
        self.display()

    def on_ok(self):
        """Install the package currently highlighted in the package list."""
        pkg_box = self.parentApp.MainForm.PkgBoxObj
        selection = pkg_box.value
        pkg_name = pkg_box.values[selection].split()[0]
        self.parentApp.MainForm.LogBoxObj.install_pkg(pkg_name, selection)
        self.parentApp.switchForm('MAIN')

    def on_cancel(self):
        """Dismiss the dialog without touching the package."""
        self.parentApp.switchForm('MAIN')
class InstallVersionForm(ActionForm):
    """Confirmation popup for installing a specific release picked on the
    package-info form (pins the version with ``pkg==version``)."""

    CANCEL_BUTTON_BR_OFFSET = (2, 45)
    OK_BUTTON_BR_OFFSET = (2, 5)
    CANCEL_BUTTON_TEXT = 'Cancel'
    OK_BUTTON_TEXT = 'Install'

    def create(self):
        """Centre the popup on the main form and bind the quit keys."""
        height, width = self.parentApp.MainForm.useable_space()
        self.show_atx = width // 2 - 10
        self.show_aty = height // 2 - 5
        self.name = "Install Package?"
        # ^Q, key code 155 and ESC all terminate the whole application.
        quit_app = lambda: exit(0)
        self.add_handlers({'^Q': quit_app, 155: quit_app, curses.ascii.ESC: quit_app})
        self.display()

    def on_ok(self):
        """Install the release highlighted on the package-info form."""
        info_form = self.parentApp.PkgInfoForm
        selection = info_form.releases.value
        version = info_form.releases.values[selection].split()[0]
        requirement = info_form.pkg_name.value + '==' + version
        self.parentApp.MainForm.LogBoxObj.install_pkg(requirement, selection)
        self.parentApp.switchForm('MAIN')

    def on_cancel(self):
        """Return to the package-info form without installing anything."""
        self.parentApp.switchForm('PKG_INFO')
class UpdateAppForm(ActionForm):
    """Confirmation popup shown before updating the application itself."""
    CANCEL_BUTTON_BR_OFFSET = (2, 45)
    OK_BUTTON_BR_OFFSET = (2, 5)
    CANCEL_BUTTON_TEXT = 'Cancel'
    OK_BUTTON_TEXT = 'Update'

    def create(self):
        # Center the popup relative to the main form's usable area.
        rows, cols = self.parentApp.MainForm.useable_space()
        self.show_atx = cols // 2 - 10
        self.show_aty = rows // 2 - 5
        self.name = "Update App?"
        # ^Q, key code 155 (presumably a terminal-specific alt/escape code --
        # confirm) and ESC all terminate the application outright.
        quit_app = lambda: exit(0)
        self.add_handlers({'^Q': quit_app,
                           155: quit_app,
                           curses.ascii.ESC: quit_app})
        self.display()

    def on_ok(self):
        # Kick off the self-update, then return to the main form.
        self.parentApp.MainForm.LogBoxObj.update_app()
        self.parentApp.switchForm('MAIN')

    def on_cancel(self):
        # Abort and fall back to the main form.
        self.parentApp.switchForm('MAIN')
| 32.081633 | 88 | 0.610051 | 565 | 4,716 | 4.899115 | 0.115044 | 0.136199 | 0.121387 | 0.054191 | 0.901734 | 0.876445 | 0.876445 | 0.876445 | 0.876445 | 0.876445 | 0 | 0.02449 | 0.272689 | 4,716 | 146 | 89 | 32.30137 | 0.782507 | 0 | 0 | 0.826087 | 0 | 0 | 0.041773 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.017391 | 0 | 0.365217 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
11b6edb908a184f1dc3dd55ab87c48e2dbd8ac4d | 17,373 | py | Python | openprocurement/auction/tests/data/couch_data.py | OrysiaDrabych/openprocurement.auction | d68b4aca7313dd4c7c13bd22c772a32a1b70d79f | [
"Apache-2.0"
] | 23 | 2015-07-09T17:07:39.000Z | 2020-11-14T11:23:39.000Z | openprocurement/auction/tests/data/couch_data.py | OrysiaDrabych/openprocurement.auction | d68b4aca7313dd4c7c13bd22c772a32a1b70d79f | [
"Apache-2.0"
] | 23 | 2015-01-14T22:33:58.000Z | 2018-02-08T16:31:20.000Z | openprocurement/auction/tests/data/couch_data.py | OrysiaDrabych/openprocurement.auction | d68b4aca7313dd4c7c13bd22c772a32a1b70d79f | [
"Apache-2.0"
] | 27 | 2015-02-17T10:22:32.000Z | 2021-06-08T06:50:45.000Z | # 'limit_replications_progress' in 'server_config' is undefined
def _l1a_case(func, seq, status):
    # Build one (request-fixture, expected-response) pair for a single
    # replication task whose 'source_seq' is `seq`.  `func` is None, 'any'
    # or 'all'; None omits 'server_config' entirely, so the server's default
    # 'limit_replications_progress' (1024, judging by the expected statuses)
    # applies.
    fixture = {} if func is None else {
        'server_config': {'limit_replications_func': func}}
    fixture['couch_tasks'] = [{'replication_id': '0001',
                               'progress': 10,
                               'type': 'replication',
                               'source_seq': seq,
                               'checkpointed_source_seq': 0}]
    return (fixture, {'status_int': status, 'body': '{"0001": 10}'})


# 'limit_replications_progress' is left out of 'server_config' everywhere:
# source_seq up to 1024 yields 200, 1025 yields 503, for each setting of
# 'limit_replications_func' (absent, 'any', 'all').
l1a = [_l1a_case(func, seq, status)
       for func in (None, 'any', 'all')
       for seq, status in ((1023, 200), (1024, 200), (1025, 503))]
def _l1b_case(func, seq, status):
    # Build one (request-fixture, expected-response) pair with an explicit
    # 'limit_replications_progress' of 1024; `func` is None, 'any' or 'all'
    # (None leaves 'limit_replications_func' out of the server config).
    config = {'limit_replications_progress': 1024}
    if func is not None:
        config['limit_replications_func'] = func
    fixture = {'server_config': config,
               'couch_tasks': [{'replication_id': '0001',
                                'progress': 10,
                                'type': 'replication',
                                'source_seq': seq,
                                'checkpointed_source_seq': 0}]}
    return (fixture, {'status_int': status, 'body': '{"0001": 10}'})


# 'limit_replications_progress' is pinned to 1024, matching the defaulted
# behaviour exercised by l1a: seq <= 1024 yields 200, 1025 yields 503.
l1b = [_l1b_case(func, seq, status)
       for func in (None, 'any', 'all')
       for seq, status in ((1023, 200), (1024, 200), (1025, 503))]
def _l1c_case(func, seq, status):
    # Build one (request-fixture, expected-response) pair with an explicit
    # 'limit_replications_progress' of 512; `func` is None, 'any' or 'all'
    # (None leaves 'limit_replications_func' out of the server config).
    config = {'limit_replications_progress': 512}
    if func is not None:
        config['limit_replications_func'] = func
    fixture = {'server_config': config,
               'couch_tasks': [{'replication_id': '0001',
                                'progress': 10,
                                'type': 'replication',
                                'source_seq': seq,
                                'checkpointed_source_seq': 0}]}
    return (fixture, {'status_int': status, 'body': '{"0001": 10}'})


# 'limit_replications_progress' is lowered to 512, so the 200/503 boundary
# moves with it: seq <= 512 yields 200, 513 yields 503.
l1c = [_l1c_case(func, seq, status)
       for func in (None, 'any', 'all')
       for seq, status in ((511, 200), (512, 200), (513, 503))]
def _l2a_tasks():
    # Fresh dict copies on every call so the two fixtures share no objects.
    # The first task carries no 'type' key at all; the second is a normal
    # replication task.
    return [{'replication_id': '0001',
             'progress': 10,
             'source_seq': 1024,
             'checkpointed_source_seq': 0},
            {'replication_id': '0002',
             'progress': 20,
             'type': 'replication',
             'source_seq': 1024,
             'checkpointed_source_seq': 0}]


# A task without a 'type' key is skipped: only task '0002' appears in the
# response body, with or without 'limit_replications_func' set to 'all'.
l2a = [
    ({'couch_tasks': _l2a_tasks()},
     {'status_int': 200, 'body': '{"0002": 20}'}),
    ({'server_config': {'limit_replications_func': 'all'},
      'couch_tasks': _l2a_tasks()},
     {'status_int': 200, 'body': '{"0002": 20}'}),
]
def _l2b_tasks():
    # Fresh dict copies on every call so the two fixtures share no objects.
    # The first task has a non-replication type ('smth'); the second is a
    # normal replication task.
    return [{'replication_id': '0001',
             'progress': 10,
             'type': 'smth',
             'source_seq': 1024,
             'checkpointed_source_seq': 0},
            {'replication_id': '0002',
             'progress': 20,
             'type': 'replication',
             'source_seq': 1024,
             'checkpointed_source_seq': 0}]


# A task whose type is not 'replication' is skipped: only task '0002'
# appears in the response body, with or without 'limit_replications_func'.
l2b = [
    ({'couch_tasks': _l2b_tasks()},
     {'status_int': 200, 'body': '{"0002": 20}'}),
    ({'server_config': {'limit_replications_func': 'all'},
      'couch_tasks': _l2b_tasks()},
     {'status_int': 200, 'body': '{"0002": 20}'}),
]
# Check the difference between 'any' and 'all' for 'limit_replications_func':
# two replication tasks where only '0002' (seq 1025) is past the limit.
# Without the key the response is 200; with 'all' it is 503.
l3 = [
    ({'couch_tasks': [{'replication_id': '0001',
                       'progress': 10,
                       'type': 'replication',
                       'source_seq': 1024,
                       'checkpointed_source_seq': 0},
                      # note the extra unknown 'xxx' key on this task
                      {'replication_id': '0002',
                       'progress': 20,
                       'type': 'replication',
                       'source_seq': 1025,
                       'checkpointed_source_seq': 0,
                       'xxx': 'xxx'}]},
     {'status_int': 200, 'body': '{"0001": 10, "0002": 20}'}),
    ({'server_config': {'limit_replications_func': 'all'},
      'couch_tasks': [{'replication_id': '0001',
                       'progress': 10,
                       'type': 'replication',
                       'source_seq': 1024,
                       'checkpointed_source_seq': 0},
                      {'replication_id': '0002',
                       'progress': 20,
                       'type': 'replication',
                       'source_seq': 1025,
                       'checkpointed_source_seq': 0}]},
     {'status_int': 503, 'body': '{"0001": 10, "0002": 20}'}),
]
| 50.066282 | 77 | 0.359236 | 1,122 | 17,373 | 5.245098 | 0.049911 | 0.119286 | 0.139167 | 0.145794 | 0.974342 | 0.967884 | 0.964146 | 0.957179 | 0.957179 | 0.957179 | 0 | 0.09434 | 0.508836 | 17,373 | 346 | 78 | 50.210983 | 0.595336 | 0.037242 | 0 | 0.924765 | 0 | 0 | 0.327708 | 0.11167 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
6d6be24b2c674e77a1eaedbaa39607b95ece57f9 | 38,213 | py | Python | model_wrapper.py | colorfulbrain/brain2020 | 1dde5d34fd2ba1f38bcc38f2c973d167c8c3a168 | [
"MIT"
] | 91 | 2020-04-01T08:13:54.000Z | 2022-03-27T22:47:50.000Z | model_wrapper.py | GaelKBertrand/brain2020 | 5de2d625cce3392985cb7efedbe7df9431fb4f49 | [
"MIT"
] | 17 | 2020-07-25T09:01:28.000Z | 2022-03-07T13:53:15.000Z | model_wrapper.py | GaelKBertrand/brain2020 | 5de2d625cce3392985cb7efedbe7df9431fb4f49 | [
"MIT"
] | 37 | 2020-05-06T04:08:51.000Z | 2022-03-25T13:44:17.000Z | import os
import numpy as np
from model import _CNN, _FCN, _MLP_A, _MLP_B, _MLP_C, _MLP_D
from utils import matrix_sum, get_accu, get_MCC, get_confusion_matrix, write_raw_score, DPM_statistics, timeit, read_csv
from dataloader import CNN_Data, FCN_Data, MLP_Data, MLP_Data_apoe, CNN_MLP_Data
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torch.optim as optim
from tqdm import tqdm
import numpy as np
"""
model wrapper class are defined in this scripts which includes the following methods:
1. init: initialize dataloader, model
2. train:
3. valid:
4. test:
5. ...
1. FCN wrapper
2. MLP wrapper
3. CNN wrapper
"""
class CNN_Wrapper:
    """Wrapper around the 3D CNN classifier (_CNN).

    Prepares train/valid/test dataloaders, trains with a class-imbalance
    strategy chosen by `balanced`, keeps only the checkpoint of the best
    validation epoch, and evaluates on the internal splits plus the external
    AIBL / NACC / FHS cohorts.  Can also dump the CNN's hidden features.
    """

    def __init__(self,
                 fil_num,
                 drop_rate,
                 seed,
                 batch_size,
                 balanced,
                 Data_dir,
                 exp_idx,
                 model_name,
                 metric):
        """
        :param fil_num: output channel number of the first convolution layer
        :param drop_rate: dropout rate of the last 2 layers, see model.py for details
        :param seed: random seed
        :param batch_size: batch size for training CNN
        :param balanced: balanced could take value 0 or 1, corresponding to different
                         approaches to handle data imbalance,
                         see self.prepare_dataloader for more details
        :param Data_dir: data path for training data
        :param exp_idx: experiment index maps to different data splits
        :param model_name: give a name to the model
        :param metric: metric used for saving model during training, can be either
                       'accuracy' or 'MCC'; the epoch where the validation set is
                       best under this metric is the one kept on disk
        """
        self.seed = seed
        self.exp_idx = exp_idx
        self.Data_dir = Data_dir
        self.model_name = model_name
        self.eval_metric = get_accu if metric == 'accuracy' else get_MCC
        self.model = _CNN(fil_num=fil_num, drop_rate=drop_rate).cuda()
        self.prepare_dataloader(batch_size, balanced, Data_dir)
        self.checkpoint_dir = './checkpoint_dir/{}_exp{}/'.format(self.model_name, exp_idx)
        # makedirs also creates the parent './checkpoint_dir/' when missing;
        # the previous os.mkdir call would raise FileNotFoundError then.
        os.makedirs(self.checkpoint_dir, exist_ok=True)

    def train(self, lr, epochs):
        """Train for `epochs` epochs and return the best validation metric."""
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr, betas=(0.5, 0.999))
        # weight the minority class by the imbalance ratio (ratio is 1 when the
        # weighted sampler already balanced the batches in prepare_dataloader)
        self.criterion = nn.CrossEntropyLoss(weight=torch.Tensor([1, self.imbalanced_ratio])).cuda()
        self.optimal_valid_matrix = [[0, 0], [0, 0]]
        self.optimal_valid_metric = 0
        self.optimal_epoch = -1
        for self.epoch in range(epochs):
            self.train_model_epoch()
            valid_matrix = self.valid_model_epoch()
            print('{}th epoch validation confusion matrix:'.format(self.epoch), valid_matrix, 'eval_metric:', "%.4f" % self.eval_metric(valid_matrix))
            self.save_checkpoint(valid_matrix)
        print('Best model saved at the {}th epoch:'.format(self.optimal_epoch), self.optimal_valid_metric, self.optimal_valid_matrix)
        return self.optimal_valid_metric

    def test(self):
        """Evaluate the best checkpoint on all internal and external cohorts,
        writing per-sample raw scores to raw_score_<stage>.txt."""
        print('testing ... ')
        self.model.load_state_dict(torch.load('{}{}_{}.pth'.format(self.checkpoint_dir, self.model_name, self.optimal_epoch)))
        self.model.train(False)
        with torch.no_grad():
            for stage in ['train', 'valid', 'test', 'AIBL', 'NACC', 'FHS']:
                Data_dir = self.Data_dir
                if stage in ['AIBL', 'NACC', 'FHS']:
                    # external cohorts live in sibling folders named after the cohort
                    Data_dir = Data_dir.replace('ADNI', stage)
                data = CNN_Data(Data_dir, self.exp_idx, stage=stage, seed=self.seed)
                dataloader = DataLoader(data, batch_size=10, shuffle=False)
                matrix = [[0, 0], [0, 0]]
                # 'with' guarantees the score file is closed even on error
                with open(self.checkpoint_dir + 'raw_score_{}.txt'.format(stage), 'w') as f:
                    for idx, (inputs, labels) in enumerate(dataloader):
                        inputs, labels = inputs.cuda(), labels.cuda()
                        preds = self.model(inputs)
                        write_raw_score(f, preds, labels)
                        matrix = matrix_sum(matrix, get_confusion_matrix(preds, labels))
                print(stage + ' confusion matrix ', matrix, ' accuracy ', self.eval_metric(matrix))

    def gen_features(self):
        """Dump the CNN's hidden features for every scan as .npy files
        under ./DPMs/<model_name>_exp<exp_idx>/."""
        self.model.load_state_dict(
            torch.load('{}{}_{}.pth'.format(self.checkpoint_dir, self.model_name, self.optimal_epoch)))
        self.model.train(False)
        self.feature_dir = './DPMs/{}_exp{}/'.format(self.model_name, self.exp_idx)
        os.makedirs(self.feature_dir, exist_ok=True)  # also creates ./DPMs/ if absent
        with torch.no_grad():
            for stage in ['train', 'valid', 'test', 'AIBL', 'NACC', 'FHS']:
                Data_dir = self.Data_dir
                if stage in ['AIBL', 'NACC', 'FHS']:
                    Data_dir = Data_dir.replace('ADNI', stage)
                data = CNN_Data(Data_dir, self.exp_idx, stage=stage, seed=self.seed)
                filenames = data.Data_list
                dataloader = DataLoader(data, batch_size=1, shuffle=False)
                for idx, (inputs, labels) in enumerate(dataloader):
                    inputs, labels = inputs.cuda(), labels.cuda()
                    # batch_size=1, so idx maps one-to-one onto filenames
                    preds = self.model(inputs, stage="get_features").cpu().numpy().squeeze()
                    np.save(self.feature_dir + filenames[idx] + '.npy', preds)

    def save_checkpoint(self, valid_matrix):
        """Persist the model iff this epoch is the best so far; keep only one
        .pth file in the checkpoint directory."""
        if self.eval_metric(valid_matrix) >= self.optimal_valid_metric:
            self.optimal_epoch = self.epoch
            self.optimal_valid_matrix = valid_matrix
            self.optimal_valid_metric = self.eval_metric(valid_matrix)
            # remove any previously saved checkpoint before writing the new one
            for root, Dir, Files in os.walk(self.checkpoint_dir):
                for File in Files:
                    if File.endswith('.pth'):
                        try:
                            os.remove(self.checkpoint_dir + File)
                        except OSError:
                            # best-effort cleanup; narrowed from a bare except
                            pass
            torch.save(self.model.state_dict(), '{}{}_{}.pth'.format(self.checkpoint_dir, self.model_name, self.optimal_epoch))

    def train_model_epoch(self):
        """Run one optimization pass over the training dataloader."""
        self.model.train(True)
        for inputs, labels in self.train_dataloader:
            inputs, labels = inputs.cuda(), labels.cuda()
            self.model.zero_grad()
            preds = self.model(inputs)
            loss = self.criterion(preds, labels)
            loss.backward()
            self.optimizer.step()

    def valid_model_epoch(self):
        """Return the confusion matrix accumulated over the validation set."""
        with torch.no_grad():
            self.model.train(False)
            valid_matrix = [[0, 0], [0, 0]]
            for inputs, labels in self.valid_dataloader:
                inputs, labels = inputs.cuda(), labels.cuda()
                preds = self.model(inputs)
                valid_matrix = matrix_sum(valid_matrix, get_confusion_matrix(preds, labels))
        return valid_matrix

    def prepare_dataloader(self, batch_size, balanced, Data_dir):
        """Build train/valid/test dataloaders and set self.imbalanced_ratio."""
        train_data = CNN_Data(Data_dir, self.exp_idx, stage='train', seed=self.seed)
        valid_data = CNN_Data(Data_dir, self.exp_idx, stage='valid', seed=self.seed)
        test_data = CNN_Data(Data_dir, self.exp_idx, stage='test', seed=self.seed)
        sample_weight, self.imbalanced_ratio = train_data.get_sample_weights()
        # the following if else blocks represent two ways of handling class imbalance issue
        if balanced == 1:
            # use pytorch sampler to sample data with probability according to the count of each class
            # so that each mini-batch has the same expectation counts of samples from each class
            sampler = torch.utils.data.sampler.WeightedRandomSampler(sample_weight, len(sample_weight))
            self.train_dataloader = DataLoader(train_data, batch_size=batch_size, sampler=sampler)
            self.imbalanced_ratio = 1
        elif balanced == 0:
            # sample data from the same probability, but
            # self.imbalanced_ratio will be used in the weighted cross entropy loss to handle imbalanced issue
            self.train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True, drop_last=True)
        self.valid_dataloader = DataLoader(valid_data, batch_size=batch_size, shuffle=False)
        self.test_dataloader = DataLoader(test_data, batch_size=batch_size, shuffle=False)
class FCN_Wrapper(CNN_Wrapper):
    """Wrapper around the patch-based FCN (_FCN).

    Trains the dense classifier on fixed-size patches; for validation and
    testing the dense layers are converted to convolutions (dense_to_conv)
    and run over whole volumes, producing disease probability maps (DPMs)
    saved under ./DPMs/ for the downstream MLP wrappers.
    """

    def __init__(self, fil_num,
                 drop_rate,
                 seed,
                 batch_size,
                 balanced,
                 Data_dir,
                 exp_idx,
                 model_name,
                 metric,
                 patch_size):
        """
        :param fil_num: output channel number of the first convolution layer
        :param drop_rate: dropout rate of the last 2 layers, see model.py for details
        :param seed: random seed
        :param batch_size: batch size for training FCN
        :param balanced: balanced could take value 0 or 1, corresponding to different
                         approaches to handle data imbalance,
                         see self.prepare_dataloader for more details
        :param Data_dir: data path for training data
        :param exp_idx: experiment index maps to different data splits
        :param model_name: give a name to the model
        :param metric: metric used for saving model during training, can take
                       'accuracy' or 'MCC'; the epoch where the validation set is
                       best under this metric is the one kept on disk
        :param patch_size: size of patches for FCN training, must be 47,
                           otherwise the model has to be changed accordingly
        """
        self.seed = seed
        self.exp_idx = exp_idx
        self.Data_dir = Data_dir
        self.patch_size = patch_size
        self.model_name = model_name
        self.eval_metric = get_accu if metric == 'accuracy' else get_MCC
        self.model = _FCN(num=fil_num, p=drop_rate).cuda()
        self.prepare_dataloader(batch_size, balanced, Data_dir)
        # makedirs creates parent directories too, replacing the four separate
        # exists()/mkdir() checks of the original code (and avoiding the race
        # between the check and the mkdir).
        self.checkpoint_dir = './checkpoint_dir/{}_exp{}/'.format(self.model_name, exp_idx)
        os.makedirs(self.checkpoint_dir, exist_ok=True)
        self.DPMs_dir = './DPMs/{}_exp{}/'.format(self.model_name, exp_idx)
        os.makedirs(self.DPMs_dir, exist_ok=True)

    def train(self, lr, epochs):
        """Train for `epochs` epochs; validate (and possibly checkpoint) every
        20 epochs; return the best validation metric."""
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr, betas=(0.5, 0.999))
        self.criterion = nn.CrossEntropyLoss(weight=torch.Tensor([1, self.imbalanced_ratio])).cuda()
        self.optimal_valid_matrix = [[0, 0], [0, 0]]
        self.optimal_valid_metric = 0
        self.optimal_epoch = -1
        for self.epoch in range(epochs):
            self.train_model_epoch()
            # validation runs whole-volume inference, so it only happens
            # every 20 epochs (including epoch 0)
            if self.epoch % 20 == 0:
                valid_matrix = self.valid_model_epoch()
                print('{}th epoch validation confusion matrix:'.format(self.epoch), valid_matrix, 'eval_metric:', "%.4f" % self.eval_metric(valid_matrix))
                self.save_checkpoint(valid_matrix)
        print('Best model saved at the {}th epoch:'.format(self.optimal_epoch), self.optimal_valid_metric, self.optimal_valid_matrix)
        return self.optimal_valid_metric

    def valid_model_epoch(self):
        """Run whole-volume inference over the validation set and return the
        confusion matrix derived from the DPMs."""
        self.fcn = self.model.dense_to_conv()
        DPMs, Labels = [], []
        with torch.no_grad():
            self.fcn.train(False)
            for idx, (inputs, labels) in enumerate(self.valid_dataloader):
                inputs, labels = inputs.cuda(), labels.cuda()
                DPM = self.fcn(inputs, stage='inference')
                DPMs.append(DPM.cpu().numpy().squeeze())
                Labels.append(labels)
        valid_matrix, ACCU, F1, MCC = DPM_statistics(DPMs, Labels)
        return valid_matrix

    def prepare_dataloader(self, batch_size, balanced, Data_dir):
        """Build patch-based train loader and whole-sample valid/test loaders."""
        train_data = FCN_Data(Data_dir, self.exp_idx, stage='train', seed=self.seed, patch_size=self.patch_size)
        valid_data = FCN_Data(Data_dir, self.exp_idx, stage='valid', seed=self.seed, patch_size=self.patch_size)
        test_data = FCN_Data(Data_dir, self.exp_idx, stage='test', seed=self.seed, patch_size=self.patch_size)
        sample_weight, self.imbalanced_ratio = train_data.get_sample_weights()
        # the following if else blocks represent two ways of handling class imbalance issue
        if balanced == 1:
            # use pytorch sampler to sample data with probability according to the count of each class
            # so that each mini-batch has the same expectation counts of samples from each class
            sampler = torch.utils.data.sampler.WeightedRandomSampler(sample_weight, len(sample_weight))
            self.train_dataloader = DataLoader(train_data, batch_size=batch_size, sampler=sampler)
            self.imbalanced_ratio = 1
        elif balanced == 0:
            # sample data from the same probability, but
            # self.imbalanced_ratio will be used in the weighted cross entropy loss to handle imbalanced issue
            self.train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True, drop_last=True)
        # validation/test run one volume at a time
        self.valid_dataloader = DataLoader(valid_data, batch_size=1, shuffle=False)
        self.test_dataloader = DataLoader(test_data, batch_size=1, shuffle=False)

    def test_and_generate_DPMs(self):
        """Run whole-volume inference with the best checkpoint on every cohort,
        saving one DPM .npy per scan plus per-stage MCC/F1/ACCU arrays."""
        print('testing and generating DPMs ... ')
        self.model.load_state_dict(torch.load('{}{}_{}.pth'.format(self.checkpoint_dir, self.model_name, self.optimal_epoch)))
        self.fcn = self.model.dense_to_conv()
        self.fcn.train(False)
        with torch.no_grad():
            for stage in ['train', 'valid', 'test', 'AIBL', 'NACC', 'FHS']:
                Data_dir = self.Data_dir
                if stage in ['AIBL', 'NACC', 'FHS']:
                    # external cohorts live in sibling folders named after the cohort
                    Data_dir = Data_dir.replace('ADNI', stage)
                data = FCN_Data(Data_dir, self.exp_idx, stage=stage, whole_volume=True, seed=self.seed, patch_size=self.patch_size)
                filenames = data.Data_list
                dataloader = DataLoader(data, batch_size=1, shuffle=False)
                DPMs, Labels = [], []
                for idx, (inputs, labels) in enumerate(dataloader):
                    inputs, labels = inputs.cuda(), labels.cuda()
                    DPM = self.fcn(inputs, stage='inference').cpu().numpy().squeeze()
                    # batch_size=1, so idx maps one-to-one onto filenames
                    np.save(self.DPMs_dir + filenames[idx] + '.npy', DPM)
                    DPMs.append(DPM)
                    Labels.append(labels)
                matrix, ACCU, F1, MCC = DPM_statistics(DPMs, Labels)
                np.save(self.DPMs_dir + '{}_MCC.npy'.format(stage), MCC)
                np.save(self.DPMs_dir + '{}_F1.npy'.format(stage), F1)
                np.save(self.DPMs_dir + '{}_ACCU.npy'.format(stage), ACCU)
                print(stage + ' confusion matrix ', matrix, ' accuracy ', self.eval_metric(matrix))
        print('DPM generation is done')
class MLP_Wrapper_A(CNN_Wrapper):
    """MLP trained on the FCN-derived DPM (ROI) features only.

    Runs on CPU (no .cuda() calls, unlike CNN_Wrapper).  Reuses
    CNN_Wrapper's save_checkpoint; training is quiet (no per-epoch prints).
    """

    def __init__(self, imbalan_ratio, fil_num, drop_rate, seed, batch_size, balanced, exp_idx, model_name, metric, roi_threshold, roi_count=200, choice='count'):
        """
        :param imbalan_ratio: extra multiplier applied to the loss-weight
                              imbalance ratio when balanced == 0
        :param roi_threshold / roi_count / choice: ROI selection settings
                              forwarded to MLP_Data
        (the remaining parameters have the same meaning as in CNN_Wrapper)
        """
        self.seed = seed
        self.imbalan_ratio = imbalan_ratio
        self.choice = choice
        self.exp_idx = exp_idx
        self.model_name = model_name
        self.roi_count = roi_count
        self.roi_threshold = roi_threshold
        self.eval_metric = get_accu if metric == 'accuracy' else get_MCC
        self.checkpoint_dir = './checkpoint_dir/{}_exp{}/'.format(self.model_name, exp_idx)
        # makedirs also creates the parent './checkpoint_dir/' when missing;
        # the previous os.mkdir call would raise FileNotFoundError then.
        os.makedirs(self.checkpoint_dir, exist_ok=True)
        # features come from the FCN's DPM output directory
        self.Data_dir = './DPMs/fcn_exp{}/'.format(exp_idx)
        self.prepare_dataloader(batch_size, balanced, self.Data_dir)
        self.model = _MLP_A(in_size=self.in_size, fil_num=fil_num, drop_rate=drop_rate)

    def prepare_dataloader(self, batch_size, balanced, Data_dir):
        """Build train/valid/test dataloaders over MLP_Data and set
        self.imbalanced_ratio and self.in_size."""
        train_data = MLP_Data(Data_dir, self.exp_idx, stage='train', roi_threshold=self.roi_threshold, roi_count=self.roi_count, choice=self.choice, seed=self.seed)
        valid_data = MLP_Data(Data_dir, self.exp_idx, stage='valid', roi_threshold=self.roi_threshold, roi_count=self.roi_count, choice=self.choice, seed=self.seed)
        test_data = MLP_Data(Data_dir, self.exp_idx, stage='test', roi_threshold=self.roi_threshold, roi_count=self.roi_count, choice=self.choice, seed=self.seed)
        sample_weight, self.imbalanced_ratio = train_data.get_sample_weights()
        # the following if else blocks represent two ways of handling class imbalance issue
        if balanced == 1:
            # use pytorch sampler to sample data with probability according to the count of each class
            # so that each mini-batch has the same expectation counts of samples from each class
            sampler = torch.utils.data.sampler.WeightedRandomSampler(sample_weight, len(sample_weight))
            self.train_dataloader = DataLoader(train_data, batch_size=batch_size, sampler=sampler)
            self.imbalanced_ratio = 1
        elif balanced == 0:
            # sample data from the same probability, but
            # self.imbalanced_ratio will be used in the weighted cross entropy loss to handle imbalanced issue
            self.train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True, drop_last=True)
            # scale by the extra user-supplied ratio in this mode only
            self.imbalanced_ratio *= self.imbalan_ratio
        self.valid_dataloader = DataLoader(valid_data, batch_size=1, shuffle=False)
        self.test_dataloader = DataLoader(test_data, batch_size=1, shuffle=False)
        self.in_size = train_data.in_size

    def train(self, lr, epochs):
        """Train for `epochs` epochs (validating every epoch, quietly) and
        return the best validation metric."""
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr, betas=(0.5, 0.999))
        self.criterion = nn.CrossEntropyLoss(weight=torch.Tensor([1, self.imbalanced_ratio]))
        self.optimal_valid_matrix = [[0, 0], [0, 0]]
        self.optimal_valid_metric = 0
        self.optimal_epoch = -1
        for self.epoch in range(epochs):
            self.train_model_epoch()
            valid_matrix = self.valid_model_epoch()
            self.save_checkpoint(valid_matrix)
        return self.optimal_valid_metric

    def train_model_epoch(self):
        """One optimization pass; uses the ROI features (first element) only."""
        self.model.train(True)
        for inputs, labels, _ in self.train_dataloader:
            self.model.zero_grad()
            preds = self.model(inputs)
            loss = self.criterion(preds, labels)
            loss.backward()
            self.optimizer.step()

    def valid_model_epoch(self):
        """Return the confusion matrix accumulated over the validation set."""
        with torch.no_grad():
            self.model.train(False)
            valid_matrix = [[0, 0], [0, 0]]
            for inputs, labels, _ in self.valid_dataloader:
                preds = self.model(inputs)
                valid_matrix = matrix_sum(valid_matrix, get_confusion_matrix(preds, labels))
        return valid_matrix

    def test(self, repe_idx):
        """Evaluate the best checkpoint on every cohort (note: 'FHS_Full'
        here, unlike the other wrappers); return one metric per stage and
        write raw scores to raw_score_<stage>_<repe_idx>.txt."""
        self.model.load_state_dict(torch.load('{}{}_{}.pth'.format(self.checkpoint_dir, self.model_name, self.optimal_epoch)))
        self.model.train(False)
        accu_list = []
        with torch.no_grad():
            for stage in ['train', 'valid', 'test', 'AIBL', 'NACC', 'FHS_Full']:
                data = MLP_Data(self.Data_dir, self.exp_idx, stage=stage, roi_threshold=self.roi_threshold, roi_count=self.roi_count, choice=self.choice, seed=self.seed)
                dataloader = DataLoader(data, batch_size=10, shuffle=False)
                matrix = [[0, 0], [0, 0]]
                # 'with' guarantees the score file is closed even on error
                with open(self.checkpoint_dir + 'raw_score_{}_{}.txt'.format(stage, repe_idx), 'w') as f:
                    for idx, (inputs, labels, _) in enumerate(dataloader):
                        preds = self.model(inputs)
                        write_raw_score(f, preds, labels)
                        matrix = matrix_sum(matrix, get_confusion_matrix(preds, labels))
                accu_list.append(self.eval_metric(matrix))
        return accu_list
class MLP_Wrapper_B(MLP_Wrapper_A):
    """MLP variant that classifies from the demographic vector alone.

    Reuses MLP_Wrapper_A's setup and checkpointing, but feeds the model the
    third dataloader element (the 4-element demographic vector, matching
    _MLP_B's in_size=4) instead of the ROI features.
    """

    def __init__(self, imbalan_ratio, fil_num, drop_rate, seed, batch_size, balanced, exp_idx, model_name, metric, roi_threshold, roi_count, choice):
        super().__init__(imbalan_ratio, fil_num, drop_rate, seed, batch_size, balanced, exp_idx, model_name, metric, roi_threshold, roi_count, choice)
        # replace the ROI-feature model built by the parent with the
        # demographics-only model
        self.model = _MLP_B(in_size=4, fil_num=fil_num, drop_rate=drop_rate)

    def train_model_epoch(self):
        # One optimization pass; only the demographic vector is consumed.
        self.model.train(True)
        for _roi, labels, demors in self.train_dataloader:
            self.model.zero_grad()
            output = self.model(demors)
            self.criterion(output, labels).backward()
            self.optimizer.step()

    def valid_model_epoch(self):
        # Accumulate the validation confusion matrix without gradient tracking.
        matrix = [[0, 0], [0, 0]]
        with torch.no_grad():
            self.model.train(False)
            for _roi, labels, demors in self.valid_dataloader:
                output = self.model(demors)
                matrix = matrix_sum(matrix, get_confusion_matrix(output, labels))
        return matrix

    def test(self, repe_idx):
        # Evaluate the best checkpoint on every cohort; returns one metric
        # per stage and writes raw scores to raw_score_<stage>_<repe_idx>.txt.
        ckpt_path = '{}{}_{}.pth'.format(self.checkpoint_dir, self.model_name, self.optimal_epoch)
        self.model.load_state_dict(torch.load(ckpt_path))
        self.model.train(False)
        accu_list = []
        with torch.no_grad():
            for stage in ['train', 'valid', 'test', 'AIBL', 'NACC', 'FHS']:
                data = MLP_Data(self.Data_dir, self.exp_idx, stage=stage,
                                roi_threshold=self.roi_threshold,
                                roi_count=self.roi_count,
                                choice=self.choice, seed=self.seed)
                loader = DataLoader(data, batch_size=10, shuffle=False)
                matrix = [[0, 0], [0, 0]]
                with open(self.checkpoint_dir + 'raw_score_{}_{}.txt'.format(stage, repe_idx), 'w') as f:
                    for _roi, labels, demors in loader:
                        output = self.model(demors)
                        write_raw_score(f, output, labels)
                        matrix = matrix_sum(matrix, get_confusion_matrix(output, labels))
                accu_list.append(self.eval_metric(matrix))
        return accu_list
class MLP_Wrapper_C(MLP_Wrapper_A):
    """MLP variant combining the ROI (DPM) features with the demographics.

    Reuses MLP_Wrapper_A's setup and checkpointing; the model consumes both
    the ROI feature vector and the demographic vector (hence in_size + 4,
    the 4 matching _MLP_B's demographics-only input size).
    """

    def __init__(self, imbalan_ratio, fil_num, drop_rate, seed, batch_size, balanced, exp_idx, model_name, metric, roi_threshold, roi_count, choice):
        super().__init__(imbalan_ratio, fil_num, drop_rate, seed, batch_size, balanced, exp_idx, model_name, metric, roi_threshold, roi_count, choice)
        # replace the ROI-only model built by the parent with the combined one
        self.model = _MLP_C(in_size=self.in_size + 4, fil_num=fil_num, drop_rate=drop_rate)

    def train_model_epoch(self):
        # One optimization pass; the model consumes both feature streams.
        self.model.train(True)
        for roi_feats, labels, demors in self.train_dataloader:
            self.model.zero_grad()
            output = self.model(roi_feats, demors)
            self.criterion(output, labels).backward()
            self.optimizer.step()

    def valid_model_epoch(self):
        # Accumulate the validation confusion matrix without gradient tracking.
        matrix = [[0, 0], [0, 0]]
        with torch.no_grad():
            self.model.train(False)
            for roi_feats, labels, demors in self.valid_dataloader:
                output = self.model(roi_feats, demors)
                matrix = matrix_sum(matrix, get_confusion_matrix(output, labels))
        return matrix

    def test(self, repe_idx):
        # Evaluate the best checkpoint on every cohort; returns one metric
        # per stage and writes raw scores to raw_score_<stage>_<repe_idx>.txt.
        ckpt_path = '{}{}_{}.pth'.format(self.checkpoint_dir, self.model_name, self.optimal_epoch)
        self.model.load_state_dict(torch.load(ckpt_path))
        self.model.train(False)
        accu_list = []
        with torch.no_grad():
            for stage in ['train', 'valid', 'test', 'AIBL', 'NACC', 'FHS']:
                data = MLP_Data(self.Data_dir, self.exp_idx, stage=stage,
                                roi_threshold=self.roi_threshold,
                                roi_count=self.roi_count,
                                choice=self.choice, seed=self.seed)
                loader = DataLoader(data, batch_size=10, shuffle=False)
                matrix = [[0, 0], [0, 0]]
                with open(self.checkpoint_dir + 'raw_score_{}_{}.txt'.format(stage, repe_idx), 'w') as f:
                    for roi_feats, labels, demors in loader:
                        output = self.model(roi_feats, demors)
                        write_raw_score(f, output, labels)
                        matrix = matrix_sum(matrix, get_confusion_matrix(output, labels))
                accu_list.append(self.eval_metric(matrix))
        return accu_list
class MLP_Wrapper_D(CNN_Wrapper):
    """MLP trained on CNN-derived DPM features plus demographic variables.

    Reuses CNN_Wrapper's checkpointing helpers but substitutes CNN_MLP_Data
    dataloaders and an _MLP_D model called as ``model(inputs, demors)``.
    """
    def __init__(self, imbalan_ratio, fil_num, drop_rate, seed, batch_size, balanced, exp_idx, model_name, metric):
        self.seed = seed
        self.imbalan_ratio = imbalan_ratio
        self.exp_idx = exp_idx
        self.model_name = model_name
        self.eval_metric = get_accu if metric == 'accuracy' else get_MCC
        self.checkpoint_dir = './checkpoint_dir/{}_exp{}/'.format(self.model_name, exp_idx)
        # makedirs(exist_ok=True) also creates missing parent dirs and avoids
        # the check-then-create race of the previous exists()/mkdir() pair
        os.makedirs(self.checkpoint_dir, exist_ok=True)
        self.Data_dir = './DPMs/cnn_exp{}/'.format(exp_idx)
        self.prepare_dataloader(batch_size, balanced, self.Data_dir)
        # +4 input features for the demographic variables
        self.model = _MLP_D(in_size=self.in_size+4, fil_num=fil_num, drop_rate=drop_rate)

    def prepare_dataloader(self, batch_size, balanced, Data_dir):
        """Build train/valid/test dataloaders.

        ``balanced`` selects the class-imbalance strategy: 1 = weighted
        random sampler, 0 = uniform sampling + weighted loss.
        """
        train_data = CNN_MLP_Data(Data_dir, self.exp_idx, stage='train', seed=self.seed)
        valid_data = CNN_MLP_Data(Data_dir, self.exp_idx, stage='valid', seed=self.seed)
        test_data = CNN_MLP_Data(Data_dir, self.exp_idx, stage='test', seed=self.seed)
        sample_weight, self.imbalanced_ratio = train_data.get_sample_weights()
        if balanced == 1:
            # sampler draws with per-class probabilities so every mini-batch
            # has equal expected counts per class; loss weight becomes 1
            sampler = torch.utils.data.sampler.WeightedRandomSampler(sample_weight, len(sample_weight))
            self.train_dataloader = DataLoader(train_data, batch_size=batch_size, sampler=sampler)
            self.imbalanced_ratio = 1
        elif balanced == 0:
            # uniform sampling; imbalance is handled instead by weighting the
            # cross-entropy loss with self.imbalanced_ratio
            self.train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True, drop_last=True)
            self.imbalanced_ratio *= self.imbalan_ratio
        self.valid_dataloader = DataLoader(valid_data, batch_size=1, shuffle=False)
        self.test_dataloader = DataLoader(test_data, batch_size=1, shuffle=False)
        self.in_size = train_data.in_size

    def train(self, lr, epochs):
        """Train for ``epochs`` epochs, checkpointing the best validation
        metric; returns the optimal validation metric value."""
        self.optimizer = optim.Adam(self.model.parameters(), lr=lr, betas=(0.5, 0.999))
        self.criterion = nn.CrossEntropyLoss(weight=torch.Tensor([1, self.imbalanced_ratio]))
        self.optimal_valid_matrix = [[0, 0], [0, 0]]
        self.optimal_valid_metric = 0
        self.optimal_epoch = -1
        for self.epoch in range(epochs):
            self.train_model_epoch()
            valid_matrix = self.valid_model_epoch()
            self.save_checkpoint(valid_matrix)
        return self.optimal_valid_metric

    def train_model_epoch(self):
        """Run one training epoch; one optimizer step per batch."""
        self.model.train(True)
        for inputs, labels, demors in self.train_dataloader:
            # (removed dead no-op `inputs, labels = inputs, labels`)
            self.model.zero_grad()
            preds = self.model(inputs, demors)
            loss = self.criterion(preds, labels)
            loss.backward()
            self.optimizer.step()

    def valid_model_epoch(self):
        """Return the 2x2 confusion matrix over the validation set."""
        with torch.no_grad():
            self.model.train(False)
            valid_matrix = [[0, 0], [0, 0]]
            for inputs, labels, demors in self.valid_dataloader:
                preds = self.model(inputs, demors)
                valid_matrix = matrix_sum(valid_matrix, get_confusion_matrix(preds, labels))
            return valid_matrix

    def test(self, repe_idx):
        """Evaluate the best checkpoint on all splits; return metric list.

        Raw scores per stage go to raw_score_<stage>_<repe_idx>.txt; the
        per-stage confusion matrix and metric are printed (unlike the other
        wrappers, which keep this print commented out).
        """
        self.model.load_state_dict(
            torch.load('{}{}_{}.pth'.format(self.checkpoint_dir, self.model_name, self.optimal_epoch)))
        self.model.train(False)
        accu_list = []
        with torch.no_grad():
            for stage in ['train', 'valid', 'test', 'AIBL', 'NACC', 'FHS']:
                data = CNN_MLP_Data(self.Data_dir, self.exp_idx, stage=stage, seed=self.seed)
                dataloader = DataLoader(data, batch_size=10, shuffle=False)
                matrix = [[0, 0], [0, 0]]
                # context manager fixes a file-handle leak on exceptions
                with open(self.checkpoint_dir + 'raw_score_{}_{}.txt'.format(stage, repe_idx), 'w') as f:
                    for inputs, labels, demors in dataloader:
                        preds = self.model(inputs, demors)
                        write_raw_score(f, preds, labels)
                        matrix = matrix_sum(matrix, get_confusion_matrix(preds, labels))
                print(stage + ' confusion matrix ', matrix, ' accuracy ', self.eval_metric(matrix))
                accu_list.append(self.eval_metric(matrix))
        return accu_list
class MLP_Wrapper_E(MLP_Wrapper_B):
    """Variant of MLP_Wrapper_B trained on MLP_Data_apoe features.

    The model input size is fixed at 5 (demographics + APOE); batches yield
    ``(_, labels, inputs)`` with the first slot unused by this model.
    """
    def __init__(self, imbalan_ratio, fil_num, drop_rate, seed, batch_size, balanced, exp_idx, model_name, metric, roi_threshold, roi_count, choice):
        super().__init__(imbalan_ratio, fil_num, drop_rate, seed, batch_size, balanced, exp_idx, model_name, metric, roi_threshold, roi_count, choice)
        # NOTE(review): if the base __init__ already calls prepare_dataloader,
        # dynamic dispatch means the override below ran once already and this
        # explicit call repeats the work — confirm against MLP_Wrapper_A/B
        # before removing.
        self.prepare_dataloader(batch_size, balanced, self.Data_dir)
        self.model = _MLP_B(in_size=5, fil_num=fil_num, drop_rate=drop_rate)

    def prepare_dataloader(self, batch_size, balanced, Data_dir):
        """Build train/valid/test dataloaders from MLP_Data_apoe.

        ``balanced`` selects the class-imbalance strategy: 1 = weighted
        random sampler, 0 = uniform sampling + weighted loss.
        """
        train_data = MLP_Data_apoe(Data_dir, self.exp_idx, stage='train', roi_threshold=self.roi_threshold, roi_count=self.roi_count, choice=self.choice, seed=self.seed)
        valid_data = MLP_Data_apoe(Data_dir, self.exp_idx, stage='valid', roi_threshold=self.roi_threshold, roi_count=self.roi_count, choice=self.choice, seed=self.seed)
        test_data = MLP_Data_apoe(Data_dir, self.exp_idx, stage='test', roi_threshold=self.roi_threshold, roi_count=self.roi_count, choice=self.choice, seed=self.seed)
        sample_weight, self.imbalanced_ratio = train_data.get_sample_weights()
        if balanced == 1:
            # sampler equalizes expected per-class counts in each mini-batch
            sampler = torch.utils.data.sampler.WeightedRandomSampler(sample_weight, len(sample_weight))
            self.train_dataloader = DataLoader(train_data, batch_size=batch_size, sampler=sampler)
            self.imbalanced_ratio = 1
        elif balanced == 0:
            # uniform sampling; imbalance handled via weighted loss instead
            self.train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True, drop_last=True)
            self.imbalanced_ratio *= self.imbalan_ratio
        self.valid_dataloader = DataLoader(valid_data, batch_size=1, shuffle=False)
        self.test_dataloader = DataLoader(test_data, batch_size=1, shuffle=False)
        self.in_size = train_data.in_size

    def test(self, repe_idx):
        """Evaluate the best checkpoint on all splits; return metric list."""
        accu_list = []
        self.model.load_state_dict(torch.load('{}{}_{}.pth'.format(self.checkpoint_dir, self.model_name, self.optimal_epoch)))
        self.model.train(False)
        with torch.no_grad():
            for stage in ['train', 'valid', 'test', 'AIBL', 'NACC', 'FHS']:
                data = MLP_Data_apoe(self.Data_dir, self.exp_idx, stage=stage, roi_threshold=self.roi_threshold, roi_count=self.roi_count, choice=self.choice, seed=self.seed)
                dataloader = DataLoader(data, batch_size=10, shuffle=False)
                matrix = [[0, 0], [0, 0]]
                # context manager fixes a file-handle leak on exceptions
                with open(self.checkpoint_dir + 'raw_score_{}_{}.txt'.format(stage, repe_idx), 'w') as f:
                    for _, labels, inputs in dataloader:
                        preds = self.model(inputs)
                        write_raw_score(f, preds, labels)
                        matrix = matrix_sum(matrix, get_confusion_matrix(preds, labels))
                accu_list.append(self.eval_metric(matrix))
        return accu_list
class MLP_Wrapper_F(MLP_Wrapper_C):
    """Variant of MLP_Wrapper_C trained on MLP_Data_apoe features.

    The model takes ROI inputs plus 5 extra features (demographics + APOE);
    batches yield ``(inputs, labels, demors)`` tuples.
    """
    def __init__(self, imbalan_ratio, fil_num, drop_rate, seed, batch_size, balanced, exp_idx, model_name, metric, roi_threshold, roi_count, choice):
        super().__init__(imbalan_ratio, fil_num, drop_rate, seed, batch_size, balanced, exp_idx, model_name, metric, roi_threshold, roi_count, choice)
        # NOTE(review): likely redundant if the base __init__ already invokes
        # the overridden prepare_dataloader via dynamic dispatch — confirm
        # before removing.
        self.prepare_dataloader(batch_size, balanced, self.Data_dir)
        self.model = _MLP_C(in_size=self.in_size + 5, fil_num=fil_num, drop_rate=drop_rate)

    def prepare_dataloader(self, batch_size, balanced, Data_dir):
        """Build train/valid/test dataloaders from MLP_Data_apoe.

        ``balanced`` selects the class-imbalance strategy: 1 = weighted
        random sampler, 0 = uniform sampling + weighted loss.
        """
        train_data = MLP_Data_apoe(Data_dir, self.exp_idx, stage='train', roi_threshold=self.roi_threshold, roi_count=self.roi_count, choice=self.choice, seed=self.seed)
        valid_data = MLP_Data_apoe(Data_dir, self.exp_idx, stage='valid', roi_threshold=self.roi_threshold, roi_count=self.roi_count, choice=self.choice, seed=self.seed)
        test_data = MLP_Data_apoe(Data_dir, self.exp_idx, stage='test', roi_threshold=self.roi_threshold, roi_count=self.roi_count, choice=self.choice, seed=self.seed)
        sample_weight, self.imbalanced_ratio = train_data.get_sample_weights()
        if balanced == 1:
            # sampler equalizes expected per-class counts in each mini-batch
            sampler = torch.utils.data.sampler.WeightedRandomSampler(sample_weight, len(sample_weight))
            self.train_dataloader = DataLoader(train_data, batch_size=batch_size, sampler=sampler)
            self.imbalanced_ratio = 1
        elif balanced == 0:
            # uniform sampling; imbalance handled via weighted loss instead
            self.train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True, drop_last=True)
            self.imbalanced_ratio *= self.imbalan_ratio
        self.valid_dataloader = DataLoader(valid_data, batch_size=1, shuffle=False)
        self.test_dataloader = DataLoader(test_data, batch_size=1, shuffle=False)
        self.in_size = train_data.in_size

    def test(self, repe_idx):
        """Evaluate the best checkpoint on all splits; return metric list."""
        accu_list = []
        self.model.load_state_dict(torch.load('{}{}_{}.pth'.format(self.checkpoint_dir, self.model_name, self.optimal_epoch)))
        self.model.train(False)
        with torch.no_grad():
            for stage in ['train', 'valid', 'test', 'AIBL', 'NACC', 'FHS']:
                data = MLP_Data_apoe(self.Data_dir, self.exp_idx, stage=stage, roi_threshold=self.roi_threshold, roi_count=self.roi_count, choice=self.choice, seed=self.seed)
                dataloader = DataLoader(data, batch_size=10, shuffle=False)
                matrix = [[0, 0], [0, 0]]
                # context manager fixes a file-handle leak on exceptions
                with open(self.checkpoint_dir + 'raw_score_{}_{}.txt'.format(stage, repe_idx), 'w') as f:
                    for inputs, labels, demors in dataloader:
                        preds = self.model(inputs, demors)
                        write_raw_score(f, preds, labels)
                        matrix = matrix_sum(matrix, get_confusion_matrix(preds, labels))
                accu_list.append(self.eval_metric(matrix))
        return accu_list
if __name__ == "__main__":
    # No standalone entry point: this module is meant to be imported and the
    # wrapper classes driven by an external training script.
    pass
| 56.949329 | 174 | 0.636982 | 4,940 | 38,213 | 4.69413 | 0.054656 | 0.03299 | 0.017551 | 0.016301 | 0.933934 | 0.927164 | 0.922549 | 0.911251 | 0.901678 | 0.895166 | 0 | 0.006316 | 0.258394 | 38,213 | 670 | 175 | 57.034328 | 0.811955 | 0.131107 | 0 | 0.803002 | 0 | 0 | 0.038976 | 0.003189 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073171 | false | 0.003752 | 0.020638 | 0 | 0.138837 | 0.018762 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
6d97279c40d05823b1c203cf645f262e5e60e658 | 28,280 | py | Python | sdk/python/pulumi_mongodbatlas/online_archive.py | pulumi/pulumi-mongodbatlas | 0d5c085dcfd871b56fb4cf582620260b70caa07a | [
"ECL-2.0",
"Apache-2.0"
] | 9 | 2020-04-28T19:12:30.000Z | 2022-03-22T23:04:46.000Z | sdk/python/pulumi_mongodbatlas/online_archive.py | pulumi/pulumi-mongodbatlas | 0d5c085dcfd871b56fb4cf582620260b70caa07a | [
"ECL-2.0",
"Apache-2.0"
] | 59 | 2020-06-12T12:12:52.000Z | 2022-03-28T18:14:50.000Z | sdk/python/pulumi_mongodbatlas/online_archive.py | pulumi/pulumi-mongodbatlas | 0d5c085dcfd871b56fb4cf582620260b70caa07a | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2020-09-25T21:22:08.000Z | 2021-08-30T20:06:18.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['OnlineArchiveArgs', 'OnlineArchive']
@pulumi.input_type
class OnlineArchiveArgs:
    # Auto-generated pulumi input type: @pulumi.input_type introspects the
    # property/getter structure below, so the shape must not be altered.
    def __init__(__self__, *,
                 cluster_name: pulumi.Input[str],
                 coll_name: pulumi.Input[str],
                 criteria: pulumi.Input['OnlineArchiveCriteriaArgs'],
                 db_name: pulumi.Input[str],
                 project_id: pulumi.Input[str],
                 partition_fields: Optional[pulumi.Input[Sequence[pulumi.Input['OnlineArchivePartitionFieldArgs']]]] = None,
                 paused: Optional[pulumi.Input[bool]] = None,
                 sync_creation: Optional[pulumi.Input[bool]] = None):
        """
        The set of arguments for constructing a OnlineArchive resource.

        :param pulumi.Input[str] cluster_name: Name of the cluster that contains the collection.
        :param pulumi.Input[str] coll_name: Name of the collection.
        :param pulumi.Input['OnlineArchiveCriteriaArgs'] criteria: Criteria to use for archiving data.
        :param pulumi.Input[str] db_name: Name of the database that contains the collection.
        :param pulumi.Input[str] project_id: The unique ID for the project.
        :param partition_fields: Up to two frequently queried fields used to
            partition archived data; queries missing these fields require a
            full collection scan. Values longer than 700 characters are not
            archived.
        :param pulumi.Input[bool] paused: State of the online archive; required
            to pause an active archive or resume a paused one. Resuming fails
            if the collection has another active online archive.
        """
        # Required args are set unconditionally; optionals only when provided,
        # so pulumi can distinguish "unset" from an explicit value.
        pulumi.set(__self__, "cluster_name", cluster_name)
        pulumi.set(__self__, "coll_name", coll_name)
        pulumi.set(__self__, "criteria", criteria)
        pulumi.set(__self__, "db_name", db_name)
        pulumi.set(__self__, "project_id", project_id)
        if partition_fields is not None:
            pulumi.set(__self__, "partition_fields", partition_fields)
        if paused is not None:
            pulumi.set(__self__, "paused", paused)
        if sync_creation is not None:
            pulumi.set(__self__, "sync_creation", sync_creation)

    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> pulumi.Input[str]:
        """Name of the cluster that contains the collection."""
        return pulumi.get(self, "cluster_name")

    @cluster_name.setter
    def cluster_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "cluster_name", value)

    @property
    @pulumi.getter(name="collName")
    def coll_name(self) -> pulumi.Input[str]:
        """Name of the collection."""
        return pulumi.get(self, "coll_name")

    @coll_name.setter
    def coll_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "coll_name", value)

    @property
    @pulumi.getter
    def criteria(self) -> pulumi.Input['OnlineArchiveCriteriaArgs']:
        """Criteria to use for archiving data."""
        return pulumi.get(self, "criteria")

    @criteria.setter
    def criteria(self, value: pulumi.Input['OnlineArchiveCriteriaArgs']):
        pulumi.set(self, "criteria", value)

    @property
    @pulumi.getter(name="dbName")
    def db_name(self) -> pulumi.Input[str]:
        """Name of the database that contains the collection."""
        return pulumi.get(self, "db_name")

    @db_name.setter
    def db_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "db_name", value)

    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> pulumi.Input[str]:
        """The unique ID for the project."""
        return pulumi.get(self, "project_id")

    @project_id.setter
    def project_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "project_id", value)

    @property
    @pulumi.getter(name="partitionFields")
    def partition_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OnlineArchivePartitionFieldArgs']]]]:
        """Up to two frequently queried fields used to partition archived data."""
        return pulumi.get(self, "partition_fields")

    @partition_fields.setter
    def partition_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OnlineArchivePartitionFieldArgs']]]]):
        pulumi.set(self, "partition_fields", value)

    @property
    @pulumi.getter
    def paused(self) -> Optional[pulumi.Input[bool]]:
        """State of the online archive; used to pause/resume the archive."""
        return pulumi.get(self, "paused")

    @paused.setter
    def paused(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "paused", value)

    @property
    @pulumi.getter(name="syncCreation")
    def sync_creation(self) -> Optional[pulumi.Input[bool]]:
        # No upstream doc for this flag; presumably controls whether resource
        # creation waits for the archive to be active — TODO confirm.
        return pulumi.get(self, "sync_creation")

    @sync_creation.setter
    def sync_creation(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "sync_creation", value)
@pulumi.input_type
class _OnlineArchiveState:
    # Auto-generated pulumi state type used by OnlineArchive.get(); every
    # field is optional because state lookups may be partial.
    def __init__(__self__, *,
                 archive_id: Optional[pulumi.Input[str]] = None,
                 cluster_name: Optional[pulumi.Input[str]] = None,
                 coll_name: Optional[pulumi.Input[str]] = None,
                 criteria: Optional[pulumi.Input['OnlineArchiveCriteriaArgs']] = None,
                 db_name: Optional[pulumi.Input[str]] = None,
                 partition_fields: Optional[pulumi.Input[Sequence[pulumi.Input['OnlineArchivePartitionFieldArgs']]]] = None,
                 paused: Optional[pulumi.Input[bool]] = None,
                 project_id: Optional[pulumi.Input[str]] = None,
                 state: Optional[pulumi.Input[str]] = None,
                 sync_creation: Optional[pulumi.Input[bool]] = None):
        """
        Input properties used for looking up and filtering OnlineArchive resources.

        :param pulumi.Input[str] archive_id: ID of the online archive.
        :param pulumi.Input[str] cluster_name: Name of the cluster that contains the collection.
        :param pulumi.Input[str] coll_name: Name of the collection.
        :param pulumi.Input['OnlineArchiveCriteriaArgs'] criteria: Criteria to use for archiving data.
        :param pulumi.Input[str] db_name: Name of the database that contains the collection.
        :param partition_fields: Up to two frequently queried fields used to
            partition archived data; values longer than 700 characters are not
            archived.
        :param pulumi.Input[bool] paused: State of the online archive; required
            to pause an active archive or resume a paused one.
        :param pulumi.Input[str] project_id: The unique ID for the project.
        :param pulumi.Input[str] state: Status of the online archive. Valid
            values: Pending, Archiving, Idle, Pausing, Paused, Orphaned, Deleted.
        """
        # Every field is optional: set only the values the caller supplied.
        if archive_id is not None:
            pulumi.set(__self__, "archive_id", archive_id)
        if cluster_name is not None:
            pulumi.set(__self__, "cluster_name", cluster_name)
        if coll_name is not None:
            pulumi.set(__self__, "coll_name", coll_name)
        if criteria is not None:
            pulumi.set(__self__, "criteria", criteria)
        if db_name is not None:
            pulumi.set(__self__, "db_name", db_name)
        if partition_fields is not None:
            pulumi.set(__self__, "partition_fields", partition_fields)
        if paused is not None:
            pulumi.set(__self__, "paused", paused)
        if project_id is not None:
            pulumi.set(__self__, "project_id", project_id)
        if state is not None:
            pulumi.set(__self__, "state", state)
        if sync_creation is not None:
            pulumi.set(__self__, "sync_creation", sync_creation)

    @property
    @pulumi.getter(name="archiveId")
    def archive_id(self) -> Optional[pulumi.Input[str]]:
        """ID of the online archive."""
        return pulumi.get(self, "archive_id")

    @archive_id.setter
    def archive_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "archive_id", value)

    @property
    @pulumi.getter(name="clusterName")
    def cluster_name(self) -> Optional[pulumi.Input[str]]:
        """Name of the cluster that contains the collection."""
        return pulumi.get(self, "cluster_name")

    @cluster_name.setter
    def cluster_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cluster_name", value)

    @property
    @pulumi.getter(name="collName")
    def coll_name(self) -> Optional[pulumi.Input[str]]:
        """Name of the collection."""
        return pulumi.get(self, "coll_name")

    @coll_name.setter
    def coll_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "coll_name", value)

    @property
    @pulumi.getter
    def criteria(self) -> Optional[pulumi.Input['OnlineArchiveCriteriaArgs']]:
        """Criteria to use for archiving data."""
        return pulumi.get(self, "criteria")

    @criteria.setter
    def criteria(self, value: Optional[pulumi.Input['OnlineArchiveCriteriaArgs']]):
        pulumi.set(self, "criteria", value)

    @property
    @pulumi.getter(name="dbName")
    def db_name(self) -> Optional[pulumi.Input[str]]:
        """Name of the database that contains the collection."""
        return pulumi.get(self, "db_name")

    @db_name.setter
    def db_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "db_name", value)

    @property
    @pulumi.getter(name="partitionFields")
    def partition_fields(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OnlineArchivePartitionFieldArgs']]]]:
        """Up to two frequently queried fields used to partition archived data."""
        return pulumi.get(self, "partition_fields")

    @partition_fields.setter
    def partition_fields(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['OnlineArchivePartitionFieldArgs']]]]):
        pulumi.set(self, "partition_fields", value)

    @property
    @pulumi.getter
    def paused(self) -> Optional[pulumi.Input[bool]]:
        """State of the online archive; used to pause/resume the archive."""
        return pulumi.get(self, "paused")

    @paused.setter
    def paused(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "paused", value)

    @property
    @pulumi.getter(name="projectId")
    def project_id(self) -> Optional[pulumi.Input[str]]:
        """The unique ID for the project."""
        return pulumi.get(self, "project_id")

    @project_id.setter
    def project_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "project_id", value)

    @property
    @pulumi.getter
    def state(self) -> Optional[pulumi.Input[str]]:
        """Status of the online archive. Valid values: Pending, Archiving,
        Idle, Pausing, Paused, Orphaned, Deleted."""
        return pulumi.get(self, "state")

    @state.setter
    def state(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "state", value)

    @property
    @pulumi.getter(name="syncCreation")
    def sync_creation(self) -> Optional[pulumi.Input[bool]]:
        # No upstream doc for this flag; presumably controls whether resource
        # creation waits for the archive to be active — TODO confirm.
        return pulumi.get(self, "sync_creation")

    @sync_creation.setter
    def sync_creation(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "sync_creation", value)
class OnlineArchive(pulumi.CustomResource):
@overload
def __init__(__self__,
             resource_name: str,
             opts: Optional[pulumi.ResourceOptions] = None,
             cluster_name: Optional[pulumi.Input[str]] = None,
             coll_name: Optional[pulumi.Input[str]] = None,
             criteria: Optional[pulumi.Input[pulumi.InputType['OnlineArchiveCriteriaArgs']]] = None,
             db_name: Optional[pulumi.Input[str]] = None,
             partition_fields: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OnlineArchivePartitionFieldArgs']]]]] = None,
             paused: Optional[pulumi.Input[bool]] = None,
             project_id: Optional[pulumi.Input[str]] = None,
             sync_creation: Optional[pulumi.Input[bool]] = None,
             __props__=None):
    """
    Keyword-argument overload: create an OnlineArchive resource from
    individual properties.

    `OnlineArchive` provides access to create, edit, pause and resume an
    online archive for a collection. Groups and projects are synonymous
    terms (group_id in the official docs). The collection must exist
    before archiving; some fields are immutable after creation (error
    `ONLINE_ARCHIVE_CANNOT_MODIFY_FIELD`).

    :param str resource_name: The name of the resource.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] cluster_name: Name of the cluster that contains the collection.
    :param pulumi.Input[str] coll_name: Name of the collection.
    :param criteria: Criteria to use for archiving data.
    :param pulumi.Input[str] db_name: Name of the database that contains the collection.
    :param partition_fields: Up to two frequently queried fields used to
        partition archived data; values over 700 characters are not archived.
    :param pulumi.Input[bool] paused: State of the online archive; required
        to pause an active archive or resume a paused one.
    :param pulumi.Input[str] project_id: The unique ID for the project.
    """
    ...
@overload
def __init__(__self__,
             resource_name: str,
             args: OnlineArchiveArgs,
             opts: Optional[pulumi.ResourceOptions] = None):
    """
    Args-object overload: create an OnlineArchive resource from an
    OnlineArchiveArgs bundle.

    `OnlineArchive` provides access to create, edit, pause and resume an
    online archive for a collection. The collection must exist before
    archiving; some fields are immutable after creation (error
    `ONLINE_ARCHIVE_CANNOT_MODIFY_FIELD`).

    :param str resource_name: The name of the resource.
    :param OnlineArchiveArgs args: The arguments to use to populate this resource's properties.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    ...
def __init__(__self__, resource_name: str, *args, **kwargs):
    # Runtime dispatcher behind the two @overload signatures: detect whether
    # the caller passed an OnlineArchiveArgs object or individual keyword
    # properties, then forward to the shared _internal_init implementation.
    resource_args, opts = _utilities.get_resource_args_opts(OnlineArchiveArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is not None:
        # Args-object form: expand its fields into keyword arguments.
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
    else:
        # Keyword form: pass positional/keyword args straight through.
        __self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   cluster_name: Optional[pulumi.Input[str]] = None,
                   coll_name: Optional[pulumi.Input[str]] = None,
                   criteria: Optional[pulumi.Input[pulumi.InputType['OnlineArchiveCriteriaArgs']]] = None,
                   db_name: Optional[pulumi.Input[str]] = None,
                   partition_fields: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OnlineArchivePartitionFieldArgs']]]]] = None,
                   paused: Optional[pulumi.Input[bool]] = None,
                   project_id: Optional[pulumi.Input[str]] = None,
                   sync_creation: Optional[pulumi.Input[bool]] = None,
                   __props__=None):
    # Shared implementation behind both __init__ overloads: validate options,
    # enforce required properties, and register the resource with the engine.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # Creating a new resource: __props__ is only meaningful for lookups.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = OnlineArchiveArgs.__new__(OnlineArchiveArgs)
        # Required properties may be omitted only when resolving an existing
        # URN (opts.urn set); otherwise their absence is an error.
        if cluster_name is None and not opts.urn:
            raise TypeError("Missing required property 'cluster_name'")
        __props__.__dict__["cluster_name"] = cluster_name
        if coll_name is None and not opts.urn:
            raise TypeError("Missing required property 'coll_name'")
        __props__.__dict__["coll_name"] = coll_name
        if criteria is None and not opts.urn:
            raise TypeError("Missing required property 'criteria'")
        __props__.__dict__["criteria"] = criteria
        if db_name is None and not opts.urn:
            raise TypeError("Missing required property 'db_name'")
        __props__.__dict__["db_name"] = db_name
        __props__.__dict__["partition_fields"] = partition_fields
        __props__.__dict__["paused"] = paused
        if project_id is None and not opts.urn:
            raise TypeError("Missing required property 'project_id'")
        __props__.__dict__["project_id"] = project_id
        __props__.__dict__["sync_creation"] = sync_creation
        # Output-only properties start unset; the provider fills them in.
        __props__.__dict__["archive_id"] = None
        __props__.__dict__["state"] = None
    super(OnlineArchive, __self__).__init__(
        'mongodbatlas:index/onlineArchive:OnlineArchive',
        resource_name,
        __props__,
        opts)
@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None,
        archive_id: Optional[pulumi.Input[str]] = None,
        cluster_name: Optional[pulumi.Input[str]] = None,
        coll_name: Optional[pulumi.Input[str]] = None,
        criteria: Optional[pulumi.Input[pulumi.InputType['OnlineArchiveCriteriaArgs']]] = None,
        db_name: Optional[pulumi.Input[str]] = None,
        partition_fields: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OnlineArchivePartitionFieldArgs']]]]] = None,
        paused: Optional[pulumi.Input[bool]] = None,
        project_id: Optional[pulumi.Input[str]] = None,
        state: Optional[pulumi.Input[str]] = None,
        sync_creation: Optional[pulumi.Input[bool]] = None) -> 'OnlineArchive':
    """
    Get an existing OnlineArchive resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    :param pulumi.Input[str] archive_id: ID of the online archive.
    :param pulumi.Input[str] cluster_name: Name of the cluster that contains the collection.
    :param pulumi.Input[str] coll_name: Name of the collection.
    :param pulumi.Input[pulumi.InputType['OnlineArchiveCriteriaArgs']] criteria: Criteria to use for archiving data.
    :param pulumi.Input[str] db_name: Name of the database that contains the collection.
    :param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['OnlineArchivePartitionFieldArgs']]]] partition_fields: Fields to use to partition data. You can specify up to two frequently queried fields to use for partitioning data. Note that queries that don't contain the specified fields will require a full collection scan of all archived documents, which will take longer and increase your costs. To learn more about how partition improves query performance, see [Data Structure in S3](https://docs.mongodb.com/datalake/admin/optimize-query-performance/#data-structure-in-s3). The value of a partition field can be up to a maximum of 700 characters. Documents with values exceeding 700 characters are not archived.
    :param pulumi.Input[bool] paused: State of the online archive. This is required for pausing an active or resume a paused online archive. The resume request will fail if the collection has another active online archive.
    :param pulumi.Input[str] project_id: The unique ID for the project
    :param pulumi.Input[str] state: Status of the online archive. Valid values are: Pending, Archiving, Idle, Pausing, Paused, Orphaned and Deleted
    :param pulumi.Input[bool] sync_creation: NOTE(review): not documented upstream here — presumably makes creation block until the archive sync completes; confirm against the provider documentation.
    """
    # Bind the provider ID into the options so the engine performs a lookup
    # instead of a create.
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    # Bypass __init__ of the state args class; fields are injected directly.
    __props__ = _OnlineArchiveState.__new__(_OnlineArchiveState)

    __props__.__dict__["archive_id"] = archive_id
    __props__.__dict__["cluster_name"] = cluster_name
    __props__.__dict__["coll_name"] = coll_name
    __props__.__dict__["criteria"] = criteria
    __props__.__dict__["db_name"] = db_name
    __props__.__dict__["partition_fields"] = partition_fields
    __props__.__dict__["paused"] = paused
    __props__.__dict__["project_id"] = project_id
    __props__.__dict__["state"] = state
    __props__.__dict__["sync_creation"] = sync_creation
    return OnlineArchive(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="archiveId")
def archive_id(self) -> pulumi.Output[str]:
    """
    ID of the online archive.
    """
    return pulumi.get(self, "archive_id")

@property
@pulumi.getter(name="clusterName")
def cluster_name(self) -> pulumi.Output[str]:
    """
    Name of the cluster that contains the collection.
    """
    return pulumi.get(self, "cluster_name")

@property
@pulumi.getter(name="collName")
def coll_name(self) -> pulumi.Output[str]:
    """
    Name of the collection.
    """
    return pulumi.get(self, "coll_name")

@property
@pulumi.getter
def criteria(self) -> pulumi.Output['outputs.OnlineArchiveCriteria']:
    """
    Criteria to use for archiving data.
    """
    return pulumi.get(self, "criteria")

@property
@pulumi.getter(name="dbName")
def db_name(self) -> pulumi.Output[str]:
    """
    Name of the database that contains the collection.
    """
    return pulumi.get(self, "db_name")

@property
@pulumi.getter(name="partitionFields")
def partition_fields(self) -> pulumi.Output[Sequence['outputs.OnlineArchivePartitionField']]:
    """
    Fields to use to partition data. You can specify up to two frequently queried fields to use for partitioning data. Note that queries that don't contain the specified fields will require a full collection scan of all archived documents, which will take longer and increase your costs. To learn more about how partition improves query performance, see [Data Structure in S3](https://docs.mongodb.com/datalake/admin/optimize-query-performance/#data-structure-in-s3). The value of a partition field can be up to a maximum of 700 characters. Documents with values exceeding 700 characters are not archived.
    """
    return pulumi.get(self, "partition_fields")

@property
@pulumi.getter
def paused(self) -> pulumi.Output[bool]:
    """
    State of the online archive. This is required for pausing an active or resume a paused online archive. The resume request will fail if the collection has another active online archive.
    """
    return pulumi.get(self, "paused")

@property
@pulumi.getter(name="projectId")
def project_id(self) -> pulumi.Output[str]:
    """
    The unique ID for the project
    """
    return pulumi.get(self, "project_id")

@property
@pulumi.getter
def state(self) -> pulumi.Output[str]:
    """
    Status of the online archive. Valid values are: Pending, Archiving, Idle, Pausing, Paused, Orphaned and Deleted
    """
    return pulumi.get(self, "state")

@property
@pulumi.getter(name="syncCreation")
def sync_creation(self) -> pulumi.Output[Optional[bool]]:
    """
    NOTE(review): undocumented in the generated source — presumably whether
    resource creation waits for the archive sync; confirm against provider docs.
    """
    return pulumi.get(self, "sync_creation")
| 52.081031 | 724 | 0.677652 | 3,497 | 28,280 | 5.310552 | 0.072348 | 0.076409 | 0.049755 | 0.037909 | 0.88692 | 0.871197 | 0.852674 | 0.827958 | 0.81148 | 0.794734 | 0 | 0.002608 | 0.227157 | 28,280 | 542 | 725 | 52.177122 | 0.84709 | 0.394802 | 0 | 0.669697 | 1 | 0 | 0.126022 | 0.038306 | 0 | 0 | 0 | 0 | 0 | 1 | 0.160606 | false | 0.00303 | 0.021212 | 0.009091 | 0.278788 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
6dae32ee224393d402e8472dd8a975f4f9196783 | 44,260 | py | Python | tests/test_maintenance.py | future-haus/django-inbox | 02da1f825b6abc56a0c2c80d6e9b3616f3ef09b8 | [
"MIT"
] | null | null | null | tests/test_maintenance.py | future-haus/django-inbox | 02da1f825b6abc56a0c2c80d6e9b3616f3ef09b8 | [
"MIT"
] | 14 | 2021-08-31T04:18:57.000Z | 2021-08-31T04:55:32.000Z | tests/test_maintenance.py | future-haus/django-inbox | 02da1f825b6abc56a0c2c80d6e9b3616f3ef09b8 | [
"MIT"
] | null | null | null | import uuid
from django.conf import settings
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.utils import timezone
from faker import Faker
from freezegun import freeze_time
from inbox import settings as inbox_settings
from inbox.models import Message
from inbox.test.utils import InboxTestCaseMixin
from inbox.utils import process_new_messages, process_new_message_logs
# Concrete user model configured for this Django project.
User = get_user_model()

# Shared Faker instance used to fabricate user email addresses in setUp().
Faker.seed()
fake = Faker()
class MaintenanceTestCase(InboxTestCaseMixin, TestCase):
user = None
def setUp(self):
    """Create a verified user with a notification-enabled device group and reset cached config."""
    super().setUp()

    address = fake.ascii_email()
    self.user = User.objects.create(
        email=address,
        email_verified_on=timezone.now().date(),
        username=address,
    )

    # Give the user's device group a (fake) FCM notification key so push paths run.
    group = self.user.device_group
    group.notification_key = 'fake-notification_key'
    group.save()

    # Drop any cached inbox settings from a previous test.
    inbox_settings.get_config.cache_clear()
def tearDown(self):
    """Clear the cached inbox settings so later tests see fresh config."""
    super().tearDown()
    inbox_settings.get_config.cache_clear()
def test_maintenance_max_age(self):
    """Messages older than PER_USER_MESSAGES_MAX_AGE are pruned as new ones arrive."""
    inbox_config = settings.INBOX_CONFIG.copy()
    inbox_config['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)

    def create_message():
        # Create one message and run the full processing pipeline.
        Message.objects.create(user=self.user, key='default', fail_silently=False)
        process_new_messages()
        process_new_message_logs()

    def assert_counts(live, total):
        qs = Message.objects.filter(user=self.user)
        self.assertEqual(qs.live().count(), live)
        self.assertEqual(qs.count(), total)

    with self.settings(INBOX_CONFIG=inbox_config):
        assert_counts(0, 0)

        with freeze_time('2020-01-01'):
            create_message()
            assert_counts(1, 1)

        with freeze_time('2020-01-02'):
            # Both messages are still within the age window.
            create_message()
            assert_counts(2, 2)

        with freeze_time('2020-01-07'):
            # The 2020-01-01 message now exceeds the 5-day max age and is culled.
            create_message()
            assert_counts(2, 2)
def test_maintenance_max_age_with_message_id(self):
    """Same as the max-age test, but rows created with a message_id stay in the table after pruning."""
    inbox_config = settings.INBOX_CONFIG.copy()
    inbox_config['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)

    def create_message():
        Message.objects.create(user=self.user, key='default', fail_silently=False,
                               message_id=uuid.uuid4())
        process_new_messages()
        process_new_message_logs()

    def assert_counts(live, total):
        qs = Message.objects.filter(user=self.user)
        self.assertEqual(qs.live().count(), live)
        self.assertEqual(qs.count(), total)

    with self.settings(INBOX_CONFIG=inbox_config):
        assert_counts(0, 0)

        with freeze_time('2020-01-01'):
            create_message()
            assert_counts(1, 1)

        with freeze_time('2020-01-02'):
            create_message()
            assert_counts(2, 2)

        with freeze_time('2020-01-07'):
            # The oldest message falls out of the live set but — having a
            # message_id — its row is retained, so the total keeps growing.
            create_message()
            assert_counts(2, 3)
def test_maintenance_max_age_min_count(self):
    """PER_USER_MESSAGES_MIN_COUNT protects the newest N messages from max-age pruning."""
    inbox_config = settings.INBOX_CONFIG.copy()
    inbox_config['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
    inbox_config['PER_USER_MESSAGES_MIN_COUNT'] = 3

    def create_message():
        Message.objects.create(user=self.user, key='default', fail_silently=False)
        process_new_messages()
        process_new_message_logs()

    def assert_counts(live, total):
        qs = Message.objects.filter(user=self.user)
        self.assertEqual(qs.live().count(), live)
        self.assertEqual(qs.count(), total)

    with self.settings(INBOX_CONFIG=inbox_config):
        assert_counts(0, 0)

        with freeze_time('2020-01-01'):
            create_message()
            assert_counts(1, 1)

        with freeze_time('2020-01-02'):
            create_message()
            assert_counts(2, 2)

        with freeze_time('2020-01-07'):
            # The oldest message is past max age, but the min count of 3 saves it.
            create_message()
            assert_counts(3, 3)

            # With a fourth message present, the expired one can be deleted.
            create_message()
            assert_counts(3, 3)
def test_maintenance_max_age_min_count_with_message_id(self):
    """Min-count behavior with message_id rows: pruned messages leave the live set but not the table."""
    inbox_config = settings.INBOX_CONFIG.copy()
    inbox_config['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
    inbox_config['PER_USER_MESSAGES_MIN_COUNT'] = 3

    def create_message():
        Message.objects.create(user=self.user, key='default', fail_silently=False,
                               message_id=uuid.uuid4())
        process_new_messages()
        process_new_message_logs()

    def assert_counts(live, total):
        qs = Message.objects.filter(user=self.user)
        self.assertEqual(qs.live().count(), live)
        self.assertEqual(qs.count(), total)

    with self.settings(INBOX_CONFIG=inbox_config):
        assert_counts(0, 0)

        with freeze_time('2020-01-01'):
            create_message()
            assert_counts(1, 1)

        with freeze_time('2020-01-02'):
            create_message()
            assert_counts(2, 2)

        with freeze_time('2020-01-07'):
            # Min count of 3 keeps the expired message alive.
            create_message()
            assert_counts(3, 3)

            # Fourth message: oldest leaves the live set; its row is retained.
            create_message()
            assert_counts(3, 4)
def test_maintenance_max_age_min_count_max_count(self):
    """MAX_COUNT caps live messages at 6 while MIN_COUNT floors max-age pruning at 3."""
    inbox_config = settings.INBOX_CONFIG.copy()
    inbox_config['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
    inbox_config['PER_USER_MESSAGES_MIN_COUNT'] = 3
    inbox_config['PER_USER_MESSAGES_MAX_COUNT'] = 6

    def create_messages(count=1):
        for _ in range(count):
            Message.objects.create(user=self.user, key='default', fail_silently=False)
        process_new_messages()
        process_new_message_logs()

    def assert_counts(live, total):
        qs = Message.objects.filter(user=self.user)
        self.assertEqual(qs.live().count(), live)
        self.assertEqual(qs.count(), total)

    with self.settings(INBOX_CONFIG=inbox_config):
        assert_counts(0, 0)

        with freeze_time('2020-01-01'):
            create_messages()
            assert_counts(1, 1)

        with freeze_time('2020-01-02'):
            create_messages()
            assert_counts(2, 2)

        with freeze_time('2020-01-07'):
            # Oldest is past max age, but min count of 3 saves it.
            create_messages()
            assert_counts(3, 3)

            # With a fourth message, the expired one is finally deleted.
            create_messages()
            assert_counts(3, 3)

            # Three more reach 6: oldest is only 5 days old, total within the cap.
            create_messages(3)
            assert_counts(6, 6)

            # Max count of 6 holds (no min age configured), so one is culled.
            create_messages()
            assert_counts(6, 6)
def test_maintenance_max_age_min_count_max_count_with_message_id(self):
    """Min/max-count behavior with message_id rows: culled messages remain in the table."""
    inbox_config = settings.INBOX_CONFIG.copy()
    inbox_config['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
    inbox_config['PER_USER_MESSAGES_MIN_COUNT'] = 3
    inbox_config['PER_USER_MESSAGES_MAX_COUNT'] = 6

    def create_messages(count=1):
        for _ in range(count):
            Message.objects.create(user=self.user, key='default', fail_silently=False,
                                   message_id=uuid.uuid4())
        process_new_messages()
        process_new_message_logs()

    def assert_counts(live, total):
        qs = Message.objects.filter(user=self.user)
        self.assertEqual(qs.live().count(), live)
        self.assertEqual(qs.count(), total)

    with self.settings(INBOX_CONFIG=inbox_config):
        assert_counts(0, 0)

        with freeze_time('2020-01-01'):
            create_messages()
            assert_counts(1, 1)

        with freeze_time('2020-01-02'):
            create_messages()
            assert_counts(2, 2)

        with freeze_time('2020-01-07'):
            # Min count of 3 saves the expired message.
            create_messages()
            assert_counts(3, 3)

            # Fourth message: oldest leaves the live set; row retained.
            create_messages()
            assert_counts(3, 4)

            # Three more reach the max count of 6 live messages.
            create_messages(3)
            assert_counts(6, 7)

            # Max count holds at 6; the culled row is retained in the table.
            create_messages()
            assert_counts(6, 8)
def test_maintenance_max_age_min_count_max_count_min_age(self):
    """PER_USER_MESSAGES_MIN_AGE lets the live count exceed MAX_COUNT while messages are young."""
    inbox_config = settings.INBOX_CONFIG.copy()
    inbox_config['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
    inbox_config['PER_USER_MESSAGES_MIN_COUNT'] = 3
    inbox_config['PER_USER_MESSAGES_MAX_COUNT'] = 6
    inbox_config['PER_USER_MESSAGES_MIN_AGE'] = timezone.timedelta(days=3)

    def create_messages(count=1):
        for _ in range(count):
            Message.objects.create(user=self.user, key='default', fail_silently=False)
        process_new_messages()
        process_new_message_logs()

    def assert_counts(live, total):
        qs = Message.objects.filter(user=self.user)
        self.assertEqual(qs.live().count(), live)
        self.assertEqual(qs.count(), total)

    with self.settings(INBOX_CONFIG=inbox_config):
        assert_counts(0, 0)

        with freeze_time('2020-01-01'):
            create_messages()
            assert_counts(1, 1)

        with freeze_time('2020-01-05'):
            create_messages()
            assert_counts(2, 2)

        with freeze_time('2020-01-07'):
            # Oldest is past max age, but min count of 3 saves it.
            create_messages()
            assert_counts(3, 3)

            # Fourth message: the expired one is deleted.
            create_messages()
            assert_counts(3, 3)

            # Three more reach 6: oldest survivor is only 2 days old.
            create_messages(3)
            assert_counts(6, 6)

            # Over max count, but every message is younger than the 3-day
            # min age, so nothing may be culled yet.
            create_messages()
            assert_counts(7, 7)

            create_messages()
            assert_counts(8, 8)
def test_maintenance_max_age_min_count_max_count_min_age_with_message_id(self):
    """Min-age behavior with message_id rows: culled messages remain in the table."""
    inbox_config = settings.INBOX_CONFIG.copy()
    inbox_config['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
    inbox_config['PER_USER_MESSAGES_MIN_COUNT'] = 3
    inbox_config['PER_USER_MESSAGES_MAX_COUNT'] = 6
    inbox_config['PER_USER_MESSAGES_MIN_AGE'] = timezone.timedelta(days=3)

    def create_messages(count=1):
        for _ in range(count):
            Message.objects.create(user=self.user, key='default', fail_silently=False,
                                   message_id=uuid.uuid4())
        process_new_messages()
        process_new_message_logs()

    def assert_counts(live, total):
        qs = Message.objects.filter(user=self.user)
        self.assertEqual(qs.live().count(), live)
        self.assertEqual(qs.count(), total)

    with self.settings(INBOX_CONFIG=inbox_config):
        assert_counts(0, 0)

        with freeze_time('2020-01-01'):
            create_messages()
            assert_counts(1, 1)

        with freeze_time('2020-01-05'):
            create_messages()
            assert_counts(2, 2)

        with freeze_time('2020-01-07'):
            # Min count of 3 saves the expired message.
            create_messages()
            assert_counts(3, 3)

            # Fourth message: oldest leaves the live set; row retained.
            create_messages()
            assert_counts(3, 4)

            # Three more reach 6 live messages.
            create_messages(3)
            assert_counts(6, 7)

            # Over max count, but the 3-day min age protects every message,
            # so the live count keeps growing.
            create_messages()
            assert_counts(7, 8)

            create_messages()
            assert_counts(8, 9)
def test_maintenance_max_age_max_count(self):
    """With no MIN_COUNT, max age wipes everything expired and MAX_COUNT caps the rest."""
    inbox_config = settings.INBOX_CONFIG.copy()
    inbox_config['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
    inbox_config['PER_USER_MESSAGES_MAX_COUNT'] = 6

    def create_messages(count=1):
        for _ in range(count):
            Message.objects.create(user=self.user, key='default', fail_silently=False)
        process_new_messages()
        process_new_message_logs()

    def assert_counts(live, total):
        qs = Message.objects.filter(user=self.user)
        self.assertEqual(qs.live().count(), live)
        self.assertEqual(qs.count(), total)

    with self.settings(INBOX_CONFIG=inbox_config):
        assert_counts(0, 0)

        with freeze_time('2020-01-01'):
            create_messages()
            assert_counts(1, 1)

            # Six more are capped at the max count of 6.
            create_messages(6)
            assert_counts(6, 6)

        with freeze_time('2020-01-07'):
            # Every earlier message is past the 5-day max age; only the new
            # one survives.
            create_messages()
            assert_counts(1, 1)
def test_maintenance_max_age_max_count_with_message_id(self):
    """Max-age/max-count behavior with message_id rows: pruned rows stay in the table."""
    inbox_config = settings.INBOX_CONFIG.copy()
    inbox_config['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
    inbox_config['PER_USER_MESSAGES_MAX_COUNT'] = 6

    def create_messages(count=1):
        for _ in range(count):
            Message.objects.create(user=self.user, key='default', fail_silently=False,
                                   message_id=uuid.uuid4())
        process_new_messages()
        process_new_message_logs()

    def assert_counts(live, total):
        qs = Message.objects.filter(user=self.user)
        self.assertEqual(qs.live().count(), live)
        self.assertEqual(qs.count(), total)

    with self.settings(INBOX_CONFIG=inbox_config):
        assert_counts(0, 0)

        with freeze_time('2020-01-01'):
            create_messages()
            assert_counts(1, 1)

            # Six more are capped at 6 live; the culled row is retained.
            create_messages(6)
            assert_counts(6, 7)

        with freeze_time('2020-01-07'):
            # All earlier messages exceed max age; only the new one is live,
            # but every row is retained.
            create_messages()
            assert_counts(1, 8)
def test_maintenance_max_age_max_count_min_age(self):
    """MIN_AGE delays MAX_COUNT enforcement until messages are old enough to cull."""
    inbox_config = settings.INBOX_CONFIG.copy()
    inbox_config['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
    inbox_config['PER_USER_MESSAGES_MAX_COUNT'] = 6
    inbox_config['PER_USER_MESSAGES_MIN_AGE'] = timezone.timedelta(days=3)

    def create_messages(count=1):
        for _ in range(count):
            Message.objects.create(user=self.user, key='default', fail_silently=False)
        process_new_messages()
        process_new_message_logs()

    def assert_counts(live, total):
        qs = Message.objects.filter(user=self.user)
        self.assertEqual(qs.live().count(), live)
        self.assertEqual(qs.count(), total)

    with self.settings(INBOX_CONFIG=inbox_config):
        assert_counts(0, 0)

        with freeze_time('2020-01-01'):
            create_messages()
            assert_counts(1, 1)

            # Six more exceed the max count, but the 3-day min age protects
            # every brand-new message, so all 7 survive.
            create_messages(6)
            assert_counts(7, 7)

        with freeze_time('2020-01-05'):
            # The old messages are now 4 days old (past min age), so the max
            # count of 6 can be enforced again.
            create_messages(3)
            assert_counts(6, 6)
def test_maintenance_max_age_max_count_min_age_with_message_id(self):
    """Same as test_maintenance_max_age_max_count_min_age, but every message
    carries a message_id; pruned messages then remain in the table (10 total)
    while only 6 stay live.

    NOTE(review): block nesting reconstructed from a whitespace-mangled
    source — verify against upstream history.
    """
    INBOX_CONFIG = settings.INBOX_CONFIG.copy()
    INBOX_CONFIG['PER_USER_MESSAGES_MAX_AGE'] = timezone.timedelta(days=5)
    INBOX_CONFIG['PER_USER_MESSAGES_MAX_COUNT'] = 6
    INBOX_CONFIG['PER_USER_MESSAGES_MIN_AGE'] = timezone.timedelta(days=3)
    with self.settings(INBOX_CONFIG=INBOX_CONFIG):
        messages = Message.objects.filter(user=self.user).live()
        self.assertEqual(len(messages), 0)
        messages = Message.objects.filter(user=self.user)
        self.assertEqual(len(messages), 0)

        with freeze_time('2020-01-01'):
            Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()

            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 1)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 1)

            # Creating 6 more should be at 7 because of min age
            for i in range(6):
                Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()

            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 7)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 7)

        with freeze_time('2020-01-05'):
            for i in range(3):
                Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()

            # Live count enforced, but rows with a message_id are retained.
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 6)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 10)
def test_maintenance_max_count(self):
    """PER_USER_MESSAGES_MAX_COUNT alone caps the number of stored messages
    at 6, immediately and with no grace period.

    NOTE(review): block nesting reconstructed from a whitespace-mangled
    source — verify against upstream history.
    """
    INBOX_CONFIG = settings.INBOX_CONFIG.copy()
    INBOX_CONFIG['PER_USER_MESSAGES_MAX_COUNT'] = 6
    with self.settings(INBOX_CONFIG=INBOX_CONFIG):
        messages = Message.objects.filter(user=self.user).live()
        self.assertEqual(len(messages), 0)
        messages = Message.objects.filter(user=self.user)
        self.assertEqual(len(messages), 0)

        with freeze_time('2020-01-01'):
            Message.objects.create(user=self.user, key='default', fail_silently=False)
            process_new_messages()
            process_new_message_logs()

            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 1)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 1)

            # Creating 6 more should keep it at 6 because that's the max
            for i in range(6):
                Message.objects.create(user=self.user, key='default', fail_silently=False)
            process_new_messages()
            process_new_message_logs()

            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 6)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 6)

        with freeze_time('2020-02-01'):
            Message.objects.create(user=self.user, key='default', fail_silently=False)
            process_new_messages()
            process_new_message_logs()

            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 6)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 6)
def test_maintenance_max_count_with_message_id(self):
    """Same as test_maintenance_max_count, but messages with a message_id
    are retained after pruning: live stays at 6 while the total row count
    keeps growing (7, then 8).

    NOTE(review): block nesting reconstructed from a whitespace-mangled
    source — verify against upstream history.
    """
    INBOX_CONFIG = settings.INBOX_CONFIG.copy()
    INBOX_CONFIG['PER_USER_MESSAGES_MAX_COUNT'] = 6
    with self.settings(INBOX_CONFIG=INBOX_CONFIG):
        messages = Message.objects.filter(user=self.user).live()
        self.assertEqual(len(messages), 0)
        messages = Message.objects.filter(user=self.user)
        self.assertEqual(len(messages), 0)

        with freeze_time('2020-01-01'):
            Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()

            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 1)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 1)

            # Creating 6 more should keep it at 6 because that's the max
            for i in range(6):
                Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()

            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 6)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 7)

        with freeze_time('2020-02-01'):
            Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()

            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 6)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 8)
def test_maintenance_max_count_min_age(self):
    """MAX_COUNT is only enforced once messages are older than MIN_AGE
    (3 days): counts rise to 7 and 8 before dropping back to 6.

    NOTE(review): block nesting reconstructed from a whitespace-mangled
    source — verify against upstream history.
    """
    INBOX_CONFIG = settings.INBOX_CONFIG.copy()
    INBOX_CONFIG['PER_USER_MESSAGES_MAX_COUNT'] = 6
    INBOX_CONFIG['PER_USER_MESSAGES_MIN_AGE'] = timezone.timedelta(days=3)
    with self.settings(INBOX_CONFIG=INBOX_CONFIG):
        messages = Message.objects.filter(user=self.user).live()
        self.assertEqual(len(messages), 0)
        messages = Message.objects.filter(user=self.user)
        self.assertEqual(len(messages), 0)

        with freeze_time('2020-01-01'):
            Message.objects.create(user=self.user, key='default', fail_silently=False)
            process_new_messages()
            process_new_message_logs()

            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 1)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 1)

            # Creating 6 more should go to 7 because of min age
            for i in range(6):
                Message.objects.create(user=self.user, key='default', fail_silently=False)
            process_new_messages()
            process_new_message_logs()

            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 7)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 7)

        with freeze_time('2020-01-04'):
            # Existing messages are still within MIN_AGE, so count grows to 8.
            Message.objects.create(user=self.user, key='default', fail_silently=False)
            process_new_messages()
            process_new_message_logs()

            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 8)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 8)

        with freeze_time('2020-01-05'):
            # Now the 2020-01-01 batch is past MIN_AGE; MAX_COUNT kicks in.
            Message.objects.create(user=self.user, key='default', fail_silently=False)
            process_new_messages()
            process_new_message_logs()

            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 6)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 6)
def test_maintenance_max_count_min_age_with_message_id(self):
    """Same as test_maintenance_max_count_min_age, but with message_ids:
    after pruning, 6 messages remain live while 9 rows stay in the table.

    NOTE(review): block nesting reconstructed from a whitespace-mangled
    source — verify against upstream history.
    """
    INBOX_CONFIG = settings.INBOX_CONFIG.copy()
    INBOX_CONFIG['PER_USER_MESSAGES_MAX_COUNT'] = 6
    INBOX_CONFIG['PER_USER_MESSAGES_MIN_AGE'] = timezone.timedelta(days=3)
    with self.settings(INBOX_CONFIG=INBOX_CONFIG):
        messages = Message.objects.filter(user=self.user).live()
        self.assertEqual(len(messages), 0)
        messages = Message.objects.filter(user=self.user)
        self.assertEqual(len(messages), 0)

        with freeze_time('2020-01-01'):
            Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()

            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 1)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 1)

            # Creating 6 more should go to 7 because of min age
            for i in range(6):
                Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()

            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 7)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 7)

        with freeze_time('2020-01-04'):
            Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()

            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 8)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 8)

        with freeze_time('2020-01-05'):
            Message.objects.create(user=self.user, key='default', fail_silently=False, message_id=uuid.uuid4())
            process_new_messages()
            process_new_message_logs()

            # Live count pruned to MAX_COUNT; message_id rows are retained.
            messages = Message.objects.filter(user=self.user).live()
            self.assertEqual(len(messages), 6)
            messages = Message.objects.filter(user=self.user)
            self.assertEqual(len(messages), 9)
| 41.715363 | 119 | 0.606801 | 5,114 | 44,260 | 5.084474 | 0.026398 | 0.097223 | 0.107992 | 0.176602 | 0.975963 | 0.973771 | 0.973771 | 0.967733 | 0.967733 | 0.966195 | 0 | 0.02055 | 0.289765 | 44,260 | 1,060 | 120 | 41.754717 | 0.806617 | 0.060755 | 0 | 0.940922 | 0 | 0 | 0.044955 | 0.023044 | 0 | 0 | 0 | 0 | 0.236311 | 1 | 0.025937 | false | 0 | 0.01585 | 0 | 0.044669 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
6df9ddf34a6940d83f39a6b5f1c7560e2bce1d7d | 2,607 | py | Python | sdk/ml/azure-ai-ml/tests/local_endpoint/unittests/test_dockerfile_resolver.py | dubiety/azure-sdk-for-python | 62ffa839f5d753594cf0fe63668f454a9d87a346 | [
"MIT"
] | 1 | 2022-02-01T18:50:12.000Z | 2022-02-01T18:50:12.000Z | sdk/ml/azure-ai-ml/tests/local_endpoint/unittests/test_dockerfile_resolver.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | sdk/ml/azure-ai-ml/tests/local_endpoint/unittests/test_dockerfile_resolver.py | ellhe-blaster/azure-sdk-for-python | 82193ba5e81cc5e5e5a5239bba58abe62e86f469 | [
"MIT"
] | null | null | null | # ---------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# ---------------------------------------------------------
import pytest
from azure.ai.ml._local_endpoints.dockerfile_resolver import DockerfileResolver
@pytest.mark.unittest
class TestDockerfileResolver:
    """Verifies the Dockerfile text rendered by DockerfileResolver for all
    four combinations of (base image vs. user-supplied Dockerfile) and
    (with vs. without a conda file)."""

    def test_resolve_base_image(self):
        resolver = DockerfileResolver(
            dockerfile=None,
            docker_base_image="ubuntu:latest",
            docker_conda_file_name=None,
            docker_port="5001",
            docker_azureml_app_path="/var/azureml-app",
        )
        want = "\n".join(
            [
                "FROM ubuntu:latest",
                "RUN mkdir -p /var/azureml-app",
                "WORKDIR /var/azureml-app",
                'CMD ["runsvdir", "/var/runit"]',
            ]
        )
        assert str(resolver) == want

    def test_resolve_base_image_with_conda_file(self):
        resolver = DockerfileResolver(
            dockerfile=None,
            docker_base_image="ubuntu:latest",
            docker_conda_file_name="conda.yml",
            docker_port="5001",
            docker_azureml_app_path="/var/azureml-app",
        )
        # With a conda file, the conda env is created and wrapped around CMD.
        want = "\n".join(
            [
                "FROM ubuntu:latest",
                "RUN mkdir -p /var/azureml-app",
                "WORKDIR /var/azureml-app",
                "COPY conda.yml /var/azureml-app",
                "RUN conda env create -n inf-conda-env --file conda.yml",
                'CMD ["conda", "run", "--no-capture-output", "-n", "inf-conda-env", "runsvdir", "/var/runit"]',
            ]
        )
        assert str(resolver) == want

    def test_resolve_dockerfile(self):
        resolver = DockerfileResolver(
            dockerfile="FROM ubuntu:latest\n",
            docker_base_image=None,
            docker_conda_file_name=None,
            docker_port="5001",
            docker_azureml_app_path="/var/azureml-app",
        )
        # The user Dockerfile ends in a newline, hence the blank element.
        want = "\n".join(
            [
                "FROM ubuntu:latest",
                "",
                "RUN mkdir -p /var/azureml-app",
                "WORKDIR /var/azureml-app",
                'CMD ["runsvdir", "/var/runit"]',
            ]
        )
        assert str(resolver) == want

    def test_resolve_dockerfile_with_conda_file(self):
        resolver = DockerfileResolver(
            dockerfile="FROM ubuntu:latest\n",
            docker_base_image=None,
            docker_conda_file_name="conda.yml",
            docker_port="5001",
            docker_azureml_app_path="/var/azureml-app",
        )
        want = "\n".join(
            [
                "FROM ubuntu:latest",
                "",
                "RUN mkdir -p /var/azureml-app",
                "WORKDIR /var/azureml-app",
                "COPY conda.yml /var/azureml-app",
                "RUN conda env create -n inf-conda-env --file conda.yml",
                'CMD ["conda", "run", "--no-capture-output", "-n", "inf-conda-env", "runsvdir", "/var/runit"]',
            ]
        )
        assert str(resolver) == want
| 34.76 | 93 | 0.616034 | 312 | 2,607 | 4.971154 | 0.195513 | 0.116054 | 0.117344 | 0.090264 | 0.888459 | 0.867827 | 0.867827 | 0.851064 | 0.851064 | 0.851064 | 0 | 0.007824 | 0.215573 | 2,607 | 74 | 94 | 35.22973 | 0.750611 | 0.06636 | 0 | 0.8125 | 0 | 0.03125 | 0.131687 | 0 | 0 | 0 | 0 | 0 | 0.0625 | 1 | 0.0625 | false | 0 | 0.03125 | 0 | 0.109375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
099f8cb441f751968e6c9c4eedbe9ab3b7d8cdc6 | 17,160 | py | Python | genomics_data_index/test/integration/storage/io/mutation/test_NucleotideSampleDataPackageFactory.py | apetkau/genomics-data-index | d0cc119fd57b8cbd701affb1c84450cf7832fa01 | [
"Apache-2.0"
] | 12 | 2021-05-03T20:56:05.000Z | 2022-01-04T14:52:19.000Z | genomics_data_index/test/integration/storage/io/mutation/test_NucleotideSampleDataPackageFactory.py | apetkau/thesis-index | 6c96e9ed75d8e661437effe62a939727a0b473fc | [
"Apache-2.0"
] | 30 | 2021-04-26T23:03:40.000Z | 2022-02-25T18:41:14.000Z | genomics_data_index/test/integration/storage/io/mutation/test_NucleotideSampleDataPackageFactory.py | apetkau/genomics-data-index | d0cc119fd57b8cbd701affb1c84450cf7832fa01 | [
"Apache-2.0"
] | null | null | null | from pathlib import Path
from tempfile import TemporaryDirectory
from typing import List
import pandas as pd
from genomics_data_index.storage.io.mutation.NucleotideSampleDataPackageFactory import \
NucleotideInputFilesSampleDataPackageFactory
from genomics_data_index.storage.io.mutation.NucleotideSampleDataPackageFactory import \
NucleotideSnippySampleDataPackageFactory
from genomics_data_index.test.integration import data_dir as snippy_dir
from genomics_data_index.test.integration.storage.io.mutation import vcf_and_mask_files, vcf_and_bed_mask_files
def create_samples_input_file(tmp_dir: Path, sample_dirs: List[Path],
                              use_bed: bool = False) -> Path:
    """Write a tab-separated 'input-files.tsv' describing samples to index.

    Columns are 'Sample', 'VCF', 'Mask File' (absolute paths). When use_bed
    is True the mask paths come from vcf_and_bed_mask_files, otherwise from
    vcf_and_mask_files.

    NOTE(review): indentation reconstructed from a whitespace-mangled
    source — verify against upstream history.

    :param tmp_dir: directory to write 'input-files.tsv' into.
    :param sample_dirs: sample directories passed to the helper functions.
    :param use_bed: use BED-format mask files instead of the default masks.
    :return: path to the written TSV file.
    """
    file = tmp_dir / 'input-files.tsv'
    if use_bed:
        vcf_mask_files = vcf_and_bed_mask_files(sample_dirs)
    else:
        vcf_mask_files = vcf_and_mask_files(sample_dirs)

    vcfs = vcf_mask_files['vcfs']
    masks = vcf_mask_files['masks']
    data = [[sample, str(vcfs[sample].absolute()), str(masks[sample].absolute())]
            for sample in vcfs]

    input_files_df = pd.DataFrame(data,
                                  columns=['Sample', 'VCF', 'Mask File'])
    input_files_df.to_csv(file, sep='\t', index=False)
    return file
def verify_features_df_for_sample(sample_name: str, features_df: pd.DataFrame,
                                  is_snippy: bool) -> None:
    """Assert that features_df has the expected row count for sample_name and
    that every row belongs to that sample.

    The expected counts differ by data source; the snippy counts are written
    as two-term sums matching the values asserted elsewhere in this module.

    :param sample_name: one of 'SampleA', 'SampleB', 'SampleC'.
    :param features_df: features table with a 'SAMPLE' column.
    :param is_snippy: select the snippy expected counts instead of the
        VCF/mask expected counts.
    :raises Exception: if sample_name is not one of the known samples.
    """
    if is_snippy:
        lengths = {
            'SampleA': 46 + 414,
            'SampleB': 50 + 234,
            'SampleC': 33 + 302,
        }
    else:
        lengths = {
            'SampleA': 482,
            'SampleB': 326,
            'SampleC': 362,
        }

    # Dict lookup replaces the original triplicated if/elif branches.
    if sample_name not in lengths:
        raise Exception(f'sample={sample_name} is not expected')
    assert lengths[sample_name] == len(features_df)
    assert {sample_name} == set(features_df['SAMPLE'].tolist())
def test_snippy_data_package():
    """Snippy factory builds a package for 3 samples; feature counts differ
    depending on whether unknown positions are indexed.

    NOTE(review): block nesting reconstructed from a whitespace-mangled
    source — verify against upstream history.
    """
    # Test no index unknown
    with TemporaryDirectory() as preprocess_dir:
        preprocess_dir = Path(preprocess_dir)
        data_package_factory = NucleotideSnippySampleDataPackageFactory(ncores=1, index_unknown=False,
                                                                        preprocess_dir=preprocess_dir,
                                                                        snippy_dir=snippy_dir)
        assert 3 == data_package_factory.number_samples()
        data_package = data_package_factory.create_data_package()
        assert {'SampleA', 'SampleB', 'SampleC'} == data_package.sample_names()

        # Preprocess data
        processed_data_package = data_package.process_all_data()
        assert {'SampleA', 'SampleB', 'SampleC'} == processed_data_package.sample_names()
        features_df = processed_data_package.get_features_reader().get_features_table()
        assert 129 == len(features_df)
        assert {'SampleA', 'SampleB', 'SampleC'} == set(features_df['SAMPLE'].tolist())
        assert 46 == len(features_df[features_df['SAMPLE'] == 'SampleA'])
        assert 50 == len(features_df[features_df['SAMPLE'] == 'SampleB'])
        assert 33 == len(features_df[features_df['SAMPLE'] == 'SampleC'])

    # Test with index unknown
    with TemporaryDirectory() as preprocess_dir:
        preprocess_dir = Path(preprocess_dir)
        data_package_factory = NucleotideSnippySampleDataPackageFactory(ncores=1, index_unknown=True,
                                                                        preprocess_dir=preprocess_dir,
                                                                        snippy_dir=snippy_dir)
        assert 3 == data_package_factory.number_samples()
        data_package = data_package_factory.create_data_package()
        assert {'SampleA', 'SampleB', 'SampleC'} == data_package.sample_names()

        # Preprocess data
        processed_data_package = data_package.process_all_data()
        assert {'SampleA', 'SampleB', 'SampleC'} == processed_data_package.sample_names()
        features_df = processed_data_package.get_features_reader().get_features_table()
        assert 1079 == len(features_df)
        assert {'SampleA', 'SampleB', 'SampleC'} == set(features_df['SAMPLE'].tolist())
        assert 46 + 414 == len(features_df[features_df['SAMPLE'] == 'SampleA'])
        assert 50 + 234 == len(features_df[features_df['SAMPLE'] == 'SampleB'])
        assert 33 + 302 == len(features_df[features_df['SAMPLE'] == 'SampleC'])
def test_snippy_data_package_iter():
    """Iterating the snippy factory with batch size 1 yields one package per
    sample, each with the expected feature table.

    NOTE(review): block nesting reconstructed from a whitespace-mangled
    source — verify against upstream history.
    """
    with TemporaryDirectory() as preprocess_dir:
        preprocess_dir = Path(preprocess_dir)
        data_package_factory = NucleotideSnippySampleDataPackageFactory(ncores=1, index_unknown=True,
                                                                        preprocess_dir=preprocess_dir,
                                                                        snippy_dir=snippy_dir)
        assert 3 == data_package_factory.number_samples()

        number_iterations = 0
        handled_samples = {}
        for data_package in data_package_factory.create_data_package_iter(1):
            assert 1 == len(data_package.sample_names())
            processed_data_package = data_package.process_all_data()
            sample_name = list(processed_data_package.sample_names())[0]
            features_df = processed_data_package.get_features_reader().get_features_table()
            verify_features_df_for_sample(sample_name=sample_name, features_df=features_df,
                                          is_snippy=True)
            handled_samples[sample_name] = True
            number_iterations = number_iterations + 1

        assert 3 == number_iterations
        assert handled_samples['SampleA']
        assert handled_samples['SampleB']
        assert handled_samples['SampleC']
def test_vcf_mask_files_data_package(sample_dirs: List[Path]):
    """Input-files factory (VCF + default masks) builds a package for 3
    samples, with and without indexing unknown positions.

    NOTE(review): block nesting reconstructed from a whitespace-mangled
    source — verify against upstream history.
    """
    # Test no index unknown
    with TemporaryDirectory() as preprocess_dir:
        preprocess_dir = Path(preprocess_dir)
        input_file = create_samples_input_file(tmp_dir=preprocess_dir, sample_dirs=sample_dirs)
        data_package_factory = NucleotideInputFilesSampleDataPackageFactory(ncores=1, index_unknown=False,
                                                                            preprocess_dir=preprocess_dir,
                                                                            input_files_file=input_file)
        assert 3 == data_package_factory.number_samples()
        data_package = data_package_factory.create_data_package()
        assert {'SampleA', 'SampleB', 'SampleC'} == data_package.sample_names()

        # Preprocess data
        processed_data_package = data_package.process_all_data()
        assert {'SampleA', 'SampleB', 'SampleC'} == processed_data_package.sample_names()
        features_df = processed_data_package.get_features_reader().get_features_table()
        assert 129 == len(features_df)
        assert {'SampleA', 'SampleB', 'SampleC'} == set(features_df['SAMPLE'].tolist())
        assert 46 == len(features_df[features_df['SAMPLE'] == 'SampleA'])
        assert 50 == len(features_df[features_df['SAMPLE'] == 'SampleB'])
        assert 33 == len(features_df[features_df['SAMPLE'] == 'SampleC'])

    # Test with index unknown
    with TemporaryDirectory() as preprocess_dir:
        preprocess_dir = Path(preprocess_dir)
        input_file = create_samples_input_file(tmp_dir=preprocess_dir, sample_dirs=sample_dirs)
        data_package_factory = NucleotideInputFilesSampleDataPackageFactory(ncores=1, index_unknown=True,
                                                                            preprocess_dir=preprocess_dir,
                                                                            input_files_file=input_file)
        assert 3 == data_package_factory.number_samples()
        data_package = data_package_factory.create_data_package()
        assert {'SampleA', 'SampleB', 'SampleC'} == data_package.sample_names()

        # Preprocess data
        processed_data_package = data_package.process_all_data()
        assert {'SampleA', 'SampleB', 'SampleC'} == processed_data_package.sample_names()
        features_df = processed_data_package.get_features_reader().get_features_table()
        assert 1170 == len(features_df)
        assert {'SampleA', 'SampleB', 'SampleC'} == set(features_df['SAMPLE'].tolist())
        assert 482 == len(features_df[features_df['SAMPLE'] == 'SampleA'])
        assert 326 == len(features_df[features_df['SAMPLE'] == 'SampleB'])
        assert 362 == len(features_df[features_df['SAMPLE'] == 'SampleC'])
def test_vcf_bed_mask_files_data_package(sample_dirs: List[Path]):
    """Same as test_vcf_mask_files_data_package but the first case uses
    BED-format mask files (use_bed=True); results are identical.

    NOTE(review): block nesting reconstructed from a whitespace-mangled
    source — verify against upstream history. The second case uses the
    default (non-BED) masks, as in the original.
    """
    # Test no index unknown
    with TemporaryDirectory() as preprocess_dir:
        preprocess_dir = Path(preprocess_dir)
        input_file = create_samples_input_file(tmp_dir=preprocess_dir, sample_dirs=sample_dirs,
                                               use_bed=True)
        data_package_factory = NucleotideInputFilesSampleDataPackageFactory(ncores=1, index_unknown=False,
                                                                            preprocess_dir=preprocess_dir,
                                                                            input_files_file=input_file)
        assert 3 == data_package_factory.number_samples()
        data_package = data_package_factory.create_data_package()
        assert {'SampleA', 'SampleB', 'SampleC'} == data_package.sample_names()

        # Preprocess data
        processed_data_package = data_package.process_all_data()
        assert {'SampleA', 'SampleB', 'SampleC'} == processed_data_package.sample_names()
        features_df = processed_data_package.get_features_reader().get_features_table()
        assert 129 == len(features_df)
        assert {'SampleA', 'SampleB', 'SampleC'} == set(features_df['SAMPLE'].tolist())
        assert 46 == len(features_df[features_df['SAMPLE'] == 'SampleA'])
        assert 50 == len(features_df[features_df['SAMPLE'] == 'SampleB'])
        assert 33 == len(features_df[features_df['SAMPLE'] == 'SampleC'])

    # Test with index unknown
    with TemporaryDirectory() as preprocess_dir:
        preprocess_dir = Path(preprocess_dir)
        input_file = create_samples_input_file(tmp_dir=preprocess_dir, sample_dirs=sample_dirs)
        data_package_factory = NucleotideInputFilesSampleDataPackageFactory(ncores=1, index_unknown=True,
                                                                            preprocess_dir=preprocess_dir,
                                                                            input_files_file=input_file)
        assert 3 == data_package_factory.number_samples()
        data_package = data_package_factory.create_data_package()
        assert {'SampleA', 'SampleB', 'SampleC'} == data_package.sample_names()

        # Preprocess data
        processed_data_package = data_package.process_all_data()
        assert {'SampleA', 'SampleB', 'SampleC'} == processed_data_package.sample_names()
        features_df = processed_data_package.get_features_reader().get_features_table()
        assert 1170 == len(features_df)
        assert {'SampleA', 'SampleB', 'SampleC'} == set(features_df['SAMPLE'].tolist())
        assert 482 == len(features_df[features_df['SAMPLE'] == 'SampleA'])
        assert 326 == len(features_df[features_df['SAMPLE'] == 'SampleB'])
        assert 362 == len(features_df[features_df['SAMPLE'] == 'SampleC'])
def test_vcf_mask_files_data_package_iter(sample_dirs: List[Path]):
    """Iterating the input-files factory with batch sizes 1, 2 and 3 yields
    the expected number of packages, and every sample appears exactly once.

    NOTE(review): block nesting reconstructed from a whitespace-mangled
    source — verify against upstream history.
    """
    # Test batch size 1
    with TemporaryDirectory() as preprocess_dir:
        preprocess_dir = Path(preprocess_dir)
        input_file = create_samples_input_file(tmp_dir=preprocess_dir, sample_dirs=sample_dirs)
        data_package_factory = NucleotideInputFilesSampleDataPackageFactory(ncores=1, index_unknown=True,
                                                                            preprocess_dir=preprocess_dir,
                                                                            input_files_file=input_file)
        assert 3 == data_package_factory.number_samples()

        number_iterations = 0
        handled_samples = {}
        for data_package in data_package_factory.create_data_package_iter(1):
            assert 1 == len(data_package.sample_names())
            processed_data_package = data_package.process_all_data()
            sample_name = list(processed_data_package.sample_names())[0]
            features_df = processed_data_package.get_features_reader().get_features_table()
            verify_features_df_for_sample(sample_name=sample_name, features_df=features_df,
                                          is_snippy=False)
            handled_samples[sample_name] = True
            number_iterations = number_iterations + 1

        assert 3 == number_iterations
        assert handled_samples['SampleA']
        assert handled_samples['SampleB']
        assert handled_samples['SampleC']

    # Test batch size 2
    with TemporaryDirectory() as preprocess_dir:
        preprocess_dir = Path(preprocess_dir)
        input_file = create_samples_input_file(tmp_dir=preprocess_dir, sample_dirs=sample_dirs)
        data_package_factory = NucleotideInputFilesSampleDataPackageFactory(ncores=1, index_unknown=True,
                                                                            preprocess_dir=preprocess_dir,
                                                                            input_files_file=input_file)
        assert 3 == data_package_factory.number_samples()

        handled_samples = {}
        data_package_iter = data_package_factory.create_data_package_iter(2)

        # Iteration 1: first batch carries 2 samples.
        data_package = next(data_package_iter)
        assert 2 == len(data_package.sample_names())
        processed_data_package = data_package.process_all_data()
        features_df = processed_data_package.get_features_reader().get_features_table()
        sample_names = list(processed_data_package.sample_names())
        for sample_name in sample_names:
            features_df_sample = features_df[features_df['SAMPLE'] == sample_name]
            verify_features_df_for_sample(sample_name=sample_name, features_df=features_df_sample,
                                          is_snippy=False)
            handled_samples[sample_name] = True

        # Iteration 2: last batch carries the remaining sample.
        data_package = next(data_package_iter)
        assert 1 == len(data_package.sample_names())
        processed_data_package = data_package.process_all_data()
        features_df = processed_data_package.get_features_reader().get_features_table()
        sample_names = list(processed_data_package.sample_names())
        assert 1 == len(sample_names)
        sample_name = sample_names[0]
        verify_features_df_for_sample(sample_name=sample_name, features_df=features_df,
                                      is_snippy=False)
        handled_samples[sample_name] = True

        assert handled_samples['SampleA']
        assert handled_samples['SampleB']
        assert handled_samples['SampleC']

        # Make sure no data packages left to iterate over
        assert next(data_package_iter, None) is None

    # Test batch size 3
    with TemporaryDirectory() as preprocess_dir:
        preprocess_dir = Path(preprocess_dir)
        input_file = create_samples_input_file(tmp_dir=preprocess_dir, sample_dirs=sample_dirs)
        data_package_factory = NucleotideInputFilesSampleDataPackageFactory(ncores=1, index_unknown=True,
                                                                            preprocess_dir=preprocess_dir,
                                                                            input_files_file=input_file)
        assert 3 == data_package_factory.number_samples()

        handled_samples = {}
        data_package_iter = data_package_factory.create_data_package_iter(3)

        # Iteration 1: a single batch holds all 3 samples.
        data_package = next(data_package_iter)
        assert 3 == len(data_package.sample_names())
        processed_data_package = data_package.process_all_data()
        features_df = processed_data_package.get_features_reader().get_features_table()
        sample_names = list(processed_data_package.sample_names())
        for sample_name in sample_names:
            features_df_sample = features_df[features_df['SAMPLE'] == sample_name]
            verify_features_df_for_sample(sample_name=sample_name, features_df=features_df_sample,
                                          is_snippy=False)
            handled_samples[sample_name] = True

        assert handled_samples['SampleA']
        assert handled_samples['SampleB']
        assert handled_samples['SampleC']

        # Make sure no data packages left to iterate over
        assert next(data_package_iter, None) is None
| 49.028571 | 111 | 0.635256 | 1,825 | 17,160 | 5.593425 | 0.066849 | 0.127155 | 0.051724 | 0.048981 | 0.899687 | 0.888911 | 0.87598 | 0.864322 | 0.853742 | 0.816125 | 0 | 0.011352 | 0.276166 | 17,160 | 349 | 112 | 49.169054 | 0.810482 | 0.024417 | 0 | 0.748062 | 0 | 0 | 0.056639 | 0 | 0 | 0 | 0 | 0 | 0.310078 | 1 | 0.027132 | false | 0 | 0.031008 | 0 | 0.062016 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
09ab11971f8da899855195237c2013d56788e7df | 2,896 | py | Python | backend/database/setup/init_scripts/db_scripts/attribute_init.py | miversen33/ProjectPsittacosaurus | 0a97586a914e260b8570a10d0ab53ca73e2e9250 | [
"MIT"
] | 9 | 2019-02-12T04:47:12.000Z | 2020-11-18T05:07:02.000Z | backend/database/setup/init_scripts/db_scripts/attribute_init.py | miversen33/ProjectPsittacosaurus | 0a97586a914e260b8570a10d0ab53ca73e2e9250 | [
"MIT"
] | null | null | null | backend/database/setup/init_scripts/db_scripts/attribute_init.py | miversen33/ProjectPsittacosaurus | 0a97586a914e260b8570a10d0ab53ca73e2e9250 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import sys
import psycopg2 as psql
db_uri = sys.argv[1]
db_connection = psql.connect(db_uri)
cursor = db_connection.cursor()
cursor.execute('SELECT COUNT(id) FROM attribute')
count, = cursor.fetchone()
if count > 0:
sys.exit(0)
try:
print('Restoring Table: attribute')
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (2, 'speed', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (5, 'strength', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (6, 'carrying', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (7, 'throw_power', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (8, 'short_throw_accuracy', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (9, 'medium_throw_accuracy', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (10, 'long_throw_accuracy', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (11, 'catching', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (12, 'run_blocking', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (13, 'pass_blocking', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (14, 'finesse_block_shedding', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (15, 'power_block_shedding', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (16, 'tackling', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (17, 'man_coverage', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (18, 'zone_coverage', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (19, 'kick_power', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (20, 'short_kick_accuracy', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (21, 'medium_kick_accuracy', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (22, 'long_kick_accuracy', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (23, 'stamina', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (24, 'injury_resistance', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (28, 'agility', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (29, 'acceleration', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (30, 'awareness', True)")
cursor.execute("INSERT INTO attribute (id,name,boostable) VALUES (31, 'intelligence', False)")
cursor.execute("ALTER SEQUENCE attribute_id_seq RESTART WITH 31")
db_connection.commit()
print('Restored Table: attribute Succesfully')
except:
sys.exit(1)
cursor.close()
db_connection.close()
| 59.102041 | 104 | 0.751036 | 396 | 2,896 | 5.414141 | 0.252525 | 0.163713 | 0.221549 | 0.26819 | 0.729478 | 0.729478 | 0.729478 | 0.729478 | 0.729478 | 0.704757 | 0 | 0.019901 | 0.097721 | 2,896 | 48 | 105 | 60.333333 | 0.800612 | 0.007251 | 0 | 0 | 0 | 0 | 0.710755 | 0.063696 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.023256 | 0.046512 | 0 | 0.046512 | 0.046512 | 0 | 0 | 0 | null | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
09d07365277f3e12bc7928d55a2777cd25a08222 | 186 | py | Python | src/yellowdog_client/model/keyring_access_secrets.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | [
"Apache-2.0"
] | null | null | null | src/yellowdog_client/model/keyring_access_secrets.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | [
"Apache-2.0"
] | null | null | null | src/yellowdog_client/model/keyring_access_secrets.py | yellowdog/yellowdog-sdk-python-public | da69a7d6e45c92933e34fefcaef8b5d98dcd6036 | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from typing import Optional
@dataclass
class KeyringAccessSecrets:
keyringPassword: Optional[str] = None
accessorSecret: Optional[str] = None
| 20.666667 | 41 | 0.790323 | 19 | 186 | 7.736842 | 0.631579 | 0.14966 | 0.204082 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.155914 | 186 | 8 | 42 | 23.25 | 0.936306 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.166667 | 0.333333 | 0 | 0.833333 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 7 |
09fca40404614333d6c384d5bd0e96387c2c35f9 | 2,830 | py | Python | src/autosetup/tests/test_mqtt_camnode.py | cdeck3r/3DScanner | bbd80d676985d2978ed2bc981164b9f72f4c3737 | [
"MIT"
] | 2 | 2021-03-26T18:14:24.000Z | 2021-10-13T06:32:11.000Z | src/autosetup/tests/test_mqtt_camnode.py | cdeck3r/3DScanner | bbd80d676985d2978ed2bc981164b9f72f4c3737 | [
"MIT"
] | null | null | null | src/autosetup/tests/test_mqtt_camnode.py | cdeck3r/3DScanner | bbd80d676985d2978ed2bc981164b9f72f4c3737 | [
"MIT"
] | 3 | 2020-12-01T08:23:46.000Z | 2021-10-13T06:32:11.000Z | #
# Testinfra testcases to test mqtt
#
# Author: cdeck3r
#
import random
import pytest
#####################################################
# Tests
#####################################################
@pytest.mark.usefixtures("camnode_ssh_config")
class TestMQTTCamnode:
    """Testinfra checks for MQTT publish/subscribe from a camnode host."""

    @pytest.mark.parametrize('broker', ['centralnode'])
    def test_autosetup_mqtt_publish(self, pytestconfig, broker, host):
        # Resolve the broker hostname from pytest ini config and publish
        # one message via the mosquitto client installed on the node.
        host_name = pytestconfig.getini(broker.lower())
        payload = '"Test message from ' + __file__ + '"'
        publish_cmd = f'mosquitto_pub -h {host_name} -t camnode/test -m {payload}'
        assert host.run(publish_cmd).succeeded

    @pytest.mark.parametrize('broker', ['centralnode'])
    def test_autosetup_mqtt_subscribe(self, pytestconfig, broker, host):
        # 1. publish a retained message carrying a random number
        # 2. subscribe and verify the exact message is delivered back
        #
        # Uses the mosquitto client installed on the node.
        host_name = pytestconfig.getini(broker.lower())
        token = random.randint(0, 1000)
        payload = f'Test message from {__file__} with random number {token}'
        publish_cmd = (
            f'mosquitto_pub -h {host_name}'
            f' -t camnode/test --retain -m "{payload}"'
        )
        assert host.run(publish_cmd).succeeded
        subscribe_cmd = f'mosquitto_sub -h {host_name} -t camnode/test -W 2'
        assert host.run(subscribe_cmd).stdout.rstrip() == payload

    @pytest.mark.parametrize('broker', ['centralnode'])
    def test_autosetup_mqtt_clear_retained_msg(self, pytestconfig, broker, host):
        # 1. publish a retained message carrying a random number
        # 2. subscribe and verify it comes back verbatim
        # 3. publish an empty retained message to clear the retained slot
        #
        # Uses the mosquitto client installed on the node.
        host_name = pytestconfig.getini(broker.lower())
        token = random.randint(0, 1000)
        payload = f'Test message from {__file__} with random number {token}'
        publish_cmd = (
            f'mosquitto_pub -h {host_name}'
            f' -t camnode/test --retain -m "{payload}"'
        )
        assert host.run(publish_cmd).succeeded
        subscribe_cmd = f'mosquitto_sub -h {host_name} -t camnode/test -W 2'
        assert host.run(subscribe_cmd).stdout.rstrip() == payload
        payload = ''
        publish_cmd = (
            f'mosquitto_pub -h {host_name}'
            f' -t camnode/test --retain -m "{payload}"'
        )
        assert host.run(publish_cmd).succeeded
        subscribe_cmd = f'mosquitto_sub -h {host_name} -t camnode/test -W 2'
        assert host.run(subscribe_cmd).stdout.rstrip() == ''
| 33.294118 | 83 | 0.553004 | 306 | 2,830 | 4.931373 | 0.222222 | 0.066269 | 0.051027 | 0.055666 | 0.858184 | 0.843605 | 0.843605 | 0.843605 | 0.821736 | 0.706428 | 0 | 0.009596 | 0.300353 | 2,830 | 84 | 84 | 33.690476 | 0.752525 | 0.135336 | 0 | 0.740741 | 0 | 0 | 0.197935 | 0 | 0 | 0 | 0 | 0 | 0.12963 | 1 | 0.055556 | false | 0 | 0.037037 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
1120b1392e0cc4a42e53d4236aa38ad89ee4de0a | 46,404 | py | Python | tensorflow_quantum/core/ops/math_ops/simulate_mps_test.py | artiseza/quantum | 72f6e5bae843c841117426a0c8e1ee1d6557995e | [
"Apache-2.0"
] | 1 | 2022-02-10T11:20:48.000Z | 2022-02-10T11:20:48.000Z | tensorflow_quantum/core/ops/math_ops/simulate_mps_test.py | artiseza/quantum | 72f6e5bae843c841117426a0c8e1ee1d6557995e | [
"Apache-2.0"
] | null | null | null | tensorflow_quantum/core/ops/math_ops/simulate_mps_test.py | artiseza/quantum | 72f6e5bae843c841117426a0c8e1ee1d6557995e | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 The TensorFlow Quantum Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests that specifically target simulate_mps."""
# Remove PYTHONPATH collisions for protobuf.
# pylint: disable=wrong-import-position
import sys
NEW_PATH = [x for x in sys.path if 'com_google_protobuf' not in x]
sys.path = NEW_PATH
# pylint: enable=wrong-import-position
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
import cirq
import cirq_google
import sympy
from scipy import stats
from tensorflow_quantum.core.ops import batch_util
from tensorflow_quantum.core.ops.math_ops import simulate_mps
from tensorflow_quantum.python import util
def _make_1d_circuit(qubits, depth):
    """Create a 1d ladder circuit.

    Each layer applies random-power Y rotations on every qubit, followed by
    random-power Sycamore gates on alternating neighbour pairs (even pairs,
    then odd pairs with the operand order reversed), repeated `depth` times.
    """
    pairs_even = list(zip(qubits[::2], qubits[1::2]))
    pairs_odd = list(zip(qubits[1::2], qubits[2::2]))
    circuit = cirq.Circuit()
    for _ in range(depth):
        circuit += [cirq.Y(q)**np.random.random() for q in qubits]
        circuit += [
            cirq_google.SycamoreGate()(a, b)**np.random.random()
            for a, b in pairs_even
        ]
        circuit += [cirq.Y(q)**np.random.random() for q in qubits]
        circuit += [
            cirq_google.SycamoreGate()(b, a)**np.random.random()
            for a, b in pairs_odd
        ]
    return circuit
class SimulateMPS1DExpectationTest(tf.test.TestCase):
    """Tests mps_1d_expectation."""

    def test_simulate_mps_1d_expectation_inputs(self):
        """Makes sure that the op fails gracefully on bad inputs."""
        n_qubits = 5
        batch_size = 5
        symbol_names = ['alpha']
        qubits = cirq.GridQubit.rect(1, n_qubits)
        # Valid baseline inputs: a batch of identical 1D-topology
        # parameterized circuits; each probe below perturbs one argument.
        circuit_batch = [
            cirq.Circuit(
                cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
                cirq.Z(qubits[1]),
                cirq.CNOT(qubits[2], qubits[3]),
                cirq.Y(qubits[4])**sympy.Symbol(symbol_names[0]),
            ) for _ in range(batch_size)
        ]
        resolver_batch = [{symbol_names[0]: 0.123} for _ in range(batch_size)]

        # Shape: (batch_size, n_symbols).
        symbol_values_array = np.array(
            [[resolver[symbol]
              for symbol in symbol_names]
             for resolver in resolver_batch])

        pauli_sums = util.random_pauli_sums(qubits, 3, batch_size)

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'programs must be rank 1'):
            # Circuit tensor has too many dimensions.
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor([circuit_batch]), symbol_names,
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'symbol_names must be rank 1.'):
            # symbol_names tensor has too many dimensions.
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), np.array([symbol_names]),
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'symbol_values must be rank 2.'):
            # symbol_values_array tensor has too many dimensions.
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                np.array([symbol_values_array]),
                util.convert_to_tensor([[x] for x in pauli_sums]))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'symbol_values must be rank 2.'):
            # symbol_values_array tensor has too few dimensions.
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array[0],
                util.convert_to_tensor([[x] for x in pauli_sums]))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'pauli_sums must be rank 2.'):
            # pauli_sums tensor has too few dimensions.
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array, util.convert_to_tensor(list(pauli_sums)))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'pauli_sums must be rank 2.'):
            # pauli_sums tensor has too many dimensions.
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array,
                util.convert_to_tensor([[[x]] for x in pauli_sums]))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'Unparseable proto'):
            # circuit tensor has the right type but invalid values.
            simulate_mps.mps_1d_expectation(
                ['junk'] * batch_size, symbol_names, symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'Could not find symbol in parameter map'):
            # symbol_names tensor has the right type but invalid values.
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), ['junk'],
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'qubits not found in circuit'):
            # pauli_sums tensor has the right type but invalid values:
            # operators acting on qubits absent from the circuits.
            new_qubits = [cirq.GridQubit(5, 5), cirq.GridQubit(9, 9)]
            new_pauli_sums = util.random_pauli_sums(new_qubits, 2, batch_size)
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array,
                util.convert_to_tensor([[x] for x in new_pauli_sums]))

        with self.assertRaisesRegex(TypeError, 'Cannot convert'):
            # circuits tensor has the wrong type.
            simulate_mps.mps_1d_expectation(
                [1.0] * batch_size, symbol_names, symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]))

        with self.assertRaisesRegex(TypeError, 'Cannot convert'):
            # symbol_names tensor has the wrong type.
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), [0.1234],
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]))

        with self.assertRaisesRegex(tf.errors.UnimplementedError, ''):
            # symbol_values tensor has the wrong type.
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                [['junk']] * batch_size,
                util.convert_to_tensor([[x] for x in pauli_sums]))

        with self.assertRaisesRegex(TypeError, 'Cannot convert'):
            # pauli_sums tensor has the wrong type.
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array, [[1.0]] * batch_size)

        with self.assertRaisesRegex(TypeError, 'missing'):
            # we are missing an argument.
            # pylint: disable=no-value-for-parameter
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array)
            # pylint: enable=no-value-for-parameter

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'at least minimum 4'):
            # bond_dim below the op's minimum of 4.
            # pylint: disable=too-many-function-args
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]), 1)

        with self.assertRaisesRegex(TypeError, 'Expected int'):
            # bond_dim should be int.
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]), [])

        with self.assertRaisesRegex(TypeError, 'positional arguments'):
            # too many positional arguments.
            # pylint: disable=too-many-function-args
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]), 1, [])

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    expected_regex='do not match'):
            # wrong op size.
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums
                                       ][:int(batch_size * 0.5)]))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    expected_regex='do not match'):
            # wrong symbol_values size.
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array[:int(batch_size * 0.5)],
                util.convert_to_tensor([[x] for x in pauli_sums]))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    expected_regex='cirq.Channel'):
            # attempting to use noisy circuit.
            noisy_circuit = cirq.Circuit(cirq.depolarize(0.3).on_each(*qubits))
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor([noisy_circuit for _ in pauli_sums]),
                symbol_names, symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    expected_regex='not in 1D topology'):
            # attempting to use a circuit not in 1D topology
            # 0--1--2--3
            #    \-4
            circuit_not_1d = cirq.Circuit(
                cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
                cirq.Z(qubits[1])**sympy.Symbol(symbol_names[0]),
                cirq.CNOT(qubits[2], qubits[3]),
                cirq.CNOT(qubits[2], qubits[4]),
            )
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor([circuit_not_1d for _ in pauli_sums]),
                symbol_names, symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    expected_regex='not in 1D topology'):
            # attempting to use a circuit in 1D topology, which looks in 2D.
            # 0--1
            #  \-2-\
            #    3--4 == 1--0--2--4--3
            circuit_not_1d = cirq.Circuit(
                cirq.CNOT(qubits[0], qubits[1]),
                cirq.CNOT(qubits[0], qubits[2]),
                cirq.CNOT(qubits[2], qubits[4]),
                cirq.CNOT(qubits[3], qubits[4]),
            )
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor([circuit_not_1d for _ in pauli_sums]),
                symbol_names, symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    expected_regex='Found: 3 qubit gate'):
            # attempting to use 3 qubit gate
            three_qb_circuit = cirq.Circuit(
                cirq.ISWAP(qubits[0], qubits[1]).controlled_by(qubits[2]),
                cirq.X.on_each(*qubits))
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor([three_qb_circuit for _ in pauli_sums]),
                symbol_names, symbol_values_array,
                util.convert_to_tensor([[x] for x in pauli_sums]))

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    expected_regex='minimum 3 qubits'):
            # too few qubits.
            circuit_small = cirq.Circuit(cirq.X(qubits[0]), cirq.X(qubits[1]),
                                         cirq.X(qubits[2]))
            small_pauli = cirq.Z(qubits[0])
            simulate_mps.mps_1d_expectation(
                util.convert_to_tensor([circuit_small for _ in pauli_sums]),
                symbol_names, symbol_values_array,
                util.convert_to_tensor([[small_pauli] for _ in pauli_sums]))

    def test_simulate_mps_1d_expectation_simple(self):
        """Makes sure that the op shows the same result with Cirq."""
        n_qubits = 5
        batch_size = 5
        symbol_names = ['alpha']
        qubits = cirq.GridQubit.rect(1, n_qubits)
        circuit_batch = [
            cirq.Circuit(
                cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
                cirq.Z(qubits[1]),
                cirq.CNOT(qubits[2], qubits[3]),
                cirq.Y(qubits[4])**sympy.Symbol(symbol_names[0]),
            ) for _ in range(batch_size)
        ]
        resolver_batch = [{symbol_names[0]: 0.123} for _ in range(batch_size)]

        symbol_values_array = np.array(
            [[resolver[symbol]
              for symbol in symbol_names]
             for resolver in resolver_batch])

        pauli_sums = [
            cirq.Z(qubits[0]) * cirq.X(qubits[4]) for _ in range(batch_size)
        ]

        # Cirq's exact state-vector simulator is the reference.
        cirq_result = [
            cirq.Simulator().simulate_expectation_values(c, p, r)
            for c, p, r in zip(circuit_batch, pauli_sums, resolver_batch)
        ]
        # Default bond_dim=4
        mps_result = simulate_mps.mps_1d_expectation(
            util.convert_to_tensor(circuit_batch), symbol_names,
            symbol_values_array,
            util.convert_to_tensor([[x] for x in pauli_sums]))
        # Expected value of 0.349...
        self.assertAllClose(mps_result, cirq_result)

    def test_complex_equality(self):
        """Check moderate sized 1d random circuits."""
        batch_size = 10
        qubits = cirq.GridQubit.rect(1, 8)
        circuit_batch = [_make_1d_circuit(qubits, 3) for _ in range(batch_size)]

        # Mix of single terms, products and sums of end-qubit Z operators.
        pauli_sums = [[
            cirq.Z(qubits[0]),
            cirq.Z(qubits[-1]),
            cirq.Z(qubits[0]) * cirq.Z(qubits[-1]),
            cirq.Z(qubits[0]) + cirq.Z(qubits[-1])
        ] for _ in range(batch_size)]
        symbol_names = []
        resolver_batch = [{} for _ in range(batch_size)]

        symbol_values_array = np.array(
            [[resolver[symbol]
              for symbol in symbol_names]
             for resolver in resolver_batch])

        cirq_result = [
            cirq.Simulator().simulate_expectation_values(c, p, r)
            for c, p, r in zip(circuit_batch, pauli_sums, resolver_batch)
        ]
        mps_result = simulate_mps.mps_1d_expectation(
            util.convert_to_tensor(circuit_batch),
            symbol_names,
            symbol_values_array,
            util.convert_to_tensor(pauli_sums),
            bond_dim=32)
        self.assertAllClose(mps_result, cirq_result, atol=1e-5)

    def test_correctness_empty(self):
        """Tests the mps op with empty circuits."""
        empty_circuit = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
        empty_symbols = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
        empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
        empty_paulis = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)

        out = simulate_mps.mps_1d_expectation(empty_circuit, empty_symbols,
                                              empty_values, empty_paulis, 32)

        # Empty inputs should yield an empty (0, 0) output, not an error.
        self.assertShapeEqual(np.zeros((0, 0)), out)
class SimulateMPS1DSamplesTest(tf.test.TestCase, parameterized.TestCase):
    """Tests tfq_simulate_mps1d_samples."""

    def test_simulate_mps1d_samples_inputs(self):
        """Make sure the sample op fails gracefully on bad inputs."""
        n_qubits = 5
        num_samples = 10
        batch_size = 5
        symbol_names = ['alpha']
        qubits = cirq.GridQubit.rect(1, n_qubits)
        # Valid baseline inputs; each probe below perturbs one argument.
        circuit_batch = [
            cirq.Circuit(
                cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
                cirq.Z(qubits[1]),
                cirq.CNOT(qubits[2], qubits[3]),
                cirq.Y(qubits[4])**sympy.Symbol(symbol_names[0]),
            ) for _ in range(batch_size)
        ]
        resolver_batch = [{symbol_names[0]: 0.123} for _ in range(batch_size)]

        # Shape: (batch_size, n_symbols).
        symbol_values_array = np.array(
            [[resolver[symbol]
              for symbol in symbol_names]
             for resolver in resolver_batch])

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'rank 1. Got rank 2'):
            # programs tensor has the wrong shape.
            simulate_mps.mps_1d_sample(util.convert_to_tensor([circuit_batch]),
                                       symbol_names, symbol_values_array,
                                       [num_samples])

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'rank 1. Got rank 2'):
            # symbol_names tensor has the wrong shape.
            simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
                                       np.array([symbol_names]),
                                       symbol_values_array, [num_samples])

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'rank 2. Got rank 3'):
            # symbol_values tensor has the wrong shape.
            simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
                                       symbol_names,
                                       np.array([symbol_values_array]),
                                       [num_samples])

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'rank 2. Got rank 1'):
            # symbol_values tensor has the wrong shape 2.
            simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
                                       symbol_names, symbol_values_array[0],
                                       [num_samples])

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'rank 1. Got rank 2'):
            # num_samples tensor has the wrong shape.
            simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
                                       symbol_names, symbol_values_array,
                                       [[num_samples]])

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'Could not find symbol in parameter map'):
            # symbol_names tensor has the right type, but invalid value.
            simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
                                       ['junk'], symbol_values_array,
                                       [num_samples])

        with self.assertRaisesRegex(TypeError, 'Cannot convert'):
            # programs tensor has the wrong type.
            simulate_mps.mps_1d_sample([1] * batch_size, symbol_names,
                                       symbol_values_array, [num_samples])

        with self.assertRaisesRegex(TypeError, 'Cannot convert'):
            # programs tensor has the wrong type.
            simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
                                       [1], symbol_values_array, [num_samples])

        with self.assertRaisesRegex(tf.errors.UnimplementedError,
                                    'Cast string to float is not supported'):
            # programs tensor has the wrong type.
            simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
                                       symbol_names, [['junk']] * batch_size,
                                       [num_samples])

        with self.assertRaisesRegex(Exception, 'junk'):
            # num_samples tensor has the wrong shape.
            simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
                                       symbol_names, symbol_values_array,
                                       ['junk'])

        with self.assertRaisesRegex(TypeError, 'missing'):
            # too few tensors.
            # pylint: disable=no-value-for-parameter
            simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
                                       symbol_names, symbol_values_array)
            # pylint: enable=no-value-for-parameter

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    expected_regex='do not match'):
            # wrong symbol_values size.
            simulate_mps.mps_1d_sample(
                util.convert_to_tensor(circuit_batch), symbol_names,
                symbol_values_array[:int(batch_size * 0.5)], num_samples)

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    expected_regex='cirq.Channel'):
            # attempting to use noisy circuit.
            noisy_circuit = cirq.Circuit(cirq.depolarize(0.3).on_each(*qubits))
            simulate_mps.mps_1d_sample(
                util.convert_to_tensor([noisy_circuit for _ in circuit_batch]),
                symbol_names, symbol_values_array, [num_samples])

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    'at least minimum 4'):
            # bond_dim below the op's minimum of 4.
            # pylint: disable=too-many-function-args
            simulate_mps.mps_1d_sample(util.convert_to_tensor(circuit_batch),
                                       symbol_names, symbol_values_array,
                                       [num_samples], 1)

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    expected_regex='not in 1D topology'):
            # attempting to use a circuit not in 1D topology
            # 0--1--2--3
            #    \-4
            circuit_not_1d = cirq.Circuit(
                cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
                cirq.Z(qubits[1])**sympy.Symbol(symbol_names[0]),
                cirq.CNOT(qubits[2], qubits[3]),
                cirq.CNOT(qubits[2], qubits[4]),
            )
            simulate_mps.mps_1d_sample(
                util.convert_to_tensor([circuit_not_1d for _ in circuit_batch]),
                symbol_names, symbol_values_array, [num_samples])

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    expected_regex='not in 1D topology'):
            # attempting to use a circuit in 1D topology, which looks in 2D.
            # 0--1
            #  \-2-\
            #    3--4 == 1--0--2--4--3
            circuit_not_1d = cirq.Circuit(
                cirq.CNOT(qubits[0], qubits[1]),
                cirq.CNOT(qubits[0], qubits[2]),
                cirq.CNOT(qubits[2], qubits[4]),
                cirq.CNOT(qubits[3], qubits[4]),
            )
            simulate_mps.mps_1d_sample(
                util.convert_to_tensor([circuit_not_1d for _ in circuit_batch]),
                symbol_names, symbol_values_array, [num_samples])

        with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
                                    expected_regex='minimum 3 qubits'):
            # too few qubits.
            circuit_small = cirq.Circuit(cirq.X(qubits[0]), cirq.X(qubits[1]),
                                         cirq.X(qubits[2]))
            simulate_mps.mps_1d_sample(
                util.convert_to_tensor([circuit_small for _ in circuit_batch]),
                symbol_names, symbol_values_array, [num_samples])

    @parameterized.parameters([
        {
            'all_n_qubits': [4, 5],
            'n_samples': 10
        },
        {
            'all_n_qubits': [4, 5, 8],
            'n_samples': 10
        },
    ])
    def test_sampling_output_padding(self, all_n_qubits, n_samples):
        """Check that the sampling ops pad outputs correctly"""
        op = simulate_mps.mps_1d_sample
        circuits = []
        expected_outputs = []
        # All-X circuits deterministically sample all-ones bitstrings,
        # so the expected output per circuit is a ones matrix.
        for n_qubits in all_n_qubits:
            expected_outputs.append(np.ones((n_samples, n_qubits)))
            circuits.append(
                cirq.Circuit(cirq.X.on_each(*cirq.GridQubit.rect(1, n_qubits))))
        results = op(util.convert_to_tensor(circuits), [], [[]] * len(circuits),
                     [n_samples]).to_list()
        for a, b in zip(expected_outputs, results):
            self.assertAllClose(a, b)

    def test_ghz_state(self):
        """Test a simple GHZ-like state."""
        op = simulate_mps.mps_1d_sample

        qubits = cirq.GridQubit.rect(1, 6)
        circuit = cirq.Circuit(cirq.I.on_each(*qubits))
        circuit += [
            cirq.X(qubits[0]),
            cirq.H(qubits[1]),
            cirq.CNOT(qubits[1], qubits[2])
        ]

        circuit_batch = [circuit]
        resolver_batch = [cirq.ParamResolver({})]
        n_samples = 1000

        cirq_samples = batch_util.batch_sample(circuit_batch, resolver_batch,
                                               n_samples, cirq.Simulator())

        op_samples = np.array(
            op(util.convert_to_tensor(circuit_batch), [], [[]], [n_samples],
               bond_dim=16).to_list())
        # Compare per-qubit sample means between the MPS op and Cirq.
        self.assertAllClose(np.mean(op_samples, axis=1),
                            np.mean(cirq_samples, axis=1),
                            atol=1e-1)

    def test_sampling_fuzz(self):
        """Compare sampling with tfq ops and Cirq."""
        op = simulate_mps.mps_1d_sample
        batch_size = 10
        n_qubits = 6
        qubits = cirq.GridQubit.rect(1, n_qubits)
        symbol_names = []
        n_samples = 10_000

        circuit_batch = [_make_1d_circuit(qubits, 1) for _ in range(batch_size)]
        resolver_batch = [cirq.ParamResolver({}) for _ in range(batch_size)]

        symbol_values_array = np.array(
            [[resolver[symbol]
              for symbol in symbol_names]
             for resolver in resolver_batch])

        op_samples = np.array(
            op(util.convert_to_tensor(circuit_batch),
               symbol_names,
               symbol_values_array, [n_samples],
               bond_dim=16).to_list())

        # Histogram bitstrings (packed into integers) over all 2**n outcomes.
        op_histograms = [
            np.histogram(
                sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)),
                range=(0, 2**len(qubits)),
                bins=2**len(qubits))[0] for sample in op_samples
        ]

        cirq_samples = batch_util.batch_sample(circuit_batch, resolver_batch,
                                               n_samples, cirq.Simulator())

        cirq_histograms = [
            np.histogram(
                sample.dot(1 << np.arange(sample.shape[-1] - 1, -1, -1)),
                range=(0, 2**len(qubits)),
                bins=2**len(qubits))[0] for sample in cirq_samples
        ]

        # Distributions should nearly agree: small KL divergence
        # (stats.entropy with two args), with 1e-8 smoothing for empty bins.
        for a, b in zip(op_histograms, cirq_histograms):
            self.assertLess(stats.entropy(a + 1e-8, b + 1e-8), 0.05)
class SimulateMPS1DSampledExpectationTest(tf.test.TestCase):
"""Tests tfq_simulate_mps1d_sampled_expectation."""
def test_simulate_mps1d_sampled_expectation_inputs(self):
"""Make sure sampled expectation op fails gracefully on bad inputs."""
n_qubits = 5
batch_size = 5
symbol_names = ['alpha']
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch = [
cirq.Circuit(
cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
cirq.Z(qubits[1]),
cirq.CNOT(qubits[2], qubits[3]),
cirq.Y(qubits[4])**sympy.Symbol(symbol_names[0]),
) for _ in range(batch_size)
]
resolver_batch = [{symbol_names[0]: 0.123} for _ in range(batch_size)]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
pauli_sums = util.random_pauli_sums(qubits, 3, batch_size)
num_samples = [[10]] * batch_size
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'programs must be rank 1'):
# Circuit tensor has too many dimensions.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor([circuit_batch]), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_names must be rank 1.'):
# symbol_names tensor has too many dimensions.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), np.array([symbol_names]),
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_values must be rank 2.'):
# symbol_values_array tensor has too many dimensions.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
np.array([symbol_values_array]),
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'symbol_values must be rank 2.'):
# symbol_values_array tensor has too few dimensions.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array[0],
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'pauli_sums must be rank 2.'):
# pauli_sums tensor has too few dimensions.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch),
symbol_names, symbol_values_array,
util.convert_to_tensor(list(pauli_sums)), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'pauli_sums must be rank 2.'):
# pauli_sums tensor has too many dimensions.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
[util.convert_to_tensor([[x] for x in pauli_sums])],
num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'num_samples must be rank 2'):
# num_samples tensor has the wrong shape.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]),
[num_samples])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'num_samples must be rank 2'):
# num_samples tensor has the wrong shape.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]),
num_samples[0])
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Unparseable proto'):
# circuit tensor has the right type but invalid values.
simulate_mps.mps_1d_sampled_expectation(
['junk'] * batch_size, symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Could not find symbol in parameter map'):
# symbol_names tensor has the right type but invalid values.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), ['junk'],
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'qubits not found in circuit'):
# pauli_sums tensor has the right type but invalid values.
new_qubits = [cirq.GridQubit(5, 5), cirq.GridQubit(9, 9)]
new_pauli_sums = util.random_pauli_sums(new_qubits, 2, batch_size)
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in new_pauli_sums]),
num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'Unparseable proto'):
# pauli_sums tensor has the right type but invalid values 2.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, [['junk']] * batch_size, num_samples)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# circuits tensor has the wrong type.
simulate_mps.mps_1d_sampled_expectation(
[1.0] * batch_size, symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# symbol_names tensor has the wrong type.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), [0.1234],
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.UnimplementedError, ''):
# symbol_values tensor has the wrong type.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
[['junk']] * batch_size,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(TypeError, 'Cannot convert'):
# pauli_sums tensor has the wrong type.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, [[1.0]] * batch_size, num_samples)
with self.assertRaisesRegex(TypeError, 'missing'):
# we are missing an argument.
# pylint: disable=no-value-for-parameter
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array, num_samples)
# pylint: enable=no-value-for-parameter
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='do not match'):
# wrong op size.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor([cirq.Circuit()]), symbol_names,
symbol_values_array.astype(np.float64),
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'minimum 4'):
# pylint: disable=too-many-function-args
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch),
symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]),
num_samples,
bond_dim=-10)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='do not match'):
# wrong symbol_values size.
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array[:int(batch_size * 0.5)],
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='cirq.Channel'):
# attempting to use noisy circuit.
noisy_circuit = cirq.Circuit(cirq.depolarize(0.3).on_each(*qubits))
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor([noisy_circuit for _ in pauli_sums]),
symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
'at least minimum 4'):
# pylint: disable=too-many-function-args
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples,
1)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='not in 1D topology'):
# attempting to use a circuit not in 1D topology
# 0--1--2--3
# \-4
circuit_not_1d = cirq.Circuit(
cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
cirq.Z(qubits[1])**sympy.Symbol(symbol_names[0]),
cirq.CNOT(qubits[2], qubits[3]),
cirq.CNOT(qubits[2], qubits[4]),
)
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor([circuit_not_1d for _ in pauli_sums]),
symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='not in 1D topology'):
# attempting to use a circuit in 1D topology, which looks in 2D.
# 0--1
# \-2-\
# 3--4 == 1--0--2--4--3
circuit_not_1d = cirq.Circuit(
cirq.CNOT(qubits[0], qubits[1]),
cirq.CNOT(qubits[0], qubits[2]),
cirq.CNOT(qubits[2], qubits[4]),
cirq.CNOT(qubits[3], qubits[4]),
)
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor([circuit_not_1d for _ in pauli_sums]),
symbol_names, symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
with self.assertRaisesRegex(tf.errors.InvalidArgumentError,
expected_regex='minimum 3 qubits'):
# too few qubits.
circuit_small = cirq.Circuit(cirq.X(qubits[0]), cirq.X(qubits[1]),
cirq.X(qubits[2]))
small_pauli = cirq.Z(qubits[0])
simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor([circuit_small for _ in pauli_sums]),
symbol_names, symbol_values_array,
util.convert_to_tensor([[small_pauli] for _ in pauli_sums]),
num_samples)
def test_simulate_sampled_mps_1d_expectation_simple(self):
"""Makes sure that the op shows the same result with Cirq."""
n_qubits = 5
batch_size = 5
symbol_names = ['alpha']
qubits = cirq.GridQubit.rect(1, n_qubits)
circuit_batch = [
cirq.Circuit(
cirq.X(qubits[0])**sympy.Symbol(symbol_names[0]),
cirq.Z(qubits[1]),
cirq.CNOT(qubits[2], qubits[3]),
cirq.Y(qubits[4])**sympy.Symbol(symbol_names[0]),
) for _ in range(batch_size)
]
resolver_batch = [{symbol_names[0]: 0.123} for _ in range(batch_size)]
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
pauli_sums = [
cirq.Z(qubits[0]) * cirq.X(qubits[4]) for _ in range(batch_size)
]
num_samples = np.ones(shape=(len(pauli_sums), 1)) * 10000
cirq_result = [
cirq.Simulator().simulate_expectation_values(c, p, r)
for c, p, r in zip(circuit_batch, pauli_sums, resolver_batch)
]
# Default bond_dim=4
mps_result = simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch), symbol_names,
symbol_values_array,
util.convert_to_tensor([[x] for x in pauli_sums]), num_samples)
# Expected value of 0.349...
self.assertAllClose(mps_result, cirq_result, atol=5e-2)
def test_complex_equality(self):
"""Check moderate sized 1d random circuits."""
batch_size = 10
qubits = cirq.GridQubit.rect(1, 8)
circuit_batch = [_make_1d_circuit(qubits, 3) for _ in range(batch_size)]
pauli_sums = [[
cirq.Z(qubits[0]),
cirq.Z(qubits[-1]),
cirq.Z(qubits[0]) * cirq.Z(qubits[-1]),
cirq.Z(qubits[0]) + cirq.Z(qubits[-1])
] for _ in range(batch_size)]
symbol_names = []
resolver_batch = [{} for _ in range(batch_size)]
num_samples = np.ones_like(pauli_sums, dtype=np.int32) * 1000
symbol_values_array = np.array(
[[resolver[symbol]
for symbol in symbol_names]
for resolver in resolver_batch])
cirq_result = [
cirq.Simulator().simulate_expectation_values(c, p, r)
for c, p, r in zip(circuit_batch, pauli_sums, resolver_batch)
]
mps_result = simulate_mps.mps_1d_sampled_expectation(
util.convert_to_tensor(circuit_batch),
symbol_names,
symbol_values_array,
util.convert_to_tensor(pauli_sums),
num_samples,
bond_dim=32)
self.assertAllClose(mps_result, cirq_result, atol=2e-1)
def test_correctness_empty(self):
"""Tests the mps op with empty circuits."""
empty_circuit = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_symbols = tf.raw_ops.Empty(shape=(0,), dtype=tf.string)
empty_values = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.float32)
empty_paulis = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.string)
num_samples = tf.raw_ops.Empty(shape=(0, 0), dtype=tf.int32)
out = simulate_mps.mps_1d_sampled_expectation(empty_circuit,
empty_symbols,
empty_values,
empty_paulis, num_samples,
32)
self.assertShapeEqual(np.zeros((0, 0)), out)
class InputTypesTest(tf.test.TestCase, parameterized.TestCase):
"""Tests that different inputs types work for all of the ops. """
@parameterized.parameters([
{
'symbol_type': tf.float32
},
{
'symbol_type': tf.float64
},
{
'symbol_type': tf.int32
},
{
'symbol_type': tf.int64
},
{
'symbol_type': tf.complex64
},
])
def test_symbol_values_type(self, symbol_type):
"""Tests all three ops for the different types. """
qubits = cirq.GridQubit.rect(1, 5)
circuits = util.convert_to_tensor(
[cirq.Circuit(cirq.H.on_each(*qubits))])
symbol_names = ['symbol']
symbol_values = tf.convert_to_tensor([[1]], dtype=symbol_type)
pauli_sums = util.random_pauli_sums(qubits, 3, 1)
pauli_sums = util.convert_to_tensor([[x] for x in pauli_sums])
result = simulate_mps.mps_1d_expectation(circuits, symbol_names,
symbol_values, pauli_sums)
self.assertDTypeEqual(result, np.float32)
result = simulate_mps.mps_1d_sample(circuits, symbol_names,
symbol_values, [100])
self.assertDTypeEqual(result.numpy(), np.int8)
result = simulate_mps.mps_1d_sampled_expectation(
circuits, symbol_names, symbol_values, pauli_sums, [[100]])
self.assertDTypeEqual(result, np.float32)
if __name__ == "__main__":
tf.test.main()
| 45.494118 | 80 | 0.578506 | 5,299 | 46,404 | 4.799207 | 0.06171 | 0.042114 | 0.07019 | 0.08816 | 0.886438 | 0.867485 | 0.850065 | 0.840234 | 0.830325 | 0.818057 | 0 | 0.019207 | 0.326825 | 46,404 | 1,019 | 81 | 45.538763 | 0.794897 | 0.103569 | 0 | 0.739073 | 0 | 0 | 0.034256 | 0 | 0 | 0 | 0 | 0 | 0.103311 | 1 | 0.018543 | false | 0 | 0.01457 | 0 | 0.039735 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
1121f50508f940e5f1ce49ddc43745e03438defd | 9,062 | py | Python | utils/scripts/OOOlevelGen/src/sprite_templates/Caterpillar.py | fullscreennl/bullettime | 8967449cdf926aaed6bb7ec217d92e0689fb0c3c | [
"MIT"
] | null | null | null | utils/scripts/OOOlevelGen/src/sprite_templates/Caterpillar.py | fullscreennl/bullettime | 8967449cdf926aaed6bb7ec217d92e0689fb0c3c | [
"MIT"
] | null | null | null | utils/scripts/OOOlevelGen/src/sprite_templates/Caterpillar.py | fullscreennl/bullettime | 8967449cdf926aaed6bb7ec217d92e0689fb0c3c | [
"MIT"
] | null | null | null | import MonsterBuilder
def create(lb,xpos):
xml = """
<level>
<!-- BEGIN Monster construction -->
<sprite shape="circ" type="Enemy.EnemySprite" x="105" y="17" width="34" height="34" angle="0" restitution="0.2" static="false" friction="0.5" density="5" sheet="6" firstframe="caterpillar_body.png" setName="caterpillar1" classname="caterpillarLimb" spritedata="caterpillar" groupIndex="-1"/>
<sprite shape="circ" type="Enemy.EnemySprite" x="125" y="17" width="34" height="34" angle="0" restitution="0.2" static="false" friction="0.5" density="5" sheet="6" firstframe="caterpillar_body.png" setName="caterpillar2" classname="caterpillarLimb" spritedata="caterpillar" groupIndex="-1"/>
<sprite shape="circ" type="Enemy.EnemySprite" x="145" y="17" width="34" height="34" angle="0" restitution="0.2" static="false" friction="0.5" density="5" sheet="6" firstframe="caterpillar_body.png" setName="caterpillar3" classname="caterpillarLimb" spritedata="caterpillar" groupIndex="-1"/>
<sprite shape="circ" type="Enemy.EnemySprite" x="165" y="17" width="34" height="34" angle="0" restitution="0.2" static="false" friction="0.5" density="5" sheet="6" firstframe="caterpillar_body.png" setName="caterpillar4" classname="caterpillarLimb" spritedata="caterpillar" groupIndex="-1"/>
<sprite shape="circ" type="Enemy.EnemySprite" x="185" y="17" width="34" height="34" angle="0" restitution="0.2" static="false" friction="0.5" density="5" sheet="6" firstframe="caterpillar_body.png" setName="caterpillar5" classname="caterpillarLimb" spritedata="caterpillar" groupIndex="-1"/>
<sprite shape="circ" type="Enemy.EnemySprite" x="205" y="17" width="34" height="34" angle="0" restitution="0.2" static="false" friction="0.5" density="5" sheet="6" firstframe="caterpillar_body.png" setName="caterpillar6" classname="caterpillarLimb" spritedata="caterpillar" groupIndex="-1"/>
<sprite shape="circ" type="Enemy.EnemySprite" x="225" y="17" width="34" height="34" angle="0" restitution="0.2" static="false" friction="0.5" density="5" sheet="6" firstframe="caterpillar_body.png" setName="caterpillar7" classname="caterpillarLimb" spritedata="caterpillar" groupIndex="-1"/>
<sprite shape="circ" type="Enemy.EnemySprite" x="245" y="17" width="34" height="34" angle="0" restitution="0.2" static="false" friction="0.5" density="5" sheet="6" firstframe="caterpillar_body.png" setName="caterpillar8" classname="caterpillarLimb" spritedata="caterpillar" groupIndex="-1"/>
<sprite shape="circ" type="Enemy.EnemySprite" x="265" y="17" width="34" height="34" angle="0" restitution="0.2" static="false" friction="0.5" density="5" sheet="6" firstframe="caterpillar_body.png" setName="caterpillar9" classname="caterpillarLimb" spritedata="caterpillar" groupIndex="-1"/>
<sprite shape="circ" type="Enemy.EnemySprite" x="285" y="17" width="34" height="34" angle="0" restitution="0.2" static="false" friction="0.5" density="5" sheet="6" firstframe="caterpillar_body.png" setName="caterpillar10" classname="caterpillarLimb" spritedata="caterpillar" groupIndex="-1"/>
<sprite shape="circ" type="Enemy.EnemySprite" x="305" y="17" width="34" height="34" angle="0" restitution="0.2" static="false" friction="0.5" density="5" sheet="6" firstframe="caterpillar_body.png" setName="caterpillar11" classname="caterpillarLimb" spritedata="caterpillar" groupIndex="-1"/>
<sprite shape="circ" type="Enemy.EnemySprite" x="325" y="17" width="34" height="34" angle="0" restitution="0.2" static="false" friction="0.5" density="5" sheet="6" firstframe="caterpillar_body.png" setName="caterpillar12" classname="caterpillarLimb" spritedata="caterpillar" groupIndex="-1"/>
<sprite shape="circ" type="Enemy.EnemySprite" x="345" y="17" width="34" height="34" angle="0" restitution="0.2" static="false" friction="0.5" density="5" sheet="6" firstframe="caterpillar_body.png" setName="caterpillar13" classname="caterpillarLimb" spritedata="caterpillar" groupIndex="-1"/>
<sprite shape="circ" type="Enemy.EnemySprite" x="365" y="17" width="34" height="34" angle="0" restitution="0.2" static="false" friction="0.5" density="5" sheet="6" firstframe="caterpillar_body.png" setName="caterpillar14" classname="caterpillarLimb" spritedata="caterpillar" groupIndex="-1"/>
<sprite shape="circ" type="Enemy.EnemySprite" x="75" y="20" width="40" height="40" angle="0" restitution="0.2" static="false" friction="0.5" density="5" sheet="6" firstframe="caterpillar_head.png" setName="caterpillar0" classname="caterpillarBrain" spritedata="caterpillar" groupIndex="-1"/>
<sprite type="Joints.RevoluteJoint" id="2" body1="caterpillar1" body2="caterpillar0" motor_speed="2.0" torque="1000.0" enable_motor="true" lower_angle="12" upper_angle="45" enable_limit="false" collide_connected="false" bx="75" by="20" b2_Xoffset="0" b2_Yoffset="0" ax="105" ay="17" b1_Xoffset="0" b1_Yoffset="0"/>
<sprite type="Joints.RevoluteJoint" id="3" body1="caterpillar2" body2="caterpillar1" motor_speed="2.0" torque="1000.0" enable_motor="true" lower_angle="12" upper_angle="45" enable_limit="false" collide_connected="false" bx="96" by="9" b2_Xoffset="-9" b2_Yoffset="-8" ax="125" ay="17" b1_Xoffset="0" b1_Yoffset="0"/>
<sprite type="Joints.RevoluteJoint" id="4" body1="caterpillar3" body2="caterpillar2" motor_speed="2.0" torque="1000.0" enable_motor="true" lower_angle="12" upper_angle="45" enable_limit="false" collide_connected="false" bx="96" by="9" b2_Xoffset="-29" b2_Yoffset="-8" ax="145" ay="17" b1_Xoffset="0" b1_Yoffset="0"/>
<sprite type="Joints.RevoluteJoint" id="5" body1="caterpillar4" body2="caterpillar3" motor_speed="2.0" torque="1000.0" enable_motor="true" lower_angle="12" upper_angle="45" enable_limit="false" collide_connected="false" bx="96" by="9" b2_Xoffset="-49" b2_Yoffset="-8" ax="165" ay="17" b1_Xoffset="0" b1_Yoffset="0"/>
<sprite type="Joints.RevoluteJoint" id="6" body1="caterpillar5" body2="caterpillar4" motor_speed="2.0" torque="1000.0" enable_motor="true" lower_angle="12" upper_angle="45" enable_limit="false" collide_connected="false" bx="96" by="9" b2_Xoffset="-69" b2_Yoffset="-8" ax="185" ay="17" b1_Xoffset="0" b1_Yoffset="0"/>
<sprite type="Joints.RevoluteJoint" id="7" body1="caterpillar6" body2="caterpillar5" motor_speed="2.0" torque="1000.0" enable_motor="true" lower_angle="12" upper_angle="45" enable_limit="false" collide_connected="false" bx="96" by="9" b2_Xoffset="-89" b2_Yoffset="-8" ax="205" ay="17" b1_Xoffset="0" b1_Yoffset="0"/>
<sprite type="Joints.RevoluteJoint" id="8" body1="caterpillar7" body2="caterpillar6" motor_speed="2.0" torque="1000.0" enable_motor="true" lower_angle="12" upper_angle="45" enable_limit="false" collide_connected="false" bx="96" by="9" b2_Xoffset="-109" b2_Yoffset="-8" ax="225" ay="17" b1_Xoffset="0" b1_Yoffset="0"/>
<sprite type="Joints.RevoluteJoint" id="9" body1="caterpillar8" body2="caterpillar7" motor_speed="2.0" torque="1000.0" enable_motor="true" lower_angle="12" upper_angle="45" enable_limit="false" collide_connected="false" bx="96" by="9" b2_Xoffset="-129" b2_Yoffset="-8" ax="245" ay="17" b1_Xoffset="0" b1_Yoffset="0"/>
<sprite type="Joints.RevoluteJoint" id="10" body1="caterpillar9" body2="caterpillar8" motor_speed="2.0" torque="1000.0" enable_motor="true" lower_angle="12" upper_angle="45" enable_limit="false" collide_connected="false" bx="96" by="9" b2_Xoffset="-149" b2_Yoffset="-8" ax="265" ay="17" b1_Xoffset="0" b1_Yoffset="0"/>
<sprite type="Joints.RevoluteJoint" id="11" body1="caterpillar10" body2="caterpillar9" motor_speed="2.0" torque="1000.0" enable_motor="true" lower_angle="12" upper_angle="45" enable_limit="false" collide_connected="false" bx="96" by="9" b2_Xoffset="-169" b2_Yoffset="-8" ax="285" ay="17" b1_Xoffset="0" b1_Yoffset="0"/>
<sprite type="Joints.RevoluteJoint" id="12" body1="caterpillar11" body2="caterpillar10" motor_speed="2.0" torque="1000.0" enable_motor="true" lower_angle="12" upper_angle="45" enable_limit="false" collide_connected="false" bx="96" by="9" b2_Xoffset="-189" b2_Yoffset="-8" ax="305" ay="17" b1_Xoffset="0" b1_Yoffset="0"/>
<sprite type="Joints.RevoluteJoint" id="13" body1="caterpillar12" body2="caterpillar11" motor_speed="2.0" torque="1000.0" enable_motor="true" lower_angle="12" upper_angle="45" enable_limit="false" collide_connected="false" bx="96" by="9" b2_Xoffset="-209" b2_Yoffset="-8" ax="325" ay="17" b1_Xoffset="0" b1_Yoffset="0"/>
<sprite type="Joints.RevoluteJoint" id="14" body1="caterpillar13" body2="caterpillar12" motor_speed="2.0" torque="1000.0" enable_motor="true" lower_angle="12" upper_angle="45" enable_limit="false" collide_connected="false" bx="96" by="9" b2_Xoffset="-229" b2_Yoffset="-8" ax="345" ay="17" b1_Xoffset="0" b1_Yoffset="0"/>
<sprite type="Joints.RevoluteJoint" id="15" body1="caterpillar14" body2="caterpillar13" motor_speed="2.0" torque="1000.0" enable_motor="true" lower_angle="12" upper_angle="45" enable_limit="false" collide_connected="false" bx="96" by="9" b2_Xoffset="-249" b2_Yoffset="-8" ax="365" ay="17" b1_Xoffset="0" b1_Yoffset="0"/>
<!-- END Monster construction -->
</level>
"""
MonsterBuilder.createFromXMLString(lb,xpos,xml)
| 215.761905 | 320 | 0.732289 | 1,348 | 9,062 | 4.807864 | 0.095697 | 0.025459 | 0.034717 | 0.043975 | 0.817004 | 0.807283 | 0.807283 | 0.801728 | 0.798025 | 0.798025 | 0 | 0.089556 | 0.06599 | 9,062 | 41 | 321 | 221.02439 | 0.676158 | 0 | 0 | 0 | 0 | 0.763158 | 0.986868 | 0.317038 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.026316 | 0 | 0.052632 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
1125a7626ad2e7b7abebffde1f2e22ef98a23e97 | 7,416 | py | Python | app/tests/admin/governance/credential_definitions/test_credential_definitions.py | didx-xyz/aries-cloudapi-python | 0c8004265c4bfd88f313a152f2757ec0441740a7 | [
"Apache-2.0"
] | 7 | 2021-05-19T17:50:31.000Z | 2022-01-16T13:52:34.000Z | app/tests/admin/governance/credential_definitions/test_credential_definitions.py | didx-xyz/aries-cloudapi-python | 0c8004265c4bfd88f313a152f2757ec0441740a7 | [
"Apache-2.0"
] | 181 | 2021-05-25T14:55:14.000Z | 2022-03-30T11:37:34.000Z | app/tests/admin/governance/credential_definitions/test_credential_definitions.py | didx-xyz/aries-cloudapi-python | 0c8004265c4bfd88f313a152f2757ec0441740a7 | [
"Apache-2.0"
] | 5 | 2021-06-02T06:57:52.000Z | 2022-03-23T10:23:07.000Z | import pytest
from aries_cloudcontroller import AcaPyClient
from assertpy import assert_that
from httpx import AsyncClient
from app.admin.governance.credential_definitions import (
CredentialDefinition,
create_credential_definition,
get_created_credential_definitions,
get_credential_definition,
router,
)
from app.admin.governance.schemas import SchemaDefinition, create_schema
from app.tests.util.ledger import create_public_did
from app.tests.util.event_loop import event_loop
from app.tests.util.string import get_random_string
BASE_PATH = router.prefix
@pytest.mark.asyncio
async def test_create_credential_definition(yoma_acapy_client: AcaPyClient):
# given
definition = SchemaDefinition(name="x", version="0.1", attributes=["average"])
await create_public_did(yoma_acapy_client)
schema_definition_result = (
await create_schema(definition, yoma_acapy_client)
).dict()
credential_definition = CredentialDefinition(
schema_id=schema_definition_result["schema_id"],
tag=get_random_string(5),
support_revocation=False,
)
# when
result = (
await create_credential_definition(credential_definition, yoma_acapy_client)
).dict()
# then
written = (
await get_credential_definition(
result["credential_definition_id"], yoma_acapy_client
)
).dict()
assert_that(written).is_not_empty().contains_key("credential_definition")
assert_that(written["credential_definition"]["tag"]).is_equal_to(
credential_definition.tag
)
@pytest.mark.asyncio
async def test_create_credential_definition_via_web(
yoma_client: AsyncClient, yoma_acapy_client: AcaPyClient
):
# given
definition = SchemaDefinition(name="x", version="0.1", attributes=["average"])
await create_public_did(yoma_acapy_client)
schema_definition_result = (
await create_schema(definition, yoma_acapy_client)
).dict()
credential_definition = CredentialDefinition(
schema_id=schema_definition_result["schema_id"],
tag=get_random_string(5),
support_revocation=False,
)
# when
result = (
await yoma_client.post(BASE_PATH, json=credential_definition.dict())
).json()
# then
written = (
await get_credential_definition(
result["credential_definition_id"], yoma_acapy_client
)
).dict()
assert_that(written).is_not_empty().contains_key("credential_definition")
assert_that(written["credential_definition"]["tag"]).is_equal_to(
credential_definition.tag
)
@pytest.mark.asyncio
async def test_get_credential_definitions(yoma_acapy_client: AcaPyClient):
# given
definition1 = SchemaDefinition(name="x", version="0.1", attributes=["average"])
definition2 = SchemaDefinition(name="y", version="0.1", attributes=["average"])
await create_public_did(yoma_acapy_client)
schema_definition_result_1 = (
await create_schema(definition1, yoma_acapy_client)
).dict()
schema_definition_result_2 = (
await create_schema(definition2, yoma_acapy_client)
).dict()
credential_definition_1 = CredentialDefinition(
schema_id=schema_definition_result_1["schema_id"],
tag="tag",
support_revocation=False,
)
credential_definition_2 = CredentialDefinition(
schema_id=schema_definition_result_2["schema_id"],
tag="tag",
support_revocation=False,
)
await create_credential_definition(credential_definition_1, yoma_acapy_client)
credential_definition_result_2 = (
await create_credential_definition(credential_definition_2, yoma_acapy_client)
).dict()
# when
credential_definition = (
await get_created_credential_definitions(
schema_id=schema_definition_result_2["schema_id"],
aries_controller=yoma_acapy_client,
)
).dict()
# then
assert_that(credential_definition["credential_definition_ids"]).contains_only(
credential_definition_result_2["credential_definition_id"]
)
@pytest.mark.asyncio
async def test_get_credential_definitions_via_web(
yoma_client: AsyncClient, yoma_acapy_client: AcaPyClient
):
# given
definition = SchemaDefinition(name="x", version="0.1", attributes=["average"])
await create_public_did(yoma_acapy_client)
schema_definition_result = (
await create_schema(definition, yoma_acapy_client)
).dict()
credential_definition = CredentialDefinition(
schema_id=schema_definition_result["schema_id"],
tag="tag",
support_revocation=False,
)
credential_definition_result = (
await create_credential_definition(credential_definition, yoma_acapy_client)
).dict()
# when
credential_definition = (
await yoma_client.get(
f"{BASE_PATH}/created",
params={"schema_id": schema_definition_result["schema_id"]},
)
).json()
# then
assert_that(credential_definition["credential_definition_ids"]).contains_only(
credential_definition_result["credential_definition_id"]
)
@pytest.mark.asyncio
async def test_get_credential_definition(yoma_acapy_client: AcaPyClient):
# given
definition1 = SchemaDefinition(name="x", version="0.1", attributes=["average"])
await create_public_did(yoma_acapy_client)
schema_definition_result_1 = (
await create_schema(definition1, yoma_acapy_client)
).dict()
credential_definition_1 = CredentialDefinition(
schema_id=schema_definition_result_1["schema_id"],
tag=get_random_string(5),
support_revocation=False,
)
credential_definition_result = (
await create_credential_definition(credential_definition_1, yoma_acapy_client)
).dict()
# when
result = (
await get_credential_definition(
credential_definition_result["credential_definition_id"], yoma_acapy_client
)
).dict()
# then
assert_that(result).contains_key("credential_definition")
assert_that(result["credential_definition"]).is_not_none()
assert_that(result["credential_definition"]["tag"]).is_equal_to(
credential_definition_1.tag
)
@pytest.mark.asyncio
async def test_get_credential_definition_via_web(
yoma_client: AsyncClient, yoma_acapy_client: AcaPyClient
):
# given
definition1 = SchemaDefinition(name="x", version="0.1", attributes=["average"])
await create_public_did(yoma_acapy_client)
schema_definition_result_1 = (
await create_schema(definition1, yoma_acapy_client)
).dict()
credential_definition_1 = CredentialDefinition(
schema_id=schema_definition_result_1["schema_id"],
tag=get_random_string(5),
support_revocation=False,
)
credential_definition_result = (
await create_credential_definition(credential_definition_1, yoma_acapy_client)
).dict()
# when
result_json = (
await yoma_client.get(
f"{BASE_PATH}/{credential_definition_result['credential_definition_id']}"
)
).json()
# then
assert_that(result_json).contains_key("credential_definition")
assert_that(result_json["credential_definition"]).is_not_none()
assert_that(result_json["credential_definition"]["tag"]).is_equal_to(
credential_definition_1.tag
)
| 31.828326 | 87 | 0.721548 | 817 | 7,416 | 6.135863 | 0.105263 | 0.251347 | 0.086774 | 0.060642 | 0.860363 | 0.846998 | 0.821664 | 0.773389 | 0.724516 | 0.687413 | 0 | 0.007784 | 0.185814 | 7,416 | 232 | 88 | 31.965517 | 0.822458 | 0.01281 | 0 | 0.646067 | 0 | 0 | 0.089975 | 0.061627 | 0 | 0 | 0 | 0 | 0.073034 | 1 | 0 | false | 0 | 0.050562 | 0 | 0.050562 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
28afe6a066200affacc8d14506bacdedd2703017 | 3,639 | py | Python | isystem_to_mqtt/test/test_tag_definition.py | pierok13/isystem-to-mqtt | b3f68a29ee2db37db78d5ba2e11f3c3844b374e5 | [
"MIT"
] | 10 | 2016-07-03T13:43:04.000Z | 2020-11-08T15:30:09.000Z | isystem_to_mqtt/test/test_tag_definition.py | pierok13/isystem-to-mqtt | b3f68a29ee2db37db78d5ba2e11f3c3844b374e5 | [
"MIT"
] | 5 | 2016-08-05T10:41:56.000Z | 2022-01-09T22:28:15.000Z | isystem_to_mqtt/test/test_tag_definition.py | pierok13/isystem-to-mqtt | b3f68a29ee2db37db78d5ba2e11f3c3844b374e5 | [
"MIT"
] | 13 | 2016-08-03T08:01:14.000Z | 2020-11-11T22:49:30.000Z | """ Unit test for TagDefinition class """
try:
import unittest.mock as mock
except ImportError:
import mock
import unittest
import paho.mqtt.client as mqtt
from .. import convert
from .. import tag_definition
class TestTagDefinion(unittest.TestCase):
""" Test for TagDefinition """
def test_simple_puplish(self):
""" Test a simple call """
client = mock.MagicMock(spec=mqtt.Client)
tag = tag_definition.TagDefinition("test/test1", convert.unit)
tag.publish(client, "base/", [1], 0)
client.publish.assert_called_once_with(
"base/test/test1", 1, retain=True)
def test_multiple_same_puplish(self):
""" Test a simple call """
client = mock.MagicMock(spec=mqtt.Client)
tag = tag_definition.TagDefinition("test/test1", convert.unit)
tag.publish(client, "base/", [1], 0)
tag.publish(client, "base/", [1], 0)
client.publish.assert_called_once_with("base/test/test1", 1, retain=True)
def test_multiple_puplish(self):
""" Test a simple call """
client = mock.MagicMock(spec=mqtt.Client)
tag = tag_definition.TagDefinition("test/test1", convert.unit)
tag.publish(client, "base/", [1], 0)
tag.publish(client, "base/", [1], 0)
tag.publish(client, "base/", [2], 0)
calls = [mock.call("base/test/test1", 1, retain=True),
mock.call("base/test/test1", 2, retain=True)]
client.publish.assert_has_calls(calls)
self.assertEqual(2, client.publish.call_count)
class TestMultipleTagDefinition(unittest.TestCase):
""" Test for MultipleTagDefinition """
def test_simple_publish(self):
""" MultipleTagDefinition Test a simple call """
client = mock.MagicMock(spec=mqtt.Client)
tag = tag_definition.MultipleTagDefinition(
[("test/test1", convert.unit),
("test/test2", convert.unit)])
tag.publish(client, "base/", [1], 0)
calls = [mock.call("base/test/test1", 1, retain=True),
mock.call("base/test/test2", 1, retain=True)]
client.publish.assert_has_calls(calls)
self.assertEqual(2, client.publish.call_count)
def test_multiple_same_puplish(self):
""" MultipleTagDefinition Test multiple call with same value """
client = mock.MagicMock(spec=mqtt.Client)
tag = tag_definition.MultipleTagDefinition(
[("test/test1", convert.unit),
("test/test2", convert.unit)])
tag.publish(client, "base/", [1], 0)
tag.publish(client, "base/", [1], 0)
calls = [mock.call("base/test/test1", 1, retain=True),
mock.call("base/test/test2", 1, retain=True)]
client.publish.assert_has_calls(calls)
self.assertEqual(2, client.publish.call_count)
def test_multiple_puplish(self):
""" MultipleTagDefinition Test multiple call with different value """
client = mock.MagicMock(spec=mqtt.Client)
tag = tag_definition.MultipleTagDefinition(
[("test/test1", convert.unit),
("test/test2", convert.unit)])
tag.publish(client, "base/", [1], 0)
tag.publish(client, "base/", [1], 0)
tag.publish(client, "base/", [2], 0)
calls = [mock.call("base/test/test1", 1, retain=True),
mock.call("base/test/test2", 1, retain=True),
mock.call("base/test/test1", 2, retain=True),
mock.call("base/test/test2", 2, retain=True)]
client.publish.assert_has_calls(calls)
self.assertEqual(4, client.publish.call_count)
| 36.029703 | 81 | 0.618851 | 435 | 3,639 | 5.087356 | 0.121839 | 0.056936 | 0.08676 | 0.10845 | 0.825124 | 0.825124 | 0.821509 | 0.765025 | 0.765025 | 0.765025 | 0 | 0.02195 | 0.236329 | 3,639 | 100 | 82 | 36.39 | 0.774379 | 0.085738 | 0 | 0.701493 | 0 | 0 | 0.100887 | 0 | 0 | 0 | 0 | 0 | 0.149254 | 1 | 0.089552 | false | 0 | 0.104478 | 0 | 0.223881 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
28c78fcb72077f4bf648e3bdc625016302d06efa | 196,927 | py | Python | modules/resources_rc.py | eyadgad/CamGUI | 361bccea1bf19063db52814e6e5a65f2850afe4c | [
"MIT"
] | 1 | 2021-08-03T01:41:34.000Z | 2021-08-03T01:41:34.000Z | modules/resources_rc.py | eyadgad/Dracula-GUI | 361bccea1bf19063db52814e6e5a65f2850afe4c | [
"MIT"
] | null | null | null | modules/resources_rc.py | eyadgad/Dracula-GUI | 361bccea1bf19063db52814e6e5a65f2850afe4c | [
"MIT"
] | null | null | null | # Resource object code (Python 3)
# Created by: object code
# Created by: The Resource Compiler for Qt version 6.1.2
# WARNING! All changes made in this file will be lost!
from PySide6 import QtCore
qt_resource_data = b"\
\x00\x00\x1c\xfc\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x19\x00\x00\x00\x14\x08\x06\x00\x00\x00xw\x96\xbd\
\x00\x00\x00\x01sRGB\x00\xae\xce\x1c\xe9\x00\x00\x00\
\x04gAMA\x00\x00\xb1\x8f\x0b\xfca\x05\x00\x00\x00\
\x09pHYs\x00\x00\x0e\xc3\x00\x00\x0e\xc3\x01\xc7o\
\xa8d\x00\x00\x02\x9aIDATHK\xdd\x95\xbbk\
\x14Q\x14\xc6w7\xecc$\xb8b\x93\xa0\x04\xf1\x01\
\xe2\x0bAH\xa1\x82AD\xd0&*\xa2\x85\x8d\x85\xda\
(D\x8c\xc4\xce\xc6@*!\x08\xf9\x13B\x82\x04E\
\x09ha\xa7\xa8\x10\x8d\xa4PP$\xc6l\xb5\x22A\
\x5cg\x1f3Kf\xfc\x9d;g\xc7\xccl\xa2\x1b\xd2\
\xf9\xc1\xc99\xe7;\xaf{\xef\xec\xbdI\xfc7H\xaa\
NT\xab\xd5\x1d\xc9d\xb2\x17\xb3=`\xd6\x04\xdb\xf7\
\xfdG\x96e}\x11\xc7\x0c\xa9\xd5j\xbb\x18\xf0\x86@\
\x11w^8\x01\x9c\xaff\x13\xc8\x0d\x17\x18\x07u[\
P\x1d\xe4t\xe7r\xb9\x8f\x86t]\xf7\xba\xe38>\
\xbb\xd9f\x88U\x82f)5\x0d\xa4\x8f\xf4\xa3o\x9f\
\xf8\x8d\xa0\xd1\xd9l\xb6n\xbc\x16Q\xa9T\xbah\xf6\
\x01\xf9\x81}Q\xe9\xa5}\xda\xe4Od\x05\x1c\x9bZ\
\xad!\x95J\xedE\xed\xe6x\xd6c\x9f\x0c\xd8fD\
\x86,\x07\xf9^+\x9d?\xe7\xfd\x14u\x9ex?\xbb\
\xb9\x16\xb0\xcd\x88\x0c\xa1H\xad\x00\x14^@\xcd\xa0\xc7\
\x0b\x85\x82\x15\xb0Qd2\x99I\xea\x86\xf3\xf9\xfc\x82\
R\x02\x87\xc1/\x91\x92\xfa\xe6\xc3\xdf\xa0\x91\xefy^\
\x97R\xc2\xf5\xb3\x0b\x0f\xfe1\xfa\x17\xf2\x9ax\x87\x86\
\x0dd\x11\xf0E>\xf4Q\xa5B\xc4\x7f\x0c\x91!r\
4\xd8w\xc5G\x06%\x8e\xde\x8f\xcc\xab\xec\x13\x8e\xe6\
f\x11\xc8\x1cB\x0b\xf7\x88\xf0\x0c<A\xce,\x5c\x19\
\x19\xa1gV\xf8p\x08\x09\xdb\x09\x8c\x22\x8b\xd8WM\
P\xc1\xf0N\xf8)\xa4D\xfe\x98\xe4#w\xe0\xdb\xd0\
\xf2]R\xc4nI-\xfe\x04r\x05\xdb\xa6\xcf\x13\xd3\
\xa01\x04\xfd\x96@\x159k\x021\xb0*\x8b\xd8}\
\xc9\xa5xR\xe9\x06?\x86,\x22\xb7\xe54\x90v\xf2\
^\xe0\x7f7I\x8d!\x10\xf2{\xef1\xe4\x0a\x90\x06\
\xe4\x0e\x92\xfb\xb5\x5c.o\x92\xbb\x82-\x8b+!\xa7\
$\x07\xbd\x93\x1c\xb9?\x0b,\xe6\xb8)\x5c\xb2\x93\xc3\
\x86h\x01r\xd6\xe4\x1f\xa2a\x91\xdaYd\x8f\xf0\xf8\
\xa7\x91\x9f\xc8\x0c\x03\xfe\xbc \x10\xab~V\xc8\xbfD\
\x1d\xcay\xc6\xc0\x8d\xfam\x86\xe0<\x86\x8f\xe2\xaf\xd3\
\xd4\xc8\x039\x85\xf9\x8d`\xf8@\xc6\xc1\xad\x9e\xe3^\
\x5c&\xbf\x8f\xfc{P\xc3\xf8\x03\xd4l\xa8\xd7\xeb\xe3\
\xf8=\xf07\xe1FL\x81\x22\xbc\xc9\xacf++8\
\x83\xf9\xb7\xa7\xfe=\x17\xef!C\xde\xd1\xcc\xe1\x8d:\
h\xdb\xf6\x81t:\xfd\x80X\x06\xee\x1c\x03^\x05\xa9\
k\x00\x8b\xd9,G\x8b|f\xd84\xdaE?g7\
\x9d\x9a\xd2\x84\x7f\xbe]qp,\xf2\x10~\xa2\xe94\
z\x82\xa1\xbd\xec\xe8\x18\xbb\x90\xffE\xcb \x91\xf8\x0d\
\x1dO<\xec\xdb:\x86\xb9\x00\x00\x00\x00IEND\
\xaeB`\x82\xa3_\x94\xd4\xc7\x0d\xf8\xbau\xeb6\xaf\
^\xbd\xfa\xbbS\xa7N=6y\xf2\xe4\xc16\xedu\
\xdao\xb6\xfb\xef>{S\x9f\xb2\xc2a\x0eU\xcf\xa3\
\xcfF\xe7\xcd\x1fK\x97\x7f\xb7\xab\x97\x85C\x9e\xe3k\
'o;yp\xec=o\xbd\xf5V\xef\xb1c\xc7\xa6\
n\xdb\xb6m\xcd\x96-[>/\x99L6o\xde|\
\x0f\xd4\xffQ\x168\xea\x7f?q\xe2\x04\x12\x98\x1dk\
9}\xfa\xf4_\x1f9r\xe4:u\xc1\xacAC\xc0\
\x13\x81\x83\x07\x0f\xce\xec\xeb\xeb;)\xb1\xe7\xb3g\xcf\
N\xf0\xecV^\x1c\xdc\x12x\xdd\x83E\xc2\xb9s\xe7\
\xee\x7f\xef\xbd\xf7D\xb3\xb0|\x04\xd6\x82!\xf0>\x02\
\xe7\xce\x9d\x1b\x0d\x13\xc3a\xaeMK\x82\xbc\xd9{\xc2\
={\xf6,c*\xb1\xbes\xe7\xce\x8f\xc2\xe5\xc9\xdf\
3\xeb[5C@\x1d\x81\xf1\xe3\xc7\xd7\xc0.\x97\x13\
\x09\xbd\xdb\xdf\xb7o\xdf\x9fxW\xa2\x0al\x12B\xa7\
\xb7r:\xdd\xb0a\xc3C\xfd\xfd\xfd8\xf5\xdb\x8f!\
\x90\x14\x020\x13\x1e[\xbbv\xed\xb78B\xbd\xf6\xda\
k\xf38\xf5Du\xa6L\x99\xc2\xba)\xf1\xfc\xf3\xcf\
/\x10ul\x95\x0d\x81\x80\x08\xd0\xadz\xefm\xd6\xc2\
\x85\x0b_\x0c(V\xc7\xa6\xbd\x05\xc5\xa9\xfe\xf4\xe9\xd3\
\x93\xf2\x10\xd6\xfa4\x04\x5c\x10\xa0\xc3Bo\xdb\x86I\
\xe9\x94K\xfb\xdae\xbc\x05E\x12\xc2)\xd2\xa5\xda\x82\
X{\x86\x80\x16\x02d\x9f\x1c\xdb\xc6\xa0\x15\xd6\x0f{\
O\xc8\xea\xcd*\x19\x02\x86\xc0\x08\x04\xf2 a\x0a\x0e\
]3\x05C \x19\x04\xf2 a2\x837A\x0c\x81\
\x14\x100\x12\xa6\xa0\x05\x93\xa1\xd2\x08\x18\x09+\xad~\
\x1b|\x0a\x08\x18\x09S\xd0\x82\xc9Pi\x04,~\xb3\
\xd2\xea\x0f7x\x0ch\x86\xa8\xaa\x85\x18\xce\x85\xd1$\
\x17.\x5c\x18;z\xf4\xe8\xda\xd5W_\xfd?\xe0\xd8\
\xde\xb7h\xd1\xa2}\x10\xa1\xf2\xdbp\x12T\xa3e\x8e\
/\x05\xfd\x84\xe3\xab\x01O5G\x09\x11Q\x0bW\xad\
Z\xf5\x13\x18}\xa6}@z\xcc_\xc3\x9d\xd4\xcf\xa7\
\x14\xcc\x9f\x87\x9fPb)\x99 \xb7S\x84\x91P\x02\
y\xbau\xdf|\xf3\xcd^H\x05\xf8S\x17\xf2\xb5)\
S\x83p\xb1;R\x18\x9d\x910\x05-\x98\x0c\xde\x08\
P<0F\x8c\xb0>\xcc\x8dz\x90F\x10\x1f\x0e\xca\
\xf5\xc7H\x98+\xfc\xd69\x07\x01\xbaG'&`\x83\
\x88\x90\xbf\xf6\xeb\x1c9\xb4\xea\x18\x09\xb5\x90\xb4v\xa2\
\xf0\xea\xab\xaf\xe2\xf5\x1d5\x026\x88\x08\x8f\xaf|\
%\xca\x00\xdatb$\xcc\x0by\xeb\x97\x85\x00\xe6\xdd\
\x94.A;\xd5\x87\x19\x16\x13JG\xff\xa9\x0a\x09\xc7\
EG\xd6:TG\x00\xf2\xcd~9\x14\x01\xa9]\xf6\
\xad\x04\xc9`\x8bFB\xd62\x04\x06i$\x94XI\
\x02u\xe1N(f\x9d\x16\x1d\xc28\xd4\xafm\xdd\xba\
uu\xec\xe1\x1a\x09c#n\xfd\xb1\x10\x80\xac\xd3\x98\
\xe2/4\x09\xeb\x98\x91\x8f%\xa0\xa0\x92\x91P\x00\x9e\
U\x8d\x87\xc0\xd2\xa5Kw\xc7 !\xf6\x01\xe9\x08\xaf\
\x8f7\xb2Q\xa3\xf2 \xa1\x85\xad\xc5\xd4p\x09\xfa\x02\
#\x1d;q\xe2D\x95\xd7\x88\x5c\xe0\xd8\xbbw/&\
\x14;\xeaR\xb6\xa8e,\x80\xbb\xa8\x9a\xcbI\xee\xc3\
\x87\x0f\xcf\x8c\xd9\xf5+\xaf\xbcR\xfa\xc4`F\xc2\x98\
\x16U\x82\xbe\xce\x9c9\xd3\x1bs\x18o\xbc\xf1\xc6\x87\
b\xf6\x97G_F\xc2<P\xb7>\x9d\x11\xa8\xd5j\
\xa5\xb7\xd1\xd2\x0f\xd0Y\xdbV0I\x04\xc6\x8e\x1d{\
!I\xc1\x14\x852\x12*\x82Y\x85\xa6\xe6\xcc\x99s\
\xe68\x17,X\xf0R\xcc\xfe\xf2\xe8KBB{\
\x970\x0f\x8d\xe5\xdc'<ap\x06D\x88\x961o\
\xd6\xacY\x07s\x1er\xd2\xdd\xb3\x9c\xb5\x161\x93\xb4\
N\x9d\x84\x837%\x1f%\x22\xb2l\xc0\xa7n\xec'\
\xc7\xf2\xf0\x13JfB'\x85Y\xa1\xf2!\x00\x8f\xba\
n\x8b1\xaa\x81\x81\x81\x1f\x80O\xf2\x9d\x18}\x15\xb5\
\x0f\xd6W\xd0f\xc2\xa2\xaa{\xb8\xdcp\x83\xe2w>\
3\x1a\xa7l\x1e\x8f\x07\xe51\x13J,\xc2H(A\
\xaf\xe0u\xe1\x91\xd8\x15\x1cb\xb9\xd6\x81T\x19\x8f\xe7\
\x01\x91\x910\x0f\xd4\xadO6\x02k\xd6\xacy\xcc\x95\
T\x9e\xe5j\x90\xb3\xe6J\xb6`\x82\x8aFB\x01x\
V5>\x02\x90%\xad\x87n:\xb0VE\x1d\x88Y\
\x83\x0b\xbd\xb7\xc5\x1f\xcd\x1fz4\x12\xe6\x85\xbc\xf5\xcb\
F\x00\x8dv\xee\xdc\xb9\xafz\xcet\x9dH\x9b+\x01\
\x8d\x84l3\xb0\x8a) u[\xf4\xf5\xf5\x9d\x84\
\x9c57\xe4=\x16\x9b\x09\xf3\xd6\x80\xf5/B\x003\
\xaf\xd1\x8b\xb5>\xcb\xd3\x1a\xa4\xca\xb8W\xd4\xb1be\
\xca\x8b\xeb#\x7f\xa3l.\xe988\x82b\x06nK\
o\xa1h4)6\x05'\xa7+\xc1\xc7\xf7=\x90\x0d\
\x0d\xb3\xf1\xdbl/5H\x85\xff\xc2C\x0f=\xf4E\
H\x95qEJc\xc8\x83\x84\x92\xd03V\xe8\x12\x0e\
\x12\x1c\xb0\xe7S\x02\xded\x09\x87\x00\xbc\x01\x7f\xed\xd1\
\xa3G\x87n\xc7\xc3\xb2\xf3\x8d\x993g\x1e\x1d?~\
|\x92\x81\xd9d\x9f\xef2\x10A>D\x0f~\xa9\xec\
LH\x89\x8e\x18z\xb2*\xa9#\x90\xc7L\x18\x9d\xb9\
\xa9+!K><\x96\x9f:u\xea\xeb\x90\xb2\xfd\xc1\
\xac\xb2\xf6wC 4\x02\xdc\x99\xb0\xd0\xaf2-[\
\xb6l(\xc9\x11\xeci\xfe64\xc8\xd6~\x5c\x04\xf2\
\x98\x09%#\xac\x1c\x09\xd7\xae]\xfb-\x00l\xd8\xb8\
!\xfd\xdfz\x09\x88V7-\x04\x8c\x84i\xe9c\x98\
4\xf0>\xc2W[\x09\xd8\xf8\xff\x90\xa4\xf63\x09\x8b\
n\xa2y `$\xf4\x00+fQ\x9a\xed\xba\xce\xfc\
\xdb\xb7o\xbf+\xa6L\xd6W\x18\x04\x8c\x84ap\x15\
\xb5\x0a>\xaf\x8fw\x9a\x01[\xff{*\x0f]\x8a\x06\
\x5c\xf1\xca\xe4\xc7\xe6l\xb5\x0a\xe5\xac/\xcc\xc1\x0c\xdc\
g\xfb#\xb0I\xaf77\xe8\xbd\xbe\x8a\x9brq\x87\
o$LHw\xe0d\xbe\xce\x97\x8043\xd6^~\
\xf9\xe5\x9b\x12\x1a\x8a\x89\xe2\x81\x80\x91\xd0\x03\xac\x90E\
\xc1\x19?\x89I\xc0\xa18BxC!j\xa6\xea\x90\
xT\xa9mL\xf3\xef\xba\xfdh)g\xcbQ-C\
\x01g\xfc\x07f\xcc\x98q\x98\xa9\x88a\xf1\x910\x9b\
N\xd3\x92\xcb\xda\x89\x83\x00\x91\xd0k\x0b\xd2X\x01\xc5\
\x91px/\x9c\xcd+\x06p'\xbd'\x5c\xb1b\xc5\
\x7f(\x10phF<q\xe2\xc4Uy(\xc7\xfa\xe4\
!`$\xe4\xe1\xa6V\x0b\xd25|G\x91\x80CD\
\xcc+U\x83\x1a0\x15j(\x0f\x12\xda\xd3hd`\
\xe0\x8c\xff\xca\x03\x0f<\xb0&\x80\xbd\xf5\xc0\xcd\xf3\x03\
\xe7\xce\x9d\xbb\x0en\x0e\xd8\xed\x91\x00\x00k6\x097\
|.\xac_\xbf\xfe\x11\xc84\xfe\xda\xb4i\xd3\x8e`\
\xdb\xa7N\x9d\xba\xea\xf5\xd7_\xbf\x06\x1e\xa7\xe9{\xe4\
\x91G\xfeZ\xb3?i[\xa5Y\x8e\xc6xy\x16\xf7\
\x99@\xc4R\xdf\xa5\x04\x9f\xea'\xa4FU\x84\xfa\xa0\
\xc71\xe0\xbe\xba\x05\xefC\xc2\xf6\xe5)\x90\xb9qg\
2\xba\xf8\xa5 !^@%\x10Y\xe3\xf1Y\xbe.\
^\xbc\xf8\xd9\xe8Z\x8a\xd4!E\x0c\xd57l\xd8\xf0\
\xcf\x91\xbaL\xa6\x1b\xd8n\x5c\x01\xa1\x8b\x9f\xceC \
\x96\xd1\xa6t0\xc3q\xc6\xfb\x90\xae]Y\xb8\x85\xb1\
+\x0fe\x85\xec\x93\x02\x14\x86\xec\x01\x97\xf6!\xfb\xb3\
\xb6\xdfG\xa0\xd0$\x148\xe3Y\xe3n&\xe4\xaaU\
\xab~\x5c\x16C\x02\x7f\xe8\x87\xdb\xad$\xec\x9aW\x1c\
\x0d\xb3\x8c1\x85\x99\x10\x9c\xf1\x97\xc7Z\x82v\x9a9\
\xe1Z\xd47\xe3\xa8)\x5c/YA\x0d\x9b6m\xfa\
\x5c\xb8\xde\xadeD\xa0\x90$Dg|\x80\x84\xb5,\
,\x8a\xbc\x7f\x82\xc3\x89\xb1.A\x0dy\xed\x95\xaaB\
Q\x96\xe1\xe5=\x13\xd2\x9e\x8c%;\xf7\xc3\xd3\xad^\
Q\xd3dx\xe0X\xa3\xc3\xaf\xaa\xf0\x22\xea8Y\x86\
\x9c'\x09i\x09\xc8\x92\xbb\x99H0\x03\x1c\x81\xb66\
i\x912\xa5\xbc\x9b.\x16\xc4\xc0\xb1\x06\xd7\xbc>\xe2\
\xd2\xb6\x95\xf1C\x80e\xccy\x91\x90N\xecX2\xb7\
\x90\xad\xd6\x88\x09\x85\x97\x83\x9e\xd0\x22\x22\x1cd\xfc\x8d\
\x1f\xfc\xf9\x94\x16\xe0X\x83\xd3\xe8\x85\xf9H]\xde^\
Y\x06Mi\xc6\xa3\xa2\x02\x07\x04k\x95\xc8\x82\xd7\x94\
nl\x16^3\xd64\xf5\x83\x0c\x85\xa0\x86\x1a\xa4\xba\
\x9f\x13U\xf9%\xef\xac\x10$\xf4\xb9\x19\x9fETz\
\x93o\x84Z\x15\x1fD\xa9\xa7\x9a&\x83\xf6u,\x9d\
\xb7YI\x5c_rnD\x1b\x1eK!1gBM\
g|\xb7Y\xca\xf5\xa40\x8b\xe4\xf4w\xdc?\xdd\x11\
M\x8b\x0e\x1d\xd12\x92s\xbd\xa7\x93\x8d\xd4\xecv\x89\
\x03\xf0\x0eE\x92&!\xed\xdbT\x0c\x07N07f\
\xe1\x01\x1f\x97\x09\x8aOH\xe3\x13aK\xb2\xfa\x8c\xf1\
\xf7\x80A\x0d\xb9=\x04\x1a\x037\x9f>\xf2x\x8bb\
\x02D\xaasr\xfd;\x8f\x0b\x9d\xf1\xbd\xbd\xbd\xa7\xa1\
\x82d|\x17\xfb\xc3\x93\xc0-[\xb6\xacs\xe9\x1cb\
\x08{\xe1U\xa2A\x8d~qV\x84\x19\xe8\x8f\x17-\
Z\xf4\x92K\xdf!\xcah\xe2\xd8N>\xf8h\x9d=\
y\xf2d?\xd8\xc3\xdb!\xe4\xe7\xb4\x09:\xbc\xfc\xf8\
\xf1\xe3\xd7\x1c;vl\xda\xbb\xef\xbe{)\xb61i\
\xd2\xa4\xff\x9d={\xf6\xaf\xfa\xfb\xfbO\xc1M\x98\xf7\
8\xed\x86\xaa\x93\xe4L\xa8\xb94\x84\x80\xeb=\xbe\xe0\
\xc12k\x0a\xd4Q\x99\x81\xb1\x1d8\xc8\x98\xed+\x83\
Fy\x0cj\xc07\x03\xf1c\x10\xf2\x17\xf7\xd3y\xdf\
.A\x8c\xd1M\x04A\x1cG\xb2\xc6\x0a\xafI\xbd\x88\
\x07T\xb0B\x98\xaa\x81\xb3\xb4\x0d\x96rB\xef\x09\xc1\
\x89\xbc3\x0bH\x97\xbfcT\x0d\x12\x9a\x03\x12)H\
\x8d\x88y\xa4\xc9\x80S\xdf\x1d.8i\x94Y\xb0`\
\xc1\x8b\x1c\x9c\xa5up\xc9\x0f\x1f\xda\xe7\x98c\xa8\xc1\
%\xf0\xc7\xf2>\xedM\x8e\x84\x00\xcaV&\xa0\xadc\
\xa9I\xdf\xcd\xa3Wg\xd5\x88\x18\xf3 \x83\xe1\x8cg\
\xd9B\xb3\xaeb\xde.\xc1m\x03\x05\xd1\x8b\xe5\xc6\xd5\
\x8a\xcb\x99\x81\xf4\x83\xd1\xa9>k\x00\xa1fB\x81\x13\
y\x04\x01\xe1f\xc0\x0c\x0d\xd0\xf0\xe2\xa7\xd2\xd2\xf4\xe2\
\xa5Q\xe9\x87\xc1eL\x8a8z\xdb\x07\x04?<\xe9\
\x22\xa3\xa4\x0c\xcc~\xb7+\xe9d\xd8\xf8`Y\xbd?\
\x8f4&\xde \xe3\x97/D\xc4\x8cK\x9az\xc7\x19\
\x12\xa3:\x16H\x94\xdcZ\x17\x94~\x9b\x92\xd2/\x12\
\x11\xf0\xbbLS\xbe\xe6\xb6\xc8\x0d\xc3\xd2\xab#\xbe\x99\
mS\x9e\x9f C\xa47C\xb4V'\xed\xc6\x82{\
\xf8\xb9A\x84\xef\xd0h&\xa0\xed\x14\xa3MBr\xc6\
\xab\x00\x0b\x8e\xf2U!\x00\x04\xbf\xdfr-\x22\x86J\
\x93\x113\xc3@\x16a!\xc7\xcb\xbfi\xeb\x81\x08\xc8\
\xb2\xd9,y[\xfe\x1e\x95\x88\xac\x01i\x92P\xd3\x19\
O!Y\xda\xba\x1fj\x0f\x08\xfe)-\x22\xe2{\xef\
\x9a\x82\x06p\xc6\xb3l\xa3\xd9\x985\xafyE\x98\x01\
Glib\xcd\x88,\xa0\xb5H\xa8\xe9D\x8e\xb5\xb1\
\x86\xe5\xdeg=\xbf\xaa\x1d#N(fU\xccEM\
\x1c\xf1\x98_\xebv\x89V@;\x85\x01\xaa\xac\x94<\
u\x17%N67\x12\xd2\x01\x85\x0a\xb0\x03\x03\x03\xdf\
\x13[\xb2G\x03\x98\xa1\xcbS\x99\x1d\x89\x08'|?\
\xf2\xe8zDQ\xcd\x0c\x03\xcdKH\xe9\xed\x92\x12\x10\
\xb0\xa1\xb3\xe0D\xcc\x85\x84\xe0D\xbeD\xcb\x89\x0c\xc7\
\xe2?\x93\x181\xb7.\xcc\x18_R\x22b\x9d{\x90\
\x81\xcex\xad\xc0\xf3v\xb32a\xebm#%\x22`\
\x14\x22z\x03\x8c\x86']\x8e\x82\xc2\xffS\xc3\x801\
\xce\x93\xeb\x8c\xe7\x92\xaf\xb9\x1e,\x81\x1f\xd0\x18\x07\xee\
39\x07\x19ZA\x0dHd\xfc0\xb6b\x82$'\
'\xbc\xb3\x9dh\x110\xa5C&\xd2q\xb0\x19\xd1\x19\
\xdcfc\x93\x90\x10\xbe\xfa\xdb\xb4\x0c7\x0f\x9fN\xab\
\xa1\xc2\xe1\xc3\xd7\xb4\xc6\xe3\x93&C\xd1\x19_\xeb\xf6\
!\xc3p4\x97<4\x88A\x89\x09\x18tF\xe4\x92\
\x90\x95\x85\xba\xdb\x9b\xf1\x9e\x86\x5c\x03g\xfc\xf5\x1a\xb3\
\x99F\x1b\xf0a\xf9\xb6\xa7\xfc\x1d\xf7\x88.i2\x14\
\x9d\xf1\x98a\x00\xdfp\xec\xfaC~\xcd\xae\xfbw\x17\
\xb9\xb3\xfa\xc1\xbf\xd3\xcd\x13\x95\xb3\x02%\x9d\x8c85\
\x05\xccTm/\x1a\x095o\xc6S\x14\x8b\x8bN\xa3\
\x95\x81\x03\x96\x1f*)\xbd\xd6mF\xd1\x0cj\x80#\
\xf8Y\xae\x00u;HK\x95\x80x\xe8\x85\xc1\x0b\x88\
\xa7rv>\xd5\xc8\xa7($Tt\xc6\xd7\xc8W\xe7\
j;Q\xcbq\x0f2\xda\x90\xb7\xd6\xee\x02\xb2f\x86\
\x01N\xd2\xa6v\xb7K`V\xbe_\x03d\xe5\x19\x10\
\xeddD\xd0\x86\xe2\xd6\xa1N\xc1\xf1\x1aC\xe7]q\
\xf1\xd9\x13*:\xe3k0\x0b\xfc\x95\xca\xa8\x036B\
Nx\xd6\xc7\xad\x85\x8c5rP_\x94V\x11\xc7\xba\
$\x8fh\xf3\xed\x12-\x02*\x07\x1at\xfdP\x93\xcc\
\x1a\xfa\xc14&\x7f\xa9aJ,a\x5cI\xa8\xe9D\
\x8e\xe5\x8c\x97\x82\x8a\x07\x19Zn\x03\x90\xe5b\x9aA\
\x98\x81&\xe3\xbf5\x96\xbb\x1aKG\x5c\xc6jE\xc3\
h\x13\xd0\xe5\x03\xa3ID\xa9\xbd`\xfd`$\xd4t\
\x22\xd3\xc1\x87\xc6x\xa3\xb4\x81i2\xe0v\xfe).\
\xbe\xad\xf5(\xe5\x86\x98\x84\x1c7HH\xc0 \xeb\xdd\
MZ\x1f\x17l\xc7\x85\x80\x8d\xf1h\x11\x11\xfa\x5c-\
\xc5(\x08\x09\xd1\xe7\xa45\x1bpn\xc6KA\xd1\xa8\
\x8f\xf7\xdd\x14\x0d\x8c\xa5\xa7f2K#s40i\
nC\x9b\x80\x9cT\x93\x1aD\x04?\xea+RlX\
\xca\xcdZ\x8e.]\xbat\xb7\xc6, \xb9\x19/\x05\
F\xa3\xber\x9a\x0c\x96\xaeP\x0f\xf4A\xd4\x18\x92J\
\x1b\x14\x18-\x9e\xd9\xc9\xc6\xda\x1eb\xb9\x0aJ\xdb\x1c\
6\xb6(\x03l\xbb>\xe4\xda_\xbbr\xac\xce\xbb]\
\xea\x05'\xf2\x16\x0d\x02\xe2,\x12\xe3\x02\xac\x04<\x97\
\xba\xcai2\xbc\xf5\x85K\xd9\x90\xf7\x17]0h.\
\x93\x12\x01\xb5\x96\xa6\xd2\x13{o\xa5\x22\xc1:\x91P\
\xd3\x89\x0c\xce\xf8\xe9\xbe\x0aN\xb5<\xf9\xe3\xb4\xbe\xfc\
>:K*7h\xa7w\x10\x99\x1fm<-\xbfG\
K\xe7\x92\x19\x11\xec\xfe\x1f%r\xf8(t\xa8l;\
\x12j:\x91\xb5o\xc6K\x00\xd2\xaa\x0b{\xa0y8\
\xbb3\x0d\x8e\xa3'L\xf7?_K~i;)\x13\
\xb016X\xc5m\xe6\xe8\x07n\xf1|_\x82\x0fG\
\xb98\x13Nh\xeeT3\xd86\xd4\xcdx\x09HZ\
u\xc1!}k$\x22&\xf5\x82\x92f\x12g$\x89\
\x86\x9b\xa5\x9dN)/\x917'`\xcf}Pb#\
\xde\x1d\xd2rt\x88\x844k\xa9|\xe15\x97\x17\x12\
PB\xd6\x05\xbf\xdf\xb2\xd0D\xf49\xaa\x0f9Vl\
[\x9b\x80ZA\xe2]\xc6\xed\xcd\x09\xb8\x96wF\x82\
\xa3w\x87D\xc2\x8b\x89\x8a4\x01.\x8a3^\x02v\
\xa3\xaeb\x9a\x8c\x11\xfa\xa3{\x8e\x1ab\x8a\xdb\x80\xd3\
\xe1>\xfa\xe0\xa8|\xa4C\xcd\x80\x8d\x81\x92[\xc9\x9b\
\x13p\x8a\xff[\x09X\xde\x1d\x22\x09\xf1\xea\x0b\x9c\x5c\
N\xd4\xfa\xa2\x17\xcd\x19/\x01\xbcQ\x17f\xab\x01-\
\xfcP'\xf8\x9b\x923\x1e\x08\x88\xc7\xf6\x17\xb3\xcb5\
\xe4\x93\xfc\xafV\x88\x5c7\xdd\x11\xc9\xbd9\x011\xc3\
\xcfHl\xc2\xbbC\x04\x15\xa3a\x5c\xef\x98e\x01\x0f\
>\xc5\x9d\x92\x01\x14\xb9.\xc5\xc2\xaa\x18)\x19{\x12\
ph\x130\xc6*\x89\xf2\xd8p\xf8P\xa7\xcb\xddl\
\xecY\x9d\x02q~\x9eE.\x97\xbfchW\x9e7\
\xe3\xd9\xa8)V\xd4L\x93\x01\xa7t?P\x14\x8d\xd5\
\x14]\xb4V\x9b\x01\xe9\xe6\x03K\x16\xd7J\x12\x02\xa2\
\x9dS\xf2/\xd7\xeeF\x94c\x91\xd0\x85`\x0ee\xec\
Y-R\x87b\x9a\x8c:\xdd\xb8g\x1b\x84\xa4b\x11\
\x09H\xb1\x9f\x22\x1e\xf8\xdc\xcbl\x87\xaf\xa8s\x07\xa2\
uj\x1fo\xc6O\x97(\xbclua?\xf7\xb0\x00\
\xcfa8k\xddp\xf0\xc1\xb8\xe9\xf6\xbd\xca\xf2:\xc6\
\xfeV\x83\x80\xa43\x1f\xa8\x92\x98\x091M=\xbe\xef\
`?-\x08h\xe5\xfbD\xc3\xf0\xc9W#U\x04\x86\
\x17j\xdd\xf4@\xd9c\x10P+\xd3\x03l'\xbe,\
\xc5/\xfaLXfg\xbcT\x19X\x9fr\xa8\xaa\xe8\
E\xc3@\xb2\xc6\x843\xa0V\xfaJ$ E\xadd\
u+\xfa\xbb\x16\x01Q^\x8ddc*\xcav]F\
Q\xd2\x5c\x11\x80U\xa8\x0c\x89w\x1fw\xc54\xab\x1c\
\x85\x13\x06\x81\x0d\x09\xa8\x99\xbbe\xdd\xbau\xdf\x08\x22\
hS\xa3\x8a\xe1\x95\x18\xb9#\x9e\x05Q\xb4h$\x8c\
q\xcc\x1cZ\x811\xdbWL\x93\x81\xe9,>\xa3-\
\xbbO*D\x17;\xc3\xc7:\xb5elmO\x93\x80\
ti[E\xe4($\xe4f\x98V\x19aA\x1bQ\
N\x93Q\x87\xf8\xde\x15ZPh\xcb\x16\x83\x80\x14\xee\
\xa6e\xef\xaaO\xa0k\x09\xd5\xb1\x9d*;\xe3\xa5F\
\x8f\xc6\xae\xb8\xdf\xc2SK\xf1\x0f\xca\xe4\x9b\x95\xbb\xdb\
L(\xbd\x81\xe02 e\x02\xe2\x07m\xa5K\xbf\xae\
e\x82\x92\xb0\xe87\xe3]A\x0cY\x0e\xa3\x93\xe8\xe4\
Q\xa2+\xf4\xc9b\xba\x0d\xd1\x0f\xa6-\xd1$`\x8c\
\x94\x1b\xb4o\x93`7\xacn\x88\xa5\xbd\x9apm\xbe\
v\xe6\x8c\x17\x99\xfc\xfb\x95\x85\xf9jT\xf4\x80\x04\x84\
|?\xcf\xba\xec\xef\x5c\xca\xc4 \xa0\xe2%\xf3\x8b<\
\xa1\xc8\x1a%\xad\xbe\xdfL(\x12\xa23^\xe5\xcdx\
\xf5\x11\x17\xb4Af\x9a\x0c$ \xa6K\x14\xffh\x12\
\x10\x12\xe7>%\x16(\xa3\x01\xed\x190\x14\x01q\x18\
!H\x88\xce\xf8\x85\xa1A\xaeb\xfb\xf4\xfe\x86kD\
\x0a\xa6\xb6\x10% j`\xacI@z\xf70\xa8\xfa\
(bH\xcd\xb6\xb5\xf7\x80\xad\x83W\x13\xb4Ahs\
\xc6\x07\xb5\xafQ\x90\xb2b.\xf4\x90ED$ \xde\
\xe5\x13\xffh=\xbf\x86\xf6\x01\x87t\xbb\xc4\x02e4\
\xa0L\xc0Zh\x02\xaa\xcf\x84U\xb8\x19\x1f\xda\x88\x5c\
\xda\x87\x95\xc6\xcd]\x88\xa86\x03\xd2\xc3\xa1*\x1fj\
\x22\xb3\xcb\xf0\xd8e\x94\x09\xa8~\x0a\xdai`*\x00\
\xe3W\xce\x9c\xf1l\xdbaU\x84|5\xb7\xb5!\x22\
>u6\x95\xd5`K%\xe9s\xd9\xcd[\x1dZ\xce\
j\x88\xd5\xb1\x0d\x887\xfdzs\x9f\xc2\x7fG\x99\x01\
\x1b\x83Q!\xa19\xe3\x83\xdaW\xc7\xc6a\xa9\xf4\xe7\
M\xc6\xe6\xf4\xd6\xa0\x8b\xa4\x9a\x04\x84\xc8\x9f}\xed^\
\x01v\x91\xc3\xb5L\x91\x09\xa8\xb2\x1c\x8d\xb1\xccpU\
F\x15\xcb\xc1\x1e\xfc\x938#\xc2\x0cx\xad\xc6\xf8\xc9\
u\xa0\xf2q\x06\x9f\xe2\x0b\xe8\xdc\xd7\x90\xabS\x1b\x10\
o\xbaI8\xeb5\x8f5\x97,u\x22\xb01\x9a#\
4\xc8!\x15X\x96\xb6\xc1\x0d\xf1A\x8d\xb1\xd0\x8aF\
d\x13\x0dB`\xfa\x93\xd0\xb6\xa1M@z#Q\x03\
J\xaf6$\x80\xab8\x81\xbd\xa4\xb5\xc2\xc1\x10\xd0$\
\xbe}\x11\x9a\x80\x9a\xf2\xe2J\x02\x08\xf8\xa7\xc1\xc0\
\xcdh\x98KBs\xc6\xe7\xa5\xb1\x00\xfdRJ\x0c\xae\
-\x0c\xabGy\x83\x82.A\xcbD@\xf6\x9e\xd0\x9c\
\xf1\x01\x98\x90S\x93\x9aK:\x8c\x15\x0e\xfd\xf8\x8c\xe6\
\x9e\x15g\xc0\x14\x9e\x5c`}\xfd`\xa91&'\x9b\
\xb1n\x15\x11\xa04\x12,\x1bhs\x18R\x0bM@\
\xca&\xa7&o\x0a\x04d\xcf\x84\xdd\x9eFS\xb4\x11\
k* \x02\x9a\x8em\x5c\x82\x86~\xc6N{\x06L\
\xe9\xb1\x1c\xd6W\xc5H\x18\x90\x1d\x11\x9a\xa6$P,\
\xdd\xb7\x9b\x01C\x13P\xd3o\x89KPHQ8;\
\x02\xcc\xce]\xb0\x14\x91\xf5R\xafs\xefV0:\x02\
\xda\x04\xd4Ht\xd4\x0d\x84\x00\x04\xbc!:\xe8!N\
G\x8d\x84\xa9\xa9\xd1M\x1e\xe5\xfbu\xc1]T\x9a\xc1\
\xe38\x03\xa6z\xbd\xcefB7\xfb-|)m\x02\
\xc2-\x8d\xabB\x82R\x15\x02J\x0ef\xc6\x87T\x80\
\xb5\xad\x8b\x00\xf7\xb5\xa16\xfb?\xfch\x07\x7f\x82[\
\xf3\xfe\x22\xca\x0b!}\xd7\xe9\x22\xaa\xdb\x9a\xcd\x84\xba\
x&\xd7Z\x91\x08\xa8\x9dB\xa3\x08\x04\xb4\x9909\
\xca\xe8\x0a\xa4M@z\x14VWHjM;\x8db\
\x8c\x19[\x0b\x08\x9b\x09\xb5\x90L\xac\x1d\xe54\x7fA\
\x97t\x81\x08\xa8\x92\xda#\x86Z\x8d\x841P\x8e\xdc\
\x87f\xa6\xe9\xd0\xa7\x8a\xda\x99\xbc\x8b4\x036\xcc\xc2\
H\x18\x99 \xa1\xbb+\x12\x01\xb5\xdf\xb2 \x02\x16f\
\x06\x94\x92\xf0\xd2\xd0\xc6d\xed\xfb#\xa0\xf9\xda\x10\x1a\
4D\x96\x04slk\x13\x10\x13$\x87\x0e\x1c\xf0\xd7\
\x88C\x0dnfg\xadL^\x0e\x22Z\x11G\x04(\
34ke\x03]\xb4\xd6\x0bN@zPEE^\
\xbc\x5c\x1e:t\xceQ\x0d\xfe\xc5\xf0\xf6s\x1b\x05d\
\x02\x03\x17 o\xf7\xef\xcdj\x84B@\x9b\x80!\x83\
\x9b\x91,F\xc0&K\x80\xab!\xdb9$\xa4\xc4:\
\xa1l\xca\xda\xf5@\x802Cg~8\x1d\xf5\x5c\x0b\
M@\x5c\xe6\xd2\xafXf\xba\xbf8\xc1\x03\xae\xf4\x8a\
\x0aR\x85\x07\x8f\x9aH\x0f\xad\xf4$\xd2&`\xc8\xfb\
u\x98\x8e_\xf3Im\x5c\xc5\x85\xbe\xbf\x18E\xe3;\
v\xec\xf8\xa8\xe3\x17r\xc4W\x0b\xf2\x88\xfc2t*\
\xbb( \x14\xb4\x13Hw\xf8q\xad\x19\x05\xdb\x09M\
@\xcd\x19\x10lo\x7fil\x0f\xbeNWH\x14\x09\
\xcb\x81\xdfh%\x9b-(\x17r\x11\x9bR\xb3g\xa5\
\xc2w]\xee\x05% \x1d\xe2\xa9-Ac\xa4Q\xe4\
(\xb5\x87S\xa9Q\x07\x1f\xf1\xdc\xbd{\xf7rI\x1b\
\x90xg\xeb\xddw\xdf\xfdmX\x22\xfcf\xf2\xe4\xc9\
oJ\xdaj\xa9\x8b\x86\x94\xf2O'\xf9\x9au\x82\xff\
\xc6r\xa3q \xe3\xc6\x8d{\xa7\xb7\xb7\xf7\x1d\xee\xa0\
`\xf5r\xc7\xca\x95+\xff\x0b\xea\x8b\xf4N\xfd\xd7\xf1\
\x90m\xf9\xf2\xe5{\xb8\xf2t\xab\x87\x04\xec\xef\xef?\
Ie\xc4\xf2\xe2\x13\xe2{\xf7\xee\xbdu\xcc\x981\xbf\
\x0f!onm*\x9f\xac\xb9~}+[N\xf2`\
\x8ed\xfb\xd0f\xdb\x114\xcd\x1f\xac\x90\xae\xd7\x5c\x82\
\xc2\xcd\x8a=\xa5Y\x82\xb6\xb2\x1d\xc2\x86>\xd0FA\
\x95%Ih,\xb8\xcf\x95\xd1\x9eMK/A3M\
S\xa0\xb7\xda\x124\xc6[\x16\xb9\xcd\x82\x8d\x8e\x95\x83\
}\xb5\x0c\xa5t\xed\xe0\x91:W\xd9\x90\x96\xe2\x9f\x94\
>\x10\x85\x22 \x5c\xee\xfd\x19\x17\xb3B\xd5\xa3\xd9P\
k\xa3_:\xf2(\x19\x7f\x9dR\x0c\xb2l\x03\x0e$\
^R\x90\x03_\x1bZ\xc1\x12\xc0\xa1\x12\xa4\x8f\x98\xa5\
\xb9\x04\xad\x0c\x01\x1b\xd8\xc2~\xe3#\x0aJ6\x02v\
y\xfd\x98\xfb\xe42\xf8\xc30\xa3\xb5\x14\xdb\xa0\xcf}\
\x01\x01?\xacI\xc0\x18Oj;|W\xe2\x17QN\
\x06+5\x9a\xd2\xd5\xe7\xa6Y\xa0\x87JDxH\x0e\
\x84\xb2,\x11\x02\xbd\x1b\xaf\x07\xab\xac\xa6 \xc3\xda\xe3\
Y}\x96\xfa\xef\xb0\x04\xc0'\x8dE\x0a\xb7\xfa\x1d\xf1\
c\xd9\x0eD6\xfd\x83\x00\xd3ZH\x02B\x98\xdbM\
\x9a3 %\xf9e\xe1T\x9aJx\x0c\x0c\xfe><\
@0\x22*b\x00\x8f\xael\xe6\x1a\x09\x9c\x0e\xee\xe5\
\xea\x03\x5cP\x9f\xe6\xf6\x9bUO\x9b\x80\x10\xcb\xfc\xfd\
\xac>+\xf5wX\x12<\xc9U\xbc\xd5\x1b\xf9\x01\x03\
2\xac\xe6\x18\x10e;g}\x10\xe1\x82\xef=\x9c>\
]\xea\xd0\xa3@*\xcbO\xb4\x17\x08\xf8x\xcc\xa5\xdf\
\xca\x95\xa1w\xe8Y\x06`D\x1cNDn\xeau\x89\
\x7f\x10\x83\xa6C\x18\xad6\x01%\xab\x84\x10\xe3K\xae\
M\x0c\x93\xa25\xbf\x91Q\xb0<\x85e>\x06Ex\
\xffp\xd3UH|\x92\xdd\x84\xd4&\xa0\xc4m\xe3\x0d\
f\x91+\x80\x01\x8d\xe6\x1a\x83\xcd\x88\xa3\xeap\xd8\xf0\
c\xae\xfe\xa1\xee\xbfs0\x84W\x97\xbe\xc6\xed\xb3S\
\xbd&\x02\xaa,C\x8d\x80L\x0da\x04\xbf\xf2;q\
\xa5\x9fa\xe1\x03\xf6\x05&\xdcX\x8d\x85\x0f\xd7'\xd9\
INr\x93\xa8\x85\xa2\xd1\xd3l\x02X\xd2\xaa*\x8e\
N\xe7\x0c\x07\xa2l.y\xee\xb9\xe7\x16\xef\xdf\xbf\xff\
\xe6C\x87\x0e\xddp\xe0\xc0\x81\x9b\x06\x07\x07\xfb\xce\x9f\
??\xae\xa7\xa7\xa7\xa8\xb7\x1f8P4\xd7i\xd5E\
\xcf\xf1\xe3\xc7\xafz\xfa\xe9\xa7\x97A\xc4\xcb\x01\xdf\xc6\
a\x1f9k\xfe\xfc\xf9\x07}\xebay\xf4I\xc2\x92\
\xf4\x18\xa7nk\x1d$ \xdc\xb4\xf89\xfdw\xb1\xbd\
!\x01!T\xf2\xef4d\xb36\x0c\x81\xa0\x08\x08n\
\xb8\xe0\x8c\xa5\xf2\xd34\x03\xb2f\xe4\xd6\x99\x9c\x0e\xfb\
Td\xb3F\x0c\x81\xe0\x08\xc0\xb1\xfd6\xcer\x14\xde\
\xa1\xff\x86\x86p\xda\x87rF@\x0d\xadX\x1bQ\x11\
\x80\x9c,x\xf9\xd7{\x06\xe2\xfa$\x9b\x07\xa7|{\
\xbf\x0eO\xb3\xdd\x1f\x15<\xeb\xcc\x10\x90\x22\x00{\xba\
k9\x04\xc4:\xd2\xa4\xbd\xca\xf9k\xea\xf40\x8d\x14\
\x12\xabo\x08\xc4E@\x92I\x8d\xeb\x93\xc4\x11R\xbf\
*.\x08\xfc \x18\x01\xe3\xda\x8d\xf5\xa6\x88\x00\x9c \
\xfe\x0bg&\x94\xf8$\x8d\x80\x8a\x0a\xb4\xa6J\x81\x80\
\xf7^\x10I\xeb\xe3\x93\xc4\xb8Tp\xc0\xdf\x82\x81\x18\
@\xde\x9fpH\xdf\xa9\x0eek(\x85\x22l\x10\x15\
D\x00\xf2\xd0L\xe1\x12\x82\xa2Z\xda\xa2\x86).\xf1\
\xc4\x13\x96\x88_\x82,{\xcfp\xfb\xc8\xaag\x04\xac\
\xa0\xd1\x96m\xc8t2\xc9\x99\x09\x87\xf9\x071\xe5 \
\x1e\xb2\x80k\xe0A\xeeIk\x16\xe1Z\xffN\xe1\x8d\
eS\x89\x8d\xa7j\x08\xc0q\xfeW}\x8d\x1f\xcbc\
V2\xdc\xd7aL&\xbcT4\xc8iCR\x07\x9e\
f\xfb\x5c\xd5te\xe3-)\x02\x90\xe4\xf6\xbf%d\
\xc8\xa3.E\xf7\x94T#6\xacJ!\x00O\x87M\
\xca\x83D\x92>\x8d\x80\x952\xd1\xf2\x0fV#\xa9\x93\
\x84P\x9eu1\x7f\xcd]\xe5\xd7\x8a\x8d\xb0R\x08\x90\
s\x9bs(\x13\xbdN\xc8\x04R\x95R\xba\x0d6-\
\x04$I\x9d<g1\x09i\x83\xe60MK#&\
M\xa5\x10(\xc8\xbb F\xc0JYe\xc5\x06\x8b\xd1\
+\x11g3\xceL\x18\xf4\x1d\x8b\x8a\xa9\xdb\x86\x9b\x22\
\x02\xa9\xe7\xf1\xa1\xa7\x12R\x84\xced2\x04t\x10\x80\
\xdc=?Lt&\xb4\x19PG\xc5\xd6J\x01\x10\xe0\
,\x11\x83\xd4\xc1d^\x18\x01\x03Y\xb6\xe7\x17\x007\
\x13\xd1\x10\x90#\x80I\x9d\xf2\x9c\x051\xd4\x0d\x9d\xee\
\xf0\xba\xd2t\xf9h\xac\x05C\xa0\x80\x08\x08\x92:y\
\xcf\x84\x90\xf9\xede\x0c\xea\xc6\xe0n\xba\xb1Q@\xc4\
LdC@\x19\x01H\x05\xff\xcdP3!\xc4\xa2\xbe\
\x80A\x00\x10\x8ds;\x85\xc5)Ko\xcd\x19\x02\xe5\
@\xc0{F\xebDZ\xbc]\x8f'\xad\xf8\x8e\x05=\
0Z\x0e\x84l\x14\x86@(\x04 \xa9S\xbfd\x16\
\x84\xd4\x88\xdf\x81C\x94\xb5\xd2\x04O\xa1\xc6g\xed\x1a\
\x02\xc9#\x001\x98\x9f\xf4!!\xbe3\x01u>\x05\
\xe4\xbd&\xf9\xc1\x99\x80\x86@\x11\x10\xa0w\x19Z\x97\
\xa3\x173\x9e\xc1!\xca\x8b\x98\xb3\x13\x1d\xe5p\x88r\
y\x11\xc6c2\x1a\x02\x85C\x00n\xc1\x9fl\xcc\x84\
\xe0\x9f\xfb.>\xee\x09\xfb\xb9\x9ba?\xc7zN\xad\
p\x00\x94@\xe0\xff\x07\xdcG\xec\xd9\xbf}j\xd3\x00\
\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x00\xd2\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x0b\x00\x00\x00\x0b\x08\x06\x00\x00\x00\xa9\xacw&\
\x00\x00\x00\x09pHYs\x00\x00\x01\x90\x00\x00\x01\x90\
\x01!\xf7\xeb~\x00\x00\x00\x19tEXtSof\
tware\x00www.inksca\
pe.org\x9b\xee<\x1a\x00\x00\x00_ID\
AT\x18\x95\x95\x91\xb1\x12\x800\x08C\xdb\xfe\xbf\xbd\
\xd3N~S]\xea\x0f=\x07\xd3\x85\x83\xab2\x86\x04\
HH\xc0\x05l@NN\x01Y\xfd\x9e\x80\xca[\xcd\
\x0aDl\xea\xd7\x09\xec\x02\xce)\x10~X\xdc\x13\x14\
\x97\x18\xac\xbc\xa3\xd3\xac`\x888V\xc4\xf5d\xc7L\
\xf1L\x87\xae\xdd\x94\xfe\xe6\xdc\xf9\xf8\xc1\x07Y>'\
\xe9\xd5\x83\xa9\xc6\x00\x00\x00\x00IEND\xaeB`\
\x82\
\x00\x00\x00\xf8\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x14\x00\x00\x00\x14\x08\x06\x00\x00\x00\x8d\x89\x1d\x0d\
\x00\x00\x00\x09pHYs\x00\x00\x01\x90\x00\x00\x01\x90\
\x01!\xf7\xeb~\x00\x00\x00\x19tEXtSof\
tware\x00www.inksca\
pe.org\x9b\xee<\x1a\x00\x00\x00\x85ID\
AT8\x8d\xe5\xd2O\x0a\x81\x01\x10\x87\xe1g\xa4\xfc\
?\x87\x10[\xf7_\xb1\xb0\x96r\x00;\x85\xb0\x1c\x0b\
\x9c`>E\xde\x03<\xfdj\x86o/230C\
\xa7h\xdd#b\xdb\xc6\x12\xab\xf24d\xe6\xa2\x8d#\
\x0e\xe8\x16\xbd\x1bN\xe5U\xbfW@fN\xd1+Z\
\xd7\x88\xd8Ef.\xb1.O{6oy^\xe6\xd2\
\x00\xd6\x94\xf3\xf7EfN\xb0A\xbfh]\xb0ha\
\xd4\x00\xe6\xe5\x0c\xde\x8f=\xc6\xb0\x08\x9e#b_\x9e\
\xf5\xf1\x1e\x1e\xe8$<\xae\xc1\x8d\xb7\x00\x00\x00\x00I\
END\xaeB`\x82\
\x00\x00\x02\x88\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x19\x00\x00\x00\x14\x08\x04\x00\x00\x00\xd2~^6\
\x00\x00\x00\x04gAMA\x00\x00\xb1\x8f\x0b\xfca\x05\
\x00\x00\x00\x02bKGD\x00\xff\x87\x8f\xcc\xbf\x00\x00\
\x00\x07tIME\x07\xe5\x08\x1e\x0a28\xc2\x0e\x22\
\xc5\x00\x00\x00\x01orNT\x01\xcf\xa2w\x9a\x00\x00\
\x01\xafIDAT(\xcf\x95\x92?H\xd4a\x18\xc7\
?\x8f^?\x0d\xae!1\xc4\xa3\xc1&q\xa8t\x08\
%8\xc8A7\x1b\xc4\xae4\x02\xd3\x0bTZR\x04\
\x0br\xb8\x16\x87lR\x04u\xe8\x8fCACKr\
\x0a\x87CcK\xe4\x0d\x0a\x0dB\xf2;\xa2\x96\x1aR\
\xe9\xf4\xebp\xef\xfd8\xf5\xd7\x81\xcf\xf0\xbe\xdf\xe7\xf9\
\xf2}\x9f\x7f/\x9c\xda\xec\xff\x94\xdeP\xeb`5}\
\x96+\xc6#\x8e\xee\xa1\x0a\xf0m-\x10t\x92\xb3{\
\x0e\xb73\xce\xe8\xf1\x17?\xeb\xbe\x1eh\xb9$\xb2\xa2\
\x0b%^ZuAa\xaa\xe4\x1d\xe7\xa8\xe5\x17\xb8\xb3\
P\xf0'{V\x22\xb9A\x8a\x1d\xe0/\x09\x14\xd3+\
\x17\xaeP\x17\x80jt\xdbEZ\x94\xd4\xb0:\xe4\x05\
\xd2\x05]\xaa\x00\x14\xf45\x04@+3\xf2tY\xab\
\x0c\xf2\x07\x9fV2\xea-j\x00\xc5\xf4\xd2y\x9e>\
:d\xba\xa6\x8cb\x98\xae\xa8Mg\xe5iV\x8f\x00\
4_\xc8r\xd2\xce0M\x82\x1a\xad\x91\xa4\x8b4\xb7\
\xec!\xd7u\xf5\xc8\x90\x8fY7\xef\xd9c\x8e\x84\xe5\
@\x11\x96\xb4\xc5S&\xe8\x07\x08\xcf\x12g\x85n^\
\x17\xd6gy\xc6\x19\xb1\x0db\x052\x5cr\x9e\x9f\x5c\
\xe4[\xb0\x89m\xea\x80|9\x89O\x03\xeb\xb4\x05\xa3\
mf\x13\xc3+'I\xd3\xc32\xed\x8a\x03\xa8\x9ei\
^(\xce\x97\xb0\xf6\xdd'\xb5\x8cFi\xe2\x0e\xcf5\
\xc9?\x8c1|\x16\xb9\x1b&Qp'yK\xca\x06\
0\x8c\x035\xf0\x81)\xf3\xcb\x0d\x19\xf3u\x93\x94\x1e\
\xf3\x95]\x9a\x801\xcb\x16\xb9\x08\xfbD\x15ueM\
:\x04\x90\xe7\x09\x1e\x8dx\xcc\xf0\x1b\x1c\x13e?b\
?\x94e)4\x95\xb0\xa0\xd4b\xa7Y\xfb\xce\xe9\xed\
\x10\xfc\xec\x9c(e\xef\xc54\x00\x00\x00%tEX\
tdate:create\x00202\
1-08-30T10:50:56\
+00:00\x86\xe72Y\x00\x00\x00%tE\
Xtdate:modify\x0020\
21-08-30T10:50:5\
6+00:00\xf7\xba\x8a\xe5\x00\x00\x00\x00I\
END\xaeB`\x82\
\x00\x00\x07\x1d\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x10\x00\x00\x00\x10\x08\x06\x00\x00\x00\x1f\xf3\xffa\
\x00\x00\x00\x09pHYs\x00\x00\x0b\x13\x00\x00\x0b\x13\
\x01\x00\x9a\x9c\x18\x00\x00\x05\xf1iTXtXML\
:com.adobe.xmp\x00\x00\
\x00\x00\x00<?xpacket beg\
in=\x22\xef\xbb\xbf\x22 id=\x22W5M\
0MpCehiHzreSzNTc\
zkc9d\x22?> <x:xmpm\
eta xmlns:x=\x22ado\
be:ns:meta/\x22 x:x\
mptk=\x22Adobe XMP \
Core 5.6-c148 79\
.164036, 2019/08\
/13-01:06:57 \
\x22> <rdf:RDF \
xmlns:rdf=\x22http:\
//www.w3.org/199\
9/02/22-rdf-synt\
ax-ns#\x22> <rdf:De\
scription rdf:ab\
out=\x22\x22 xmlns:xmp\
=\x22http://ns.adob\
e.com/xap/1.0/\x22 \
xmlns:dc=\x22http:/\
/purl.org/dc/ele\
ments/1.1/\x22 xmln\
s:photoshop=\x22htt\
p://ns.adobe.com\
/photoshop/1.0/\x22\
xmlns:xmpMM=\x22ht\
tp://ns.adobe.co\
m/xap/1.0/mm/\x22 x\
mlns:stEvt=\x22http\
://ns.adobe.com/\
xap/1.0/sType/Re\
sourceEvent#\x22 xm\
p:CreatorTool=\x22A\
dobe Photoshop 2\
1.0 (Windows)\x22 x\
mp:CreateDate=\x222\
020-03-03T09:50:\
39-03:00\x22 xmp:Mo\
difyDate=\x222020-0\
5-07T19:00:59-03\
:00\x22 xmp:Metadat\
aDate=\x222020-05-0\
7T19:00:59-03:00\
\x22 dc:format=\x22ima\
ge/png\x22 photosho\
p:ColorMode=\x223\x22 \
photoshop:ICCPro\
file=\x22sRGB IEC61\
966-2.1\x22 xmpMM:I\
nstanceID=\x22xmp.i\
id:629c6f97-efef\
-0743-bf78-faf00\
1161913\x22 xmpMM:D\
ocumentID=\x22adobe\
:docid:photoshop\
:333c7819-ad90-9\
846-91de-cab6abf\
2ebab\x22 xmpMM:Ori\
ginalDocumentID=\
\x22xmp.did:a388297\
3-1e1d-a045-88c7\
-5e755708fd64\x22> \
<xmpMM:History> \
<rdf:Seq> <rdf:l\
i stEvt:action=\x22\
created\x22 stEvt:i\
nstanceID=\x22xmp.i\
id:a3882973-1e1d\
-a045-88c7-5e755\
708fd64\x22 stEvt:w\
hen=\x222020-03-03T\
09:50:39-03:00\x22 \
stEvt:softwareAg\
ent=\x22Adobe Photo\
shop 21.0 (Windo\
ws)\x22/> <rdf:li s\
tEvt:action=\x22sav\
ed\x22 stEvt:instan\
ceID=\x22xmp.iid:62\
9c6f97-efef-0743\
-bf78-faf0011619\
13\x22 stEvt:when=\x22\
2020-05-07T19:00\
:59-03:00\x22 stEvt\
:softwareAgent=\x22\
Adobe Photoshop \
21.0 (Windows)\x22 \
stEvt:changed=\x22/\
\x22/> </rdf:Seq> <\
/xmpMM:History> \
</rdf:Descriptio\
n> </rdf:RDF> </\
x:xmpmeta> <?xpa\
cket end=\x22r\x22?>\xacr\
\xe7\xb6\x00\x00\x00\xd2IDAT8\x8d\xc5\xd31J\
\x041\x14\x87\xf1_f#.\x82\xde\xc4R\x0f\xb0\xae\
'\xf0\x06\xb66\xdeD\xef`c\xad\x9d`+X\x8f\
\xe0^\xc0\xca\xc6~\xdd\x89\x85\x09\x86a\x06e\xa70\
M\xf8\x93|_\xde{\x90\x90R2e5\x93h\xc4\
\xb6m\x1btS*\xe80\xdb\x82\x0dE\x00\x9b-\x04\
\xa9\x16\xec\xe3\xf8\x8f\xe0A}\xb7\x08\xe6\xb8\xc1\xe5/\
p\xc0-.J\x8eY\xf2\x8e3<\xe4\x83\xeb\x0a\xda\
\xc1:\xc3\xf79\x9f\x976\xa2\x9f!\xbe\xe0\x04\x8f=\
\xc9:\xefw\xd8\xc5iUM\x8a9l\x10\xf1ZI\
\x02\xae\xaa\x97\xe7XV\xadw2T\xd6\xe7\x80\xa4\xc1\
\x11\xf6\xb0\xe8\xc3}A_\xb2\xc0\x13\x9e\xc7\xe0!A\
\x91\xcc\xb0\xc2!>\xc6\xe01\x01\xdf3i\xf0\x96s\
\x18\x82!\xfc\xfbo\xfc\x02d\x060\xdc\x02*\xb0\x83\
\x00\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x00\xac\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x14\x00\x00\x00\x14\x08\x06\x00\x00\x00\x8d\x89\x1d\x0d\
\x00\x00\x00\x09pHYs\x00\x00\x01\x90\x00\x00\x01\x90\
\x01!\xf7\xeb~\x00\x00\x00\x19tEXtSof\
tware\x00www.inksca\
pe.org\x9b\xee<\x1a\x00\x00\x009ID\
AT8\x8dc`\x18\x05\xa3`\x18\x02Fd\xce\xff\
\xff\xff\x99\x18\x18\x18\x16300\x88\x11\xa9\xff%\x03\
\x03C\x1c##\xe3?\x98\x00\x13\x16E\xff\xc9v\xde\
(\x18\x05\xc3\x15\x00\x002*\x07\x03\xc6\xcd\xbd,\x00\
\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x00\xf8\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\x14\x00\x00\x00\x14\x08\x06\x00\x00\x00\x8d\x89\x1d\x0d\
\x00\x00\x00\x09pHYs\x00\x00\x01\x90\x00\x00\x01\x90\
\x01!\xf7\xeb~\x00\x00\x00\x19tEXtSof\
tware\x00www.inksca\
pe.org\x9b\xee<\x1a\x00\x00\x00\x85ID\
AT8\x8d\xed\x93\xb1\x0d\x83@\x10\x04\xe7^\xf4\x80\
\x85\x13*q\x03\x88:\x1c\x92B\x07n\xc1\x91\x8bp\
\xec\xc8\x8d\x90\x10P\xc5\x12\xd8\x01\xa0\x97O\xd6\x87\xfc\
\x84\xab\xbb\xd1\x05\xb7\x90I\xc5\xf6\x81\xa4\x13\xd0\x03\x95\
\xb3;\x0173\x9b\xd7a\x11\x19\x1c\x80\x06x9\xc2\
\x16\x08@\xe7\x09k\xe0mf\xd7_6I\x8f\xef\xec\
\x86\xe0\x5c\xf17Yx\x04a\xec\x0fG\xa0\x91tw\
v/\xc0s\x1f\xc6\xaaW\xf2\xa9\xde\xd9\x11F\xab\x97\
Ig\x01Tv\x19\x17\xday\x9b_\x00\x00\x00\x00I\
END\xaeB`\x82\
\x00\x00^\xfe\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\xb0\x00\x00\x00L\x08\x06\x00\x00\x00\xcd\xe7=<\
\x00\x00\x00\x04gAMA\x00\x00\xb1\x8f\x0b\xfca\x05\
\x00\x00\x00 cHRM\x00\x00z&\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00u0\x00\x00\xea`\
\x00\x00:\x98\x00\x00\x17p\x9c\xbaQ<\x00\x00\x00\x06\
bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\
\x00\x09pHYs\x00\x00.#\x00\x00.#\x01x\
\xa5?v\x00\x00\x00\x07tIME\x07\xe5\x08\x0f\x17\
\x1f\x15[\xa0$\x86\x00\x00]\xc0IDATx\xda\
\xed}e\x98\x1eE\xd6\xf6}\xaa\xba\x1f\x1f\xf7\x89L\
\xdc=!\x02\x11H\x88\x10$@\x08\xee\xee.\x8b\xcb\
\x22\xbb,\xb2\xf8\xa2\x8b,\x0e\x01\x82K F\xdc\xdd\
e2\x93q}\xb4\xa5\xea|?z&\x10\x08\x0b\xbb\
\xcb.\xef\xfb~{\xaek\xae\x19Hwu\xc9\xdd\xd5\
\xa7\x8e\xdc\x87\xf0\x1bK\xa4{w\x14E\xaa\x105N\
\x00\xb6?M\x955\xe0\x7f\xb6\xad\xfc\xfc|H)q\
\xd8a\x87\x89\xcc\xccLl\xda\xb4\x89jkk\x89\x88\
\xc0\xcc0M\x93#\x91\x08\xb2\xb3\xb39//\x0f\x9f\
|\xf2\x09o\xd9\xb2\xe5\x87\xcd\xfc\xd3\xcf\x07\x80\xc5\xcc\
\xc0\xf6\xed\xd4\xbdcG\xa4\xaa\xab\xf1\xe2K/QE\
E\x05\xca\xcb\xcb\x09\x00\x98\x19\x91H\x04%%%\xdc\
\xa7O_^\xb6\xb0\x1f\xff\xe1\xc1n\x00\x0c\x00\xeeo\
\xbd\x1c\xffI\xa1_p\xcd\xcf\xae\x85\xf1[\x8f\x22\x16\
\xabD\xa7\x82\x0bQ\xc3\xeb\xd2c%\x87v\xe7\xac\xea\
\xb5\x04N\xca`\x18\xe5\xab\x16\xfeCm544\x80\
\x88\xf0\xf5\xd7_k\xbf\xdf\x8f\xa6\xa6&r\x1c\xe7\xbb\
\x19#\x02\x11\xc1\xef\xf7#\x14\x0aqYY\xd9\xaf>\
\x9e\xf3\x00\xd0\x0b/\xf0\x83W_\x0ds\xe3F\xea\xd1\
\xa3\x07\xa7\xa7\xa7#\x14\x0a\x01\xf0\x00\x1c\x08\x04\xd0\xa6\
M\x1b\xee\xd6\xb5+\xb65j\x92\x13?g\xc5\x16\xf0\
\xc5Q\xbf\xe9Z\xfc\x87\xe5_\xda(Z\xe57\x05p\
Fv6FN8\x05\xe9\xfe]r\xe9L\xfb\x04&\
1\xad\xa4\xa8\xfde\xd9\x199\x9bf\xce\x9b\xf5\xcf\xcd\
\x0a3\x5c\xd7\x85\x10\x02J)\xd6Z\xef\xf3\xefD\x04\
\xa5\x14\x1c\xc7\x01\xf3\xaf2\x87\xfb\x88\x0d@\xa4R\x18\
\x9b\x9d\x8d\xf0\xe1\x87\xf3\x15\x97_\x8e{\xef\xbd\x17\xf8\
\xde\x8213\x88\x08\xb3\xbf\xf8\x0c\xa9n\x11\x14~6\
\x01\xe5\x0d\x00\xb2\xffss\xff\x7fE~\xc96\xfeo\
\x93\xd3\xae\xbe\x02\xaf<\xf4!:\x0c\xea\xd1\xb5\xaa\xce\
y\x11\x10}\xdb\x16\xf8\xee\x1d30\xe3\xe1\xaaz\xdb\
\x0a\x07L\xbc\xf1\xca\xab\xbf\xf5\x1c\xfd\xc3\x22\x84\xd8\xfb\
\xb7\xd6\x1a\xcc\x8c\x1f\xbeHB\x08L\x7f\xfd5,\xca\
\xcb\xc7\xdf\x0e=\x14{4\x00\xf9\x9b.\xc7\xffJ\xf9\
\xcdf\xec\xf0+nC\xf9\x9a\xcd()\x0e\xf8g/\
\xa8\xfe]\xca\xe1\xab\x19H\x8b\x84\xe4\x9a~\xdd\xd2\xcf\
\x9b\xb7;\xbexD\xbe\x1fV2\x86Es>\xf9\xad\
\xe7\xe9\xbf\xf2?T\xc4\xbf\xde\xc4?'w\xdc8\x18\
+\xbf^\x8a%k\xea\x87\xd9\x8e\x9cJ\xe4\x8b\x900\
\xdc\x84-\xbb\x97\xd6\xb8\xa7\x9e9\xa20R\xd2\xa9\x18\
]z\x8d\xfb\xad\xe7\xe8\xbf\xf2?X~\x13\x00\x9fy\
\xf9\xcd8\xe3\xa4w1\xe2\xd8q\xe9\xd1\x18\x1d\x07\xc8\
^\x10\x02$\x0c0I\x7f]\x13\x8eX\xb5+5\xee\
\x95\xa7f\xa1z\xe7z0\xf3\xbfE_\xfd\xaf\xfc\xef\
\x97\xff(\x80[\x81\xd8\xafo;\xac\xfff:vl\
\xa9\x1f\xab\x95\x98BRHI )\xa4!\xc8`\xcb\
F\xc7\xaa\x1au\xd2\xc4\xe3\xfb\xb5\xc9h\x1bA\xe9\xee\
\xf2\xdfz\x9e\xfe+\xffC\xe5?\xbe\x03o\xd8\xbc\x1d\
O=\xfd5\x0e\x9ctR\x9bT\x92\x8f#\x18\xed\x89\
\x04\x08\x82\x04\x88$I\x120\xd0\x18\xd3\x93\xab\x9b\xc4\
\x94\x13:\xd6\x89\x92\xae\xdd\x7f\xeby\xfa\xaf\xfc\x0f\x95\
\xff8\x80\xaf\xb9\xfd\x8fx\xe1\xf1c\xe5\xce\xf2\xd8$\
\xa5\xe9H\x22\x01\x22\x012\x0c\x10\x99\x90d@\x0a\x03\
J\x89\xb4\xfa\x06\x9e6}[F\xcf\xc9SN\xc6Q\
G\x1c\xf1[\xcf\xd5\x7f\xe5\x7f\xa0\xc8\xff\xe4\xc3\xde\x99\
\xbd\x09\x15\x15)|=gW\xd7\x9az\xe7&\x86\xd1\
\x03\xc2\x030\x84\x01\x22\x09@B\x08\x02\x04\xc1vP\
d\x9aF\xe5\x91#r\x97\x89`\x8e;v\xcc8\xcc\
\x9b7\xfb\xb7\x9e\xb3\xff\xca\xff \xf9\x8f92\xce\xbf\
\xfe6\xc4\x9b]\xb4+\x88\xf8_\x9c\xben\xa2bc\
(\x04y\x86<\x96\x00\x13\xbc\x83\x1c@ \x08&(\
\xd6\xfe\xf2j\xf7\xe4\x15\xdbR3w5\xd2\xe2\xeeb\
\xfb^'\xc0\x7f\xe5\xbf\x02\xfc\x87\x00\xcc\xcc\xb8\xe5O\
\x7f\xc03\xf7\xdf\x88>\x07\x9f\xdf;i\xe1L\x22\x19\
fju\xefJ\x10\xc8\x03\xb2\xd0\x00\x0b\x10\x04\x08\x1a\
Mqt\xdfTj\x9d:\xba;6\xef\xa95\x1b[\
\xe3\x1a\x00\xfc\x17\xc8\xfff\xc9\xcbj\x83I\x13\xc7\xc3\
\x08\xd4\xa2\xb6\xae\x19~\xbf\xc4\xa0A\xe3\xb1d\xc9\x12\
D\x02\x01\xec)\xad\xc4\xcco\xbf\xf9M\xfb\xf8\x1f\x01\
\xf0\x88c/\x86\x9f\xfc8\xfc\xb4\xeb\xd2\x97\xac\xa8<\
W\xb3\xec\x03Ah9\xbc\x01Dh\xf5\xa90h/\
\x80\x05$\x94\xd6\xe6\xeeZ}\xcc\xa6l\xf3\xd3\xa5\xaf\
\xfd\xf53\x00\xbcg\xcf\x1e\x14\x17\x17\xff\xa6\x13\xf7[\
\xc9]w\xdf\x85\xc5{\xd6\xe2\x84\x11\x93a8\x02*\
\xd6\x80\xe6\xb2R\x94\xd7Ea5\xc6!\xfd&\xfe\xf8\
\xda\x8b\xbf\xb8\xbd\x82\x82!(.\xceBqq.\xea\
\xeb\xeb\x90L\xda\xd8\xbc}-\x12\xcd\xb58r\xda\xb5\
x\xfe\xe9\xabh\xd6\xdb!z~q\xa5.p/E\
t\x8f\xc2{\xdfL\xc7g\x7f\xe8+&]\xb8V_\
}\xed\xd58n\xea\xf1\x18y\xd0\x81?\xf26~_\
&M:\x05>\x9f\x1f\xe1\xb0@EE\x05,\xcb\xc2\
\x82\x05_\xfd\xcb\xf3\xf1o\x07\xf0AG\x8dC\x9b\x82\
\x10\xde|\xea\x01t;\xe8\xc2\x03R\xb6\x98H$}\
\x02\x09\xc2w\xe7H\x02\x131\xb1`@\x08\x86\x07\
p\x01\x8dXR\x17o\xad\xd2\x97L\xba\xe8\xf2\x15f\
\xb8\xb0\xf2\xffW\xf023\x08\xc0\x9e\xbfM\xc7'c\
\x87\x1b\xb4v]XE%!\xd5,}\x8a\x94\xb4U\
L\x06\x03\xbf(\xa4m\xd2I\x8f\xa1M\x11\xb0z}\
\x0aK\xe6NAI\x87\xdb\x0d\x90T\x9a5\xdf\xfa\xec\
\xb7\xb8\xf1\x84\xee0D*\xed\xa8\xa9w\xf73\xfdn\
v~\xee\xe3[\xba\xf6\xca\xdf\x1a-\xf3\xe3\xe8\xc9w\
u\xf9\xcb\xacc\xba\x9cs\xee1u\xca\xaa[{\xe0\
\x88\xe1\xd1^\xdd\x0a\xb1~s\xe5\x8f\x9e3\xf8\xf0\xe3\
P\x12\xe8\x8d\x04\xd5b\xce\x9a$.:*\x22+*\
\x0d-\x84\xfa\xdf\x11\xcc\xe3\xa72\xac\xdb4\x1a\x07\x1f\
{evc\x93s>\x91\xd1\x01\x10\x80\xf4\x22\xc3\xc0\
\x02\xc4\x02\x90\xdcHBlc\x97z\x83e\x80XB\
\x00 \xd2`\xd6\xa2\xa6\x81\x87\x977\xf8\x8f<\xaew\
\xcd\x0b\xe7]z\xa5rb\xce\xbf\xda\xb5\xbd\xfa\xf4\xdb\
\xd3?\xc2\xda\xb5\x1b\xb0\xad.\x05\x11\xc9B4\xe5\xa2\
\xb1\xa1\x19\xec\xc6\x91\x97&\xe1\x97\x0a\x06\x18\xc9\xb2*\
\x08\xbf\x89\xd7_\x7f\xee\xdf=m\xfb\x95w\x8e9\x0e\
\xef\x0f\x1e\x8c\x95\x87\x8e\x0f\xe4\xdf}\xeb\x11TU:\
\x0eI\xe5\xcf\x8c\xe4g\x06\xdb\xb4\x9f\xb7\xccN\xfe5\
\xc3\x14\x0d\x7f\xaf\x8d\xb5k\xd7\xa2O\x9f>h\xabb\
\xf8\xdb\x9bM\x180 3\xb3k\xef\xbft\xcf\xcb\xed\
\xa4\xc2\xd6\xda\xd5\xf5\xc4\xf6\xa3w\xdf\x8cv\x03\x0e\xf6\
}8\xbf\xfa\x9cd\x22\xf7\x02\x90\x99\x91\x11\xd6\xeb\x17\
\xce\xda~{L+\xd4s\xfa\x1d\xacE\xaf\x90a7\
\x99\x81\xe6g3\xd2BOU\xd6\xf9S?|\xd6\xa1\
\x13.Bv\x10\xd8PV\x8fQ\x07Jsg\xb9\xe8\
<wYU\x89\xb0\x1a\x179\x96\xd3\xf8k\xcc\xc9\xbf\
\x15\xc0'_v\x07\x00\x81\x99\x8f\xde\x8a.\xc3/8\
\xccQ\xe2 \x90\x10\x9e\xde\xeb\x81\x18\x9a\xc0\x9a\x94\xe9\
\xa7\xb7\x22i\xe17\x9a\xea\x9c\x075\xf3@\x22\x01f\
/\x84K\xb2\x84m\xa9\x9c\xed\x15|\xea\x86=\xf9\xb3\
\xb2\xfa\x0d\xd9\xb2\xf6\x8dG0\xb4K\x17,\xde\xba\xf5\
g\xfb\xb1y\xf3fh\xedb\xf5\x9a\x8d\xd8\xbd'\x8a\
\xe5\xdb]l\xda\x91@\x87C\x9eB\xce\x88'0\xed\
\xd8\xaf\xb0\xf4=2_Z\xe8\xf3'}i\x01\x9d\xd4\
iB\x88L\x16\x14\xb4\x1d\xc7V\x8e]m\x12\xd7;\
\x8e\x13'\xfa\xcf\xbb\x04\x99\x19\x9f\x1f0\x12\x81\xde}\
\x10\xb9\xe5FY7\xea\xa0\xa3\xe4\xce\xed\xf7\x89\x84\xd5\
\x8e\x08\x22^T\xbc\xa1lK\xe9\xd3\x8f\xaf\x5c\xd08\
\xa6\xa4\xe7\xdfm\xabO\x9f>\xe8Q2\x159\xe9F\
Z\xfb6\xe9\xbd\xb6\x96\x8a\xe3\xa0\xcc\x89\x96\xad\xffR\
\x9cg\xaf\xb1\x14\xa1b}5B\xbeh\x86\x1br&\
\xfa\x0c\xb3\x07\x09B]\xc2)j\xacO\xad\x10\xba\xb1\
\xce\xcd\xc9\x1d\x07\x93\xa9\xce\xd5\xc5N5\x1fm \xed\
ev\xc4>\x00>\xf3\xbc\xdb`\xc5\x1b\xd1\xb9\x8d+\
\xb6U\xa1\xe3\xbb\x9f%\xc7%l}\xb6\xcc\xe4\xba\x1c\
\xaa\xbe\xd0\xb5\xb9\xf1\xd7\x98\x9b\x7f+\x80\xeb\xabl\xcc\
]\xba\x0b\xc3w^Y\xd2\x10S'i\x92m@\xf0\
T\x07\x220\x0b\x00\x02~\x9f\xdeYR\x10z\xed\x91\
\xbb\xce\x9c\x7f\xe2\xe5\x7f{\xb9\xb1)\xd5C+\x04\x09\
\xe4\xed\xce\x02 !P\x17\xd5\x83Wl\x8b\x9fr~\
\xc7o\xef+\x9cv\x845\xf5\xa2\x0f\xd1\x81\x08\xd3\x06\
OCtw=r&\x0cD\xb7\xee\x1d\xe0\x0b\xe6\xa0\
M\xdb\x1c4\xc5\x05^z{+\xbau{\x18\xc0<\
\xac\x9bw\xb9o\xdb\xa6\x9a\xb443\x92\xd9\xb5\xad/\
'/\xe9\xcf\xaf\x89!\x92w\xc4\xc0\xf0\xa9/\xb89\
\x1a\xa9\xbc\xa0\xcf\xca\xc8\x087+\xe5Z\xa9\xbc\x0c\xbf\
\xd5\xa7ca\x95)\xf5\xaa\x86\x86\xfa\x0dU\xf5\xf5I\
\x7f8\xa8\xfe\x9ds\xf6}i=\xac\xc66nFM\
m\x1cG\xdf}\x07-\x196\xe20\xb1c\xfb\xef\x91\
\xb4;\x09\xa1\xe1\xe6\x04\xb6T\x04R\xb7\xde\xbat\xdd\
W\x00x\xf6\xae\x0d\x7f\xb7\xcd'\x1f~\x01\x96U\x1f\
y\xe3\xcd\xfa\x13j\x12\x19W\xb3\xcf\xdfEJ\x7f\x0a\
\xa6\xf2\x99*\x08\x9f\x0f\x08\x86\xfc\x08J\xa8\x84\x99)\
\x5c\xf2\x81\xc00\x0cV\x81\xb4P\x12\xd1\xb0k\xbb\xa6\
\xd6R\x0b-4\x19F\x04a\x8a\x90\x8b\x10\xac\xefY\
\x882\xd2\x0c<\xf2\xec\x07\xe8\xda}\x5c\xbf:\xdb\x7f\
\xb7M\xfeC@nPR\xf4kW'\xe0\xe8\x9f\x1b\
\xfd/\x93\x7f\x1b\x80\xaf\xbe\xfb\x05D\x1b\xe3\xb8ax\
\x91\xf9\xf4\xabk\xa6:J\x8e$\x92-\xe75\x02H\
\x00L0\x84V\x85y\xe6\x9b\xc3J\x12\xcb\xae\xbc\xed\
5\xa7[I\xde\xa7\xab6U\x1ci\xa5\xd4X\xc0\xb3\
\xae\x11\x01\x02\x06\x94\xab\xc2\xdb\xca\xed\xe3?Ye|\
[m\x05\xbf,\xb9\xfb\x0d\xfc\xf5\xc5\xf7\xb0yG3\
jb\xc0\x86\xfa\x5c\xbcz\xeb\xe1`f\xbc\xf5\xd2K\
\x81\xad\xabw\x17\xf4l\x1bh\x93\x7fj\x8fN\xf5\x8d\
]\x0aO\xba)\x96\x95\xe4\xb4\xbc\xa8\xa6\x02\xcbq\x8b\
\x5c\x85\x0eZ \xc3\x0c\x1a:'M\x97e\xa7\x19\xeb\
:d\xf9w\xb5\xc9\x0d\xada\x8e.n\x93\x9b\xbe+\
?\x92\x99\xfc\xe4\xebY\xea\xdd\xaf>\xd2\xbaz\x0f\xfb\
\xfc\xe1\x7f\xd7\x94\xed#\xad\xe0m\xd8]\x81\xeb{\xf6\
\xc7\xb3\x9c\xc4\xdc\xe1\xe3\xc7b[\xe9=\x22\xa1\xbaA\
kp\x86,\xb5K\xf2\xee*;\xf7\xb0\x0f\xcf\x7f\xe8\
\x03u\xce\x9d\x0fc\xe8\x11\x87\xff]\xeb\x8c\xad\x5c\xd4\
5\xc7\xf2l\x1d\x98\xac)\xd0\x13\x86\x01b\x994$\
\xe0\xb3\x1d\x0a\x90\x84/\x9c\x0d\x9b\x115\x84~?@\
\x95\xb9\xc2\x90\x85YY\x91\xf9\xfe&\xfd~\x93\x91K\
B4\xbe\x0f\xd9|\x90\x196\xf6\x88\xb4\xc0\xdf\x82U\
YQ\xdb\x17\xda\xe79{\xaa\x9b\xc1\xfc*e\x15=\
;B\x99\xfe\xf1\xe4\x13>0\xb1)\x98} \xf8\xc4\
\xafcA\xfa\xb7\x01\xf8\xa1[\xceF\xb8\xcbi\xe8\xd0\
>kP,\xc1'\x11\x99\x19\x1ev[t_M\x00\
422\xe4\xd2\x82\xf4\xd8\xf4\xd5\x15\xe1\xd8\x94c\x0e\
\xc2\x8d\xe7N\xdc<p\xf2\xdd\x7f\xdd\xb6\xbbi\x80`\
\x99-H\xc0S$\x08\x04\x81\xa6\x98\xee\xb9\xb5\x5c\x1d\
\x7f\xde\xc1[\x97\x5c\xf6\xa1\xd5\xb8\xe7\xcb?\x8a\x1d\xeb\
\x16\xc8\x97\xde\xde\xe0\x8f\xd6\xaf\xeax\xe81\xf7\xf4\x1d\
4\xe1\xde\x1e\xb1fQ\xe8j\x7f\x81\xa5D\x1b\x8b\xb9\
\xa3K\x81,-$4\x08$\x89}Ai\xb7\xcd\xd0\
\xbb\xb2\x22\xf4I\x87\x82\xc0\x86.\x11g5\xa9\xb25\
\xe7\x8f\xc8\xa9\xbc\xe9os\xd4Ko\xbe\xc1\xd7\x5cz\
.\xed\xaa7\xa8m\xbf\x12\xdd}g\x17$}~\x98\
\xa1\x106o^\xfd\xef\x9a\xb6}\xa4\xb9\xae\x16\xd9\xed\
\x8b\xf1\x04\x80y\x87\x9f>\x82Kw\xde-RN?\
(\x1b\x1cT\x95N\xef\xb6\x7f0\xee\xb8\xfe\xad\xc1o\
\xbf\xabr\xba\x840\xf4\x88\xc3\x7f\xb6M\x13\x84\xb0!\
\xc8$\x10I\x13$\x04\x04\x1b0\x0d\xc0gX\xf0\x99\
>|\xeb\xfb#\xfe\xd0\xee>\xc76c\xcf[nr\
E\x22\xe5\x16C%\xd6\xad\xd95}sE\xc3\x13|\
\xd2\xf17\xdd\xd4\x14\x17}3\xd3\xcc\xaa\xcc\x5cs\xc9\
\xf4\xf22k\xf3\xba}\xe7$\x96L\x01x\x0b\xac\x98\
\xe07\x88\x04ih\x07\x04\x90)$\x99\xff\x93\x01|\
\xec\xc9\x97\xa1\xde\xc9@vn(2\xef\xdb\xb2#Y\
\x1b\x03@\x00Z\xac\x0e\xac\x01h \xe8\x17\xd1\x92|\
\xf9\xda\xa8\xb4\xe5+\xb7&\xbb\xa3mF\x04'^\xfa\
\x0c\x8f\x1e\xd6\xfe\xab\xaa\xfa\xcd\x1f\xc5c\xfat\x86\x04\
\x0b\x06\x11\x83\x98\xa0\x15aO\xa5>|\xf1\xd6\x8c9\
\x93{u\x997\xf6\x98\x87\x87U\xd5\xc5F5'\xb8\
\xd0aYh+Y\xc20\x0b\x01SB\x98P (\
\x00D\xa4\x82\xa6N\x15dSEQ\x861\xab\xa8(\
\xb8\xa8o\xae\xb3%\xc3-\xdfpF\xf8\xa5\xda\x17\x1a\
FQ\x9f\xc3N\xc5\xd2\xeaZ\xf3\x84\xd3OK;\xf1\
\xccS\xdd\xb5k7$7\xa56\xe9'~w#\xfa\
\xf5\xe9\x87\x1de?\xd6\xb7\xa7M\x9b\x06'\x1e\xc7\xca\
\xa5K\x91\xd4\x1a\x90\x12UUU?;G\xbdz\xf5\
B\xc7\xbc<tm\xdf\x1eU\xa9\x14\x12\xb6\x8d\x0f>\
\xf8`\x9fk&?z\x09f\x01\xf0\x9du\xf5\x00\xfd\
\xe5\xa7\xb7\xc9xr(X\x81M\xb7F\x17\xe7?\xe0\
\x8e\x19\xffr\xe0\x85w\xedI\xa5\xeb\xf1\xc2\xa0C0\
\xfa\x17\xd8\xc5\x09\x80i\x1a\xac\xe02\x91\x00\x98!\x88\
\xa5\x01\x0d\x17 \x03\x03\xac+p\xc7\x98IX\xb3\
\xb5\xc6\xceO\xcb\x5c\xd8\x1c'\x84\x9c\x18\xbe\x98\xf5\x06\
\x86O\xbe\x1e\x13G\x8d\xda\x5c\x17\x8fnN\x0f+\xd4\
6:\xf8\xe3s\xaf\xa0\xa2,\x8e\xf4^y{\x9f\xa3\
\x89\x000@\xa2e\xd3\xf2\x1eNL`0~\xc9I\
\xa2O\xffqh\xdf\xb1\x0d\xa2)\x03\x85iQ\xbc\xfd\
\xf6\xdb\xff~\x00?\xfde=\x16O\xbf\x03\xd3\x9f\xaa\
A\x97\x83\x82#,\x9bO%\x92\x06\x88AB\x82\x00\
h&\x08f\xe4\xe7\x1a_\x0d\xe9\xa2\xdf/\xd5'\xa9\
\xd4\xae\x8d8\xfd\xf8\x91-\x96\x81\xc2\xcaC\xa6^\xf7\
\xe6\x8a-\xf5\x07j\xe6.R\x1a\x00+\x80\x01\x09\x13\
\xd1\x94(\xfad\x89}G\xca*\xafu\xb5(\xd6\xec\
/\xf6VC\x02,\xc1\xd2\x04k\x0d\xd2.\x82\x82\xa3\
\xf99F}\x87\x02\xb9\xbc8/\xf0i\xaf\x12\xff\xea\
\xb6\xb1\xd2\xedSo9\xaff\xfe\x97\xef\x04D\xfa!\
\xe6\xac\xba\x81\xd9\x1d\xc0\xb9\xc9x\xcav\xb4\xae\xa3\xa4\
\x93\x5c\xbab\x91~k\xfa\xfb\xea\xea\xab\xae\xe2\xe7\x9e\
{\x0e\x9d\xbat\xc2\xea\xb5\xdf\xed2\x5cU\x85\x97\xce\
<\x15FQ6\x06\xef1\xd03RMh\xd3\x87!\
\x04\xf0w\x00\xfc\xfay\xe7\xa0\xdb\xb3\xcfcSZ\x1a\
F'\x5c\x5c-\x84\x0cK\xa9m\xd3\xdc\xbb\xa4\x1b7\
n\xc4\x82\xedK\xf1\xf2]o\xa1\xee\x8e{:\xaaW\
\xdf\xbcJDc\xe3H\x1b\x02a_\xc2W\xdc\xe6\x19\
\xa3\xf7\x90\xe7\xb2\xca\x1a\xe2\xbb\x0brPn\x13\xfcY\
m~\xd1\xfahM Hb\xd8\xc4\xa2\x05\xd1`\x18\
\x82\xc0\xda\x02X\xc2\xdd\xf2\x1an\xf9\xddk\xfb\xdc\xd7\
\xaa\xd2T\xd7\x071\xfd\x8b\xcd\x88\x89\xfep\x96\x9dD\
\xe8p<>\xfbf-\xef^\xf2\xca>\xd7;\xad\xb9\
\x12\xdc\xf2[\x10\x98\x09B\x10\x94r\xa1\x7f&\x97\xe2\
\xe1\x99\x8c\x17\xae<8#\x14 7\x99t\xe2\xd1\x8a\
-\x18}\xe0h\xcc\x99?g\x9f\xeb~\xf5`\x9e\xcf\
\xdf{\x02\x1b*B\x18wb~^4\xea\x9e\x00\xc8\
v\xde[hx^c-@\x9a\x10\x89\x88\xd2^\xed\
\xfc\xaf\xdf\x7f\xff\xc3\xa5mRK\xf0\xe6\xdb\xcf\xe1\xd1\
'\xbe\xc1;\xef\xae\xc2\xc9\xe7\xdc\x85\xd7\xfe0ef\
v$\xf0\xa9\x80\x04k\x01F\x00\xa0 $\x05!\x94\
\x1f\xd1\xa4\xaf\x93\x0d9\x94\x85\xd1\x96\x84_\x10\x07\xc0\
\x08A\xc3\x80\x097Y\x10\xd1\xa5\xa3z\xd0\xdc\x13F\
\x07\xee:or\xe6I\xa7\xf5Q\x17\x1f\xf0\xe7\xb3\x9f\
?tx\xc1\xca\x9c\x83\x86\x05g\xcd\xfa\xa6s\x8a\xd2\
\x87\xc6\x9a\x9aF\xa7\xacD61\x97\xef\xd8\xb2\xb5\xf2\
\xb1g\x1e\x8f\x1d9uJ\xca0M;==\x9d\x8f\
?\xfex\x00\xc0\xfb\xef\xbf\xbfw\x8c5G\x0e\xc3\xfa\
\x9b\xcfA\xbf&\x81S\x1f\xf9\x0b*\x0c\xd9\xfe\xb3N\
\x93G\xff\xa5\xc6\x8c<\xfaw\x22?\x97]p2z\
\x0c\xea\x03\xa3\xdf\x0089\xe9\x81]i\x81^g\xf5\
\xeb3\xb0\xe7\xa0!\xbeA]\xba\xee\xbdn\xcb\xd6\xad\
p^\xf9\x1cM\x1f|\x9cO3>\xbaD\xd6V\x1f\
k\xb8\xd24\x84\x03C\x1a\xf1\xb0\x1bn\xf0W'{\
\xc6\xda\x14w\xd6}\xdb\xfb\xb7\xf7;\x09\xfd\xa7\x1e\x87\
\x15s\xe6\xe0g\xc5aH\x050+\x80\x18\x90\x00\x19\
\x02R\x805\x9a\x99\x90\xd8\xefm\xc5\x1d\xc6\xa2}\xfb\
\xaePn\x18kg\xdd\x07r+:$K~7\xae\
\xa6\x86#Ue\xf1\x1f]O0\x00\xe4\x80\xc5\xdeC\
\x0c\x0b\x12\x10\x10\x7f7\xb6{\xe8\xf8K\xd1n\xe0I\
\xb8\xff\xd2#2*\xe29\xbf[\xb5\xc5\xb9\xb6 \xd7\
\x9f[\xd9\xef/H\x0b\x13\xc6\x8c\x19\xf3\xef\x03\xf0\x98\
3\xce@8\x12\xc0\xed\xf7\x8d\x17\xdbw%FY\x0e\
\x8e$\x92\x02\x04\x90ly\x1c\x13\xa4\xd4nI\x1b\xe3\
\xab\x11]\xfc_\xdc\xff\xc0\xcb\xb8\xfd\xb17\xf0\xc2\xeb\
\x8b\xf1\xf6\x97+0\xed\xb8\x01\xc8\xcb\xf4\x17\x1fy\xd9\
\xfbC\x94\x96~\xb0\x99\x84\x96\x10\xca\x80\xd0&\x00\xcf\
\xf5,\x85\x01\x83} \xed\x07k\x09!\x18\xe9a\xbb\
\xaaw1-\x9c2\xd0\xf7\xcc)\x07G\xce>\x7fl\
\xee\x09C\x12e\x8f<v\xfd)\x0b\xd2:e\x87\x0a\
\xdf|ctiMlL\xb2\xb9n\xf2\xeem\x1b\x07\
l\xd9\xbcq{\xd5\xae\x9ds\xdf~\xfd\x9d\x9d\x93\x0e\
\x9f\x1cM&\x92\x09\xdbu\x14\x11\xe1\xc9\xbf<\x83\xc5\
\x8b\x16!33\xf3G\xe3\xcc\x9d\xf1&\xcey\xee#\
\xf8C\xe9\x05\xab\x86\x8d=2w\xce\xd2G\x8b\xd6\xef\
\xfcS\xe7~\xdd\xf3\xba\x0e\xef\xbf\xdf\xb9\x99\xfb\xf5\xc7\
h\x1e:\x08\xbaGw\x1fr\xb3\xfb\xf6\xdbP~i\
x[\xf9\xcb\xa1\xa6\x86)\x09_\xc0d\xeb;+\xd4\
\x88\xe1\xc3\xf1\xea\x1d\xb7@\xbc\xfaV?\xda^z\x92\
pDD\x92\x0d\xc9\x0c#\xa5\xb3\x13\xbbw\xdd\x14[\
6\xf7=\xf1\xe6G/e\x7f\xb6\xf9<\xd5\xa6}\xc9\
\xaa\x9e}\x10i[\x88\x8d\x0b\xff~&\xb7c\xbb`\
EPP-\xab/\x00A\x90\x82\xe17\x5c\xf8\x8d\xfd\
\xfbA\xfa\xe0\x1b\xec\xde}-\x88\xa8\xa8\xed\x01\x7f\x9e\
\xd2\x14U\x8f\xc5\xe3\xd6\xbdN\xb2&\xc7N\xd6\xfc\xe8\
ziG\x00\x1c\xec\x99\x9c\x88@\xf0\xc2\x05<M\x92\
\xb1?\x15\xf8\xb4\x97_Fa\xfb\x02\xec^\xf1\x06\x98\
s\xc6\xa48|Je=]\xb3xC\xfc\xb6N\xd5\
\x8fv\xfa\xf8\x8bY\xf0[\xc0\xa4\x81G\xef\xbd\xe7W\
S!\x98\x19\xe3n,\xc2\x92o\x8b\xb1u\xdb\xec6\
\xd1\xa8u\x0e`\xe4z&0\xcfa\x01\x06\xc0\x1a\xd9\
\x99\xc6\xd6\xae\xf9\xe2\x85kn\xbc\xbd\xe9\xee\xfb_\x04\
\xf9N\xc5\xd6M\x8f\x99\xef\x7f\xb2\xb0G\xaf\x83\xef\x1e\
\xfc\xee\xbc]cc)u\x88VF. \xfc\xb25\
6B\xb6D\xaeABk\x01\x07\x1aa\xbf\xed\xe6e\
\xc8\x8d\xedr\x83\xcb\xfb\x14\x88\xd9C\xda\x84gf\xa4\
\xaa\xab\x96\xae\xf9c\xca\xdf\xe3\xdeL\x7f\xaf\xc0\x84\xdf\
?xW\xb6\x9dJ\xa4+\xe8\xe2\x8a\xb2\x9d3\x85c\
\xbf\xf6\xc9{\xd3\xed\x8f\x16\xad\xb4\x1e\xbf\xff>N&\
S \x22\x98~?\x1c\xcb\x02\x00TVV\xfe\xe4X\
7\x17\x1d\x82\xd3r\xf2z8\x9b7\x5c+\xc89R\
\xd8\xf5\xf9\xe4\xcb\xda\x963b\xa0Pa?\xf0\xb7W\
~t\x8f\x94\x02\x0d}\xba\x05r\x1exv\x126l\
\xbc\xd1\xb0u\x7f\xf6\x054\x87\x8d\xcf\xc1&\x0b\xe9\xdf\
{mVz\x1a\x9ef\xc6\x86A\xa3\x83l!\xc0 \
\x90\xd0\x10$A,\xa5\xf2#\x9b\xc8\x85QU[(\
\xbe\x9c5\x90\xca\xea\x0e\xe9\xfc\xcc\x8b\x7f\xcc\xeb\xd8u\
\xd9\xee\xc6\xa5\xea\xf3\x97\xff\x8a\x89\xa7\x9f\xb5\x7f\x00[\
.\x0c\xc1\xd0\x9a\x89\x84\x00\xd3wq'&\xf9`\x90\
\xb9\xdf\xfb\xbe\x95\x97 \x90\xfbj\xcfX\xfc\x88\xdf\xb1\
!'+V\xb9\xd0\xd8JB\x09V\xfb\x01=\x07\x00\
tm\x09\x0dh\xf9i\x010\x93\xf4\xacP?\x90\xac\
\x86\x06|\xf4\xc9b\xb4\xef|t\xe7\xfa\x94>\x1b\xc2\
\xd7V\xb3\xa2\xeaz\xfb\x9cUp\x03S\x8f;\xed\xde\
>\x0bf\xed\x9c\xd5c,F\x0c<\x16\x0bVL\xff\
\xf5\x00|\xe8\x99\xd7#\xdd\x7f1\xfaML\xf3M\x9f\
\xb1c\x8a\xab\xe8@/F\xbd5\xcaA@3\x10\x08\
\xd1!\xd7x\xbfi\xc3W\xcb\x1d\xcaA\x8fwf\
\x84\x8e\x9cZ7t\xd2i\x8f\x8en\x8c\xebq\x8e\xa2\
\x03\x18\x22\x00H\x12\xf0vn\x09\x0f\xb8\x9a$4\x1b\
\x00\x03\xe14\x1dm\x97g,\xee_\x18^\xd4\xaf\xbd\
\xf1\xd5\xb1\x87\xf7X\x94\xdfwh\x92\x88\xf8\xf3\x97\x9e\
\xef\x90\xd74\xf1\xb0\xf2\xf5s\xbb\x05B\x01\xd8\xca\xa9\
\xd8\xd3\x18\x9fa\xf82w\x5cq\xe3\x8d\xce\xdc\x0f\xde\
\xd4\xdf|\x99\x063d\xe2\xaa\xbbnC\xabW\xaf\x15\
\xbc?'\x94\x8a\x03N\xf2 \x98\x893D\xd84\x88\
\x1d@%\x19\x96\x05\x16\xfb7p\x123\x22\xae\xc8B\
E\xc5\xb1\x22\xa5\x87rP\x83\xa0\x12$\x0de\x04\xd3\
\x83\xe1}\xae\x05\x00J\xa4H\xba 6\x14\xd0\xf2\
\xc5'\xb8 \x87 \x84\x01m\x1a\x10v2d\xae\x5c\
z\x8c\x9b\xd4Y\x1b\xb5\xbe.\xe3\xc2\xb3\x97\x19/|\
\x84\x9d\xaf\xbc\x82\x0e\xa7\x9d\xf6\xa3~\xb8\xb6\x82\x90L\
\xe0\x16\x1b;\x03\xa4\x19\x82\x09\xa6/\x00i\xf8\xf6\xdb\
\x7fF\x1a\x94\x12#\x1d\xc5'\x1bR\x1a\x82\x09\xa4\x95\
\xd6\xb1*&\xed\xfc\x88\xe4A\x04\x05\x80\x00\xc8\x10`\
\xdak\x7f\x82\x10\x04\x01\xd9\x12:\xbb\xaf8D\xc8\x8e\
H\x84#Fa\xbc\xd1\xed\x93$\x1f\x09\x18`p\xa8\
\xba\xd1=ec\xb9\xdb\xd4\xf6\xf2+\xee\xec:\xa6g\
\xec\xd9\xa7\xcf\x05\xd1\xaf\x04\xe0i\xd3\xa6\xc1\x97\x17\xc2\
\xab\x0f\xac\xc5\xe0\xc3\xbat\x8d\xc5\xf5\xa9\x80\x91\x09\xe6\
\x96@\x1d\x01\x80\x14\x08H3\x9c\x15\xd9hzq\xc0\
\xb4\xfbd}\xee\xca\xc3\xafx\xe0\x8b\xc3\x9b\x93j\x94\
\xabdO\x82!\x09\x04\x12\x0ch\x01\x09\x09\x80\xa0\xc8\
\x84f@\x92\xd6\xd9\x19\xaa\xb6c\xbb\xf0\x97\x03;\x84\
g\x0f\xcaw\xe6\x9dp\xd9i\x9b\x88H\x1f3nY\
p\xce\x0dW\x0c{|\xda\xa4\x89k\xbf\xfa\xb0K0\
+\xd4\xc4\xb6o}iU\xfd\x8c\xbb^x\xb3\x8c\x99\
\xf1\xc2#\x0f\xe2\xd0\x91\xc30j\xca\x09\x88D\x22p\
\x12\xff\x9c;\x9a\x94F:\x81\x04\xbbD\xae\x01\xe1\x02\
l\x0a \x18\x02\x82\xfb\x07@\xc4\x07\xa4g\xa419\
\xb6\xd6\xd2\x00\x19\x0eH\x01\x82\x04\xd2\x83\x06\xcc\xe0w\
K\x91L\xa6\x10\xf2\xf9)\x96\x164\x9dA%M\x22\
?\xb4< |\xdb\x0d\x96\x09\xdd\x1c\xcd\xe3\xca\xcaA\
T\xdb\xd4Ih\xe9c\xd3\x04\x93M\xe6\xb6\x95c\x92\
\x1fg\xdc\xb8\xe5\xdd\x0f\xaep\xf6\x94\x97?\xf3\xf6\x87\
\xfb\xed\x87\xeb\x02\x06\x01\xd4\xf2\x11'\xf6^\x18b\x86\
\xe9\xf7\xc10\xf6\xafU2I0IbA\xa2\x15{\
\x82\x00SJ(\xed\xfe\x08\xc0\x19A\x09 \x0b$\x05\
\xb3 xOh\xdd\xcb\xfc\x80\xf8\xf1s\x1e?\xfbl\
\x1c\xf4\xfe,\x04\x03j\x8d\xf0\xfbn\xdfY\x9d\xbc\xca\
!c0\x91\x80b#TQo\x1d\xbfrK\xed\xbc\
+\xc6u\x9f1m\xea#\x5cRR\xf2\xeb\x00\xb8\xf1\
\x9a\xb7\xa0\x1f\xff#\xce\xb8nX\xe0\xab\x99\xa5gi\
%\xfa\xb3\xa7\xb8C\x0a\x09\x86\x00\x83\x94\x1f\xf1\xe6\x9c\
`\xe3K\x95\xbaw\xd1\x93\x7f\x9b\x7f]<\xa5\xc68\
\xa0.`\x13\x02\x02\xc4^\x1cp\xab\xcd\x97Y\xc2q\
\x00\xe9s\xed6\x99\xa2\xa6g\xa7\xe0G\x03:\xe5|\
:\xbamx\xc9\x98\xb3\xee\xd8\xc3\xbc\x94\xb6<\xf9`\
\xe6\x9a\xd3\xa6\x1d\xb8\xee\xc6\xeb\xa7\xee\x96v/.\xcc\
\xac\x0c\xe4\xb6y[e\x86f\x06J\xe7W\x1e>\xf6\
D\xac\xdc\xb2\x07\xfd;g\xe3\x9e\x93\x86b\xfd\xf6\xdd\
\x00\x80X,\xf6\xcf\x0fX\x112\x98 \x14 4{\
\x1fF\x22 \x9c\x06\x84\x03\xfb\xbd\xc5O\x06B\xc1\x10\
\xe2\x8e\x04\x19\x12l*0\x0c\x08S\x22lX0|\
\xdf9\xf8>\xfe\xf4S\x8c>\xf1D\xbci`\x89\xe8\
\xd0\xf1\xa2\xe2\xc1\x1dJG\x8d\x1fR\xdd\xb3\xff\x14{\
\xd3\x8e\x95\xe9\xf6\xe7_u\x08|\xf8\xcd8^\xbb\xe5\
B\x91j*\xd4A\x13d9\xc2\xb7e\xd3\xd8\xc8\xae\
\xedC\xc6^\xfd\xca\x9e\xcb\x87\xe6\xef\xf7\xa4\xe4\xb8\x0c\
Cz;.\x81\xc1\xbc\xf7\xfb\x08\x98\x04\xfa\x89\x14\x07\
M^\xd4 \x11\x98\xa0\xc1\x9aAd\xc0L/\x82p\
\x1dX\xb5\xdb\xf7\xb9>\x92.\xbeC9\xbc%eh\
0k\x08)\xf7\xebl\x11\xe10\xde~\xe5)\x9cw\
\xf9\x1d\xcd'\x1d\x9b\xf1\xc6K\xef\x94\xa6\xcak\x9c{\
\x14\xfb\xba\x13I$\x1d\xb4\xdb]cO\x9b]^\xff\
u8/7\xea\xa6\x12\xbf\x0e\x80{\xcc|\x04\x8f\xfd\
\xedc\xf4\x1b?bP\x22\xa5&0\x19\x01\x90\x97\xe5\
\x06H\x10\x0b\x08\x22\x8e\x84#\xb5\x8d\xc8\x1aQ\xbb#\
q\x89b\xee\xab\x85\x10`\xef\xdf\xa5\x16 bhA\
`\x96\xd0\xac`\x9a\x96\xd5!\xcb,\xef]\xe4{w\
hq\xf8\xe3\xfe\xc5\xc9u\x9f\x7f\xba\xa2v\xf4\x97\x7f\
\x10\xdbV\xcd.\xd8p\xcc\xc4\x03c[6\x9c\xc1u\
\x0d\x03\xa8{\xbf\x881d\xc4\xf3\xcc\xea\xe9\xce\x8b\x9e\
\xda^;\xe5fD\x07=\x8c\xb5\x95s0c\xee\x5c\
\x00\xc0\x91\xf7|\xfek\x0c\x17\xa4\x0d\x04\xb4\x01\xd1\xaa\
\x9b\x93\xf0t:\xd3\x0f\xf2\x07\xf7{\x8f\xcf\xf4#\x10\
L\xa7\x94\x96\x04\x96\x80\x92\xd0dB\x18\x82B\x22\x01\
)\xbf;\xc4\x0d\x18\xd0\x1f\x05\x00\x0f\xcc\xcf,uK\
\x8aJ#\x05\xed\xe0KF\x90\xa8)\x057\xa5\x9a\xdd\
\x8d\x1b\xcb\xb2?\xfedy\xdd\xb1Ski\xd1\xe2;\
\x04\xbb\xb9\x90\x80\xd9X\x19\xa6M\xeb\x0f{\xe2\xdei\
\xb3\xe5\xd6\xad\x8dX\xfc\xe3~(\xad\xa0\xa1\xbd\xfdP\
\xc1\xb3\x12@\x03\x9a\xe1BB\xf1\xfe\xcd[$\x09$\
\x0d\xef\x1c.]\xb0\xd6\x80\x00\xa5\x17\xe6\x92\xab5\xac\
\xda\x1f\xdc \xbd7\x81H\xb4\x1c\x7f\x18 \x06\x13\xa0\
\x0d\xc1\xea'H\x5c\xda\x16\xe7\x03\xda\xc5\xe2U\x095\
\xf9\x90\xc2O\xde\xf9\xa0lDm\xcc\xed\x08C\xf84\
\x09j\x88:}\xd6n\xd9\xd3{\xea%o-\x5c\xbe\
t\xce\xbf\x0e\xe0+o|\x1e\xabVV\xe1\xb8K\x8e\
L\xffv~\xd9\x19L\xb2\xd7\xf7\x83\xd4\x09FK\xec\
\x03\xcb\xe6\x94Y\xa4\xe3\x98\xc6\x10!H@\xb0df\
A\xb2\xc5\xd9\xc6\xda\x84\x86\x86\xe9\xb7\xed6YbW\
\xdf\x1c\xfa\xb4O\x16\xbf\x96k\xad\xd9\x90\x8cvm\x1e\
{\xf5\x8d(\xd8yI\xdb\xf5cG\x8fH\x94\x96\x9e\
\xa5\x12\xd1\x81L\xaa\x80\x04S`Oy\xbc\xc8\x17l\
<\xe5\xae\xdf\xef\x98\xf3\xcd44\xcc\x9a\x83\xcb\xee\x1a\
\xfa\xab\x00\xf6G\x8bI&\xfcp![\x8e?\x10&\
4\xb1\x97M\x22\xf6\xff\x09\xf6\xb9\x1a\xa6\xe1\x83\xd9\xfa\
!%\x09\x90\x86d\x9b\xb2\x94\x05\xa1\xbe\x03p\xb7\x9e\
\xbd\xbe\xbbq\xc6\xbe\xed03\x1e~\xe4@\x14\x0c\x1c\
\x96\xc8\xbf\xe3\xda\xd7\x9a\x8e?\xa7\x97\xd3\xd0|\x11|\
&\x91n\xf6\x05\xd7\xaf;\xa0\xcb\xa8\x83\xd2\xb8(\xaf\
\x11/<\xff\xa3~8\x8e\x02\xfb\x00\xd6\x9e\x12\xc1L\
\x9e\x0a\x01\x90k'\xe1\xda?\x01\x09\x9f\x04\x0c\xc3\x03\
#\x03\xadDB\x85\xf9\xd9\x80\x94\xa8\xfd\x81sR\xb6\
\xcc\x03I\xe1\xcd\x09y\x1f\x04\x16\x0caj\x90\xf3\xe3\
\x0f\xc4\xf7w\xe5\xcb/\xbb\x02\x0f\xfc\xf1\xa1\xe4\xa0\x03\
N\xfe\xb0\xb6\x19\x93\x00\xf4\x96\x90p\xb5\xd3\xb6\xa2&\
1\xe8\xe2\xc9\xb4\xa8C\xaf~\xfc/\x03\xb8_\x97v\
\xf8\xf3}\xe7\xa0\xfb\x98\xab\x0fM\xd9<\x11,\xa4g\
6!\x10K\x08!\xc0\xc4\xd0,\x0c@\xa6\xb3`\xef\
d\xaa\x05\x11\x09o\x12]\x098\x04i(]T\xa0\
\xb7\x0e,\x14_\xf7\x8aX/\x06\xb7nX\x7f\xdc\x91\
wE\x9f8\xa3\x17N\xbd\xef\xb6\xe2e\x07\x8f8H\
\x95\xef9]\xc7\x13\x07\xb202\x85\xf6\x0b\xc16\xc8\
\xd0\x10\xb1\xdaph\xc1\xb7\x93g\xbf\xfb\xf6\xa7Y\x85\
\x19+G\x1d;\x11\xb8\xeb\xc6_\x0b\xb3\xfbN4\x18\
>O\xe9\x81 \x0d\x16\xdeo)\x080\xf6\xff\x0d\x0e\
XI\x98.\xd8p53\x03L\x0a\xd0\x0e\x0c\x95\xd0\
\x11\x91\x01\xc1\xf6/{\xf6\xf72R\x16\x135dd\
\xe5\xac\x15\x1c`HI\x9a|\x08n\xdd\x1d6W\xaf\
7T`\xff\xfdH\xb2@\x80$\x5c\x06\x13k\x08H\
0\x88]\xc5l\xca\x00\xe0\xee\x7fg\x14\xac\xa1\x85\xb7\
\xd3\x90\xf6T=\x12\xc4\x05\x99\xe9\xec\x0f\x05\xb0\xf2\x07\
\xd7\xa7\xe7dz\xfd\x15\x8a \x18\x0c\x0d\x22\x0dCh\
\x0a\x08I6\xfd}W\x5c\xa7N\xed\x91Y|4\xfc\
A\xffF\xa2T%3\xf7\x06\x01\x9aeNMC\xb4\
+\x00jl\x8a\xfek\x00\x1eq\xf2\xad\xf8\xf3\xdb3\
q\xc8\xb4[\xda\xaf\xdd\xd4x\x12\xb3,\xf1v\x96\x96\
\xe8\x05A\xd0\xdf\x8bcP\xc4\xc4`o\xe9[\xe2!\
\x94M\x10\xdaAf\x16o\xef\xdd9<kd;~\
\xb5k\xcd\x8eE\xc7>\xfbD\xfc\x11\x22\xa4\xae9%\
k\xca\xd8\xb1c\x9a\x9ex\xf98]_w8\xa5\x05\
2\x85\x19\x00i\x0d\xd2\x0e\xc8'\x00\xbf\x01\x82\x03\xb9\
s\xd3pc\xe1\xfc\xe3\x0b\xff\xf4\xf0&\xa7\xb9*\xa9\
\xb5\xfe.\xf6\xe2W\x14!\x08&KHb\x10\x14X\
x;\x92\x80\xcb:\xd1\x80Wn\xbd\x14\x92]\x18\x10\
\xc8\x0a\xf8Qh\x04\xe0\xab\xab\x86?\xd9\x96\x0c\xc7\x12\
\xcaU\x00+0,\x12\xca\x09\x98\x99Ed47a\
\xfe\xa3waOu-\xe2\xcd5P\xa9Fh\xdb\x81\
\xc1\x1a\x12\x12~S@\x18!\x88\x8cB\xcc~\xf4\x8f\
\x18s\xf9\x0d\x88\x042\xa1\x12)M\x01\xbfg\xce\xb5\
\x18\x1a.dU\x1d\xc8\xdc\xff\x98\xb5_\x82\x03\x0cK\
24\x14\xc0\x06\x942\xc8\xd1,\xf2\xdbw@\xbd\x03\
\x5cs\xf1uh\xa8\xaa\x86\xa2\x18\x84\x0f\xb0\x1b\x1a0\
\xab\xd1\x8f\xeax\xa4\xc5\xfdL\xd0\xcc\x08\x18\x06uh\
\x9b\x0f\xad-\x9c{\xfe\xcdpU=B~\x8dP\xc8\
\x00s\x14\xf3\xd7\xce4\x84\xd4!\xb0&\x90\x02\xb3b\
)\xcdT(\x10T\x96\xa3\x90\x97\x97\x87\xea\x15+0\
x\xdc8\xa4gf\xc2\x17\xf6\xc1\x170`\x800\xfb\
\xcbO\x90\x91\x95\x89\xb2\x9a\xfa\x18D0\xe5e\xf0\x10\
X3\x1c\xdb\xf5\x07\x00h\xe7_\xb0\x03_\x7f\xe7c\
(\x8fk\x1c{K\x1fy\xd5\xd9\x1fN\xb4]\x1aO\
\xf0\xc0K-;pk\x06\x01\x83\xc0\xdeq\x17B{\
\xff\xa6]\x01\xed*\xa4\x87\xed\xfa\xae%\xc1\xcfG\xf6\
\xcd~\xe3\xe4\x91\x85_\x7f9\xe1\xe8XCq\x0fh\
@\x0e>\xf1\xb4\x11{.\xbf\xeeD\xae\xa8<\x16,\
\x8a\xd0&\xbf\xcc\xd7\xa6\xfd\xa7\xa2t\xf7`\xe6x7\
\xf8\x0c@2H0\x04\x03\xc2\x8a\x19\xb4`\xd9\xd1;\
\xdez\xfb\xab\xf2\xaf\xbe\xf8\xfa\x90\xbb\x7f\x8f\xac\xfc\xc2\
_\x15\xbc\x00@\x86\x1f>\x10\xc8\xf0yj\x03\xfb\xa1\
}F\x18!\xa3{\x22\x81\x9d\xa7v\xaf\xd4h\xf76\
P6\x0f8e\x14\x00\xc0\xed \xb0\xf3\xe2\xdf\xb5\xf5\
\xe9D/\xef\xf5\xf6\x81\x12\xd1\x80\xdcU\xd3/p\xd6\
\xd8\x88\x9a<-6\x02\xc0/\x0a\x12 \x82u\xc5\xef\
\x10\x7f\xe9\xc1\xb4\xc8M\x8fu\xe6\x94E\x10.\xc8J\
A\xe7\xe5\xe8\xc4\x01\xc3\xb5\x0e\xfa\x80\xe7~\x1cx\x9f\
\x9d\x19@Z(\x99r\x032\xe1\xda\x0a&1\x1c\xf8\
\x82Uqc\xc0\x88\x09SC\xc7\xf7\xbf\xd5\x02V|\
\xc7\xa5\xd9\xf2\x1e,\x7f\xe1\xda\xc0QO\x14\xb7iV\
\x01R\x0a\xd0\x8a\x11\x0a\x8apA0\xda\xe3\xc0\xf5w\
\xec\x9a\xf4\xde]\x1ax\xb2\xe5\xa6[\x01\xfc\x01\xa5\xdb\
N-a\xc3?\x86]\x96\x0033\x5c\xf2\xa5o\xc9\
\xcd\xf7%\x94p\xb1k\xd7: \x98\x87{\xe6lD\
y>p.\xe05qIkoGc\xec\xf8\xf6\xe9\
\x8b7\xa9`\x8b\xde\x05h\x86\x01\xc9~\x01H\xe1\xff\
W\xc8\xfd\xc6\xa1hHo\xe4\xe5\x85\xfa\x94\x95\xc5\x9e\
R\xae1\x12\x90\xde\x82\xb6fZ@\x02\xd2\xd3\x9b@\
\x0cR\x02\xcc\x06X1L\x9f\xe3t*2\x97\x0c\xed\
\x92\xfe\xe2\xd8.\x19\x1f\x1ds\xfd\x8c\x8a9\x7f=\x17\
\xa3O>\x02\xdf\xde\xfc\xfbv\xd6'3\xa6a\xeb\xb6\
\xd3\x90H\xf5\xd7ya\xcb\xed\xdbc\xb6\xce\xcc{\xa1\
\xeb\x89\xc7\xcel\xbc\xed\xc1\x1b\xdc\xfa\xdak\x05i\x22\
\x06\x84\xe1@\x90\x0b\xc9\x0a\x90~\xb6\xc6\x1f\xfdX\xe6\
\xd3\x8f\xdea\xaf\xd9\xd4\x80j\x89>\xe3\xfb\xfc\xaa\x00\
\xde\x95\xd7\x0d\x09'un0\x1c\xfa\x8b Hvl\
\xa8\xb4\x94\x93\x1a3p\xa1\xe57?\xb1\xf2\x0aw\xb2\
\x11P\x02 Si\x0eFc$J\xeb\xb3y\xf7\xce\
I\xfe\xea\xca\xc3\x85#\x0c\x18\x04v\xa2p\x8a\x0b\x1a\
u\x8f\xbe\xafSndA\x22'7\x91\xf4\x85\xc85\
\xbc\xd7\x9e\xe1e\xab\x90F\xcb\x81\xd8\xfbj\x19\x8e\xab\
}\x95\x0d~s\xe5\xd2\xa1b\xdd\x96\xa3\xd9\xa5\x0el\
\xa4\x80\xa4\xd6\x0d\xe3&\xceXz\xc4\xd1\xe7\xa3\xa2\xaa\
\xe6\xca;\xae\xf9Q\xdf\xdf~\xf3#t*l\xf6\x9f\
p\xf9\xc6;\x1bT\xf6\x0dR\x84\xe0h\x07\x99\xbeX\
m\x97\x02\xf7\xb5\xb4\x88Z\xa2}\xae%\xa0\x88\xa4\x82\
\xed8\xec\xc4\x5c3Q\x8b\xfe\xe5\xf1\xf0\xd1\xf5\xc8\xeb\
\x06m\x83\x95\x0b\x9flv\xdad\xdb\x0b\x0cC\xce0\
#b\x970]\x92\xac!\x94K\xf1\x98\xce\xdc\xdd\xc8\
\xe3b\xc9\xd0\x14V\x86\x1fBi\xa9\x93\xf1\x81\xbd\x22\
\x17|s\xe3\xac7/x\x22_?\xfd\xe2\x0c\x10\x1d\
\x80SO\x1b\xde\xa6!\xe5\x0cp\xb5\x13qm\x8bm\
\xcb\x86c\xbb\xec:\xc2\xack\xc2\x88\xea\xa8q\x02\x84\
\x91\xc7pa8q\xbb8\xdd\xb9\xcf\xb1?\xbfS\x18\
\x07\xf0?\x05\xe0?>\xf1)\x16/_\x87\xfc\xac`\
\xf0\xfdo6_\x91H\xf0\x1d\x80\xf4S\x8b\xcb\xd0{\
s=oK\xeb\x09\x8d\x99\x00m\x02\xec\x227\x93K\
\x07\x95\x84^\x19\x91#\xde\xb9\x22\xbfv\xcd'\x83\x0e\
Q\x9b\x0e\xbf\x1a\x13\xcbW\x06\xaa/<w\x94\xbbb\
\xce\xe5\xa2\xb6~\x14\xb9\x94\xe1v)\xaau\xc6\x1e\xf0\
TEq\xe7W\xcf\xbd\xe5\xfdM\x0b\x06\xa6!\xd4\xad\
\xed\xb0\xd4\xbc\x15\x8f\xb2\x13\x1b*I@\x10C\x1aN\
\xcb'\xdd\x81\x9d\xd3q\xb7s\xe1\x85\x17<p\xe1\x85\
\x9f>\xb8\xba\x0a\x19\xbc\x05\x19\xfdG\xfe*\xe0e\xd7\
Ai\xfb^p\x95}\xaei\x1a\x7f!\xedH\x96\x80\
\xf6%\xc1\xa6\x00\x0b\xc4!\xcc\x06h\xa9\xa8E\xd7%\
\xe5\x80,7\x0d\x8a\xb3I\xb3g\x16\x17-\xd3\xc1\x1a\
pY\x81\x8cj\x0e\x98)\xf6\xfb\x09\xa6\x1f`\x03\xf0\
\xfc\x0d\x00k\xef\x90\xcb\x0apm\xb0k3;\x8e\x8f\
\xa2n\x1e'\xb5\x8f!\xc0\x8e\x05\xa7\xa8]}\xcd\xb9\
\xe7^~F\x85|s\xf0\xae\x8d\xee\x8c\x0f\x9f\xfdq\
\xff[\x82\xce\xc7\x8e\xff\xfd\x09\xab+\xfd\x7f\x86H/\
d\x97\xa1aA \xe9\x0a\xe9V\x93\xa1\x1d\xcf\xd2\xc5\
p\x5c\x9b\x95\xe3\x1a\xda\xe1\x5c\xb0\xf43I\xcf\xdcI\
\x06\xb4T\xd0\xc2\x06\x91nf\xd2\x0d^\xb4\x99\x06\xb1\
\x82R:\xa2Y\xe6@{\xd7\xb3v\x91\x99\x96\x9a=\
\xa2o\xda\x85u\x0d\xcd\x1b\xbf\x99\xf1\x0c\x00\xe0\xca3\
.\x95o-N]c\x09y\xbe&\xc7d\xed@+\
\x07Z9\xcc\x8a\xa5\xabE\x1e\xc1\xf0\x0b0\xb3R\xe4\
\xe7\xe4\xe6\xc1\xbd\x02\x975\x8e|\xe7\x8b\xd0W\x07\xff\
\xe3*\xc4\x19\x97\xfd\x19u\xf5\xf5x\xf7\xf9\xd7\xd0\xf1\
\xc0\x83\x07$S\xea\x0c&\xd3\xff]^1\xc0$Z\
|\x17\x9ey\x060\x00-`\x06\x9cd\xf76Xt\
P\xbb\xcc\x87\x0f\xa5\xa6of\xa9\xf4\xe8\xdc.\xa3Q\
v\xf8\xe1\x18x\xcb\xc3E\x15S\xc6\x9f\xcc\xdb6\x9c\
C\xf1dO\x0e\xf9\x94\xdd\xaf\xd3Bk\xec\xf8\xc7\xd5\
\xc1\x93g\x04\xbe\xfa0\x0a\xacA\xe1\x1d\xaf\xa0\xc3Q\
\xa7.]6l\xd4[z[\xbc;\x84\xca\xa0\x96\xfc\
:!4H\x03f\xfd\xce6j\xee\xec\xd3\xae_\xb5\
v\xd5\xb6mK\xf6lY\xf4\xf3\xe1\x8d\xbfT\xca\x0d\
\x13\x94\xdf\x0e\x91P\x10\x8em\x01\xa6\xeb\xd9Gu\x10\
H)\x90\xb6\xc2\x80\x1d\x86\xf2N\xdfL\x00+\x05\xd8\
\xb6\xe7N5\x04\xc8\xf6\xccI\xc2\x81g6\x84\x90\xe4\
\xaa\x22R.8ey\xb6X\x97\xc0\x0e\x03P\x9e\xfa\
\x05\xde\xeb\x96e\x10X1\xe0H\xcf\xce\xea&\xa1r\
3\xed\xba\x09c\xbf\xda\x90W8\xe7zT\xbb\x8f~\
\xb8i\xbf\xfd\xdf\xb3g\x0f\x8e<\xec.L\x9aP\xf2\
\xd5\xee\xb7\xca\xdf\xaf\xb1\xacsA\xd2 \xe5\x03\x84a\
8\xd2-\xd6\xc4-/\x8d\x0b\xad\x05\x18\x0a$\xdc\xbd\
<\xc7\x12`I \xed\x1d\xaa\x00\x12\xe9\x9a9\xbdU\
e\x94\xa4\xc1\xa4\xc1\xad/\xa0v\xe0\xf7[\x95>\xd3\
zn\xdbWOoYQ\x99\x89\x82~\xc7\xa2j{\
3>\xd9\x98\xea[\x9f\xe4\xa3\xb5\xe0\xce\x90\x02\x80\xe9\
\x8d\x93$ \x14\x18\x80\xd0\xc4Zk\x90r8\x9cm\
\xae(\xe9\xd6nIx\xebU\x109\x19\xbf<\x98\xe7\
\x9bo\xbe\xc1\xb0I\xd7 \x12 \xdc\x7f\xeb)\xe8~\
\xc8\xd8\xee\x8d\xd1\xd4\x05\x9aeg\x22jqDx\xf6\
\xd0\x96\xf8\x0d\x90\x06\xa0%H1\xd22\x9c\xca\x91\xdd\
\xcc\xa7\x8fn+.\xb9o\xfe\x82\x0f\x17l\xa8\x8a\x1e\
2\xac#F\x9dx$u=\xf9\xc2\xbe\xf6+O\xff\
Y\xad_w+\xa2VO\x95\x9f\x96\x8c\x1f>\xf4\xed\
\xaa\x09\x13\xae|\xce\x7f\xf0\x9b\x8d\xcb\x97F\x17\x7f\xfb\
-\x98\x19\xc6Q\xa7b9\xf9Th\xd4\xf0\xd7)\x1c\
\x99\x0bV\x80\xa1A\x8a\xbd\xe0l\xbf\x1fB@\x98\xab\
\x97\x8f\xd7_\x7f>\xb6z\xf0P#\xa3\xad\x8d\x1bN\
\x9e\xfa\x8bA\xba~\xf6l,\xfd\xf0C\xc4K\xe7\x83\
W\x8c\x03\xaf\xbd}\xef\xeeU\xfc\xd63hWU*\
d\x86?\x93\x0c\x9b\xc8\xd0\x00\x1c@Z \xcd G\
\x02\x8e\xb7\xebz\xe1\x89\x16\x84r[\x0e\x9d. \xd9\
A\xc8\xbfE\x06|\x0b\xc8\x08\xec\x02\xc0\xa4\x19,\xc9\
S;\x95\x0bv\x15\x98]\x80\x5c0t\x0b\x984\xa0\
]\xc0U\x80\xcd \xa5\x00$\x01\xd3\x82\xdb\xa3\xb8\xb6\
\xea\xd8#\xde\xfd\xb6\xb0\xf3}\x97\x9d\x7fz\xd9\x157\
_\x8b\xad;\xf7\xcf`\xd4\xa6M\x1b\xcc\xf8\xe4V\x5c\
v\xf3\xa2\xbaC\x07\x15\xfc)\xcft^g\x1d\xad\xb1\
E3,JB\xb1\x03\xb0\xf2\xe6S1\x84g\xc3\x07\
I\x01\x09\x05\x83\x13\xd5\xec\xc6\xbf\xd4nl1#\x15\
eR\xd0-\xe1\x02\x02^l1\xb5\x04\x08A(\xf8\
\x8cd\xac}\x9e\xb5\xfc\xc0\x9e\xfe[\x8e\x1dS2]\
\xb5\x9d\xa4\x96.\x99\x8d\xb1\xa3\x07\xe0\x92\x8b\x86\x84\xaa\
ct\x02\x0bc\x18Q\x8bGPko\x1e5\x03-\
\xce\x16h\x97$\xecx^\x8e\xf8\xbco\x97\x8c\x07\x9e\
\x7f\xfc\xe1\x86\xce\x19\x80e9?\xbf\x03\xb7\x9alF\
O\xb8\x11%\xc5!\xdc~\xc1!\xf2\xf3\x05\x97\x1eT\
Y\xd3|\x83V\xe6X\x222E\x0b\x93\x0e\x8b\x96\x1d\
\x01\x0cV\x00AB\xc2UEEr\xc3\x01=3\x1f\
>*\xcdyw\xd4\xbd\xb75\x11\x11>\xf8\xfd\x03h\
\x7f\xfaT\xdf\xcc\x93\xce\x1a\xe5\xcc\x9dy\x0b5\xc5G\
\x13\x93pKr\xa3\xc9\xd1C\x9e\xaf\x1b0\xec\x81\xf3\
\xae\xb8\xae\xfc\xbc\xa9\x0d\x18\x7f\xff\x8d8\xfb\xaa\xabA\
D(\xdb\xbc\x1ai\x8bg#\xe7\x80\x11\x15\xa5\xeb7\
\xbf\xaaV,\x1f\x0c\xed\x14\x11L\x08M\x10\x86\x06\x0b\
\x032^\x93\x8bys.\xc8\xeb\xde}a\xf7#\x0e\
\xdd\xfa\xd5\xf4\x97\x7f1\x80\xabZB\xf6V\x8ek\x87\
\xb4\xeb\x9fE\xdb\x03G\x82\xf9\x0e\xe8\xb9K\xd1p\xc7\
\xfd\x08\x7f5\xbf\x8b\x1b\x8f\x1e\x0ahAN\x0aB\xb1\
c\xf8B\xeb\xe0\xa2Z\x93i\xea\x90_hS\x02&\
\x80\x80\x04\x0b\x83t0\xa0\x9d\x88\xcfMt\xee\xb25\
\xab]\xc7\xbf\xe5\xcd\xf8`\xdb\x9e\x03\x06\x0cOm\xde\
|\xbc\xbf<\x9e'4\x9b\xac-@\x83xo\xa8\x16\
\x03^\x1c\x7f\xcb\x0e\xdcbK%\x02\x84`6\xb4J\
\xb4/\xa8\xdf5d\xc2\x97sE\xc1G\x0f]\x7f\xf1\
\x1e\x00\xbcz\xf5\xda\xbf;\xbeY\xb3\x16@\xc5o\x05\
Q\xdf\xedw\xdf\xfb\xe8\xd5\x0b\xd6\xec\xferO\xbds\
tcC<+e\xbb\x86R\x8a\x053\x884\x08.\
\x0c\xe1\x92\xe3\xa6t\xd0\xb0j\xfa\x0c*\xfet\xdd\x96\
\xca/\xf2\x0a\xb2\xd2\x9bc\xcdS\xea\xeb\xdd\x91d\x04\
\x83\xae\xad\x0cf\xcd\x02\x04C\x10\xa4\xc1\x88H\x1d\xed\
\x5c\x88%\x9d\xf2\xf8\xfdni\xd80\x7fw\xb9\xfd\xdc\
S\x0f`\xf0\xc0\xde(\xdf\xfa$\x92Y\xbe\xb6!\xb6\
\xb3H;\x0b\x04\x1cMp\x18\xda\x85f\xed\x9deI\
\x904\x05L\xd3M\xb6-\x0c\xcd\xed\xd29\xef\xd5\xd7\
\xfe<|\xc7\xed7\xdd\x08##\x82O?\xfd\xf4\xa7\
\x0fq\xdf\xb1\xdf\x84\xc1\x1c\xc7\x84\xa9wbHW\x8a\
|\xb8\xa8f\xf2\xeej\xeb&\xa5e\x7f\x8f\x8cO\xb6\
\xf2J\x82\x85\x00\xc3;\xa1\x12\x03~?\xc7\xba\xb4\xf3\
}:\xaag\xc6\xa3\xd7\x8dl\xbf\xf0\xd5\xcf6\xbb\x97\
=~+\xde<\xe7jt8jBZ\xc3\xf3\xcfM\
\xd1+\xbe\xbd\x9a\x9a\xad\x81$\x05\xac\x9e\x85\x15u%\
\xdd\x9f\xdaQ\xd2\xe7\xf1\x01\xe9v\xc3k\x1f,\xc1\x87\
\x8bf\xc2\xfe\x1e\x13\x0f3\xa3\xf4\xd3\x8f\xb0\xf9\xce\xbb\
Q|\xca\xc9Y\xa9g^|\x9cj+\x8e7|\xc2\
\xf03A\xf8\x04\x5c\xa9\xe1\xaa\x14\xdc@F\xb4q\xf2\
\x91\x7f\x98\xd3w\xd8\x83\xf9\xe5\xe5Vq$\x0fS.\
;\xf3g\x01\xbc\xf0\xc2#\xe1$S\x85Y\x83\x06\x0d\
\x15\xc3\x8f\xdd\xd6v\xe8\xd0\xcdU\x069]\x5c\x96M\
\xe7_\xd8#1\xff\xf3\x0bTc\xec<\xd2*@\xda\
\x866\xc2\x8d\xbe\xbe]\xcf\xcd\xb7\x1b\xbeH\x8e\x1c\xeb\
\xb7\xbaw\x96Nv:t0\x00\x0eD\xa0\xd33\xe1\
\xb4\xed\xac\x1b\x9bb\xf6\xaa9\xcb\xec\x8fN>9\xf5\
`&\xf1\xb2\xc6\xa4\xf0\xbd\xf7@\xa0}\xb7\xe1fN\
\xd2\x08P\xac\x06\xda\xd0`\xd3\x04\x0b\xe9Y\xff\xa4\x00\
[\xdd\x00\x1e\x90\x89\xa0\x03AN\xb8){\xe5\xea\
\x1d\xd6\xa9\xafm\xb3\xd6?x\x8d\xdb\xa3G\x0f,Y\
6\x1fC\x06\xf5\xdd;_?%\xf3\xe7\xcf\xc7\x88\x11\
\x058\xe7\xb2\xb9x\xfe\xb1\x91r\xd1\xc2=\xc1e\xcb\
+\xfdu\xf5q_4\x1eci\x0a\x18\x86\xc9&\x11\
\xf9\xfd\xa0=\xd5uj\xc1g\xaf\xc6\x7f\x7f\xd3Q\xd6\
\xa4\xcb\xd7\xb8\xa8\x9b\x81K\xce?\xdc\xe7\xa4\xf5\xf7\xe7\
\xe6t\xf0'\xe3I\xd3\xb2\x1c\x96\xac9\xe2\x17d\xb0\
b\x7f\xb2\x22\xe1T/\xb5\xeexu\xae}\xce\x94\xa9\
\xb8\xeb\xd1\x07\xd0\xa6\xa4#\x00\xe0\xf4\xd3.E \xe4\
\x0b\xc7\x9a\xec,\x83\xa0\x0d\x03,\xa1\x98\x95\x86\xe52\
\xd8\x0b\xe2!\x19\x92P\x8d\x8d\xc9\xa2,;\xd1X2\
\xce\xd6\x1b\xe6B\x06\x82x\xeee\xcf\xc2\xb2\xdf\x11\xb6\
\x82\xf7\xfa\xdf\xdd\x86\xfb\xffp\x17\x06N\xfa\x13\xdaD\
J\xf3wT\xaa\xd3\xcb\xeb\xec\x8b\x15\x1b\x1d\x89%\x04\
\x9b\x10B@H\x09\x08\x01M\x0c\xad\xbdOA \xa0\
\x1b\xfbt\x0c<\x7fp\xfb\xe0\x93\xb7<t\xcb\xf6\xa7\
\xef}\x0c\xe7\xddt\x19^>\xfe|\xe4\x8d\x1b\x9a\xa1\
\xdez\xe7,Z\xbf\xfcrJ\xda\x1da\x18H\x0d\xed\
\xb9\xa54?\xf7\xc1\x97\x17nz\xe5\xe6\x93\x8fLL\
\xfb\xd3\x0c\x5c?m,\xee\xff\xeb\x93\xfb\xed\xdb\xced\
3:\x04\xd3i\xed9g\x8e\xb1\xbe\x9a\xf3\x92d\xbb\
\xbdi\x9a\x90\x9a\xa1L\x0bJy\x9f\x5c\xbbK\x8f\xd5\
ug\x9cu\xc6\xc4S\x87\xaf\xbc\xe5\x8a\xd7\xb1 \x1e\
\xc6\xd7\xcf\xdf\xfcw\x01\xbc\xf2\xecC\xe0B\x0c\xcdh\
hxB4\x05\x9b\x08\xf6\x12Q\xbe#*\xa4/\xc4\
\xca\x1d\xa1\xa3\xd6H\x10\xf9\xa4r\xc1Rp|@\xff\
\x99\x15\xa3\xc6^\x90\xbfa\xd5\xf6\xf6P\xb0\xba\xb4\x87\
\x93\x95\x0e\x15\xf4\x81\x03\x11Pu-\x9c\xdc|T\xca\
\x10*6\x97\xa2\xff\xd1\xc7!\xfd\xa1\xdb\xd0\x18\xf1\xa3\
\xb1o\x1f\x84\xf3\x8a\x91\xe6JP\xbc\x16,5\xb4i\
\x80E+c\x91\x06+\xdd\x92\xc1\xe0\x01\x18R\x82\x83\
a\xd8\xae\x8d\xed\x1b\xcb\xf0\xe2\x86\x14\xbe]\xf05\xaa\
\xb6m\xf8\x87h\xb7\xfe\xfa\xd7\xbf\xa2\xb9\xae\x0eE\x1d\
; \x9a\xf4c\xcb\xe6z\x94\x96\xd5Ai\x07\xa6\xcf\
\x80a\x18\xf0\x09 \x18$TT\xd7a\xfd\x92/1\
q\xc2`t\xea\xd5\x13\xb5{*\xb1h\xe5Z\xf8\xb3\
\xbb\x22\x92V\x88\xda\x9a\x06\xf8}F\xcbG\x87!\xd8\
A\x98\x9b\x11\xaf\xdd\x84\xa5\x8bW`SU5\x9c\xef\
\xf5-\xda\xc0\x88d\x02\xa7\x9d\xf6\x18\xca\xcbV\x22\x18\
\x100\xa5\x02\xb4\x86\xad\x80TJ\xc1r\x1d\x84\xb3\xc2\
\x90\xa98\x82\x01\x1b\xb55\x8dpl\xc6\xc2\x95\xb3\xf6\
\x8e\xe1G\xa3l\x9d\x80{\xef\xbd\x177\xdf|3\xba\
\x8d~\x08i\xb4\xa6]}\xcc\xbc\xaa>\xea\x9e\xa6X\
\xe4\x82\x0cHez;\xaf!\xc0-\x0c\x93\x1a\x1a\x0c\
F \xc0\xf5\xbd;\x86\x1e<\xb4C\xf0i\xabm\x97\
\xba\xfaw\xbf\xc4\xa3s\x9e\xc3\xb3\xd3\xaeD\xc1\xd8\xde\
\xe9\xfa\x8d7.\x10\x9bW^%\x12N\x11|&\x12\
\xa3\x86\xac\xdeUX\xf4\xc7\xc7\xa7\x7f<}DF0\
\xf5\xc6\xa6\xd2\x9f\x5c\x84\xd6\xfe\x95n\x5c\x8d\x86\x0d\xeb\
\xc0i\xe1tu\xcb\x03\xb7\xd3\xae\xd2K\xa4 \xbf\x10\
\x00K\x07\xdap==\xca\x17HYc\xc7?Y\xf5\
\xe8s7w[\xbf<\xd5\xbd\xdf\xc0\x96\x98\xe2\x9f\x96\
\x0d\x17\x1f\x03\x15\x0a\xf7O\x9b=\xfbyQQ;X\
\xb7\xfa\xf3M\x03\x9c\x92 \x9b\x01\xe5\x80}@\xf3\xf0\
!\xdb\xb6\x8d\x98x\xcd\xab\xe9\x93?\x1a\x11]\xae\xae\
=y\x02\xd0\xca\x1cD/b\x0d\x8f\x94\x05\xe7\x9ec\
\xc63\x80;wW9/\xbe\xb5Q\x01\xa0\xfa\x87\xee\
5ce\x1be\xe2\xa1\x97\xdd\x1e\x80\x03\xac\xc1\xd6\xd2\
|ti_\xb0w\xdc\xbf\x84\x95\xfe\xd7r\xd2\xec\xef\
YD\x84\xdbo\xbf}\x9f\xffw\xe7\x9dw\xfe\xec}\
\xbf\xa4\x8f\xdd\xbau\x83R\x0a\x99\x99\x998\xe2{\xd4\
\xb9w\xdey\xe7\xde6\xe3\xf18\x9e{\xee9TU\
U\xc1\xe7\xf3\xfd\xe8\xd9\xc0\x0f\x1c\x19\xad7\xfe\xf1\x8f\
\xf7\xe3\xe6\x9boF\xe7\xc3\xde\x85\xaf\xfe\xbd\x92\xcad\
\xe0\xd6h\xc29QC\x84\x09\x02By\xa6\x11\x98\xec\
\x05\x86@@k\x05&\xc0\x1fP\xb5=:\x84\xffx\
\xd6\xe8NO/\xdbX\x19\xb5\x9f|\x06\x0f-\x99\x89\
\x17\xce\xff\x13\xf2\x8e\x1a\x1a\xb1\x9f\x7f\xe1\x1c\xb9i\xc5\
\x95H\xbaE\xda\xef\xe3\xc4\xa8\xc1+w\xb5-\xb9\xe7\
\x89\xd7\xdf\xfe\xa8kPX\x7f\x9c\xf1\xe9/\x9a\x8c\xb5\
k\xd7\xa2\xff1'\xe1-\xa2\xe6n\xd7\xde\xf8\x8a[\
\xfd\xe1\xc1hj\x1c\xc4\x92\x00r\x00\xe9z\x99\x06\xaa\
9\x10X\xb9\xf8\xb0\x8c\xe7\x1f\xfd\xa4\xfb\x85W\xcc\xfc\
\xfd\xdd\x8f\xfd\xfcb\xe6\xe6\x82\xc3i.GBI\x0a\
\x98\x10,\xc1\x96\x86v\x15H\xbb`WCe\x04\x9d\
\xe8\xe8\xe1\x9b\xd6\xf7\x1a\xf1\xd0\xa3KJg\xf6\xc9x\
E\x95\x96\x97\x83\xae;\x0b\xd7\x9d\xd6\x197\x9f\xf54\
J7\xf4\xf4G\xa6\x9c\x7f\x9c\xb5l\xe9\x91\x94\x93\xa6\
\xce9\xec\x88\xe9\x0fE\xf1\xc1\x19\xf7\x5c\xd6;\xf1\xd9\
;\xe7\xe8\xa6d{s\xfb\xa4\x15\x1b\xaf\xbd\xf6\xc9v\
s\x9f\xae\xe9z\xd3;\xfb\x00\xe2?I^\xf8S\xcf\
\xda\x1fh~\xc9}?'\x9b7o\xde\xfb\xf7\xb2e\
\xcb\xfe\xe96\x7ft\x88\x9b\xbf`\x11n\xb8\xe1z<\
\xf3Y:\xcc\xfaw\xda\xd7\xc7\x83w\xc5\x12\xee\xf1\x9a\
D\x80 Z\x22\xb0\x18\x90-\xb1\xbeD\xd0\xac\x01f\
\xf8\xfc\xaa\xaeS\xdb\xf0\xddg\x8e\xeb\xf1\xec\xec5\xbb\
\x13\xcf\xbd4\x0b\xcf\x5c0\x02\x1f\xbd\xfc4\xba>q\
\x95Yu\xc4\xb1Sh\xe3\x8a\xcb(\xe9\x14k\x9f\x89\
\xc4\xc8\xfe\xabK\xdb\xb6\xfd\xfd\x13\xaf\xbe\xf5q\xe7\x88\
i?5\xe3K\x94t\xef\xfd\x8b\x06\xd1\xa7O\x1f4\
\xee\xde\x8d>\xdf|\x09:\xf8\xd0\xf5j\xe5\xea\xe7\xf4\
\x86\xf8=\x0c+BB0\xf9L\xcf\x1cE\x06\x99\xb1\
\xda\xce\x19\x9bW\x1f\xff\xe5\x9a\xb5\xcbO\xf2\xa3\xe1\xb6\
[\xfe\xfe\xa4p\xdf!@vN\xa9U\xd3\xf0\x02\xa5\
\x16\x93\x88\xd5u@\xc0\xf6\x93-\x88\xe1w\x9d\x9e\xf9\
\xcd\xd11#\xbf\xde\xd6a\xe0+\xcf\xbd\xf1\xe1\xf2\xae\
9\xf9\xc9\x1b\xae\xbf\x10]\xbbv\xc6c\xf4:\xce\xbe\
\xf4\x19d\x0e\x1d\x8bm\xd7]9\x80\x96\xad\xbc\x5ck\
w(\x95\xd7\xa3`\xd5\xb6\xec\xa3JWlK~6\
\xfdd\x15\xaf\xbd\x08Z\x1b\xb4\xea\xdb\x83\xaa\xdeo_\
3u\xe0\x15\xcf\x9eu\xeb\xd0\x7f\x9d/\xeb\xff3\xd9\
\x07\xc0\xad\xa0\xe9~\xf0\xc3\x08\xa5\xe6\x17\xd5\xc5\x227\
\xc5\x93\xeax\xcd2@\x90\x1ex[II\xc8\xb3K\
zZ\x99\x86\xdf\xaf\xeb;\xb7\xf3\xff\xf1\xb4\x83\xbb<\
\xbb`\xfd\xee\xc4\xfcek\xb1i\xe1\x9fQi\x04p\
G\xaf\xce\xb8\xe4\xab\xaf\x0f\xc2\xaa\x85WQ\xdc\xea\xc8\
\x86DrH\xaf\xed;\xf3\x8b\x1f|\xec\x8dw?\xee\
\x14 \xfb\x85Og\xa1S\xcf_\x06\xdeV\xc9l\xd7\
\x0e_\xdfw\x17\xd4\xe2E6\x9dt\xd4\xc7zmA\
\xae\xd6\xa9\x12\x10+\x98 \x110!\x0dA\xa6\x9d0\
\x12>\xa3q\xd7\xfb\xcf\xb5+\x19\xd2\xbb\xe1\xe7\xda\xed\
\xed;\x1f\x9c\xfdL\xf4\xe3\x8bo|\xa3\xd3\xd85_\
\x8b\x95\x8b\x8a\x1cN\x95\xb00\xfc\x22-\xb3Z\xf7\xe8\
\xbd{[\xd2\xa8\x98v\xc4Q\xf5_|1\x1bw\xdc\
}\x0b\xda\x94\x94|7\xa9\xa6\x1f\xab\x01\xe2\xda\xd2\x8e\
\x22\x91*F\xc8\x04\x8b\x14tmmq\xa2\xaa\xa2\xab\
\xd9\x98*a\x86A\x01\x01\xe18Yvm\xc3 k\
W\x85Y[\x97r\xfeK\x19\xfb\x8f\xc9>\x00>\xe6\
\x9c\x17\xb0{\xc7\x0a\x18rqZYm\xe8\x82xR\
\x9f\xa8A\x010y\xe0\x15-6>x\x06j/\xbf\
_\xc1\xef\xd3M\x9d\xdb\xf8\x1e\x9d6\xb4\xed_Vn\
\xaaHl\xab\xa8\xc2[/\xbc\x8a\xb9\xd7OD\xda\xca\
\x0d\xb8\xf0\xc43\xba\xbb\xb3\xbf\xb8A\xc4\x12\x83X\x01\
v\xcfN\xb5\xbb\xf3\xdb\xfd\xf9\xc5O\xbe~\xa7\x7f\xc0\
g\xbf\xbdb\x1d\xd2\xb2=N\x81\x7ft\x01\x0f\xf9\xdd\
\xad\xb8\xe4\xc9\xbf`\xcf\xa6\x9de=\xfb\x0dy\xd0\xa8\
\xaa4\xech\x1c\xb6r`\xc7\x1d\xc0\xef@F\x996\
/\xfb\x06\xf55\x15\xa9\x0bn\x8d\xfel\x9b7.\xbc\
\x094\xe5>|\xf6\xc9\x07I\x87hW\xbf\xd2Ow\
\x9ds\xf6\xfbK\x8a\x0dE\xcf\xe6vU3\xd7\xbc\xc9\
TY\x83C\xc7\x1c\x82m\xeb\xd7\xe2\xd9\x96\x1c\xb8\xd6\
\xbe\xaf_\xbe\x18G\x0d\x1a\xc9\xab\xef\xb9lkFz\
x\x0f%\x9a\xdb\xb2\xf4\xc1\xea\xd4\xb1<U\xd2k\x9d\
.j7\x08\xa5\xeb\x5c\xc4\x95\xe1f\xe65\x89\xde\x03\
\x165GSv\xd8\xf8\x8f\xb1V\xfd\x9f\x91\xbd\x05\xa8\
\xcb+\xaaq\xdc\x197#\x12\xa956oM?\xa9\
1\xc6\xbf\xd7\xa0\x12\xb4D\xbd\x0a6 Z\xdc\xc3\xad\
9z,\x18\x86\xa1\xad\x0e\x85\xf4\xf2\xc0\x12\xba\x0dF\
A\xe5\xaa\x1d\x0e\x96|y\x17\x96\xce\xfc\x1a;\xe6}\
\x0b_I\x97H\xfc\xee\xbbn\x135\xe5WI\xdb5\
tI\xdbD\xf5\xf0\x11\x8f|\xb0j\xfd}\xed}\x88\
\xbe\xb0h\xc9w\x9d\xf9'w\x9f\xeb\xae\xbb\x0e\xb1D\
\x12m\xb2\x02@S=\xecx\x0264\x1c\xe5\x02>\
\x072\x1e\xc5\xba-\xbb\xb1\xa7\xbe\x06\x9bv\xc6\x90P\
\xbf\x9c\x98\xeb\xf2\x8b.\x82m\xbb\xe8\xdc\xb5\x0d\x22\xe1\
t4\x81\xf5u\x12\xb9\x11\xf5w\xf5\xc3\xf5o\x5c\
\x8b5.\xf9{}\xb6\xe8\xb8\xd0\xaa\xedc\x13=;\
\xeb\xa5\xd9]?9\xab\xba\xcb\x8c\xf9\x87D\xfb\xe4|\
\xf0\xe1\xc9\x886\xb7K\x0e\x18\xb85y\xfc\xe5\x8fk\
'^]\xb3\xdb\xc2\xd1\xe7\xfdr\x87\xcb\x7f\xe5{\x00\
nl\xb6\x90\xd5\xf5\x0c\xb4-4F7G\xf1\x98\x02\
\xf5C\xab\xceK\x02\x82L\x906\x00\xe9\x85Hj0\
\xa4d\x14f\xf3\xfc\xee\xb9\x89\x8bGN\xb9w\xd5\x8b\
\x0fO\xc61\xc7N\xc1\xef\xef\xba\x13\xdb\x89\xf0\x0c\x11\
\xfa\x8e\x1a{8\xd6\xaexD\xa4\xac\xce\x14\x0a\xda\x89\
I\xe3^\xd9\x96SrS\xc7H\xb0\xfa\xdc{\xee\xc4\
\x11G\x1c\x81\x8f>\xfa\xe8\xff\x14\xdb\xfa\xf7MY\xbf\
g\x16\xc7a\x9d\xac@o\x1a\xdbs\xa0\x83\x8d+\x19\
\xccX\x03\x18N\xcd&\xe3K_7\xf7\xfa\x0cr/\
\xba\xf5\xcf\xe8\x10\x09\xe3w\xbf;\xff\xb7\xee\xfe\xff*\
\xf1P\xd3~0\xdaeuB$(r+\xaa\xc5=\
\x8e\xa2\xf3A-\x91P\xd4J4\xed\xf7R\xe3\x05\xa0\
\xc93\x97\xa5\x85yO\xafv|\xdd\xd6\x8f\x9f}c\
'\xa0W\xaf^\x8f\xbe}{\xe2\xa6\xe3\x8fG\x17\x97\
a\xb4+i\x8bw\xde\xf8\x93h\xae?Q\xb8\x02\xf6\
\xb0A\x8b*\x0f\x18u\xe1\xf1\xc3\xc7\xaf\x1cy\xe6!\
\xa8\x88\xe1\x1f\xb2[\xfeo\x11f\xc6\x86\xe5\x8b\xd1s\
\xd0P\xcc\xfe\xeb\xad(\xb1\x81\xc6\xdc\xcex\xec\xedO\
\xf0\xfc\x1bo\xe1\xc9g\xff\x8c\x09\x99\x04'%\xb0%\
\xa6Q\xe3\x06!M\x81\xb3.:\xef\xb7\xee\xfa\xff:\
\xa1.]&At\x0aa\xf3\x17}\x91\xdfc\xd3\xb1\
\x89\x04\x9e`\x88B\x0f\xc0-\xd5\xdb`@\xc0\xdf\x92\
N\xc2`(\xc0\xd0V\xa7\x22z\xbeov\xdd\xcd\xb1\
F\xdd\xd8\x7fm\x18\xb7T\xbd\x04)%>|\xfdU\
\x1cu\xf2\xa9xa\xc8A\xd3h\xeb\x9a\xa7D\xca\xca\
\xe1N\x1d\x1a\xa3\xe3\x0f\xbbv\xfa\xeau/\xe7\xda1\
\xe7\xedo\x17\xfc\xd6c\xff\xb7\xc9\x0fm\xa3?e\xd7\
\xfd\xbf\xf4\xd2\xfeV\x22\xcc|\x89\xa6\x0a\x85\xa2\x01\xcb\
\x8bS)g*3\x0a\xc0\xba5O\xcaSv\xb5\xf0\
\x0el\xa4\xbc\x08%\xb8\x88\x04uYa&\xff\xed\xa1\
\xf7\xdfm\x5c\xbay\x16\xd6]\xda\x05\x86a\xe0\xb9\x07\
\x1fD\xc5\xbc\xf9x\xfa\xfa\x1b\x0aUM\xd9Q\xdaM\
e\xa9PX'\xfa\x0fx\xbd\xd4\x9f\xf9\xc1\x01\x1d\xbb\
;\xf7=\xfd\xc2\xff\xe9\xd2\xb1\xadY ?\xcc\x06\xf9\
\xa9\xff\xff_\xf9\xe7E\x84\x00TU\x10\xac\x98\xd9S\
)\x1c\xda\x12\xb8\x8b\x96@\x92\x96_\xec\x85\x96\xb1\xa7\
\xfd\x0a\xd2Vn\x1a\xde\xc9rv\xad=\xf9\xa0C\x10\
\x09e\xe0\xed\x96\x03\x8d)$\xacg^\x81\x9e\xb3\xb8\
\xb3\x8e6\x8c\xd5\x0e\x0b\xabk\xf7M\x0d\x85\xed\xdf\x1e\
\xde\xadS\xed[\x7f}\x1c\x9d{\xf7\xf8\xad\xc7\xfd_\
\xf9?\x22bSM32\x0ak\x83\x8e\xab\x07BS\
\x9a\xc7}\x8a\xef\xed\xc0-\x5c\xa8\xac\xc1ZC\xb3F\
\xc0\xd4\x0d\xb9\x11\xf5i\xb9oTt\xdb\x9eu\xd8\xb0\
m\xd7\xde\x06\xebc\xcd\xb0\xdf|&\xa0b5\xc3\xb4\
\x9b\xcct3\xb2\x9dd\xb7\x1eoW\xc1\x99\xbfz\xe5\
*\x1cp\xe0\xc8\xff\xee@\xff\x95_M\x84D\x04\xb0\
#\xd9\xca\xd5\x87\x80\xb5\xbf%\x83\xcd\xabO\xc1\xad;\
1Ck\x06\x14\x03p9#\xa8\x17Et\xe9\xce@\
\xd3\x5c\x04\x03\xf9\xfb4\xa8\x9bc\xc0\x9c\xc5\xd9\xdc\x5c\
7Q\xbb:d\xb7m\xbf\xa21;\xf7\xc3\xbe=\xfb\
X\xdc\x94\xc0\xdb\xdf\xce\xfb\xad\xc7\xfc_\xf9?$\xc2\
M\x11\xb4#\xd3Y\xe9\xee\xccZ\x10tKbaK\
\x10u\x0b\x80\xa1\x15\x98\x15$\xb4\x1b\x0a\xd2\xcc\xc2\xb4\
e\xe5\x8a\x1b\xb1n\xe3\xba\xbd\x8d\xdd|\xd6Y\xe0e\
K W\xae\x0csS\xb4\x8b\xf2\x87\x91\xea\xd0eQ\
\xac\xd7\x98U\x8d\x9bk\xd1f\xe4\x80\xdfz\xbc\xbf\xa9\
0\xf3>?\xff\x95\x7f]\x0cV\x0c&\x18\x04\xf8<\
\xd0\xa2\xc5\xb8\xe6\x85\xd93\x0b\x8f\xe3\x9c\x09L\x1a\x01\
\x03V\xba\xe9\x96-\xday\xa4;w\xf9\xa5H\xa7\x11\
{\x1bc\xc7A\xf0\x9bY\xb0\xfb\xf5K\xd3\x09\xdb\xd4\
\x1d;\xecj.,\xfc\xfa\xa1\x0b\x8epN\x1dy,\
V-\x00\x8e\x9cr\x15\xea\x1a\xe2p\xdd$\xc2\x06`\
\x92\x86A\x02a\xbf\x0f\x91\x88\x01\xf6+46T`\
\xcb\xba\xd5\x08\xa6\xa5c\xe9\xbau\xfbt\xf8\xe8\xf1\x17\
\xc1\x88\x00\xf1\xe6\x04\xa2\x8a\xe1\xc0Sm\x98\x05 %\
$I\xf8\xa4\x03\xb8\x0eX\xbb0H!\xae\x15j\xea\
j\x10\x8f\xd7\xa1j\xe7&\x5cv\xe5#H\xc6\x130\
\xfdQ Y\x8d\xbf\xbc\xf0<\x0e\xe8\xd5\x0bK\xd6\xaf\
\xdf\xfb\x9c\x9c\xdcb\x8c:\xea4\xa45\x13\x9a\xeab\
PV\x14I\xb6\x91r\x0d\x98\x91\x00\xd2\xd3M\x04\xd3\
\x02\x08\x08\x01m\x0b\xb0\xa3\x90\x95\xe1\x87\xf4\x03\x81\xa0\
D\xfb\x0e\x85\xe8\xd8\xb13&\x1f9io\x9buu\
u\xc8\xc9\xc9\xc1\x94\xa3/FF~\x09N:\xed\x0e\
\xb0\xbb\x07\xb5\xe5\x15\xf8j\xce\x87\xc8\xce\xceF}}\
\xfd?\xbc\x88\x1b6l@\x8f\x1e=\xf0\xd6\xd3\xcf\xc1\
^\xba\x00\xfe\xafg\x81k\x1b\x11o\x93\x89\xdaA\x03\
\xb1\xad\xb1\x09O\x7f\xfc\x15\x06G\x8a\xb0,V\xb1\xdf\
6.=\xff$D\x9b\xeb\xd0\xaf\xcf\xc1\x08K\x89x\
\xb4\x09u{j\xf1\xc5\x82\x85\x88\x84C\x98\xb5\xfc\xc7\
\xb4\xad\xed\x0a\xf2q\xf6QG\xa0\xaa\xae\x0aB3\xa4\
\xeb\xc2o\x0a\xf8|\x01Hax\xf5\xaf\x0d\x8f>K\
\xfa\x02\x08e\xe5\xc2M$p\xc3\xc3\x0f\x03\x00\xee\xbb\
\xeb\x1e\xe4d\x17\xe0\xdb9_\xc0\x8a6\xc2tR\x90\
p Y\xc0\x80\xb7o\xba\xae\x02\x0b\x82\x92\x06\x94a\
\x82%A\x19&\xea\x1b\x5c\x0c;\xea\x00\x18-\xe9\xee\
\xccD n\xc5o\x0b\x0dP\x0b\xa7+kn\xe1z\
P\x08\xfaUY\xba\xd9P\x9d\x95\x9f\x8e\xe3\x0f\xbbp\
\x9f\x01\xa9\xbc<,f\x16\xbd\x06\x0e-\xf4\x89@\xc0\
i\xdba\x95nW2\xff\xf1\xe7\x9f\xc2_\x1e\xfd\x14\
\x96\xcf\x80\xdf/}ia\xe1S\x0e!\xc3\xef\x15\xfb\
0H -d\x22+\xc3\xe4`\x96\xd6\xa1\xa2\x84\x13\
\x19\xf0\xa4\xdb3\xb8\x0ey\xcf}\x89OKg\xee\xdd\
\xb1\xce<\xe6\x22\xb8\xa60d\x04~\xa1@.<\xf6\
C\xd6\x12d\x1a\x90d\xc0/\x01\xb8\x00i\x82O\x0a\
D\x95\xa0TR:;\xd6oN=\xf1\xe4\xf3\x94\x11\
\x11\xc1\xb0A\xd2\x17\x96\xba\xae\x026\x00\xc7\xf9\x01S\
LmM9N\xba\xf8jY\x98\x1d\x0a\x04\x98H\xdb\
@\x8c\x19)\x8b`\xa6\x1b\xc8\xcc4\x10\xc9\x94\x08\x0a\
\x03l\x09@\x09\xe4d\xfaa\x06\x012\x0d7\xe5:\
\x16i\xbdO\xa3999xs\xc6uh[\xe8\x1a\
\x91,\xe9\xb3\x12R\x98\xcc\xdc\xbc\xb3\xd2\x02\xe0\xfe\xa3\
\xe0M\xa5Rx\xf1\xb6{P\xbb\xa7\x0a\xef\xdd~;\
\x92/\xbeb\xa0lS\x88M\xe1\x03\x98X\x0av\x0a\
\xb2\x9d\xd2\x88\x91\x98\xf7\xee\xeb\xce\xda\xed\xd5xd|\
\x1f\x1c\xd4\x7f\xec\xde6Z\x93\x04\x8e\xff\xc3\x95\xa8+\
o\xa6\xee\x03z\xfb\xfc\x8ec8\x8d\x8dH\xa5\x5cg\
\xe7\xa6\xd5NNQ\xf1~?\x17\xa5\x95U\xb8\xf8\x8a\
K\x84\xd5\xb60 \x14H\xda.K? \xfcA\xb0\
4A\xc2\x840\x0c\xc04\xd8\x08G\xdc\xd5\x7f{\xdd\
i;n\x02&\x8d<\x04\x9f\xce\xfd\x1a\x8f<\xfd,\
`D\xe0\x16\xe4\xfb\xdd\xa0a\x90\x9dd\xe8\x94\xc7\x8b\
\xa7\xc9\xcb\xa4R\x0a,\x04\xb4i\x82\x0d\x13\xda\x14\xc4\
\x8eR\x5c\xdb`\x1b\xa6\xd4\x06\x09\x0df(\xcf\xb9\xd6\
\xd2O\xed\xc5\xbfz\xf5\x8b\x95w\x80\x13\x04\xb0\x86O\
\xba\xbb|fS\xad?(1\xe3\xfdU\xfb\x0c(j\
YH}\xfb\x09)\xdb\x8a(\x7f\x90\xed\xc2\x82\x9d\xd1\
\xb0\xafi\xe7\xf2Mh\x9b\x11FN\x1a\xf9\x92l\x8e\
\xadqr&h\x99\xe5\xabw\x95\x82K\xde\xfb\x13#\
\x98\xf5B\x07\xabD\x9d\xcf7r{~\xce\xbau\xd9\
\x85\x81]M'\x9e\xd24q\xbe\x8b\xf5\xeb7\xa0W\
\xaf\x9e\xb02\x06#\xcd_\xdd}G\x19\x1f\x17\x13f\
\x81\x96\x8eb\xed\x00\x82\x98[8'\x04{\xa9\x8f,\
4\x04\xc0l\x88p*P6\x07\x98\xf7\xca\x9b\xaf/\
\x0b\xd6\x07z\x5c\x063\xafK\xe7\xfc\xec\x9aQ\x9d\xd2\
?\xcd\x02\xbe\xedZ\x9c\xa6Wn\xf9n,g]\xf7\
\x1a\x02\x81\xee]\xd7T\x07NV\x82s|!\xa5]\
\xc5\xec\x86\x19.)\xd4\xa5\x08\xb2^2\xb3\x06)\x01\
\x93@\x14c\x04\xfc\xc2\xe8\xd5\xd6Xq@\xc7\xc8[\
]\xfb\xf7n\xdag\x82\x12\x158qJ\x12\xa7\x9d=\
\xf2\xc0\xb5\xbb\xd4\xe1\xacS\xa1\xc2\xcc\xdcT\xd7\xd1\xbd\
f\x06\x17l\xfd\xaa]\xe7\x1e\xaer\xaa\xb1\xadt\xdb\
\xcf\x82w\xfd\x92%\xf8s \x80\x9e'\x9c\x0a1\xea\
_\xfa\xa7\x1f\xb5\x09\x9e{\xe6p]\xba}\xbcL\
$\xbb\x92/\xe0K\x8b\xa6\x9c\xecM\xbbvt\xed\xd4\
\xf3\xab\xca\xf2\xea\xf9t\xd0\x90]\xeb\xdf\x9de\x8f\x1c\
0\x0ek\xe6-F\x9f\x83\x0e\x00\x00|~\xfb\x9d8\
\xb2AB\x05\xfb\x15\x1a/\xbf>\x95\x95\xd5.\xe4\x0f\
RZv\xe1\x96\xd3O\xbd\xf4]\x13\x5c_^\xb9\x07\
\x7f\xfb\xea\xbd}\xfa\xf0\xc7\xd1C\xd0\x9eu[3\x9c\
q\xb6\xf6\x07\x83ZJ\x97\xb5\x01\xd6\x8d^\x1e5\x83\
4@,\xc9q\x03\xbe\xb2\x0eC\x87-w\xb2\xd2\xb7\
\x9d\x98AMD\x84\x17\xef\xbf\x1fY\xc5!#o\xd3\
\xc6\xa3\x1d\xdb\x1e\x06\x90\xc5\x0c\xd6\xac\x98\xd9e\x06C\
h&\xad\x15\xa4\xf6\xf2\xe5\x84\xd2\x86\xce\xcc\xde\x11\xc8\
J\xfb V\x9a\xac0L\x9f\x86fNj\x17\x95`\
\x94\xb0\x16\xad\xda\x83\xb7\x13\xb7T\x1b\xa4\x16rj)\
\xb8\xd9$;\xc5\xea\xc7TH\xa9d\x0a\xb1\xbaf\xa8\
\x94e\xb8\x11\x7fC\xca'\x16\xaf\xfe|\x91\xdbY\xc4\
\xe1\x93\x8c\xac\x0cC\x96\xedI\x0e\xd8Q\x168\xc31\
\x8d\x88\x12`\xb0\x17OL\x02\xf0N\x8a\xec\x90\xd0\xa9\
\xa0\xd9\xd8\xb8f\x87\xf9\xd1\xd0\xee\x8d\x7fU\xb9\x89\xf5\
g\x1c?M\xbd\xfa\xf6\xbb\xd8X\xe1C\x97\x1cU\xb8\
u7\x8e\x8d\x83z*\x9f\xd4\x9eN\xf9=}\xdd\x9b\
6((h\xd7\x85\xd6\xdat\x93\xb1$\x80W\xd6o\
m2\xed\x0c\x1a\xe7\x06\xe4x7i\xef>0?\xb1\
eh&\xbe\xcd\x08\xedk\x15\x99\xb3\x22\x8a<\x7f\xbc\
dK\xb5q\x9a\xe3\x88\xb6\x12Z\x0b\x92 S\x83\x88\
X\xb2`M\x04W\x83Xk\x82f(Rp\xe1\xca\
\xdde\xee\x87\x03\xbb\x84?mh\xb0\xf6\x01\xf0Q\xd7\
.\xc7\xd5\xf7\x1e\x1ez\xe7\x83\x8a\xa9Q\xa2\x0bXH\
\xac+\xd3\xd4\xad\xc0\xecq\xe1\xb5W.M\xcf\xefU\
{\xe7\xf5\xd3~\x16\xbc\xcc\x8c\xcb\x880\xa1\xf7@\x04\
\xda\xb5\xcf\x8b\x1f|\xf0\xb1\x5cZv\xba\x88Z\xdd\x84\
\xedfB\xd8\x06\xfb\x01\x11e\x04\x16.\x1a\xee_\xbe\
\xf607+kW\xce\x80\x95O\xd7v\xe9\xf9\xf1P\
\x14\xed9g\xe4x^\xd4Rk0\xd2\xb1\x04_\xdd\
u\x16\x86t\x1a0\x18\xb5\x957i\xa8L!m\xe2\
v\x9d*\xdb\x8d\x1b\xb7>\xd3'\xbe\xfd\xec\xbd%?\
\xeaGr\xe9Zhpq\x9e\x8c\x9cK$r\x94H\
i\xad\xd9\xcb\xcc\xf18H\xa0\x89\xa0%\xb1\x96\xd2\xd2\
\x86\xd9(\xda\xb6\x7f\x7fY\xcf~\xcf\xf9K\xban\xda\
\xb1|\xadN#\xbf\xf4/\x5c}\x88\xb0\xad\xb3\xb5 \
G\x13\xa0$CK\x97[@G\xdf\xbd\x09\x00\x1c%\
8\xaf`\x1e\xf7\xe9;/\x19o\xae0\x0c\x9f\x82\x90\
h\x88\xdb\x98\xcb\x1a\x03\x88\xc9\xa3;\xe1\xd6\x14B`\
/+\x0atK \x9a\xc1\xfb#\xf5I\xc6\x93\xa86\
}\xac\x1c\xed\x92\xe1k\xe4\x80\x7f\xc3\xec\x8f\x9eV\xc7\
\x0f\x9a\x80@n:B\x06C;\x8ep\x94\xdf\xe72\
\xfb\xfc\xd2\xb6\xc9PQ\x80\x884I\x0d\x22\x87(\xa2\
\x84Lk\xd2\x94\x17\xabp.\x8aYN\xbf\xe1]\xc6\
\xde=\x8a\xbf\x9e{\xe1i\xa7)'p2\xdc4\x8b\
,M\xa6\xa3L\x83\x09\x960\x08\x04\x08\x8f)\x12-\
\xea\x8e\x97\xbaN\x06 \x95+X;\xd2\x01\x90\xb4\x93\
\xdcR\x95\x19J+\x9f\xe3XB%\x80\x1f\x92\x8c\xe7\
\xf9\x81\x08\xb9\xa9\xdc\xa0Sm\x99\xc2\x94Zj\x02C\
\xc3\xa6\xa4\x8btK\x99!&\xc5Yf\xb2\xc1\xf0\x19\
\x09\xe5(a\x0bM)X\xd2\x96\xd4P\xd9d\xab\x12\
\xf3;~\xb2I\xe7\xbc\x8dofnFy\xd7\xac\xa1\
q%\xc7)\xa1\xfc`\xd2\x8efQ\xd6 \xfb\xd7\xc5\
\x22c\xdey`\xda\xf4\x09\x03'\xf3\xae\xc46l\xda\
\xb4\xff\xb4\xf8\x15+V\xe0\xfe\xb6=p\xf0\xa8\x09\xf0\
\xf7\xe9[\xa0\xa6\xbf}\x0bj\xea\xce`\x9b\xd3\x98m\
\xe84\x1fsFf9\x1b\xb2\x11\xa9\xb8\x0f\xc9\xe66\
\x94\x8a\xe5`ge\x0e\xd5F\xef!K\xe7u\xef]\
\xf8@\x0e\xc8\x02\x80\xd7\xff\xf4'\x94}\xf95\x86\x9c\
vf\xbe\xfab\xf6ql%\xf3u\xc8\x94,,P\
\xd9\xd6\x22\xb9\xa7\xf3\xe4%%\x83W\x8d9\xfa\xa8\xd8\
\xe0\x1d)\xbc0\xff\x85\xbd}q\x1c\x01\xcdZ\xba~\
\x0a\x92v\x02\xae\x9f\xe1\xe6E\x92,\x5c\xad\x19Rk\
\x0dv\x14\xd8rLv\xad0\xa7\xdcl\xacm\xbe8\
\x90\xb4;^8\xee\x90k\x8c\x83JvD(\x13\xac\
\xb4\xa9]\xc7\xd4D\xa62\x18\x9a\x5c(\xf6R%4\
\x18\x1e\x9f\xa6\xe7\x9d\xd0\xccPn*\xecZ\x96T\xec\
\xc2\x10\xbe\x04\xaaWZM\xe9\x9d\xb2\xe7;\xae:\x8f\
\x88\xfc\xad\x9a\x84\x97\x85\xa5\xbd\xbf\xb4\x04\xa0\xe1(6\
\x1c7$\xa4\xfc1\x0fn\xcaIa\xc9\xe4\xa9\xfa\xd6\
\xc2.u\xf0!*\x03\xbe\xea\xbbn\xbc\x12\xe9n\x13\
R\xfe4\x04B\x04\xc91\x80\x02L\xdaE\x01m]\
\xdc>\xc7\xf7\x9avLI$\x8d\xa4\x99n6\xcb\xf4\
\xa2\xc6(\x974Xj\x84+eAY\x1d\x8f^\xc4\
\xea\xe6\x83{M\xbc\xf2\xbd\xf7\x7f\xbf\xee\xc0a\xd7\x82\
Y\xb3\x16\xccJ\x05\x11\xb0b\xcd\x19\x14\x7f\x83\x84\xaf\
\xc1+%\xa2\xa0\xb5&\xaf\xdb.\x81]V\xa4\x031\
n\x5c\xf8\x1c3\xce/\x98\xe8\xf1,h\x80\xd8e\xa5\
\x1d\xddl\x03\xb6\xbbo(\xe3\x88\xce\x8d\x88\xc7\x1a6\
uok\xdf\xe3\xbav\xc4`\xd6\x82|:#'\x1c\
^\xb8FL]\xb8\xcb?\xc1 \xa6\x83\x06\xd1{m\
\xb3\x1b>o\x8a%LCHb\x90\xf0\x85\xc2e\xf5\
\x95\xf5M:\xd6\x0c\x00x\xf7\xd3\xf5\x98\xb3\xa2\x02\xe7\
\x9d\xd4\xd6w\xcdM\x1b\x0e\xd6\x0a=\xfd\xec(\xbf_\
%\x1a\x1c\x1dl\x8e\xa2]YS\xe4\x88\xfb\x9f\xbe\xff\
\xb3&\x94\xc4/\xbc\xe0\x84\x9f\xdc}w\xafX\x81\xde\
\x97\x9e\x03t\xe8\x98\xe6\xdeu\xcf\x85\xa8\xaa:\x97-\
7\xa0\xc2\x86vzuZ\xd4\xdc\xa6\xed\xa2XI\x8f\
e\xfe\x9c\x9cr\x1d\xab\x0d\xa6/\xfaj\xa0\xafl\xd7\
PT\xc6\xc7\xd8\xb9\x5cY\xd6\xb0g\xdb\x8cu+t\
\xaf\xcc\x5c\x00\x80L\x0ba\xcfk\xaf \xb7\xff\x98^\
:\x95\x9c\xc4\xda\x91\xae\x19p\x01\x09\x8e\xc7\xfdr\xd3\
\xb6\xa3{\x0e:\xf0U\xee\xd8n\xfd\xcc\xcf\x17\xed\xd3\
\x17G\x08h&v\xa4d\xa1\x14\xac\x0e\x9d\xab\x1a\xfa\
t~\xcbj\xdc\xb2\xdbu\xb4\x9f\x15 \xc1\x08X\x14\
\x0a\xc4\xac.\xa8\xda3\x86\x9b\x92\xf9\xb4c\xeb\xa1\x9c\
\x91}\xdaV\xa4\xffa\xc2)#XI\x83\xb5\xd6P\
>\xd3r\xda\xe4/pk\xcb6y4\xb0Dn\xcb\
\xb7U\x81\xe12\xb1\x96l(\x9d\xdad\xc7\x1ak\x13\
\xd5>\x18\x89h\x12\xf9\xdd\xb3\x10\x0e\xf2\x9afK/\
f\xe5\x8eo%\x0e\xf4\x085\xbc\xf2H\xde\xe9\x8e\xe1\
*\xceNi_\xc4\xc7r/\xcbK\xab\xbc\xf7\xde{\
\xb8Q\xe6\x82\x12\x89:7\x10I\x19B\xda\xb6\xcd\xb0\
I\xb6\xc4\x12Kh\xb25\x93\x82\xeb8\xd8V\xb9e\
y|\xeb\xfa\xa7\x8b\x00.\x020m\x0ap\xc1\xfbo\
\xe1\xca\x9b\xb7\xe5\xcd[\xdex\xfc\xe6\x0a\xf7R\x1b\xb2\
KY\x1d\x0e\x5c]j\x9d\xff\xe0\xf5\xb7\xdcR\x90\xad\
\xa3B$\x19\x82XCS4Q\x15m\xde\xf1\xce\xe3\
\xd6\x1dM[\x06\xdfD\xa2\x09@\x12^F{{\x00\
>\x00\xdb\x00d\x01|*\x0dA\xa8C& \x15\x04\
3$4k\xa5\xb9\x1e\x80c\xef[\xab\xfa\xcf\x8f\xfd\
\x0e\x00*\xe1\x11\x9c\xb6F\xed\xf1\xf4\xa7\x9f\xca\x98\xbf\
*\xde\x13\x82\xc7kfQ\x1f\xd7+\xfe\xf6\xf1\xf8\xf7\
P9\xfcG`\xbb\xb5%\xda\xf2\xba\xfb_AP\xb6\
\xc3\xdaU\xa1>)\xe5Lb-(\xc7\xd7\x10\xed\xd6\
1\xf8\xe6\xec\xd5b\x92#t\xfb\x9d\x15\xce\xa0\x15\xbb\
BC\xf4\xe8)\xfb'th\x113\x14@\xfd\x8d\xd7\
#\xed\xb0\xa9\xfdEu\xc5\x19\xec\xaa\x80\x1b\xf2\xab\xd4\
\xa8!\xd3\xe3\x87\x1dv\xdf\x8b\x99CW\xcf;\xe5P\
\xc5\xfcW\xd0\xdcbL\xba\xe7\xe1O\x07MnW\x9c\
!x\x0a\x0a|;^.\xdb\xfa\xb5\x098\xf3\x1bj\
\xf0\xe6\x9d\xf7\x22\xa1\x81\xb4\xbb\xee\xcbt\x9e}\xfd$\
\xb6\x12Y\xac\x19:\x14\xf9\x0a\x8a\x83\xdaJ\x8e\xa1=\
;\xdaY\xeb\xd6Ln\x1at\xc8\xb6\xc1g\x0e\xb2\x8e\
\xf1O\xc2\x99\xf7x\x87w\x05\x01\x05\x0d\x05\x09\x86\x01\
\x97|\xe5\x8d\x22\xe7\xd5\xe7>\x9a\xbe\x88\xbeK<d\
\xf4\xc8\xc0]\x17\xdc\x90\x1b\x7f\xfd\xb5\x9b\xb8y\xe3y\
\xda\xb2\xd2P^1.\xff\xe0\xd0_\xa4\x1dnTl\
\x90\x82\x862\xcc8\xf7\xe8\xfd\x8a/\xeb\x80W?{\
\xe5\x0d'\xdd\xcf\x88[\x80\xf2\x036\x80j\xc5\xe0\x0c\
\x90\xaa\x89q\xdb\x8a\xe5\xba\x0e\x80\xd1\xb9\x7f?\xa8\x8a\
=\x180 \xb8\xfd\xb3/\x9a_\x8b\xc7\xdda\x04J\
\xf7N\xfd\xe2{?\x9eJ\x91r\xa9k\x82\x83\x85\xcd\
\x09^\x9b\x97[\xbc\xcf\xe4N\x9a4\x09jK)(\
\x1e\xb7\xc8G.+G+\x22,\x0a\x03\x83\x95\x86v\
\x09\x9a-@\xb8`v\xc0\x82Q\x05\xb0?\xb7\x0dG\
\xf2\x0b\xf1~c\x09V]\xbf\x09ZfU?qM\
\xbb\xe7\xce\xbck\x99\xb1\xab\x0e\xd7k\x92\x85\xe55\xee\
\xa1u\x9d\xd5\xd3Y]G\xacwKg\x82\xb5d\xc0\
\x05\xc3%\x07\xe0\xbc\x87\xda!k\xe4\xd1\xba@\x00\x86\
a\xc04}0\x0c\x8fy<\xc7\xb0Q[_\x8f\x9d\
;\x830D3\xb9\xe4\x02-\x14H\xaerQ\x07 \
\xe5&\xf7\x19K\xfbp\x1f(#\x81\xccp6\xfc\x11\
\xc5\x01vq\xebu\x0f\xa2w\xd7L(\xd5\xcc\x90\x0c\
\xe5:pYIT\x1e\x85\x07\xee\xbe\x17\xdf\xcc\xf8\x0c\
A\x9f\x1f\x22\x9c\x06\x99]\x88\xd1(@\x8f?\xdf\x86\
\xcf_\xfa\x00\xa7\x1e[`\x1cu\xe6\x8a\x03S\x0e\xf7\
'[\xeb\xe2|5wl_\xf3\xc9\xe5\x9b\x95\xaf\xc1\
\xc2\xc9\xf5Mn\x8fu{\x02\x13\xde*zf\xfe\xb0\
w^v\x8e\x9b\xea\xd5\xb7\xf8\xa1\xc72\xd6\x10\x83\xf9\
\xda\xdf\xd2\xf5]\x0f\x1e\xc9v\xaaX\x13\x90\x1a\xd2w\
fb\xd2\xd1\xb7\x9b\xdbVm86-\x8e\x92\xc9\xc7\
\xe3\xb2\x8b\x96\xe3\xd2`\x13\xf2_\x7f\x85o\x1d\xe4+\
\xa7O\xa2Or\x11\xe1\x86\xca\xef\x82\x8a\xfa\x1e4\x1c\
W\x8c\x1f\x87\xb3'\x9d\xd8!Q_;\x96\x1d\xc7\xc7\
\xc1P)\x15\x14?MN<_\xc7\x9b\xfb\xead,\
\x9b\xd6\xad;:o\xd8\x01o\xd5e[\xa5\xb9\xc9\xac\
\xbd}\xd1\xec\xd5\xfcs=\xbb\x01\x5cR\x86J\xc5\x8c\
+\xcf\xbf\x10c\xdaIMB \x08\xc2\x01\x07\x1d\x86\
\xdb\xae\xba\xa9\xe6\x8eS\xa7\xbd\x11\xdd\xb0u\xbcb\xb7\
\xb7N\xc6\xd2\xea\xb7l+*\xefR\xd6\xa4X\xb5\xac\
\xa4\x22W\xbbFvq\x17}\xd8\x91Gh\xab\xb1\x0a\
\x96\xed\xa9\x13\xae\xeb\x22\xe58\x88%\x13\x88\xfb\xe3p\
\xc1\xc8\x0b\xa7\xc3X\xfd\xde\xdf\xd0\xbe\xff\x18\xcc\x99\xdd\
\xa43#\xf8\xcc\xb6\xf8\x1d\xc7q\xcf\x10,$\xa3\x85\
a\x9d\xb0\x17\xc4\x09\x8bs\x9am\x7f\xee\xf6\x1a\x85!\
=\xfb\xe1\xaby\xdf\xd9\x15?\xfb\xec3\xb4\xef\xd5\x13\
\x12\x22\x1aN3\x97\xb1c\xbby\xe9\x11\xb8\xae\xdbR\
\xd2\x89\xe1\xb6\xbc\x0a\x82543|\x00v5Tb\
{m9\x80e\xc0\xec\xe9\x00\x80\xcfg^\x92:\xa0\
W\xd6[\x95u\xd1\x89)EE\xcdQ7\xb7\xb4\xbc\
aX\xfb\x8d\xc7o\xdc\xdd\xf1B@\x81\x08\x1a\x06\x19\
\xc8J\xcf\xf2\xddr\xfa\x01\xe6\xd6zSF\xfc\x06\x82\
\xbe\x00\x82\xc1\x08\xa4i\xc2\x04\x81\xfdI^\xbe\x9e\xdd\
U\x15\x96rR\x09p\xc0!h\x0dh\x0dv]\x98\
\x00\x04\xf6U!J\xe3\x1e9Hy\xd3w\xd4\xf9\xc7\
\x04$\x82*\xe5\x9d5\x85\x00\xb3\x0bG9\x00\x92\xb8\
\xe6\xe6\x1bq\xed-7\xed\xd3\xc6\xeb\x00N3\x04\xb6\
E\xb3\xb0j}]\xc7D\xca>J\x83\xcc\x90a[\
E%\x19\x1f]q\xc3\x985\xaf\xcd\x9f\xfd^\xd3n\
\xf7p[Q\xde\xd6=\xd6\xc1\x7f\xfa\x80\xfaD\x0dg\
\xc5\xa8\xaa*\x14\x14\x14\xec\xd3\xde\xdd\xbf\xff=\x9ce\
+`8:G75N\x04l\xbf\x93\x9f\xdfP7\
\xf8\xa0w.\xb9\xf3\x92\x0d\xcf<\xf49\xae:u\xc2\
>\xa0\x9f6m\x1a\xa6\xd5\xe4`\xec\xb8q8b\xf0\
\xe1\xb8\xae\x97G\x9e\xbd\xe0\xddO\xb0e\xcbz\x9c\xf1\
\xfe{\x81\xf8\xb5w\x1e\xa3\x9c\xe6\x22\x06C\x15\x15.\
\xca8|\xea\xacp\xf5\x8e\xec\x8a\xb2\x9d\xa7(\xa7a\
4vo\xeb\x12_\xb1ll\xc1\xf9\xb7\xbc\xb2\xf1\x93\
\xd7\xd5\xf5\xe3\xc6\xe1\xfe\x993\xe1x\xc5\x06\xe0\xb2\xc7\
\x1f\xec\xc2%\xa4\xa2D\xc8\xc4\xec\xdd\x0a\x80\xc2'\xaf\
\xbe\x8a\xc3N>\x19\x8f<\xff\x06|ZF]\xed\xd5\
\xf9VZ\x93kY\x94\x8c%\xa1\xb4\x82\xcb\x0c\x05b\
\x16B\xe7d\xe5\x18\xf6\xc1\x87\xa2\xb9a7R\xb1$\
\x5c\xd7\x81J\xa4 \x13q$++\xc9.\xdd\xa9W\
m\xdf\xe3F\xd0\xe8\x9d\xc4JW\xcd\xc6\xdd\x97\x0f\xc1\
-o\x84+\xdbd\xda\xcf6Fu\x0f\xe5\x8a\x03\x89\
%Z\x19a\x88$\xc0\x80\xed\xb8FS3\xf7\xe8\x9f\
\xd7\x14n\xb4\x8c\xf8\xb4i\xd3\xf6)\x01\xba\xaa\xb6\x0a\
&;\xb5\xbd{\x94|\xc81\xcb\xf7\xc2\x93\xefP\x97\
\xee\x1dX\xe7+(v\xa1\x95\x06\xb4j\xf1\xf2\xed\x9f\
\x97\xe2\x81\x87\x1f\xc3\x86\xad\xd5\x98|`z\xcd\xe2u\
\xf5\xdbSM\xc2u\x95\xce\xa8\xae\xb5\x07\x89\x1c\xbc\xe6\
\xb1\xd3\x0ahV*#;3\xd8w\xf0\x94\x93>\xd9\
e\xd6\xb1f\x83\xe1\xb9\xc0534R h\x19\x09\
\x19\xcd\x85\x81\xe2Y#\xa6\x1d\xbea\xf6\xf37\x00\xca\
\x01\xc8\x85V6\x14\x14\x07\x01@\xfc|:\x0fk\x17\
\xec\x02\x8ev\x00\xa1@\xe4xm\xc1\xfd\xc9{z\x1e\
\xd4\x13/_p\xb8\xe81\xf6\xa9\x01)W\x0d\xd6\x0a\
\x94\x9fk-\xe9\xdbN,\xbc\xf2\xfao\xf4\xd0^\xfe\
U\xa5U\xd1\xe5MI9\xbe\xba\x1e\x83\x96m7G\
\xcf\xf8\xeb;\xab&\x8fyH\x7f2{_\x86\x1dS\
\x12\x8c\xe7\x9e\x02FO\xccT\x89D\x16\x1c\x0d73\
}M<\xbfh\xe1\xe2\x97^\x85Z\xbc\x1eD\x13\xf7\
\xb9\xe7\x87\xe5Y?\xfe\xf8c\xfc\xe9O\x7f\xc2\x929\
\x0b\xb1\xf1\xb8\xeb\xd0\xf7\x84s\x8b\x9d\x9a\x8a\xf1\xac\x9c\
\xb0\x9b\x1e\xa9sz\xf7\x9d\xdd;\xf7\x8a\xa6\xe6#\xd7\
$x\xe6g\xdf\xaa\xd2\xe6\x03tsS\x81^\xbdz\
Jj\xc3\xec\x19F\xbb\xfcz3\xee\x19Wlb(\
f\xb8`H\xcf\xe6\xa3\x8c`\xd8\xa1\x0d+qyQ\
:\x00\xc2\xc7\x97]\x81/\xae\xb8\x19\x8f\xb0c\xac\x1a\
0\xec@\xd7u\x0b\xb4\x06\x5c\x92\x96\x11\x0e6\xa6\x05\
\xfc\xecz\x01\xbap\xb5\x0e8U\x15\xa37L\xff\x1b\
iG\xb9\xae\xd6\xc2U\xec\xa9\x17Z\xc3\xf5tV3\
#\x92\xbda\xc8\x91]\xe6\x84j\x12\xdf\x11\x5c\xbf\xf3\
\xad@\xef\x12\x05'Q\xb54\xe9\xcby0\xa1D\xb6\
f\xee!X\xb6D\xa3yXf\xade,\xe5\x1e\x8c\
\xfc\xecW\x0dr\xb7,[\xbco!\x86E\xd5\xf5\xa8\
\xd9\xb8 u\xed]\x0fmuc\xb1\xb4\xcc\x80$;\
\x16en\xc9\xe2\xd0\xae\xcd`\x97\x19\x0e\xb8\xe5\x88\xf8\
C\xaf\xea\xb5W]\x86\xa9SO\xc6\x9e\xf51\x0e\xfb\
swI\x98\xcd \xceJ\xa4\xb8s\xc2\xc80\x22P\
H&('\xee\x0a\xff\x92\xed\xc6E\xccZBk\
\xd2Z{u\xc4\xe19^4\xb3L\xf3s\xe9\xb8\xae\
TY/\x0f\xdb\xe0\xc4\xafc\xce\xd0\xcc\xc2\x85V.\
\x143Bh\xe1\x10\xf9\x19I)\x07q\x87\xc9b\x8b\
X(\x805\xb3n\x09\xd9\xdb\x8f\xdc\xfd\xf8\xcbX\xba\
\xb2\x1a\xc7]\xf8\xd7\xbc\xe6x\xeaX\xc5\x22\xec\x93\x8e\
\xd3\xb9\xc4\xfc\xe6\xb8^\xcd\xeb\x176v\xc2)gw\
+\x9f}\xf8\xbcw\x9a-wT\xd2Ehk\x85s\
\xf8\xd9\xe7w\xff\xbc\xb0\xff\xb0\x8d\x87\x1fy\xcc>\xed\
uo\xdb\x15\xbd\x99\xb1x\xd4\x84,\xe1:\x06\x18P\
\x86\xbf\x22c\xd8\xe0\xd2\xdd\xdb\xd6\xe1\xe2;\xae\xfa\xf9\
A\x00\xf8\xe8\xe1?a\xc3W_c\x18\xb3\xd8\xd3c\
\xe8d\xd7It\xd3\x04X\x1d\xdam\xf4\xf5\x1d\xf4\xd9\
\x8c!\xcf\xf0\xc3\x03/\xb0_\x9cr\xdc{5e\x15\
\xc7(\xd5\xdcC\x97\xed\x1aP\xfd\xe5\xe7c\xee{\xe5\
\xb5\xf7\xce}\xd5+C\xab\xc9\x05\x93\x86b\x05f\x82\
\xb6-_Z\xc5\xce\xaer\xdb\xe6\x94\x12$\xa9\xd5\x95\
\x10O\xd2\xca\x0e}\xba\xdb5e\x17(\xc7-R\xd2\
`'=\xb2\xc5\xae\xaf\xa8uSI\xb8Z\xc1%\x82\
J\xa5\x82X\xbdn\xaa\xc5\xee\xe1\xda+\xe1\x01E\x04\
M\x1e\xc0=C\xa90D\xd0\xffBV\xff.\xf3D\
E\xb3\xda\xcb\xf0\xb1r\xd9b44\xc6\xe0\x06\xba\xb8\
\x06\xd7|d\x9a\xee\x9f\x08\xee6\x0d\x05b\xc5`\x97\
\xc1\x8a\x89\x19\xf1\x84\xdb\xb7\xbe\xc9\xed\xb1`\xde\x97\x10\
\xbc\xef\xce\x15\x14\x02\x87\x8c;\x0a;V/K\xd4\xd4\
\xee\xa8\xde%m\xbd=\xd5\x00W\xb9\xb0]\x87\x95\xeb\
\x00\xec\xb6\xb8\xa75R\xfb\x01\xf0\xa4I\x93\x10\xb3\x5c\
T\xa7\x5c\xa4,%\x19\x8e\x00\xdb0\xe0*\xc3\x0c\x02\
\xae\x03(E\xe42\xb4\xad\xd9U\xf0)&\xbff\xe9\
g2} \xc3G\xd2\xf0C\x9aA\x92FD\x833\
\x12\xb6cd\xb5\x018\x9e\x02\x1c\x07`\x07\xd0^\xb1\
\x12\x07\xf8\xd9\xda\xbd\x00\xa0\x1d\x0d\xd7fh\xc7&v\
\x15\xd8U\x80\xf2\xce\xc8?\x941\xb7\xdf\x8e\x9c\x88\x0f\
\xef>u\x11\xd6m\x8d\xf6J\xd8\x18\xa9m\xd7(L\
s6v\xcb\x89\xce}\xabv\xa8s\xd2\xd9\xa7c\xc2\
)s\xdd\x8e\x05X\x1a\x12\xceF\xb8\x0a\xd5\x8d8h\
g,k\xd8\x0b\xab\xee\x80)R8f\xe2\xa0\xef\x9e\
\x9f2\xd1\x04\x90\xeb(\xe92\xc3%\x82K\x02\xcaM\
\x09\xf3g\xfb\xdf*\xc5\xc5\x05\xd8z\xd7CH\x9d\
~~\x9eS\xb9g\xa2k\xeb\x1c'\x1c\x8a\xeb\x8e\xdd\
f\xa5\xefp\xcb\x86<\xb9\x0b\xcf^~\x19\xec\xae%\
\x9buF\xdaJWK\xa5\x9b\x1a;`\xd7\xf6#\xaf\
\x7f\xf8\xd1p\xc7C\xc6\xe0\xc9k\xae\xd9\xeb#\xf0\x22\
\xc4\x19\xbc\xa7\xb2+o\xdc\xf4'7\xe9\xbc\x8f\x04O\
wS\xc6tM\xc6t\x1b\xa9\x0f\x92U\xdb\x9ft\x9c\
\xf8 \xdb\x10H\x15\xe7.u\x0as\x9e\x9f\xfe\xd9W\
q\xc7J\xc2ef\x17\x80\x0bM\x0e\xe9\x90#u\xb6\
#T\xb6C*\xdb%7\xdb\x81\x9b\xed\xb2\xf7\xa3\x94\
\x9d\xae\x92\xf1\xb4\xf8\xb6MH\xd6\xd5\xeck\xcc\xad\xd8\
\xba\x0aF0\x13y\x9d\x07\xd9F\xed\xa6W\xc9,p\
m\x9b\xafW,z\x0a\xcd\xba\xc5\xbf\x0c\xc7\xe6\xb4\xba\
f\xeb\xc4\x81CG,D,Uc\x98\xb5p\x1d\xef\
\x14\x9f\xd4\x1ayy9\x90B\xb0\x14\x06\x13{\x0ei\
/^\x81\xa1\x94\xf2X\x16Y\xfd\xe4\xce5\xfc\xe0\xa3\
\xb1p\xd5\x168\x99\x11\x8a[\xa5\xed\x19N\xba`\xdb\
\x0a\x86\xd4\x0e)*]\xad\x1c\x80Mh\xd6\x86\xb4c\
\x96_$^'a6\xc2a\xc1\x0a\xd0\xc4\x10\xd0\xa4\
\xc8%h\xd7\xe7\x0f \x9a\x88a\xe3\xee\xd7\x1f\x02@\
\xd0\xae\x032<\x00+e\xc31[\xab\xab\xff}Q\
\xda\x85W\x1d\xc0\x05k\x1b\xd0\x8eW\xad\x07?\xa6s\
\xe8\x16\xcf\xc5\xa7s\xaa1\xf3\xdbG2\x1a\x9b\xac\x93\
\x95\x969Bj\xce\xcd\x0e.<\xf3\xe8\xb4E=F\
O&\x00\xf4\xc5\xeb\xd7qj\xeb'\x9b\xfa\x9c\xb9\xf4\
\xfd\xa4\xcd\xfd\x13\x09'X]\xab\x8e9\xb9\xf8\x8cY\
l\x1b\xbb6m\xd8\xb3\xb7M+U\x83\xcf\x88\xb8h\
\xd8\x811!\x5c\xcd\xa4\xe0\xe8DFl\xd1\xa2\xec\x8c\
^=\x9b.=d\x02\x1e\xff\xe6\x8b\xbd\xd7\xffT\xc0\
\xd0\x8b\xb7\xdd\x8d\xe5\xa8E\xf6\x82\x85\x93\x1c7:\x84\
\xc1\xd0\xe9Y\xbb\xb2z\x0f|\xf3\xec\xbb~\xf7\xfdj\
\x8f\xcdO\xcc\x9b\xfb|mS\xedh\xa5\xedb\xec)\
\x1f\x11]\xb6\xe8\xa0HA\xe8\x8bo\xcd\x1c\x18,\xc0\
\xac\xe1\xc2\xab0\xc5q65'\x0a\x99Z\xec\xb6Z\
BK\x1f\xb4\xf0\xc2p\xdd`\xa0\xc9-\xcc\x9d\x93\xc8\
+|F\xd7\xd7\xcc?o\xe28\xb69\x01\x874;\
`()RN~\xde\x1c\x15\xad\xd9\xa0\x94\xcbJC\
(f\xb8Lp\x19p\x18`\x01\x1f\x11\xcf\xa5\x9a*\
\xb6\xe2\xce\x8f\x99y\x9cD\x03\xaa\xb7\xafFnI/\
\xcb\x8d\xee|\xc3\x08\xe55&\x93\xea\x0aW\xe9\x91\xe4\
\xa5\x19i\x06|M1>\xc4\xef\xa3>\xdb\xd7-\xff\
\xe6\x87'\xe5oVn\xdc\xe7\xbf\xeb\x121\xb8pa\
i\x07Z;\xe0\xd6\x85\xdf;\xc1\xdfM\xf4Yg]\
\x84\x1c\xa3\x1e\xdf\xcc\xdb\x8e\xe6\x86\xdc\xe2\x84\xa5\xba1\
\x93 \xd6M>\xbf\xb1\xccOp\xe2)\x9b\x14|`\
v\x0d+Y\x9f\x8c\xef\xfe\xe4\xd1\x03\xcf\x8bo\x99\xf7\
\x97\xef:B-\x8e\x1b\x09\xa0\x01@\xf9zh`!\
\xccp\x16\xb4p\x99\x84\x0d\x08\x05W\xdb\x88*\xc0\xd2\
?\x1f\x1df\xbb\x16R\x8e\x82V\xb6\xb7\x83\xb3&n\
\xd9\xba\xb5\xde7\xd3Y\xa41f<p\x0fz\x1c|\
q\x97T\x8aFC\xb8&\x01\xa9-e\xa2\xc7\xb0\x0b\
\xe27\x0b\xfa\x83\x94\xc40\x14\xc3']-}\xba\xaf\
\xa1I\xd9\xaceM#\x8d\x89\xe6\x17\x0d\xf8\xf0\xdd\xfb\
v\xf5h\xdfio\x9b\x0f\x9ew>N\xfb\xe8e8\
\x81p\x83\xd8f\xda\xb0\x12p\xad\xa6~N\xf9\xee\xa1\
fa\xfa\x0e\xaeS\xb8\xe3\xf4Kp\xc7\xcbO\xec\xbd\
g\xd1\x82\x05\x186b\x04\x86\x16\xf8\xd1\xae \x0b\xef\
\xae\xaeD\xd3\x17\xdf\xe0\xb0S\xcf\xcd\x8a~\xf0\xe9x\
\xa5\xa8\x90}\x06\x10m\xc8\xaa\x7f\xf4\x9es\xef}\xf8\
\xce8\xb3 fb\xad\xb4f\x9f\xcc\xb2%\x07\xb5A\
P\xf5u\xdd\xfd\xe5\xe5\x87\x96\xf7<rv\xf7D\xbd\
\xb5\xb9\x85J\xcc\xf5jX\xc2\x0d\x87bna\xc9b\
rS\x15pR\x01U\x1f\xed\xc7\x96\xddE\x0b&'\
'\xb3\xc1\xc9+xR\x15\x16=\xfb\xf1\x86\x86Rg\
\xe7V\xbeu\xc8\x81hN5\xc1\x86\x82\xc3\x80\x122\
\xc1\xd9\x05o\x04\xf3\xba\xbf\xfa\xde\xb2\xaf\x9d\x08@\x09\
x\xaa\xc3\x0f\x84\xb1\xa5\x9e\xb33\xd3\xf7O\xaf\xea\xc6\
kP\xb3m\x19\xda\xf6\x19e\x1b\xce\xf6\x8fL\x99^\
\x1e\x8b\xa9\xablW\x1d\x0d\x08\x93H*\xdbrs\xa3\
q:\xaf[\xdf\x81\xab\xfa\x0d\x1f[\xbfg\xfb\x06\xd4\
V\xef?\xd2\x09\xf0\xcel\xcae(\x97=\xee[\xaf\
[\x0c\x80\xbb\xe4wBz^\x1aB!?\xcaw\xef\
\xc1+\x1f\x97\xe1\xa4\x89\x1dB\x9f,\xae=\xcdq\xe4\
@\xad\x1d\x0e\x04TMnN\xce\xe2-\xe5\xf7\xe8L\
w1\x94\xd6\x0c8`X\xc4\x00\xafx\xb9\x00\xdd\x8a\
\x062)\xf2\xbe\xe8\xdaaM\x0a\xa0\x184\x92\x88\xbb\
6*cm\x01\xa1\xc1`bv\xc1ZA\xb1\x89z\
\x0dH\xd1\x1e\xc7\x8e\x1c\x08\xc9.\xb4!\x90\x82\x0d\xa9\
-XF\x12\xd5M\x16z\xf5\xec\x05GiX\xaew\
\xa0`\xed\x00\xae\x0b\xd7\x05\x80\x00jj\xbe+x=\
\xf6\xa4\x9b\xb1n}\x12G_rc`\xc1\xfc\xba\x93\
\xb52\xda\x08v](\x1fG-=B+\x1a\xa3I\
\x81\x85w\xd8dW\x81\x92\x9aMC9B\x92\x8a\xc6\
T\xa4\xb2\x01G_p\xfa\xe9\xdf\xf6\x1e0\xb4\xb6\xae\
)\x8a\xda\xe6Z|\xb2}=\xfev\xd0\xc1H\x80\xeb\
}k\xb6.#+Q\xa2\xab\xea\x8a\xcd\xcd['\xaf\
\x88d\xce\xce\x19\xd7\xab2\xcb\x1f\xc11\xd7\x5c\x83\x93\
3\x04z\xe7\x0d\xc4\xe81m\xb1\xbe\xef\x10,\xa9\xb6\
e\xb4\xa6\x8a\x89H_\x03 \xb3\xa1y,\xbb\xf6\x18\
-\x0c\x8f\x878\x19-\xd2J]\xce\xda\xb3\xfb\xb7\x9e\
Y\xd8!h\x1fA\xfb\x0d\xb0N\x91\xdeS:N\xad\
_\xfd\xde\x0e\xdb^\xe0\x92\xc7\x86\xe7\x90\x06A\xc1\x8a\
\x84+vu\xec\xf9x\xb8m\xce\xfc\xb0b\x9fX>\
\xef`\xb1\xbd\xe2\x0evS\x9dT\xc26l\xa5\xcdX\
,\x91\x18_\x0c\xfex\x07\xe3\xf7\x97\x9c\x8f\x98\x93\x80\
C\x1a\x8e\xd0P\xc4\xa4\xd85\x22\xc10\x8f\xee\xd8\x89\
\xe3\xc9&\xd6\xac\xe0ze\x18\xe1B\xc0e\xf6XP\
\x03a\xf8M\xf3\xa7\xf9\x81\x95\x15C\xaa\xb1\x16>\x11\
\xd4U<dYq\xda\x82\xdfE\xe3Xf;\xfa,\
\xcd\xba;\x83|\xf1\x04\x1d\x126\x8d\x09\xa9\x15\xb3\xde\
2\xd2\xdb\xff]\xb2\x05\xe5*\xd8\xb6\x0b\xa5\xe0\x11(\
\xb3\x82I\xd2\x18\xd4)\x10\x08\x06\xc2\xf0g\x19\x08\x87\
Lr$\xcc\xfe\xdd\xb3\xf2?]PyJ\xdc\x92\xe7\
k\xb8\xe9\x02n<?\xc7\xf7~\xbb4\xb7tE\xdd\
&\x04B\xd2\xdb\xc0\xc9\x86\x04\x10\x0ef\x06\xfb\xb6k\
\x13d\xb6\x85\xa1\x04Zlu\xd0\x10\x80`\xb8ZS\
\x9dm\xba\x95M*\x05j)sJ\x1a\xac\x1c\x80\xb5\
\xbcb|WYTR$X1\xa4\xd2P\x86\x0f1\
\x06\xc8\xb2Q\x91\xb4Q\xdb\x98\xd4\x80\xa5]\xad\xe1h\
\x0d\xa5\x15H+h\xd7\x06\x94W\x0e7\x1e\x8f\x03\x00\
\xbat\xe9\x02S\xa6\xf0\xf5\xeb@\xa7QN\xc7d\xca\
\x18\xad\x15\x87\xc8r\xe3\x02\xca\x91,,\xd1R\xbb\xc9\
\xab\xb7\xa6\xc1\x82\x88I0\xd8\x12\xd0\x14b\xa6@y\
\x0d\x0en\x9fW\xd0\xf5\xdd\x19\x7f\xaa\xcd\x0e\xa5\x01\x00\
\xf2z\xf5Be}3r\xff\xfch\x0d>\xfd\xfae\
j\xae\x1e\x03'\x91/V.;<\x02\xde\xc1#F\
?\x9bw\xd3M\x15\xdd{t\xd7kRI\x1c-\x1c\
\xf4{g\x05-\xbf\xf8\xa2\x9c+\x8e;\xf6P3\x18\
\x89N\xeb\xd2}fN\xc26\x1a\x9e\xfd\xeb(\xad\xdd\
\xb6\xac\x5cfJ%X\x92\xa5}fK\x22\x83\x17\xa1\
\xa6[\x5c\xf2Z\x80X\xc3\xcf\x10!]U\xd3W\xec\
)=(>\xf1\x84\xa5\xee\x97_8n<\x0aG\x03\
\xc2\xdbN\x92u5uU\xb9AY\xa5\xac(v\xa7\
\xfb\xdfj\x93\x95\x9d\x86\xca\xea\xbb8\x9a\xcc\xa1\x9de\
\xe7\xfa;\x181\xa7m\xbb\xc7\xc7\x8d\x1e\xd90\xb0}\
\x07\xd8\xd2\x84\xcd\xde\x92)0\x14kQ\x1f/\x0f&\
C~C\x9b\xe9\x04\xed\xb6\xd0:x\xf5\xee\xb5\xabI\
khf\xb6\x0c\xbf_\xff]\x82\xeb\xaamK\xd0\xb6\
}7t\xa2\xf7\x90\xa0\xb6e\x99i\xd1\xa7RVp\
I\x22\xa9.\xb4\x14\x8f\xb5l\xe4\xd5\xc5\xe4\x05\x99%\
\x83Vf\xe4\xe4l\xac\x8d\x95\xc2M\xee\x1f\xc7\x96\xe5\
\x22\x91l\xf9\xd4\xb2\x0ba\x08\xcen\xd3\xbe_c\xb0\
\xfd\xa5\x8d\x1a\x92\x1b\x89U\x1d\xc9h\xca\xed\x16\xb3\x9a\
\x07:Z\x94h\xe2t\x08\x8d\xbcLZ\x9c'\xe2\xaf\
l\xa3\xe1\xf1\xed;\xe6\x22\xb3\x1bCk\x17\x0cW\xa5\
ef\x86\x8b;\x1d|AB\xc8&C@*A\xcc\
\xad\xe9|\xa4!\xa0\xa1]7\xc4\xd5\x95\xf3\x98g\xbf\
-3\x0fdOgu9I\xd2\xd8\xd1\x98>\xcc\xe2\
\xf1\x89\xb5edJ\x22\x16\x82\xe1Y\x0c5\x84\x14\x22\
\x9cf\xc4:w\x89.\xea\xd6\xa7Oy\xc5\xe6\xddH\
Z\x00;\x16\xd8\xb1\x01\xdb\xf2j\xdc\x01{w\xe0\xad\
\xf20\xa0Rb\xeau\x019\xeb\x8b\xe4Q\x9a\xd1M\
)\xa6\xcc\x80\xb3\xb3c\xb1\xff\x0fB\x89\x1a\x10Z\x0a\
\x8eq\x0b\xa3\xa7 f\x82\xa3Th{\x95\xbe\xd8Q\
b\x5cCT\x17n\xaf\xd0SO;\xe5\x94\xd5C\x0e\
\x99\x1c\xdf\xb8\xc3c\xec\xbc\xe6w7\xe1\x81\xd3N`\
c\xd8\xc0\xc5i\x89\xc6\x19r\xd7\xf638\xde\x98C\
\x0b\x16\x5c\xa6v\xed\xee\xbbu\xfe\xdc\xd7\xd22\xb36\
\xd8{\xe2\xae/\x14\x92\x1b\xfbt\xee\x18hJ\x9e\x14\
p0N\x1bf\x0dV\xaf\xbe\xb5\xae\xc9N\xba\xa9\x86\
I\xac\x14Tz \xcemK\xfel\xfa\xcd\x85:`\
B\x11\x93fOO\x84\x17\x1a\x06v]\xa1*\xca\x0e\
\xe5\x86\xe6\x0b\xb4\x9b0\xd5\x9e\xed\x87\xd3\xaa\xb9\x9f\xd8\
\xd9\x19\xeb\xadx\x13\x22ZA(\x0d\x97\x1cHr(\
\x11\x8f\xe3\xb6\xdf]\x87;.\xbd\xc8\x12\x07\x0c|\xd9\
\xfezN\xben\xb6\xae\xd5\xf1x6\xef\xd8y!\x18\
u\x81\x9e\xbd^\xdc\xb0\xbb4\xd1\xc9\xce\x83\xad\xb5g\
\x85\x00\x07\xdd\x86\xfa\xa3\x93$\xda*\xa5\x5c\xd6\xaa\xa5\
/\x0c\xa5\x99\xb5\x97\x08`\xb2a\xeeV>\xdf\x07\x8e\
\xebV\xfc,C{Y\xa9\xc7\x22h\x06\x9a\x91SP\
\x92\xaa)]1\xafC\xf7\x81\x1b\xeb\x9a\xd4\xc1\x96\xc3\
\xe7'\x13N_A\xfe\xeb\x5c\xbb\xf4\x86\x8e]\x87\xd7\
\x86\x84\x85U+\x97\xfd\xa8\x9dd\xcaE4\xaa`i\
\xe9\x9d9I:\xf5)\xdf\xc0\xfa\x14\xfay\x0ck\x02\
\x8a\x894\x8b\x00\xc0&\xd8\x85\x94\x14\xcfJ\xc3\xa2\xc2\
Ly[b\xde\x07[f\xcd\xfb\x00\x08\x1d\x07\xa5\xbc\
p;\x22[%\x1d\xf2o\xab\x96\xa7\x12`\xa0\xa52\
\x83g\xd8\xf2j\x1c03\xb3\xe5\xf8Uc3\x13\xd1\
\xdb\x94>\x04\xa6NA\xb8\x86\xdb\x10g\xdf\xbcM8\
\x01\x84c\x89@\xdc\x12\xf2D\xc2+\xca\xa7\x09\x94\x1e\
q7\x97\x84\xed\x1b\x02\x15(G\xd2A\xd4\xe7\xc2V\
\x06`1\xb1\xb2\xe18\x04 \x89D\x22\x01\x00\xf0m\
z\x16\xf1\x9c\xdfa\xf6\xcc\xb2\xe2T*0Z+\x95\
\xae\xb4\x1bW\x01\xf5QI\xd6\xda\xe9\xaf\xbf]\x90\x04\
\xea\xd1\xf2\x12\x01H\x01\xc8\x07\x90\x0e\xe6\x0e\x94\xdew\
OG\xd2\xe1\x03\x14t\xfa\x8e*\x1cj\x06\xf2\x9f5\
K\x86o\xf2\xef\xf1\x8a\xa2\xe4\x17\x16bI\xdc\xc17\
m\x16V_\xd9\xb7\xef\x13\x99V*_\x96\x97M\xd0\
*\x95\xcd;\xb7M\xe1\xb2\xd2Q\x0e#\xc1\xac5\x9a\
Y0\xeb\x00\xc09\xec\xb2\xc1\x0a\xf9:\x96<\x85a\
Vr\xca\xed\xc4\xd0\xb0M\xb1\xbc23\xed\xb5\x9a9\
\x0b6|\x03\xecc\x12\xba\x00\xc0}\x00\x8e'B\xdb\
\xde\xf9\xf5F\xb3\x98\xc0\x16w\xd3\xe55C(s\xf3\
\x01\xa3v\xecX?C\x10\x1c\xd7s\xcd\xbb\xae\xf26\
\x0c\xad0t\xcc!\xa8\xda\xb4\x0d\x86\xa3c\xa2{\xe7\
G\x9d\xb5\x1b\xd2t<z\x81\x8e\xc7\x8b\xd5\xb6\xed\xd7\
8\xc9TS\xc6\x01C\xdeLF\x1b`k\x0d\x05\x82\
\xd6*\xa0\xab\xaa\xc7)\xa2Q\x1a`\xe5\xad\x88\x17\xc0\
\xc3^\xb0\xa2\x06K\xe5\xf3\x7fk\xa7g\xcc\xf7;\xf6\
\xcf\x03\xb8U\x9cT\x02:\x19Efz&ta\xe7\
\xda4\xb9\xe7\xbd`\xa8\xed\xbcTRO\xd1\x8e5F\
\x1arb\xf3\x9e\x1d\x1f\x223\xaby\x7f\xf7\xdb\xb6\x83\
\xa4\x10\x94t\xa4\xd4\xca2\x00\xf21\x09\x9f\x17\xe8\xe6\
\x85\x90ifH\xc0\x91\x12;M\xbf\xdc\x95\x9fi~\
[\xe8\x8f\xbf\xd2\xb9y\xeb\xa6em\xfa\xf2\x8eY\x1f\
\xa1c\xdfk\x90r\x94\xa1\xec\xa4O\x0b\xd7\xa7I\x0a\
&!\xbc\xd2\xa0\xd4Z\x0e\xc5+\x05$Z\xa3,\xb5\
`\x9d\xf2\x03\x80\x84C\xe4\xa6$A\x9a\x8a\x10v\x89\
\x8c\xd6\xf2\xb0\xba\xe5\xd4G-\xd1w\x8a\x18~\x8d<\
W[>\x11$D\x93.\xfc\x86\x80\xedJ\xc9p\xa4\
\xa6\xb8\x88\xa6\xbc\xf2\xd8\x96\xe5\x1d\xde\xcdH>\xfc[\
\xef\x10\xd1\xb4\xb3\x8eP\x9aGhfH\x89RK\xb9\
o\xc4\xda?\x99\x1c2\xf4\x02\x1cu\xd8\x18\x8f\xc0\x1a\
\x9e\xd3\xa5\xbe\xbe\x1e\xb5;b\xe80)\x87\x9d\xc4\xca\
\x19\xccy\xc7B\xc8\x03\xe3V\xaa\xdb\x86\xd5\xa5'\xf9\
V\xbft\xaf\xcby\xf6>)\xfa\x00\xd0\x95W]8\
\xb8\xef\x8d\x99\x86\x7f\x9b,/\x9f\xa8\xdddw(;\
\x8f\xbd\xd3\xfa\xde\xd2\x1a,\x08\xf0\xc9R\xb2\xf5\x97 \
c\x96\x95l\xbc\x8a\x0d\xc7t\xfd\xa4\x9a\xe0\xbc\xbf\xb3\
a\xf7\xd6\xce\xc3\xfa\xe3\xaa\xe1\xe3\xb1\xf6\x8c\xf3\xa1\xeb\
M\x18\xb6\x05\xd19\x84\xfb\x22\x01\xe4\x0c\xe8\x88\xf2\xe6\
\x86\x95\x99\x82?4\x0d}\x0dT*\xd2\xb4u\xe3\xc9\
\xcf\x15\xe7|M{\xea\x91N\xae\x11 \x17\x0e\x93_\
\xdaI\xa9\x9d\xc0^n\xe17\xee\xbc\x13m\xcd\xd2z\
cP\xaf\x07\x9c\x0d\x9b2t\x22y\xbaJ%:\xa5\
JK\xef\xa8\xd9S\x95\x8cv\xe8\xf4\xa9\xad\x85\xd4\xac\
\xa0\x98\xa1\xc1^TZ\x0bp5\x01\x8a\xb8\xc5m\x0d\
(\xcf\x92\x15v\x1cG\xd8\xb6\xfd\x8fU)\xaa\xae.\
\x03\x00\xe4uIG\xb41\xac\x02\x91P\xe5\x88\xfe\xc1\
g7\xae\xad\xf9\x0c\x9a;\x8aP0\x1c\x8c\x04\xf7\x0b\
\xe0\x94E0\x85\xdfuR\xcdK\x94\xe5\xfc\x05\xcc\x81\
\xd6\x226\xdf\xc5@j\x0a\xa5\x85\x9b\xf22\x82\xcb\xb3\
\xb2\x02\xcb\x86u\x89\x94\x8f\x7fq\x86=\xf3\xf4\x8b\xb1\
\xfe\x95g1}\xc6;@j\x1d\xa2\xcd\x99en\xb4\
\xf9U\x05*\x22\x88V\x8d\xc1k\x87[KI\xb5\xae\
2\x13k[\x90S\xf3M\x10\x00t\x83\xedD\xb7\xbc\
\xef\x8a\xc0&\x06)\x10\x81\x84\xc4^\xa7\xa0@Kl\
\xbf\x02\x93&\xd7\x12U\xb1\x94]V\xb9m\x0d\xec:\
\x0b\xda\xf2YN\xbca\x0e\xbb\x95\x06\xdb\xf5\xa2>\x1a\
X\x09\x00ee\xde\xdc(7\x0e\x0eu\x16\xa9\x9a\xd5\
\xf5\x1a\xe2-\x18\xa6+\x82\x81\xb5Ns\xcd\xb6/^\
\x9c\x0a\x150\xb1\xf4\xce;\xf63C\x19\x80\xf9\x05|\
N\xfd\x0e\x95\xd1\xf3\xaf\x90\xfe\xd5\xdam\xa4Xrw\
-q\x94@\xb1\xbdW23\x96.Z\x88\x03\x86\x8f\
\xe0\xbflY\xb1\xfe\xa2\xbb\xef\xb9\xc5\x9c\xf7\xc5\x0cc\
\xf7\xce\x89\x88\xbbEL:\x83\x810\x94v\x98\xcc&\
\xed\xe3\x06\x15\x11\xdfDb\xa9\xcf\xb6W5\x14\xc7a\
\xcd\xb6M,J\xf9\x8d\xc4\xf6h\xea\xab\xbcd\xca9\
\xb1\xbb\x85)\x8f<\xf0\x93k\xdf?\x0d\xd1\x0d\x16\xbd\
\x9f'\x110\x00Q\xdd\x1c\x8bU5%X\x12\xedq\
\xd9y6[p8\xe0$\xcb\xedX]\xb9%<\xb3\
\xe2\x9dw\xde\x89)7NA\xc5f\x17\x93\xdf\xfd\xb8\
\xe2\xc3n\xed\x1ep\xcb*\xcaR\x96\x9b\xdf\xec\xd8\x82\
\x1d\x95\x99\xd9TO\xcdp\xbfR\xac-hV-\xd9\
\x98\xde9\x9c\xe1\xc5\x15c\x9f\x1f\xc1Jmu\xadD\
\x8d\xad\xff\x052\xc4P\x0b\x9bd\xa7^\x03\xd0i\xf0\
d\x00\xa0`n\xf6/\xc9\x95o\x85\x96\xf8\xde\xdf?\
\xfc\xc1\x11g\xdf\x8dC\x0f=\x12\xfd\xbbvB$\x14\
\xfa\xeen#\xeb\xfb\xed\xfc\xa8\x8d\x96\x02M-4\x84\
\xdf=\xcb\x07\x10\x05\x8a\x90\x1d\xf9\xe9{\xf7\xf3\xd3z\
\x0dr\x8b\xda\xfcp\x0c\xadQN\xc4\xccX\xb1b\x05\
\x00 =\x0d\x18=\xb2\xff\xfe\x9e\x01_F\xc7\x9f\x9c\
\x14f\x86\xf3]\xb2\xe7\xf7\xdb\x17\x00A\x04\x83\xfb\x5c\
\xcb\xcc\xd8\xb1c;\xfa\x0f\x1d\x8a\xab\xcf;\x07\x0c\xe0\
d\xd6\xe2\xa2?\xdf\x11\x9cv\xfaqE\xc7\x9d8\xb9\
\xdb)\xa7\x1e\xdd\xe1\x8c\xeb.\xcf<\xfe\xcd\x97\x0d\x06\
p\x16\x00\x14D\xe0\xb5)Z\xfa\x96\xf5\x8b\x92L\xf3\
\xf6\xfe%\x05`\x0a@\x886=zQ\xda\x01\x83~\
8V\xc8H\xfa>\xf7\x9ex\xce\x89\x18u\xe1\xc9\x98\
3r\x02\x90\x1e\xf6j\x8d\xc1\x10 \x88\xbf\xdc}7\
\xbc\x8a\xe9\xbfh=\xf6yN:\xf6n;\xff\xbct\
\xee\xdc\x03\x89D\xdc\xf3d\x09F\xd0\xf0a\xe7\xae\x1f\
\xa7\xc4\xe4\xe6\xf6\x80\xcf\x0c\xa3\xae\xae\x1c\x96\xd3\xecy\
\xe2\xf6\xd6}h=\xf8\x11\x82\x91,d\x86|P*\
\x89x4\x8e\x98\x95\xfcQ[\xa1P[$\x93\xb5`\
\xd6?\xc3/\xd1\x92\xa5\x01\x80\xc8\xabqv\xdcq\x93\
0}\xfa\xd7^\x12h\xeb\x5c\x08\xb9\xb74\xae\xb7\x8b\
\xb7\xee\x01\x0c\xc3\x90\x08\x05M\xc4\xe3Q\xf4h3\x01\
Z%\xb0\xb3\xbe\x02I\xbb\xdek;\x10\xc4\xd2Y3\
0hp\x7f\x10\x11\x82\xc1\x10:th\x87\xcd\x9bw\
zA?dx\xaf\x93J\xb6x\xfc\xf7\x0f\x14f\xc6\
G\x1f}\x84#\x8e8\x02B\xf8Z\xfa\xa6[L\x8e\
\xbc\xdf\xeb[\xa5x\xea\xf98DoC~\xd0\x8fP\
n1\xb2s\x8b T\x04)'\x01\xcbn\xc2\xee]\
\xdb\xa1\xca\xd6\xe3\xc5\x05[\xe1'B\xd8$\xc4\x5c\xaf\
\xda\x91\xab5f\xce\x9c\x89\xb1c\xc7\xfe\xdd\xb9\xf4\xea\
\xbf\xb5\xf2A\xc3#6o\xb1}\x0b!\x10\x86F\x9e\
\xdf\x84\x11\x0a\xa0\xac9\x81\x84\xf3\xdd\xeeXPP\x80\
\x83\xbbWa\xd1\xae\x5c\xc4j\x1a\xd1\x94Tp\x01\x04\
\x19H\x80\x91\x85\xefr\x89\xc5\xde\x99\xff\xeew+:\
\xf4\xf7\xfaB\x02H7\x80\xff\x07=\x7f\xa0\xae\x93\x13\
t\xae\x00\x00\x00!tEXtCreati\
on Time\x002021:08:\
16 01:19:48\xd1L\xe7F\x00\
\x00\x00%tEXtdate:crea\
te\x002021-08-15T23\
:30:59+00:00\xf7\xd4\x8c7\
\x00\x00\x00%tEXtdate:mod\
ify\x002021-08-15T2\
3:30:59+00:00\x86\x894\
\x8b\x00\x00\x00\x00IEND\xaeB`\x82\
\x00\x00\x0e\x14\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00(\x00\x00\x00(\x08\x06\x00\x00\x00\x8c\xfe\xb8m\
\x00\x00\x00\x04gAMA\x00\x00\xb1\x8f\x0b\xfca\x05\
\x00\x00\x00 cHRM\x00\x00z&\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00u0\x00\x00\xea`\
\x00\x00:\x98\x00\x00\x17p\x9c\xbaQ<\x00\x00\x00\x06\
bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\
\x00\x09pHYs\x00\x00.#\x00\x00.#\x01x\
\xa5?v\x00\x00\x00\x07tIME\x07\xe5\x08\x0f\x17\
\x1e\x11E\xd6\xd1\xde\x00\x00\x0c\xd6IDATX\xc3\
\xbd\xd8ytTE\xbe\x07\xf0o\xdd{\xbbo\xba;\
I\x93\xb5;\x0bKH\x08\x09!\x01\xc2\xe2\xb0$\x98\
a\x0d\x08\x18E\x90\x1dd\x19GD\xf0)\xfa@\x9c\
\xe8\xe88\xec#\x8b\x8c\x80O\x04\xc4m@\x91\xcda\
\x13P\x18\x02$\x84\x9d\x04\x08!@ \xfb\xd6\xeb\xbd\
\xb7\x96\xf7\x873\xbes\xde \x83p\xe0w\xce\xfd\xb3\
\xaa>\xe7\xf7\xfb\xd5\xa9\xaaK\xf0\x80\xf1\xee\xd2\xaf\x90\
\x7f\xe9\x16T\x93\x1c.\x13\x91\x91\xde&x\xb7Y5\
y[\xc7\x86\xe2\x89A\xd9\x0f:=\xe4\x07\x19|\xf9\
\xf2eX\xcd\x0c\xf3f.\x84\xc9\xd9&\xbb\xbcZ{\
MV-\xa7\xd6\xef\xad\xb8\xb9zj0\x0e\x16V\xa2\
\xac\xac\xec\x81\x80\xe4A\x06w\x1a2\x0f\xf60'\x22\
C,Qg\x8a\x1a\xd7\xfbu\xda/:*\xf0\xa3?\
L\xed:\xb3\xa4\xdc\xe5\xcd\xe8\x12\x87\xb4\xc4\xb8\x07\x02\
J\xf7;p\xdf\xfe#\x183\xa0=\x0e\xac\xebKJ\
n\xfb\x86\xebB\xee\x05\xc9\x8c\x9a&>d\xfd\x8e+\
Y\xd3\xc7\xefE\xb8t\x03\xdd\xd2\xd3\x1f}\x06{\xf7\
\xee\x8dC\x87\x0e\xa1\xc5\xe0\xf9h\xeb\xb4$_\xadb\
\x9b\x0c?\xefD\x04\x01\x08AD\x98\xfa\xed\xa8\xbe1\
S\xdc\xbaR3yl\x06b\x83\xed\xf7\x0d\xbc\xaf\x1e\
\x9c\x9b\xfb.\xe2R\xfbcd\xb7 \xf3\xbe\xf3\xda\xcb\
\x1e?\xcba\x94J\xe0\x02\x84\x09h\x94\xc5j\x06\xbd\
\xfaa\xee\xa8S\xc3\xb3\xbb\xa1o\xcfTl\xd9\xf6\xfd\
\xa3\xc9\xe0\xf7\xdf\x7f\x89\xac,;H\xf7\xf3\xc8L\x91\
3\xafWb\xbd\xe67Z\x81q\x10*@\x08\x81d\
2!2\xdcz\xe4\xd9>\xb1c*\x1ax\xd9\xa0A\
\x03\xd0\xa7\xbd\xfah\x80\x0b\xff\xfa\x0d\xf2/\xdfF+\
\x87)x\xdb\x09cE}\xa3>^0\x06Y0a\
\x02\x0c\xca\x14\xb3\xac\x98a\xb6\x98\xf4\xf6\xf1\x81ol\
]6z\xe9\xa93\xa7\xb8#\xc4\x86\xe8\x16\x89?\xcf\
\x93\x9b\x9b\x8b\xb7\xdez\x0b\xef-~\x1b\xad\xdbvD\
\xf1\x95\x0a\x9c<_\x84\x06\x8f\x1b\xba_\x87\xd0\x5c0\
\x09\xff\xaf\x03\x9e;\x9e\x07\x13\xf3\xa2m\xf7\xa3\xc8\x98\
\x16;\xbc\xec\xb6\xbe\xda\xeb\xf1\x84\x12\x01\xc4\x06\xe2Z\
\xf30\xb1\xb3\xe0\x862\x16\xc2l\x97M\x12BBL\
\xe7\x87e:G\x15\x147\x9c\xdd\xb9L\xc5\xc7\xeb\xca\
\x90\x959\x04[\xb7\xe5\xa1\xe0f=N]\xad\xc4\xb9\
\xe2\x0a\xe0\xc2A\x00\x971q\xfa,\xf3\xe5\xaa&\x8b\
\xa1S\x0b1\x5c\xaa*\xfc\xf4W\xf5\xa0#e\x00V\
\x1fk@\x8f\x81\xc1\xd1%\xb7\x95w<\x1e\xa3=\x18\
C\x80$X\xe7\xd6\xd65\x83\xd3\xb1\xf8D)R|\
\xd4\x94$A\x81\xceI\xb8_\xa3\xfe]\xf3;\x1f\xaa\
\x95\xcb\xd8\x1b\x9f4a\xd9\x1b\x831k\xcc\xd3\xeau\
\x8fp4y\xfc\xf1\xaa\x95t\xb0'&d\xd8Z\xa5\
\x0f,\xad\xf6\x0c\xf7\xfa\xbc\xd9\x84\xfb\xd3LB\x0b\xe3\
\xd4\xaf\xdfs\x06\x8f\xfcc\x1b\x8a\xafx1i\x5c#\
\xe9\xf9\x82}Fe\x0d_\xe0\xf3\xf8\x02\x98\xdf\x87\xb8\
PQ\xf8\xdc\xa0f\xa3\x96|\xad\x15\xc7\x85iO\x9c\
\xabP?\x16D\x8d\x90$\xc0\x16\x88\x1bOv\x0b\x9c\
\xec\xaai*-\xba\xe5Kn\xa2\xa2m\xa3\xd7\xd7\xce\
\xa5k\xc9\x1a\xf5\xc5\x08\xee\xb3\xa9\xb2QkU\xe8\xc5\
`\xb3v\xd2n6\x8e\x06\x99\xf4\xe2`\x85\xd6\xf2\xba\
j\x8fr\xaf\xc0\x9e=\x0a\x91:\xa1\x15\xf6\x5cj\x96\
\xec\xf2\xc9\x93\x19\xd7\x03$\xc9\x04\x9b\x95\xf8\xd2\x13\xa5\
\xb5S'}wI\xa1\x83\x91\xde\xc2\xba\x7f\xe2:\xfa\
mu\x93<\x85\x83\xc0\xe5a\xb1\xdb\xf3\x1a\x96\xbb\xfc\
\x06\xf3Q\xb4\xa4\x106E1\x11\xab\x85\xd7F\x04\xb0\
\x13\x8e@\xfeC\x8c\xddr8\xc5)_N\x8f\x83O\
\x95%\xff\xa9\x13\x95\xf4p\xc1!\x1e\x9f6P\xdc\x13\
\xf0\xf4\xe9\x93\xf8{\xdem\xb4\x8b\x91\xcc\x0bv\xfa&\
\xe8\xbaH!\x90 \x13\x19I1\xea\x81'\x1e\x93\xb7\
8\xb2\xb3\xc4\xee\x19&\xeb\x92]u\x89\x9a\x11h\xa2\
Bf\x84\x0bYp\x90\x0a\x9d$\x11Y\x81-Pr\
EE(\xffh\x11\xcaw\xb7s\xf8\xf3z\xc5\x9aK\
\x22\x03\xac\xb2\xae\xa8\xd4Me\xa3\xba\xa9\x89\x95\x95\xdf\
\xa6\xbb\xf6\x14\xb2\xbe\xd9\x1d0'w!\xfe#\xf0\xc0\
\x81\x5c\xa4\xa5\xa5\xa1\xc3\xcbg\x91\xf3\x9b\xa0\xae.\x1f\
F2\xcad\x08\x02g \xa9\x1a\xd4\xd5\xb6\xeaP\x89\
\xc7\xe8\x96\x10\xf4\xe4\xd4M\xc6\x88\x1awp\x0f\xb7_\
\x8e%\x0c2\xc0\xa0\xaa\xa0\xd1\xa1\x96\xb2X\xa7\xba/\
\xa5M\xf0\xce\xc1Q\xb7\xae4\xf7\x94\x99\x1a`j\xc1\
L\xb6X\x0f7\xdf\xf2\xfbX\x8df\x0av;\xc3\x0d\
Q_\x1f\x04?7 q\x07\x08!\xffy\x17\x1f\xfc\
\xf1\x02v\xe4\x17#2\x04A\xdf\x9e \x1f\xd4\xd6\xd3\
q\xbaFA\x98\xc0\xe3qtK\x83\xe0;N\xde6\
\x8d\xd4u\xa5;7\x14;c\x00\xa7\x14\x16\xd5\xd0Z\
E\xe1\x5cr\xcb\x80\xafS\xdb\xc5\xec\x18\xed\xfb\xbe\xc1\
|\xb60\xad::\xa6\x9b\xcf\xe9,2\x87\xc7\xe4\xc3\
\x1eR\xd6\xda\xc2\xb5\xfc\xf2*T\xb88\x86=5\x0d\
\xd9\xd9\xfd\xf0\xddw{\x7f^\xff\xae\xc0\xf7\x17.\xc0\
\xa4\x119\xb0\xb7Z\x8b\xa1\xaf\xf7z\xba\xacZ\xac\xf1\
z\x8cP\xa6S\x80\x83\x07\xaa\xac\xbcA\x93\x82ta\
n\x06&\x01\x10\xb0Z\xa8\xd6\x22B/\xe8\x94\xa0|\
\xd6'+bG\xef\xfd_2#\xff|\x1f^\xd10\
N\xb6E\xc4\xcb\x13\xc7\xce)yf\xd2\xe6\xa0\xddk\
\xa9\x12\x1d\x8f\x11\xb3\x9e\xc7\xf8\xa1\xc3\xf0\xe2\xac\xbf\xdc\
\xd1pW\xe0\x84\xff^\x0b\x0f\x09Bt\x88\xe2<v\
E\xde\xd0\xe4\x15\xfd\x0c\xcd\x00\xf5\xeb`\x82\x80I\x12\
\x04\x93 \xb8\x0c5@g\xad\xa2\xb4\x82\xf4Dl\xcc\
\x1e\x18\xfbu\xbb\xcd\x1f\x1b\xca\xe1\x0bC\xacM\xdaD\
\xb8\x8c.\xd4/,\xcc\x22S\xb9{\xf7y\xdf|\xb8\
aQ\xc7\x0bE\xfc\xf3O\xd6b\xf1\xa2\xa5w\xad\xe0\
/\x02\xbf\xd8\xb2\x1d&b\xe0\xa9\x9cF\xd2g\xa6\xf5\
\xc5\x8a:y\xa1\xa1\xb3\x00nP\x18\xba\x00\x97\x00\xce\
\x04$\x99 *\x92\x97tLpo\x18\x94!mx\
2\x8e\xdc.yiM_{\xadw\xba\x08\x89t\x98\
tT(\xd7n\xf4\xa1\x86\xa4R\x89B\xb4t\x14\xd1\
\x91\x93G:\xdd\x8dg\xc8\x8cWA\x08\xf9\xf5\xc0\xbd\
\xfb\x8e\xa3o\x9f\x14d\xce\xf8\x06\xd1v)\xb9\xa8\x5c\
\xfe\xdc\xe3a\x1d\xb8\x0e0\x06pp\x08\xce\xd1,H\
\xb8\x92\xe3\xd9\xd7\x19i\xdeU3FN?qm\xce\
\xb3\xad\x83\xce\x9c\x9de\xa6\xe2IO|J^m\x8f\
\x8c\x85\xcd\x0cZ\x13\xb4~\xd3FT4\xf648@\
\x83\x85\x10]\x1f[\xba\x7f\xe5gs\xa3\x0f\xed\xd7+\
\xdd>\x8c\x1a6\xe4\x17\x81\xffv\x92\xac\xfbt\x0f\x12\
\x22T\xc4N\xd9\x8c\xac\xe4\xe0\xf0\x0b\xb7\xf0\x9a\xc7\xc3\
\x073\x8d\x13\xce\x00\x01\x0e\x05\x02\xf1\xb1\xb4x@w\
)\xf7\xf5\xb1!\x8b\xfbw\x1fw\xb5n\xe2\xe3\x03\xed\
\x17/.'Vk\xaf\x86\xf4\x8e\xab\x0a\x87<\xfbv\
/\xbd\xeeJ\xf0\xd8W\x1a\x9azvaRMm?\
pj\xe6\x9c\x12F\xbd\xcdc\x1ak\x8f\xc4\xef\xdfz\
\xb3\xcb\xec? \xec\x8b/\xe0\xab\xad\xbd#\xf0\xe7\x0b\
k.\x807\xde\xff\x14n\x7f\x03:\xf4\x5c\x8c\x91]\
\xa3\xdb\x9d\xbdN\x97\xbb\xdc\xf49\xc3\xa7I\x9cQ\x08\
\xc1aQA;%\xca\xdb\xc7\x0f\x94\xc7\xbf\xf3\xfc\x84\
\x8fnl\xbf\xaa\xb9\x86\xff\xe6\xf7A%W\xff\x87\x85\
G\x86\xdf\x1a8d\xe6\x8c\xa7g.\x88\xd3\x5c5\x18\
\xfe2n-\x99\x07\xef\xa0\xfe\xdb\xb5\x88\xc0\xfd\x5cb\
\x10\x0c\xe05\xb5\xcd\xc5\xf1\x1f\xa6\x96\xe7\xae\xb0]9\
\xb4\x07_\xfdm\xf3/fP\x02\x00\xb7\xdb\x8d\x1b\xb3\
\x17\xc3\xf0\xbb\xf1\xe2\xe4g\x90=#+\xe3\xfcM\xf7\
\xeaF7\x1de\xf8\x0c\x0bc:\x18\xe7\x08\xb2\x09\x7f\
f\xaa\xf8h\xd60\xf3\x0b\x9e\xfcK\xc7\xcf/y\xd5\
\x96\xbau\xed\xabj\xf9\xcd\xf9\x9e\xb8\x96Uu#F\
MK\xfe\xdd\x1f\xbf\xf8]u\x99\x11\x1c\x1a\x86.\xfd\
\x1eG\xfd\x7f\xbd\x032vF]SB\xcb\xd5\xbaE\
\xa9\x11L@\xf8\x18Xi\xe9\x10\xb2\xea\xdd\xc7-Y\
\x03P\x92\xda\xfe\xee=8uj.T\xbb\x8a\x15\x8b\
\xe6\xa0\xd7\xc4\x85\xfd\xeb\xbc\xa6E\x067\xa7q\xaa\x80\
Q\x01.$4\xb3\xca\xde\xccTe\xc5\xe4L\xcf\x02\
\xea\x89\xad\x8f4[\x03\xc2\xff\xf2\xea,\xb9\xaaj\x9e\
/)\xf1b\xe3\x98q/\xb5\x1a4\xe5\xe8\xe1\xca2\
\x04\xd6\xd7\xa2S\x97L\xc0\xed\x86\xbb\xfc2\xae\x1d\xdc\
\x0aoRK\x8bm\xee\x92\x15\x96\xd2\x8a\xc9B\xa60\
L\x00\x8d\x8b\xde)\x9e{~R\xa0\xcbU\x1d7v\
\xd6\x1d\x81\xcaK3\xdfB\x94*0g]5\x8e\x95\
\xce\xcb\xac\xd3\x02\x16sBR!\x088\x13\xe0\x84 \
8\x00Zf\x12V\xce\xeaK\xdf\xbb\xe5\x8di\x8a\x18\
<\x948Fd\x8d\x12\x95\xd5\xaf{\xe2[\x964\x8e\
\x1853h\xd0\x94\xa3\x97\x84\x80\xb3\xbc\x18m\x92\xfe\
\xef\x1d2x\xf4\x14\x1c:x\x10G\xb2\x1f\xf3\xdd\xcc\
\xcc\xfc@\x8e8\xef\x97\x98\xd7\xaa\xc8\x5c\x92\xac\x8a\xe6\
)\xcewFP\xbd\xfa\xae\x19t\xf6\xf8=\xe2\x22,\
mo\xbbMk\x0d\xa2f\xc8\xb2\x19\x10\x0a \xa90\
\xcb\x0a\xef\x12/o\x9c\x9c\xa9\xbf\xe2\x92cjs^\
{\x06\xb5\xbf\x1d\xdc\xd5\x5cx\xfaS_X\x88\xcd5\
\x22gZ\xcbI\x7f\xdcuU\x08X*o\xa1\x853\
\xe6\xdf\x16Y\xb9m\x03\x0a2\xd3\xb1\xce\x9e\x82>\x87\
\xd6\x9b\xc8\xe5[Rd0\x10\xedq\x91\x86\x82\xd34\
$<\x92.z\xfb\x93;\x03{\xe7\xbc\x00\x87]\xb1\
\xe6]\xe5\xf35nzQ\x92\xcdD\x22*$9\x00\
D1!!R9>\xb6\x9fm\x82\xdb\x08)\xca\xa9\
\xf8\x11\xcc\x11\x1d\x14\xf8\xf9\xba\x0f\xb8n\x0cw\xf7\xef\
3g\xe7\xbc5+{^-d!\x8a\x15q-\x93\
~\xb1\x97\xa6\xcd\x9d\x0a\xb7\xe1G\xa4\xe0\xd0\xdd\x1a\x82\
\xed&X\x03L\xb8^Y\x0dGH8\xfe\xf4\xe7\x8d\
w.\xf1\x8f\x17\x1b\x91\x18\x15\xd0\xc3\xe7\x95\x9e!\x92\
\x90%\x08Y\x86\x90\x18\x02\x15\xe2Ij\xae\xac~\
n|j\xd1\xd6\xcfw\xc2\xb1q9j\xba\xff\xb6\x87\
\xee\xf3\x0e2\xda\xa5l\xd3\xa6\xcd\xdc\xd0\xf7\xf0f\xa6\
\x84;\x11\x17\x7f\xf7\xe7\xe5\x9a\xf7\xd6\xe2~Bze\
\xa8Yih\xf2\xf5\xa3\xba\xe6\x14\xd4\x00\xa7\x06\x84`\
\x00\xa3\x88\x08\xc4\xf1\xf4D\xb2\xf3\xc3O\xce\xa2\x9d\xcf\
\x8a\xab\x1bw\xa8\xb4\xb2|\xa8\xd7\xde\xcc[\xd7%c\
\x95\xed\xcfs\xeb\xc7\xbb3\xb1\xf2\xc3\xcf\xeek\xf1{\
\x09e\xd7Q\x97\xc3\xf0\x91\x1e\x1c\x02\x8cH\x00\x91A\
\xa8\x02\x99\x10\x1ee\x93\xf6O\x9ep\xa1r\xc9|\x15\
rC-\x94\xd2\xb3N\xcd\xa0\xbd<qmw\x1d\xcc\
\x9e~\xac\x85\xb3\x1d^n\xda\x85\xd1\xcb\xfe\xfa\xf0\x80\
\x8d._\x18\xa3\xa6\x18\x01\x01!\xfdTZ\xce\x0c\x04\
\xa8\xa4\xce\x11l>:~jG\x14\x1c\xfb\x13r\x9a\
\x87\xc0\x88m\xed\xd4LVIk\x93\xb8u\xcc\xd2\x11\
Z\xe4\xb2=\x0f\x0d\xf6\xaf\x90\x00\xca9\xd7\x98\xe0\x06\
\xc0)\x04\xa3`T\x87L\xb4JI\xae\xbb\x11\x16r\
\x13\x1b7\xef\x83\xef\x87\x12h\xba\x1a\xa1\x07\xdao\xb2\
\x84\x84S\xd7\xfb\x8c\xc4\xe8\x17\xa6=|`\x84\x8dW\
\x12\x18\x97\xc4?q\x82\xe9\xe0\xdc\x80\xc1\xfc\xee\xea\xda\
r_\x93\xbb\x1a6Np\x9a\x03U2\x93+C\x02\
.{\xe3\xa2\x1bk\x08\xc3g\xab\xd6<|\xe0\xc9\xe5\
\xa8Qe\xfa#@\x99\xe0\x06\xfe\xf5\xe9\x8c\xaan]\
55z\x15\xdc\xbcv\x0d\xde\x9e\x91hP\x8d\xa6\xfa\
f\xc1\xe5?h\xa1\xfa\x913\xc5\x0f\x1d\xf7S\x06G\
\xf8\x85#X\xd9a\x96\xf99!\xf8O;Xp\x18\
B\x0a\xf7!<\xbc\x89;\xd17;\x07\xf6\xb4\xae \
!!5\x01\x911u\x97N\xe4\xe3Fu\xd5\xa3\x01\
\xf6\xeb\x1c\x85-o\xa4\x9d\xb7\x07\x90\xf7%b\xd4p\
N\x01\xce`\xe8,\xc2\xe5\xe5\x1d\xf6~\xe9\xc3\xd39\
\xd9\x08K\xef\x89\xb0\xb4\xc7\xae+\xce\xf8\xa3\xbc \x0f\
\x9e\xaa\xbaG\x02\x94\xa3b\xdb\xe0\xab\x03\x95Hp\xa8\
\xc55.\x83\x1a\x1c]\x84\x90,\x5c\x10E2\xc9\xf5\
o\xce\x8e\xdbMl\x91\xf4\xf9\x97\xdeD\xe7\xbf\xed\xd6\
z\x16\xde\xa8\xa9\xce\xcf\xe3\xb4\xce\x83s\xd7\x1e\xec\xef\
\xe9=\x01\xe3Z9p\xb6\xc8\x0bF$\xda\xc6a9\
\xd5\xe85\xbc\x94\xa3\x03\xe3$\x90q\x12\xd2\xe8\xd6\x8e\
\x1c9W\x7fs\xd6&\x1fZ\x1c\xe6\x80\xe6\x13\xd7K\
\xaf\x812\x8a\xb3%\xa5\x0f\x1fXZZ\x86\xb1\xcf\x0e\
\xc0\x81c5\xb0\xd8\x14#\xb59\x0a\x5c^Q\xa2\x0b\
\xd2B\xe7$I\xa3T\x9f\xd0\xbf\xf9\x81\x8f\xf3Z\xd3\
\xf2F\x817g\xcf\xc6\xc9\x8b\xc5\x8f\x04\x07\xfc\xf3\xca\
\x7f\xb2\xf0$\x92\x13l\x90\xcc\x91\xe0\x90XA\xde\x8e\
\x8b\xc9\xc9\xed\x0f\xfb9\xd3\xfc\x06\xebu\xa3\xdaS\xb1\
g\xd3\xaa\xa2o\xff\xbe\x07\xbd\xbbuF^^\xde#\
\xc1\x01\xff\xef\xd1\x14\x1a\x1a\x8a\x81\xd9O\xc1f\xd2\xb0\
+\xbf\x11Y]\x1c\x96\xc2\x92\x9a,\xb3\x99\xa4\xb6t\
\xd8\xd7\xe9\x0cU\xf5\xd7\x8bp\xf4\xe8\xd1G\x06\xfc_\
V\xc4V\xe9\xa8\xbe{\xb8\x00\x00\x00!tEXt\
Creation Time\x0020\
21:08:16 01:15:2\
8\xcd\xc0\xffx\x00\x00\x00%tEXtdat\
e:create\x002021-08\
-15T23:29:47+00:\
00V\xda\x03\xfe\x00\x00\x00%tEXtda\
te:modify\x002021-0\
8-15T23:29:47+00\
:00'\x87\xbbB\x00\x00\x00\x00IEND\xae\
B`\x82\
\x00\x00^\xfe\
\x89\
PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0dIHDR\x00\
\x00\x00\xb0\x00\x00\x00L\x08\x06\x00\x00\x00\xcd\xe7=<\
\x00\x00\x00\x04gAMA\x00\x00\xb1\x8f\x0b\xfca\x05\
\x00\x00\x00 cHRM\x00\x00z&\x00\x00\x80\x84\
\x00\x00\xfa\x00\x00\x00\x80\xe8\x00\x00u0\x00\x00\xea`\
\x00\x00:\x98\x00\x00\x17p\x9c\xbaQ<\x00\x00\x00\x06\
bKGD\x00\xff\x00\xff\x00\xff\xa0\xbd\xa7\x93\x00\x00\
\x00\x09pHYs\x00\x00.#\x00\x00.#\x01x\
\xa5?v\x00\x00\x00\x07tIME\x07\xe5\x08\x0f\x17\
\x1f\x15[\xa0$\x86\x00\x00]\xc0IDATx\xda\
\xed}e\x98\x1eE\xd6\xf6}\xaa\xba\x1f\x1f\xf7\x89L\
\xdc=!\x02\x11H\x88\x10$@\x08\xee\xee.\x8b\xcb\
\x22\xbb,\xb2\xf8\xa2\x8b,\x0e\x01\x82K F\xdc\xdd\
e2\x93q}\xb4\xa5\xea|?z&\x10\x08\x0b\xbb\
\xcb.\xef\xfb~{\xaek\xae\x19Hwu\xc9\xdd\xd5\
\xa7\x8e\xdc\x87\xf0\x1bK\xa4{w\x14E\xaa\x105N\
\x00\xb6?M\x955\xe0\x7f\xb6\xad\xfc\xfc|H)q\
\xd8a\x87\x89\xcc\xccLl\xda\xb4\x89jkk\x89\x88\
\xc0\xcc0M\x93#\x91\x08\xb2\xb3\xb39//\x0f\x9f\
|\xf2\x09o\xd9\xb2\xe5\x87\xcd\xfc\xd3\xcf\x07\x80\xc5\xcc\
\xc0\xf6\xed\xd4\xbdcG\xa4\xaa\xab\xf1\xe2K/QE\
E\x05\xca\xcb\xcb\x09\x00\x98\x19\x91H\x04%%%\xdc\
\xa7O_^\xb6\xb0\x1f\xff\xe1\xc1n\x00\x0c\x00\xeeo\
\xbd\x1c\xffI\xa1_p\xcd\xcf\xae\x85\xf1[\x8f\x22\x16\
\xabD\xa7\x82\x0bQ\xc3\xeb\xd2c%\x87v\xe7\xac\xea\
\xb5\x04N\xca`\x18\xe5\xab\x16\xfeCm544\x80\
\x88\xf0\xf5\xd7_k\xbf\xdf\x8f\xa6\xa6&r\x1c\xe7\xbb\
\x19#\x02\x11\xc1\xef\xf7#\x14\x0aqYY\xd9\xaf>\
\x9e\xf3\x00\xd0\x0b/\xf0\x83W_\x0ds\xe3F\xea\xd1\
\xa3\x07\xa7\xa7\xa7#\x14\x0a\x01\xf0\x00\x1c\x08\x04\xd0\xa6\
M\x1b\xee\xd6\xb5+\xb65j\x92\x13?g\xc5\x16\xf0\
\xc5Q\xbf\xe9Z\xfc\x87\xe5_\xda(Z\xe57\x05p\
Fv6FN8\x05\xe9\xfe]r\xe9L\xfb\x04&\
1\xad\xa4\xa8\xfde\xd9\x199\x9bf\xce\x9b\xf5\xcf\xcd\
\x0a3\x5c\xd7\x85\x10\x02J)\xd6Z\xef\xf3\xefD\x04\
\xa5\x14\x1c\xc7\x01\xf3\xaf2\x87\xfb\x88\x0d@\xa4R\x18\
\x9b\x9d\x8d\xf0\xe1\x87\xf3\x15\x97_\x8e{\xef\xbd\x17\xf8\
\xde\x8213\x88\x08\xb3\xbf\xf8\x0c\xa9n\x11\x14~6\
\x01\xe5\x0d\x00\xb2\xffss\xff\x7fE~\xc96\xfeo\
\x93\xd3\xae\xbe\x02\xaf<\xf4!:\x0c\xea\xd1\xb5\xaa\xce\
y\x11\x10}\xdb\x16\xf8\xee\x1d30\xe3\xe1\xaaz\xdb\
\x0a\x07L\xbc\xf1\xca\xab\xbf\xf5\x1c\xfd\xc3\x22\x84\xd8\xfb\
\xb7\xd6\x1a\xcc\x8c\x1f\xbeHB\x08L\x7f\xfd5,\xca\
\xcb\xc7\xdf\x0e=\x14{4\x00\xf9\x9b.\xc7\xffJ\xf9\
\xcdf\xec\xf0+nC\xf9\x9a\xcd()\x0e\xf8g/\
\xa8\xfe]\xca\xe1\xab\x19H\x8b\x84\xe4\x9a~\xdd\xd2\xcf\
\x9b\xb7;\xbexD\xbe\x1fV2\x86Es>\xf9\xad\
\xe7\xe9\xbf\xf2?T\xc4\xbf\xde\xc4?'w\xdc8\x18\
+\xbf^\x8a%k\xea\x87\xd9\x8e\x9cJ\xe4\x8b\x900\
\xdc\x84-\xbb\x97\xd6\xb8\xa7\x9e9\xa20R\xd2\xa9\x18\
]z\x8d\xfb\xad\xe7\xe8\xbf\xf2?X~\x13\x00\x9fy\
\xf9\xcd8\xe3\xa4w1\xe2\xd8q\xe9\xd1\x18\x1d\x07\xc8\
^\x10\x02$\x0c0I\x7f]\x13\x8eX\xb5+5\xee\
\x95\xa7f\xa1z\xe7z0\xf3\xbfE_\xfd\xaf\xfc\xef\
\x97\xff(\x80[\x81\xd8\xafo;\xac\xfff:vl\
\xa9\x1f\xab\x95\x98BRHI )\xa4!\xc8`\xcb\
F\xc7\xaa\x1au\xd2\xc4\xe3\xfb\xb5\xc9h\x1bA\xe9\xee\
\xf2\xdfz\x9e\xfe+\xffC\xe5?\xbe\x03o\xd8\xbc\x1d\
O=\xfd5\x0e\x9ctR\x9bT\x92\x8f#\x18\xed\x89\
\x04\x08\x82\x04\x88$I\x120\xd0\x18\xd3\x93\xab\x9b\xc4\
\x94\x13:\xd6\x89\x92\xae\xdd\x7f\xeby\xfa\xaf\xfc\x0f\x95\
\xff8\x80\xaf\xb9\xfd\x8fx\xe1\xf1c\xe5\xce\xf2\xd8$\
\xa5\xe9H\x22\x01\x22\x012\x0c\x10\x99\x90d@\x0a\x03\
J\x89\xb4\xfa\x06\x9e6}[F\xcf\xc9SN\xc6Q\
G\x1c\xf1[\xcf\xd5\x7f\xe5\x7f\xa0\xc8\xff\xe4\xc3\xde\x99\
\xbd\x09\x15\x15)|=gW\xd7\x9az\xe7&\x86\xd1\
\x03\xc2\x030\x84\x01\x22\x09@B\x08\x02\x04\xc1vP\
d\x9aF\xe5\x91#r\x97\x89`\x8e;v\xcc8\xcc\
\x9b7\xfb\xb7\x9e\xb3\xff\xca\xff \xf9\x8f92\xce\xbf\
\xfe6\xc4\x9b]\xb4+\x88\xf8_\x9c\xben\xa2bc\
(\x04y\x86<\x96\x00\x13\xbc\x83\x1c@ \x08&(\
\xd6\xfe\xf2j\xf7\xe4\x15\xdbR3w5\xd2\xe2\xeeb\
\xfb^'\xc0\x7f\xe5\xbf\x02\xfc\x87\x00\xcc\xcc\xb8\xe5O\
\x7f\xc03\xf7\xdf\x88>\x07\x9f\xdf;i\xe1L\x22\x19\
fju\xefJ\x10\xc8\x03\xb2\xd0\x00\x0b\x10\x04\x08\x1a\
Mqt\xdfTj\x9d:\xba;6\xef\xa95\x1b[\
\xe3\x1a\x00\xfc\x17\xc8\xfff\xc9\xcbj\x83I\x13\xc7\xc3\
\x08\xd4\xa2\xb6\xae\x19~\xbf\xc4\xa0A\xe3\xb1d\xc9\x12\
D\x02\x01\xec)\xad\xc4\xcco\xbf\xf9M\xfb\xf8\x1f\x01\
\xf0\x88c/\x86\x9f\xfc8\xfc\xb4\xeb\xd2\x97\xac\xa8<\
W\xb3\xec\x03Ah9\xbc\x01Dh\xf5\xa90h/\
\x80\x05$\x94\xd6\xe6\xeeZ}\xcc\xa6l\xf3\xd3\xa5\xaf\
\xfd\xf53\x00\xbcg\xcf\x1e\x14\x17\x17\xff\xa6\x13\xf7[\
\xc9]w\xdf\x85\xc5{\xd6\xe2\x84\x11\x93a8\x02*\
\xd6\x80\xe6\xb2R\x94\xd7Ea5\xc6!\xfd&\xfe\xf8\
\xda\x8b\xbf\xb8\xbd\x82\x82!(.\xceBqq.\xea\
\xeb\xeb\x90L\xda\xd8\xbc}-\x12\xcd\xb58r\xda\xb5\
x\xfe\xe9\xabh\xd6\xdb!z~q\xa5.p/E\
t\x8f\xc2{\xdfL\xc7g\x7f\xe8+&]\xb8V_\
}\xed\xd58n\xea\xf1\x18y\xd0\x81?\xf26~_\
&M:\x05>\x9f\x1f\xe1\xb0@EE\x05,\xcb\xc2\
\x82\x05_\xfd\xcb\xf3\xf1o\x07\xf0AG\x8dC\x9b\x82\
\x10\xde|\xea\x01t;\xe8\xc2\x03R\xb6\x98H$}\
\x02\x09\xc2w\xe7H\x02\x131\xb1`@\x08\x86\x07\
p\x01\x8dXR\x17o\xad\xd2\x97L\xba\xe8\xf2\x15f\
\xb8\xb0\xf2\xffW\xf023\x08\xc0\x9e\xbfM\xc7'c\
\x87\x1b\xb4v]XE%!\xd5,}\x8a\x94\xb4U\
L\x06\x03\xbf(\xa4m\xd2I\x8f\xa1M\x11\xb0z}\
\x0aK\xe6NAI\x87\xdb\x0d\x90T\x9a5\xdf\xfa\xec\
\xb7\xb8\xf1\x84\xee0D*\xed\xa8\xa9w\xf73\xfdn\
v~\xee\xe3[\xba\xf6\xca\xdf\x1a-\xf3\xe3\xe8\xc9w\
u\xf9\xcb\xacc\xba\x9cs\xee1u\xca\xaa[{\xe0\
\x88\xe1\xd1^\xdd\x0a\xb1~s\xe5\x8f\x9e3\xf8\xf0\xe3\
P\x12\xe8\x8d\x04\xd5b\xce\x9a$.:*\x22+*\
\x0d-\x84\xfa\xdf\x11\xcc\xe3\xa72\xac\xdb4\x1a\x07\x1f\
{evc\x93s>\x91\xd1\x01\x10\x80\xf4\x22\xc3\xc0\
\x02\xc4\x02\x90\xdcHBlc\x97z\x83e\x80XB\
\x00 \xd2`\xd6\xa2\xa6\x81\x87\x977\xf8\x8f<\xaew\
\xcd\x0b\xe7]z\xa5rb\xce\xbf\xda\xb5\xbd\xfa\xf4\xdb\
\xd3?\xc2\xda\xb5\x1b\xb0\xad.\x05\x11\xc9B4\xe5\xa2\
\xb1\xa1\x19\xec\xc6\x91\x97&\xe1\x97\x0a\x06\x18\xc9\xb2*\
\x08\xbf\x89\xd7_\x7f\xee\xdf=m\xfb\x95w\x8e9\x0e\
\xef\x0f\x1e\x8c\x95\x87\x8e\x0f\xe4\xdf}\xeb\x11TU:\
\x0eI\xe5\xcf\x8c\xe4g\x06\xdb\xb4\x9f\xb7\xccN\xfe5\
\xc3\x14\x0d\x7f\xaf\x8d\xb5k\xd7\xa2O\x9f>h\xabb\
\xf8\xdb\x9bM\x180 3\xb3k\xef\xbft\xcf\xcb\xed\
\xa4\xc2\xd6\xda\xd5\xf5\xc4\xf6\xa3w\xdf\x8cv\x03\x0e\xf6\
}8\xbf\xfa\x9cd\x22\xf7\x02\x90\x99\x91\x11\xd6\xeb\x17\
\xce\xda~{L+\xd4s\xfa\x1d\xacE\xaf\x90a7\
\x99\x81\xe6g3\xd2BOU\xd6\xf9S?|\xd6\xa1\
\x13.Bv\x10\xd8PV\x8fQ\x07Jsg\xb9\xe8\
<wYU\x89\xb0\x1a\x179\x96\xd3\xf8k\xcc\xc9\xbf\
\x15\xc0'_v\x07\x00\x81\x99\x8f\xde\x8a.\xc3/8\
\xccQ\xe2 \x90\x10\x9e\xde\xeb\x81\x18\x9a\xc0\x9a\x94\xe9\
\xa7\xb7\x22i\xe17\x9a\xea\x9c\x075\xf3@\x22\x01f\
/\x84K\xb2\x84m\xa9\x9c\xed\x15|\xea\x86=\xf9\xb3\
\xb2\xfa\x0d\xd9\xb2\xf6\x8dG0\xb4K\x17,\xde\xba\xf5\
g\xfb\xb1y\xf3fh\xedb\xf5\x9a\x8d\xd8\xbd'\x8a\
\xe5\xdb]l\xda\x91@\x87C\x9eB\xce\x88'0\xed\
\xd8\xaf\xb0\xf4=2_Z\xe8\xf3'}i\x01\x9d\xd4\
iB\x88L\x16\x14\xb4\x1d\xc7V\x8e]m\x12\xd7;\
\x8e\x13'\xfa\xcf\xbb\x04\x99\x19\x9f\x1f0\x12\x81\xde}\
\x10\xb9\xe5FY7\xea\xa0\xa3\xe4\xce\xed\xf7\x89\x84\xd5\
\x8e\x08\x22^T\xbc\xa1lK\xe9\xd3\x8f\xaf\x5c\xd08\
\xa6\xa4\xe7\xdfm\xabO\x9f>\xe8Q2\x159\xe9F\
Z\xfb6\xe9\xbd\xb6\x96\x8a\xe3\xa0\xcc\x89\x96\xad\xffR\
\x9cg\xaf\xb1\x14\xa1b}5B\xbeh\x86\x1br&\
\xfa\x0c\xb3\x07\x09B]\xc2)j\xacO\xad\x10\xba\xb1\
\xce\xcd\xc9\x1d\x07\x93\xa9\xce\xd5\xc5N5\x1fm \xed\
ev\xc4>\x00>\xf3\xbc\xdb`\xc5\x1b\xd1\xb9\x8d+\
\xb6U\xa1\xe3\xbb\x9f%\xc7%l}\xb6\xcc\xe4\xba\x1c\
\xaa\xbe\xd0\xb5\xb9\xf1\xd7\x98\x9b\x7f+\x80\xeb\xabl\xcc\
]\xba\x0b\xc3w^Y\xd2\x10S'i\x92m@\xf0\
T\x07\x220\x0b\x00\x02~\x9f\xdeYR\x10z\xed\x91\
\xbb\xce\x9c\x7f\xe2\xe5\x7f{\xb9\xb1)\xd5C+\x04\x09\
\xe4\xed\xce\x02 !P\x17\xd5\x83Wl\x8b\x9fr~\
\xc7o\xef+\x9cv\x845\xf5\xa2\x0f\xd1\x81\x08\xd3\x06\
OCtw=r&\x0cD\xb7\xee\x1d\xe0\x0b\xe6\xa0\
M\xdb\x1c4\xc5\x05^z{+\xbau{\x18\xc0<\
\xac\x9bw\xb9o\xdb\xa6\x9a\xb443\x92\xd9\xb5\xad/\
'/\xe9\xcf\xaf\x89!\x92w\xc4\xc0\xf0\xa9/\xb89\
\x1a\xa9\xbc\xa0\xcf\xca\xc8\x087+\xe5Z\xa9\xbc\x0c\xbf\
\xd5\xa7ca\x95)\xf5\xaa\x86\x86\xfa\x0dU\xf5\xf5I\
\x7f8\xa8\xfe\x9ds\xf6}i=\xac\xc66nFM\
m\x1cG\xdf}\x07-\x196\xe20\xb1c\xfb\xef\x91\
\xb4;\x09\xa1\xe1\xe6\x04\xb6T\x04R\xb7\xde\xbat\xdd\
W\x00x\xf6\xae\x0d\x7f\xb7\xcd'\x1f~\x01\x96U\x1f\
y\xe3\xcd\xfa\x13j\x12\x19W\xb3\xcf\xdfEJ\x7f\x0a\
\xa6\xf2\x99*\x08\x9f\x0f\x08\x86\xfc\x08J\xa8\x84\x99)\
\x5c\xf2\x81\xc00\x0cV\x81\xb4P\x12\xd1\xb0k\xbb\xa6\
\xd6R\x0b-4\x19F\x04a\x8a\x90\x8b\x10\xac\xefY\
\x882\xd2\x0c<\xf2\xec\x07\xe8\xda}\x5c\xbf:\xdb\x7f\
\xb7M\xfeC@nPR\xf4kW'\xe0\xe8\x9f\x1b\
\xfd/\x93\x7f\x1b\x80\xaf\xbe\xfb\x05D\x1b\xe3\xb8ax\
\x91\xf9\xf4\xabk\xa6:J\x8e$\x92-\xe75\x02H\
\x00L0\x84V\x85y\xe6\x9b\xc3J\x12\xcb\xae\xbc\xed\
5\xa7[I\xde\xa7\xab6U\x1ci\xa5\xd4X\xc0\xb3\
\xae\x11\x01\x02\x06\x94\xab\xc2\xdb\xca\xed\xe3?Ye|\
[m\x05\xbf,\xb9\xfb\x0d\xfc\xf5\xc5\xf7\xb0yG3\
jb\xc0\x86\xfa\x5c\xbcz\xeb\xe1`f\xbc\xf5\xd2K\
\x81\xad\xabw\x17\xf4l\x1bh\x93\x7fj\x8fN\xf5\x8d\
]\x0aO\xba)\x96\x95\xe4\xb4\xbc\xa8\xa6\x02\xcbq\x8b\
\x5c\x85\x0eZ \xc3\x0c\x1a:'M\x97e\xa7\x19\xeb\
:d\xf9w\xb5\xc9\x0d\xada\x8e.n\x93\x9b\xbe+\
?\x92\x99\xfc\xe4\xebY\xea\xdd\xaf>\xd2\xbaz\x0f\xfb\
\xfc\xe1\x7f\xd7\x94\xed#\xad\xe0m\xd8]\x81\xeb{\xf6\
\xc7\xb3\x9c\xc4\xdc\xe1\xe3\xc7b[\xe9=\x22\xa1\xbaA\
kp\x86,\xb5K\xf2\xee*;\xf7\xb0\x0f\xcf\x7f\xe8\
\x03u\xce\x9d\x0fc\xe8\x11\x87\xff]\xeb\x8c\xad\x5c\xd4\
5\xc7\xf2l\x1d\x98\xac)\xd0\x13\x86\x01b\x994$\
\xe0\xb3\x1d\x0a\x90\x84/\x9c\x0d\x9b\x115\x84~?@\
\x95\xb9\xc2\x90\x85YY\x91\xf9\xfe&\xfd~\x93\x91K\
B4\xbe\x0f\xd9|\x90\x196\xf6\x88\xb4\xc0\xdf\x82U\
YQ\xdb\x17\xda\xe79{\xaa\x9b\xc1\xfc*e\x15=\
;B\x99\xfe\xf1\xe4\x13>0\xb1)\x98} \xf8\xc4\
\xafcA\xfa\xb7\x01\xf8\xa1[\xceF\xb8\xcbi\xe8\xd0\
>kP,\xc1'\x11\x99\x19\x1ev[t_M\x00\
422\xe4\xd2\x82\xf4\xd8\xf4\xd5\x15\xe1\xd8\x94c\x0e\
\xc2\x8d\xe7N\xdc<p\xf2\xdd\x7f\xdd\xb6\xbbi\x80`\
\x99-H\xc0S$\x08\x04\x81\xa6\x98\xee\xb9\xb5\x5c\x1d\
\x7f\xde\xc1[\x97\x5c\xf6\xa1\xd5\xb8\xe7\xcb?\x8a\x1d\xeb\
\x16\xc8\x97\xde\xde\xe0\x8f\xd6\xaf\xeax\xe81\xf7\xf4\x1d\
4\xe1\xde\x1e\xb1fQ\xe8j\x7f\x81\xa5D\x1b\x8b\xb9\
\xa3K\x81,-$4\x08$\x89}Ai\xb7\xcd\xd0\
\xbb\xb2\x22\xf4I\x87\x82\xc0\x86.\x11g5\xa9\xb25\
\xe7\x8f\xc8\xa9\xbc\xe9os\xd4Ko\xbe\xc1\xd7\x5cz\
.\xed\xaa7\xa8m\xbf\x12\xdd}g\x17$}~\x98\
\xa1\x106o^\xfd\xef\x9a\xb6}\xa4\xb9\xae\x16\xd9\xed\
\x8b\xf1\x04\x80y\x87\x9f>\x82Kw\xde-RN?\
(\x1b\x1cT\x95N\xef\xb6\x7f0\xee\xb8\xfe\xad\xc1o\
\xbf\xabr\xba\x840\xf4\x88\xc3\x7f\xb6M\x13\x84\xb0!\
\xc8$\x10I\x13$\x04\x04\x1b0\x0d\xc0gX\xf0\x99\
>|\xeb\xfb#\xfe\xd0\xee>\xc76c\xcf[nr\
E\x22\xe5\x16C%\xd6\xad\xd95}sE\xc3\x13|\
\xd2\xf17\xdd\xd4\x14\x17}3\xd3\xcc\xaa\xcc\x5cs\xc9\
\xf4\xf22k\xf3\xba}\xe7$\x96L\x01x\x0b\xac\x98\
\xe07\x88\x04ih\x07\x04\x90)$\x99\xff\x93\x01|\
\xec\xc9\x97\xa1\xde\xc9@vn(2\xef\xdb\xb2#Y\
\x1b\x03@\x00Z\xac\x0e\xac\x01h \xe8\x17\xd1\x92|\
\xf9\xda\xa8\xb4\xe5+\xb7&\xbb\xa3mF\x04'^\xfa\
\x0c\x8f\x1e\xd6\xfe\xab\xaa\xfa\xcd\x1f\xc5c\xfat\x86\x04\
\x0b\x06\x11\x83\x98\xa0\x15aO\xa5>|\xf1\xd6\x8c9\
\x93{u\x997\xf6\x98\x87\x87U\xd5\xc5F5'\xb8\
\xd0aYh+Y\xc20\x0b\x01SB\x98P (\
\x00D\xa4\x82\xa6N\x15dSEQ\x861\xab\xa8(\
\xb8\xa8o\xae\xb3%\xc3-\xdfpF\xf8\xa5\xda\x17\x1a\
FQ\x9f\xc3N\xc5\xd2\xeaZ\xf3\x84\xd3OK;\xf1\
\xccS\xdd\xb5k7$7\xa56\xe9'~w#\xfa\
\xf5\xe9\x87\x1de?\xd6\xb7\xa7M\x9b\x06'\x1e\xc7\xca\
\xa5K\x91\xd4\x1a\x90\x12UUU?;G\xbdz\xf5\
B\xc7\xbc<tm\xdf\x1eU\xa9\x14\x12\xb6\x8d\x0f>\
\xf8`\x9fk&?z\x09f\x01\xf0\x9du\xf5\x00\xfd\
\xe5\xa7\xb7\xc9xr(X\x81M\xb7F\x17\xe7?\xe0\
\x8e\x19\xffr\xe0\x85w\xedI\xa5\xeb\xf1\xc2\xa0C0\
\xfa\x17\xd8\xc5\x09\x80i\x1a\xac\xe02\x91\x00\x98!\x88\
\xa5\x01\x0d\x17 \x03\x03\xac+p\xc7\x98IX\xb3\
\xb5\xc6\xceO\xcb\x5c\xd8\x1c'\x84\x9c\x18\xbe\x98\xf5\x06\
\x86O\xbe\x1e\x13G\x8d\xda\x5c\x17\x8fnN\x0f+\xd4\
6:\xf8\xe3s\xaf\xa0\xa2,\x8e\xf4^y{\x9f\xa3\
\x89\x000@\xa2e\xd3\xf2\x1eNL`0~\xc9I\
\xa2O\xffqh\xdf\xb1\x0d\xa2)\x03\x85iQ\xbc\xfd\
\xf6\xdb\xff~\x00?\xfde=\x16O\xbf\x03\xd3\x9f\xaa\
A\x97\x83\x82#,\x9bO%\x92\x06\x88AB\x82\x00\
h&\x08f\xe4\xe7\x1a_\x0d\xe9\xa2\xdf/\xd5'\xa9\
\xd4\xae\x8d8\xfd\xf8\x91-\x96\x81\xc2\xcaC\xa6^\xf7\
\xe6\x8a-\xf5\x07j\xe6.R\x1a\x00+\x80\x01\x09\x13\
\xd1\x94(\xfad\x89}G\xca*\xafu\xb5(\xd6\xec\
/\xf6VC\x02,\xc1\xd2\x04k\x0d\xd2.\x82\x82\xa3\
\xf99F}\x87\x02\xb9\xbc8/\xf0i\xaf\x12\xff\xea\
\xb6\xb1\xd2\xedSo9\xaff\xfe\x97\xef\x04D\xfa!\
\xe6\xac\xba\x81\xd9\x1d\xc0\xb9\xc9x\xcav\xb4\xae\xa3\xa4\
\x93\x5c\xbab\x91~k\xfa\xfb\xea\xea\xab\xae\xe2\xe7\x9e\
{\x0e\x9d\xbat\xc2\xea\xb5\xdf\xed2\x5cU\x85\x97\xce\
<\x15FQ6\x06\xef1\xd03RMh\xd3\x87!\
\x04\xf0w\x00\xfc\xfay\xe7\xa0\xdb\xb3\xcfcSZ\x1a\
F'\x5c\x5c-\x84\x0cK\xa9m\xd3\xdc\xbb\xa4\x1b7\
n\xc4\x82\xedK\xf1\xf2]o\xa1\xee\x8e{:\xaaW\
\xdf\xbcJDc\xe3H\x1b\x02a_\xc2W\xdc\xe6\x19\
\xa3\xf7\x90\xe7\xb2\xca\x1a\xe2\xbb\x0brPn\x13\xfcY\
m~\xd1\xfahM Hb\xd8\xc4\xa2\x05\xd1`\x18\
\x82\xc0\xda\x02X\xc2\xdd\xf2\x1an\xf9\xddk\xfb\xdc\xd7\
\xaa\xd2T\xd7\x071\xfd\x8b\xcd\x88\x89\xfep\x96\x9dD\
\xe8p<>\xfbf-\xef^\xf2\xca>\xd7;\xad\xb9\
\x12\xdc\xf2[\x10\x98\x09B\x10\x94r\xa1\x7f&\x97\xe2\
\xe1\x99\x8c\x17\xae<8#\x14 7\x99t\xe2\xd1\x8a\
-\x18}\xe0h\xcc\x99?g\x9f\xeb~\xf5`\x9e\xcf\
\xdf{\x02\x1b*B\x18wb~^4\xea\x9e\x00\xc8\
v\xde[hx^c-@\x9a\x10\x89\x88\xd2^\xed\
\xfc\xaf\xdf\x7f\xff\xc3\xa5mRK\xf0\xe6\xdb\xcf\xe1\xd1\
'\xbe\xc1;\xef\xae\xc2\xc9\xe7\xdc\x85\xd7\xfe0ef\
v$\xf0\xa9\x80\x04k\x01F\x00\xa0 $\x05!\x94\
\x1f\xd1\xa4\xaf\x93\x0d9\x94\x85\xd1\x96\x84_\x10\x07\xc0\
\x08A\xc3\x80\x097Y\x10\xd1\xa5\xa3z\xd0\xdc\x13F\
\x07\xee:or\xe6I\xa7\xf5Q\x17\x1f\xf0\xe7\xb3\x9f\
?tx\xc1\xca\x9c\x83\x86\x05g\xcd\xfa\xa6s\x8a\xd2\
\x87\xc6\x9a\x9aF\xa7\xacD61\x97\xef\xd8\xb2\xb5\xf2\
\xb1g\x1e\x8f\x1d9uJ\xca0M;==\x9d\x8f\
?\xfex\x00\xc0\xfb\xef\xbf\xbfw\x8c5G\x0e\xc3\xfa\
\x9b\xcfA\xbf&\x81S\x1f\xf9\x0b*\x0c\xd9\xfe\xb3N\
\x93G\xff\xa5\xc6\x8c<\xfaw\x22?\x97]p2z\
\x0c\xea\x03\xa3\xdf\x0089\xe9\x81]i\x81^g\xf5\
\xeb3\xb0\xe7\xa0!\xbeA]\xba\xee\xbdn\xcb\xd6\xad\
p^\xf9\x1cM\x1f|\x9cO3>\xbaD\xd6V\x1f\
k\xb8\xd24\x84\x03C\x1a\xf1\xb0\x1bn\xf0W'{\
\xc6\xda\x14w\xd6}\xdb\xfb\xb7\xf7;\x09\xfd\xa7\x1e\x87\
\x15s\xe6\xe0g\xc5aH\x050+\x80\x18\x90\x00\x19\
\x02R\x805\x9a\x99\x90\xd8\xefm\xc5\x1d\xc6\xa2}\xfb\
\xaePn\x18kg\xdd\x07r+:$K~7\xae\
\xa6\x86#Ue\xf1\x1f]O0\x00\xe4\x80\xc5\xdeC\
\x0c\x0b\x12\x10\x10\x7f7\xb6{\xe8\xf8K\xd1n\xe0I\
\xb8\xff\xd2#2*\xe29\xbf[\xb5\xc5\xb9\xb6 \xd7\
\x9f[\xd9\xef/H\x0b\x13\xc6\x8c\x19\xf3\xef\x03\xf0\x98\
3\xce@8\x12\xc0\xed\xf7\x8d\x17\xdbw%FY\x0e\
\x8e$\x92\x02\x04\x90ly\x1c\x13\xa4\xd4nI\x1b\xe3\
\xab\x11]\xfc_\xdc\xff\xc0\xcb\xb8\xfd\xb17\xf0\xc2\xeb\
\x8b\xf1\xf6\x97+0\xed\xb8\x01\xc8\xcb\xf4\x17\x1fy\xd9\
\xfbC\x94\x96~\xb0\x99\x84\x96\x10\xca\x80\xd0&\x00\xcf\
\xf5,\x85\x01\x83} \xed\x07k\x09!\x18\xe9a\xbb\
\xaaw1-\x9c2\xd0\xf7\xcc)\x07G\xce>\x7fl\
\xee\x09C\x12e\x8f<v\xfd)\x0b\xd2:e\x87\x0a\
\xdf|ctiMlL\xb2\xb9n\xf2\xeem\x1b\x07\
l\xd9\xbcq{\xd5\xae\x9ds\xdf~\xfd\x9d\x9d\x93\x0e\
\x9f\x1cM&\x92\x09\xdbu\x14\x11\xe1\xc9\xbf<\x83\xc5\
\x8b\x16!33\xf3G\xe3\xcc\x9d\xf1&\xcey\xee#\
\xf8C\xe9\x05\xab\x86\x8d=2w\xce\xd2G\x8b\xd6\xef\
\xfcS\xe7~\xdd\xf3\xba\x0e\xef\xbf\xdf\xb9\x99\xfb\xf5\xc7\
h\x1e:\x08\xbaGw\x1fr\xb3\xfb\xf6\xdbP~i\
x[\xf9\xcb\xa1\xa6\x86)\x09_\xc0d\xeb;+\xd4\
\x88\xe1\xc3\xf1\xea\x1d\xb7@\xbc\xfaV?\xda^z\x92\
pDD\x92\x0d\xc9\x0c#\xa5\xb3\x13\xbbw\xdd\x14[\
6\xf7=\xf1\xe6G/e\x7f\xb6\xf9<\xd5\xa6}\xc9\
\xaa\x9e}\x10i[\x88\x8d\x0b\xff~&\xb7c\xbb`\
EPP-\xab/\x00A\x90\x82\xe17\x5c\xf8\x8d\xfd\
\xfbA\xfa\xe0\x1b\xec\xde}-\x88\xa8\xa8\xed\x01\x7f\x9e\
\xd2\x14U\x8f\xc5\xe3\xd6\xbdN\xb2&\xc7N\xd6\xfc\xe8\
ziG\x00\x1c\xec\x99\x9c\x88@\xf0\xc2\x05<M\x92\
\xb1?\x15\xf8\xb4\x97_Fa\xfb\x02\xec^\xf1\x06\x98\
s\xc6\xa48|Je=]\xb3xC\xfc\xb6N\xd5\
\x8fv\xfa\xf8\x8bY\xf0[\xc0\xa4\x81G\xef\xbd\xe7W\
S!\x98\x19\xe3n,\xc2\x92o\x8b\xb1u\xdb\xec6\
\xd1\xa8u\x0e`\xe4z&0\xcfa\x01\x06\xc0\x1a\xd9\
\x99\xc6\xd6\xae\xf9\xe2\x85kn\xbc\xbd\xe9\xee\xfb_\x04\
\xf9N\xc5\xd6M\x8f\x99\xef\x7f\xb2\xb0G\xaf\x83\xef\x1e\
\xfc\xee\xbc]cc)u\x88VF. \xfc\xb25\
6B\xb6D\xaeABk\x01\x07\x1aa\xbf\xed\xe6e\
\xc8\x8d\xedr\x83\xcb\xfb\x14\x88\xd9C\xda\x84gf\xa4\
\xaa\xab\x96\xae\xf9c\xca\xdf\xe3\xdeL\x7f\xaf\xc0\x84\xdf\
?xW\xb6\x9dJ\xa4+\xe8\xe2\x8a\xb2\x9d3\x85c\
\xbf\xf6\xc9{\xd3\xed\x8f\x16\xad\xb4\x1e\xbf\xff>N&\
S \x22\x98~?\x1c\xcb\x02\x00TVV\xfe\xe4X\
7\x17\x1d\x82\xd3r\xf2z8\x9b7\x5c+\xc89R\
\xd8\xf5\xf9\xe4\xcb\xda\x963b\xa0Pa?\xf0\xb7W\
~t\x8f\x94\x02\x0d}\xba\x05r\x1exv\x126l\
\xbc\xd1\xb0u\x7f\xf6\x054\x87\x8d\xcf\xc1&\x0b\xe9\xdf\
{mVz\x1a\x9ef\xc6\x86A\xa3\x83l!\xc0 \
\x90\xd0\x10$A,\xa5\xf2#\x9b\xc8\x85QU[(\
\xbe\x9c5\x90\xca\xea\x0e\xe9\xfc\xcc\x8b\x7f\xcc\xeb\xd8u\
\xd9\xee\xc6\xa5\xea\xf3\x97\xff\x8a\x89\xa7\x9f\xb5\x7f\x00[\
.\x0c\xc1\xd0\x9a\x89\x84\x00\xd3wq'&\xf9`\x90\
\xb9\xdf\xfb\xbe\x95\x97 \x90\xfbj\xcfX\xfc\x88\xdf\xb1\
!'+V\xb9\xd0\xd8JB\x09V\xfb\x01=\x07\x00\
tm\x09\x0dh\xf9i\x010\x93\xf4\xacP?\x90\xac\
\x86\x06|\xf4\xc9b\xb4\xef|t\xe7\xfa\x94>\x1b\xc2\
\xd7V\xb3\xa2\xeaz\xfb\x9cUp\x03S\x8f;\xed\xde\
>\x0bf\xed\x9c\xd5c,F\x0c<\x16\x0bVL\xff\
\xf5\x00|\xe8\x99\xd7#\xdd\x7f1\xfaML\xf3M\x9f\
\xb1c\x8a\xab\xe8@/F\xbd5\xcaA@3\x10\x08\
\xd1!\xd7x\xbfi\xc3W\xcb\x1d\xcaA\x8fwf\
\x84\x8e\x9cZ7t\xd2i\x8f\x8en\x8c\xebq\x8e\xa2\
\x03\x18\x22\x00H\x12\xf0vn\x09\x0f\xb8\x9a$4\x1b\
\x00\x03\xe14\x1dm\x97g,\xee_\x18^\xd4\xaf\xbd\
\xf1\xd5\xb1\x87\xf7X\x94\xdfwh\x92\x88\xf8\xf3\x97\x9e\
\xef\x90\xd74\xf1\xb0\xf2\xf5s\xbb\x05B\x01\xd8\xca\xa9\
\xd8\xd3\x18\x9fa\xf82w\x5cq\xe3\x8d\xce\xdc\x0f\xde\
\xd4\xdf|\x99\x063d\xe2\xaa\xbbnC\xabW\xaf\x15\
\xbc?'\x94\x8a\x03N\xf2 \x98\x893D\xd84\x88\
\x1d@%\x19\x96\x05\x16\xfb7p\x123\x22\xae\xc8B\
E\xc5\xb1\x22\xa5\x87rP\x83\xa0\x12$\x0de\x04\xd3\
\x83\xe1}\xae\x05\x00J\xa4H\xba 6\x14\xd0\xf2\
\xc5'\xb8 \x87 \x84\x01m\x1a\x10v2d\xae\x5c\
z\x8c\x9b\xd4Y\x1b\xb5\xbe.\xe3\xc2\xb3\x97\x19/|\
\x84\x9d\xaf\xbc\x82\x0e\xa7\x9d\xf6\xa3~\xb8\xb6\x82\x90L\
\xe0\x16\x1b;\x03\xa4\x19\x82\x09\xa6/\x00i\xf8\xf6\xdb\
\x7fF\x1a\x94\x12#\x1d\xc5'\x1bR\x1a\x82\x09\xa4\x95\
\xd6\xb1*&\xed\xfc\x88\xe4A\x04\x05\x80\x00\xc8\x10`\
\xdak\x7f\x82\x10\x04\x01\xd9\x12:\xbb\xaf8D\xc8\x8e\
H\x84#Fa\xbc\xd1\xed\x93$\x1f\x09\x18`p\xa8\
\xba\xd1=ec\xb9\xdb\xd4\xf6\xf2+\xee\xec:\xa6g\
\xec\xd9\xa7\xcf\x05\xd1\xaf\x04\xe0i\xd3\xa6\xc1\x97\x17\xc2\
\xab\x0f\xac\xc5\xe0\xc3\xbat\x8d\xc5\xf5\xa9\x80\x91\x09\xe6\
\x96@\x1d\x01\x80\x14\x08H3\x9c\x15\xd9hzq\xc0\
\xb4\xfbd}\xee\xca\xc3\xafx\xe0\x8b\xc3\x9b\x93j\x94\
\xabdO\x82!\x09\x04\x12\x0ch\x01\x09\x09\x80\xa0\xc8\
\x84f@\x92\xd6\xd9\x19\xaa\xb6c\xbb\xf0\x97\x03;\x84\
g\x0f\xcaw\xe6\x9dp\xd9i\x9b\x88H\x1f3nY\
p\xce\x0dW\x0c{|\xda\xa4\x89k\xbf\xfa\xb0K0\
+\xd4\xc4\xb6o}iU\xfd\x8c\xbb^x\xb3\x8c\x99\
\xf1\xc2#\x0f\xe2\xd0\x91\xc30j\xca\x09\x88D\x22p\
\x12\xff\x9c;\x9a\x94F:\x81\x04\xbbD\xae\x01\xe1\x02\
l\x0a \x18\x02\x82\xfb\x07@\xc4\x07\xa4g\xa419\
\xb6\xd6\xd2\x00\x19\x0eH\x01\x82\x04\xd2\x83\x06\xcc\xe0w\
K\x91L\xa6\x10\xf2\xf9)\x96\x164\x9dA%M\x22\
?\xb4< |\xdb\x0d\x96\x09\xdd\x1c\xcd\xe3\xca\xcaA\
T\xdb\xd4Ih\xe9c\xd3\x04\x93M\xe6\xb6\x95c\x92\
\x1fg\xdc\xb8\xe5\xdd\x0f\xaep\xf6\x94\x97?\xf3\xf6\x87\
\xfb\xed\x87\xeb\x02\x06\x01\xd4\xf2\x11'\xf6^\x18b\x86\
\xe9\xf7\xc10\xf6\xafU2I0IbA\xa2\x15{\
\x82\x00SJ(\xed\xfe\x08\xc0\x19A\x09 \x0b$\x05\
\xb3 xOh\xdd\xcb\xfc\x80\xf8\xf1s\x1e?\xfbl\
\x1c\xf4\xfe,\x04\x03j\x8d\xf0\xfbn\xdfY\x9d\xbc\xca\
!c0\x91\x80b#TQo\x1d\xbfrK\xed\xbc\
+\xc6u\x9f1m\xea#\x5cRR\xf2\xeb\x00\xb8\xf1\
\x9a\xb7\xa0\x1f\xff#\xce\xb8nX\xe0\xab\x99\xa5gi\
%\xfa\xb3\xa7\xb8C\x0a\x09\x86\x00\x83\x94\x1f\xf1\xe6\x9c\
`\xe3K\x95\xbaw\xd1\x93\x7f\x9b\x7f]<\xa5\xc68\
\xa0.`\x13\x02\x02\xc4^\x1cp\xab\xcd\x97Y\xc2q\
\x00\xe9s\xed6\x99\xa2\xa6g\xa7\xe0G\x03:\xe5|\
:\xbamx\xc9\x98\xb3\xee\xd8\xc3\xbc\x94\xb6<\xf9`\
\xe6\x9a\xd3\xa6\x1d\xb8\xee\xc6\xeb\xa7\xee\x96v/.\xcc\
\xac\x0c\xe4\xb6y[e\x86f\x06J\xe7W\x1e>\xf6\
D\xac\xdc\xb2\x07\xfd;g\xe3\x9e\x93\x86b\xfd\xf6\xdd\
\x00\x80X,\xf6\xcf\x0fX\x112\x98 \x14 4{\
\x1fF\x22 \x9c\x06\x84\x03\xfb\xbd\xc5O\x06B\xc1\x10\
\xe2\x8e\x04\x19\x12l*0\x0c\x08S\x22lX0|\
\xdf9\xf8>\xfe\xf4S\x8c>\xf1D\xbci`\x89\xe8\
\xd0\xf1\xa2\xe2\xc1\x1dJG\x8d\x1fR\xdd\xb3\xff\x14{\
\xd3\x8e\x95\xe9\xf6\xe7_u\x08|\xf8\xcd8^\xbb\xe5\
B\x91j*\xd4A\x13d9\xc2\xb7e\xd3\xd8\xc8\xae\
\xedC\xc6^\xfd\xca\x9e\xcb\x87\xe6\xef\xf7\xa4\xe4\xb8\x0c\
Cz;.\x81\xc1\xbc\xf7\xfb\x08\x98\x04\xfa\x89\x14\x07\
M^\xd4 \x11\x98\xa0\xc1\x9aAd\xc0L/\x82p\
\x1dX\xb5\xdb\xf7\xb9>\x92.\xbeC9\xbc%eh\
0k\x08)\xf7\xebl\x11\xe10\xde~\xe5)\x9cw\
\xf9\x1d\xcd'\x1d\x9b\xf1\xc6K\xef\x94\xa6\xcak\x9c{\
\x14\xfb\xba\x13I$\x1d\xb4\xdb]cO\x9b]^\xff\
u8/7\xea\xa6\x12\xbf\x0e\x80{\xcc|\x04\x8f\xfd\
\xedc\xf4\x1b?bP\x22\xa5&0\x19\x01\x90\x97\xe5\
\x06H\x10\x0b\x08\x22\x8e\x84#\xb5\x8d\xc8\x1aQ\xbb#\
q\x89b\xee\xab\x85\x10`\xef\xdf\xa5\x16 bhA\
`\x96\xd0\xac`\x9a\x96\xd5!\xcb,\xef]\xe4{w\
hq\xf8\xe3\xfe\xc5\xc9u\x9f\x7f\xba\xa2v\xf4\x97\x7f\
\x10\xdbV\xcd.\xd8p\xcc\xc4\x03c[6\x9c\xc1u\
\x0d\x03\xa8{\xbf\x881d\xc4\xf3\xcc\xea\xe9\xce\x8b\x9e\
\xda^;\xe5fD\x07=\x8c\xb5\x95s0c\xee\x5c\
\x00\xc0\x91\xf7|\xfek\x0c\x17\xa4\x0d\x04\xb4\x01\xd1\xaa\
\x9b\x93\xf0t:\xd3\x0f\xf2\x07\xf7{\x8f\xcf\xf4#\x10\
L\xa7\x94\x96\x04\x96\x80\x92\xd0dB\x18\x82B\x22\x01\
)\xbf;\xc4\x0d\x18\xd0\x1f\x05\x00\x0f\xcc\xcf,uK\
\x8aJ#\x05\xed\xe0KF\x90\xa8)\x057\xa5\x9a\xdd\
\x8d\x1b\xcb\xb2?\xfedy\xdd\xb1Ski\xd1\xe2;\
\x04\xbb\xb9\x90\x80\xd9X\x19\xa6M\xeb\x0f{\xe2\xdei\
\xb3\xe5\xd6\xad\x8dX\xfc\xe3~(\xad\xa0\xa1\xbd\xfdP\
\xc1\xb3\x12@\x03\x9a\xe1BB\xf1\xfe\xcd[$\x09$\
\x0d\xef\x1c.]\xb0\xd6\x80\x00\xa5\x17\xe6\x92\xab5\xac\
\xda\x1f\xdc \xbd7\x81H\xb4\x1c\x7f\x18 \x06\x13\xa0\
\x0d\xc1\xea'H\x5c\xda\x16\xe7\x03\xda\xc5\xe2U\x095\
\xf9\x90\xc2O\xde\xf9\xa0lDm\xcc\xed\x08C\xf84\
\x09j\x88:}\xd6n\xd9\xd3{\xea%o-\x5c\xbe\
t\xce\xbf\x0e\xe0+o|\x1e\xabVV\xe1\xb8K\x8e\
L\xffv~\xd9\x19L\xb2\xd7\xf7\x83\xd4\x09FK\xec\
\x03\xcb\xe6\x94Y\xa4\xe3\x98\xc6\x10!H@\xb0df\
A\xb2\xc5\xd9\xc6\xda\x84\x86\x86\xe9\xb7\xed6YbW\
\xdf\x1c\xfa\xb4O\x16\xbf\x96k\xad\xd9\x90\x8cvm\x1e\
{\xf5\x8d(\xd8yI\xdb\xf5cG\x8fH\x94\x96\x9e\
\xa5\x12\xd1\x81L\xaa\x80\x04S`Oy\xbc\xc8\x17l\
<\xe5\xae\xdf\xef\x98\xf3\xcd44\xcc\x9a\x83\xcb\xee\x1a\
\xfa\xab\x00\xf6G\x8bI&\xfcp![\x8e?\x10&\
4\xb1\x97M\x22\xf6\xff\x09\xf6\xb9\x1a\xa6\xe1\x83\xd9\xfa\
!%\x09\x90\x86d\x9b\xb2\x94\x05\xa1\xbe\x03p\xb7\x9e\
\xbd\xbe\xbbq\xc6\xbe\xed03\x1e~\xe4@\x14\x0c\x1c\
\x96\xc8\xbf\xe3\xda\xd7\x9a\x8e?\xa7\x97\xd3\xd0|\x11|\
&\x91n\xf6\x05\xd7\xaf;\xa0\xcb\xa8\x83\xd2\xb8(\xaf\
\x11/<\xff\xa3~8\x8e\x02\xfb\x00\xd6\x9e\x12\xc1L\
\x9e\x0a\x01\x90k'\xe1\xda?\x01\x09\x9f\x04\x0c\xc3\x03\
#\x03\xadDB\x85\xf9\xd9\x80\x94\xa8\xfd\x81sR\xb6\
\xcc\x03I\xe1\xcd\x09y\x1f\x04\x16\x0caj\x90\xf3\xe3\
\x0f\xc4\xf7w\xe5\xcb/\xbb\x02\x0f\xfc\xf1\xa1\xe4\xa0\x03\
N\xfe\xb0\xb6\x19\x93\x00\xf4\x96\x90p\xb5\xd3\xb6\xa2&\
1\xe8\xe2\xc9\xb4\xa8C\xaf~\xfc/\x03\xb8_\x97v\
\xf8\xf3}\xe7\xa0\xfb\x98\xab\x0fM\xd9<\x11,\xa4g\
6!\x10K\x08!\xc0\xc4\xd0,\x0c@\xa6\xb3`\xef\
d\xaa\x05\x11\x09o\x12]\x098\x04i(]T\xa0\
\xb7\x0e,\x14_\xf7\x8aX/\x06\xb7nX\x7f\xdc\x91\
wE\x9f8\xa3\x17N\xbd\xef\xb6\xe2e\x07\x8f8H\
\x95\xef9]\xc7\x13\x07\xb202\x85\xf6\x0b\xc16\xc8\
\xd0\x10\xb1\xdaph\xc1\xb7\x93g\xbf\xfb\xf6\xa7Y\x85\
\x19+G\x1d;\x11\xb8\xeb\xc6_\x0b\xb3\xfbN4\x18\
>O\xe9\x81 \x0d\x16\xdeo)\x080\xf6\xff\x0d\x0e\
XI\x98.\xd8p53\x03L\x0a\xd0\x0e\x0c\x95\xd0\
\x11\x91\x01\xc1\xf6/{\xf6\xf72R\x16\x135dd\
\xe5\xac\x15\x1c`HI\x9a|\x08n\xdd\x1d6W\xaf\
7T`\xff\xfdH\xb2@\x80$\x5c\x06\x13k\x08H\
0\x88]\xc5l\xca\x00\xe0\xee\x7fg\x14\xac\xa1\x85\xb7\
\xd3\x90\xf6T=\x12\xc4\x05\x99\xe9\xec\x0f\x05\xb0\xf2\x07\
\xd7\xa7\xe7dz\xfd\x15\x8a \x18\x0c\x0d\x22\x0dCh\
\x0a\x08I6\xfd}W\x5c\xa7N\xed\x91Y|4\xfc\
A\xffF\xa2T%3\xf7\x06\x01\x9aeNMC\xb4\
+\x00jl\x8a\xfek\x00\x1eq\xf2\xad\xf8\xf3\xdb3\
q\xc8\xb4[\xda\xaf\xdd\xd4x\x12\xb3,\xf1v\x96\x96\
\xe8\x05A\xd0\xdf\x8bcP\xc4\xc4`o\xe9[\xe2!\
\x94M\x10\xdaAf\x16o\xef\xdd9<kd;~\
\xb5k\xcd\x8eE\xc7>\xfbD\xfc\x11\x22\xa4\xae9%\
k\xca\xd8\xb1c\x9a\x9ex\xf98]_w8\xa5\x05\
2\x85\x19\x00i\x0d\xd2\x0e\xc8'\x00\xbf\x01\x82\x03\xb9\
s\xd3pc\xe1\xfc\xe3\x0b\xff\xf4\xf0&\xa7\xb9*\xa9\
\xb5\xfe.\xf6\xe2W\x14!\x08&KHb\x10\x14X\
x;\x92\x80\xcb:\xd1\x80Wn\xbd\x14\x92]\x18\x10\
\xc8\x0a\xf8Qh\x04\xe0\xab\xab\x86?\xd9\x96\x0c\xc7\x12\
\xcaU\x00+0,\x12\xca\x09\x98\x99Ed47a\
\xfe\xa3waOu-\xe2\xcd5P\xa9Fh\xdb\x81\
\xc1\x1a\x12\x12~S@\x18!\x88\x8cB\xcc~\xf4\x8f\
\x18s\xf9\x0d\x88\x042\xa1\x12)M\x01\xbfg\xce\xb5\
\x18\x1a.dU\x1d\xc8\xdc\xff\x98\xb5_\x82\x03\x0cK\
24\x14\xc0\x06\x942\xc8\xd1,\xf2\xdbw@\xbd\x03\
\x5cs\xf1uh\xa8\xaa\x86\xa2\x18\x84\x0f\xb0\x1b\x1a0\
\xab\xd1\x8f\xeax\xa4\xc5\xfdL\xd0\xcc\x08\x18\x06uh\
\x9b\x0f\xad-\x9c{\xfe\xcdpU=B~\x8dP\xc8\
\x00s\x14\xf3\xd7\xce4\x84\xd4!\xb0&\x90\x02\xb3b\
)\xcdT(\x10T\x96\xa3\x90\x97\x97\x87\xea\x15+0\
x\xdc8\xa4gf\xc2\x17\xf6\xc1\x170`\x800\xfb\
\xcbO\x90\x91\x95\x89\xb2\x9a\xfa\x18D0\xe5e\xf0\x10\
X3\x1c\xdb\xf5\x07\x00h\xe7_\xb0\x03_\x7f\xe7c\
(\x8fk\x1c{K\x1fy\xd5\xd9\x1fN\xb4]\x1aO\
\xf0\xc0K-;pk\x06\x01\x83\xc0\xdeq\x17B{\
\xff\xa6]\x01\xed*\xa4\x87\xed\xfa\xae%\xc1\xcfG\xf6\
\xcd~\xe3\xe4\x91\x85_\x7f9\xe1\xe8XCq\x0fh\
@\x0e>\xf1\xb4\x11{.\xbf\xeeD\xae\xa8<\x16,\
\x8a\xd0&\xbf\xcc\xd7\xa6\xfd\xa7\xa2t\xf7`\xe6x7\
\xf8\x0c@2H0\x04\x03\xc2\x8a\x19\xb4`\xd9\xd1;\
\xdez\xfb\xab\xf2\xaf\xbe\xf8\xfa\x90\xbb\x7f\x8f\xac\xfc\xc2\
_\x15\xbc\x00@\x86\x1f>\x10\xc8\xf0yj\x03\xfb\xa1\
}F\x18!\xa3{\x22\x81\x9d\xa7v\xaf\xd4h\xf76\
P6\x0f8e\x14\x00\xc0\xed \xb0\xf3\xe2\xdf\xb5\xf5\
\xe9D/\xef\xf5\xf6\x81\x12\xd1\x80\xdcU\xd3/p\xd6\
\xd8\x88\x9a<-6\x02\xc0/\x0a\x12 \x82u\xc5\xef\
\x10\x7f\xe9\xc1\xb4\xc8M\x8fu\xe6\x94E\x10.\xc8J\
A\xe7\xe5\xe8\xc4\x01\xc3\xb5\x0e\xfa\x80\xe7~\x1cx\x9f\
\x9d\x19@Z(\x99r\x032\xe1\xda\x0a&1\x1c\xf8\
\x82Uqc\xc0\x88\x09SC\xc7\xf7\xbf\xd5\x02V|\
\xc7\xa5\xd9\xf2\x1e,\x7f\xe1\xda\xc0QO\x14\xb7iV\
\x01R\x0a\xd0\x8a\x11\x0a\x8apA0\xda\xe3\xc0\xf5w\
\xec\x9a\xf4\xde]\x1ax\xb2\xe5\xa6[\x01\xfc\x01\xa5\xdb\
N-a\xc3?\x86]\x96\x0033\x5c\xf2\xa5o\xc9\
\xcd\xf7%\x94p\xb1k\xd7: \x98\x87{\xe6lD\
y>p.\xe05qIkoGc\xec\xf8\xf6\xe9\
\x8b7\xa9`\x8b\xde\x05h\x86\x01\xc9~\x01H\xe1\xff\
W\xc8\xfd\xc6\xa1hHo\xe4\xe5\x85\xfa\x94\x95\xc5\x9e\
R\xae1\x12\x90\xde\x82\xb6fZ@\x02\xd2\xd3\x9b@\
\x0cR\x02\xcc\x06X1L\x9f\xe3t*2\x97\x0c\xed\
\x92\xfe\xe2\xd8.\x19\x1f\x1ds\xfd\x8c\x8a9\x7f=\x17\
\xa3O>\x02\xdf\xde\xfc\xfbv\xd6'3\xa6a\xeb\xb6\
\xd3\x90H\xf5\xd7ya\xcb\xed\xdbc\xb6\xce\xcc{\xa1\
\xeb\x89\xc7\xcel\xbc\xed\xc1\x1b\xdc\xfa\xdak\x05i\x22\
\x06\x84\xe1@\x90\x0b\xc9\x0a\x90~\xb6\xc6\x1f\xfdX\xe6\
\xd3\x8f\xdea\xaf\xd9\xd4\x80j\x89>\xe3\xfb\xfc\xaa\x00\
\xde\x95\xd7\x0d\x09'un0\x1c\xfa\x8b Hvl\
\xa8\xb4\x94\x93\x1a3p\xa1\xe57?\xb1\xf2\x0aw\xb2\
\x11P\x02 Si\x0eFc$J\xeb\xb3y\xf7\xce\
I\xfe\xea\xca\xc3\x85#\x0c\x18\x04v\xa2p\x8a\x0b\x1a\
u\x8f\xbe\xafSndA\x22'7\x91\xf4\x85\xc85\
\xbc\xd7\x9e\xe1e\xab\x90F\xcb\x81\xd8\xfbj\x19\x8e\xab\
}\x95\x0d~s\xe5\xd2\xa1b\xdd\x96\xa3\xd9\xa5\x0el\
\xa4\x80\xa4\xd6\x0d\xe3&\xceXz\xc4\xd1\xe7\xa3\xa2\xaa\
\xe6\xca;\xae\xf9Q\xdf\xdf~\xf3#t*l\xf6\x9f\
p\xf9\xc6;\x1bT\xf6\x0dR\x84\xe0h\x07\x99\xbeX\
m\x97\x02\xf7\xb5\xb4\x88Z\xa2}\xae%\xa0\x88\xa4\x82\
\xed8\xec\xc4\x5c3Q\x8b\xfe\xe5\xf1\xf0\xd1\xf5\xc8\xeb\
\x06m\x83\x95\x0b\x9flv\xdad\xdb\x0b\x0cC\xce0\
#b\x970]\x92\xac!\x94K\xf1\x98\xce\xdc\xdd\xc8\
\xe3b\xc9\xd0\x14V\x86\x1fBi\xa9\x93\xf1\x81\xbd\x22\
\x17|s\xe3\xac7/x\x22_?\xfd\xe2\x0c\x10\x1d\
\x80SO\x1b\xde\xa6!\xe5\x0cp\xb5\x13qm\x8bm\
\xcb\x86c\xbb\xec:\xc2\xack\xc2\x88\xea\xa8q\x02\x84\
\x91\xc7pa8q\xbb8\xdd\xb9\xcf\xb1?\xbfS\x18\
\x07\xf0?\x05\xe0?>\xf1)\x16/_\x87\xfc\xac`\
\xf0\xfdo6_\x91H\xf0\x1d\x80\xf4S\x8b\xcb\xd0{\
s=oK\xeb\x09\x8d\x99\x00m\x02\xec\x227\x93K\
\x07\x95\x84^\x19\x91#\xde\xb9\x22\xbfv\xcd'\x83\x0e\
Q\x9b\x0e\xbf\x1a\x13\xcbW\x06\xaa/<w\x94\xbbb\
\xce\xe5\xa2\xb6~\x14\xb9\x94\xe1v)\xaau\xc6\x1e\xf0\
TEq\xe7W\xcf\xbd\xe5\xfdM\x0b\x06\xa6!\xd4\xad\
\xed\xb0\xd4\xbc\x15\x8f\xb2\x13\x1b*I@\x10C\x1aN\
\xcb'\xdd\x81\x9d\xd3q\xb7s\xe1\x85\x17<p\xe1\x85\
\x9f>\xb8\xba\x0a\x19\xbc\x05\x19\xfdG\xfe*\xe0e\xd7\
Ai\xfb^p\x95}\xaei\x1a\x7f!\xedH\x96\x80\
\xf6%\xc1\xa6\x00\x0b\xc4!\xcc\x06h\xa9\xa8E\xd7%\
\xe5\x80,7\x0d\x8a\xb3I\xb3g\x16\x17-\xd3\xc1\x1a\
pY\x81\x8cj\x0e\x98)\xf6\xfb\x09\xa6\x1f`\x03\xf0\
\xfc\x0d\x00k\xef\x90\xcb\x0apm\xb0k3;\x8e\x8f\
\xa2n\x1e'\xb5\x8f!\xc0\x8e\x05\xa7\xa8]}\xcd\xb9\
\xe7^~F\x85|s\xf0\xae\x8d\xee\x8c\x0f\x9f\xfdq\
\xff[\x82\xce\xc7\x8e\xff\xfd\x09\xab+\xfd\x7f\x86H/\
d\x97\xa1aA \xe9\x0a\xe9V\x93\xa1\x1d\xcf\xd2\xc5\
p\x5c\x9b\x95\xe3\x1a\xda\xe1\x5c\xb0\xf43I\xcf\xdcI\
\x06\xb4T\xd0\xc2\x06\x91nf\xd2\x0d^\xb4\x99\x06\xb1\
\x82R:\xa2Y\xe6@{\xd7\xb3v\x91\x99\x96\x9a=\
\xa2o\xda\x85u\x0d\xcd\x1b\xbf\x99\xf1\x0c\x00\xe0\xca3\
.\x95o-N]c\x09y\xbe&\xc7d\xed@+\
\x07Z9\xcc\x8a\xa5\xabE\x1e\xc1\xf0\x0b0\xb3R\xe4\
\xe7\xe4\xe6\xc1\xbd\x02\x975\x8e|\xe7\x8b\xd0W\x07\xff\
\xe3*\xc4\x19\x97\xfd\x19u\xf5\xf5x\xf7\xf9\xd7\xd0\xf1\
\xc0\x83\x07$S\xea\x0c&\xd3\xff]^1\xc0$Z\
|\x17\x9ey\x060\x00-`\x06\x9cd\xf76Xt\
P\xbb\xcc\x87\x0f\xa5\xa6of\xa9\xf4\xe8\xdc.\xa3Q\
v\xf8\xe1\x18x\xcb\xc3E\x15S\xc6\x9f\xcc\xdb6\x9c\
C\xf1dO\x0e\xf9\x94\xdd\xaf\xd3Bk\xec\xf8\xc7\xd5\
\xc1\x93g\x04\xbe\xfa0\x0a\xacA\xe1\x1d\xaf\xa0\xc3Q\
\xa7.]6l\xd4[z[\xbc;\x84\xca\xa0\x96\xfc\
:!4H\x03f\xfd\xce6j\xee\xec\xd3\xae_\xb5\
v\xd5\xb6mK\xf6lY\xf4\xf3\xe1\x8d\xbfT\xca\x0d\
\x13\x94\xdf\x0e\x91P\x10\x8em\x01\xa6\xeb\xd9Gu\x10\
H)\x90\xb6\xc2\x80\x1d\x86\xf2N\xdfL\x00+\x05\xd8\
\xb6\xe7N5\x04\xc8\xf6\xccI\xc2\x81g6\x84\x90\xe4\
\xaa\x22R.8ey\xb6X\x97\xc0\x0e\x03P\x9e\xfa\
\x05\xde\xeb\x96e\x10X1\xe0H\xcf\xce\xea&\xa1r\
3\xed\xba\x09c\xbf\xda\x90W8\xe7zT\xbb\x8f~\
\xb8i\xbf\xfd\xdf\xb3g\x0f\x8e<\xec.L\x9aP\xf2\
\xd5\xee\xb7\xca\xdf\xaf\xb1\xacsA\xd2 \xe5\x03\x84a\
8\xd2-\xd6\xc4-/\x8d\x0b\xad\x05\x18\x0a$\xdc\xbd\
<\xc7\x12`I \xed\x1d\xaa\x00\x12\xe9\x9a9\xbdU\
e\x94\xa4\xc1\xa4\xc1\xad/\xa0v\xe0\xf7[\x95>\xd3\
zn\xdbWOoYQ\x99\x89\x82~\xc7\xa2j{\
3>\xd9\x98\xea[\x9f\xe4\xa3\xb5\xe0\xce\x90\x02\x80\xe9\
\x8d\x93$ \x14\x18\x80\xd0\xc4Zk\x90r8\x9cm\
\xae(\xe9\xd6nIx\xebU\x109\x19\xbf<\x98\xe7\
\x9bo\xbe\xc1\xb0I\xd7 \x12 \xdc\x7f\xeb)\xe8~\
\xc8\xd8\xee\x8d\xd1\xd4\x05\x9aeg\x22jqDx\xf6\
\xd0\x96\xf8\x0d\x90\x06\xa0%H1\xd22\x9c\xca\x91\xdd\
\xcc\xa7\x8fn+.\xb9o\xfe\x82\x0f\x17l\xa8\x8a\x1e\
2\xac#F\x9dx$u=\xf9\xc2\xbe\xf6+O\xff\
Y\xad_w+\xa2VO\x95\x9f\x96\x8c\x1f>\xf4\xed\
\xaa\x09\x13\xae|\xce\x7f\xf0\x9b\x8d\xcb\x97F\x17\x7f\xfb\
-\x98\x19\xc6Q\xa7b9\xf9Th\xd4\xf0\xd7)\x1c\
\x99\x0bV\x80\xa1A\x8a\xbd\xe0l\xbf\x1fB@\x98\xab\
\x97\x8f\xd7_\x7f>\xb6z\xf0P#\xa3\xad\x8d\x1bN\
\x9e\xfa\x8bA\xba~\xf6l,\xfd\xf0C\xc4K\xe7\x83\
W\x8c\x03\xaf\xbd}\xef\xeeU\xfc\xd63hWU*\
d\x86?\x93\x0c\x9b\xc8\xd0\x00\x1c@Z \xcd G\
\x02\x8e\xb7\xebz\xe1\x89\x16\x84r[\x0e\x9d. \xd9\
A\xc8\xbfE\x06|\x0b\xc8\x08\xec\x02\xc0\xa4\x19,\xc9\
S;\x95\x0bv\x15\x98]\x80\x5c0t\x0b\x984\xa0\
]\xc0U\x80\xcd \xa5\x00$\x01\xd3\x82\xdb\xa3\xb8\xb6\
\xea\xd8#\xde\xfd\xb6\xb0\xf3}\x97\x9d\x7fz\xd9\x157\
_\x8b\xad;\xf7\xcf`\xd4\xa6M\x1b\xcc\xf8\xe4V\x5c\
v\xf3\xa2\xbaC\x07\x15\xfc)\xcft^g\x1d\xad\xb1\
E3,JB\xb1\x03\xb0\xf2\xe6S1\x84g\xc3\x07\
I\x01\x09\x05\x83\x13\xd5\xec\xc6\xbf\xd4nl1#\x15\
eR\xd0-\xe1\x02\x02^l1\xb5\x04\x08A(\xf8\
\x8cd\xac}\x9e\xb5\xfc\xc0\x9e\xfe[\x8e\x1dS2]\
\xb5\x9d\xa4\x96.\x99\x8d\xb1\xa3\x07\xe0\x92\x8b\x86\x84\xaa\
ct\x02\x0bc\x18Q\x8bGPko\x1e5\x03-\
\xce\x16h\x97$\xecx^\x8e\xf8\xbco\x97\x8c\x07\x9e\
\x7f\xfc\xe1\x86\xce\x19\x80e9?\xbf\x03\xb7\x9alF\
O\xb8\x11%\xc5!\xdc~\xc1!\xf2\xf3\x05\x97\x1eT\
Y\xd3|\x83V\xe6X\x222E\x0b\x93\x0e\x8b\x96\x1d\
\x01\x0cV\x00AB\xc2UEEr\xc3\x01=3\x1f\
>*\xcdyw\xd4\xbd\xb75\x11\x11>\xf8\xfd\x03h\
\x7f\xfaT\xdf\xcc\x93\xce\x1a\xe5\xcc\x9dy\x0b5\xc5G\
\x13\x93pKr\xa3\xc9\xd1C\x9e\xaf\x1b0\xec\x81\xf3\
\xae\xb8\xae\xfc\xbc\xa9\x0d\x18\x7f\xff\x8d8\xfb\xaa\xabA\
D(\xdb\xbc\x1ai\x8bg#\xe7\x80\x11\x15\xa5\xeb7\
\xbf\xaaV,\x1f\x0c\xed\x14\x11L\x08M\x10\x86\x06\x0b\
\x032^\x93\x8bys.\xc8\xeb\xde}a\xf7#\x0e\
\xdd\xfa\xd5\xf4\x97\x7f1\x80\xabZB\xf6V\x8ek\x87\
\xb4\xeb\x9fE\xdb\x03G\x82\xf9\x0e\xe8\xb9K\xd1p\xc7\
\xfd\x08\x7f5\xbf\x8b\x1b\x8f\x1e\x0ahAN\x0aB\xb1\
c\xf8B\xeb\xe0\xa2Z\x93i\xea\x90_hS\x02&\
\x80\x80\x04\x0b\x83t0\xa0\x9d\x88\xcfMt\xee\xb25\
\xab]\xc7\xbf\xe5\xcd\xf8`\xdb\x9e\x03\x06\x0cOm\xde\
|\xbc\xbf<\x9e'4\x9b\xac-@\x83xo\xa8\x16\
\x03^\x1c\x7f\xcb\x0e\xdcbK%\x02\x84`6\xb4J\
\xb4/\xa8\xdf5d\xc2\x97sE\xc1G\x0f]\x7f\xf1\
\x1e\x00\xbcz\xf5\xda\xbf;\xbeY\xb3\x16@\xc5o\x05\
Q\xdf\xedw\xdf\xfb\xe8\xd5\x0b\xd6\xec\xferO\xbds\
tcC<+e\xbb\x86R\x8a\x053\x884\x08.\
\x0c\xe1\x92\xe3\xa6t\xd0\xb0j\xfa\x0c*\xfet\xdd\x96\
\xca/\xf2\x0a\xb2\xd2\x9bc\xcdS\xea\xeb\xdd\x91d\x04\
\x83\xae\xad\x0cf\xcd\x02\x04C\x10\xa4\xc1\x88H\x1d\xed\
\x5c\x88%\x9d\xf2\xf8\xfdni\xd80\x7fw\xb9\xfd\xdc\
S\x0f`\xf0\xc0\xde(\xdf\xfa$\x92Y\xbe\xb6!\xb6\
\xb3H;\x0b\x04\x1cMp\x18\xda\x85f\xed\x9deI\
\x904\x05L\xd3M\xb6-\x0c\xcd\xed\xd29\xef\xd5\xd7\
\xfe<|\xc7\xed7\xdd\x08##\x82O?\xfd\xf4\xa7\
\x0fq\xdf\xb1\xdf\x84\xc1\x1c\xc7\x84\xa9wbHW\x8a\
|\xb8\xa8f\xf2\xeej\xeb&\xa5e\x7f\x8f\x8cO\xb6\
\xf2J\x82\x85\x00\xc3;\xa1\x12\x03~?\xc7\xba\xb4\xf3\
}:\xaag\xc6\xa3\xd7\x8dl\xbf\xf0\xd5\xcf6\xbb\x97\
=~+\xde<\xe7jt8jBZ\xc3\xf3\xcfM\
\xd1+\xbe\xbd\x9a\x9a\xad\x81$\x05\xac\x9e\x85\x15u%\
\xdd\x9f\xdaQ\xd2\xe7\xf1\x01\xe9v\xc3k\x1f,\xc1\x87\
\x8bf\xc2\xfe\x1e\x13\x0f3\xa3\xf4\xd3\x8f\xb0\xf9\xce\xbb\
Q|\xca\xc9Y\xa9g^|\x9cj+\x8e7|\xc2\
\xf03A\xf8\x04\x5c\xa9\xe1\xaa\x14\xdc@F\xb4q\xf2\
\x91\x7f\x98\xd3w\xd8\x83\xf9\xe5\xe5Vq$\x0fS.\
;\xf3g\x01\xbc\xf0\xc2#\xe1$S\x85Y\x83\x06\x0d\
\x15\xc3\x8f\xdd\xd6v\xe8\xd0\xcdU\x069]\x5c\x96M\
\xe7_\xd8#1\xff\xf3\x0bTc\xec<\xd2*@\xda\
\x866\xc2\x8d\xbe\xbe]\xcf\xcd\xb7\x1b\xbeH\x8e\x1c\xeb\
\xb7\xbaw\x96Nv:t0\x00\x0eD\xa0\xd33\xe1\
\xb4\xed\xac\x1b\x9bb\xf6\xaa9\xcb\xec\x8fN>9\xf5\
`&\xf1\xb2\xc6\xa4\xf0\xbd\xf7@\xa0}\xb7\xe1fN\
\xd2\x08P\xac\x06\xda\xd0`\xd3\x04\x0b\xe9Y\xff\xa4\x00\
[\xdd\x00\x1e\x90\x89\xa0\x03AN\xb8){\xe5\xea\
\x1d\xd6\xa9\xafm\xb3\xd6?x\x8d\xdb\xa3G\x0f,Y\
6\x1fC\x06\xf5\xdd;_?%\xf3\xe7\xcf\xc7\x88\x11\
\x058\xe7\xb2\xb9x\xfe\xb1\x91r\xd1\xc2=\xc1e\xcb\
+\xfdu\xf5q_4\x1eci\x0a\x18\x86\xc9&\x11\
\xf9\xfd\xa0=\xd5uj\xc1g\xaf\xc6\x7f\x7f\xd3Q\xd6\
\xa4\xcb\xd7\xb8\xa8\x9b\x81K\xce?\xdc\xe7\xa4\xf5\xf7\xe7\
\xe6t\xf0'\xe3I\xd3\xb2\x1c\x96\xac9\xe2\x17d\xb0\
b\x7f\xb2\x22\xe1T/\xb5\xeexu\xae}\xce\x94\xa9\
\xb8\xeb\xd1\x07\xd0\xa6\xa4#\x00\xe0\xf4\xd3.E \xe4\
\x0b\xc7\x9a\xec,\x83\xa0\x0d\x03,\xa1\x98\x95\x86\xe52\
\xd8\x0b\xe2!\x19\x92P\x8d\x8d\xc9\xa2,;\xd1X2\
\xce\xd6\x1b\xe6B\x06\x82x\xeee\xcf\xc2\xb2\xdf\x11\xb6\
\x82\xf7\xfa\xdf\xdd\x86\xfb\xffp\x17\x06N\xfa\x13\xdaD\
J\xf3wT\xaa\xd3\xcb\xeb\xec\x8b\x15\x1b\x1d\x89%\x04\
\x9b\x10B@H\x09\x08\x01M\x0c\xad\xbdOA \xa0\
\x1b\xfbt\x0c<\x7fp\xfb\xe0\x93\xb7<t\xcb\xf6\xa7\
\xef}\x0c\xe7\xddt\x19^>\xfe|\xe4\x8d\x1b\x9a\xa1\
\xdez\xe7,Z\xbf\xfcrJ\xda\x1da\x18H\x0d\xed\
\xb9\xa54?\xf7\xc1\x97\x17nz\xe5\xe6\x93\x8fLL\
\xfb\xd3\x0c\x5c?m,\xee\xff\xeb\x93\xfb\xed\xdb\xced\
3:\x04\xd3i\xed9g\x8e\xb1\xbe\x9a\xf3\x92d\xbb\
\xbdi\x9a\x90\x9a\xa1L\x0bJy\x9f\x5c\xbbK\x8f\xd5\
ug\x9cu\xc6\xc4S\x87\xaf\xbc\xe5\x8a\xd7\xb1 \x1e\
\xc6\xd7\xcf\xdf\xfcw\x01\xbc\xf2\xecC\xe0B\x0c\xcdh\
hxB4\x05\x9b\x08\xf6\x12Q\xbe#*\xa4/\xc4\
\xca\x1d\xa1\xa3\xd6H\x10\xf9\xa4r\xc1Rp|@\xff\
\x99\x15\xa3\xc6^\x90\xbfa\xd5\xf6\xf6P\xb0\xba\xb4\x87\
\x93\x95\x0e\x15\xf4\x81\x03\x11Pu-\x9c\xdc|T\xca\
\x10*6\x97\xa2\xff\xd1\xc7!\xfd\xa1\xdb\xd0\x18\xf1\xa3\
\xb1o\x1f\x84\xf3\x8a\x91\xe6JP\xbc\x16,5\xb4i\
\x80E+c\x91\x06+\xdd\x92\xc1\xe0\x01\x18R\x82\x83\
a\xd8\xae\x8d\xed\x1b\xcb\xf0\xe2\x86\x14\xbe]\xf05\xaa\
\xb6m\xf8\x87h\xb7\xfe\xfa\xd7\xbf\xa2\xb9\xae\x0eE\x1d\
; \x9a\xf4c\xcb\xe6z\x94\x96\xd5Ai\x07\xa6\xcf\
\x80a\x18\xf0\x09 \x18$TT\xd7a\xfd\x92/1\
q\xc2`t\xea\xd5\x13\xb5{*\xb1h\xe5Z\xf8\xb3\
\xbb\x22\x92V\x88\xda\x9a\x06\xf8}F\xcbG\x87!\xd8\
A\x98\x9b\x11\xaf\xdd\x84\xa5\x8bW`SU5\x9c\xef\
\xf5-\xda\xc0\x88d\x02\xa7\x9d\xf6\x18\xca\xcbV\x22\x18\
\x100\xa5\x02\xb4\x86\xad\x80TJ\xc1r\x1d\x84\xb3\xc2\
\x90\xa98\x82\x01\x1b\xb55\x8dpl\xc6\xc2\x95\xb3\xf6\
\x8e\xe1G\xa3l\x9d\x80{\xef\xbd\x177\xdf|3\xba\
\x8d~\x08i\xb4\xa6]}\xcc\xbc\xaa>\xea\x9e\xa6X\
\xe4\x82\x0cHez;\xaf!\xc0-\x0c\x93\x1a\x1a\x0c\
F \xc0\xf5\xbd;\x86\x1e<\xb4C\xf0i\xabm\x97\
\xba\xfaw\xbf\xc4\xa3s\x9e\xc3\xb3\xd3\xaeD\xc1\xd8\xde\
\xe9\xfa\x8d7.\x10\x9bW^%\x12N\x11|&\x12\
\xa3\x86\xac\xdeUX\xf4\xc7\xc7\xa7\x7f<}DF0\
\xf5\xc6\xa6\xd2\x9f\x5c\x84\xd6\xfe\x95n\x5c\x8d\x86\x0d\xeb\
\xc0i\xe1tu\xcb\x03\xb7\xd3\xae\xd2K\xa4 \xbf\x10\
\x00K\x07\xdap==\xca\x17HYc\xc7?Y\xf5\
\xe8s7w[\xbf<\xd5\xbd\xdf\xc0\x96\x98\xe2\x9f\x96\
\x0d\x17\x1f\x03\x15\x0a\xf7O\x9b=\xfbyQQ;X\
\xb7\xfa\xf3M\x03\x9c\x92 \x9b\x01\xe5\x80}@\xf3\xf0\
!\xdb\xb6\x8d\x98x\xcd\xab\xe9\x93?\x1a\x11]\xae\xae\
=y\x02\xd0\xca\x1cD/b\x0d\x8f\x94\x05\xe7\x9ec\
\xc63\x80;wW9/\xbe\xb5Q\x01\xa0\xfa\x87\xee\
5ce\x1be\xe2\xa1\x97\xdd\x1e\x80\x03\xac\xc1\xd6\xd2\
|ti_\xb0w\xdc\xbf\x84\x95\xfe\xd7r\xd2\xec\xef\
YD\x84\xdbo\xbf}\x9f\xffw\xe7\x9dw\xfe\xec}\
\xbf\xa4\x8f\xdd\xbau\x83R\x0a\x99\x99\x998\xe2{\xd4\
\xb9w\xdey\xe7\xde6\xe3\xf18\x9e{\xee9TU\
U\xc1\xe7\xf3\xfd\xe8\xd9\xc0\x0f\x1c\x19\xad7\xfe\xf1\x8f\
\xf7\xe3\xe6\x9boF\xe7\xc3\xde\x85\xaf\xfe\xbd\x92\xcad\
\xe0\xd6h\xc29QC\x84\x09\x02By\xa6\x11\x98\xec\
\x05\x86@@k\x05&\xc0\x1fP\xb5=:\x84\xffx\
\xd6\xe8NO/\xdbX\x19\xb5\x9f|\x06\x0f-\x99\x89\
\x17\xce\xff\x13\xf2\x8e\x1a\x1a\xb1\x9f\x7f\xe1\x1c\xb9i\xc5\
\x95H\xbaE\xda\xef\xe3\xc4\xa8\xc1+w\xb5-\xb9\xe7\
\x89\xd7\xdf\xfe\xa8kPX\x7f\x9c\xf1\xe9/\x9a\x8c\xb5\
k\xd7\xa2\xff1'\xe1-\xa2\xe6n\xd7\xde\xf8\x8a[\
\xfd\xe1\xc1hj\x1c\xc4\x92\x00r\x00\xe9z\x99\x06\xaa\
9\x10X\xb9\xf8\xb0\x8c\xe7\x1f\xfd\xa4\xfb\x85W\xcc\xfc\
\xfd\xdd\x8f\xfd\xfcb\xe6\xe6\x82\xc3i.GBI\x0a\
\x98\x10,\xc1\x96\x86v\x15H\xbb`WCe\x04\x9d\
\xe8\xe8\xe1\x9b\xd6\xf7\x1a\xf1\xd0\xa3KJg\xf6\xc9x\
E\x95\x96\x97\x83\xae;\x0b\xd7\x9d\xd6\x197\x9f\xf54\
J7\xf4\xf4G\xa6\x9c\x7f\x9c\xb5l\xe9\x91\x94\x93\xa6\
\xce9\xec\x88\xe9\x0fE\xf1\xc1\x19\xf7\x5c\xd6;\xf1\xd9\
;\xe7\xe8\xa6d{s\xfb\xa4\x15\x1b\xaf\xbd\xf6\xc9v\
s\x9f\xae\xe9z\xd3;\xfb\x00\xe2?I^\xf8S\xcf\
\xda\x1fh~\xc9}?'\x9b7o\xde\xfb\xf7\xb2e\
\xcb\xfe\xe96\x7ft\x88\x9b\xbf`\x11n\xb8\xe1z<\
\xf3Y:\xcc\xfaw\xda\xd7\xc7\x83w\xc5\x12\xee\xf1\x9a\
D\x80 Z\x22\xb0\x18\x90-\xb1\xbeD\xd0\xac\x01f\
\xf8\xfc\xaa\xaeS\xdb\xf0\xddg\x8e\xeb\xf1\xec\xec5\xbb\
\x13\xcf\xbd4\x0b\xcf\x5c0\x02\x1f\xbd\xfc4\xba>q\
\x95Yu\xc4\xb1Sh\xe3\x8a\xcb(\xe9\x14k\x9f\x89\
\xc4\xc8\xfe\xabK\xdb\xb6\xfd\xfd\x13\xaf\xbe\xf5q\xe7\x88\
i?5\xe3K\x94t\xef\xfd\x8b\x06\xd1\xa7O\x1f4\
\xee\xde\x8d>\xdf|\x09:\xf8\xd0\xf5j\xe5\xea\xe7\xf4\
\x86\xf8=\x0c+BB0\xf9L\xcf\x1cE\x06\x99\xb1\
\xda\xce\x19\x9bW\x1f\xff\xe5\x9a\xb5\xcbO\xf2\xa3\xe1\xb6\
[\xfe\xfe\xa4p\xdf!@vN\xa9U\xd3\xf0\x02\xa5\
\x16\x93\x88\xd5u@\xc0\xf6\x93-\x88\xe1w\x9d\x9e\xf9\
\xcd\xd11#\xbf\xde\xd6a\xe0+\xcf\xbd\xf1\xe1\xf2\xae\
9\xf9\xc9\x1b\xae\xbf\x10]\xbbv\xc6c\xf4:\xce\xbe\
\xf4\x19d\x0e\x1d\x8bm\xd7]9\x80\x96\xad\xbc\x5ck\
w(\x95\xd7\xa3`\xd5\xb6\xec\xa3JWlK~6\
\xfdd\x15\xaf\xbd\x08Z\x1b\xb4\xea\xdb\x83\xaa\xdeo_\
3u\xe0\x15\xcf\x9eu\xeb\xd0\x7f\x9d/\xeb\xff3\xd9\
\x07\xc0\xad\xa0\xe9~\xf0\xc3\x08\xa5\xe6\x17\xd5\xc5\x227\
\xc5\x93\xeax\xcd2@\x90\x1ex[II\xc8\xb3K\
zZ\x99\x86\xdf\xaf\xeb;\xb7\xf3\xff\xf1\xb4\x83\xbb<\
\xbb`\xfd\xee\xc4\xfcek\xb1i\xe1\x9fQi\x04p\
G\xaf\xce\xb8\xe4\xab\xaf\x0f\xc2\xaa\x85WQ\xdc\xea\xc8\
\x86DrH\xaf\xed;\xf3\x8b\x1f|\xec\x8dw?\xee\
\x14 \xfb\x85Og\xa1S\xcf_\x06\xdeV\xc9l\xd7\
\x0e_\xdfw\x17\xd4\xe2E6\x9dt\xd4\xc7zmA\
\xae\xd6\xa9\x12\x10+\x98 \x110!\x0dA\xa6\x9d0\
\x12>\xa3q\xd7\xfb\xcf\xb5+\x19\xd2\xbb\xe1\xe7\xda\xed\
\xed;\x1f\x9c\xfdL\xf4\xe3\x8bo|\xa3\xd3\xd85_\
\x8b\x95\x8b\x8a\x1cN\x95\xb00\xfc\x22-\xb3Z\xf7\xe8\
\xbd{[\xd2\xa8\x98v\xc4Q\xf5_|1\x1bw\xdc\
}\x0b\xda\x94\x94|7\xa9\xa6\x1f\xab\x01\xe2\xda\xd2\x8e\
\x22\x91*F\xc8\x04\x8b\x14tmmq\xa2\xaa\xa2\xab\
\xd9\x98*a\x86A\x01\x01\xe18Yvm\xc3 k\
W\x85Y[\x97r\xfeK\x19\xfb\x8f\xc9>\x00>\xe6\
\x9c\x17\xb0{\xc7\x0a\x18rqZYm\xe8\x82xR\
\x9f\xa8A\x010y\xe0\x15-6>x\x06j/\xbf\
_\xc1\xef\xd3M\x9d\xdb\xf8\x1e\x9d6\xb4\xed_Vn\
\xaaHl\xab\xa8\xc2[/\xbc\x8a\xb9\xd7OD\xda\xca\
\x0d\xb8\xf0\xc43\xba\xbb\xb3\xbf\xb8A\xc4\x12\x83X\x01\
v\xcfN\xb5\xbb\xf3\xdb\xfd\xf9\xc5O\xbe~\xa7\x7f\xc0\
g\xbf\xbdb\x1d\xd2\xb2=N\x81\x7ft\x01\x0f\xf9\xdd\
\xad\xb8\xe4\xc9\xbf`\xcf\xa6\x9de=\xfb\x0dy\xd0\xa8\
\xaa4\xech\x1c\xb6r`\xc7\x1d\xc0\xef@F\x996\
/\xfb\x06\xf55\x15\xa9\x0bn\x8d\xfel\x9b7.\xbc\
\x094\xe5>|\xf6\xc9\x07I\x87hW\xbf\xd2Ow\
\x9ds\xf6\xfbK\x8a\x0dE\xcf\xe6vU3\xd7\xbc\xc9\
TY\x83C\xc7\x1c\x82m\xeb\xd7\xe2\xd9\x96\x1c\xb8\xd6\
\xbe\xaf_\xbe\x18G\x0d\x1a\xc9\xab\xef\xb9lkFz\
x\x0f%\x9a\xdb\xb2\xf4\xc1\xea\xd4\xb1<U\xd2k\x9d\
.j7\x08\xa5\xeb\x5c\xc4\x95\xe1f\xe65\x89\xde\x03\
\x165GSv\xd8\xf8\x8f\xb1V\xfd\x9f\x91\xbd\x05\xa8\
\xcb+\xaaq\xdc\x197#\x12\xa956oM?\xa9\
1\xc6\xbf\xd7\xa0\x12\xb4D\xbd\x0a6 Z\xdc\xc3\xad\
9z,\x18\x86\xa1\xad\x0e\x85\xf4\xf2\xc0\x12\xba\x0dF\
A\xe5\xaa\x1d\x0e\x96|y\x17\x96\xce\xfc\x1a;\xe6}\
\x0b_I\x97H\xfc\xee\xbbn\x135\xe5WI\xdb5\
tI\xdbD\xf5\xf0\x11\x8f|\xb0j\xfd}\xed}\x88\
\xbe\xb0h\xc9w\x9d\xf9'w\x9f\xeb\xae\xbb\x0e\xb1D\
\x12m\xb2\x02@S=\xecx\x0264\x1c\xe5\x02>\
\x072\x1e\xc5\xba-\xbb\xb1\xa7\xbe\x06\x9bv\xc6\x90P\
\xbf\x9c\x98\xeb\xf2\x8b.\x82m\xbb\xe8\xdc\xb5\x0d\x22\xe1\
t4\x81\xf5u\x12\xb9\x11\xf5w\xf5\xc3\xf5o\x5c\
\x8b5.\xf9{}\xb6\xe8\xb8\xd0\xaa\xedc\x13=;\
\xeb\xa5\xd9]?9\xab\xba\xcb\x8c\xf9\x87D\xfb\xe4|\
\xf0\xe1\xc9\x886\xb7K\x0e\x18\xb85y\xfc\xe5\x8fk\
'^]\xb3\xdb\xc2\xd1\xe7\xfdr\x87\xcb\x7f\xe5{\x00\
nl\xb6\x90\xd5\xf5\x0c\xb4-4F7G\xf1\x98\x02\
\xf5C\xab\xceK\x02\x82L\x906\x00\xe9\x85Hj0\
\xa4d\x14f\xf3\xfc\xee\xb9\x89\x8bGN\xb9w\xd5\x8b\
\x0fO\xc61\xc7N\xc1\xef\xef\xba\x13\xdb\x89\xf0\x0c\x11\
\xfa\x8e\x1a{8\xd6\xaexD\xa4\xac\xce\x14\x0a\xda\x89\
I\xe3^\xd9\x96SrS\xc7H\xb0\xfa\xdc{\xee\xc4\
\x11G\x1c\x81\x8f>\xfa\xe8\xff\x14\xdb\xfa\xf7MY\xbf\
g\x16\xc7a\x9d\xac@o\x1a\xdbs\xa0\x83\x8d+\x19\
\xccX\x03\x18N\xcd&\xe3K_7\xf7\xfa\x0cr/\
\xba\xf5\xcf\xe8\x10\x09\xe3w\xbf;\xff\xb7\xee\xfe\xff*\
\xf1P\xd3~0\xdaeuB$(r+\xaa\xc5=\
\x8e\xa2\xf3A-\x91P\xd4J4\xed\xf7R\xe3\x05\xa0\
\xc93\x97\xa5\x85yO\xafv|\xdd\xd6\x8f\x9f}c\
'\xa0W\xaf^\x8f\xbe}{\xe2\xa6\xe3\x8fG\x17\x97\
a\xb4+i\x8bw\xde\xf8\x93h\xae?Q\xb8\x02\xf6\
\xb0A\x8b*\x0f\x18u\xe1\xf1\xc3\xc7\xaf\x1cy\xe6!\
\xa8\x88\xe1\x1f\xb2[\xfeo\x11f\xc6\x86\xe5\x8b\xd1s\
\xd0P\xcc\xfe\xeb\xad(\xb1\x81\xc6\xdc\xcex\xec\xedO\
\xf0\xfc\x1bo\xe1\xc9g\xff\x8c\x09\x99\x04'%\xb0%\
\xa6Q\xe3\x06!M\x81\xb3.:\xef\xb7\xee\xfa\xff:\
\xa1.]&At\x0aa\xf3\x17}\x91\xdfc\xd3\xb1\
\x89\x04\x9e`\x88B\x0f\xc0-\xd5\xdb`@\xc0\xdf\x92\
N\xc2`(\xc0\xd0V\xa7\x22z\xbeov\xdd\xcd\xb1\
F\xdd\xd8\x7fm\x18\xb7T\xbd\x04)%>|\xfdU\
\x1cu\xf2\xa9xa\xc8A\xd3h\xeb\x9a\xa7D\xca\xca\
\xe1N\x1d\x1a\xa3\xe3\x0f\xbbv\xfa\xeau/\xe7\xda1\
\xe7\xedo\x17\xfc\xd6c\xff\xb7\xc9\x0fm\xa3?e\xd7\
\xfd\xbf\xf4\xd2\xfeV\x22\xcc|\x89\xa6\x0a\x85\xa2\x01\xcb\
\x8bS)g*3\x0a\xc0\xba5O\xcaSv\xb5\xf0\
\x0el\xa4\xbc\x08%\xb8\x88\x04uYa&\xff\xed\xa1\
\xf7\xdfm\x5c\xbay\x16\xd6]\xda\x05\x86a\xe0\xb9\x07\
\x1fD\xc5\xbc\xf9x\xfa\xfa\x1b\x0aUM\xd9Q\xdaM\
e\xa9PX'\xfa\x0fx\xbd\xd4\x9f\xf9\xc1\x01\x1d\xbb\
;\xf7=\xfd\xc2\xff\xe9\xd2\xb1\xadY ?\xcc\x06\xf9\
\xa9\xff\xff_\xf9\xe7E\x84\x00TU\x10\xac\x98\xd9S\
)\x1c\xda\x12\xb8\x8b\x96@\x92\x96_\xec\x85\x96\xb1\xa7\
\xfd\x0a\xd2Vn\x1a\xde\xc9rv\xad=\xf9\xa0C\x10\
\x09e\xe0\xed\x96\x03\x8d)$\xacg^\x81\x9e\xb3\xb8\
\xb3\x8e6\x8c\xd5\x0e\x0b\xabk\xf7M\x0d\x85\xed\xdf\x1e\
\xde\xadS\xed[\x7f}\x1c\x9d{\xf7\xf8\xad\xc7\xfd_\
\xf9?\x22bSM32\x0ak\x83\x8e\xab\x07BS\
\x9a\xc7}\x8a\xef\xed\xc0-\x5c\xa8\xac\xc1ZC\xb3F\
\xc0\xd4\x0d\xb9\x11\xf5i\xb9oTt\xdb\x9eu\xd8\xb0\
m\xd7\xde\x06\xebc\xcd\xb0\xdf|&\xa0b5\xc3\xb4\
\x9b\xcct3\xb2\x9dd\xb7\x1eoW\xc1\x99\xbfz\xe5\
*\x1cp\xe0\xc8\xff\xee@\xff\x95_M\x84D\x04\xb0\
#\xd9\xca\xd5\x87\x80\xb5\xbf%\x83\xcd\xabO\xc1\xad;\
1Ck\x06\x14\x03p9#\xa8\x17Et\xe9\xce@\
\xd3\x5c\x04\x03\xf9\xfb4\xa8\x9bc\xc0\x9c\xc5\xd9\xdc\x5c\
7Q\xbb:d\xb7m\xbf\xa21;\xf7\xc3\xbe=\xfb\
X\xdc\x94\xc0\xdb\xdf\xce\xfb\xad\xc7\xfc_\xf9?$\xc2\
M\x11\xb4#\xd3Y\xe9\xee\xccZ\x10tKbaK\
\x10u\x0b\x80\xa1\x15\x98\x15$\xb4\x1b\x0a\xd2\xcc\xc2\xb4\
e\xe5\x8a\x1b\xb1n\xe3\xba\xbd\x8d\xdd|\xd6Y\xe0e\
K W\xae\x0csS\xb4\x8b\xf2\x87\x91\xea\xd0eQ\
\xac\xd7\x98U\x8d\x9bk\xd1f\xe4\x80\xdfz\xbc\xbf\xa9\
0\xf3>?\xff\x95\x7f]\x0cV\x0c&\x18\x04\xf8<\
\xd0\xa2\xc5\xb8\xe6\x85\xd93\x0b\x8f\xe3\x9c\x09L\x1a\x01\
\x03V\xba\xe9\x96-\xday\xa4;w\xf9\xa5H\xa7\x11\
{\x1bc\xc7A\xf0\x9bY\xb0\xfb\xf5K\xd3\x09\xdb\xd4\
\x1d;\xecj.,\xfc\xfa\xa1\x0b\x8epN\x1dy,\
V-\x00\x8e\x9cr\x15\xea\x1a\xe2p\xdd$\xc2\x06`\
\x92\x86A\x02a\xbf\x0f\x91\x88\x01\xf6+46T`\
\xcb\xba\xd5\x08\xa6\xa5c\xe9\xbau\xfbt\xf8\xe8\xf1\x17\
\xc1\x88\x00\xf1\xe6\x04\xa2\x8a\xe1\xc0Sm\x98\x05 %\
$I\xf8\xa4\x03\xb8\x0eX\xbb0H!\xae\x15j\xea\
j\x10\x8f\xd7\xa1j\xe7&\x5cv\xe5#H\xc6\x130\
\xfdQ Y\x8d\xbf\xbc\xf0<\x0e\xe8\xd5\x0bK\xd6\xaf\
\xdf\xfb\x9c\x9c\xdcb\x8c:\xea4\xa45\x13\x9a\xeab\
PV\x14I\xb6\x91r\x0d\x98\x91\x00\xd2\xd3M\x04\xd3\
\x02\x08\x08\x01m\x0b\xb0\xa3\x90\x95\xe1\x87\xf4\x03\x81\xa0\
D\xfb\x0e\x85\xe8\xd8\xb13&\x1f9io\x9buu\
u\xc8\xc9\xc9\xc1\x94\xa3/FF~\x09N:\xed\x0e\
\xb0\xbb\x07\xb5\xe5\x15\xf8j\xce\x87\xc8\xce\xceF}}\
\xfd?\xbc\x88\x1b6l@\x8f\x1e=\xf0\xd6\xd3\xcf\xc1\
^\xba\x00\xfe\xafg\x81k\x1b\x11o\x93\x89\xdaA\x03\
\xb1\xad\xb1\x09O\x7f\xfc\x15\x06G\x8a\xb0,V\xb1\xdf\
6.=\xff$D\x9b\xeb\xd0\xaf\xcf\xc1\x08K\x89x\
\xb4\x09u{j\xf1\xc5\x82\x85\x88\x84C\x98\xb5\xfc\xc7\
\xb4\xad\xed\x0a\xf2q\xf6QG\xa0\xaa\xae\x0aB3\xa4\
\xeb\xc2o\x0a\xf8|\x01Hax\xf5\xaf\x0d\x8f>K\
\xfa\x02\x08e\xe5\xc2M$p\xc3\xc3\x0f\x03\x00\xee\xbb\
\xeb\x1e\xe4d\x17\xe0\xdb9_\xc0\x8a6\xc2tR\x90\
p Y\xc0\x80\xb7o\xba\xae\x02\x0b\x82\x92\x06\x94a\
\x82%A\x19&\xea\x1b\x5c\x0c;\xea\x00\x18-\xe9\xee\
\xccD n\xc5o\x0b\x0dP\x0b\xa7+kn\xe1z\
P\x08\xfaUY\xba\xd9P\x9d\x95\x9f\x8e\xe3\x0f\xbbp\
\x9f\x01\xa9\xbc<,f\x16\xbd\x06\x0e-\xf4\x89@\xc0\
i\xdba\x95nW2\xff\xf1\xe7\x9f\xc2_\x1e\xfd\x14\
\x96\xcf\x80\xdf/}ia\xe1S\x0e!\xc3\xef\x15\xfb\
0H -d\x22+\xc3\xe4`\x96\xd6\xa1\xa2\x84\x13\
\x19\xf0\xa4\xdb3\xb8\x0ey\xcf}\x89OKg\xee\xdd\
\xb1\xce<\xe6\x22\xb8\xa60d\x04~\xa1@.<\xf6\
C\xd6\x12d\x1a\x90d\xc0/\x01\xb8\x00i\x82O\x0a\
D\x95\xa0TR:;\xd6oN=\xf1\xe4\xf3\x94\x11\
\x11\xc1\xb0A\xd2\x17\x96\xba\xae\x026\x00\xc7\xf9\x01S\
LmM9N\xba\xf8jY\x98\x1d\x0a\x04\x98H\xdb\
@\x8c\x19)\x8b`\xa6\x1b\xc8\xcc4\x10\xc9\x94\x08\x0a\
\x03l\x09@\x09\xe4d\xfaa\x06\x012\x0d7\xe5:\
\x16i\xbdO\xa3999xs\xc6uh[\xe8\x1a\
\x91,\xe9\xb3\x12R\x98\xcc\xdc\xbc\xb3\xd2\x02\xe0\xfe\xa3\
\xe0M\xa5Rx\xf1\xb6{P\xbb\xa7\x0a\xef\xdd~;\
\x92/\xbeb\xa0lS\x88M\xe1\x03\x98X\x0av\x0a\
\xb2\x9d\xd2\x88\x91\x98\xf7\xee\xeb\xce\xda\xed\xd5xd|\
\x1f\x1c\xd4\x7f\xec\xde6Z\x93\x04\x8e\xff\xc3\x95\xa8+\
o\xa6\xee\x03z\xfb\xfc\x8ec8\x8d\x8dH\xa5\x5cg\
\xe7\xa6\xd5NNQ\xf1~?\x17\xa5\x95U\xb8\xf8\x8a\
K\x84\xd5\xb60 \x14H\xda.K? \xfcA\xb0\
4A\xc2\x840\x0c\xc04\xd8\x08G\xdc\xd5\x7f{\xdd\
i;n\x02&\x8d<\x04\x9f\xce\xfd\x1a\x8f<\xfd,\
`D\xe0\x16\xe4\xfb\xdd\xa0a\x90\x9dd\xe8\x94\xc7\x8b\
\xa7\xc9\xcb\xa4R\x0a,\x04\xb4i\x82\x0d\x13\xda\x14\xc4\
\x8eR\x5c\xdb`\x1b\xa6\xd4\x06\x09\x0df(\xcf\xb9\xd6\
\xd2O\xed\xc5\xbfz\xf5\x8b\x95w\x80\x13\x04\xb0\x86O\
\xba\xbb|fS\xad?(1\xe3\xfdU\xfb\x0c(j\
YH}\xfb\x09)\xdb\x8a(\x7f\x90\xed\xc2\x82\x9d\xd1\
\xb0\xafi\xe7\xf2Mh\x9b\x11FN\x1a\xf9\x92l\x8e\
\xadqr&h\x99\xe5\xabw\x95\x82K\xde\xfb\x13#\
\x98\xf5B\x07\xabD\x9d\xcf7r{~\xce\xbau\xd9\
\x85\x81]M'\x9e\xd24q\xbe\x8b\xf5\xeb7\xa0W\
\xaf\x9e\xb02\x06#\xcd_\xdd}G\x19\x1f\x17\x13f\
\x81\x96\x8eb\xed\x00\x82\x98[8'\x04{\xa9\x8f,\
4\x04\xc0l\x88p*P6\x07\x98\xf7\xca\x9b\xaf/\
\x0b\xd6\x07z\x5c\x063\xafK\xe7\xfc\xec\x9aQ\x9d\xd2\
?\xcd\x02\xbe\xedZ\x9c\xa6Wn\xf9n,g]\xf7\
\x1a\x02\x81\xee]\xd7T\x07NV\x82s|!\xa5]\
\xc5\xec\x86\x19.)\xd4\xa5\x08\xb2^2\xb3\x06)\x01\
\x93@\x14c\x04\xfc\xc2\xe8\xd5\xd6Xq@\xc7\xc8[\
]\xfb\xf7n\xdag\x82\x12\x158qJ\x12\xa7\x9d=\
\xf2\xc0\xb5\xbb\xd4\xe1\xacS\xa1\xc2\xcc\xdcT\xd7\xd1\xbd\
f\x06\x17l\xfd\xaa]\xe7\x1e\xaer\xaa\xb1\xadt\xdb\
\xcf\x82w\xfd\x92%\xf8s \x80\x9e'\x9c\x0a1\xea\
_\xfa\xa7\x1f\xb5\x09\x9e{\xe6p]\xba}\xbcL\
$\xbb\x92/\xe0K\x8b\xa6\x9c\xecM\xbbvt\xed\xd4\
\xf3\xab\xca\xf2\xea\xf9t\xd0\x90]\xeb\xdf\x9de\x8f\x1c\
0\x0ek\xe6-F\x9f\x83\x0e\x00\x00|~\xfb\x9d8\
\xb2AB\x05\xfb\x15\x1a/\xbf>\x95\x95\xd5.\xe4\x0f\
RZv\xe1\x96\xd3O\xbd\xf4]\x13\x5c_^\xb9\x07\
\x7f\xfb\xea\xbd}\xfa\xf0\xc7\xd1C\xd0\x9eu[3\x9c\
q\xb6\xf6\x07\x83ZJ\x97\xb5\x01\xd6\x8d^\x1e5\x83\
4@,\xc9q\x03\xbe\xb2\x0eC\x87-w\xb2\xd2\xb7\
\x9d\x98AMD\x84\x17\xef\xbf\x1fY\xc5!#o\xd3\
\xc6\xa3\x1d\xdb\x1e\x06\x90\xc5\x0c\xd6\xac\x98\xd9e\x06C\
h&\xad\x15\xa4\xf6\xf2\xe5\x84\xd2\x86\xce\xcc\xde\x11\xc8\
J\xfb V\x9a\xac0L\x9f\x86fNj\x17\x95`\
\x94\xb0\x16\xad\xda\x83\xb7\x13\xb7T\x1b\xa4\x16rj)\
\xb8\xd9$;\xc5\xea\xc7TH\xa9d\x0a\xb1\xbaf\xa8\
\x94e\xb8\x11\x7fC\xca'\x16\xaf\xfe|\x91\xdbY\xc4\
\xe1\x93\x8c\xac\x0cC\x96\xedI\x0e\xd8Q\x168\xc31\
\x8d\x88\x12`\xb0\x17OL\x02\xf0N\x8a\xec\x90\xd0\xa9\
\xa0\xd9\xd8\xb8f\x87\xf9\xd1\xd0\xee\x8d\x7fU\xb9\x89\xf5\
g\x1c?M\xbd\xfa\xf6\xbb\xd8X\xe1C\x97\x1cU\xb8\
u7\x8e\x8d\x83z*\x9f\xd4\x9eN\xf9=}\xdd\x9b\
6((h\xd7\x85\xd6\xdat\x93\xb1$\x80W\xd6o\
m2\xed\x0c\x1a\xe7\x06\xe4x7i\xef>0?\xb1\
eh&\xbe\xcd\x08\xedk\x15\x99\xb3\x22\x8a<\x7f\xbc\
dK\xb5q\x9a\xe3\x88\xb6\x12Z\x0b\x92 S\x83\x88\
X\xb2`M\x04W\x83Xk\x82f(Rp\xe1\xca\
\xdde\xee\x87\x03\xbb\x84?mh\xb0\xf6\x01\xf0Q\xd7\
.\xc7\xd5\xf7\x1e\x1ez\xe7\x83\x8a\xa9Q\xa2\x0bXH\
\xac+\xd3\xd4\xad\xc0\xecq\xe1\xb5W.M\xcf\xefU\
{\xe7\xf5\xd3~\x16\xbc\xcc\x8c\xcb\x880\xa1\xf7@\x04\
\xda\xb5\xcf\x8b\x1f|\xf0\xb1\x5cZv\xba\x88Z\xdd\x84\
\xedfB\xd8\x06\xfb\x01\x11e\x04\x16.\x1a\xee_\xbe\
\xf607+kW\xce\x80\x95O\xd7v\xe9\xf9\xf1P\
\x14\xed9g\xe4x^\xd4Rk0\xd2\xb1\x04_\xdd\
u\x16\x86t\x1a0\x18\xb5\x957i\xa8L!m\xe2\
v\x9d*\xdb\x8d\x1b\xb7>\xd3'\xbe\xfd\xec\xbd%?\
\xeaGr\xe9Zhpq\x9e\x8c\x9cK$r\x94H\
i\xad\xd9\xcb\xcc\xf18H\xa0\x89\xa0%\xb1\x96\xd2\xd2\
\x86\xd9(\xda\xb6\x7f\x7fY\xcf~\xcf\xf9K\xban\xda\
\xb1|\xadN#\xbf\xf4/\x5c}\x88\xb0\xad\xb3\xb5 \
G\x13\xa0$CK\x97[@G\xdf\xbd\x09\x00\x1c%\
8\xaf`\x1e\xf7\xe9;/\x19o\xae0\x0c\x9f\x82\x90\
h\x88\xdb\x98\xcb\x1a\x03\x88\xc9\xa3;\xe1\xd6\x14B`\
/+\x0atK \x9a\xc1\xfb#\xf5I\xc6\x93\xa86\
}\xac\x1c\xed\x92\xe1k\xe4\x80\x7f\xc3\xec\x8f\x9eV\xc7\
\x0f\x9a\x80@n:B\x06C;\x8ep\x94\xdf\xe72\
\xfb\xfc\xd2\xb6\xc9PQ\x80\x884I\x0d\x22\x87(\xa2\
\x84Lk\xd2\x94\x17\xabp.\x8aYN\xbf\xe1]\xc6\
\xde=\x8a\xbf\x9e{\xe1i\xa7)'p2\xdc4\x8b\
,M\xa6\xa3L\x83\x09\x960\x08\x04\x08\x8f)\x12-\
\xea\x8e\x97\xbaN\x06 \x95+X;\xd2\x01\x90\xb4\x93\
\xdcR\x95\x19J+\x9f\xe3XB%\x80\x1f\x92\x8c\xe7\
\xf9\x81\x08\xb9\xa9\xdc\xa0Sm\x99\xc2\x94Zj\x02C\
\xc3\xa6\xa4\x8btK\x99!&\xc5Yf\xb2\xc1\xf0\x19\
\x09\xe5(a\x0bM)X\xd2\x96\xd4P\xd9d\xab\x12\
\xf3;~\xb2I\xe7\xbc\x8dofnFy\xd7\xac\xa1\
q%\xc7)\xa1\xfc`\xd2\x8efQ\xd6 \xfb\xd7\xc5\
\x22c\xdey`\xda\xf4\x09\x03'\xf3\xae\xc46l\xda\
\xb4\xff\xb4\xf8\x15+V\xe0\xfe\xb6=p\xf0\xa8\x09\xf0\
\xf7\xe9[\xa0\xa6\xbf}\x0bj\xea\xce`\x9b\xd3\x98m\
\xe84\x1fsFf9\x1b\xb2\x11\xa9\xb8\x0f\xc9\xe66\
\x94\x8a\xe5`ge\x0e\xd5F\xef!K\xe7u\xef]\
\xf8@\x0e\xc8\x02\x80\xd7\xff\xf4'\x94}\xf95\x86\x9c\
vf\xbe\xfab\xf6ql%\xf3u\xc8\x94,,P\
\xd9\xd6\x22\xb9\xa7\xf3\xe4%%\x83W\x8d9\xfa\xa8\xd8\
\xe0\x1d)\xbc0\xff\x85\xbd}q\x1c\x01\xcdZ\xba~\
\x0a\x92v\x02\xae\x9f\xe1\xe6E\x92,\x5c\xad\x19Rk\
\x0dv\x14\xd8rLv\xad0\xa7\xdcl\xacm\xbe8\
\x90\xb4;^8\xee\x90k\x8c\x83JvD(\x13\xac\
\xb4\xa9]\xc7\xd4D\xa62\x18\x9a\x5c(\xf6R%4\
\x18\x1e\x9f\xa6\xe7\x9d\xd0\xccPn*\xecZ\x96T\xec\
\xc2\x10\xbe\x04\xaaWZM\xe9\x9d\xb2\xe7;\xae:\x8f\
\x88\xfc\xad\x9a\x84\x97\x85\xa5\xbd\xbf\xb4\x04\xa0\xe1(6\
\x1c7$\xa4\xfc1\x0fn\xcaIa\xc9\xe4\xa9\xfa\xd6\
\xc2.u\xf0!*\x03\xbe\xea\xbbn\xbc\x12\xe9n\x13\
R\xfe4\x04B\x04\xc91\x80\x02L\xdaE\x01m]\
\xdc>\xc7\xf7\x9avLI$\x8d\xa4\x99n6\xcb\xf4\
\xa2\xc6(\x974Xj\x84+eAY\x1d\x8f^\xc4\
\xea\xe6\x83{M\xbc\xf2\xbd\xf7\x7f\xbf\xee\xc0a\xd7\x82\
Y\xb3\x16\xccJ\x05\x11\xb0b\xcd\x19\x14\x7f\x83\x84\xaf\
\xc1+%\xa2\xa0\xb5&\xaf\xdb.\x81]V\xa4\x031\
n\x5c\xf8\x1c3\xce/\x98\xe8\xf1,h\x80\xd8e\xa5\
\x1d\xddl\x03\xb6\xbbo(\xe3\x88\xce\x8d\x88\xc7\x1a6\
uok\xdf\xe3\xbav\xc4`\xd6\x82|:#'\x1c\
^\xb8FL]\xb8\xcb?\xc1 \xa6\x83\x06\xd1{m\
\xb3\x1b>o\x8a%LCHb\x90\xf0\x85\xc2e\xf5\
\x95\xf5M:\xd6\x0c\x00x\xf7\xd3\xf5\x98\xb3\xa2\x02\xe7\
\x9d\xd4\xd6w\xcdM\x1b\x0e\xd6\x0a=\xfd\xec(\xbf_\
%\x1a\x1c\x1dl\x8e\xa2]YS\xe4\x88\xfb\x9f\xbe\xff\
\xb3&\x94\xc4/\xbc\xe0\x84\x9f\xdc}w\xafX\x81\xde\
\x97\x9e\x03t\xe8\x98\xe6\xdeu\xcf\x85\xa8\xaa:\x97-\
7\xa0\xc2\x86vzuZ\xd4\xdc\xa6\xed\xa2XI\x8f\
e\xfe\x9c\x9cr\x1d\xab\x0d\xa6/\xfaj\xa0\xafl\xd7\
PT\xc6\xc7\xd8\xb9\x5cY\xd6\xb0g\xdb\x8cu+t\
\xaf\xcc\x5c\x00\x80L\x0ba\xcfk\xaf \xb7\xff\x98^\
:\x95\x9c\xc4\xda\x91\xae\x19p\x01\x09\x8e\xc7\xfdr\xd3\
\xb6\xa3{\x0e:\xf0U\xee\xd8n\xfd\xcc\xcf\x17\xed\xd3\
\x17G\x08h&v\xa4d\xa1\x14\xac\x0e\x9d\xab\x1a\xfa\
t~\xcbj\xdc\xb2\xdbu\xb4\x9f\x15 \xc1\x08X\x14\
\x0a\xc4\xac.\xa8\xda3\x86\x9b\x92\xf9\xb4c\xeb\xa1\x9c\
\x91}\xdaV\xa4\xffa\xc2)#XI\x83\xb5\xd6P\
>\xd3r\xda\xe4/pk\xcb6y4\xb0Dn\xcb\
\xb7U\x81\xe12\xb1\x96l(\x9d\xdad\xc7\x1ak\x13\
\xd5>\x18\x89h\x12\xf9\xdd\xb3\x10\x0e\xf2\x9afK/\
f\xe5\x8eo%\x0e\xf4\x085\xbc\xf2H\xde\xe9\x8e\xe1\
*\xceNi_\xc4\xc7r/\xcbK\xab\xbc\xf7\xde{\
\xb8Q\xe6\x82\x12\x89:7\x10I\x19B\xda\xb6\xcd\xb0\
I\xb6\xc4\x12Kh\xb25\x93\x82\xeb8\xd8V\xb9e\
y|\xeb\xfa\xa7\x8b\x00.\x020m\x0ap\xc1\xfbo\
\xe1\xca\x9b\xb7\xe5\xcd[\xdex\xfc\xe6\x0a\xf7R\x1b\xb2\
KY\x1d\x0e\x5c]j\x9d\xff\xe0\xf5\xb7\xdcR\x90\xad\
\xa3B$\x19\x82XCS4Q\x15m\xde\xf1\xce\xe3\
\xd6\x1dM[\x06\xdfD\xa2\x09@\x12^F{{\x00\
>\x00\xdb\x00d\x01|*\x0dA\xa8C& \x15\x04\
3$4k\xa5\xb9\x1e\x80c\xef[\xab\xfa\xcf\x8f\xfd\
\x0e\x00*\xe1\x11\x9c\xb6F\xed\xf1\xf4\xa7\x9f\xca\x98\xbf\
*\xde\x13\x82\xc7kfQ\x1f\xd7+\xfe\xf6\xf1\xf8\xf7\
P9\xfcG`\xbb\xb5%\xda\xf2\xba\xfb_AP\xb6\
\xc3\xdaU\xa1>)\xe5Lb-(\xc7\xd7\x10\xed\xd6\
1\xf8\xe6\xec\xd5b\x92#t\xfb\x9d\x15\xce\xa0\x15\xbb\
BC\xf4\xe8)\xfb'th\x113\x14@\xfd\x8d\xd7\
#\xed\xb0\xa9\xfdEu\xc5\x19\xec\xaa\x80\x1b\xf2\xab\xd4\
\xa8!\xd3\xe3\x87\x1dv\xdf\x8b\x99CW\xcf;\xe5P\
\xc5\xfcW\xd0\xdcbL\xba\xe7\xe1O\x07MnW\x9c\
!x\x0a\x0a|;^.\xdb\xfa\xb5\x098\xf3\x1bj\
\xf0\xe6\x9d\xf7\x22\xa1\x81\xb4\xbb\xee\xcbt\x9e}\xfd$\
\xb6\x12Y\xac\x19:\x14\xf9\x0a\x8a\x83\xdaJ\x8e\xa1=\
;\xdaY\xeb\xd6Ln\x1at\xc8\xb6\xc1g\x0e\xb2\x8e\
\xf1O\xc2\x99\xf7x\x87w\x05\x01\x05\x0d\x05\x09\x86\x01\
\x97|\xe5\x8d\x22\xe7\xd5\xe7>\x9a\xbe\x88\xbeK<d\
\xf4\xc8\xc0]\x17\xdc\x90\x1b\x7f\xfd\xb5\x9b\xb8y\xe3y\
\xda\xb2\xd2P^1.\xff\xe0\xd0_\xa4\x1dnTl\
\x90\x82\x862\xcc8\xf7\xe8\xfd\x8a/\xeb\x80W?{\
\xe5\x0d'\xdd\xcf\x88[\x80\xf2\x036\x80j\xc5\xe0\x0c\
\x90\xaa\x89q\xdb\x8a\xe5\xba\x0e\x80\xd1\xb9\x7f?\xa8\x8a\
=\x180 \xb8\xfd\xb3/\x9a_\x8b\xc7\xdda\x04J\
\xf7N\xfd\xe2{?\x9eJ\x91r\xa9k\x82\x83\x85\xcd\
\x09^\x9b\x97[\xbc\xcf\xe4N\x9a4\x09jK)(\
\x1e\xb7\xc8G.+G+\x22,\x0a\x03\x83\x95\x86v\
\x09\x9a-@\xb8`v\xc0\x82Q\x05\xb0?\xb7\x0dG\
\xf2\x0b\xf1~c\x09V]\xbf\x09ZfU?qM\
\xbb\xe7\xce\xbck\x99\xb1\xab\x0e\xd7k\x92\x85\xe55\xee\
\xa1u\x9d\xd5\xd3Y]G\xacwKg\x82\xb5d\xc0\
\x05\xc3%\x07\xe0\xbc\x87\xda!k\xe4\xd1\xba@\x00\x86\
a\xc04}0\x0c\x8fy<\xc7\xb0Q[_\x8f\x9d\
;\x830D3\xb9\xe4\x02-\x14H\xaerQ\x07 \
\xe5&\xf7\x19K\xfbp\x1f(#\x81\xccp6\xfc\x11\
\xc5\x01vq\xebu\x0f\xa2w\xd7L(\xd5\xcc\x90\x0c\
\xe5:pYIT\x1e\x85\x07\xee\xbe\x17\xdf\xcc\xf8\x0c\
A\x9f\x1f\x22\x9c\x06\x99]\x88\xd1(@\x8f?\xdf\x86\
\xcf_\xfa\x00\xa7\x1e[`\x1cu\xe6\x8a\x03S\x0e\xf7\
'[\xeb\xe2|5wl_\xf3\xc9\xe5\x9b\x95\xaf\xc1\
\xc2\xc9\xf5Mn\x8fu{\x02\x13\xde*zf\xfe\xb0\
w^v\x8e\x9b\xea\xd5\xb7\xf8\xa1\xc72\xd6\x10\x83\xf9\
\xda\xdf\xd2\xf5]\x0f\x1e\xc9v\xaaX\x13\x90\x1a\xd2w\
fb\xd2\xd1\xb7\x9b\xdbVm86-\x8e\x92\xc9\xc7\
\xe3\xb2\x8b\x96\xe3\xd2`\x13\xf2_\x7f\x85o\x1d\xe4+\
\xa7O\xa2Or\x11\xe1\x86\xca\xef\x82\x8a\xfa\x1e4\x1c\
W\x8c\x1f\x87\xb3'\x9d\xd8!Q_;\x96\x1d\xc7\xc7\
\xc1P)\x15\x14?MN<_\xc7\x9b\xfb\xead,\
\x9b\xd6\xad;:o\xd8\x01o\xd5e[\xa5\xb9\xc9\xac\
\xbd}\xd1\xec\xd5\xfcs=\xbb\x01\x5cR\x86J\xc5\x8c\
+\xcf\xbf\x10c\xdaIMB \x08\xc2\x01\x07\x1d\x86\
\xdb\xae\xba\xa9\xe6\x8eS\xa7\xbd\x11\xdd\xb0u\xbcb\xb7\
\xb7N\xc6\xd2\xea\xb7l+*\xefR\xd6\xa4X\xb5\xac\
\xa4\x22W\xbbFvq\x17}\xd8\x91Gh\xab\xb1\x0a\
\x96\xed\xa9\x13\xae\xeb\x22\xe58\x88%\x13\x88\xfb\xe3p\
\xc1\xc8\x0b\xa7\xc3X\xfd\xde\xdf\xd0\xbe\xff\x18\xcc\x99\xdd\
\xa43#\xf8\xcc\xb6\xf8\x1d\xc7q\xcf\x10,$\xa3\x85\
a\x9d\xb0\x17\xc4\x09\x8bs\x9am\x7f\xee\xf6\x1a\x85!\
=\xfb\xe1\xaby\xdf\xd9\x15?\xfb\xec3\xb4\xef\xd5\x13\
\x12\x22\x1aN3\x97\xb1c\xbby\xe9\x11\xb8\xae\xdbR\
\xd2\x89\xe1\xb6\xbc\x0a\x82543|\x00v5Tb\
{m9\x80e\xc0\xec\xe9\x00\x80\xcfg^\x92:\xa0\
W\xd6[\x95u\xd1\x89)EE\xcdQ7\xb7\xb4\xbc\
aX\xfb\x8d\xc7o\xdc\xdd\xf1B@\x81\x08\x1a\x06\x19\
\xc8J\xcf\xf2\xddr\xfa\x01\xe6\xd6zSF\xfc\x06\x82\
\xbe\x00\x82\xc1\x08\xa4i\xc2\x04\x81\xfdI^\xbe\x9e\xdd\
U\x15\x96rR\x09p\xc0!h\x0dh\x0dv]\x98\
\x00\x04\xf6U!J\xe3\x1e9Hy\xd3w\xd4\xf9\xc7\
\x04$\x82*\xe5\x9d5\x85\x00\xb3\x0bG9\x00\x92\xb8\
\xe6\xe6\x1bq\xed-7\xed\xd3\xc6\xeb\x00N3\x04\xb6\
E\xb3\xb0j}]\xc7D\xca>J\x83\xcc\x90a[\
E%\x19\x1f]q\xc3\x985\xaf\xcd\x9f\xfd^\xd3n\
\xf7p[Q\xde\xd6=\xd6\xc1\x7f\xfa\x80\xfaD\x0dg\
\xc5\xa8\xaa*\x14\x14\x14\xec\xd3\xde\xdd\xbf\xff=\x9ce\
+`8:G75N\x04l\xbf\x93\x9f\xdfP7\
\xf8\xa0w.\xb9\xf3\x92\x0d\xcf<\xf49\xae:u\xc2\
>\xa0\x9f6m\x1a\xa6\xd5\xe4`\xec\xb8q8b\xf0\
\xe1\xb8\xae\x97G\x9e\xbd\xe0\xddO\xb0e\xcbz\x9c\xf1\
\xfe{\x81\xf8\xb5w\x1e\xa3\x9c\xe6\x22\x06C\x15\x15.\
\xca8|\xea\xacp\xf5\x8e\xec\x8a\xb2\x9d\xa7(\xa7a\
4vo\xeb\x12_\xb1ll\xc1\xf9\xb7\xbc\xb2\xf1\x93\
\xd7\xd5\xf5\xe3\xc6\xe1\xfe\x993\xe1x\xc5\x06\xe0\xb2\xc7\
\x1f\xec\xc2%\xa4\xa2D\xc8\xc4\xec\xdd\x0a\x80\xc2'\xaf\
\xbe\x8a\xc3N>\x19\x8f<\xff\x06|ZF]\xed\xd5\
\xf9VZ\x93kY\x94\x8c%\xa1\xb4\x82\xcb\x0c\x05b\
\x16B\xe7d\xe5\x18\xf6\xc1\x87\xa2\xb9a7R\xb1$\
\x5c\xd7\x81J\xa4 \x13q$++\xc9.\xdd\xa9W\
m\xdf\xe3F\xd0\xe8\x9d\xc4JW\xcd\xc6\xdd\x97\x0f\xc1\
-o\x84+\xdbd\xda\xcf6Fu\x0f\xe5\x8a\x03\x89\
%Z\x19a\x88$\xc0\x80\xed\xb8FS3\xf7\xe8\x9f\
\xd7\x14n\xb4\x8c\xf8\xb4i\xd3\xf6)\x01\xba\xaa\xb6\x0a\
&;\xb5\xbd{\x94|\xc81\xcb\xf7\xc2\x93\xefP\x97\
\xee\x1dX\xe7+(v\xa1\x95\x06\xb4j\xf1\xf2\xed\x9f\
\x97\xe2\x81\x87\x1f\xc3\x86\xad\xd5\x98|`z\xcd\xe2u\
\xf5\xdbSM\xc2u\x95\xce\xa8\xae\xb5\x07\x89\x1c\xbc\xe6\
\xb1\xd3\x0ahV*#;3\xd8w\xf0\x94\x93>\xd9\
e\xd6\xb1f\x83\xe1\xb9\xc0534R h\x19\x09\
\x19\xcd\x85\x81\xe2Y#\xa6\x1d\xbea\xf6\xf37\x00\xca\
\x01\xc8\x85V6\x14\x14\x07\x01@\xfc|:\x0fk\x17\
\xec\x02\x8ev\x00\xa1@\xe4xm\xc1\xfd\xc9{z\x1e\
\xd4\x13/_p\xb8\xe81\xf6\xa9\x01)W\x0d\xd6\x0a\
\x94\x9fk-\xe9\xdbN,\xbc\xf2\xfao\xf4\xd0^\xfe\
U\xa5U\xd1\xe5MI9\xbe\xba\x1e\x83\x96m7G\
\xcf\xf8\xeb;\xab&\x8fyH\x7f2{_\x86\x1dS\
\x12\x8c\xe7\x9e\x02FO\xccT\x89D\x16\x1c\x0d73\
}M<\xbfh\xe1\xe2\x97^\x85Z\xbc\x1eD\x13\xf7\
\xb9\xe7\x87\xe5Y?\xfe\xf8c\xfc\xe9O\x7f\xc2\x929\
\x0b\xb1\xf1\xb8\xeb\xd0\xf7\x84s\x8b\x9d\x9a\x8a\xf1\xac\x9c\
\xb0\x9b\x1e\xa9sz\xf7\x9d\xdd;\xf7\x8a\xa6\xe6#\xd7\
$x\xe6g\xdf\xaa\xd2\xe6\x03tsS\x81^\xbdz\
Jj\xc3\xec\x19F\xbb\xfcz3\xee\x19Wlb(\
f\xb8`H\xcf\xe6\xa3\x8c`\xd8\xa1\x0d+qyQ\
:\x00\xc2\xc7\x97]\x81/\xae\xb8\x19\x8f\xb0c\xac\x1a\
0\xec@\xd7u\x0b\xb4\x06\x5c\x92\x96\x11\x0e6\xa6\x05\
\xfc\xecz\x01\xbap\xb5\x0e8U\x15\xa37L\xff\x1b\
iG\xb9\xae\xd6\xc2U\xec\xa9\x17Z\xc3\xf5tV3\
#\x92\xbda\xc8\x91]\xe6\x84j\x12\xdf\x11\x5c\xbf\xf3\
\xad@\xef\x12\x05'Q\xb54\xe9\xcby0\xa1D\xb6\
f\xee!X\xb6D\xa3yXf\xade,\xe5\x1e\x8c\
\xfc\xecW\x0dr\xb7,[\xbco!\x86E\xd5\xf5\xa8\
\xd9\xb8 u\xed]\x0fmuc\xb1\xb4\xcc\x80$;\
\x16en\xc9\xe2\xd0\xae\xcd`\x97\x19\x0e\xb8\xe5\x88\xf8\
C\xaf\xea\xb5W]\x86\xa9SO\xc6\x9e\xf51\x0e\xfb\
swI\x98\xcd \xceJ\xa4\xb8s\xc2\xc80\x22P\
H&('\xee\x0a\xff\x92\xed\xc6E\xccZBk\
\xd2Z{u\xc4\xe19^4\xb3L\xf3s\xe9\xb8\xae\
TY/\x0f\xdb\xe0\xc4\xafc\xce\xd0\xcc\xc2\x85V.\
\x143Bh\xe1\x10\xf9\x19I)\x07q\x87\xc9b\x8b\
X(\x805\xb3n\x09\xd9\xdb\x8f\xdc\xfd\xf8\xcbX\xba\
\xb2\x1a\xc7]\xf8\xd7\xbc\xe6x\xeaX\xc5\x22\xec\x93\x8e\
\xd3\xb9\xc4\xfc\xe6\xb8^\xcd\xeb\x176v\xc2)gw\
+\x9f}\xf8\xbcw\x9a-wT\xd2Ehk\x85s\
\xf8\xd9\xe7w\xff\xbc\xb0\xff\xb0\x8d\x87\x1fy\xcc>\xed\
uo\xdb\x15\xbd\x99\xb1x\xd4\x84,\xe1:\x06\x18P\
\x86\xbf\x22c\xd8\xe0\xd2\xdd\xdb\xd6\xe1\xe2;\xae\xfa\xf9\
A\x00\xf8\xe8\xe1?a\xc3W_c\x18\xb3\xd8\xd3c\
\xe8d\xd7It\xd3\x04X\x1d\xdam\xf4\xf5\x1d\xf4\xd9\
\x8c!\xcf\xf0\xc3\x03/\xb0_\x9cr\xdc{5e\x15\
\xc7(\xd5\xdcC\x97\xed\x1aP\xfd\xe5\xe7c\xee{\xe5\
\xb5\xf7\xce}\xd5+C\xab\xc9\x05\x93\x86b\x05f\x82\
\xb6-_Z\xc5\xce\xaer\xdb\xe6\x94\x12$\xa9\xd5\x95\
\x10O\xd2\xca\x0e}\xba\xdb5e\x17(\xc7-R\xd2\
`'=\xb2\xc5\xae\xaf\xa8uSI\xb8Z\xc1%\x82\
J\xa5\x82X\xbdn\xaa\xc5\xee\xe1\xda+\xe1\x01E\x04\
M\x1e\xc0=C\xa90D\xd0\xffBV\xff.\xf3D\
E\xb3\xda\xcb\xf0\xb1r\xd9b44\xc6\xe0\x06\xba\xb8\
\x06\xd7|d\x9a\xee\x9f\x08\xee6\x0d\x05b\xc5`\x97\
\xc1\x8a\x89\x19\xf1\x84\xdb\xb7\xbe\xc9\xed\xb1`\xde\x97\x10\
\xbc\xef\xce\x15\x14\x02\x87\x8c;\x0a;V/K\xd4\xd4\
\xee\xa8\xde%m\xbd=\xd5\x00W\xb9\xb0]\x87\x95\xeb\
\x00\xec\xb6\xb8\xa75R\xfb\x01\xf0\xa4I\x93\x10\xb3\x5c\
T\xa7\x5c\xa4,%\x19\x8e\x00\xdb0\xe0*\xc3\x0c\x02\
\xae\x03(E\xe42\xb4\xad\xd9U\xf0)&\xbff\xe9\
g2} \xc3G\xd2\xf0C\x9aA\x92FD\x833\
\x12\xb6cd\xb5\x018\x9e\x02\x1c\x07`\x07\xd0^\xb1\
\x12\x07\xf8\xd9\xda\xbd\x00\xa0\x1d\x0d\xd7fh\xc7&v\
\x15\xd8U\x80\xf2\xce\xc8?\x941\xb7\xdf\x8e\x9c\x88\x0f\
\xef>u\x11\xd6m\x8d\xf6J\xd8\x18\xa9m\xd7(L\
s6v\xcb\x89\xce}\xabv\xa8s\xd2\xd9\xa7c\xc2\
)s\xdd\x8e\x05X\x1a\x12\xceF\xb8\x0a\xd5\x8d8h\
g,k\xd8\x0b\xab\xee\x80)R8f\xe2\xa0\xef\x9e\
\x9f2\xd1\x04\x90\xeb(\xe92\xc3%\x82K\x02\xcaM\
\x09\xf3g\xfb\xdf*\xc5\xc5\x05\xd8z\xd7CH\x9d\
~~\x9eS\xb9g\xa2k\xeb\x1c'\x1c\x8a\xeb\x8e\xdd\
f\xa5\xefp\xcb\x86<\xb9\x0b\xcf^~\x19\xec\xae%\
\x9buF\xdaJWK\xa5\x9b\x1a;`\xd7\xf6#\xaf\
\x7f\xf8\xd1p\xc7C\xc6\xe0\xc9k\xae\xd9\xeb#\xf0\x22\
\xc4\x19\xbc\xa7\xb2+o\xdc\xf4'7\xe9\xbc\x8f\x04O\
wS\xc6tM\xc6t\x1b\xa9\x0f\x92U\xdb\x9ft\x9c\
\xf8 \xdb\x10H\x15\xe7.u\x0as\x9e\x9f\xfe\xd9W\
q\xc7J\xc2ef\x17\x80\x0bM\x0e\xe9\x90#u\xb6\
#T\xb6C*\xdb%7\xdb\x81\x9b\xed\xb2\xf7\xa3\x94\
\x9d\xae\x92\xf1\xb4\xf8\xb6MH\xd6\xd5\xeck\xcc\xad\xd8\
\xba\x0aF0\x13y\x9d\x07\xd9F\xed\xa6W\xc9,p\
m\x9b\xafW,z\x0a\xcd\xba\xc5\xbf\x0c\xc7\xe6\xb4\xba\
f\xeb\xc4\x81CG,D,Uc\x98\xb5p\x1d\xef\
\x14\x9f\xd4\x1ayy9\x90B\xb0\x14\x06\x13{\x0ei\
/^\x81\xa1\x94\xf2X\x16Y\xfd\xe4\xce5\xfc\xe0\xa3\
\xb1p\xd5\x168\x99\x11\x8a[\xa5\xed\x19N\xba`\xdb\
\x0a\x86\xd4\x0e)*]\xad\x1c\x80Mh\xd6\x86\xb4c\
\x96_$^'a6\xc2a\xc1\x0a\xd0\xc4\x10\xd0\xa4\
\xc8%h\xd7\xe7\x0f \x9a\x88a\xe3\xee\xd7\x1f\x02@\
\xd0\xae\x032<\x00+e\xc31[\xab\xab\xff}Q\
\xda\x85W\x1d\xc0\x05k\x1b\xd0\x8eW\xad\x07?\xa6s\
\xe8\x16\xcf\xc5\xa7s\xaa1\xf3\xdbG2\x1a\x9b\xac\x93\
\x95\x969Bj\xce\xcd\x0e.<\xf3\xe8\xb4E=F\
O&\x00\xf4\xc5\xeb\xd7qj\xeb'\x9b\xfa\x9c\xb9\xf4\
\xfd\xa4\xcd\xfd\x13\x09'X]\xab\x8e9\xb9\xf8\x8cY\
l\x1b\xbb6m\xd8\xb3\xb7M+U\x83\xcf\x88\xb8h\
\xd8\x811!\x5c\xcd\xa4\xe0\xe8DFl\xd1\xa2\xec\x8c\
^=\x9b.=d\x02\x1e\xff\xe6\x8b\xbd\xd7\xffT\xc0\
\xd0\x8b\xb7\xdd\x8d\xe5\xa8E\xf6\x82\x85\x93\x1c7:\x84\
\xc1\xd0\xe9Y\xbb\xb2z\x0f|\xf3\xec\xbb~\xf7\xfdj\
\x8f\xcdO\xcc\x9b\xfb|mS\xedh\xa5\xedb\xec)\
\x1f\x11]\xb6\xe8\xa0HA\xe8\x8bo\xcd\x1c\x18,\xc0\
\xac\xe1\xc2\xab0\xc5q65'\x0a\x99Z\xec\xb6Z\
BK\x1f\xb4\xf0\xc2p\xdd`\xa0\xc9-\xcc\x9d\x93\xc8\
+|F\xd7\xd7\xcc?o\xe28\xb69\x01\x874;\
`()RN~\xde\x1c\x15\xad\xd9\xa0\x94\xcbJC\
(f\xb8Lp\x19p\x18`\x01\x1f\x11\xcf\xa5\x9a*\
\xb6\xe2\xce\x8f\x99y\x9cD\x03\xaa\xb7\xafFnI/\
\xcb\x8d\xee|\xc3\x08\xe55&\x93\xea\x0aW\xe9\x91\xe4\
\xa5\x19i\x06|M1>\xc4\xef\xa3>\xdb\xd7-\xff\
\xe6\x87'\xe5oVn\xdc\xe7\xbf\xeb\x121\xb8pa\
i\x07Z;\xe0\xd6\x85\xdf;\xc1\xdfM\xf4Yg]\
\x84\x1c\xa3\x1e\xdf\xcc\xdb\x8e\xe6\x86\xdc\xe2\x84\xa5\xba1\
\x93 \xd6M>\xbf\xb1\xccOp\xe2)\x9b\x14|`\
v\x0d+Y\x9f\x8c\xef\xfe\xe4\xd1\x03\xcf\x8bo\x99\xf7\
\x97\xef:B-\x8e\x1b\x09\xa0\x01@\xf9zh`!\
\xccp\x16\xb4p\x99\x84\x0d\x08\x05W\xdb\x88*\xc0\xd2\
?\x1f\x1df\xbb\x16R\x8e\x82V\xb6\xb7\x83\xb3&n\
\xd9\xba\xb5\xde7\xd3Y\xa41f<p\x0fz\x1c|\
q\x97T\x8aFC\xb8&\x01\xa9-e\xa2\xc7\xb0\x0b\
\xe27\x0b\xfa\x83\x94\xc40\x14\xc3']-}\xba\xaf\
\xa1I\xd9\xaceM#\x8d\x89\xe6\x17\x0d\xf8\xf0\xdd\xfb\
v\xf5h\xdfio\x9b\x0f\x9ew>N\xfb\xe8e8\
\x81p\x83\xd8f\xda\xb0\x12p\xad\xa6~N\xf9\xee\xa1\
fa\xfa\x0e\xaeS\xb8\xe3\xf4Kp\xc7\xcbO\xec\xbd\
g\xd1\x82\x05\x186b\x04\x86\x16\xf8\xd1\xae \x0b\xef\
\xae\xaeD\xd3\x17\xdf\xe0\xb0S\xcf\xcd\x8a~\xf0\xe9x\
\xa5\xa8\x90}\x06\x10m\xc8\xaa\x7f\xf4\x9es\xef}\xf8\
\xce8\xb3 fb\xad\xb4f\x9f\xcc\xb2%\x07\xb5A\
P\xf5u\xdd\xfd\xe5\xe5\x87\x96\xf7<rv\xf7D\xbd\
\xb5\xb9\x85J\xcc\xf5jX\xc2\x0d\x87bna\xc9b\
rS\x15pR\x01U\x1f\xed\xc7\x96\xddE\x0b&'\
'\xb3\xc1\xc9+xR\x15\x16=\xfb\xf1\x86\x86Rg\
\xe7V\xbeu\xc8\x81hN5\xc1\x86\x82\xc3\x80\x122\
\xc1\xd9\x05o\x04\xf3\xba\xbf\xfa\xde\xb2\xaf\x9d\x08@\x09\
x\xaa\xc3\x0f\x84\xb1\xa5\x9e\xb33\xd3\xf7O\xaf\xea\xc6\
kP\xb3m\x19\xda\xf6\x19e\x1b\xce\xf6\x8fL\x99^\
\x1e\x8b\xa9\xablW\x1d\x0d\x08\x93H*\xdbrs\xa3\
q:\xaf[\xdf\x81\xab\xfa\x0d\x1f[\xbfg\xfb\x06\xd4\
V\xef?\xd2\x09\xf0\xcel\xcae(\x97=\xee[\xaf\
[\x0c\x80\xbb\xe4wBz^\x1aB!?\xcaw\xef\
\xc1+\x1f\x97\xe1\xa4\x89\x1dB\x9f,\xae=\xcdq\xe4\
@\xad\x1d\x0e\x04TMnN\xce\xe2-\xe5\xf7\xe8L\
w1\x94\xd6\x0c8`X\xc4\x00\xafx\xb9\x00\xdd\x8a\
\x062)\xf2\xbe\xe8\xdaaM\x0a\xa0\x184\x92\x88\xbb\
6*cm\x01\xa1\xc1`bv\xc1ZA\xb1\x89z\
\x0dH\xd1\x1e\xc7\x8e\x1c\x08\xc9.\xb4!\x90\x82\x0d\xa9\
-XF\x12\xd5M\x16z\xf5\xec\x05GiX\xaew\
\xa0`\xed\x00\xae\x0b\xd7\x05\x80\x00jj\xbe+x=\
\xf6\xa4\x9b\xb1n}\x12G_rc`\xc1\xfc\xba\x93\
\xb52\xda\x08v](\x1fG-=B+\x1a\xa3I\
\x81\x85w\xd8dW\x81\x92\x9aMC9B\x92\x8a\xc6\
T\xa4\xb2\x01G_p\xfa\xe9\xdf\xf6\x1e0\xb4\xb6\xae\
)\x8a\xda\xe6Z|\xb2}=\xfev\xd0\xc1H\x80\xeb\
}k\xb6.#+Q\xa2\xab\xea\x8a\xcd\xcd['\xaf\
\x88d\xce\xce\x19\xd7\xab2\xcb\x1f\xc11\xd7\x5c\x83\x93\
3\x04z\xe7\x0d\xc4\xe81m\xb1\xbe\xef\x10,\xa9\xb6\
e\xb4\xa6\x8a\x89H_\x03 \xb3\xa1y,\xbb\xf6\x18\
-\x0c\x8f\x878\x19-\xd2J]\xce\xda\xb3\xfb\xb7\x9e\
Y\xd8!h\x1fA\xfb\x0d\xb0N\x91\xdeS:N\xad\
_\xfd\xde\x0e\xdb^\xe0\x92\xc7\x86\xe7\x90\x06A\xc1\x8a\
\x84+vu\xec\xf9x\xb8m\xce\xfc\xb0b\x9fX>\
\xef`\xb1\xbd\xe2\x0evS\x9dT\xc26l\xa5\xcdX\
,\x91\x18_\x0c\xfex\x07\xe3\xf7\x97\x9c\x8f\x98\x93\x80\
C\x1a\x8e\xd0P\xc4\xa4\xd85\x22\xc10\x8f\xee\xd8\x89\
\xe3\xc9&\xd6\xac\xe0ze\x18\xe1B\xc0e\xf6XP\
\x03a\xf8M\xf3\xa7\xf9\x81\x95\x15C\xaa\xb1\x16>\x11\
\xd4U<dYq\xda\x82\xdfE\xe3Xf;\xfa,\
\xcd\xba;\x83|\xf1\x04\x1d\x126\x8d\x09\xa9\x15\xb3\xde\
2\xd2\xdb\xff]\xb2\x05\xe5*\xd8\xb6\x0b\xa5\xe0\x11(\
\xb3\x82I\xd2\x18\xd4)\x10\x08\x06\xc2\xf0g\x19\x08\x87\
Lr$\xcc\xfe\xdd\xb3\xf2?]PyJ\xdc\x92\xe7\
k\xb8\xe9\x02n<?\xc7\xf7~\xbb4\xb7tE\xdd\
&\x04B\xd2\xdb\xc0\xc9\x86\x04\x10\x0ef\x06\xfb\xb6k\
\x13d\xb6\x85\xa1\x04Zlu\xd0\x10\x80`\xb8ZS\
\x9dm\xba\x95M*\x05j)sJ\x1a\xac\x1c\x80\xb5\
\xbcb|WYTR$X1\xa4\xd2P\x86\x0f1\
\x06\xc8\xb2Q\x91\xb4Q\xdb\x98\xd4\x80\xa5]\xad\xe1h\
\x0d\xa5\x15H+h\xd7\x06\x94W\x0e7\x1e\x8f\x03\x00\
\xbat\xe9\x02S\xa6\xf0\xf5\xeb@\xa7QN\xc7d\xca\
\x18\xad\x15\x87\xc8r\xe3\x02\xca\x91,,\xd1R\xbb\xc9\
\xab\xb7\xa6\xc1\x82\x88I0\xd8\x12\xd0\x14b\xa6@y\
\x0d\x0en\x9fW\xd0\xf5\xdd\x19\x7f\xaa\xcd\x0e\xa5\x01\x00\
\xf2z\xf5Be}3r\xff\xfch\x0d>\xfd\xfae\
j\xae\x1e\x03'\x91/V.;<\x02\xde\xc1#F\
?\x9bw\xd3M\x15\xdd{t\xd7kRI\x1c-\x1c\
\xf4{g\x05-\xbf\xf8\xa2\x9c+\x8e;\xf6P3\x18\
\x89N\xeb\xd2}fN\xc26\x1a\x9e\xfd\xeb(\xad\xdd\
\xb6\xac\x5cfJ%X\x92\xa5}fK\x22\x83\x17\xa1\
\xa6[\x5c\xf2Z\x80X\xc3\xcf\x10!]U\xd3W\xec\
)=(>\xf1\x84\xa5\xee\x97_8n<\x0aG\x03\
\xc2\xdbN\x92u5uU\xb9AY\xa5\xac(v\xa7\
\xfb\xdfj\x93\x95\x9d\x86\xca\xea\xbb8\x9a\xcc\xa1\x9de\
\xe7\xfa;\x181\xa7m\xbb\xc7\xc7\x8d\x1e\xd90\xb0}\
\x07\xd8\xd2\x84\xcd\xde\x92)0\x14kQ\x1f/\x0f&\
C~C\x9b\xe9\x04\xed\xb6\xd0:x\xf5\xee\xb5\xabI\
khf\xb6\x0c\xbf_\xff]\x82\xeb\xaamK\xd0\xb6\
}7t\xa2\xf7\x90\xa0\xb6e\x99i\xd1\xa7RVp\
I\x22\xa9.\xb4\x14\x8f\xb5l\xe4\xd5\xc5\xe4\x05\x99%\
\x83Vf\xe4\xe4l\xac\x8d\x95\xc2M\xee\x1f\xc7\x96\xe5\
\x22\x91l\xf9\xd4\xb2\x0ba\x08\xcen\xd3\xbe_c\xb0\
\xfd\xa5\x8d\x1a\x92\x1b\x89U\x1d\xc9h\xca\xed\x16\xb3\x9a\
\x07:Z\x94h\xe2t\x08\x8d\xbcLZ\x9c'\xe2\xaf\
l\xa3\xe1\xf1\xed;\xe6\x22\xb3\x1bCk\x17\x0cW\xa5\
ef\x86\x8b;\x1d|AB\xc8&C@*A\xcc\
\xad\xe9|\xa4!\xa0\xa1]7\xc4\xd5\x95\xf3\x98g\xbf\
-3\x0fdOgu9I\xd2\xd8\xd1\x98>\xcc\xe2\
\xf1\x89\xb5edJ\x22\x16\x82\xe1Y\x0c5\x84\x14\x22\
\x9cf\xc4:w\x89.\xea\xd6\xa7Oy\xc5\xe6\xddH\
Z\x00;\x16\xd8\xb1\x01\xdb\xf2j\xdc\x01{w\xe0\xad\
\xf20\xa0Rb\xeau\x019\xeb\x8b\xe4Q\x9a\xd1M\
)\xa6\xcc\x80\xb3\xb3c\xb1\xff\x0fB\x89\x1a\x10Z\x0a\
\x8eq\x0b\xa3\xa7 f\x82\xa3Th{\x95\xbe\xd8Q\
b\x5cCT\x17n\xaf\xd0SO;\xe5\x94\xd5C\x0e\
\x99\x1c\xdf\xb8\xc3c\xec\xbc\xe6w7\xe1\x81\xd3N`\
c\xd8\xc0\xc5i\x89\xc6\x19r\xd7\xf638\xde\x98C\
\x0b\x16\x5c\xa6v\xed\xee\xbbu\xfe\xdc\xd7\xd22\xb36\
\xd8{\xe2\xae/\x14\x92\x1b\xfbt\xee\x18hJ\x9e\x14\
p0N\x1bf\x0dV\xaf\xbe\xb5\xae\xc9N\xba\xa9\x86\
I\xac\x14Tz \xcemK\xfel\xfa\xcd\x85:`\
B\x11\x93fOO\x84\x17\x1a\x06v]\xa1*\xca\x0e\
\xe5\x86\xe6\x0b\xb4\x9b0\xd5\x9e\xed\x87\xd3\xaa\xb9\x9f\xd8\
\xd9\x19\xeb\xadx\x13\x22ZA(\x0d\x97\x1cHr(\
\x11\x8f\xe3\xb6\xdf]\x87;.\xbd\xc8\x12\x07\x0c|\xd9\
\xfezN\xben\xb6\xae\xd5\xf1x6\xef\xd8y!\x18\
u\x81\x9e\xbd^\xdc\xb0\xbb4\xd1\xc9\xce\x83\xad\xb5g\
\x85\x00\x07\xdd\x86\xfa\xa3\x93$\xda*\xa5\x5c\xd6\xaa\xa5\
/\x0c\xa5\x99\xb5\x97\x08`\xb2a\xeeV>\xdf\x07\x8e\
\xebV\xfc,C{Y\xa9\xc7\x22h\x06\x9a\x91SP\
\x92\xaa)]1\xafC\xf7\x81\x1b\xeb\x9a\xd4\xc1\x96\xc3\
\xe7'\x13N_A\xfe\xeb\x5c\xbb\xf4\x86\x8e]\x87\xd7\
\x86\x84\x85U+\x97\xfd\xa8\x9dd\xcaE4\xaa`i\
\xe9\x9d9I:\xf5)\xdf\xc0\xfa\x14\xfay\x0ck\x02\
\x8a\x894\x8b\x00\xc0&\xd8\x85\x94\x14\xcfJ\xc3\xa2\xc2\
Ly[b\xde\x07[f\xcd\xfb\x00\x08\x1d\x07\xa5\xbc\
p;\x22[%\x1d\xf2o\xab\x96\xa7\x12`\xa0\xa52\
\x83g\xd8\xf2j\x1c03\xb3\xe5\xf8Uc3\x13\xd1\
\xdb\x94>\x04\xa6NA\xb8\x86\xdb\x10g\xdf\xbcM8\
\x01\x84c\x89@\xdc\x12\xf2D\xc2+\xca\xa7\x09\x94\x1e\
q7\x97\x84\xed\x1b\x02\x15(G\xd2A\xd4\xe7\xc2V\
\x06`1\xb1\xb2\xe18\x04 \x89D\x22\x01\x00\xf0m\
z\x16\xf1\x9c\xdfa\xf6\xcc\xb2\xe2T*0Z+\x95\
\xae\xb4\x1bW\x01\xf5QI\xd6\xda\xe9\xaf\xbf]\x90\x04\
\xea\xd1\xf2\x12\x01H\x01\xc8\x07\x90\x0e\xe6\x0e\x94\xdew\
OG\xd2\xe1\x03\x14t\xfa\x8e*\x1cj\x06\xf2\x9f5\
K\x86o\xf2\xef\xf1\x8a\xa2\xe4\x17\x16bI\xdc\xc17\
m\x16V_\xd9\xb7\xef\x13\x99V*_\x96\x97M\xd0\
*\x95\xcd;\xb7M\xe1\xb2\xd2Q\x0e#\xc1\xac5\x9a\
Y0\xeb\x00\xc09\xec\xb2\xc1\x0a\xf9:\x96<\x85a\
Vr\xca\xed\xc4\xd0\xb0M\xb1\xbc23\xed\xb5\x9a9\
\x0b6|\x03\xecc\x12\xba\x00\xc0}\x00\x8e'B\xdb\
\xde\xf9\xf5F\xb3\x98\xc0\x16w\xd3\xe55C(s\xf3\
\x01\xa3v\xecX?C\x10\x1c\xd7s\xcd\xbb\xae\xf26\
\x0c\xad0t\xcc!\xa8\xda\xb4\x0d\x86\xa3c\xa2{\xe7\
G\x9d\xb5\x1b\xd2t<z\x81\x8e\xc7\x8b\xd5\xb6\xed\xd7\
8\xc9TS\xc6\x01C\xdeLF\x1b`k\x0d\x05\x82\
\xd6*\xa0\xab\xaa\xc7)\xa2Q\x1a`\xe5\xad\x88\x17\xc0\
\xc3^\xb0\xa2\x06K\xe5\xf3\x7fk\xa7g\xcc\xf7;\xf6\
\xcf\x03\xb8U\x9cT\x02:\x19Efz&ta\xe7\
\xda4\xb9\xe7\xbd`\xa8\xed\xbcTRO\xd1\x8e5F\
\x1arb\xf3\x9e\x1d\x1f\x223\xaby\x7f\xf7\xdb\xb6\x83\
\xa4\x10\x94t\xa4\xd4\xca2\x00\xf21\x09\x9f\x17\xe8\xe6\
\x85\x90ifH\xc0\x91\x12;M\xbf\xdc\x95\x9fi~\
[\xe8\x8f\xbf\xd2\xb9y\xeb\xa6em\xfa\xf2\x8eY\x1f\
\xa1c\xdfk\x90r\x94\xa1\xec\xa4O\x0b\xd7\xa7I\x0a\
&!\xbc\xd2\xa0\xd4Z\x0e\xc5+\x05$Z\xa3,\xb5\
`\x9d\xf2\x03\x80\x84C\xe4\xa6$A\x9a\x8a\x10v\x89\
\x8c\xd6\xf2\xb0\xba\xe5\xd4G-\xd1w\x8a\x18~\x8d<\
W[>\x11$D\x93.\xfc\x86\x80\xedJ\xc9p\xa4\
\xa6\xb8\x88\xa6\xbc\xf2\xd8\x96\xe5\x1d\xde\xcdH>\xfc[\
\xef\x10\xd1\xb4\xb3\x8eP\x9aGhfH\x89RK\xb9\
o\xc4\xda?\x99\x1c2\xf4\x02\x1cu\xd8\x18\x8f\xc0\x1a\
\x9e\xd3\xa5\xbe\xbe\x1e\xb5;b\xe80)\x87\x9d\xc4\xca\
\x19\xccy\xc7B\xc8\x03\xe3V\xaa\xdb\x86\xd5\xa5'\xf9\
V\xbft\xaf\xcby\xf6>)\xfa\x00\xd0\x95W]8\
\xb8\xef\x8d\x99\x86\x7f\x9b,/\x9f\xa8\xdddw(;\
\x8f\xbd\xd3\xfa\xde\xd2\x1a,\x08\xf0\xc9R\xb2\xf5\x97 \
c\x96\x95l\xbc\x8a\x0d\xc7t\xfd\xa4\x9a\xe0\xbc\xbf\xb3\
a\xf7\xd6\xce\xc3\xfa\xe3\xaa\xe1\xe3\xb1\xf6\x8c\xf3\xa1\xeb\
M\x18\xb6\x05\xd19\x84\xfb\x22\x01\xe4\x0c\xe8\x88\xf2\xe6\
\x86\x95\x99\x82?4\x0d}\x0dT*\xd2\xb4u\xe3\xc9\
\xcf\x15\xe7|M{\xea\x91N\xae\x11 \x17\x0e\x93_\
\xdaI\xa9\x9d\xc0^n\xe17\xee\xbc\x13m\xcd\xd2z\
cP\xaf\x07\x9c\x0d\x9b2t\x22y\xbaJ%:\xa5\
JK\xef\xa8\xd9S\x95\x8cv\xe8\xf4\xa9\xad\x85\xd4\xac\
\xa0\x98\xa1\xc1^TZ\x0bp5\x01\x8a\xb8\xc5m\x0d\
(\xcf\x92\x15v\x1cG\xd8\xb6\xfd\x8fU)\xaa\xae.\
\x03\x00\xe4uIG\xb41\xac\x02\x91P\xe5\x88\xfe\xc1\
g7\xae\xad\xf9\x0c\x9a;\x8aP0\x1c\x8c\x04\xf7\x0b\
\xe0\x94E0\x85\xdfuR\xcdK\x94\xe5\xfc\x05\xcc\x81\
\xd6\x226\xdf\xc5@j\x0a\xa5\x85\x9b\xf22\x82\xcb\xb3\
\xb2\x02\xcb\x86u\x89\x94\x8f\x7fq\x86=\xf3\xf4\x8b\xb1\
\xfe\x95g1}\xc6;@j\x1d\xa2\xcd\x99en\xb4\
\xf9U\x05*\x22\x88V\x8d\xc1k\x87[KI\xb5\xae\
2\x13k[\x90S\xf3M\x10\x00t\x83\xedD\xb7\xbc\
\xef\x8a\xc0&\x06)\x10\x81\x84\xc4^\xa7\xa0@Kl\
\xbf\x02\x93&\xd7\x12U\xb1\x94]V\xb9m\x0d\xec:\
\x0b\xda\xf2YN\xbca\x0e\xbb\x95\x06\xdb\xf5\xa2>\x1a\
X\x09\x00ee\xde\xdc(7\x0e\x0eu\x16\xa9\x9a\xd5\
\xf5\x1a\xe2-\x18\xa6+\x82\x81\xb5Ns\xcd\xb6/^\
\x9c\x0a\x150\xb1\xf4\xce;\xf63C\x19\x80\xf9\x05|\
N\xfd\x0e\x95\xd1\xf3\xaf\x90\xfe\xd5\xdam\xa4Xrw\
-q\x94@\xb1\xbdW23\x96.Z\x88\x03\x86\x8f\
\xe0\xbflY\xb1\xfe\xa2\xbb\xef\xb9\xc5\x9c\xf7\xc5\x0cc\
\xf7\xce\x89\x88\xbbEL:\x83\x810\x94v\x98\xcc&\
\xed\xe3\x06\x15\x11\xdfDb\xa9\xcf\xb6W5\x14\xc7a\
\xcd\xb6M,J\xf9\x8d\xc4\xf6h\xea\xab\xbcd\xca9\
\xb1\xbb\x85)\x8f<\xf0\x93k\xdf?\x0d\xd1\x0d\x16\xbd\
\x9f'\x110\x00Q\xdd\x1c\x8bU5%X\x12\xedq\
\xd9y6[p8\xe0$\xcb\xedX]\xb9%<\xb3\
\xe2\x9dw\xde\x89)7NA\xc5f\x17\x93\xdf\xfd\xb8\
\xe2\xc3n\xed\x1ep\xcb*\xcaR\x96\x9b\xdf\xec\xd8\x82\
\x1d\x95\x99\xd9TO\xcdp\xbfR\xac-hV-\xd9\
\x98\xde9\x9c\xe1\xc5\x15c\x9f\x1f\xc1Jmu\xadD\
\x8d\xad\xff\x052\xc4P\x0b\x9bd\xa7^\x03\xd0i\xf0\
d\x00\xa0`n\xf6/\xc9\x95o\x85\x96\xf8\xde\xdf?\
\xfc\xc1\x11g\xdf\x8dC\x0f=\x12\xfd\xbbvB$\x14\
\xfa\xeen#\xeb\xfb\xed\xfc\xa8\x8d\x96\x02M-4\x84\
\xdf=\xcb\x07\x10\x05\x8a\x90\x1d\xf9\xe9{\xf7\xf3\xd3z\
\x0dr\x8b\xda\xfcp\x0c\xadQN\xc4\xccX\xb1b\x05\
\x00 =\x0d\x18=\xb2\xff\xfe\x9e\x01_F\xc7\x9f\x9c\
\x14f\x86\xf3]\xb2\xe7\xf7\xdb\x17\x00A\x04\x83\xfb\x5c\
\xcb\xcc\xd8\xb1c;\xfa\x0f\x1d\x8a\xab\xcf;\x07\x0c\xe0\
d\xd6\xe2\xa2?\xdf\x11\x9cv\xfaqE\xc7\x9d8\xb9\
\xdb)\xa7\x1e\xdd\xe1\x8c\xeb.\xcf<\xfe\xcd\x97\x0d\x06\
p\x16\x00\x14D\xe0\xb5)Z\xfa\x96\xf5\x8b\x92L\xf3\
\xf6\xfe%\x05`\x0a@\x886=zQ\xda\x01\x83~\
8V\xc8H\xfa>\xf7\x9ex\xce\x89\x18u\xe1\xc9\x98\
3r\x02\x90\x1e\xf6j\x8d\xc1\x10 \x88\xbf\xdc}7\
\xbc\x8a\xe9\xbfh=\xf6yN:\xf6n;\xff\xbct\
\xee\xdc\x03\x89D\xdc\xf3d\x09F\xd0\xf0a\xe7\xae\x1f\
\xa7\xc4\xe4\xe6\xf6\x80\xcf\x0c\xa3\xae\xae\x1c\x96\xd3\xecy\
\xe2\xf6\xd6}h=\xf8\x11\x82\x91,d\x86|P*\
\x89x4\x8e\x98\x95\xfcQ[\xa1P[$\x93\xb5`\
\xd6?\xc3/\xd1\x92\xa5\x01\x80\xc8\xabqv\xdcq\x93\
0}\xfa\xd7^\x12h\xeb\x5c\x08\xb9\xb74\xae\xb7\x8b\
\xb7\xee\x01\x0c\xc3\x90\x08\x05M\xc4\xe3Q\xf4h3\x01\
Z%\xb0\xb3\xbe\x02I\xbb\xdek;\x10\xc4\xd2Y3\
0hp\x7f\x10\x11\x82\xc1\x10:th\x87\xcd\x9bw\
zA?dx\xaf\x93J\xb6x\xfc\xf7\x0f\x14f\xc6\
G\x1f}\x84#\x8e8\x02B\xf8Z\xfa\xa6[L\x8e\
\xbc\xdf\xeb[\xa5x\xea\xf98DoC~\xd0\x8fP\
n1\xb2s\x8b T\x04)'\x01\xcbn\xc2\xee]\
\xdb\xa1\xca\xd6\xe3\xc5\x05[\xe1'B\xd8$\xc4\x5c\xaf\
\xda\x91\xab5f\xce\x9c\x89\xb1c\xc7\xfe\xdd\xb9\xf4\xea\
\xbf\xb5\xf2A\xc3#6o\xb1}\x0b!\x10\x86F\x9e\
\xdf\x84\x11\x0a\xa0\xac9\x81\x84\xf3\xdd\xeeXPP\x80\
\x83\xbbWa\xd1\xae\x5c\xc4j\x1a\xd1\x94Tp\x01\x04\
\x19H\x80\x91\x85\xefr\x89\xc5\xde\x99\xff\xeew+:\
\xf4\xf7\xfaB\x02H7\x80\xff\x07=\x7f\xa0\xae\x93\x13\
t\xae\x00\x00\x00!tEXtCreati\
on Time\x002021:08:\
16 01:19:48\xd1L\xe7F\x00\
\x00\x00%tEXtdate:crea\
te\x002021-08-15T23\
:30:59+00:00\xf7\xd4\x8c7\
\x00\x00\x00%tEXtdate:mod\
ify\x002021-08-15T2\
3:30:59+00:00\x86\x894\
\x8b\x00\x00\x00\x00IEND\xaeB`\x82\
"
qt_resource_name = b"\
\x00\x06\
\x07\x03}\xc3\
\x00i\
\x00m\x00a\x00g\x00e\x00s\
\x00\x05\
\x00o\xa6S\
\x00i\
\x00c\x00o\x00n\x00s\
\x00\x0c\
\x05\x1b\xb0\xc7\
\x00c\
\x00i\x00l\x00-\x00f\x00i\x00l\x00e\x00.\x00p\x00n\x00g\
\x00\x0e\
\x02`\xf8\xc7\
\x00i\
\x00c\x00o\x00n\x00_\x00c\x00l\x00o\x00s\x00e\x00.\x00p\x00n\x00g\
\x00\x0d\
\x07\x1c\xda\x87\
\x00i\
\x00c\x00o\x00n\x00_\x00m\x00e\x00n\x00u\x00.\x00p\x00n\x00g\
\x00\x0f\
\x08\x0c\xf7'\
\x00c\
\x00i\x00l\x00-\x00g\x00a\x00m\x00e\x00p\x00a\x00d\x00.\x00p\x00n\x00g\
\x00\x14\
\x08Yh\x87\
\x00c\
\x00i\x00l\x00-\x00a\x00r\x00r\x00o\x00w\x00-\x00b\x00o\x00t\x00t\x00o\x00m\x00.\
\x00p\x00n\x00g\
\x00\x11\
\x0c\xc2^\xc7\
\x00i\
\x00c\x00o\x00n\x00_\x00m\x00i\x00n\x00i\x00m\x00i\x00z\x00e\x00.\x00p\x00n\x00g\
\
\x00\x11\
\x0c\xcb\x1e\xc7\
\x00i\
\x00c\x00o\x00n\x00_\x00m\x00a\x00x\x00i\x00m\x00i\x00z\x00e\x00.\x00p\x00n\x00g\
\
\x00\x18\
\x06\xb9\x17\xa7\
\x00P\
\x00y\x00D\x00r\x00a\x00c\x00u\x00l\x00a\x00_\x00h\x00o\x00r\x00i\x00z\x00o\x00n\
\x00t\x00a\x00l\x00.\x00p\x00n\x00g\
\x00\x0d\
\x07\x97YG\
\x00P\
\x00y\x00D\x00r\x00a\x00c\x00u\x00l\x00a\x00.\x00p\x00n\x00g\
\x00\x16\
\x05'\x86\xa7\
\x00P\
\x00y\x00D\x00r\x00a\x00c\x00u\x00l\x00a\x00_\x00v\x00e\x00r\x00t\x00i\x00c\x00a\
\x00l\x00.\x00p\x00n\x00g\
"
qt_resource_struct = b"\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x02\x00\x00\x00\x01\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x12\x00\x02\x00\x00\x00\x01\x00\x00\x00\x08\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x03\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x04\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x03\x00\x00\x00\x05\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x01z\x00\x00\x00\x00\x00\x01\x00\x00\x97E\
\x00\x00\x01{b\x979x\
\x00\x00\x01$\x00\x00\x00\x00\x00\x01\x00\x00*+\
\x00\x00\x01{b\x979x\
\x00\x00\x01Z\x00\x00\x00\x00\x00\x01\x00\x00\x89-\
\x00\x00\x01{b\x979x\
\x00\x00\x00\x00\x00\x02\x00\x00\x00\x01\x00\x00\x00\x09\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00\x12\x00\x02\x00\x00\x00\x07\x00\x00\x00\x0a\
\x00\x00\x00\x00\x00\x00\x00\x00\
\x00\x00\x00@\x00\x00\x00\x00\x00\x01\x00\x00\x1d\x00\
\x00\x00\x01y[\xecn`\
\x00\x00\x00\x22\x00\x00\x00\x00\x00\x01\x00\x00\x00\x00\
\x00\x00\x01{\x96\xb8JX\
\x00\x00\x00b\x00\x00\x00\x00\x00\x01\x00\x00\x1d\xd6\
\x00\x00\x01y[\xecn`\
\x00\x00\x00\x82\x00\x00\x00\x00\x00\x01\x00\x00\x1e\xd2\
\x00\x00\x01{\x96\xafr\xf2\
\x00\x00\x00\xa6\x00\x00\x00\x00\x00\x01\x00\x00!^\
\x00\x00\x01{b\x979x\
\x00\x00\x00\xd4\x00\x00\x00\x00\x00\x01\x00\x00(\x7f\
\x00\x00\x01y[\xecn`\
\x00\x00\x00\xfc\x00\x00\x00\x00\x00\x01\x00\x00)/\
\x00\x00\x01y[\xecn`\
"
def qInitResources():
    # Register the embedded resource data defined above (qt_resource_data /
    # _name / _struct) with Qt's resource system. 0x03 is the rcc resource
    # format version this generated payload was produced with.
    QtCore.qRegisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
def qCleanupResources():
    # Undo qInitResources(): unregister the same resource payload (same
    # format version 0x03 and the same three data blobs).
    QtCore.qUnregisterResourceData(0x03, qt_resource_struct, qt_resource_name, qt_resource_data)
qInitResources()
| 48.349374 | 97 | 0.696476 | 43,120 | 196,927 | 3.174629 | 0.107653 | 0.020162 | 0.016305 | 0.008941 | 0.794857 | 0.793447 | 0.791979 | 0.790102 | 0.789444 | 0.787903 | 0 | 0.240007 | 0.043519 | 196,927 | 4,072 | 98 | 48.361248 | 0.486751 | 0.000828 | 0 | 0.769515 | 0 | 0.72002 | 0.000021 | 0 | 0 | 0 | 0.000042 | 0 | 0 | 1 | 0.000492 | false | 0 | 0.000246 | 0 | 0.000739 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
28d2d1fd6f096d46032a46ad3351c13f24800b46 | 2,014 | py | Python | tests/test_api.py | KarrLab/python_package_tutorial | dd20e0d3056138904e7e7fbbf6bb884d64dbf8f6 | [
"MIT"
] | 15 | 2018-01-06T11:33:01.000Z | 2022-03-01T15:18:40.000Z | tests/test_api.py | KarrLab/python_package_tutorial | dd20e0d3056138904e7e7fbbf6bb884d64dbf8f6 | [
"MIT"
] | 2 | 2018-01-30T23:21:12.000Z | 2018-03-23T20:22:06.000Z | tests/test_api.py | KarrLab/python_package_tutorial | dd20e0d3056138904e7e7fbbf6bb884d64dbf8f6 | [
"MIT"
] | 8 | 2018-01-08T21:40:19.000Z | 2022-01-04T14:48:02.000Z | """ Tests API
:Author: Jonathan Karr <jonrkarr@gmail.com>
:Date: 2018-03-12
:Copyright: 2018, Karr Lab
:License: MIT
"""
import intro_to_wc_modeling
import types
import unittest
class ApiTestCase(unittest.TestCase):
    """Smoke-test the public API surface of :mod:`intro_to_wc_modeling`.

    Checks that every documented attribute path resolves and has the
    expected kind (module, function, or class), so packaging or
    refactoring regressions are caught immediately.
    """

    # (dotted attribute path relative to the package root, expected kind);
    # "" denotes the package itself.
    EXPECTED_API = (
        ("", types.ModuleType),
        ("cell_modeling", types.ModuleType),
        ("cell_modeling.model_composition", types.ModuleType),
        ("cell_modeling.simulation", types.ModuleType),
        ("cell_modeling.simulation.boolean", types.ModuleType),
        ("cell_modeling.simulation.multi_algorithm", types.ModuleType),
        ("cell_modeling.simulation.multi_algorithm.analysis", types.ModuleType),
        ("concepts_skills", types.ModuleType),
        ("concepts_skills.software_engineering", types.ModuleType),
        ("concepts_skills.software_engineering.databases", types.ModuleType),
        ("concepts_skills.software_engineering.databases.main", types.FunctionType),
        ("concepts_skills.software_engineering.matplotlib_example", types.ModuleType),
        ("concepts_skills.software_engineering.unit_testing", types.ModuleType),
        ("concepts_skills.software_engineering.unit_testing.Simulation", type),
        ("wc_modeling", types.ModuleType),
        ("wc_modeling.wc_lang_tutorial", types.ModuleType),
        ("wc_modeling.wc_lang_tutorial.main", types.FunctionType),
    )

    def test(self):
        # subTest makes every broken attribute path show up in the report,
        # instead of aborting the whole test at the first failed assertion.
        for path, kind in self.EXPECTED_API:
            with self.subTest(path=path):
                obj = intro_to_wc_modeling
                for attr in filter(None, path.split(".")):
                    obj = getattr(obj, attr)
                self.assertIsInstance(obj, kind)
| 55.944444 | 125 | 0.821251 | 242 | 2,014 | 6.479339 | 0.219008 | 0.133929 | 0.103316 | 0.195153 | 0.822066 | 0.822066 | 0.804847 | 0.804847 | 0.78125 | 0.78125 | 0 | 0.006641 | 0.102781 | 2,014 | 35 | 126 | 57.542857 | 0.861096 | 0.056107 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.772727 | 1 | 0.045455 | false | 0 | 0.136364 | 0 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
e90126c7d957caa4ef24898c0522af483a220234 | 6,407 | py | Python | loldib/getratings/models/NA/na_urgot/na_urgot_top.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_urgot/na_urgot_top.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_urgot/na_urgot_top.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | from getratings.models.ratings import Ratings
# Champion names for which a per-matchup ratings model exists. The original
# auto-generated file spelled out one empty ``class`` statement per champion
# (~550 lines of identical boilerplate); generating the subclasses in a loop
# keeps the module's public interface identical — every ``NA_Urgot_Top_<name>``
# attribute is still a module-level subclass of ``Ratings`` — while keeping
# the champion list in one easily-maintained place.
_CHAMPIONS = (
    "Aatrox", "Ahri", "Akali", "Alistar", "Amumu", "Anivia", "Annie",
    "Ashe", "AurelionSol", "Azir", "Bard", "Blitzcrank", "Brand", "Braum",
    "Caitlyn", "Camille", "Cassiopeia", "Chogath", "Corki", "Darius",
    "Diana", "Draven", "DrMundo", "Ekko", "Elise", "Evelynn", "Ezreal",
    "Fiddlesticks", "Fiora", "Fizz", "Galio", "Gangplank", "Garen", "Gnar",
    "Gragas", "Graves", "Hecarim", "Heimerdinger", "Illaoi", "Irelia",
    "Ivern", "Janna", "JarvanIV", "Jax", "Jayce", "Jhin", "Jinx",
    "Kalista", "Karma", "Karthus", "Kassadin", "Katarina", "Kayle", "Kayn",
    "Kennen", "Khazix", "Kindred", "Kled", "KogMaw", "Leblanc", "LeeSin",
    "Leona", "Lissandra", "Lucian", "Lulu", "Lux", "Malphite", "Malzahar",
    "Maokai", "MasterYi", "MissFortune", "MonkeyKing", "Mordekaiser",
    "Morgana", "Nami", "Nasus", "Nautilus", "Nidalee", "Nocturne", "Nunu",
    "Olaf", "Orianna", "Ornn", "Pantheon", "Poppy", "Quinn", "Rakan",
    "Rammus", "RekSai", "Renekton", "Rengar", "Riven", "Rumble", "Ryze",
    "Sejuani", "Shaco", "Shen", "Shyvana", "Singed", "Sion", "Sivir",
    "Skarner", "Sona", "Soraka", "Swain", "Syndra", "TahmKench", "Taliyah",
    "Talon", "Taric", "Teemo", "Thresh", "Tristana", "Trundle",
    "Tryndamere", "TwistedFate", "Twitch", "Udyr", "Urgot", "Varus",
    "Vayne", "Veigar", "Velkoz", "Vi", "Viktor", "Vladimir", "Volibear",
    "Warwick", "Xayah", "Xerath", "XinZhao", "Yasuo", "Yorick", "Zac",
    "Zed", "Ziggs", "Zilean", "Zyra",
)

for _champion in _CHAMPIONS:
    _name = "NA_Urgot_Top_%s" % _champion
    # ``type(name, bases, namespace)`` is equivalent to an empty
    # ``class <name>(Ratings): pass`` statement at module level.
    globals()[_name] = type(_name, (Ratings,), {})

# Do not leak the loop temporaries as module attributes.
del _champion, _name
| 15.364508 | 46 | 0.761667 | 972 | 6,407 | 4.59465 | 0.151235 | 0.216301 | 0.370802 | 0.463502 | 0.797582 | 0.797582 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173404 | 6,407 | 416 | 47 | 15.401442 | 0.843278 | 0 | 0 | 0.498195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.498195 | 0.00361 | 0 | 0.501805 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 7 |
3acea4e26ff836413ece32dafd17c467e4ffffa3 | 44 | py | Python | auto_sr/__init__.py | mavnt/auto_sr | 51ac56e1b52463dfc0bbd8e44997c8f176207488 | [
"MIT"
] | null | null | null | auto_sr/__init__.py | mavnt/auto_sr | 51ac56e1b52463dfc0bbd8e44997c8f176207488 | [
"MIT"
] | null | null | null | auto_sr/__init__.py | mavnt/auto_sr | 51ac56e1b52463dfc0bbd8e44997c8f176207488 | [
"MIT"
] | null | null | null | # coding=utf-8
from .auto_sr import auto_sr
| 14.666667 | 28 | 0.772727 | 9 | 44 | 3.555556 | 0.777778 | 0.375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.026316 | 0.136364 | 44 | 2 | 29 | 22 | 0.815789 | 0.272727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
c94300d42c0fb0b48136f78ea02e02688a8047cf | 12,796 | py | Python | src/account/tests.py | DevTeamSCH/vikoverflow-backend | bac0a5f8d0f18bea4d99e0d94ee322feb6a8039e | [
"MIT"
] | null | null | null | src/account/tests.py | DevTeamSCH/vikoverflow-backend | bac0a5f8d0f18bea4d99e0d94ee322feb6a8039e | [
"MIT"
] | 24 | 2018-10-09T12:34:09.000Z | 2022-02-10T11:01:32.000Z | src/account/tests.py | DevTeamSCH/vikoverflow-backend | bac0a5f8d0f18bea4d99e0d94ee322feb6a8039e | [
"MIT"
] | null | null | null | from django.contrib.auth.models import User
from django.urls import reverse
from faker import Faker
from rest_framework import status
from rest_framework.test import APITestCase
from .models import Profile
from .serializers import ProfileSerializer
fake = Faker()
fake_email = "fake@fake.com"
class AccountsTests(APITestCase):
    """Permission and CRUD tests for the profile list/detail API endpoints."""

    def setUp(self):
        """Create an admin, a moderator, and two plain users, plus a profile each."""
        # NOTE: the previous code used create(...).set_password(...), which
        # hashes the password on an in-memory instance but never saves it, so
        # no password was actually persisted. create_user() hashes *and*
        # saves in one step.
        User.objects.create_user(
            username="admin", password="adminpw", is_superuser=True, is_staff=True)
        User.objects.create_user(
            username="mod", password="modpw", is_superuser=False, is_staff=True)
        User.objects.create_user(
            username="user", password="userpw", is_superuser=False, is_staff=False)
        User.objects.create_user(
            username="other_user", password="userpw", is_superuser=False, is_staff=False)

        # One profile per user, all visible and ranked.
        for user in User.objects.all():
            Profile.objects.create(
                user=user,
                about_me=fake.paragraph(nb_sentences=4),
                is_score_visible=True,
                ranked=True,
            )

    # -------------------------------------------------------------------------
    # Helpers
    # -------------------------------------------------------------------------

    @staticmethod
    def _detail_url(pk):
        """Return the detail URL for the profile with primary key ``pk``."""
        return "".join([reverse("profile-list"), str(pk), "/"])

    @staticmethod
    def _profile_payload(pk, username, is_staff, is_active):
        """Build a complete PUT payload for profile ``pk`` with fresh fake data."""
        return {
            "id": pk,
            "user": {
                "username": username,
                "email": fake_email,
                "first_name": fake.text(max_nb_chars=20),
                "last_name": fake.text(max_nb_chars=20),
                "is_staff": is_staff,
                "is_active": is_active,
            },
            "about_me": fake.text(max_nb_chars=50),
            "is_score_visible": True,
            "ranked": True,
        }

    def _assert_persisted(self, pk, data):
        """Assert the user/profile rows for ``pk`` match the PUT ``data``."""
        changed_user = User.objects.get(pk=pk)
        changed_profile = Profile.objects.get(pk=pk)
        self.assertEqual(changed_user.email, data["user"]["email"])
        self.assertEqual(changed_user.first_name, data["user"]["first_name"])
        self.assertEqual(changed_user.last_name, data["user"]["last_name"])
        self.assertEqual(changed_user.is_staff, data["user"]["is_staff"])
        self.assertEqual(changed_user.is_active, data["user"]["is_active"])
        self.assertEqual(changed_profile.about_me, data["about_me"])
        self.assertEqual(changed_profile.is_score_visible, data["is_score_visible"])
        self.assertEqual(changed_profile.ranked, data["ranked"])

    def _assert_list_ok(self, username):
        """Log in as ``username``, GET the list view, and expect all profiles."""
        self.client.force_login(User.objects.get(username=username))
        response = self.client.get(reverse("profile-list"))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(len(response.data), User.objects.all().count())
        self.client.logout()

    def _assert_get_single_ok(self, username, get_idx):
        """Log in as ``username``, GET profile ``get_idx``, and check the body."""
        self.client.force_login(User.objects.get(username=username))
        response = self.client.get(self._detail_url(get_idx))
        serializer = ProfileSerializer(Profile.objects.get(pk=get_idx))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data, serializer.data)
        self.client.logout()

    # -------------------------------------------------------------------------
    # LIST view / GET
    # -------------------------------------------------------------------------

    def test_get_list_admin(self):
        self._assert_list_ok("admin")

    def test_get_list_mod(self):
        self._assert_list_ok("mod")

    def test_get_list_user(self):
        self._assert_list_ok("user")

    def test_get_list_no_login(self):
        """Anonymous clients may not list profiles."""
        response = self.client.get(reverse("profile-list"))
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)

    # -------------------------------------------------------------------------
    # SINGLE view / GET
    # -------------------------------------------------------------------------

    def test_get_single_random_admin(self):
        self._assert_get_single_ok(
            "admin", Profile.objects.order_by("?").first().pk)

    def test_get_single_random_mod(self):
        self._assert_get_single_ok(
            "mod", Profile.objects.order_by("?").first().pk)

    def test_get_single_own_user(self):
        self._assert_get_single_ok(
            "user", User.objects.get(username="user").pk)

    def test_get_single_not_own_user(self):
        """A plain user may view another user's profile."""
        self.client.force_login(User.objects.get(username="user"))
        get_idx = User.objects.get(username="other_user").pk
        response = self.client.get(self._detail_url(get_idx))
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.client.logout()

    def test_get_single_non_exist_admin(self):
        """Requesting a missing profile returns 404, even for admins."""
        self.client.force_login(User.objects.get(username="admin"))
        # count() + 1 is unused here because setUp creates contiguous pks.
        get_idx = Profile.objects.all().count() + 1
        response = self.client.get(self._detail_url(get_idx))
        self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
        self.client.logout()

    # -------------------------------------------------------------------------
    # SINGLE view / PUT
    # -------------------------------------------------------------------------

    def test_put_single_admin(self):
        """Admins may update any profile; changes must be persisted."""
        self.client.force_login(User.objects.get(username="admin"))
        put_idx = User.objects.get(username="user").pk
        data = self._profile_payload(
            put_idx, User.objects.get(pk=put_idx).username,
            is_staff=False, is_active=True)
        response = self.client.put(self._detail_url(put_idx), data, format="json")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self._assert_persisted(put_idx, data)
        self.client.logout()

    def test_put_single_mod(self):
        """Moderators may not update other users' profiles."""
        self.client.force_login(User.objects.get(username="mod"))
        put_idx = User.objects.get(username="user").pk
        data = self._profile_payload(
            put_idx, User.objects.get(pk=put_idx).username,
            is_staff=False, is_active=True)
        response = self.client.put(self._detail_url(put_idx), data, format="json")
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.client.logout()

    def test_put_single_user_other(self):
        """Plain users may not update someone else's profile."""
        self.client.force_login(User.objects.get(username="user"))
        other_user = User.objects.get(username="other_user")
        data = self._profile_payload(
            other_user.pk, other_user.username,
            is_staff=other_user.is_staff, is_active=other_user.is_active)
        response = self.client.put(
            self._detail_url(other_user.pk), data, format="json")
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.client.logout()

    def test_put_single_user_own_valid(self):
        """Users may update their own profile; changes must be persisted."""
        user = User.objects.get(username="user")
        self.client.force_login(user)
        data = self._profile_payload(
            user.pk, user.username,
            is_staff=user.is_staff, is_active=user.is_active)
        response = self.client.put(self._detail_url(user.pk), data, format="json")
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self._assert_persisted(user.pk, data)
        self.client.logout()

    def test_user_change_own_is_active(self):
        """Users may not toggle their own ``is_active`` flag."""
        user = User.objects.get(username="user")
        self.client.force_login(user)
        data = self._profile_payload(
            user.pk, user.username,
            is_staff=user.is_staff, is_active=not user.is_active)
        response = self.client.put(self._detail_url(user.pk), data, format="json")
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(
            user.is_active, User.objects.get(username=user.username).is_active
        )
        self.client.logout()

    def test_user_change_own_is_staff(self):
        """Users may not toggle their own ``is_staff`` flag."""
        user = User.objects.get(username="user")
        self.client.force_login(user)
        data = self._profile_payload(
            user.pk, user.username,
            is_staff=not user.is_staff, is_active=user.is_active)
        response = self.client.put(self._detail_url(user.pk), data, format="json")
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
        self.assertEqual(
            user.is_staff, User.objects.get(username=user.username).is_staff
        )
        self.client.logout()
| 35.743017 | 84 | 0.565333 | 1,449 | 12,796 | 4.763285 | 0.079365 | 0.062301 | 0.05071 | 0.066937 | 0.891915 | 0.867864 | 0.859316 | 0.843524 | 0.843524 | 0.832657 | 0 | 0.008662 | 0.251172 | 12,796 | 357 | 85 | 35.843137 | 0.711647 | 0.068303 | 0 | 0.731518 | 0 | 0 | 0.088809 | 0 | 0 | 0 | 0 | 0 | 0.151751 | 1 | 0.062257 | false | 0.015564 | 0.027237 | 0 | 0.093385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
a343f89f0facafd70637406897f749b9a60732b5 | 49 | py | Python | loftr/loftr_module/__init__.py | Kolkir/Coarse_LoFTR_TRT | 7533d39c232e9fd17d4abafa68adfc612d0fe6a2 | [
"Apache-2.0"
] | 19 | 2022-01-19T16:14:43.000Z | 2022-03-21T13:56:58.000Z | loftr/loftr_module/__init__.py | Kolkir/LoFTR_TRT | 91a30da2cd2082d5992c0b58493e98f6ae014c07 | [
"Apache-2.0"
] | 1 | 2022-01-15T10:16:13.000Z | 2022-01-18T10:23:07.000Z | loftr/loftr_module/__init__.py | Kolkir/LoFTR_TRT | 91a30da2cd2082d5992c0b58493e98f6ae014c07 | [
"Apache-2.0"
] | 3 | 2022-02-11T09:41:23.000Z | 2022-03-24T12:03:42.000Z | from .transformer import LocalFeatureTransformer
| 24.5 | 48 | 0.897959 | 4 | 49 | 11 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.081633 | 49 | 1 | 49 | 49 | 0.977778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
a35660a50fdfc4fd5a3f90deb9eb243bd4ea2809 | 9,970 | py | Python | tests/test_revision.py | basilfx/flask-daapserver | ca595fcbc5b657cba826eccd3be5cebba0a1db0e | [
"MIT"
] | 11 | 2015-01-06T02:37:43.000Z | 2022-03-06T16:11:56.000Z | tests/test_revision.py | basilfx/flask-daapserver | ca595fcbc5b657cba826eccd3be5cebba0a1db0e | [
"MIT"
] | 2 | 2015-09-29T01:44:15.000Z | 2021-02-04T20:04:48.000Z | tests/test_revision.py | basilfx/flask-daapserver | ca595fcbc5b657cba826eccd3be5cebba0a1db0e | [
"MIT"
] | 6 | 2015-02-15T07:32:17.000Z | 2018-03-05T00:41:46.000Z | from daapserver.revision import RevisionStore
import unittest
class TestRevisionStore(unittest.TestCase):
def assertIterEqual(self, actual, expected):
"""
Helper to cast actual iterator into a list.
"""
self.assertListEqual(list(actual), expected)
    def setUp(self):
        """
        Initialize an empty revision store.

        Runs before every test, so each test starts from a fresh,
        isolated store.
        """
        self.store = RevisionStore()
def test_add(self):
"""
Test basic add functionality.
"""
self.store.add("A", "A1")
self.store.add("B", "B1")
self.store.add("C", "C1")
self.assertIterEqual(self.store.iterate(), ["C1", "B1", "A1"])
self.store.add("A", "A2")
self.store.add("D", "D1")
self.assertIterEqual(self.store.iterate(), ["D1", "C1", "B1", "A2"])
def test_remove(self):
"""
Test basic remove functionality.
"""
self.store.add("A", "A1")
self.store.add("B", "B1")
self.store.add("C", "C1")
self.assertIterEqual(self.store.iterate(), ["C1", "B1", "A1"])
self.store.remove("A")
self.assertIterEqual(self.store.iterate(), ["C1", "B1"])
self.store.remove("C")
self.assertIterEqual(self.store.iterate(), ["B1"])
def test_get(self):
"""
Test basic get functionality
"""
self.store.add("A", "A1")
self.store.add("B", "B1")
self.store.add("C", "C1")
self.assertEqual(self.store.get("A"), "A1")
self.assertEqual(self.store.get("A", revision=1), "A1")
self.store.commit()
self.store.add("A", "A2")
self.assertEqual(self.store.get("A"), "A2")
self.assertEqual(self.store.get("A", revision=2), "A2")
self.assertEqual(self.store.get("A", revision=1), "A1")
self.store.commit()
self.store.remove("A")
with self.assertRaises(KeyError):
self.store.get("A")
self.assertEqual(self.store.get("A", revision=2), "A2")
self.assertEqual(self.store.get("A", revision=1), "A1")
def test_get_fail(self):
"""
Test edge cases for get functionality.
"""
self.store.add("A", "A1")
self.store.remove("A")
with self.assertRaises(KeyError):
self.store.get("A", revision=1)
def test_commit(self):
"""
Test commit and revision functionality.
"""
self.store.add("A", "A1")
self.store.add("B", "B1")
self.store.add("C", "C1")
self.assertEqual(self.store.revision, 1)
self.assertIterEqual(self.store.iterate(), ["C1", "B1", "A1"])
self.assertIterEqual(
self.store.iterate(revision=1), ["C1", "B1", "A1"])
self.store.commit()
self.store.add("A", "A2")
self.store.add("D", "D1")
self.assertEqual(self.store.revision, 2)
self.assertIterEqual(self.store.iterate(), ["D1", "C1", "B1", "A2"])
self.assertIterEqual(
self.store.iterate(revision=2), ["D1", "C1", "B1", "A2"])
self.assertIterEqual(
self.store.iterate(revision=1), ["C1", "B1", "A1"])
self.store.commit()
self.store.remove("A")
self.store.remove("C")
self.assertEqual(self.store.revision, 3)
self.assertIterEqual(self.store.iterate(), ["D1", "B1"])
self.assertIterEqual(self.store.iterate(revision=3), ["D1", "B1"])
self.assertIterEqual(
self.store.iterate(revision=2), ["D1", "C1", "B1", "A2"])
self.assertIterEqual(
self.store.iterate(revision=1), ["C1", "B1", "A1"])
def test_iterate(self):
"""
"""
self.store.add("A", "A1")
self.store.add("B", "B1")
self.store.add("C", "C1")
self.assertIterEqual(self.store.iterate(), ["C1", "B1", "A1"])
self.assertIterEqual(
self.store.iterate(revision=1), ["C1", "B1", "A1"])
self.assertIterEqual(
self.store.iterate(revision=-1), ["C1", "B1", "A1"])
with self.assertRaises(ValueError):
for _ in self.store.iterate(revision=2):
pass
def test_clean(self):
"""
"""
self.store.add("A", "A1")
self.store.add("B", "B1")
self.store.add("C", "C1")
self.assertIterEqual(self.store.iterate(), ["C1", "B1", "A1"])
self.store.commit()
self.store.remove("A")
self.assertIterEqual(self.store.iterate(), ["C1", "B1"])
self.assertIterEqual(self.store.iterate(revision=2), ["C1", "B1"])
self.assertIterEqual(
self.store.iterate(revision=1), ["C1", "B1", "A1"])
self.store.commit()
self.store.remove("C")
self.assertIterEqual(self.store.iterate(), ["B1"])
self.assertIterEqual(self.store.iterate(revision=3), ["B1"])
self.assertIterEqual(self.store.iterate(revision=2), ["C1", "B1"])
self.assertIterEqual(
self.store.iterate(revision=1), ["C1", "B1", "A1"])
self.store.clean(revision=2)
self.assertIterEqual(self.store.iterate(), ["B1"])
self.assertIterEqual(self.store.iterate(revision=3), ["B1"])
self.assertIterEqual(self.store.iterate(revision=2), ["C1", "B1"])
with self.assertRaises(ValueError):
for _ in self.store.iterate(revision=1):
pass
self.store.clean()
self.assertIterEqual(self.store.iterate(), ["B1"])
self.assertIterEqual(self.store.iterate(revision=3), ["B1"])
with self.assertRaises(ValueError):
for _ in self.store.iterate(revision=2):
pass
def test_diff(self):
"""
Test diff functionality (1).
"""
self.store.commit()
self.store.add("A", "A2")
self.store.commit()
self.store.remove("A")
self.assertIterEqual(self.store.diff(3, 1), [("A", 1)])
self.assertIterEqual(self.store.diff(1, 3), [("A", -1)])
self.assertIterEqual(self.store.diff(2, 1), [("A", 1)])
self.assertIterEqual(self.store.diff(1, 2), [("A", -1)])
self.assertIterEqual(self.store.diff(3, 2), [("A", -1)])
self.assertIterEqual(self.store.diff(2, 3), [("A", 1)])
def test_diff2(self):
"""
Test diff functionality (2).
"""
self.store.commit()
self.store.add("A", "A2")
self.store.commit()
self.store.remove("A")
self.store.commit()
self.store.add("B", "B4")
self.store.commit()
self.store.add("C", "C5")
self.store.commit()
self.store.remove("B")
self.assertIterEqual(self.store.diff(6, 5), [("B", -1)])
self.assertIterEqual(self.store.diff(5, 6), [("B", 1)])
def test_diff3(self):
"""
Test diff functionality (3).
"""
self.store.commit()
self.store.add("A", "A2")
self.store.commit()
self.store.add("B", "B3")
self.store.commit()
self.store.add("C", "C4")
self.assertIterEqual(self.store.diff(4, 3), [("C", 1)])
self.assertIterEqual(self.store.diff(3, 4), [("C", -1)])
def test_diff4(self):
"""
Test diff functionality (4).
"""
self.store.commit()
self.store.add("A", "A2.1")
self.store.add("A", "A2.2")
self.store.commit()
self.store.remove("A")
self.store.commit()
self.store.add("A", "A4")
self.store.commit()
self.store.remove("A")
self.store.commit()
self.store.add("A", "A6")
self.store.commit()
self.store.commit()
self.store.add("A", "A8")
self.assertIterEqual(self.store.diff(8, 7), [("A", 0)])
self.assertIterEqual(self.store.diff(8, 6), [("A", 0)])
self.assertIterEqual(self.store.diff(8, 5), [("A", 1)])
self.assertIterEqual(self.store.diff(8, 4), [("A", 0)])
self.assertIterEqual(self.store.diff(8, 3), [("A", 1)])
self.assertIterEqual(self.store.diff(8, 2), [("A", 0)])
self.assertIterEqual(self.store.diff(8, 1), [("A", 1)])
self.assertIterEqual(self.store.diff(7, 8), [("A", 0)])
self.assertIterEqual(self.store.diff(6, 8), [("A", 0)])
self.assertIterEqual(self.store.diff(5, 8), [("A", -1)])
self.assertIterEqual(self.store.diff(4, 8), [("A", 0)])
self.assertIterEqual(self.store.diff(3, 8), [("A", -1)])
self.assertIterEqual(self.store.diff(2, 8), [("A", 0)])
self.assertIterEqual(self.store.diff(1, 8), [("A", -1)])
self.assertIterEqual(self.store.diff(5, 2), [("A", -1)])
self.assertIterEqual(self.store.diff(2, 5), [("A", 1)])
self.assertIterEqual(self.store.diff(8, 8), [("A", 1)])
self.assertIterEqual(self.store.diff(5, 5), [])
self.assertIterEqual(self.store.diff(4, 4), [("A", 1)])
self.assertIterEqual(self.store.diff(3, 3), [])
self.assertIterEqual(self.store.diff(1, 1), [])
def test_iter(self):
"""
"""
self.store.add("A", "A1")
self.store.add("B", "B1")
self.store.add("C", "C1")
self.assertIterEqual(self.store.iterate(), ["C1", "B1", "A1"])
self.assertIterEqual(iter(self.store), ["C1", "B1", "A1"])
self.store.commit()
self.store.add("A", "A2")
self.store.add("D", "D1")
self.assertIterEqual(self.store.iterate(), ["D1", "C1", "B1", "A2"])
self.assertIterEqual(iter(self.store), ["D1", "C1", "B1", "A2"])
def test_nonzero(self):
"""
Test coercion to boolean.
"""
self.assertFalse(self.store)
self.store.add("A", "A1")
self.assertTrue(self.store)
self.store.add("A", "A2")
self.assertTrue(self.store)
self.store.remove("A")
self.assertFalse(self.store)
| 30.120846 | 76 | 0.537212 | 1,198 | 9,970 | 4.456594 | 0.068447 | 0.283199 | 0.271399 | 0.330399 | 0.866454 | 0.840607 | 0.771118 | 0.738341 | 0.61903 | 0.557782 | 0 | 0.03602 | 0.262086 | 9,970 | 330 | 77 | 30.212121 | 0.689683 | 0.042026 | 0 | 0.627451 | 0 | 0 | 0.04019 | 0 | 0 | 0 | 0 | 0 | 0.421569 | 1 | 0.073529 | false | 0.014706 | 0.009804 | 0 | 0.088235 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
a363cb8cd2452287adee8c1d66bd9c7d3d75a36c | 11,631 | py | Python | app/config_tablero.py | diegoag30/ScrabbleAr | e309b21ec60148b290c45f4edc6bf90d391d144a | [
"MIT"
] | 3 | 2020-05-26T21:02:48.000Z | 2020-08-06T03:19:54.000Z | app/config_tablero.py | diegoag30/ScrabbleAr | e309b21ec60148b290c45f4edc6bf90d391d144a | [
"MIT"
] | null | null | null | app/config_tablero.py | diegoag30/ScrabbleAr | e309b21ec60148b290c45f4edc6bf90d391d144a | [
"MIT"
] | null | null | null | class EstadBoton:
'''Esta clase maneja la configuracion del tablero,
ya sea el valor de las casillas, los colores, y el estado de los botones '''
def __init__(self,valor=1,color='',estado=False,tipo='L'): # valor es 1 por defecto para multiplicar la L o P
self._estado = estado
self._valor = valor
self._color = color
self._tipo = tipo
def get_color(self):
return self._color
def get_valor(self):
return self._valor
def get_estado(self):
return self._estado
def get_tipo(self):
return self._tipo
def set_estado(self,valor):
self._estado=valor
def Config1():
configuracion1=[]
#row=[]
for i in range(15):
row=[]
for j in range(15):
row.append(EstadBoton())
configuracion1.append(row)
configuracion1[7][7]=EstadBoton(1,'violet',tipo='P')
configuracion1[1][1]=EstadBoton(2,'orange',tipo='P')
configuracion1[2][2]=EstadBoton(2,'orange',tipo='P')
configuracion1[3][3]=EstadBoton(2,'orange',tipo='P')
configuracion1[4][4]=EstadBoton(2,'orange',tipo='P')
configuracion1[10][10]=EstadBoton(2,'orange',tipo='P')
configuracion1[11][11]=EstadBoton(2,'orange',tipo='P')
configuracion1[12][12]=EstadBoton(2,'orange',tipo='P')
configuracion1[13][13]=EstadBoton(2,'orange',tipo='P')
configuracion1[1][13]=EstadBoton(2,'orange',tipo='P')
configuracion1[2][12]=EstadBoton(2,'orange',tipo='P')
configuracion1[3][11]=EstadBoton(2,'orange',tipo='P')
configuracion1[4][10]=EstadBoton(2,'orange',tipo='P')
configuracion1[10][4]=EstadBoton(2,'orange',tipo='P')
configuracion1[11][3]=EstadBoton(2,'orange',tipo='P')
configuracion1[12][2]=EstadBoton(2,'orange',tipo='P')
configuracion1[13][1]=EstadBoton(2,'orange',tipo='P')
configuracion1[0][0]=EstadBoton(1,'red',tipo='P')
configuracion1[0][7]=EstadBoton(3,'red',tipo='P')
configuracion1[0][14]=EstadBoton(3,'red',tipo='P')
configuracion1[14][7]=EstadBoton(3,'red',tipo='P')
configuracion1[14][14]=EstadBoton(3,'red',tipo='P')
configuracion1[7][14]=EstadBoton(3,'red',tipo='P')
configuracion1[7][0]=EstadBoton(3,'red',tipo='P')
configuracion1[14][0]=EstadBoton(3,'red',tipo='P')
configuracion1[7][7]=EstadBoton(0,'violet',tipo='P')
configuracion1[1][5]=EstadBoton(3,'blue',tipo='L')
configuracion1[1][9]=EstadBoton(3,'blue',tipo='L')
configuracion1[5][1]=EstadBoton(3,'blue',tipo='L')
configuracion1[5][5]=EstadBoton(3,'blue',tipo='L')
configuracion1[5][9]=EstadBoton(3,'blue',tipo='L')
configuracion1[5][13]=EstadBoton(3,'blue',tipo='L')
configuracion1[9][5]=EstadBoton(3,'blue',tipo='L')
configuracion1[9][9]=EstadBoton(3,'blue',tipo='L')
configuracion1[9][13]=EstadBoton(3,'blue',tipo='L')
configuracion1[13][5]=EstadBoton(3,'blue',tipo='L')
configuracion1[13][9]=EstadBoton(3,'blue',tipo='L')
configuracion1[9][1]=EstadBoton(3,'blue',tipo='L')
configuracion1[0][3]=EstadBoton(2,'green',tipo='L')
configuracion1[0][11]=EstadBoton(2,'green',tipo='L')
configuracion1[2][6]=EstadBoton(2,'green',tipo='L')
configuracion1[2][8]=EstadBoton(2,'green',tipo='L')
configuracion1[3][0]=EstadBoton(2,'green',tipo='L')
configuracion1[3][7]=EstadBoton(2,'green',tipo='L')
configuracion1[3][14]=EstadBoton(2,'green',tipo='L')
configuracion1[6][2]=EstadBoton(2,'green',tipo='L')
configuracion1[6][6]=EstadBoton(2,'green',tipo='L')
configuracion1[6][8]=EstadBoton(2,'green',tipo='L')
configuracion1[6][12]=EstadBoton(2,'green',tipo='L')
configuracion1[7][3]=EstadBoton(2,'green',tipo='L')
configuracion1[7][11]=EstadBoton(2,'green',tipo='L')
configuracion1[8][2]=EstadBoton(2,'green',tipo='L')
configuracion1[8][6]=EstadBoton(2,'green',tipo='L')
configuracion1[8][8]=EstadBoton(2,'green',tipo='L')
configuracion1[8][12]=EstadBoton(2,'green',tipo='L')
configuracion1[11][0]=EstadBoton(2,'green',tipo='L')
configuracion1[11][7]=EstadBoton(2,'green',tipo='L')
configuracion1[11][14]=EstadBoton(2,'green',tipo='L')
configuracion1[12][6]=EstadBoton(2,'green',tipo='L')
configuracion1[12][8]=EstadBoton(2,'green',tipo='L')
configuracion1[14][3]=EstadBoton(2,'green',tipo='L')
configuracion1[14][11]=EstadBoton(2,'green',tipo='L')
return configuracion1
def Config2():
configuracion1=[]
#row=[]
for i in range(15):
row=[]
for j in range(15):
row.append(EstadBoton())
configuracion1.append(row)
#configuracion1[7][7]=EstadBoton(1,'B')
configuracion1[1][1]=EstadBoton(-2,'orange',tipo='P')
configuracion1[2][2]=EstadBoton(-2,'orange',tipo='P')
configuracion1[3][3]=EstadBoton(-2,'orange',tipo='P')
configuracion1[4][4]=EstadBoton(-2,'orange',tipo='P')
configuracion1[10][10]=EstadBoton(-2,'orange',tipo='P')
configuracion1[11][11]=EstadBoton(-2,'orange',tipo='P')
configuracion1[12][12]=EstadBoton(-2,'orange',tipo='P')
configuracion1[13][13]=EstadBoton(-2,'orange',tipo='P')
configuracion1[1][13]=EstadBoton(-2,'orange',tipo='P')
configuracion1[2][12]=EstadBoton(-2,'orange',tipo='P')
configuracion1[3][11]=EstadBoton(-2,'orange',tipo='P')
configuracion1[4][10]=EstadBoton(-2,'orange',tipo='P')
configuracion1[10][4]=EstadBoton(-2,'orange',tipo='P')
configuracion1[11][3]=EstadBoton(-2,'orange',tipo='P')
configuracion1[12][2]=EstadBoton(-2,'orange',tipo='P')
configuracion1[13][1]=EstadBoton(-2,'orange',tipo='P')
configuracion1[0][0]=EstadBoton(1,'red',tipo='P')
configuracion1[0][7]=EstadBoton(3,'red',tipo='P')
configuracion1[0][14]=EstadBoton(3,'red',tipo='P')
configuracion1[14][7]=EstadBoton(3,'red',tipo='P')
configuracion1[14][14]=EstadBoton(3,'red',tipo='P')
configuracion1[7][14]=EstadBoton(3,'red',tipo='P')
configuracion1[7][0]=EstadBoton(3,'red',tipo='P')
configuracion1[14][0]=EstadBoton(3,'red',tipo='P')
configuracion1[7][7]=EstadBoton(0,'violet',tipo='P')
configuracion1[1][5]=EstadBoton(3,'blue',tipo='L')
configuracion1[1][9]=EstadBoton(3,'blue',tipo='L')
configuracion1[5][1]=EstadBoton(3,'blue',tipo='L')
configuracion1[5][5]=EstadBoton(3,'blue',tipo='L')
configuracion1[5][9]=EstadBoton(3,'blue',tipo='L')
configuracion1[5][13]=EstadBoton(3,'blue',tipo='L')
configuracion1[9][5]=EstadBoton(3,'blue',tipo='L')
configuracion1[9][9]=EstadBoton(3,'blue',tipo='L')
configuracion1[9][13]=EstadBoton(3,'blue',tipo='L')
configuracion1[13][5]=EstadBoton(3,'blue',tipo='L')
configuracion1[13][9]=EstadBoton(3,'blue',tipo='L')
configuracion1[9][1]=EstadBoton(3,'blue',tipo='L')
configuracion1[0][3]=EstadBoton(2,'green',tipo='L')
configuracion1[0][11]=EstadBoton(2,'green',tipo='L')
configuracion1[2][6]=EstadBoton(2,'green',tipo='L')
configuracion1[2][8]=EstadBoton(2,'green',tipo='L')
configuracion1[3][0]=EstadBoton(2,'green',tipo='L')
configuracion1[3][7]=EstadBoton(2,'green',tipo='L')
configuracion1[3][14]=EstadBoton(2,'green',tipo='L')
configuracion1[6][2]=EstadBoton(2,'green',tipo='L')
configuracion1[6][6]=EstadBoton(2,'green',tipo='L')
configuracion1[6][8]=EstadBoton(2,'green',tipo='L')
configuracion1[6][12]=EstadBoton(2,'green',tipo='L')
configuracion1[7][3]=EstadBoton(2,'green',tipo='L')
configuracion1[7][11]=EstadBoton(2,'green',tipo='L')
configuracion1[8][2]=EstadBoton(2,'green',tipo='L')
configuracion1[8][6]=EstadBoton(2,'green',tipo='L')
configuracion1[8][8]=EstadBoton(2,'green',tipo='L')
configuracion1[8][12]=EstadBoton(2,'green',tipo='L')
configuracion1[11][0]=EstadBoton(2,'green',tipo='L')
configuracion1[11][7]=EstadBoton(2,'green',tipo='L')
configuracion1[11][14]=EstadBoton(2,'green',tipo='L')
configuracion1[12][6]=EstadBoton(2,'green',tipo='L')
configuracion1[12][8]=EstadBoton(2,'green',tipo='L')
configuracion1[14][3]=EstadBoton(2,'green',tipo='L')
configuracion1[14][11]=EstadBoton(2,'green',tipo='L')
return configuracion1
def Config3():
configuracion1=[]
#row=[]
for i in range(15):
row=[]
for j in range(15):
row.append(EstadBoton())
configuracion1.append(row)
#configuracion1[7][7]=EstadBoton(1,'Black violet')
configuracion1[1][1]=EstadBoton(2,'orange',tipo='P')
configuracion1[2][2]=EstadBoton(2,'orange',tipo='P')
configuracion1[3][3]=EstadBoton(2,'orange',tipo='P')
configuracion1[4][4]=EstadBoton(2,'orange',tipo='P')
configuracion1[10][10]=EstadBoton(2,'orange',tipo='P')
configuracion1[11][11]=EstadBoton(2,'orange',tipo='P')
configuracion1[12][12]=EstadBoton(2,'orange',tipo='P')
configuracion1[13][13]=EstadBoton(2,'orange',tipo='P')
configuracion1[1][13]=EstadBoton(2,'orange',tipo='P')
configuracion1[2][12]=EstadBoton(2,'orange',tipo='P')
configuracion1[3][11]=EstadBoton(2,'orange',tipo='P')
configuracion1[4][10]=EstadBoton(2,'orange',tipo='P')
configuracion1[10][4]=EstadBoton(2,'orange',tipo='P')
configuracion1[11][3]=EstadBoton(2,'orange',tipo='P')
configuracion1[12][2]=EstadBoton(2,'orange',tipo='P')
configuracion1[13][1]=EstadBoton(2,'orange',tipo='P')
configuracion1[0][0]=EstadBoton(1,'red',tipo='P')
configuracion1[0][7]=EstadBoton(3,'red',tipo='P')
configuracion1[0][14]=EstadBoton(3,'red',tipo='P')
configuracion1[14][7]=EstadBoton(3,'red',tipo='P')
configuracion1[14][14]=EstadBoton(3,'red',tipo='P')
configuracion1[7][14]=EstadBoton(3,'red',tipo='P')
configuracion1[7][0]=EstadBoton(3,'red',tipo='P')
configuracion1[14][0]=EstadBoton(3,'red',tipo='P')
configuracion1[7][7]=EstadBoton(0,'violet',tipo='P')
configuracion1[1][5]=EstadBoton(-3,'blue',tipo='L')
configuracion1[1][9]=EstadBoton(-3,'blue',tipo='L')
configuracion1[5][1]=EstadBoton(-3,'blue',tipo='L')
configuracion1[5][5]=EstadBoton(-3,'blue',tipo='L')
configuracion1[5][9]=EstadBoton(-3,'blue',tipo='L')
configuracion1[5][13]=EstadBoton(-3,'blue',tipo='L')
configuracion1[9][5]=EstadBoton(-3,'blue',tipo='L')
configuracion1[9][9]=EstadBoton(-3,'blue',tipo='L')
configuracion1[9][13]=EstadBoton(-3,'blue',tipo='L')
configuracion1[13][5]=EstadBoton(-3,'blue',tipo='L')
configuracion1[13][9]=EstadBoton(-3,'blue',tipo='L')
configuracion1[9][1]=EstadBoton(-3,'blue',tipo='L')
configuracion1[0][3]=EstadBoton(-2,'green',tipo='L')
configuracion1[0][11]=EstadBoton(-2,'green',tipo='L')
configuracion1[2][6]=EstadBoton(-2,'green',tipo='L')
configuracion1[2][8]=EstadBoton(-2,'green',tipo='L')
configuracion1[3][0]=EstadBoton(-2,'green',tipo='L')
configuracion1[3][7]=EstadBoton(-2,'green',tipo='L')
configuracion1[3][14]=EstadBoton(-2,'green',tipo='L')
configuracion1[6][2]=EstadBoton(-2,'green',tipo='L')
configuracion1[6][6]=EstadBoton(-2,'green',tipo='L')
configuracion1[6][8]=EstadBoton(-2,'green',tipo='L')
configuracion1[6][12]=EstadBoton(-2,'green',tipo='L')
configuracion1[7][3]=EstadBoton(-2,'green',tipo='L')
configuracion1[7][11]=EstadBoton(2,'green',tipo='L')
configuracion1[8][2]=EstadBoton(-2,'green',tipo='L')
configuracion1[8][6]=EstadBoton(-2,'green',tipo='L')
configuracion1[8][8]=EstadBoton(-2,'green',tipo='L')
configuracion1[8][12]=EstadBoton(-2,'green',tipo='L')
configuracion1[11][0]=EstadBoton(-2,'green',tipo='L')
configuracion1[11][7]=EstadBoton(-2,'green',tipo='L')
configuracion1[11][14]=EstadBoton(-2,'green',tipo='L')
configuracion1[12][6]=EstadBoton(-2,'green',tipo='L')
configuracion1[12][8]=EstadBoton(-2,'green',tipo='L')
configuracion1[14][3]=EstadBoton(-2,'green',tipo='L')
configuracion1[14][11]=EstadBoton(-2,'green',tipo='L')
return configuracion1
| 42.141304 | 111 | 0.664861 | 1,647 | 11,631 | 4.684274 | 0.043716 | 0.171095 | 0.258587 | 0.186649 | 0.943616 | 0.942191 | 0.942191 | 0.942191 | 0.942191 | 0.942191 | 0 | 0.084609 | 0.095607 | 11,631 | 275 | 112 | 42.294545 | 0.648826 | 0.02373 | 0 | 0.683036 | 0 | 0 | 0.096972 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.040179 | false | 0 | 0 | 0.017857 | 0.075893 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
a38749f0a84a40aab0cd70d611781f698e946910 | 7,274 | py | Python | tasks-deploy/true-reverse/check.py | irdkwmnsb/lkshl-ctf | e5c0200ddc8ba73df5f321b87b9763fb1bbaba57 | [
"MIT"
] | 3 | 2021-03-30T06:27:58.000Z | 2021-04-03T17:56:35.000Z | tasks-deploy/true-reverse/check.py | irdkwmnsb/lkshl-ctf | e5c0200ddc8ba73df5f321b87b9763fb1bbaba57 | [
"MIT"
] | null | null | null | tasks-deploy/true-reverse/check.py | irdkwmnsb/lkshl-ctf | e5c0200ddc8ba73df5f321b87b9763fb1bbaba57 | [
"MIT"
] | null | null | null | def check(attempt, context):
if attempt.answer == flags[attempt.participant.id % len(flags)]:
return Checked(True)
if attempt.answer in flags:
return CheckedPlagiarist(False, flags.index(attempt.answer))
return Checked(False)
flags = ['LKL{JUST_E5REV3R_1T_g1Gg0DwwMX}', 'LKL{JUST_E5REV3R_1T_Ay7KxrIYbj}', 'LKL{JUST_E5REV3R_1T_osMwwQwmLu}', 'LKL{JUST_E5REV3R_1T_EZyqJpy0E5}', 'LKL{JUST_E5REV3R_1T_VNZ6vVoytA}', 'LKL{JUST_E5REV3R_1T_16YVzdgMNc}', 'LKL{JUST_E5REV3R_1T_AUbwaGilk2}', 'LKL{JUST_E5REV3R_1T_ww9x5m771W}', 'LKL{JUST_E5REV3R_1T_8ZvKD3Hcwo}', 'LKL{JUST_E5REV3R_1T_S2wpwpLInp}', 'LKL{JUST_E5REV3R_1T_SASQOiiuAP}', 'LKL{JUST_E5REV3R_1T_eMkpHoltGq}', 'LKL{JUST_E5REV3R_1T_tAMA7p3iZr}', 'LKL{JUST_E5REV3R_1T_OMuCZsMjm6}', 'LKL{JUST_E5REV3R_1T_pd4wMiC8Nz}', 'LKL{JUST_E5REV3R_1T_oMnCDVh2UY}', 'LKL{JUST_E5REV3R_1T_1tPBgDxFct}', 'LKL{JUST_E5REV3R_1T_jQgppKrgRf}', 'LKL{JUST_E5REV3R_1T_y5QUQYKxoD}', 'LKL{JUST_E5REV3R_1T_OGFaEyqejk}', 'LKL{JUST_E5REV3R_1T_NCQNLMXnyw}', 'LKL{JUST_E5REV3R_1T_aD6j5MtmtP}', 'LKL{JUST_E5REV3R_1T_uM5emBEYvj}', 'LKL{JUST_E5REV3R_1T_4RSyPMGWOv}', 'LKL{JUST_E5REV3R_1T_G9pn53y83f}', 'LKL{JUST_E5REV3R_1T_wAh8R2G8Im}', 'LKL{JUST_E5REV3R_1T_pYl8dNE6mn}', 'LKL{JUST_E5REV3R_1T_dIiUofW9D0}', 'LKL{JUST_E5REV3R_1T_uCD7SrSBL0}', 'LKL{JUST_E5REV3R_1T_A9K2yX0kCd}', 'LKL{JUST_E5REV3R_1T_EBAMpbcsJZ}', 'LKL{JUST_E5REV3R_1T_pdZazHUmjj}', 'LKL{JUST_E5REV3R_1T_g6qxOoQ3JY}', 'LKL{JUST_E5REV3R_1T_QwFPNUR8zC}', 'LKL{JUST_E5REV3R_1T_Oc3n8px7pU}', 'LKL{JUST_E5REV3R_1T_GebSd9sdqV}', 'LKL{JUST_E5REV3R_1T_xhbsTWdObZ}', 'LKL{JUST_E5REV3R_1T_CQKxd5R4Ge}', 'LKL{JUST_E5REV3R_1T_QYRgnOZMWd}', 'LKL{JUST_E5REV3R_1T_Ri8bPHBADI}', 'LKL{JUST_E5REV3R_1T_avGi0HMe0L}', 'LKL{JUST_E5REV3R_1T_7jf7GQMIGu}', 'LKL{JUST_E5REV3R_1T_L1HBKqLmDW}', 'LKL{JUST_E5REV3R_1T_1zzoDhCLAx}', 'LKL{JUST_E5REV3R_1T_rlW39eg1Rr}', 'LKL{JUST_E5REV3R_1T_ddZqBsrIVG}', 'LKL{JUST_E5REV3R_1T_4eXR7OSsaH}', 'LKL{JUST_E5REV3R_1T_2hZEwdmHPy}', 'LKL{JUST_E5REV3R_1T_XennPdzZ6t}', 'LKL{JUST_E5REV3R_1T_IbiBCoruEY}', 'LKL{JUST_E5REV3R_1T_zVbVpB6cCa}', 'LKL{JUST_E5REV3R_1T_H2aaT33wf9}', 'LKL{JUST_E5REV3R_1T_86f4tQeEd4}', 'LKL{JUST_E5REV3R_1T_MpVzWYM0ae}', 'LKL{JUST_E5REV3R_1T_Ujf7MLCSW2}', 'LKL{JUST_E5REV3R_1T_GgNVPusKuS}', 
'LKL{JUST_E5REV3R_1T_moopwfB59q}', 'LKL{JUST_E5REV3R_1T_MFz5jzzafR}', 'LKL{JUST_E5REV3R_1T_rWyNEX3rvP}', 'LKL{JUST_E5REV3R_1T_FdiGjgIUkb}', 'LKL{JUST_E5REV3R_1T_hdr9CPQcp3}', 'LKL{JUST_E5REV3R_1T_yZWAcSNcOe}', 'LKL{JUST_E5REV3R_1T_fRxC6twzhz}', 'LKL{JUST_E5REV3R_1T_etpfzcg72u}', 'LKL{JUST_E5REV3R_1T_Ah9ofaFmMe}', 'LKL{JUST_E5REV3R_1T_iYbCChaRwE}', 'LKL{JUST_E5REV3R_1T_1eP81zkATC}', 'LKL{JUST_E5REV3R_1T_CSoarQWJqW}', 'LKL{JUST_E5REV3R_1T_yIidERRQuy}', 'LKL{JUST_E5REV3R_1T_xqsKieseI2}', 'LKL{JUST_E5REV3R_1T_steiy1Gjt2}', 'LKL{JUST_E5REV3R_1T_W47GOA212L}', 'LKL{JUST_E5REV3R_1T_WUiqhlIDUl}', 'LKL{JUST_E5REV3R_1T_rKZlqgAo9T}', 'LKL{JUST_E5REV3R_1T_Vx9V3U58hD}', 'LKL{JUST_E5REV3R_1T_L97Fjx3OLr}', 'LKL{JUST_E5REV3R_1T_gOy9q6KkwW}', 'LKL{JUST_E5REV3R_1T_hh5jv8svYE}', 'LKL{JUST_E5REV3R_1T_Wnckhr1PHW}', 'LKL{JUST_E5REV3R_1T_5ZMVpTcQ7w}', 'LKL{JUST_E5REV3R_1T_Hq9oUK566N}', 'LKL{JUST_E5REV3R_1T_cbrBeLs567}', 'LKL{JUST_E5REV3R_1T_cX0ez1ZM8d}', 'LKL{JUST_E5REV3R_1T_23RSwvICpE}', 'LKL{JUST_E5REV3R_1T_pWaxkHXIF3}', 'LKL{JUST_E5REV3R_1T_5ZjVgBB86c}', 'LKL{JUST_E5REV3R_1T_5gb3RBj6vH}', 'LKL{JUST_E5REV3R_1T_LJdKU55dQd}', 'LKL{JUST_E5REV3R_1T_ttkrJfU6Nl}', 'LKL{JUST_E5REV3R_1T_MuVpMQ8XAp}', 'LKL{JUST_E5REV3R_1T_0f59ne8mek}', 'LKL{JUST_E5REV3R_1T_ldBQfBD03H}', 'LKL{JUST_E5REV3R_1T_yeY6T3YJbz}', 'LKL{JUST_E5REV3R_1T_8L494ljIgC}', 'LKL{JUST_E5REV3R_1T_qQX3zPwaIu}', 'LKL{JUST_E5REV3R_1T_G5ekxxYGQu}', 'LKL{JUST_E5REV3R_1T_piQ6ruUnDp}', 'LKL{JUST_E5REV3R_1T_TIyrBNTpgZ}', 'LKL{JUST_E5REV3R_1T_YZWUPXQaM4}', 'LKL{JUST_E5REV3R_1T_k2mh1lFwXN}', 'LKL{JUST_E5REV3R_1T_Emcc4i8tb0}', 'LKL{JUST_E5REV3R_1T_o056JgRnkl}', 'LKL{JUST_E5REV3R_1T_S58fqdSPEi}', 'LKL{JUST_E5REV3R_1T_6iOI3fAOGy}', 'LKL{JUST_E5REV3R_1T_TVpkgoTvQw}', 'LKL{JUST_E5REV3R_1T_oRiWNrhsZr}', 'LKL{JUST_E5REV3R_1T_KKNZDCLIVS}', 'LKL{JUST_E5REV3R_1T_q0iT4dooBI}', 'LKL{JUST_E5REV3R_1T_MFwWHwnHX3}', 'LKL{JUST_E5REV3R_1T_P8aLuAQTjW}', 'LKL{JUST_E5REV3R_1T_1h4hXhGw5X}', 'LKL{JUST_E5REV3R_1T_cteCuByPr1}', 'LKL{JUST_E5REV3R_1T_1y3DQhSUBI}', 
'LKL{JUST_E5REV3R_1T_FIStspeHPs}', 'LKL{JUST_E5REV3R_1T_JPBBn35It7}', 'LKL{JUST_E5REV3R_1T_cLYzuWmlDh}', 'LKL{JUST_E5REV3R_1T_th6HszJ2rw}', 'LKL{JUST_E5REV3R_1T_yilcLyAXsx}', 'LKL{JUST_E5REV3R_1T_VhSBswERbX}', 'LKL{JUST_E5REV3R_1T_skVWJbkzcx}', 'LKL{JUST_E5REV3R_1T_vlM3rCJpo4}', 'LKL{JUST_E5REV3R_1T_8Moj4m6wNm}', 'LKL{JUST_E5REV3R_1T_P1Xlik3r7V}', 'LKL{JUST_E5REV3R_1T_DysIu1i4td}', 'LKL{JUST_E5REV3R_1T_EFxEG8yul6}', 'LKL{JUST_E5REV3R_1T_kA2pi39O87}', 'LKL{JUST_E5REV3R_1T_8a53Oo9BQd}', 'LKL{JUST_E5REV3R_1T_5vc3EYFCDb}', 'LKL{JUST_E5REV3R_1T_jZR78rpjP2}', 'LKL{JUST_E5REV3R_1T_Jwc8PpBI4o}', 'LKL{JUST_E5REV3R_1T_BLKiCgS0bp}', 'LKL{JUST_E5REV3R_1T_4OP8dlTHsQ}', 'LKL{JUST_E5REV3R_1T_7QGs0j5NIf}', 'LKL{JUST_E5REV3R_1T_NMlA0zRw6W}', 'LKL{JUST_E5REV3R_1T_kr9MFb305T}', 'LKL{JUST_E5REV3R_1T_QGuhsm62Tl}', 'LKL{JUST_E5REV3R_1T_vVJD9m5VH5}', 'LKL{JUST_E5REV3R_1T_eEVjlPVESf}', 'LKL{JUST_E5REV3R_1T_YiY0iq0MJA}', 'LKL{JUST_E5REV3R_1T_HAR2sXl1Tl}', 'LKL{JUST_E5REV3R_1T_h8jF9gK20h}', 'LKL{JUST_E5REV3R_1T_LDrpRLMc7Q}', 'LKL{JUST_E5REV3R_1T_aXexAtW7Wx}', 'LKL{JUST_E5REV3R_1T_UVJ2R8RHgh}', 'LKL{JUST_E5REV3R_1T_yXDHlF4b9i}', 'LKL{JUST_E5REV3R_1T_FmdFq2RPbH}', 'LKL{JUST_E5REV3R_1T_7xNbQF8Phj}', 'LKL{JUST_E5REV3R_1T_Xg3vPuLldl}', 'LKL{JUST_E5REV3R_1T_ZNRJVv4v1f}', 'LKL{JUST_E5REV3R_1T_HyUIYxoyK9}', 'LKL{JUST_E5REV3R_1T_o3NiF5HsOF}', 'LKL{JUST_E5REV3R_1T_yt7ep659U7}', 'LKL{JUST_E5REV3R_1T_aPGmD95jJS}', 'LKL{JUST_E5REV3R_1T_gj8Vk5KPdc}', 'LKL{JUST_E5REV3R_1T_x58sxxQRl9}', 'LKL{JUST_E5REV3R_1T_zezlIJBElI}', 'LKL{JUST_E5REV3R_1T_3eFIxPUWwG}', 'LKL{JUST_E5REV3R_1T_sLo7RPLRUg}', 'LKL{JUST_E5REV3R_1T_3EUh1BETkj}', 'LKL{JUST_E5REV3R_1T_NrDN0MNpRF}', 'LKL{JUST_E5REV3R_1T_WoxURzWONC}', 'LKL{JUST_E5REV3R_1T_pILJLpQ9aX}', 'LKL{JUST_E5REV3R_1T_BwkrsoYO36}', 'LKL{JUST_E5REV3R_1T_jNphWCF0Pl}', 'LKL{JUST_E5REV3R_1T_9NuSLndvIF}', 'LKL{JUST_E5REV3R_1T_lvYntMRvvA}', 'LKL{JUST_E5REV3R_1T_wMY3L4njFS}', 'LKL{JUST_E5REV3R_1T_WOOamsjxuL}', 'LKL{JUST_E5REV3R_1T_EoY27DhRFH}', 'LKL{JUST_E5REV3R_1T_pXVYLdO65G}', 
'LKL{JUST_E5REV3R_1T_s3mD4aNMV1}', 'LKL{JUST_E5REV3R_1T_CBGWniU7Xg}', 'LKL{JUST_E5REV3R_1T_HYR4xJe1mO}', 'LKL{JUST_E5REV3R_1T_X4FwhP8Ksb}', 'LKL{JUST_E5REV3R_1T_LNym5G3pOs}', 'LKL{JUST_E5REV3R_1T_SgSpFgWjq8}', 'LKL{JUST_E5REV3R_1T_QGrnVuw5O0}', 'LKL{JUST_E5REV3R_1T_MG5C3cWTIh}', 'LKL{JUST_E5REV3R_1T_RV4kza6zuK}', 'LKL{JUST_E5REV3R_1T_WdyXFRlGC5}', 'LKL{JUST_E5REV3R_1T_F9cCizZnFx}', 'LKL{JUST_E5REV3R_1T_hmZY21R7mY}', 'LKL{JUST_E5REV3R_1T_T7NzJ8lgOL}', 'LKL{JUST_E5REV3R_1T_gSQLVTguHC}', 'LKL{JUST_E5REV3R_1T_lehBGXiVH0}', 'LKL{JUST_E5REV3R_1T_2beymxXPNF}', 'LKL{JUST_E5REV3R_1T_kU9CudwR0H}', 'LKL{JUST_E5REV3R_1T_UiPO7Mp1rP}', 'LKL{JUST_E5REV3R_1T_zvgrH5r566}', 'LKL{JUST_E5REV3R_1T_JvsosDGRQg}', 'LKL{JUST_E5REV3R_1T_ASeBhNnOtF}', 'LKL{JUST_E5REV3R_1T_Et1TKbyZQQ}', 'LKL{JUST_E5REV3R_1T_QVFsV5Ppil}', 'LKL{JUST_E5REV3R_1T_QaPkqXOtK3}', 'LKL{JUST_E5REV3R_1T_ZMFoWFY3uV}', 'LKL{JUST_E5REV3R_1T_IIN6HTVsXZ}', 'LKL{JUST_E5REV3R_1T_sOjkjJr9zu}', 'LKL{JUST_E5REV3R_1T_gsUdfp11Wz}', 'LKL{JUST_E5REV3R_1T_CInaqK9H74}', 'LKL{JUST_E5REV3R_1T_Yh577HTLo4}'] | 909.25 | 7,008 | 0.822519 | 1,032 | 7,274 | 5.216085 | 0.214147 | 0.260078 | 0.520156 | 0.594464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.132326 | 0.035881 | 7,274 | 8 | 7,008 | 909.25 | 0.635249 | 0 | 0 | 0 | 0 | 0 | 0.853054 | 0.853054 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0 | 0 | 0.571429 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 9 |
6e8d1dca2807d7de293a9e6cfa1b080fcaec29ee | 342 | py | Python | Personal scripts/Py_networking/browser/browser.py | powplowdevs/2021-2022-Projects | 1b704e9dbb2768a3acf271b2de87ccb28ab8b933 | [
"MIT"
] | null | null | null | Personal scripts/Py_networking/browser/browser.py | powplowdevs/2021-2022-Projects | 1b704e9dbb2768a3acf271b2de87ccb28ab8b933 | [
"MIT"
] | null | null | null | Personal scripts/Py_networking/browser/browser.py | powplowdevs/2021-2022-Projects | 1b704e9dbb2768a3acf271b2de87ccb28ab8b933 | [
"MIT"
] | null | null | null | # import os
# os.system("pip install selenium==3.141.0")
# from webbot import Browser
# web = Browser()
# web.go_to('https://bing.com')
# website = input('Service has audio')
import os
#os.system("pip install selenium==3.141.0")
from webbot import Browser
web = Browser()
web.go_to('https://bing.com')
website = input('Service has audio')
| 21.375 | 44 | 0.695906 | 54 | 342 | 4.37037 | 0.425926 | 0.169492 | 0.084746 | 0.135593 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0.03367 | 0.131579 | 342 | 15 | 45 | 22.8 | 0.760943 | 0.596491 | 0 | 0 | 0 | 0 | 0.255814 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.4 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
6eaa61e85b68e0bbf300a177a6c150048243e9ed | 2,818 | py | Python | keybow_hue/helpers/keys.py | ian-antking/keybow-hue | 4279ef77efced77a6d08c3e4b3f9e24075a55a86 | [
"MIT"
] | null | null | null | keybow_hue/helpers/keys.py | ian-antking/keybow-hue | 4279ef77efced77a6d08c3e4b3f9e24075a55a86 | [
"MIT"
] | null | null | null | keybow_hue/helpers/keys.py | ian-antking/keybow-hue | 4279ef77efced77a6d08c3e4b3f9e24075a55a86 | [
"MIT"
] | null | null | null | import colorsys
def convert_hue_color(hue, saturaton, brightness):
color = colorsys.hsv_to_rgb(hue / 65535, saturaton / 254, brightness / 254)
return [int(value * 255) for value in color]
def generate_keybow_color(hue, saturaton, brightness):
color = colorsys.hsv_to_rgb(hue, saturaton, brightness)
return [int(value * 255) for value in color]
def build_power_button(room, keyboard, key_index):
return keyboard.Key(key_index, room.toggle_on_off, lambda: (0, 255, 0) if room.get_state('on') else (25, 0, 0))
def build_dimmer_button(room, keyboard, key_index):
return keyboard.Key(key_index, room.dim, lambda: (convert_hue_color(room.get_state('hue'), room.get_state('sat'), room.get_state('bri') - 50) if room.get_state('on') else [0] * 3))
def build_bright_button(room, keyboard, key_index):
return keyboard.Key(key_index, room.bright, lambda: (convert_hue_color(room.get_state('hue'), room.get_state('sat'), room.get_state('bri') + 50) if room.get_state('on') else [0] * 3))
def build_increase_sat_button(room, keyboard, key_index):
return keyboard.Key(key_index, room.increase_sat, lambda: (convert_hue_color(room.get_state('hue'), room.get_state('sat') + 50, 254) if room.get_state('on') else [0] * 3))
def build_decrease_sat_button(room, keyboard, key_index):
return keyboard.Key(key_index, room.decrease_sat, lambda: (convert_hue_color(room.get_state('hue'), room.get_state('sat') - 50, 254) if room.get_state('on') else [0] * 3))
def build_increase_hue_button(room, keyboard, key_index):
return keyboard.Key(key_index, room.increase_hue, lambda: (convert_hue_color(room.get_state('hue') + 6553, 254, 254) if room.get_state('on') else [0] * 3))
def build_decrease_hue_button(room, keyboard, key_index):
return keyboard.Key(key_index, room.decrease_hue, lambda: (convert_hue_color(room.get_state('hue') - 6553, 254, 254) if room.get_state('on') else [0] * 3))
def build_hue_indicator(room, keyboard, key_index):
return keyboard.Key(key_index, room.update_room, lambda: (convert_hue_color(room.get_state('hue'), 254, 254) if room.get_state('on') else [0] * 3))
def build_saturation_indicator(room, keyboard, key_index):
return keyboard.Key(key_index, room.update_room, lambda: (convert_hue_color(room.get_state('hue'), room.get_state('sat'), 254) if room.get_state('on') else [0] * 3))
def build_blank_button(room, keyboard, key_index):
return keyboard.Key(key_index, room.update_room, lambda: (convert_hue_color(room.get_state('hue'), room.get_state('sat'), room.get_state('bri')) if room.get_state('on') else [0] * 3))
def build_mode_button(engine, room, keyboard, key_index):
    """Build a key that switches the engine's keyboard mode.

    The LED encodes the current mode; black when `room` is off.
    """
    def led_colour():
        if not room.get_state('on'):
            return [0, 0, 0]
        # NOTE(review): precedence gives (mode % num_keyboards) / 10 —
        # presumably a 0..1 fraction for the colour helper; confirm intent.
        return generate_keybow_color(engine.mode % len(engine.keyboards) / 10, 1, 1)
    return keyboard.Key(key_index, engine.change_mode, led_colour)
| 65.534884 | 187 | 0.738822 | 459 | 2,818 | 4.285403 | 0.12854 | 0.103203 | 0.176919 | 0.111845 | 0.866802 | 0.866802 | 0.856634 | 0.856634 | 0.84545 | 0.786985 | 0 | 0.037155 | 0.111781 | 2,818 | 42 | 188 | 67.095238 | 0.748702 | 0 | 0 | 0.068966 | 0 | 0 | 0.026969 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.448276 | false | 0 | 0.034483 | 0.37931 | 0.931034 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 9 |
6ecea781d26c2e9399cac3dc4f3cde38b3ac9e91 | 95 | py | Python | pynotes/add_link/format_link.py | afonsopacifer/pynotes | 442b2a3f802d523f1872ba912af880fdd3849062 | [
"MIT"
] | 8 | 2018-07-27T15:06:29.000Z | 2019-07-23T04:01:24.000Z | pynotes/add_link/format_link.py | afonsopacifer/pynotes | 442b2a3f802d523f1872ba912af880fdd3849062 | [
"MIT"
] | null | null | null | pynotes/add_link/format_link.py | afonsopacifer/pynotes | 442b2a3f802d523f1872ba912af880fdd3849062 | [
"MIT"
def format_link(link_title, link_url):
    """Return a markdown bullet-list link line.

    :param link_title: text shown for the link
    :param link_url: target URL
    :return: string of the form '- [title](url) \\n' (trailing space kept
             for compatibility with existing notes)
    """
    # str.format instead of '+' concatenation: clearer and handles the
    # template in one place.
    return '- [{}]({}) \n'.format(link_title, link_url)
42d29a81586740eacac40a2c841529446e0a00b1 | 68,202 | py | Python | swagger_client/apis/cart_api.py | yusong-shen/ecommerce-checkout-api-client-python | 0cfe9cd0120d3453f5efec2814b367a14a703b12 | [
"Apache-2.0"
] | null | null | null | swagger_client/apis/cart_api.py | yusong-shen/ecommerce-checkout-api-client-python | 0cfe9cd0120d3453f5efec2814b367a14a703b12 | [
"Apache-2.0"
] | null | null | null | swagger_client/apis/cart_api.py | yusong-shen/ecommerce-checkout-api-client-python | 0cfe9cd0120d3453f5efec2814b367a14a703b12 | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
ECommerce Checkout Flow API
Registration, Address Information, Delivery Options, Payment, Confirmation
OpenAPI spec version: 0.2
Generated by: https://github.com/swagger-api/swagger-codegen.git
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class CartApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def checkout_available_countries_get_using_get(self, **kwargs):
"""
Get available billing and shipping countries
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.checkout_available_countries_get_using_get(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:return: AvailableCountries
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.checkout_available_countries_get_using_get_with_http_info(**kwargs)
else:
(data) = self.checkout_available_countries_get_using_get_with_http_info(**kwargs)
return data
    def checkout_available_countries_get_using_get_with_http_info(self, **kwargs):
        """
        Get available billing and shipping countries
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.checkout_available_countries_get_using_get_with_http_info(callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :return: AvailableCountries
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Accepted keyword arguments: `callback` plus the internal knobs
        # understood by ApiClient.call_api.
        all_params = []
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Fail fast on any keyword argument the generated API does not know.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method checkout_available_countries_get_using_get" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        resource_path = '/checkout/availableCountries'.replace('{format}', 'json')
        path_params = {}

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['*/*'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting (the generated spec declares none here).
        auth_settings = []

        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='AvailableCountries',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def checkout_checkout_id_available_payment_methods_get_using_get(self, checkout_id, **kwargs):
"""
Get available payment methods
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.checkout_checkout_id_available_payment_methods_get_using_get(checkout_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str checkout_id: Checkout Id (required)
:return: AvailablePaymentMethodList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.checkout_checkout_id_available_payment_methods_get_using_get_with_http_info(checkout_id, **kwargs)
else:
(data) = self.checkout_checkout_id_available_payment_methods_get_using_get_with_http_info(checkout_id, **kwargs)
return data
    def checkout_checkout_id_available_payment_methods_get_using_get_with_http_info(self, checkout_id, **kwargs):
        """
        Get available payment methods
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.checkout_checkout_id_available_payment_methods_get_using_get_with_http_info(checkout_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str checkout_id: Checkout Id (required)
        :return: AvailablePaymentMethodList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['checkout_id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Fail fast on any keyword argument the generated API does not know.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method checkout_checkout_id_available_payment_methods_get_using_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'checkout_id' is set
        if ('checkout_id' not in params) or (params['checkout_id'] is None):
            raise ValueError("Missing the required parameter `checkout_id` when calling `checkout_checkout_id_available_payment_methods_get_using_get`")

        collection_formats = {}

        resource_path = '/checkout/{checkoutId}/availablePaymentMethods'.replace('{format}', 'json')
        path_params = {}
        if 'checkout_id' in params:
            path_params['checkoutId'] = params['checkout_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting (the generated spec declares none here).
        auth_settings = []

        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='AvailablePaymentMethodList',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def checkout_checkout_id_available_shipping_methods_get_using_get(self, checkout_id, **kwargs):
"""
Get shipping info
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.checkout_checkout_id_available_shipping_methods_get_using_get(checkout_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str checkout_id: Checkout Id (required)
:return: AvailableShippingMethodList
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.checkout_checkout_id_available_shipping_methods_get_using_get_with_http_info(checkout_id, **kwargs)
else:
(data) = self.checkout_checkout_id_available_shipping_methods_get_using_get_with_http_info(checkout_id, **kwargs)
return data
    def checkout_checkout_id_available_shipping_methods_get_using_get_with_http_info(self, checkout_id, **kwargs):
        """
        Get shipping info
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.checkout_checkout_id_available_shipping_methods_get_using_get_with_http_info(checkout_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str checkout_id: Checkout Id (required)
        :return: AvailableShippingMethodList
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['checkout_id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Fail fast on any keyword argument the generated API does not know.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method checkout_checkout_id_available_shipping_methods_get_using_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'checkout_id' is set
        if ('checkout_id' not in params) or (params['checkout_id'] is None):
            raise ValueError("Missing the required parameter `checkout_id` when calling `checkout_checkout_id_available_shipping_methods_get_using_get`")

        collection_formats = {}

        resource_path = '/checkout/{checkoutId}/availableShippingMethods'.replace('{format}', 'json')
        path_params = {}
        if 'checkout_id' in params:
            path_params['checkoutId'] = params['checkout_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting (the generated spec declares none here).
        auth_settings = []

        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='AvailableShippingMethodList',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def checkout_checkout_id_billing_address_put_using_put(self, checkout_id, body, **kwargs):
"""
Update the billing address
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.checkout_checkout_id_billing_address_put_using_put(checkout_id, body, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str checkout_id: Checkout Id (required)
:param Address body: Cart object that needs to be updated (required)
:return: Checkout
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.checkout_checkout_id_billing_address_put_using_put_with_http_info(checkout_id, body, **kwargs)
else:
(data) = self.checkout_checkout_id_billing_address_put_using_put_with_http_info(checkout_id, body, **kwargs)
return data
    def checkout_checkout_id_billing_address_put_using_put_with_http_info(self, checkout_id, body, **kwargs):
        """
        Update the billing address
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.checkout_checkout_id_billing_address_put_using_put_with_http_info(checkout_id, body, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str checkout_id: Checkout Id (required)
        :param Address body: Cart object that needs to be updated (required)
        :return: Checkout
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['checkout_id', 'body']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Fail fast on any keyword argument the generated API does not know.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method checkout_checkout_id_billing_address_put_using_put" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'checkout_id' is set
        if ('checkout_id' not in params) or (params['checkout_id'] is None):
            raise ValueError("Missing the required parameter `checkout_id` when calling `checkout_checkout_id_billing_address_put_using_put`")
        # verify the required parameter 'body' is set
        if ('body' not in params) or (params['body'] is None):
            raise ValueError("Missing the required parameter `body` when calling `checkout_checkout_id_billing_address_put_using_put`")

        collection_formats = {}

        resource_path = '/checkout/{checkoutId}/billingAddress'.replace('{format}', 'json')
        path_params = {}
        if 'checkout_id' in params:
            path_params['checkoutId'] = params['checkout_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # The Address payload is sent as the request body.
        if 'body' in params:
            body_params = params['body']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting (the generated spec declares none here).
        auth_settings = []

        return self.api_client.call_api(resource_path, 'PUT',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Checkout',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def checkout_checkout_id_customer_attributes_put_using_put(self, checkout_id, customer_attributes, **kwargs):
"""
Set or update customer attributes
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.checkout_checkout_id_customer_attributes_put_using_put(checkout_id, customer_attributes, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str checkout_id: Checkout Id (required)
:param CustomerAttributes customer_attributes: Customer attributes (required)
:return: Checkout
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.checkout_checkout_id_customer_attributes_put_using_put_with_http_info(checkout_id, customer_attributes, **kwargs)
else:
(data) = self.checkout_checkout_id_customer_attributes_put_using_put_with_http_info(checkout_id, customer_attributes, **kwargs)
return data
    def checkout_checkout_id_customer_attributes_put_using_put_with_http_info(self, checkout_id, customer_attributes, **kwargs):
        """
        Set or update customer attributes
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.checkout_checkout_id_customer_attributes_put_using_put_with_http_info(checkout_id, customer_attributes, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str checkout_id: Checkout Id (required)
        :param CustomerAttributes customer_attributes: Customer attributes (required)
        :return: Checkout
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['checkout_id', 'customer_attributes']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Fail fast on any keyword argument the generated API does not know.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method checkout_checkout_id_customer_attributes_put_using_put" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'checkout_id' is set
        if ('checkout_id' not in params) or (params['checkout_id'] is None):
            raise ValueError("Missing the required parameter `checkout_id` when calling `checkout_checkout_id_customer_attributes_put_using_put`")
        # verify the required parameter 'customer_attributes' is set
        if ('customer_attributes' not in params) or (params['customer_attributes'] is None):
            raise ValueError("Missing the required parameter `customer_attributes` when calling `checkout_checkout_id_customer_attributes_put_using_put`")

        collection_formats = {}

        resource_path = '/checkout/{checkoutId}/customerAttributes'.replace('{format}', 'json')
        path_params = {}
        if 'checkout_id' in params:
            path_params['checkoutId'] = params['checkout_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # The CustomerAttributes payload is sent as the request body.
        if 'customer_attributes' in params:
            body_params = params['customer_attributes']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting (the generated spec declares none here).
        auth_settings = []

        return self.api_client.call_api(resource_path, 'PUT',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Checkout',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def checkout_checkout_id_get_using_get(self, checkout_id, **kwargs):
"""
Get an existing cart
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.checkout_checkout_id_get_using_get(checkout_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str checkout_id: Checkout Id (required)
:return: Checkout
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.checkout_checkout_id_get_using_get_with_http_info(checkout_id, **kwargs)
else:
(data) = self.checkout_checkout_id_get_using_get_with_http_info(checkout_id, **kwargs)
return data
    def checkout_checkout_id_get_using_get_with_http_info(self, checkout_id, **kwargs):
        """
        Get an existing cart
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.checkout_checkout_id_get_using_get_with_http_info(checkout_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str checkout_id: Checkout Id (required)
        :return: Checkout
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['checkout_id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Fail fast on any keyword argument the generated API does not know.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method checkout_checkout_id_get_using_get" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'checkout_id' is set
        if ('checkout_id' not in params) or (params['checkout_id'] is None):
            raise ValueError("Missing the required parameter `checkout_id` when calling `checkout_checkout_id_get_using_get`")

        collection_formats = {}

        resource_path = '/checkout/{checkoutId}'.replace('{format}', 'json')
        path_params = {}
        if 'checkout_id' in params:
            path_params['checkoutId'] = params['checkout_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting (the generated spec declares none here).
        auth_settings = []

        return self.api_client.call_api(resource_path, 'GET',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Checkout',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def checkout_checkout_id_items_item_id_delete_using_delete(self, checkout_id, item_id, **kwargs):
"""
Delete an item from the shopping cart
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.checkout_checkout_id_items_item_id_delete_using_delete(checkout_id, item_id, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str checkout_id: Checkout Id (required)
:param str item_id: Item Id (required)
:return: Checkout
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.checkout_checkout_id_items_item_id_delete_using_delete_with_http_info(checkout_id, item_id, **kwargs)
else:
(data) = self.checkout_checkout_id_items_item_id_delete_using_delete_with_http_info(checkout_id, item_id, **kwargs)
return data
    def checkout_checkout_id_items_item_id_delete_using_delete_with_http_info(self, checkout_id, item_id, **kwargs):
        """
        Delete an item from the shopping cart
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please define a `callback` function
        to be invoked when receiving the response.
        >>> def callback_function(response):
        >>>     pprint(response)
        >>>
        >>> thread = api.checkout_checkout_id_items_item_id_delete_using_delete_with_http_info(checkout_id, item_id, callback=callback_function)

        :param callback function: The callback function
            for asynchronous request. (optional)
        :param str checkout_id: Checkout Id (required)
        :param str item_id: Item Id (required)
        :return: Checkout
                 If the method is called asynchronously,
                 returns the request thread.
        """
        all_params = ['checkout_id', 'item_id']
        all_params.append('callback')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        # Fail fast on any keyword argument the generated API does not know.
        for key, val in iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method checkout_checkout_id_items_item_id_delete_using_delete" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'checkout_id' is set
        if ('checkout_id' not in params) or (params['checkout_id'] is None):
            raise ValueError("Missing the required parameter `checkout_id` when calling `checkout_checkout_id_items_item_id_delete_using_delete`")
        # verify the required parameter 'item_id' is set
        if ('item_id' not in params) or (params['item_id'] is None):
            raise ValueError("Missing the required parameter `item_id` when calling `checkout_checkout_id_items_item_id_delete_using_delete`")

        collection_formats = {}

        resource_path = '/checkout/{checkoutId}/items/{itemId}'.replace('{format}', 'json')
        path_params = {}
        if 'checkout_id' in params:
            path_params['checkoutId'] = params['checkout_id']
        if 'item_id' in params:
            path_params['itemId'] = params['item_id']

        query_params = {}

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.\
            select_header_accept(['application/json'])
        if not header_params['Accept']:
            del header_params['Accept']

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.\
            select_header_content_type(['application/json'])

        # Authentication setting (the generated spec declares none here).
        auth_settings = []

        return self.api_client.call_api(resource_path, 'DELETE',
                                        path_params,
                                        query_params,
                                        header_params,
                                        body=body_params,
                                        post_params=form_params,
                                        files=local_var_files,
                                        response_type='Checkout',
                                        auth_settings=auth_settings,
                                        callback=params.get('callback'),
                                        _return_http_data_only=params.get('_return_http_data_only'),
                                        _preload_content=params.get('_preload_content', True),
                                        _request_timeout=params.get('_request_timeout'),
                                        collection_formats=collection_formats)
def checkout_checkout_id_items_item_id_put_using_put(self, checkout_id, item_id, item, **kwargs):
"""
Update an existing item from the shopping cart
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.checkout_checkout_id_items_item_id_put_using_put(checkout_id, item_id, item, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str checkout_id: Checkout Id (required)
:param str item_id: Item Id (required)
:param Product item: Item (required)
:return: Checkout
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.checkout_checkout_id_items_item_id_put_using_put_with_http_info(checkout_id, item_id, item, **kwargs)
else:
(data) = self.checkout_checkout_id_items_item_id_put_using_put_with_http_info(checkout_id, item_id, item, **kwargs)
return data
def checkout_checkout_id_items_item_id_put_using_put_with_http_info(self, checkout_id, item_id, item, **kwargs):
    """
    Update an existing item from the shopping cart
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.checkout_checkout_id_items_item_id_put_using_put_with_http_info(checkout_id, item_id, item, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str checkout_id: Checkout Id (required)
    :param str item_id: Item Id (required)
    :param Product item: Item (required)
    :return: Checkout
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments accepted in addition to the positional parameters.
    all_params = ['checkout_id', 'item_id', 'item']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: `params` is a snapshot of this function's locals taken at this
    # point; validated kwargs are merged into it below, and every later
    # lookup goes through `params`, so the local names above are load-bearing.
    params = locals()
    # `iteritems` comes from a module-level compat import not visible in
    # this chunk (presumably six).
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method checkout_checkout_id_items_item_id_put_using_put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'checkout_id' is set
    if ('checkout_id' not in params) or (params['checkout_id'] is None):
        raise ValueError("Missing the required parameter `checkout_id` when calling `checkout_checkout_id_items_item_id_put_using_put`")
    # verify the required parameter 'item_id' is set
    if ('item_id' not in params) or (params['item_id'] is None):
        raise ValueError("Missing the required parameter `item_id` when calling `checkout_checkout_id_items_item_id_put_using_put`")
    # verify the required parameter 'item' is set
    if ('item' not in params) or (params['item'] is None):
        raise ValueError("Missing the required parameter `item` when calling `checkout_checkout_id_items_item_id_put_using_put`")

    collection_formats = {}

    # Swagger-style path template; `{format}` is fixed to json.
    resource_path = '/checkout/{checkoutId}/items/{itemId}'.replace('{format}', 'json')
    path_params = {}
    if 'checkout_id' in params:
        path_params['checkoutId'] = params['checkout_id']
    if 'item_id' in params:
        path_params['itemId'] = params['item_id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # The updated item is sent as the JSON request body.
    body_params = None
    if 'item' in params:
        body_params = params['item']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        # An empty selection means no Accept header should be sent at all.
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = []  # no authentication configured for this endpoint

    # Delegate to the shared client; with `callback` the call is async and
    # the request thread is returned instead of the deserialized Checkout.
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Checkout',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def checkout_checkout_id_pay_post_using_post(self, checkout_id, body, **kwargs):
    """
    Pay the cart total
    Synchronous by default; supply a `callback` keyword to run the HTTP
    request asynchronously, in which case the request thread is returned.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.checkout_checkout_id_pay_post_using_post(checkout_id, body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str checkout_id: Checkout Id (required)
    :param PaymentMethod body: Payment method (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers only ever want the payload.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async branches are the same delegated call.
    return self.checkout_checkout_id_pay_post_using_post_with_http_info(
        checkout_id, body, **kwargs
    )
def checkout_checkout_id_pay_post_using_post_with_http_info(self, checkout_id, body, **kwargs):
    """
    Pay the cart total
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.checkout_checkout_id_pay_post_using_post_with_http_info(checkout_id, body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str checkout_id: Checkout Id (required)
    :param PaymentMethod body: Payment method (required)
    :return: None
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments accepted in addition to the positional parameters.
    all_params = ['checkout_id', 'body']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: `params` snapshots the current locals; unknown kwargs are
    # rejected, known ones merged in, and later lookups all use `params`.
    params = locals()
    # `iteritems` comes from a module-level compat import not visible in
    # this chunk (presumably six).
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method checkout_checkout_id_pay_post_using_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'checkout_id' is set
    if ('checkout_id' not in params) or (params['checkout_id'] is None):
        raise ValueError("Missing the required parameter `checkout_id` when calling `checkout_checkout_id_pay_post_using_post`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `checkout_checkout_id_pay_post_using_post`")

    collection_formats = {}

    resource_path = '/checkout/{checkoutId}/pay'.replace('{format}', 'json')
    path_params = {}
    if 'checkout_id' in params:
        path_params['checkoutId'] = params['checkout_id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # The payment method object is sent as the JSON request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['*/*'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = []  # no authentication configured for this endpoint

    # response_type=None: this endpoint returns no deserialized body.
    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type=None,
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def checkout_checkout_id_shipping_address_put_using_put(self, checkout_id, body, **kwargs):
    """
    Update the shipping address
    Synchronous by default; supply a `callback` keyword to run the HTTP
    request asynchronously, in which case the request thread is returned.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.checkout_checkout_id_shipping_address_put_using_put(checkout_id, body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str checkout_id: Checkout Id (required)
    :param Address body: Shipping address (required)
    :return: Checkout
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers only ever want the payload.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async branches are the same delegated call.
    return self.checkout_checkout_id_shipping_address_put_using_put_with_http_info(
        checkout_id, body, **kwargs
    )
def checkout_checkout_id_shipping_address_put_using_put_with_http_info(self, checkout_id, body, **kwargs):
    """
    Update the shipping address
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.checkout_checkout_id_shipping_address_put_using_put_with_http_info(checkout_id, body, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str checkout_id: Checkout Id (required)
    :param Address body: Shipping address (required)
    :return: Checkout
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments accepted in addition to the positional parameters.
    all_params = ['checkout_id', 'body']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: `params` snapshots the current locals; unknown kwargs are
    # rejected, known ones merged in, and later lookups all use `params`.
    params = locals()
    # `iteritems` comes from a module-level compat import not visible in
    # this chunk (presumably six).
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method checkout_checkout_id_shipping_address_put_using_put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'checkout_id' is set
    if ('checkout_id' not in params) or (params['checkout_id'] is None):
        raise ValueError("Missing the required parameter `checkout_id` when calling `checkout_checkout_id_shipping_address_put_using_put`")
    # verify the required parameter 'body' is set
    if ('body' not in params) or (params['body'] is None):
        raise ValueError("Missing the required parameter `body` when calling `checkout_checkout_id_shipping_address_put_using_put`")

    collection_formats = {}

    resource_path = '/checkout/{checkoutId}/shippingAddress'.replace('{format}', 'json')
    path_params = {}
    if 'checkout_id' in params:
        path_params['checkoutId'] = params['checkout_id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # The shipping address is sent as the JSON request body.
    body_params = None
    if 'body' in params:
        body_params = params['body']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = []  # no authentication configured for this endpoint

    # Delegate to the shared client; returns the updated Checkout (or the
    # request thread when `callback` is supplied).
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Checkout',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def checkout_checkout_id_shipping_method_put_using_put(self, checkout_id, shipping_method, **kwargs):
    """
    Set or update the shipping method
    Synchronous by default; supply a `callback` keyword to run the HTTP
    request asynchronously, in which case the request thread is returned.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.checkout_checkout_id_shipping_method_put_using_put(checkout_id, shipping_method, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str checkout_id: Checkout Id (required)
    :param str shipping_method: Shipping method (0: Express, 1: Standard, 2: Economy) (required)
    :return: Checkout
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers only ever want the payload.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async branches are the same delegated call.
    return self.checkout_checkout_id_shipping_method_put_using_put_with_http_info(
        checkout_id, shipping_method, **kwargs
    )
def checkout_checkout_id_shipping_method_put_using_put_with_http_info(self, checkout_id, shipping_method, **kwargs):
    """
    Set or update the shipping method
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.checkout_checkout_id_shipping_method_put_using_put_with_http_info(checkout_id, shipping_method, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str checkout_id: Checkout Id (required)
    :param str shipping_method: Shipping method (0: Express, 1: Standard, 2: Economy) (required)
    :return: Checkout
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments accepted in addition to the positional parameters.
    all_params = ['checkout_id', 'shipping_method']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: `params` snapshots the current locals; unknown kwargs are
    # rejected, known ones merged in, and later lookups all use `params`.
    params = locals()
    # `iteritems` comes from a module-level compat import not visible in
    # this chunk (presumably six).
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method checkout_checkout_id_shipping_method_put_using_put" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'checkout_id' is set
    if ('checkout_id' not in params) or (params['checkout_id'] is None):
        raise ValueError("Missing the required parameter `checkout_id` when calling `checkout_checkout_id_shipping_method_put_using_put`")
    # verify the required parameter 'shipping_method' is set
    if ('shipping_method' not in params) or (params['shipping_method'] is None):
        raise ValueError("Missing the required parameter `shipping_method` when calling `checkout_checkout_id_shipping_method_put_using_put`")

    collection_formats = {}

    resource_path = '/checkout/{checkoutId}/shippingMethod'.replace('{format}', 'json')
    path_params = {}
    if 'checkout_id' in params:
        path_params['checkoutId'] = params['checkout_id']

    # Unlike the sibling endpoints, the payload here travels as a query
    # parameter rather than a request body.
    query_params = {}
    if 'shipping_method' in params:
        query_params['shippingMethod'] = params['shipping_method']

    header_params = {}

    form_params = []
    local_var_files = {}

    body_params = None

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['*/*'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = []  # no authentication configured for this endpoint

    # Delegate to the shared client; returns the updated Checkout (or the
    # request thread when `callback` is supplied).
    return self.api_client.call_api(resource_path, 'PUT',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Checkout',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def create_cart_using_post(self, cart, **kwargs):
    """
    Create a possibly empty shopping cart
    Synchronous by default; supply a `callback` keyword to run the HTTP
    request asynchronously, in which case the request thread is returned.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.create_cart_using_post(cart, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param Cart cart: Includes billing and products info (required)
    :return: Checkout
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers only ever want the payload.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async branches are the same delegated call.
    return self.create_cart_using_post_with_http_info(cart, **kwargs)
def create_cart_using_post_with_http_info(self, cart, **kwargs):
    """
    Create a possibly empty shopping cart
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.create_cart_using_post_with_http_info(cart, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param Cart cart: Includes billing and products info (required)
    :return: Checkout
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments accepted in addition to the positional parameters.
    all_params = ['cart']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: `params` snapshots the current locals; unknown kwargs are
    # rejected, known ones merged in, and later lookups all use `params`.
    params = locals()
    # `iteritems` comes from a module-level compat import not visible in
    # this chunk (presumably six).
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_cart_using_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'cart' is set
    if ('cart' not in params) or (params['cart'] is None):
        raise ValueError("Missing the required parameter `cart` when calling `create_cart_using_post`")

    collection_formats = {}

    # Collection-level endpoint: no path parameters.
    resource_path = '/checkout'.replace('{format}', 'json')
    path_params = {}

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # The cart is sent as the JSON request body.
    body_params = None
    if 'cart' in params:
        body_params = params['cart']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = []  # no authentication configured for this endpoint

    # Delegate to the shared client; returns the created Checkout (or the
    # request thread when `callback` is supplied).
    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Checkout',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
def create_item_using_post(self, checkout_id, item, **kwargs):
    """
    Add a new item to the shopping cart
    Synchronous by default; supply a `callback` keyword to run the HTTP
    request asynchronously, in which case the request thread is returned.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.create_item_using_post(checkout_id, item, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str checkout_id: Checkout Id (required)
    :param Product item: Item to be added to the cart (required)
    :return: Checkout
        If the method is called asynchronously,
        returns the request thread.
    """
    # Convenience wrapper: callers only ever want the payload.
    kwargs['_return_http_data_only'] = True
    # Both the sync and async branches are the same delegated call.
    return self.create_item_using_post_with_http_info(checkout_id, item, **kwargs)
def create_item_using_post_with_http_info(self, checkout_id, item, **kwargs):
    """
    Add a new item to the shopping cart
    This method makes a synchronous HTTP request by default. To make an
    asynchronous HTTP request, please define a `callback` function
    to be invoked when receiving the response.
    >>> def callback_function(response):
    >>>     pprint(response)
    >>>
    >>> thread = api.create_item_using_post_with_http_info(checkout_id, item, callback=callback_function)
    :param callback function: The callback function
        for asynchronous request. (optional)
    :param str checkout_id: Checkout Id (required)
    :param Product item: Item to be added to the cart (required)
    :return: Checkout
        If the method is called asynchronously,
        returns the request thread.
    """
    # Keyword arguments accepted in addition to the positional parameters.
    all_params = ['checkout_id', 'item']
    all_params.append('callback')
    all_params.append('_return_http_data_only')
    all_params.append('_preload_content')
    all_params.append('_request_timeout')

    # NOTE: `params` snapshots the current locals; unknown kwargs are
    # rejected, known ones merged in, and later lookups all use `params`.
    params = locals()
    # `iteritems` comes from a module-level compat import not visible in
    # this chunk (presumably six).
    for key, val in iteritems(params['kwargs']):
        if key not in all_params:
            raise TypeError(
                "Got an unexpected keyword argument '%s'"
                " to method create_item_using_post" % key
            )
        params[key] = val
    del params['kwargs']
    # verify the required parameter 'checkout_id' is set
    if ('checkout_id' not in params) or (params['checkout_id'] is None):
        raise ValueError("Missing the required parameter `checkout_id` when calling `create_item_using_post`")
    # verify the required parameter 'item' is set
    if ('item' not in params) or (params['item'] is None):
        raise ValueError("Missing the required parameter `item` when calling `create_item_using_post`")

    collection_formats = {}

    resource_path = '/checkout/{checkoutId}/items'.replace('{format}', 'json')
    path_params = {}
    if 'checkout_id' in params:
        path_params['checkoutId'] = params['checkout_id']

    query_params = {}

    header_params = {}

    form_params = []
    local_var_files = {}

    # The new item is sent as the JSON request body.
    body_params = None
    if 'item' in params:
        body_params = params['item']

    # HTTP header `Accept`
    header_params['Accept'] = self.api_client.\
        select_header_accept(['application/json'])
    if not header_params['Accept']:
        del header_params['Accept']

    # HTTP header `Content-Type`
    header_params['Content-Type'] = self.api_client.\
        select_header_content_type(['application/json'])

    # Authentication setting
    auth_settings = []  # no authentication configured for this endpoint

    # Delegate to the shared client; returns the updated Checkout (or the
    # request thread when `callback` is supplied).
    return self.api_client.call_api(resource_path, 'POST',
                                    path_params,
                                    query_params,
                                    header_params,
                                    body=body_params,
                                    post_params=form_params,
                                    files=local_var_files,
                                    response_type='Checkout',
                                    auth_settings=auth_settings,
                                    callback=params.get('callback'),
                                    _return_http_data_only=params.get('_return_http_data_only'),
                                    _preload_content=params.get('_preload_content', True),
                                    _request_timeout=params.get('_request_timeout'),
                                    collection_formats=collection_formats)
| 43.579553 | 156 | 0.586053 | 7,021 | 68,202 | 5.392822 | 0.034326 | 0.07263 | 0.041835 | 0.024721 | 0.962364 | 0.953939 | 0.952143 | 0.944537 | 0.935135 | 0.925864 | 0 | 0.000332 | 0.337336 | 68,202 | 1,564 | 157 | 43.607417 | 0.837438 | 0.298833 | 0 | 0.80865 | 1 | 0 | 0.186806 | 0.066969 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035387 | false | 0 | 0.009174 | 0 | 0.096986 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
95719db34ec7e8c6ffc1ac90d29cdb4b9f4e51b7 | 190 | py | Python | test_py.py | MaxChangInnodisk/py2so | 367da6a0a371da71489e24c055aedf869cbdec6a | [
"MIT"
] | null | null | null | test_py.py | MaxChangInnodisk/py2so | 367da6a0a371da71489e24c055aedf869cbdec6a | [
"MIT"
] | 1 | 2022-03-08T09:43:52.000Z | 2022-03-08T09:43:52.000Z | test_py.py | MaxChangInnodisk/py2so | 367da6a0a371da71489e24c055aedf869cbdec6a | [
"MIT"
] | 1 | 2022-03-08T09:29:44.000Z | 2022-03-08T09:29:44.000Z | import demo.foo.print_me as foo_print_me
import demo.bar.print_me as bar_print_me
import demo.bar.barbar.print_me as barbar_print_me
foo_print_me.do()
bar_print_me.do()
barbar_print_me.do() | 27.142857 | 50 | 0.836842 | 40 | 190 | 3.6 | 0.225 | 0.4375 | 0.208333 | 0.236111 | 0.277778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.078947 | 190 | 7 | 51 | 27.142857 | 0.822857 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 1 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 7 |
95aa3cf079532b693e747aeb0ee7eede7631fff4 | 12,471 | py | Python | spectrome/forward/network_transfer.py | 21littlesun/spectrome | 7e3f85a7a51b430dc302f4dbb664555a4b5a2688 | [
"MIT"
] | 4 | 2019-11-19T20:35:02.000Z | 2021-05-25T15:18:40.000Z | spectrome/forward/network_transfer.py | 21littlesun/spectrome | 7e3f85a7a51b430dc302f4dbb664555a4b5a2688 | [
"MIT"
] | 9 | 2019-11-05T17:59:28.000Z | 2020-08-24T20:55:09.000Z | spectrome/forward/network_transfer.py | 21littlesun/spectrome | 7e3f85a7a51b430dc302f4dbb664555a4b5a2688 | [
"MIT"
] | 5 | 2019-11-13T21:13:50.000Z | 2021-11-12T21:28:00.000Z | """Module for computing basic quantities from a spectral graph model: the forward model
Makes the calculation for a single frequency only. This variation separates the global
coupling alpha (in laplacian) and the local coupling alpha = 1. """
import numpy as np
def network_transfer_function(brain, parameters, w, use_smalleigs=True):
"""Network Transfer Function for spectral graph model.
Args:
brain (Brain): specific brain to calculate NTF
parameters (dict): parameters for ntf. We shall keep this separate from Brain
for now, as we want to change and update according to fitting.
frequency (float): frequency at which to calculate NTF
use_smalleigs (boolean): how many eigen modes to use, True = using only 2/3 (cortical), leaving out subcortical
Returns:
frequency_response (numpy asarray): frequency response of local oscillators
ev (numpy asarray): Eigen values
Vv (numpy asarray): Eigen vectors
model_out (numpy asarray): Each region's frequency response for
the given frequency (w)
FCmodel (numpy asarray): Functional connectivity - still in the works
"""
C = brain.reducedConnectome
D = brain.distance_matrix
# defining parameters
tau_e = parameters["tau_e"]
tau_i = parameters["tau_i"]
speed = parameters["speed"]
gei = parameters[
"gei"
] # excitatory-inhibitory synaptic conductance as ratio of E-E syn
gii = parameters[
"gii"
] # inhibitory-inhibitory synaptic conductance as ratio of E-E syn
tauC = parameters["tauC"] # tauC = 0.5*tau_e
global_alpha = parameters["alpha"]
local_alpha = 1
# Not being used: Pin = 1 and tau_syn = 0.002
# Defining some other parameters used:
zero_thr = 0.05
a = 0.5 # fraction of signal at a node that is recurrent excitatory
# define sum of degrees for rows and columns for laplacian normalization
rowdegree = np.transpose(np.sum(C, axis=1))
coldegree = np.sum(C, axis=0)
qind = rowdegree + coldegree < 0.2 * np.mean(rowdegree + coldegree)
rowdegree[qind] = np.inf
coldegree[qind] = np.inf
nroi = C.shape[0]
if use_smalleigs is True:
K = np.round(2 / 3 * C.shape[0]) # 2/3
K = K.astype(int)
else:
K = nroi
Tau = 0.001 * D / speed
Cc = C * np.exp(-1j * Tau * w)
# Eigen Decomposition of Complex Laplacian Here
L1 = np.identity(nroi)
L2 = np.divide(1, np.sqrt(np.multiply(rowdegree, coldegree)) + np.spacing(1))
L = L1 - global_alpha * np.matmul(np.diag(L2), Cc)
d, v = np.linalg.eig(L) # decomposition with scipy.linalg.eig
eig_ind = np.argsort(np.abs(d)) # sorting in ascending order and absolute value
eig_vec = v[:, eig_ind] # re-indexing eigen vectors according to sorted index
eig_val = d[eig_ind] # re-indexing eigen values with same sorted index
eigenvalues = np.transpose(eig_val)
eigenvectors = eig_vec[:, 0:K]
# Cortical model
Fe = np.divide(1 / tau_e ** 2, (1j * w + 1 / tau_e) ** 2)
Fi = np.divide(gii * 1 / tau_i ** 2, (1j * w + 1 / tau_i) ** 2)
# Hed = 1/tau_e/(1j*w + 1/tau_e*He)
Hed = local_alpha / tau_e / (1j * w + local_alpha / tau_e * Fe)
# Hid = 1/tau_i/(1j*w + 1/tau_i*Hi)
Hid = local_alpha / tau_i / (1j * w + local_alpha / tau_i * Fi)
Heid = gei * Fe * Fi / (1 + gei * Fe * Fi)
Htotal = a * Hed + (1 - a) / 2 * Hid + (1 - a) / 2 * Heid
q1 = 1 / local_alpha * tauC * (1j * w + local_alpha / tauC * Fe * eigenvalues)
# q1 = tauC*(1j*w + 1/tauC*He*ev)
qthr = zero_thr * np.abs(q1[:]).max()
magq1 = np.maximum(np.abs(q1), qthr)
angq1 = np.angle(q1)
q1 = np.multiply(magq1, np.exp(1j * angq1))
frequency_response = np.divide(Htotal, q1)
model_out = 0
for k in range(1, K):
model_out += frequency_response[k] * eigenvectors[:, k]
FCmodel = np.matmul(
np.matmul(eigenvectors[:, 1:K], np.diag(frequency_response[1:K] ** 2)),
np.transpose(eigenvectors[:, 1:K]),
)
den = np.sqrt(np.abs(model_out))
FCmodel = np.matmul(np.matmul(np.diag(1 / den), FCmodel), np.diag(1 / den))
return frequency_response, eigenvalues, eigenvectors, model_out, FCmodel
def network_transfer_local_alpha(brain, parameters, w, use_smalleigs=True):
"""Network Transfer Function for spectral graph model.
Args:
brain (Brain): specific brain to calculate NTF
parameters (dict): parameters for ntf. We shall keep this separate from Brain
for now, as we want to change and update according to fitting.
frequency (float): frequency at which to calculate NTF
use_smalleigs (boolean): how many eigen modes to use, True = using only 2/3 (cortical), leaving out subcortical
Returns:
frequency_response (numpy asarray):
ev (numpy asarray): Eigen values
Vv (numpy asarray): Eigen vectors
model_out (numpy asarray): Each region's frequency response for
the given frequency (w)
FCmodel (numpy asarray): Functional connectivity - still in the works
"""
C = brain.reducedConnectome
D = brain.distance_matrix
tau_e = parameters["tau_e"]
tau_i = parameters["tau_i"]
speed = parameters["speed"]
gei = parameters[
"gei"
] # excitatory-inhibitory synaptic conductance as ratio of E-E syn
gii = parameters[
"gii"
] # inhibitory-inhibitory synaptic conductance as ratio of E-E syn
tauC = parameters["tauC"] # tauC = 0.5*tau_e
alpha = parameters["alpha"]
# local_alpha = 1
# Not being used: Pin = 1 and tau_syn = 0.002
# Defining some other parameters used:
zero_thr = 0.05
a = 0.5 # fraction of signal at a node that is recurrent excitatory
# define sum of degrees for rows and columns for laplacian normalization
rowdegree = np.transpose(np.sum(C, axis=1))
coldegree = np.sum(C, axis=0)
qind = rowdegree + coldegree < 0.2 * np.mean(rowdegree + coldegree)
rowdegree[qind] = np.inf
coldegree[qind] = np.inf
nroi = C.shape[0]
if use_smalleigs is True:
K = np.round(2 / 3 * C.shape[0]) # 2/3
K = K.astype(int)
else:
K = nroi
Tau = 0.001 * D / speed
Cc = C * np.exp(-1j * Tau * w)
# Eigen Decomposition of Complex Laplacian Here
#L1 = 0.8 * np.identity(nroi) # 0.8I in matlab
L1 = np.identity(nroi)
L2 = np.divide(1, np.sqrt(np.multiply(rowdegree, coldegree)) + np.spacing(1))
L = L1 - alpha * np.matmul(np.diag(L2), Cc)
d, v = np.linalg.eig(L) # decomposition with scipy.linalg.eig
eig_ind = np.argsort(np.abs(d)) # sorting in ascending order and absolute value
eig_vec = v[:, eig_ind] # re-indexing eigen vectors according to sorted index
eig_val = d[eig_ind] # re-indexing eigen values with same sorted index
eigenvalues = np.transpose(eig_val)
eigenvectors = eig_vec[:, 0:K]
# Cortical model
Fe = np.divide(1 / tau_e ** 2, (1j * w + 1 / tau_e) ** 2)
Fi = np.divide(gii * 1 / tau_i ** 2, (1j * w + 1 / tau_i) ** 2)
# Hed = 1/tau_e/(1j*w + 1/tau_e*He)
Hed = alpha / tau_e / (1j * w + alpha / tau_e * Fe)
# Hid = 1/tau_i/(1j*w + 1/tau_i*Hi)
Hid = alpha / tau_i / (1j * w + alpha / tau_i * Fi)
Heid = gei * Fe * Fi / (1 + gei * Fe * Fi)
Htotal = a * Hed + (1 - a) / 2 * Hid + (1 - a) / 2 * Heid
q1 = 1 / alpha * tauC * (1j * w + alpha / tauC * Fe * eigenvalues)
# q1 = tauC*(1j*w + 1/tauC*He*ev)
qthr = zero_thr * np.abs(q1[:]).max()
magq1 = np.maximum(np.abs(q1), qthr)
angq1 = np.angle(q1)
q1 = np.multiply(magq1, np.exp(1j * angq1))
frequency_response = np.divide(Htotal, q1)
model_out = 0
for k in range(1, K):
model_out += frequency_response[k] * eigenvectors[:, k]
FCmodel = np.matmul(
np.matmul(eigenvectors[:, 1:K], np.diag(frequency_response[1:K] ** 2)),
np.transpose(eigenvectors[:, 1:K]),
)
den = np.sqrt(np.abs(model_out))
FCmodel = np.matmul(np.matmul(np.diag(1 / den), FCmodel), np.diag(1 / den))
return frequency_response, eigenvalues, eigenvectors, model_out, FCmodel
def network_transfer_HM(brain, parameters, w, use_smalleigs=True):
    """Network transfer function for spectral graph model, the local oscillator model is modified by HM.

    Args:
        brain (Brain): Brain class object with connectome and distance matrix
        parameters (dict): model parameters (keys used here: tau_e, tau_i,
            speed, gei, gii, tauC, alpha)
        w (float): Frequency of interest (angular frequency presumably, given
            the 1j*w terms -- TODO confirm against callers)
        use_smalleigs (boolean): how many eigen modes to use, True = using only 2/3 (cortical), leaving out subcortical

    Returns:
        tuple: (frequency_response, eigenvalues, eigenvectors, model_out, Htotal)
    """
    # Housekeeping - defining connectomes, distance matrix, and model parameters.
    C = brain.reducedConnectome
    D = brain.distance_matrix
    tau_e = parameters["tau_e"]
    tau_i = parameters["tau_i"]
    speed = parameters["speed"]
    gei = parameters["gei"]
    gii = parameters["gii"]
    tauC = parameters["tauC"]
    global_alpha = parameters["alpha"]
    local_alpha = 1
    # Not being used: Pin = 1 and tau_syn = 0.002
    # Defining some other parameters used:
    zero_thr = 0.05  # relative floor on |q1| applied below, to avoid division blow-up
    # use_smalleigs = True # otherwise uses full eig()
    numsmalleigs = np.round(2 / 3 * C.shape[0])  # 2/3  NOTE(review): unused below; K is recomputed
    a = 0.5  # fraction of signal at a node that is recurrent excitatory
    # NOTE(review): `a` is not used in this HM variant (Htotal below does not weight by it).
    # gei = 4 # excitatory-inhibitory synaptic conductance as ratio of E-E syn
    # gii = 1 # inhibitory-inhibitory synaptic conductance as ratio of E-E syn
    # tauC = 0.5*tau_e
    # define sum of degrees in rows and columns for laplacian normalization
    rowdegree = np.transpose(np.sum(C, axis=1))
    coldegree = np.sum(C, axis=0)
    # Near-disconnected nodes (total degree < 20% of mean) get infinite degree,
    # which drives their normalization weight L2 below to ~0.
    qind = rowdegree + coldegree < 0.2 * np.mean(rowdegree + coldegree)
    rowdegree[qind] = np.inf
    coldegree[qind] = np.inf
    # Use all eigenmodes or 2/3 eigenmodes excluding the subcortical ones
    nroi = C.shape[0]
    if use_smalleigs is True:
        K = np.round(2 / 3 * C.shape[0])  # 2/3
        K = K.astype(int)
    else:
        K = nroi
    # Complex connectivity:
    Tau = (
        0.001 * D / speed
    )  # divide distance by speed, which is in meters per second, 0.001 converts D to meters
    Cc = C * np.exp(-1j * Tau * w)  # delay-weighted (complex) connectivity
    # Complex Laplacian:
    L1 = np.identity(nroi)
    L2 = np.divide(1, np.sqrt(np.multiply(rowdegree, coldegree)) + np.spacing(1))  # eps guards 0-division
    L = L1 - global_alpha * np.matmul(np.diag(L2), Cc)
    # eigen decomposition:
    d, v = np.linalg.eig(L)  # numpy.linalg.eig returns eigenvalues unordered, hence the sort below
    eig_ind = np.argsort(np.abs(d))  # sorting in ascending order and absolute value
    eig_vec = v[:, eig_ind]  # re-indexing eigen vectors according to sorted index
    eig_val = d[eig_ind]  # re-indexing eigen values with same sorted index
    eigenvalues = np.transpose(eig_val)
    eigenvectors = eig_vec[:, 0:K]  # K is either 2/3 or all eigenmodes
    # Cortical model:
    Fe = np.divide(1 / tau_e ** 2, (1j * w + 1 / tau_e) ** 2)  # excitatory low-pass response
    Fi = np.divide(gii * 1 / tau_i ** 2, (1j * w + 1 / tau_i) ** 2)  # inhibitory low-pass response
    He = local_alpha / tau_e / (1j * w + local_alpha / tau_e * Fe)
    # NOTE(review): denominator uses tau_e here although this is the inhibitory
    # transfer function; by symmetry with He one would expect
    # `local_alpha / tau_i * Fi` -- confirm whether tau_e is intentional.
    Hi = local_alpha / tau_i / (1j * w + local_alpha / tau_e * Fi)
    # denominator term for alternative model proposed by HM
    # NOTE(review): `gei ** 2 / tau_e * tau_i` evaluates as (gei**2 / tau_e) * tau_i;
    # if gei**2 / (tau_e * tau_i) was intended, parentheses are missing -- confirm.
    denom = 1 + (gei ** 2 / tau_e * tau_i) * Fe * Fi * He * Hi
    He_alt = np.divide(He, denom)
    Hi_alt = np.divide(Hi, denom)
    Hoffdiag_alt = np.divide(
        gei * ((-1 / tau_e) * Fe + (1 / tau_i) * Fi) * He * Hi, denom
    )
    Htotal = He_alt + Hi_alt + Hoffdiag_alt
    # This scaling may not be necessary, take a look at Htotal
    q1 = 1 / local_alpha * tauC * (1j * w + local_alpha / tauC * Fe * eigenvalues)
    # q1 = tauC*(1j*w + 1/tauC*He*ev)
    # Clamp |q1| from below at zero_thr * max|q1|, preserving phase, before dividing.
    qthr = zero_thr * np.abs(q1[:]).max()
    magq1 = np.maximum(np.abs(q1), qthr)
    angq1 = np.angle(q1)
    q1 = np.multiply(magq1, np.exp(1j * angq1))
    frequency_response = np.divide(Htotal, q1)
    # Spatial output: eigenmodes weighted by their frequency response;
    # mode 0 is deliberately skipped (range starts at 1).
    model_out = 0
    for k in range(1, K):
        model_out += frequency_response[k] * eigenvectors[:, k]
    # FCmodel = np.matmul(
    #     np.matmul(eigenvectors[:, 1:K], np.diag(frequency_response[1:K] ** 2)), np.transpose(eigenvectors[:, 1:K])
    # )
    # den = np.sqrt(np.abs(model_out))
    # FCmodel = np.matmul(np.matmul(np.diag(1 / den), FCmodel), np.diag(1 / den))
    return frequency_response, eigenvalues, eigenvectors, model_out, Htotal
# Look at Htotal only, see if it's similar to HOrig.
| 38.137615 | 119 | 0.629701 | 1,873 | 12,471 | 4.110518 | 0.135611 | 0.014547 | 0.006754 | 0.009092 | 0.849591 | 0.844006 | 0.843876 | 0.842057 | 0.842057 | 0.832965 | 0 | 0.028446 | 0.25018 | 12,471 | 326 | 120 | 38.254601 | 0.794888 | 0.402614 | 0 | 0.81768 | 0 | 0 | 0.012498 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016575 | false | 0 | 0.005525 | 0 | 0.038674 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
95c1161f38b7ded95ed4a4d5ee1e284728e75c40 | 38 | py | Python | api/gunicorn-config_eeg.py | HotMaps/Toolbox | ba1e287dbc63e34bf9feb80b65b02c1db93ce91c | [
"Apache-2.0"
] | 4 | 2020-10-01T10:38:06.000Z | 2021-12-28T03:11:18.000Z | api/gunicorn-config_eeg.py | HotMaps/Toolbox | ba1e287dbc63e34bf9feb80b65b02c1db93ce91c | [
"Apache-2.0"
] | 9 | 2017-11-08T17:29:10.000Z | 2020-08-31T15:28:31.000Z | api/gunicorn-config_eeg.py | HotMaps/Toolbox | ba1e287dbc63e34bf9feb80b65b02c1db93ce91c | [
"Apache-2.0"
] | 4 | 2019-03-25T13:24:14.000Z | 2021-07-16T20:52:51.000Z |
# Gunicorn server configuration.

# Socket to bind: listen on all interfaces, port 5000.
bind = "0.0.0.0:5000"
# Number of worker processes spawned to handle requests.
workers = 15
| 6.333333 | 21 | 0.552632 | 8 | 38 | 2.625 | 0.625 | 0.285714 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.344828 | 0.236842 | 38 | 5 | 22 | 7.6 | 0.37931 | 0 | 0 | 0 | 0 | 0 | 0.342857 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
95e499f435be44fb2cce8d9ad1e3db7129a47ce0 | 123 | py | Python | Pythonjunior2020/Woche1/Aufgabe_1_2_3.py | Zeyecx/HPI-Potsdam | ed45ca471cee204dde74dd2c3efae3877ee71036 | [
"MIT"
] | null | null | null | Pythonjunior2020/Woche1/Aufgabe_1_2_3.py | Zeyecx/HPI-Potsdam | ed45ca471cee204dde74dd2c3efae3877ee71036 | [
"MIT"
] | null | null | null | Pythonjunior2020/Woche1/Aufgabe_1_2_3.py | Zeyecx/HPI-Potsdam | ed45ca471cee204dde74dd2c3efae3877ee71036 | [
"MIT"
] | null | null | null | # 1.2.3, Woche 1, Block 2, Aufgabe 3
# Ausgabe
print("Viel", "SpaΓ im", "Sommercamp")
# print("Viel SpaΓ im Sommercamp") | 20.5 | 39 | 0.650407 | 20 | 123 | 4 | 0.6 | 0.225 | 0.325 | 0.375 | 0.625 | 0 | 0 | 0 | 0 | 0 | 0 | 0.058824 | 0.170732 | 123 | 6 | 40 | 20.5 | 0.72549 | 0.609756 | 0 | 0 | 0 | 0 | 0.466667 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 7 |
2515c8a21aa2cfaa04046252523548e6e89e7180 | 14,389 | py | Python | test/test_borders.py | jwodder/txtble | 682ffabf5bb11c606a457e5c18bd855cd81a5f69 | [
"MIT"
] | 3 | 2018-06-10T16:30:07.000Z | 2021-08-11T14:26:33.000Z | test/test_borders.py | jwodder/txtble | 682ffabf5bb11c606a457e5c18bd855cd81a5f69 | [
"MIT"
] | 25 | 2018-06-13T17:22:44.000Z | 2021-12-02T22:12:37.000Z | test/test_borders.py | jwodder/txtble | 682ffabf5bb11c606a457e5c18bd855cd81a5f69 | [
"MIT"
] | 1 | 2021-09-04T13:23:58.000Z | 2021-09-04T13:23:58.000Z | import pytest
from test_data import DATA, HEADERS, TABLE
from txtble import Txtble
@pytest.mark.parametrize("header_border", [None, True])
def test_no_border(header_border):
    """border=False drops the outer frame; the header rule is still drawn."""
    tbl = Txtble(
        DATA,
        border=False,
        header_border=header_border,
        headers=HEADERS,
    )
    assert str(tbl) == (
        "Month |Birthstone|Birth Flower\n"
        "---------+----------+------------------\n"
        "January |Garnet |Carnation\n"
        "February |Amethyst |Violet\n"
        "March |Aquamarine|Jonquil\n"
        "April |Diamond |Sweetpea\n"
        "May |Emerald |Lily Of The Valley\n"
        "June |Pearl |Rose\n"
        "July |Ruby |Larkspur\n"
        "August |Peridot |Gladiolus\n"
        "September|Sapphire |Aster\n"
        "October |Opal |Calendula\n"
        "November |Topaz |Chrysanthemum\n"
        "December |Turquoise |Narcissus"
    )
def test_no_border_no_rstrip():
    """rstrip=False keeps trailing cell padding on each borderless row."""
    tbl = Txtble(DATA, headers=HEADERS, border=False, rstrip=False)
    assert str(tbl) == (
        "Month |Birthstone|Birth Flower \n"
        "---------+----------+------------------\n"
        "January |Garnet |Carnation \n"
        "February |Amethyst |Violet \n"
        "March |Aquamarine|Jonquil \n"
        "April |Diamond |Sweetpea \n"
        "May |Emerald |Lily Of The Valley\n"
        "June |Pearl |Rose \n"
        "July |Ruby |Larkspur \n"
        "August |Peridot |Gladiolus \n"
        "September|Sapphire |Aster \n"
        "October |Opal |Calendula \n"
        "November |Topaz |Chrysanthemum \n"
        "December |Turquoise |Narcissus "
    )
@pytest.mark.parametrize("header_border", [None, False])
def test_no_headers_no_border(header_border):
    """Without headers, border=False yields bare rows and no header rule."""
    tbl = Txtble(DATA, border=False, header_border=header_border)
    assert str(tbl) == (
        "January |Garnet |Carnation\n"
        "February |Amethyst |Violet\n"
        "March |Aquamarine|Jonquil\n"
        "April |Diamond |Sweetpea\n"
        "May |Emerald |Lily Of The Valley\n"
        "June |Pearl |Rose\n"
        "July |Ruby |Larkspur\n"
        "August |Peridot |Gladiolus\n"
        "September|Sapphire |Aster\n"
        "October |Opal |Calendula\n"
        "November |Topaz |Chrysanthemum\n"
        "December |Turquoise |Narcissus"
    )
def test_header_border_no_headers_no_border():
    """header_border=True draws the leading rule even with no header row."""
    tbl = Txtble(DATA, border=False, header_border=True)
    assert str(tbl) == (
        "---------+----------+------------------\n"
        "January |Garnet |Carnation\n"
        "February |Amethyst |Violet\n"
        "March |Aquamarine|Jonquil\n"
        "April |Diamond |Sweetpea\n"
        "May |Emerald |Lily Of The Valley\n"
        "June |Pearl |Rose\n"
        "July |Ruby |Larkspur\n"
        "August |Peridot |Gladiolus\n"
        "September|Sapphire |Aster\n"
        "October |Opal |Calendula\n"
        "November |Topaz |Chrysanthemum\n"
        "December |Turquoise |Narcissus"
    )
def test_row_border():
    """row_border=True draws a horizontal rule between every pair of rows."""
    tbl = Txtble(DATA, headers=HEADERS, row_border=True)
    assert str(tbl) == (
        "+---------+----------+------------------+\n"
        "|Month |Birthstone|Birth Flower |\n"
        "+---------+----------+------------------+\n"
        "|January |Garnet |Carnation |\n"
        "+---------+----------+------------------+\n"
        "|February |Amethyst |Violet |\n"
        "+---------+----------+------------------+\n"
        "|March |Aquamarine|Jonquil |\n"
        "+---------+----------+------------------+\n"
        "|April |Diamond |Sweetpea |\n"
        "+---------+----------+------------------+\n"
        "|May |Emerald |Lily Of The Valley|\n"
        "+---------+----------+------------------+\n"
        "|June |Pearl |Rose |\n"
        "+---------+----------+------------------+\n"
        "|July |Ruby |Larkspur |\n"
        "+---------+----------+------------------+\n"
        "|August |Peridot |Gladiolus |\n"
        "+---------+----------+------------------+\n"
        "|September|Sapphire |Aster |\n"
        "+---------+----------+------------------+\n"
        "|October |Opal |Calendula |\n"
        "+---------+----------+------------------+\n"
        "|November |Topaz |Chrysanthemum |\n"
        "+---------+----------+------------------+\n"
        "|December |Turquoise |Narcissus |\n"
        "+---------+----------+------------------+"
    )
def test_row_border_embedded_newlines():
    """Multiline cells span several output lines within one logical row;
    row rules appear only at row boundaries, not at embedded newlines."""
    tbl = Txtble(
        [
            [
                "Verse 1",
                "Twas brillig, and the slithy toves\n"
                "Did gyre and gimble in the wabe;\n"
                "All mimsy were the borogoves,\n"
                "And the mome raths outgrabe.",
            ],
            [
                "Verse 2",
                '"Beware the Jabberwock, my son!\n'
                "The jaws that bite, the claws that catch!\n"
                "Beware the Jubjub bird, and shun\n"
                'The frumious Bandersnatch!"',
            ],
        ],
        row_border=True,
    )
    assert str(tbl) == (
        "+-------+-----------------------------------------+\n"
        "|Verse 1|Twas brillig, and the slithy toves |\n"
        "| |Did gyre and gimble in the wabe; |\n"
        "| |All mimsy were the borogoves, |\n"
        "| |And the mome raths outgrabe. |\n"
        "+-------+-----------------------------------------+\n"
        '|Verse 2|"Beware the Jabberwock, my son! |\n'
        "| |The jaws that bite, the claws that catch!|\n"
        "| |Beware the Jubjub bird, and shun |\n"
        '| |The frumious Bandersnatch!" |\n'
        "+-------+-----------------------------------------+"
    )
def test_no_column_border():
    """column_border=False removes the vertical separators between columns."""
    tbl = Txtble(DATA, headers=HEADERS, column_border=False)
    assert str(tbl) == (
        "+-------------------------------------+\n"
        "|Month BirthstoneBirth Flower |\n"
        "+-------------------------------------+\n"
        "|January Garnet Carnation |\n"
        "|February Amethyst Violet |\n"
        "|March AquamarineJonquil |\n"
        "|April Diamond Sweetpea |\n"
        "|May Emerald Lily Of The Valley|\n"
        "|June Pearl Rose |\n"
        "|July Ruby Larkspur |\n"
        "|August Peridot Gladiolus |\n"
        "|SeptemberSapphire Aster |\n"
        "|October Opal Calendula |\n"
        "|November Topaz Chrysanthemum |\n"
        "|December Turquoise Narcissus |\n"
        "+-------------------------------------+"
    )
def test_row_border_no_column_border():
    """Row rules without column separators: each rule is a solid dash line."""
    tbl = Txtble(DATA, headers=HEADERS, column_border=False, row_border=True)
    assert str(tbl) == (
        "+-------------------------------------+\n"
        "|Month BirthstoneBirth Flower |\n"
        "+-------------------------------------+\n"
        "|January Garnet Carnation |\n"
        "+-------------------------------------+\n"
        "|February Amethyst Violet |\n"
        "+-------------------------------------+\n"
        "|March AquamarineJonquil |\n"
        "+-------------------------------------+\n"
        "|April Diamond Sweetpea |\n"
        "+-------------------------------------+\n"
        "|May Emerald Lily Of The Valley|\n"
        "+-------------------------------------+\n"
        "|June Pearl Rose |\n"
        "+-------------------------------------+\n"
        "|July Ruby Larkspur |\n"
        "+-------------------------------------+\n"
        "|August Peridot Gladiolus |\n"
        "+-------------------------------------+\n"
        "|SeptemberSapphire Aster |\n"
        "+-------------------------------------+\n"
        "|October Opal Calendula |\n"
        "+-------------------------------------+\n"
        "|November Topaz Chrysanthemum |\n"
        "+-------------------------------------+\n"
        "|December Turquoise Narcissus |\n"
        "+-------------------------------------+"
    )
@pytest.mark.parametrize("header_border", [None, True])
def test_headers_header_border(header_border):
    """Default (None) and explicit True header_border both render the
    canonical reference table."""
    table = Txtble(DATA, headers=HEADERS, header_border=header_border)
    rendered = str(table)
    assert rendered == TABLE
def test_headers_no_header_border():
    """header_border=False omits the rule between the header row and data."""
    tbl = Txtble(DATA, headers=HEADERS, header_border=False)
    assert str(tbl) == (
        "+---------+----------+------------------+\n"
        "|Month |Birthstone|Birth Flower |\n"
        "|January |Garnet |Carnation |\n"
        "|February |Amethyst |Violet |\n"
        "|March |Aquamarine|Jonquil |\n"
        "|April |Diamond |Sweetpea |\n"
        "|May |Emerald |Lily Of The Valley|\n"
        "|June |Pearl |Rose |\n"
        "|July |Ruby |Larkspur |\n"
        "|August |Peridot |Gladiolus |\n"
        "|September|Sapphire |Aster |\n"
        "|October |Opal |Calendula |\n"
        "|November |Topaz |Chrysanthemum |\n"
        "|December |Turquoise |Narcissus |\n"
        "+---------+----------+------------------+"
    )
def test_row_border_no_header_border():
    """With row_border=True but header_border=False, rules separate the data
    rows, but no rule is drawn between the header and the first row."""
    tbl = Txtble(DATA, headers=HEADERS, row_border=True, header_border=False)
    assert str(tbl) == (
        "+---------+----------+------------------+\n"
        "|Month |Birthstone|Birth Flower |\n"
        "|January |Garnet |Carnation |\n"
        "+---------+----------+------------------+\n"
        "|February |Amethyst |Violet |\n"
        "+---------+----------+------------------+\n"
        "|March |Aquamarine|Jonquil |\n"
        "+---------+----------+------------------+\n"
        "|April |Diamond |Sweetpea |\n"
        "+---------+----------+------------------+\n"
        "|May |Emerald |Lily Of The Valley|\n"
        "+---------+----------+------------------+\n"
        "|June |Pearl |Rose |\n"
        "+---------+----------+------------------+\n"
        "|July |Ruby |Larkspur |\n"
        "+---------+----------+------------------+\n"
        "|August |Peridot |Gladiolus |\n"
        "+---------+----------+------------------+\n"
        "|September|Sapphire |Aster |\n"
        "+---------+----------+------------------+\n"
        "|October |Opal |Calendula |\n"
        "+---------+----------+------------------+\n"
        "|November |Topaz |Chrysanthemum |\n"
        "+---------+----------+------------------+\n"
        "|December |Turquoise |Narcissus |\n"
        "+---------+----------+------------------+"
    )
def test_all_borders_off():
    """With outer, column, and header borders all off, the output is plain
    space-padded columns with no rules at all."""
    tbl = Txtble(
        DATA,
        border=False,
        column_border=False,
        header_border=False,
        headers=HEADERS,
    )
    assert str(tbl) == (
        "Month BirthstoneBirth Flower\n"
        "January Garnet Carnation\n"
        "February Amethyst Violet\n"
        "March AquamarineJonquil\n"
        "April Diamond Sweetpea\n"
        "May Emerald Lily Of The Valley\n"
        "June Pearl Rose\n"
        "July Ruby Larkspur\n"
        "August Peridot Gladiolus\n"
        "SeptemberSapphire Aster\n"
        "October Opal Calendula\n"
        "November Topaz Chrysanthemum\n"
        "December Turquoise Narcissus"
    )
def test_invert_all_borders():
    """Only row_border enabled (all other borders off): dash rules between
    data rows, none after the header, no frame, no column separators."""
    tbl = Txtble(
        DATA,
        border=False,
        column_border=False,
        header_border=False,
        headers=HEADERS,
        row_border=True,
    )
    assert str(tbl) == (
        "Month BirthstoneBirth Flower\n"
        "January Garnet Carnation\n"
        "-------------------------------------\n"
        "February Amethyst Violet\n"
        "-------------------------------------\n"
        "March AquamarineJonquil\n"
        "-------------------------------------\n"
        "April Diamond Sweetpea\n"
        "-------------------------------------\n"
        "May Emerald Lily Of The Valley\n"
        "-------------------------------------\n"
        "June Pearl Rose\n"
        "-------------------------------------\n"
        "July Ruby Larkspur\n"
        "-------------------------------------\n"
        "August Peridot Gladiolus\n"
        "-------------------------------------\n"
        "SeptemberSapphire Aster\n"
        "-------------------------------------\n"
        "October Opal Calendula\n"
        "-------------------------------------\n"
        "November Topaz Chrysanthemum\n"
        "-------------------------------------\n"
        "December Turquoise Narcissus"
    )
def test_outer_border_only():
    """border=True with inner borders off draws only the outer frame."""
    tbl = Txtble(
        DATA,
        border=True,
        column_border=False,
        header_border=False,
        headers=HEADERS,
    )
    assert str(tbl) == (
        "+-------------------------------------+\n"
        "|Month BirthstoneBirth Flower |\n"
        "|January Garnet Carnation |\n"
        "|February Amethyst Violet |\n"
        "|March AquamarineJonquil |\n"
        "|April Diamond Sweetpea |\n"
        "|May Emerald Lily Of The Valley|\n"
        "|June Pearl Rose |\n"
        "|July Ruby Larkspur |\n"
        "|August Peridot Gladiolus |\n"
        "|SeptemberSapphire Aster |\n"
        "|October Opal Calendula |\n"
        "|November Topaz Chrysanthemum |\n"
        "|December Turquoise Narcissus |\n"
        "+-------------------------------------+"
    )
| 40.08078 | 77 | 0.386059 | 1,112 | 14,389 | 4.923561 | 0.09982 | 0.018265 | 0.030685 | 0.050411 | 0.949954 | 0.942648 | 0.928037 | 0.916712 | 0.902648 | 0.847854 | 0 | 0.00042 | 0.33755 | 14,389 | 358 | 78 | 40.192737 | 0.573961 | 0 | 0 | 0.787879 | 0 | 0 | 0.640837 | 0.19362 | 0 | 0 | 0 | 0 | 0.042424 | 1 | 0.042424 | false | 0 | 0.009091 | 0 | 0.051515 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
6c4d7229091d569977e07c1c5ee598ec49064cab | 1,114 | py | Python | Lib/site-packages/tensorflow/_api/v1/strings/__init__.py | amitdev81296/tensorflow | 9869739cc142a996432bef4dc91b1f1b165bc27a | [
"bzip2-1.0.6"
] | 1 | 2020-07-06T14:18:59.000Z | 2020-07-06T14:18:59.000Z | keras-ResNet50/tensorflow/_api/v1/strings/__init__.py | wuh0007/severless_ML_live | 088b78b06434583b7443ab877a6cdd80121bb8d1 | [
"MIT"
] | 4 | 2020-09-26T00:55:50.000Z | 2022-02-10T01:53:06.000Z | keras-ResNet50/tensorflow/_api/v1/strings/__init__.py | wuh0007/severless_ML_live | 088b78b06434583b7443ab877a6cdd80121bb8d1 | [
"MIT"
] | null | null | null | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Operations for working with string Tensors.
See the [Strings](https://tensorflow.org/api_guides/python/string_ops) guide.
"""
from __future__ import print_function
from tensorflow.python import reduce_join
from tensorflow.python import regex_full_match
from tensorflow.python import string_format as format
from tensorflow.python import string_join as join
from tensorflow.python import string_length as length
from tensorflow.python import string_split_v2 as split
from tensorflow.python import string_strip as strip
from tensorflow.python import string_to_hash_bucket as to_hash_bucket
from tensorflow.python import string_to_hash_bucket_fast as to_hash_bucket_fast
from tensorflow.python import string_to_hash_bucket_strong as to_hash_bucket_strong
from tensorflow.python import string_to_number as to_number
from tensorflow.python import substr
from tensorflow.python import unicode_script
from tensorflow.python.ops.gen_string_ops import regex_replace
del print_function
| 41.259259 | 83 | 0.859066 | 171 | 1,114 | 5.333333 | 0.339181 | 0.263158 | 0.307018 | 0.370614 | 0.394737 | 0.182018 | 0.144737 | 0.144737 | 0 | 0 | 0 | 0.000996 | 0.098743 | 1,114 | 26 | 84 | 42.846154 | 0.907371 | 0.223519 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.9375 | 0 | 0.9375 | 0.125 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
665ce88079c6e9d52bfa681e9b46790ce8e976b5 | 15,567 | py | Python | a_bit_of_everything/py-grpc/a_bit_of_everything_pb2_grpc.py | moul/grpcbin-proto | bca18df4138cc423a9f8513f9c7d5f71f1ea4b35 | [
"MIT"
] | null | null | null | a_bit_of_everything/py-grpc/a_bit_of_everything_pb2_grpc.py | moul/grpcbin-proto | bca18df4138cc423a9f8513f9c7d5f71f1ea4b35 | [
"MIT"
] | null | null | null | a_bit_of_everything/py-grpc/a_bit_of_everything_pb2_grpc.py | moul/grpcbin-proto | bca18df4138cc423a9f8513f9c7d5f71f1ea4b35 | [
"MIT"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
import a_bit_of_everything_pb2 as a__bit__of__everything__pb2
from examples.sub import message_pb2 as examples_dot_sub_dot_message__pb2
from examples.sub2 import message_pb2 as examples_dot_sub2_dot_message__pb2
from google.protobuf import duration_pb2 as google_dot_protobuf_dot_duration__pb2
from google.protobuf import empty_pb2 as google_dot_protobuf_dot_empty__pb2
class ABitOfEverythingServiceStub(object):
    """ABitOfEverything service is used to validate that APIs with complicated
    proto messages and URL templates are still processed correctly.
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # One client callable per unary-unary RPC; each pairs the request
        # message's serializer with the response message's deserializer.
        self.Create = channel.unary_unary(
            '/grpc.gateway.examples.examplepb.ABitOfEverythingService/Create',
            request_serializer=a__bit__of__everything__pb2.ABitOfEverything.SerializeToString,
            response_deserializer=a__bit__of__everything__pb2.ABitOfEverything.FromString,
        )
        self.CreateBody = channel.unary_unary(
            '/grpc.gateway.examples.examplepb.ABitOfEverythingService/CreateBody',
            request_serializer=a__bit__of__everything__pb2.ABitOfEverything.SerializeToString,
            response_deserializer=a__bit__of__everything__pb2.ABitOfEverything.FromString,
        )
        self.Lookup = channel.unary_unary(
            '/grpc.gateway.examples.examplepb.ABitOfEverythingService/Lookup',
            request_serializer=examples_dot_sub2_dot_message__pb2.IdMessage.SerializeToString,
            response_deserializer=a__bit__of__everything__pb2.ABitOfEverything.FromString,
        )
        self.Update = channel.unary_unary(
            '/grpc.gateway.examples.examplepb.ABitOfEverythingService/Update',
            request_serializer=a__bit__of__everything__pb2.ABitOfEverything.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.Delete = channel.unary_unary(
            '/grpc.gateway.examples.examplepb.ABitOfEverythingService/Delete',
            request_serializer=examples_dot_sub2_dot_message__pb2.IdMessage.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.GetQuery = channel.unary_unary(
            '/grpc.gateway.examples.examplepb.ABitOfEverythingService/GetQuery',
            request_serializer=a__bit__of__everything__pb2.ABitOfEverything.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.Echo = channel.unary_unary(
            '/grpc.gateway.examples.examplepb.ABitOfEverythingService/Echo',
            request_serializer=examples_dot_sub_dot_message__pb2.StringMessage.SerializeToString,
            response_deserializer=examples_dot_sub_dot_message__pb2.StringMessage.FromString,
        )
        self.DeepPathEcho = channel.unary_unary(
            '/grpc.gateway.examples.examplepb.ABitOfEverythingService/DeepPathEcho',
            request_serializer=a__bit__of__everything__pb2.ABitOfEverything.SerializeToString,
            response_deserializer=a__bit__of__everything__pb2.ABitOfEverything.FromString,
        )
        self.NoBindings = channel.unary_unary(
            '/grpc.gateway.examples.examplepb.ABitOfEverythingService/NoBindings',
            request_serializer=google_dot_protobuf_dot_duration__pb2.Duration.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.Timeout = channel.unary_unary(
            '/grpc.gateway.examples.examplepb.ABitOfEverythingService/Timeout',
            request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.ErrorWithDetails = channel.unary_unary(
            '/grpc.gateway.examples.examplepb.ABitOfEverythingService/ErrorWithDetails',
            request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.GetMessageWithBody = channel.unary_unary(
            '/grpc.gateway.examples.examplepb.ABitOfEverythingService/GetMessageWithBody',
            request_serializer=a__bit__of__everything__pb2.MessageWithBody.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
        self.PostWithEmptyBody = channel.unary_unary(
            '/grpc.gateway.examples.examplepb.ABitOfEverythingService/PostWithEmptyBody',
            request_serializer=a__bit__of__everything__pb2.Body.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
class ABitOfEverythingServiceServicer(object):
    """ABitOfEverything service is used to validate that APIs with complicated
    proto messages and URL templates are still processed correctly.
    """

    # Generated server-side skeleton: every default handler below reports
    # UNIMPLEMENTED; subclass and override the methods to implement the service.

    def Create(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def CreateBody(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Lookup(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Update(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Delete(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetQuery(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Echo(self, request, context):
        """Echo allows posting a StringMessage value.

        It also exposes multiple bindings.

        This makes it useful when validating that the OpenAPI v2 API
        description exposes documentation correctly on all paths
        defined as additional_bindings in the proto.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def DeepPathEcho(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def NoBindings(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def Timeout(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def ErrorWithDetails(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def GetMessageWithBody(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')

    def PostWithEmptyBody(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ABitOfEverythingServiceServicer_to_server(servicer, server):
    """Register *servicer*'s RPC handlers on *server* under the full
    'grpc.gateway.examples.examplepb.ABitOfEverythingService' name."""
    rpc_method_handlers = {
        'Create': grpc.unary_unary_rpc_method_handler(
            servicer.Create,
            request_deserializer=a__bit__of__everything__pb2.ABitOfEverything.FromString,
            response_serializer=a__bit__of__everything__pb2.ABitOfEverything.SerializeToString,
        ),
        'CreateBody': grpc.unary_unary_rpc_method_handler(
            servicer.CreateBody,
            request_deserializer=a__bit__of__everything__pb2.ABitOfEverything.FromString,
            response_serializer=a__bit__of__everything__pb2.ABitOfEverything.SerializeToString,
        ),
        'Lookup': grpc.unary_unary_rpc_method_handler(
            servicer.Lookup,
            request_deserializer=examples_dot_sub2_dot_message__pb2.IdMessage.FromString,
            response_serializer=a__bit__of__everything__pb2.ABitOfEverything.SerializeToString,
        ),
        'Update': grpc.unary_unary_rpc_method_handler(
            servicer.Update,
            request_deserializer=a__bit__of__everything__pb2.ABitOfEverything.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'Delete': grpc.unary_unary_rpc_method_handler(
            servicer.Delete,
            request_deserializer=examples_dot_sub2_dot_message__pb2.IdMessage.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'GetQuery': grpc.unary_unary_rpc_method_handler(
            servicer.GetQuery,
            request_deserializer=a__bit__of__everything__pb2.ABitOfEverything.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'Echo': grpc.unary_unary_rpc_method_handler(
            servicer.Echo,
            request_deserializer=examples_dot_sub_dot_message__pb2.StringMessage.FromString,
            response_serializer=examples_dot_sub_dot_message__pb2.StringMessage.SerializeToString,
        ),
        'DeepPathEcho': grpc.unary_unary_rpc_method_handler(
            servicer.DeepPathEcho,
            request_deserializer=a__bit__of__everything__pb2.ABitOfEverything.FromString,
            response_serializer=a__bit__of__everything__pb2.ABitOfEverything.SerializeToString,
        ),
        'NoBindings': grpc.unary_unary_rpc_method_handler(
            servicer.NoBindings,
            request_deserializer=google_dot_protobuf_dot_duration__pb2.Duration.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'Timeout': grpc.unary_unary_rpc_method_handler(
            servicer.Timeout,
            request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'ErrorWithDetails': grpc.unary_unary_rpc_method_handler(
            servicer.ErrorWithDetails,
            request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'GetMessageWithBody': grpc.unary_unary_rpc_method_handler(
            servicer.GetMessageWithBody,
            request_deserializer=a__bit__of__everything__pb2.MessageWithBody.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
        'PostWithEmptyBody': grpc.unary_unary_rpc_method_handler(
            servicer.PostWithEmptyBody,
            request_deserializer=a__bit__of__everything__pb2.Body.FromString,
            response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'grpc.gateway.examples.examplepb.ABitOfEverythingService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
class camelCaseServiceNameStub(object):
    """camelCase and lowercase service names are valid but not recommended (use TitleCase instead)
    """

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Single unary-unary RPC taking and returning google.protobuf.Empty.
        self.Empty = channel.unary_unary(
            '/grpc.gateway.examples.examplepb.camelCaseServiceName/Empty',
            request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
        )
class camelCaseServiceNameServicer(object):
    """camelCase and lowercase service names are valid but not recommended (use TitleCase instead)
    """

    def Empty(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        # Generated default: subclasses override this; otherwise the call
        # fails with UNIMPLEMENTED on both the gRPC and Python sides.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_camelCaseServiceNameServicer_to_server(servicer, server):
    """Register `servicer`'s camelCaseServiceName method handlers on `server`."""
    # One unary-unary handler per RPC, keyed by method name.
    rpc_method_handlers = {
            'Empty': grpc.unary_unary_rpc_method_handler(
                    servicer.Empty,
                    request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
                    response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'grpc.gateway.examples.examplepb.camelCaseServiceName', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
class AnotherServiceWithNoBindingsStub(object):
    # missing associated documentation comment in .proto file
    pass

    def __init__(self, channel):
        """Constructor.

        Args:
            channel: A grpc.Channel.
        """
        # Pre-bound unary-unary callable for the NoBindings RPC
        # (protobuf Empty in, protobuf Empty out).
        self.NoBindings = channel.unary_unary(
                '/grpc.gateway.examples.examplepb.AnotherServiceWithNoBindings/NoBindings',
                request_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
                response_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
                )
class AnotherServiceWithNoBindingsServicer(object):
    # missing associated documentation comment in .proto file
    pass

    def NoBindings(self, request, context):
        # missing associated documentation comment in .proto file
        pass
        # Generated default: subclasses override this; otherwise the call
        # fails with UNIMPLEMENTED on both the gRPC and Python sides.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_AnotherServiceWithNoBindingsServicer_to_server(servicer, server):
    """Register `servicer`'s AnotherServiceWithNoBindings method handlers on `server`."""
    # One unary-unary handler per RPC, keyed by method name.
    rpc_method_handlers = {
            'NoBindings': grpc.unary_unary_rpc_method_handler(
                    servicer.NoBindings,
                    request_deserializer=google_dot_protobuf_dot_empty__pb2.Empty.FromString,
                    response_serializer=google_dot_protobuf_dot_empty__pb2.Empty.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'grpc.gateway.examples.examplepb.AnotherServiceWithNoBindings', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
| 44.861671 | 96 | 0.772339 | 1,657 | 15,567 | 6.853349 | 0.087508 | 0.025361 | 0.047904 | 0.056358 | 0.889574 | 0.871522 | 0.854086 | 0.774921 | 0.701039 | 0.668633 | 0 | 0.005862 | 0.156164 | 15,567 | 346 | 97 | 44.991329 | 0.858633 | 0.116272 | 0 | 0.563707 | 1 | 0 | 0.146442 | 0.085473 | 0 | 0 | 0 | 0 | 0 | 1 | 0.081081 | false | 0.061776 | 0.023166 | 0 | 0.127413 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
dd325949df937ec3ba51ced338d7330cda796968 | 7,080 | py | Python | 2020/11/asdf.py | Ernest1338/AdventOfCode | 69ff951e1e82b93f6507edceb83ac19ecb4cb8cf | [
"MIT"
] | 1 | 2021-12-01T21:08:27.000Z | 2021-12-01T21:08:27.000Z | 2020/11/asdf.py | Ernest1338/AdventOfCode | 69ff951e1e82b93f6507edceb83ac19ecb4cb8cf | [
"MIT"
] | null | null | null | 2020/11/asdf.py | Ernest1338/AdventOfCode | 69ff951e1e82b93f6507edceb83ac19ecb4cb8cf | [
"MIT"
] | null | null | null | inputfile = open('input.txt','r').readlines()
for i in range(len(inputfile)):
inputfile[i] = inputfile[i].strip('\n')
part = 1 # 1 or 2
answer = 0
if part == 1:
def adjacentSeats(seats, row, column):
adjacentSeatsList = []
try:
if (row-1)>=0 and (column-1)>=0:
adjacentSeatsList.append(seats[row-1][column-1])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
try:
if (row-1)>=0 and (column)>=0:
adjacentSeatsList.append(seats[row-1][column])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
try:
if (row-1)>=0 and (column+1)>=0:
adjacentSeatsList.append(seats[row-1][column+1])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
try:
if (row)>=0 and (column+1)>=0:
adjacentSeatsList.append(seats[row][column+1])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
try:
if (row+1)>=0 and (column+1)>=0:
adjacentSeatsList.append(seats[row+1][column+1])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
try:
if (row+1)>=0 and (column)>=0:
adjacentSeatsList.append(seats[row+1][column])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
try:
if (row+1)>=0 and (column-1)>=0:
adjacentSeatsList.append(seats[row+1][column-1])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
try:
if (row)>=0 and (column-1)>=0:
adjacentSeatsList.append(seats[row][column-1])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
return adjacentSeatsList
for _ in range(102):
newInputfile = []
for i in range(len(inputfile)):
for a in range(len(inputfile[0])):
adjacentTest = adjacentSeats(inputfile, i, a)
if inputfile[i][a]=="L" and "#" not in adjacentTest:
newInputfile.append("#")
elif inputfile[i][a]=="#":
nmbrOfOccupied = 0
for b in adjacentTest:
if b == "#":
nmbrOfOccupied += 1
if nmbrOfOccupied >= 4:
newInputfile.append("L")
else:
newInputfile.append("#")
elif inputfile[i][a]=="L" and "#" in adjacentTest:
newInputfile.append("L")
else:
newInputfile.append(".")
newNewInputfile = []
var = 1
toAppend = ""
for i in newInputfile:
if var < len(inputfile[0]):
toAppend += str(i)
var += 1
else:
toAppend += str(i)
newNewInputfile.append(toAppend)
toAppend = ""
var = 1
inputfile = newNewInputfile
for i in newNewInputfile:
for a in i:
if a=="#":
answer += 1
elif part == 2:
def adjacentSeats(seats, row, column):
adjacentSeatsList = []
try:
if (row-1)>=0 and (column-1)>=0:
adjacentSeatsList.append(seats[row-1][column-1])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
try:
if (row-1)>=0 and (column)>=0:
adjacentSeatsList.append(seats[row-1][column])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
try:
if (row-1)>=0 and (column+1)>=0:
adjacentSeatsList.append(seats[row-1][column+1])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
try:
if (row)>=0 and (column+1)>=0:
adjacentSeatsList.append(seats[row][column+1])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
try:
if (row+1)>=0 and (column+1)>=0:
adjacentSeatsList.append(seats[row+1][column+1])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
try:
if (row+1)>=0 and (column)>=0:
adjacentSeatsList.append(seats[row+1][column])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
try:
if (row+1)>=0 and (column-1)>=0:
adjacentSeatsList.append(seats[row+1][column-1])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
try:
if (row)>=0 and (column-1)>=0:
adjacentSeatsList.append(seats[row][column-1])
else:
adjacentSeatsList.append(".")
except:
adjacentSeatsList.append(".")
return adjacentSeatsList
for _ in range(102):
newInputfile = []
for i in range(len(inputfile)):
for a in range(len(inputfile[0])):
adjacentTest = adjacentSeats(inputfile, i, a)
if inputfile[i][a]=="L" and "#" not in adjacentTest:
newInputfile.append("#")
elif inputfile[i][a]=="#":
nmbrOfOccupied = 0
for b in adjacentTest:
if b == "#":
nmbrOfOccupied += 1
if nmbrOfOccupied >= 4:
newInputfile.append("L")
else:
newInputfile.append("#")
elif inputfile[i][a]=="L" and "#" in adjacentTest:
newInputfile.append("L")
else:
newInputfile.append(".")
newNewInputfile = []
var = 1
toAppend = ""
for i in newInputfile:
if var < len(inputfile[0]):
toAppend += str(i)
var += 1
else:
toAppend += str(i)
newNewInputfile.append(toAppend)
toAppend = ""
var = 1
inputfile = newNewInputfile
for i in newNewInputfile:
for a in i:
if a=="#":
answer += 1
else:
print("nope")
print("The final answer is: "+str(answer)) | 33.875598 | 68 | 0.458898 | 619 | 7,080 | 5.245557 | 0.082391 | 0.340006 | 0.039421 | 0.142901 | 0.959347 | 0.959347 | 0.952264 | 0.952264 | 0.952264 | 0.952264 | 0 | 0.026538 | 0.414548 | 7,080 | 209 | 69 | 33.875598 | 0.756815 | 0.000847 | 0 | 0.960396 | 0 | 0 | 0.013149 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009901 | false | 0 | 0 | 0 | 0.019802 | 0.009901 | 0 | 0 | 0 | null | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
dd463dcd6524a08e9c8eb42ea668bfdc1641cb92 | 2,730 | py | Python | jmd2018.py | HSDL/SVSmetrics | be2b2f6d8a06095947db73a0bb17f3ae01f20945 | [
"MIT"
] | null | null | null | jmd2018.py | HSDL/SVSmetrics | be2b2f6d8a06095947db73a0bb17f3ae01f20945 | [
"MIT"
] | null | null | null | jmd2018.py | HSDL/SVSmetrics | be2b2f6d8a06095947db73a0bb17f3ae01f20945 | [
"MIT"
] | null | null | null | import pkg_resources
import TeamVariety
# Design-hierarchy level names and their parallel weights, passed through to
# TeamVariety.Corpus.  NOTE(review): weight semantics live in TeamVariety --
# confirm there before changing.
levels = ['PhysicalPrinciple', 'WorkingPrinciple', 'Embodiment']
weights = [10, 5, 2]
def run_a_case(levels, weights, remove, run_name):
    """Compute individual and team variety for one experimental condition.

    Runs the analysis once over all `levels` combined (file prefix
    ``<run_name>_alllevels``), then once per individual level (prefix
    ``<run_name>_<level>``), writing ``*_individual_variety.csv``,
    ``*_treatments.csv`` and ``*_results.csv`` into the ``sensitive_data``
    package directory.

    Args:
        levels: hierarchy level names understood by ``TeamVariety.Corpus``.
        weights: per-level weights, parallel to ``levels``.
        remove: mapping of participant attribute -> value passed to
            ``remove_participants`` to filter the corpus.
        run_name: prefix used for all output file names.
    """
    # All levels together, then each level on its own (same pipeline; the
    # original repeated this body verbatim for both passes).
    _run_variety_condition(levels, weights, remove, run_name + '_alllevels')
    for i, level in enumerate(levels):
        _run_variety_condition([level], [weights[i]], remove,
                               run_name + '_' + level)


def _run_variety_condition(levels, weights, remove, prefix):
    """Load the corpus, filter participants, and write variety outputs for `prefix`."""
    # Read in the data
    participants = TeamVariety.Corpus(
        pkg_resources.resource_filename('sensitive_data', 'designs.csv'),
        pkg_resources.resource_filename('sensitive_data', 'participants.csv'),
        levels, weights)

    # Remove participants matching the filter criteria.
    for key in remove:
        participants.remove_participants(key, remove[key])

    # Compute variety for individuals.
    individual_file = pkg_resources.resource_filename(
        'sensitive_data', prefix + '_individual_variety.csv')
    participants.compute_individual_variety(individual_file)

    # Get all conditions (100 samples of team size 4).
    treatment_file = pkg_resources.resource_filename(
        'sensitive_data', prefix + '_treatments.csv')
    results_file = pkg_resources.resource_filename(
        'sensitive_data', prefix + '_results.csv')
    participants.get_all_conditions(100, 4, treatment_file=treatment_file,
                                    results_file=results_file)
# Run the study conditions.  NOTE(review): the remove-dict drops participants
# via TeamVariety.remove_participants; the mapping of Level/Modality codes to
# the cohort named in each run_name is defined there -- confirm upstream.
run_a_case(levels, weights, {"Level": 0, "Modality": 1}, 'virtualseniors')
run_a_case(levels, weights, {"Level": 1, "Modality": 1}, 'virtualfreshmen')
run_a_case(levels, weights, {"Level": 0, "Modality": 0}, 'physicalseniors')
run_a_case(levels, weights, {"Level": 1, "Modality": 0}, 'physicalfreshmen') | 51.509434 | 121 | 0.668132 | 288 | 2,730 | 6.027778 | 0.215278 | 0.076037 | 0.115207 | 0.16129 | 0.860023 | 0.847926 | 0.847926 | 0.847926 | 0.767281 | 0.767281 | 0 | 0.009528 | 0.231136 | 2,730 | 53 | 122 | 51.509434 | 0.817532 | 0.067399 | 0 | 0.4375 | 0 | 0 | 0.189988 | 0.040599 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0 | 0.0625 | 0 | 0.09375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
dd6a6f919cc54002ce0665fc44aba7130aa7bc0d | 1,172 | py | Python | tests/test_162.py | sungho-joo/leetcode2github | ce7730ef40f6051df23681dd3c0e1e657abba620 | [
"MIT"
] | null | null | null | tests/test_162.py | sungho-joo/leetcode2github | ce7730ef40f6051df23681dd3c0e1e657abba620 | [
"MIT"
] | null | null | null | tests/test_162.py | sungho-joo/leetcode2github | ce7730ef40f6051df23681dd3c0e1e657abba620 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import pytest
"""
Test 162. Find Peak Element
"""
@pytest.fixture(scope="session")
def init_variables_162():
from src.leetcode_162_find_peak_element import Solution
solution = Solution()
def _init_variables_162():
return solution
yield _init_variables_162
class TestClass162:
def test_solution_0(self, init_variables_162):
assert init_variables_162().findPeakElement([1, 2, 3, 1]) == 2
def test_solution_1(self, init_variables_162):
assert init_variables_162().findPeakElement([1, 2, 1, 3, 5, 6, 4]) == 5
#!/usr/bin/env python
import pytest
"""
Test 162. Find Peak Element
"""
@pytest.fixture(scope="session")
def init_variables_162():
from src.leetcode_162_find_peak_element import Solution
solution = Solution()
def _init_variables_162():
return solution
yield _init_variables_162
class TestClass162:
def test_solution_0(self, init_variables_162):
assert init_variables_162().findPeakElement([1, 2, 3, 1]) == 2
def test_solution_1(self, init_variables_162):
assert init_variables_162().findPeakElement([1, 2, 1, 3, 5, 6, 4]) == 5
| 20.561404 | 79 | 0.699659 | 162 | 1,172 | 4.765432 | 0.216049 | 0.235751 | 0.290155 | 0.093264 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0.094737 | 0.18942 | 1,172 | 56 | 80 | 20.928571 | 0.717895 | 0.03413 | 0 | 1 | 0 | 0 | 0.013208 | 0 | 0 | 0 | 0 | 0 | 0.153846 | 1 | 0.307692 | false | 0 | 0.153846 | 0.076923 | 0.615385 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 11 |
660427c691d6cbd80dd8802cf6169ce58cb295d0 | 187 | py | Python | yamldict/__init__.py | bmustiata/yamldict | e25c12a8bb7f265032fcc480f0db3a3707c22d38 | [
"BSD-3-Clause"
] | null | null | null | yamldict/__init__.py | bmustiata/yamldict | e25c12a8bb7f265032fcc480f0db3a3707c22d38 | [
"BSD-3-Clause"
] | 1 | 2021-06-07T09:07:29.000Z | 2021-06-07T09:07:29.000Z | yamldict/__init__.py | bmustiata/yamldict | e25c12a8bb7f265032fcc480f0db3a3707c22d38 | [
"BSD-3-Clause"
] | null | null | null | from yamldict.YamlDictClass import YamlDict # noqa: F401
from yamldict.YamlListClass import YamlList # noqa: F401
from yamldict.create_function import create, create_all # noqa: F401
| 37.4 | 69 | 0.807487 | 24 | 187 | 6.208333 | 0.458333 | 0.241611 | 0.161074 | 0.268456 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055901 | 0.139037 | 187 | 4 | 70 | 46.75 | 0.869565 | 0.171123 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
661f72e9df213aec0006f505638dbecb428c16ae | 19,293 | py | Python | genomics_data_index/test/integration/api/query/test_GenomicsDataIndex.py | apetkau/thesis-index | 6c96e9ed75d8e661437effe62a939727a0b473fc | [
"Apache-2.0"
] | 1 | 2021-04-21T00:19:49.000Z | 2021-04-21T00:19:49.000Z | genomics_data_index/test/integration/api/query/test_GenomicsDataIndex.py | apetkau/thesis-index | 6c96e9ed75d8e661437effe62a939727a0b473fc | [
"Apache-2.0"
] | null | null | null | genomics_data_index/test/integration/api/query/test_GenomicsDataIndex.py | apetkau/thesis-index | 6c96e9ed75d8e661437effe62a939727a0b473fc | [
"Apache-2.0"
] | null | null | null | import shutil
import tempfile
from pathlib import Path
from tempfile import TemporaryDirectory
import pytest
from genomics_data_index.api.query.GenomicsDataIndex import GenomicsDataIndex
from genomics_data_index.configuration.Project import Project
from genomics_data_index.storage.io.mutation.NucleotideSampleDataPackage import NucleotideSampleDataPackage
from genomics_data_index.storage.io.processor.SerialSampleFilesProcessor import SerialSampleFilesProcessor
from genomics_data_index.storage.service import EntityExistsError
from genomics_data_index.test.integration import sample_dirs, reference_file
def test_get_reference_tree(loaded_database_genomic_data_store_with_tree: GenomicsDataIndex):
    """A known reference name yields a tree; an unknown name raises EntityExistsError."""
    gds = loaded_database_genomic_data_store_with_tree

    assert gds.reference_tree('genome') is not None

    with pytest.raises(EntityExistsError) as execinfo:
        gds.reference_tree('invalid_reference')
    assert 'No reference genome with name=[invalid_reference]' in str(execinfo.value)
def test_mlst_schemes(loaded_database_genomic_data_store: GenomicsDataIndex):
    """The loaded store exposes exactly the three expected MLST schemes."""
    schemes = loaded_database_genomic_data_store.mlst_schemes()

    assert isinstance(schemes, list)
    assert len(schemes) == 3
    assert set(schemes) == {'lmonocytogenes', 'ecoli', 'campylobacter'}
def test_summaries_loaded_data(loaded_database_genomic_data_store: GenomicsDataIndex):
    """Exercise sample/reference counts and mutation summaries on the loaded store.

    Covers both 'spdi' (deletion as length) and 'spdi_ref' (deletion as
    sequence) identifier styles, with and without unknown/missing positions,
    plus the generic features_summary() entry point.
    """
    gds = loaded_database_genomic_data_store

    # Samples
    assert 9 == gds.count_samples()
    assert {'2014C-3598', '2014C-3599', '2014D-0067', '2014D-0068',
            'CFSAN002349', 'CFSAN023463',
            'SampleA', 'SampleB', 'SampleC'} == set(gds.sample_names())

    # References
    assert 1 == gds.count_references()
    assert ['genome'] == gds.reference_names()
    assert 112 == gds.count_mutations('genome')
    assert 112 + 440 == gds.count_mutations('genome', include_unknown=True)

    # Mutations, spdi ids, ignoring unknown positions
    ms = gds.mutations_summary('genome', id_type='spdi', ignore_annotations=True)
    assert 112 == len(ms)
    assert 'Mutation' == ms.index.name
    assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type', 'Count', 'Total', 'Percent'] == list(ms.columns)

    ## Convert percent to int to make it easier to compare in assert statements
    ms['Percent'] = ms['Percent'].astype(int)

    assert ['reference', 839, 1, 'G', 'SNP', 2, 3, 66] == ms.loc['reference:839:1:G'].values.tolist()
    assert ['reference', 866, 9, 'G', 'INDEL', 1, 3, 33] == ms.loc['reference:866:9:G'].values.tolist()
    assert ['reference', 1048, 1, 'G', 'SNP', 1, 3, 33] == ms.loc['reference:1048:1:G'].values.tolist()
    assert ['reference', 3897, 5, 'G', 'INDEL', 2, 3, 66] == ms.loc['reference:3897:5:G'].values.tolist()

    # Mutations, spdi ids, including unknown positions (adds 440 UNKNOWN_MISSING rows)
    ms = gds.mutations_summary('genome', id_type='spdi', ignore_annotations=True, include_unknown=True)
    assert 112 + 440 == len(ms)
    assert 'Mutation' == ms.index.name
    assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type', 'Count', 'Total', 'Percent'] == list(ms.columns)

    ## Convert percent to int to make it easier to compare in assert statements
    ms['Percent'] = ms['Percent'].astype(int)

    assert ['reference', 839, 1, 'G', 'SNP', 2, 3, 66] == ms.loc['reference:839:1:G'].values.tolist()
    assert ['reference', 866, 9, 'G', 'INDEL', 1, 3, 33] == ms.loc['reference:866:9:G'].values.tolist()
    assert ['reference', 1048, 1, 'G', 'SNP', 1, 3, 33] == ms.loc['reference:1048:1:G'].values.tolist()
    assert ['reference', 3897, 5, 'G', 'INDEL', 2, 3, 66] == ms.loc['reference:3897:5:G'].values.tolist()
    assert ['reference', 89, 1, '?', 'UNKNOWN_MISSING', 3, 3, 100] == ms.loc['reference:89:1:?'].values.tolist()
    assert ['reference', 5100, 1, '?', 'UNKNOWN_MISSING', 2, 3, 66] == ms.loc['reference:5100:1:?'].values.tolist()
    assert ['reference', 649, 1, '?', 'UNKNOWN_MISSING', 1, 3, 33] == ms.loc['reference:649:1:?'].values.tolist()

    # Mutations, spdi_ref ids (deletion given as its sequence, not its length)
    ms = gds.mutations_summary('genome', id_type='spdi_ref', ignore_annotations=True)
    assert 112 == len(ms)
    assert 'Mutation' == ms.index.name
    assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type', 'Count', 'Total', 'Percent'] == list(ms.columns)

    ## Convert percent to int to make it easier to compare in assert statements
    ms['Percent'] = ms['Percent'].astype(int)

    assert ['reference', 839, 'C', 'G', 'SNP', 2, 3, 66] == ms.loc['reference:839:C:G'].values.tolist()
    assert ['reference', 866, 'GCCAGATCC', 'G', 'INDEL', 1, 3, 33] == ms.loc[
        'reference:866:GCCAGATCC:G'].values.tolist()
    assert ['reference', 1048, 'C', 'G', 'SNP', 1, 3, 33] == ms.loc['reference:1048:C:G'].values.tolist()
    assert ['reference', 3897, 'GCGCA', 'G', 'INDEL', 2, 3, 66] == ms.loc['reference:3897:GCGCA:G'].values.tolist()

    # Mutations, spdi_ref ids, including unknown positions
    ms = gds.mutations_summary('genome', id_type='spdi_ref', ignore_annotations=True, include_unknown=True)
    assert 112 + 440 == len(ms)
    assert 'Mutation' == ms.index.name
    assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type', 'Count', 'Total', 'Percent'] == list(ms.columns)

    ## Convert percent to int to make it easier to compare in assert statements
    ms['Percent'] = ms['Percent'].astype(int)

    assert ['reference', 839, 'C', 'G', 'SNP', 2, 3, 66] == ms.loc['reference:839:C:G'].values.tolist()
    assert ['reference', 866, 'GCCAGATCC', 'G', 'INDEL', 1, 3, 33] == ms.loc[
        'reference:866:GCCAGATCC:G'].values.tolist()
    assert ['reference', 1048, 'C', 'G', 'SNP', 1, 3, 33] == ms.loc['reference:1048:C:G'].values.tolist()
    assert ['reference', 3897, 'GCGCA', 'G', 'INDEL', 2, 3, 66] == ms.loc['reference:3897:GCGCA:G'].values.tolist()
    assert ['reference', 89, 'A', '?', 'UNKNOWN_MISSING', 3, 3, 100] == ms.loc['reference:89:A:?'].values.tolist()
    assert ['reference', 5100, 'T', '?', 'UNKNOWN_MISSING', 2, 3, 66] == ms.loc['reference:5100:T:?'].values.tolist()
    assert ['reference', 649, 'T', '?', 'UNKNOWN_MISSING', 1, 3, 33] == ms.loc['reference:649:T:?'].values.tolist()

    # Mutations including annotation columns (all NA for this unannotated store)
    ms = gds.mutations_summary('genome', id_type='spdi', ignore_annotations=False)
    assert 112 == len(ms)
    assert 'Mutation' == ms.index.name
    assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type',
            'Count', 'Total', 'Percent', 'Annotation', 'Annotation_Impact',
            'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
            'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c', 'ID_HGVS_GN.p'] == list(ms.columns)

    ## Convert percent to int to make it easier to compare in assert statements
    ms['Percent'] = ms['Percent'].astype(int)

    assert ['reference', 839, 1, 'G', 'SNP', 2, 3, 66] + ['NA'] * 12 == ms.loc['reference:839:1:G'].fillna(
        'NA').values.tolist()

    # Test case of directly calling features_summary (must match mutations_summary)
    ms = gds.features_summary(kind='mutations', scope='genome', id_type='spdi', ignore_annotations=True)
    assert 112 == len(ms)
    assert 'Mutation' == ms.index.name
    assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type', 'Count', 'Total', 'Percent'] == list(ms.columns)

    ## Convert percent to int to make it easier to compare in assert statements
    ms['Percent'] = ms['Percent'].astype(int)

    assert ['reference', 839, 1, 'G', 'SNP', 2, 3, 66] == ms.loc['reference:839:1:G'].values.tolist()
    assert ['reference', 866, 9, 'G', 'INDEL', 1, 3, 33] == ms.loc['reference:866:9:G'].values.tolist()
    assert ['reference', 1048, 1, 'G', 'SNP', 1, 3, 33] == ms.loc['reference:1048:1:G'].values.tolist()
    assert ['reference', 3897, 5, 'G', 'INDEL', 2, 3, 66] == ms.loc['reference:3897:5:G'].values.tolist()

    # Test case of only including unknowns
    ms = gds.mutations_summary('genome', id_type='spdi', include_present=False, include_unknown=True)
    assert 440 == len(ms)
    assert 'reference:649:1:?' in set(ms.index.tolist())
    assert 'reference:839:1:G' not in (ms.index.tolist())

    # Test case of no present or unknowns: empty summary
    ms = gds.mutations_summary('genome', id_type='spdi', include_present=False, include_unknown=False)
    assert 0 == len(ms)
def test_summaries_mlst_data(loaded_database_genomic_data_store: GenomicsDataIndex):
    """Exercise MLST feature summaries for the lmonocytogenes scheme.

    Covers the default summary, include_unknown, per-locus filtering,
    unknown-only, empty, and the mlst_summary() convenience wrapper.
    """
    gds = loaded_database_genomic_data_store

    # MLST summaries for lmonocytogenes
    summary_df = gds.features_summary(kind='mlst', scope='lmonocytogenes')
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 10 == len(summary_df)
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'abcZ', '1', 5, 5, 100] == summary_df.loc['mlst:lmonocytogenes:abcZ:1'].tolist()
    assert ['lmonocytogenes', 'bglA', '51', 3, 5, 60] == summary_df.loc['mlst:lmonocytogenes:bglA:51'].tolist()
    assert ['lmonocytogenes', 'lhkA', '4', 1, 5, 20] == summary_df.loc['mlst:lmonocytogenes:lhkA:4'].tolist()
    assert ['lmonocytogenes', 'lhkA', '5', 4, 5, 80] == summary_df.loc['mlst:lmonocytogenes:lhkA:5'].tolist()
    assert ['lmonocytogenes', 'ldh', '5', 4, 5, 80] == summary_df.loc['mlst:lmonocytogenes:ldh:5'].tolist()

    # MLST summaries for lmonocytogenes, include unknown ('?') alleles
    summary_df = gds.features_summary(kind='mlst', scope='lmonocytogenes', include_unknown=True)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 11 == len(summary_df)
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'abcZ', '1', 5, 5, 100] == summary_df.loc['mlst:lmonocytogenes:abcZ:1'].tolist()
    assert ['lmonocytogenes', 'bglA', '51', 3, 5, 60] == summary_df.loc['mlst:lmonocytogenes:bglA:51'].tolist()
    assert ['lmonocytogenes', 'lhkA', '4', 1, 5, 20] == summary_df.loc['mlst:lmonocytogenes:lhkA:4'].tolist()
    assert ['lmonocytogenes', 'lhkA', '5', 4, 5, 80] == summary_df.loc['mlst:lmonocytogenes:lhkA:5'].tolist()
    assert ['lmonocytogenes', 'ldh', '5', 4, 5, 80] == summary_df.loc['mlst:lmonocytogenes:ldh:5'].tolist()
    assert ['lmonocytogenes', 'ldh', '?', 1, 5, 20] == summary_df.loc['mlst:lmonocytogenes:ldh:?'].tolist()

    # MLST summaries for lmonocytogenes with a specific locus id
    summary_df = gds.features_summary(kind='mlst', scope='lmonocytogenes', locus='bglA')
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 2 == len(summary_df)
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'bglA', '51', 3, 5, 60] == summary_df.loc['mlst:lmonocytogenes:bglA:51'].tolist()
    assert ['lmonocytogenes', 'bglA', '52', 2, 5, 40] == summary_df.loc['mlst:lmonocytogenes:bglA:52'].tolist()

    # MLST summaries for lmonocytogenes, unknown only (include_present=False)
    summary_df = gds.features_summary(kind='mlst', scope='lmonocytogenes', include_present=False, include_unknown=True)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 1 == len(summary_df)
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)
    assert ['lmonocytogenes', 'ldh', '?', 1, 5, 20] == summary_df.loc['mlst:lmonocytogenes:ldh:?'].tolist()

    # MLST summaries for lmonocytogenes, neither present nor unknown: empty frame
    summary_df = gds.features_summary(kind='mlst', scope='lmonocytogenes', include_present=False, include_unknown=False)
    summary_df['Percent'] = summary_df['Percent'].astype(int)  # Convert to int for easier comparison
    assert 0 == len(summary_df)
    assert 'MLST Feature' == summary_df.index.name
    assert ['Scheme', 'Locus', 'Allele', 'Count', 'Total', 'Percent'] == list(summary_df.columns)

    # Summaries using the 'mlst_summary()' convenience wrapper
    summary_df = gds.mlst_summary(scheme_name='lmonocytogenes')
    assert 10 == len(summary_df)
def test_summaries_variant_annotations(loaded_database_genomic_data_store_annotations: GenomicsDataIndex):
    """Mutation summaries on an annotated store carry snpEff annotation columns.

    Checks both 'spdi' (deletion as length) and 'spdi_ref' (deletion as
    sequence) identifier styles for three variant shapes: a missense SNP, a
    conservative in-frame deletion, and an intergenic insertion (where
    protein-level annotation fields are NA).
    """
    gds = loaded_database_genomic_data_store_annotations

    # Samples
    assert 3 == gds.count_samples()
    assert {'SH10-014', 'SH14-014', 'SH14-001'} == set(gds.sample_names())

    # References
    assert 1 == gds.count_references()
    assert ['NC_011083'] == gds.reference_names()

    # Mutations
    assert 177 == gds.count_mutations('NC_011083')

    # spdi identifiers
    ms = gds.mutations_summary('NC_011083', id_type='spdi', ignore_annotations=False)
    assert 177 == len(ms)
    assert 'Mutation' == ms.index.name
    assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type',
            'Count', 'Total', 'Percent', 'Annotation', 'Annotation_Impact',
            'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
            'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c', 'ID_HGVS_GN.p'] == list(ms.columns)

    ## Convert percent to int to make it easier to compare in assert statements
    ms['Percent'] = ms['Percent'].astype(int)

    ## missense variant
    assert ['NC_011083', 140658, 1, 'A', 'SNP', 3, 3, 100,
            'missense_variant', 'MODERATE', 'murF', 'SEHA_RS01180', 'transcript', 'protein_coding',
            'c.497C>A', 'p.Ala166Glu',
            'hgvs:NC_011083:SEHA_RS01180:c.497C>A', 'hgvs:NC_011083:SEHA_RS01180:p.Ala166Glu',
            'hgvs_gn:NC_011083:murF:c.497C>A', 'hgvs_gn:NC_011083:murF:p.Ala166Glu'] == list(
        ms.loc['NC_011083:140658:1:A'])

    ## inframe deletion
    assert ['NC_011083', 4465400, len('GGCCGAA'), 'G', 'INDEL', 3, 3, 100,
            'conservative_inframe_deletion', 'MODERATE', 'tyrB', 'SEHA_RS22180', 'transcript', 'protein_coding',
            'c.157_162delGAAGCC', 'p.Glu53_Ala54del',
            'hgvs:NC_011083:SEHA_RS22180:c.157_162delGAAGCC', 'hgvs:NC_011083:SEHA_RS22180:p.Glu53_Ala54del',
            'hgvs_gn:NC_011083:tyrB:c.157_162delGAAGCC', 'hgvs_gn:NC_011083:tyrB:p.Glu53_Ala54del'] == list(
        ms.loc['NC_011083:4465400:7:G'])

    ## Intergenic variant (with some NA values in fields)
    assert ['NC_011083', 4555461, len('T'), 'TC', 'INDEL', 1, 3, 33,
            'intergenic_region', 'MODIFIER', 'SEHA_RS22510-SEHA_RS26685', 'SEHA_RS22510-SEHA_RS26685',
            'intergenic_region', 'NA',
            'n.4555461_4555462insC', 'NA',
            'hgvs:NC_011083:n.4555461_4555462insC', 'NA',
            'hgvs_gn:NC_011083:n.4555461_4555462insC', 'NA'] == list(ms.loc['NC_011083:4555461:1:TC'].fillna('NA'))

    # spdi_ref identifiers (deletion given as its sequence)
    ms = gds.mutations_summary('NC_011083', id_type='spdi_ref')
    assert 177 == len(ms)
    assert 'Mutation' == ms.index.name
    assert ['Sequence', 'Position', 'Deletion', 'Insertion', 'Type',
            'Count', 'Total', 'Percent', 'Annotation', 'Annotation_Impact',
            'Gene_Name', 'Gene_ID', 'Feature_Type', 'Transcript_BioType',
            'HGVS.c', 'HGVS.p', 'ID_HGVS.c', 'ID_HGVS.p', 'ID_HGVS_GN.c', 'ID_HGVS_GN.p'] == list(ms.columns)

    ## Convert percent to int to make it easier to compare in assert statements
    ms['Percent'] = ms['Percent'].astype(int)

    ## missense variant
    assert ['NC_011083', 140658, 'C', 'A', 'SNP', 3, 3, 100,
            'missense_variant', 'MODERATE', 'murF', 'SEHA_RS01180', 'transcript', 'protein_coding',
            'c.497C>A', 'p.Ala166Glu',
            'hgvs:NC_011083:SEHA_RS01180:c.497C>A', 'hgvs:NC_011083:SEHA_RS01180:p.Ala166Glu',
            'hgvs_gn:NC_011083:murF:c.497C>A', 'hgvs_gn:NC_011083:murF:p.Ala166Glu'] == list(
        ms.loc['NC_011083:140658:C:A'])

    ## inframe deletion
    assert ['NC_011083', 4465400, 'GGCCGAA', 'G', 'INDEL', 3, 3, 100,
            'conservative_inframe_deletion', 'MODERATE', 'tyrB', 'SEHA_RS22180', 'transcript', 'protein_coding',
            'c.157_162delGAAGCC', 'p.Glu53_Ala54del',
            'hgvs:NC_011083:SEHA_RS22180:c.157_162delGAAGCC', 'hgvs:NC_011083:SEHA_RS22180:p.Glu53_Ala54del',
            'hgvs_gn:NC_011083:tyrB:c.157_162delGAAGCC', 'hgvs_gn:NC_011083:tyrB:p.Glu53_Ala54del'] == list(
        ms.loc['NC_011083:4465400:GGCCGAA:G'])

    ## Intergenic variant (with some NA values in fields)
    assert ['NC_011083', 4555461, 'T', 'TC', 'INDEL', 1, 3, 33,
            'intergenic_region', 'MODIFIER', 'SEHA_RS22510-SEHA_RS26685', 'SEHA_RS22510-SEHA_RS26685',
            'intergenic_region', 'NA',
            'n.4555461_4555462insC', 'NA',
            'hgvs:NC_011083:n.4555461_4555462insC', 'NA',
            'hgvs_gn:NC_011083:n.4555461_4555462insC', 'NA'] == list(ms.loc['NC_011083:4555461:T:TC'].fillna('NA'))
def test_connect_to_project_from_dir():
    """Connecting by project_dir wires services and stores data under <project>/.gdi-data."""
    with TemporaryDirectory() as tmp_dir_name:
        project_dir = Path(tmp_dir_name) / 'project'
        Project.initialize_project(project_dir)

        ds = GenomicsDataIndex.connect(project_dir=project_dir)

        assert ds is not None
        assert ds.connection.reference_service is not None
        assert ds.connection.filesystem_storage.variation_dir.parent == project_dir / '.gdi-data'
def test_connect_to_project_from_project():
    """Connecting by Project object behaves the same as connecting by directory."""
    with TemporaryDirectory() as tmp_dir_name:
        project_dir = Path(tmp_dir_name) / 'project'
        project = Project.initialize_project(project_dir)

        ds = GenomicsDataIndex.connect(project=project)

        assert ds is not None
        assert ds.connection.reference_service is not None
        assert ds.connection.filesystem_storage.variation_dir.parent == project_dir / '.gdi-data'
def test_connect_and_tree_after_moving_project(loaded_data_store_from_project_dir):
    """Trees can still be built after the project directory has been relocated."""
    with TemporaryDirectory() as tmp_dir_name:
        base_dir = Path(tmp_dir_name)
        original_dir = base_dir / 'project'
        Project.initialize_project(original_dir)
        index = GenomicsDataIndex.connect(project_dir=original_dir)
        connection = index.connection

        # Load nucleotide variation from the snippy sample directories.
        connection.reference_service.add_reference_genome(reference_file)
        snippy_tmp_dir = Path(tempfile.mkdtemp())
        data_package = NucleotideSampleDataPackage.create_from_snippy(
            sample_dirs, SerialSampleFilesProcessor(snippy_tmp_dir))
        connection.variation_service.insert(data_package, feature_scope_name='genome')

        # Building a tree works before the move ...
        tree_query_before = index.samples_query().build_tree(kind='mutation', scope='genome')
        assert tree_query_before.tree is not None

        # ... and must keep working after the project directory is moved.
        moved_dir = base_dir / 'project2'
        shutil.move(original_dir, moved_dir)
        moved_index = GenomicsDataIndex.connect(project_dir=moved_dir)
        tree_query_after = moved_index.samples_query().build_tree(kind='mutation', scope='genome')
        assert tree_query_after.tree is not None
| 55.280802 | 120 | 0.668636 | 2,552 | 19,293 | 4.887539 | 0.103056 | 0.033192 | 0.030305 | 0.045458 | 0.822176 | 0.795158 | 0.768219 | 0.742564 | 0.738876 | 0.690131 | 0 | 0.072345 | 0.168196 | 19,293 | 348 | 121 | 55.439655 | 0.704885 | 0.090706 | 0 | 0.583333 | 0 | 0 | 0.293526 | 0.088309 | 0 | 0 | 0 | 0 | 0.479167 | 1 | 0.033333 | false | 0 | 0.045833 | 0 | 0.079167 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
6628f1d0be9f6dca003ff5f8398069cc209e8e76 | 88 | py | Python | caldera/utils/testing/__init__.py | jvrana/pyro-graphnets | 1c9809253e47414ecf3f6604c2147d5676ff76c0 | [
"MIT"
] | null | null | null | caldera/utils/testing/__init__.py | jvrana/pyro-graphnets | 1c9809253e47414ecf3f6604c2147d5676ff76c0 | [
"MIT"
] | null | null | null | caldera/utils/testing/__init__.py | jvrana/pyro-graphnets | 1c9809253e47414ecf3f6604c2147d5676ff76c0 | [
"MIT"
] | null | null | null | from .contexts import _context_manager_test_cases
from .contexts import pytest_contexts
| 29.333333 | 49 | 0.886364 | 12 | 88 | 6.083333 | 0.666667 | 0.328767 | 0.493151 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 88 | 2 | 50 | 44 | 0.9125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
6639be87472539ff7a55e76c8f3698bf6d27c955 | 102 | py | Python | ramen/__init__.py | bmwang/ramen | 92d9eefb072d19fb7973a8ea18a1bbad91fcab77 | [
"Apache-2.0"
] | null | null | null | ramen/__init__.py | bmwang/ramen | 92d9eefb072d19fb7973a8ea18a1bbad91fcab77 | [
"Apache-2.0"
] | null | null | null | ramen/__init__.py | bmwang/ramen | 92d9eefb072d19fb7973a8ea18a1bbad91fcab77 | [
"Apache-2.0"
] | null | null | null | import ramen.core
import ramen.editor
from ramen.core.graph import Graph
from ramen.core import node
| 17 | 34 | 0.823529 | 17 | 102 | 4.941176 | 0.411765 | 0.321429 | 0.357143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.127451 | 102 | 5 | 35 | 20.4 | 0.94382 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
b0755d9808e481a66c3e036f717ba0c36a285de5 | 36,111 | py | Python | test/test_segmentations.py | jonancm/viennagrid-python | a56f23ab65cf82b2f06ff546d45c056bb9d326b2 | [
"MIT"
] | null | null | null | test/test_segmentations.py | jonancm/viennagrid-python | a56f23ab65cf82b2f06ff546d45c056bb9d326b2 | [
"MIT"
] | 1 | 2015-05-13T08:28:52.000Z | 2015-05-13T08:28:52.000Z | test/test_segmentations.py | jonancm/viennagrid-python | a56f23ab65cf82b2f06ff546d45c056bb9d326b2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
#-*- coding: utf-8 -*-
import sys
# If a path is supplied as the first command-line argument, consume it and
# prepend it to the module search path before 'viennagrid.wrapper' is
# imported — presumably so the tests run against a locally built wrapper
# module rather than an installed one (confirm with the build scripts).
if len(sys.argv) > 1:
    sys.path.insert(0, sys.argv.pop(1))
import unittest
import viennagrid.wrapper
##################
# LINEAR DOMAINS #
##################
class TestLinearCartesian1D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a linear cartesian 1D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointCartesian1D(1),
            viennagrid.wrapper.PointCartesian1D(2),
            viennagrid.wrapper.PointCartesian1D(3),
            viennagrid.wrapper.PointCartesian1D(4),
            viennagrid.wrapper.PointCartesian1D(5),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.LinearCartesian1D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.LinearCartesian1D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            segment.make_cell(v1, v2)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            segment.make_cell(v1, v2)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
class TestLinearCartesian2D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a linear cartesian 2D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointCartesian2D(1, 2),
            viennagrid.wrapper.PointCartesian2D(2, 3),
            viennagrid.wrapper.PointCartesian2D(3, 4),
            viennagrid.wrapper.PointCartesian2D(4, 5),
            viennagrid.wrapper.PointCartesian2D(5, 6),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.LinearCartesian2D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.LinearCartesian2D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            segment.make_cell(v1, v2)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            segment.make_cell(v1, v2)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
class TestLinearCartesian3D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a linear cartesian 3D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointCartesian3D(1, 2, 7),
            viennagrid.wrapper.PointCartesian3D(2, 3, 7),
            viennagrid.wrapper.PointCartesian3D(3, 4, 7),
            viennagrid.wrapper.PointCartesian3D(4, 5, 7),
            viennagrid.wrapper.PointCartesian3D(5, 6, 7),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.LinearCartesian3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.LinearCartesian3D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            segment.make_cell(v1, v2)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            segment.make_cell(v1, v2)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
class TestLinearCylindrical3D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a linear cylindrical 3D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointCylindrical3D(1, 2, 7),
            viennagrid.wrapper.PointCylindrical3D(2, 3, 7),
            viennagrid.wrapper.PointCylindrical3D(3, 4, 7),
            viennagrid.wrapper.PointCylindrical3D(4, 5, 7),
            viennagrid.wrapper.PointCylindrical3D(5, 6, 7),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.LinearCylindrical3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.LinearCylindrical3D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            segment.make_cell(v1, v2)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            segment.make_cell(v1, v2)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
class TestLinearPolar2D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a linear polar 2D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointPolar2D(1, 2),
            viennagrid.wrapper.PointPolar2D(2, 3),
            viennagrid.wrapper.PointPolar2D(3, 4),
            viennagrid.wrapper.PointPolar2D(4, 5),
            viennagrid.wrapper.PointPolar2D(5, 6),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.LinearPolar2D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.LinearPolar2D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            segment.make_cell(v1, v2)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            segment.make_cell(v1, v2)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
class TestLinearSpherical3D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a linear spherical 3D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointSpherical3D(1, 2, 7),
            viennagrid.wrapper.PointSpherical3D(2, 3, 7),
            viennagrid.wrapper.PointSpherical3D(3, 4, 7),
            viennagrid.wrapper.PointSpherical3D(4, 5, 7),
            viennagrid.wrapper.PointSpherical3D(5, 6, 7),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.LinearSpherical3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.LinearSpherical3D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            segment.make_cell(v1, v2)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            segment.make_cell(v1, v2)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
######################
# TRIANGULAR DOMAINS #
######################
class TestTriangularCartesian2D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a triangular cartesian 2D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointCartesian2D(1, 2),
            viennagrid.wrapper.PointCartesian2D(2, 3),
            viennagrid.wrapper.PointCartesian2D(3, 4),
            viennagrid.wrapper.PointCartesian2D(4, 5),
            viennagrid.wrapper.PointCartesian2D(5, 6),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.TriangularCartesian2D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.TriangularCartesian2D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            v3 = self.domain.get_vertex(2)
            segment.make_cell(v1, v2, v3)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            v3 = self.domain.get_vertex(3)
            segment.make_cell(v1, v2, v3)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
class TestTriangularCartesian3D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a triangular cartesian 3D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointCartesian3D(1, 2, 7),
            viennagrid.wrapper.PointCartesian3D(2, 3, 7),
            viennagrid.wrapper.PointCartesian3D(3, 4, 7),
            viennagrid.wrapper.PointCartesian3D(4, 5, 7),
            viennagrid.wrapper.PointCartesian3D(5, 6, 7),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.TriangularCartesian3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.TriangularCartesian3D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            v3 = self.domain.get_vertex(2)
            segment.make_cell(v1, v2, v3)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            v3 = self.domain.get_vertex(3)
            segment.make_cell(v1, v2, v3)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
class TestTriangularCylindrical3D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a triangular cylindrical 3D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointCylindrical3D(1, 2, 7),
            viennagrid.wrapper.PointCylindrical3D(2, 3, 7),
            viennagrid.wrapper.PointCylindrical3D(3, 4, 7),
            viennagrid.wrapper.PointCylindrical3D(4, 5, 7),
            viennagrid.wrapper.PointCylindrical3D(5, 6, 7),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.TriangularCylindrical3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.TriangularCylindrical3D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            v3 = self.domain.get_vertex(2)
            segment.make_cell(v1, v2, v3)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            v3 = self.domain.get_vertex(3)
            segment.make_cell(v1, v2, v3)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
class TestTriangularPolar2D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a triangular polar 2D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointPolar2D(1, 2),
            viennagrid.wrapper.PointPolar2D(2, 3),
            viennagrid.wrapper.PointPolar2D(3, 4),
            viennagrid.wrapper.PointPolar2D(4, 5),
            viennagrid.wrapper.PointPolar2D(5, 6),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.TriangularPolar2D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.TriangularPolar2D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            v3 = self.domain.get_vertex(2)
            segment.make_cell(v1, v2, v3)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            v3 = self.domain.get_vertex(3)
            segment.make_cell(v1, v2, v3)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
class TestTriangularSpherical3D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a triangular spherical 3D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointSpherical3D(1, 2, 7),
            viennagrid.wrapper.PointSpherical3D(2, 3, 7),
            viennagrid.wrapper.PointSpherical3D(3, 4, 7),
            viennagrid.wrapper.PointSpherical3D(4, 5, 7),
            viennagrid.wrapper.PointSpherical3D(5, 6, 7),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.TriangularSpherical3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.TriangularSpherical3D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            v3 = self.domain.get_vertex(2)
            segment.make_cell(v1, v2, v3)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            v3 = self.domain.get_vertex(3)
            segment.make_cell(v1, v2, v3)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
#########################
# QUADRILATERAL DOMAINS #
#########################
class TestQuadrilateralCartesian2D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a quadrilateral cartesian 2D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointCartesian2D(1, 2),
            viennagrid.wrapper.PointCartesian2D(2, 3),
            viennagrid.wrapper.PointCartesian2D(3, 4),
            viennagrid.wrapper.PointCartesian2D(4, 5),
            viennagrid.wrapper.PointCartesian2D(5, 6),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.QuadrilateralCartesian2D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.QuadrilateralCartesian2D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            v3 = self.domain.get_vertex(2)
            v4 = self.domain.get_vertex(3)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            v3 = self.domain.get_vertex(3)
            v4 = self.domain.get_vertex(4)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
class TestQuadrilateralCartesian3D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a quadrilateral cartesian 3D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointCartesian3D(1, 2, 7),
            viennagrid.wrapper.PointCartesian3D(2, 3, 7),
            viennagrid.wrapper.PointCartesian3D(3, 4, 7),
            viennagrid.wrapper.PointCartesian3D(4, 5, 7),
            viennagrid.wrapper.PointCartesian3D(5, 6, 7),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.QuadrilateralCartesian3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.QuadrilateralCartesian3D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            v3 = self.domain.get_vertex(2)
            v4 = self.domain.get_vertex(3)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            v3 = self.domain.get_vertex(3)
            v4 = self.domain.get_vertex(4)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
class TestQuadrilateralCylindrical3D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a quadrilateral cylindrical 3D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointCylindrical3D(1, 2, 7),
            viennagrid.wrapper.PointCylindrical3D(2, 3, 7),
            viennagrid.wrapper.PointCylindrical3D(3, 4, 7),
            viennagrid.wrapper.PointCylindrical3D(4, 5, 7),
            viennagrid.wrapper.PointCylindrical3D(5, 6, 7),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.QuadrilateralCylindrical3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.QuadrilateralCylindrical3D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            v3 = self.domain.get_vertex(2)
            v4 = self.domain.get_vertex(3)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            v3 = self.domain.get_vertex(3)
            v4 = self.domain.get_vertex(4)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
class TestQuadrilateralPolar2D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a quadrilateral polar 2D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointPolar2D(1, 2),
            viennagrid.wrapper.PointPolar2D(2, 3),
            viennagrid.wrapper.PointPolar2D(3, 4),
            viennagrid.wrapper.PointPolar2D(4, 5),
            viennagrid.wrapper.PointPolar2D(5, 6),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.QuadrilateralPolar2D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.QuadrilateralPolar2D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            v3 = self.domain.get_vertex(2)
            v4 = self.domain.get_vertex(3)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            v3 = self.domain.get_vertex(3)
            v4 = self.domain.get_vertex(4)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
class TestQuadrilateralSpherical3D_Segmentation(unittest.TestCase):
    """Tests for segmentations of a quadrilateral spherical 3D domain."""

    def setUp(self):
        """Create a domain with five vertices and an empty segmentation."""
        self.vertices = [
            viennagrid.wrapper.PointSpherical3D(1, 2, 7),
            viennagrid.wrapper.PointSpherical3D(2, 3, 7),
            viennagrid.wrapper.PointSpherical3D(3, 4, 7),
            viennagrid.wrapper.PointSpherical3D(4, 5, 7),
            viennagrid.wrapper.PointSpherical3D(5, 6, 7),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.QuadrilateralSpherical3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.QuadrilateralSpherical3D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(self.num_segments):
            self.segmentation.make_segment()
            # The segment list and the counter must grow in lockstep.
            self.assertEqual(len(self.segmentation.segments), i + 1)
            self.assertEqual(self.segmentation.num_segments, i + 1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'.

        Fix: the previous version looped over the segments of the freshly
        created segmentation, which is empty (setUp runs per test), so the
        loop body never executed.  A segment is now created first.
        """
        self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            v3 = self.domain.get_vertex(2)
            v4 = self.domain.get_vertex(3)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            v3 = self.domain.get_vertex(3)
            v4 = self.domain.get_vertex(4)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
#######################
# TETRAHEDRAL DOMAINS #
#######################
class TestTetrahedralCartesian3D_Segmentation(unittest.TestCase):
    """Segmentation tests for tetrahedral domains with cartesian 3D points."""

    def setUp(self):
        # Five distinct vertices: enough to build the two cells used below.
        self.vertices = [
            viennagrid.wrapper.PointCartesian3D(1, 2, 7),
            viennagrid.wrapper.PointCartesian3D(2, 3, 7),
            viennagrid.wrapper.PointCartesian3D(3, 4, 7),
            viennagrid.wrapper.PointCartesian3D(4, 5, 7),
            viennagrid.wrapper.PointCartesian3D(5, 6, 7),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.TetrahedralCartesian3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.TetrahedralCartesian3D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(0, self.num_segments):
            self.segmentation.make_segment()
            # Both the list length and the counter must track each creation.
            self.assertEqual(len(self.segmentation.segments), i+1)
            self.assertEqual(self.segmentation.num_segments, i+1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
        # BUG FIX: setUp creates no segments, so iterating the (empty)
        # 'segments' attribute directly made this test pass vacuously.
        # Create the segments explicitly before exercising make_cell.
        for _ in range(self.num_segments):
            self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            v3 = self.domain.get_vertex(2)
            v4 = self.domain.get_vertex(3)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            v3 = self.domain.get_vertex(3)
            v4 = self.domain.get_vertex(4)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
class TestTetrahedralCylindrical3D_Segmentation(unittest.TestCase):
    """Segmentation tests for tetrahedral domains with cylindrical 3D points."""

    def setUp(self):
        # Five distinct vertices: enough to build the two cells used below.
        self.vertices = [
            viennagrid.wrapper.PointCylindrical3D(1, 2, 7),
            viennagrid.wrapper.PointCylindrical3D(2, 3, 7),
            viennagrid.wrapper.PointCylindrical3D(3, 4, 7),
            viennagrid.wrapper.PointCylindrical3D(4, 5, 7),
            viennagrid.wrapper.PointCylindrical3D(5, 6, 7),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.TetrahedralCylindrical3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.TetrahedralCylindrical3D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(0, self.num_segments):
            self.segmentation.make_segment()
            # Both the list length and the counter must track each creation.
            self.assertEqual(len(self.segmentation.segments), i+1)
            self.assertEqual(self.segmentation.num_segments, i+1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
        # BUG FIX: setUp creates no segments, so iterating the (empty)
        # 'segments' attribute directly made this test pass vacuously.
        # Create the segments explicitly before exercising make_cell.
        for _ in range(self.num_segments):
            self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            v3 = self.domain.get_vertex(2)
            v4 = self.domain.get_vertex(3)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            v3 = self.domain.get_vertex(3)
            v4 = self.domain.get_vertex(4)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
class TestTetrahedralSpherical3D_Segmentation(unittest.TestCase):
    """Segmentation tests for tetrahedral domains with spherical 3D points."""

    def setUp(self):
        # Five distinct vertices: enough to build the two cells used below.
        self.vertices = [
            viennagrid.wrapper.PointSpherical3D(1, 2, 7),
            viennagrid.wrapper.PointSpherical3D(2, 3, 7),
            viennagrid.wrapper.PointSpherical3D(3, 4, 7),
            viennagrid.wrapper.PointSpherical3D(4, 5, 7),
            viennagrid.wrapper.PointSpherical3D(5, 6, 7),
        ]
        self.num_vertices = len(self.vertices)
        self.domain = viennagrid.wrapper.TetrahedralSpherical3D_Domain()
        for point in self.vertices:
            self.domain.make_vertex(point)
        self.segmentation = viennagrid.wrapper.TetrahedralSpherical3D_Segmentation(self.domain)
        self.num_segments = 5

    def test_make_segment(self):
        """Test method 'make_segment' and attribute 'segments' of class 'Segmentation'."""
        self.assertEqual(len(self.segmentation.segments), 0)
        self.assertEqual(self.segmentation.num_segments, 0)
        for i in range(0, self.num_segments):
            self.segmentation.make_segment()
            # Both the list length and the counter must track each creation.
            self.assertEqual(len(self.segmentation.segments), i+1)
            self.assertEqual(self.segmentation.num_segments, i+1)
        self.assertEqual(len(self.segmentation.segments), self.num_segments)
        self.assertEqual(self.segmentation.num_segments, self.num_segments)

    def test_make_cell(self):
        """Test method 'make_cell' and attribute 'cells' of class 'Segment'."""
        # BUG FIX: setUp creates no segments, so iterating the (empty)
        # 'segments' attribute directly made this test pass vacuously.
        # Create the segments explicitly before exercising make_cell.
        for _ in range(self.num_segments):
            self.segmentation.make_segment()
        for segment in self.segmentation.segments:
            self.assertEqual(len(segment.cells), 0)
            self.assertEqual(segment.num_cells, 0)
            v1 = self.domain.get_vertex(0)
            v2 = self.domain.get_vertex(1)
            v3 = self.domain.get_vertex(2)
            v4 = self.domain.get_vertex(3)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 1)
            self.assertEqual(segment.num_cells, 1)
            v1 = self.domain.get_vertex(1)
            v2 = self.domain.get_vertex(2)
            v3 = self.domain.get_vertex(3)
            v4 = self.domain.get_vertex(4)
            segment.make_cell(v1, v2, v3, v4)
            self.assertEqual(len(segment.cells), 2)
            self.assertEqual(segment.num_cells, 2)
# Run the full test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| 38.954693 | 93 | 0.744205 | 4,854 | 36,111 | 5.416564 | 0.02225 | 0.130078 | 0.058345 | 0.085273 | 0.938574 | 0.938574 | 0.938574 | 0.938574 | 0.938574 | 0.938574 | 0 | 0.031107 | 0.124893 | 36,111 | 926 | 94 | 38.99676 | 0.800892 | 0.078563 | 0 | 0.885863 | 0 | 0 | 0.000243 | 0 | 0 | 0 | 0 | 0 | 0.29572 | 1 | 0.07393 | false | 0 | 0.003891 | 0 | 0.102464 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
b0afe38318a66d9be985993ab7f5c373fa7c7ee3 | 105 | py | Python | tests/__init__.py | fujiawei-dev/fixed-vhd-writer | c3fcc53ae79313b7be44200a0172fbd55c619620 | [
"MIT"
] | null | null | null | tests/__init__.py | fujiawei-dev/fixed-vhd-writer | c3fcc53ae79313b7be44200a0172fbd55c619620 | [
"MIT"
] | null | null | null | tests/__init__.py | fujiawei-dev/fixed-vhd-writer | c3fcc53ae79313b7be44200a0172fbd55c619620 | [
"MIT"
] | null | null | null | '''
Date: 2022.02.06 10:50
Description: Omit
LastEditors: Rustle Karl
LastEditTime: 2022.02.06 10:50
'''
| 15 | 30 | 0.72381 | 17 | 105 | 4.470588 | 0.705882 | 0.157895 | 0.210526 | 0.263158 | 0.315789 | 0 | 0 | 0 | 0 | 0 | 0 | 0.26087 | 0.12381 | 105 | 6 | 31 | 17.5 | 0.565217 | 0.914286 | 0 | null | 0 | null | 0 | 0 | null | 0 | 0 | 0 | null | 1 | null | true | 0 | 0 | null | null | null | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
b0c2847bf273b60b79312c97c430b1e0644b3b08 | 54,424 | py | Python | skillsAndActions.py | dexterdy/rp-bot | 6ace2be4fac444815334ad3a8a9973af9be3e63b | [
"MIT"
] | 1 | 2021-06-18T07:57:54.000Z | 2021-06-18T07:57:54.000Z | skillsAndActions.py | dexterdy/rp-bot | 6ace2be4fac444815334ad3a8a9973af9be3e63b | [
"MIT"
] | 3 | 2021-07-24T13:33:51.000Z | 2021-07-25T13:35:18.000Z | skillsAndActions.py | dexterdy/rp-bot | 6ace2be4fac444815334ad3a8a9973af9be3e63b | [
"MIT"
] | null | null | null | import discord
from discord.ext import commands
from discord import webhook
class skillsWeaknessesActions(commands.Cog):
    def __init__(self, bot, cursor, chardb):
        """Store the shared bot and database handles for this cog.

        bot    -- the running bot instance (used to wait for follow-up messages)
        cursor -- database cursor used for all character/skill/weakness queries
        chardb -- database connection, needed to commit() / rollback() changes
        """
        self.bot = bot
        self.cursor = cursor
        self.chardb = chardb
@commands.command()
async def skillsandactions(send, ctx, args=""):
#the first embed. Just there to anounce that this is the help message
helpEmbed = discord.Embed(
title="Help Message", description="This is an overview of available commands in this section. Don't forget to put your characters name in double quotes (\" \") if it contains a space.")
#second embed
skillsAndActions = discord.Embed(title="Skills, Weaknesses and Special Actions",
description="The commands in this section allow you to flesh out your character by adding skills, weaknesses and special actions")
#explaining the difference between skills and special actions
skillsAndActions.add_field(name="The Difference", inline=False, value="What is the difference between a skill and a special action in this system? Well, a skill is quite simply anything your character is good at. This can be almost anything: a hobby, "
"a job, a type of magic, a type of science, a positive personality trait or even just a proficiency with one or multiple items, like a weapon or a toolset. A weakness is logically "
"just the opposite of a skill."
"A special action, on the other hand, is a little more specific. In this system, a special action is something special that your character can do in a short amount of time."
"Somewhere in between a few seconds and a minute. Here are a few examples: casting a spell, activating a magic item, using a superpower and using a weapon."
"Special actions are a way to make roleplay a little more interesting or just a little easier. They can be activated with a command that will make the bot send something along the"
"lines of: 'Wiliam used fireball', followed by a description of what happens."
)
skillsAndActions.add_field(name="Skilloverview", inline=False, value="Skilloverview: Gives an overview of your character's current skills. \n"
"__*Usage:*__ \n"
"`!skilloverview [character name]` \n" + u'\u200b')
skillsAndActions.add_field(name="Addskills", inline=False, value="Adds skills to your characters current skills. \n"
"__*Usage:*__ \n"
"`!addskills [character name] [skill name1; skill name2; skill name3;]` \n" + u'\u200b')
skillsAndActions.add_field(name="Skilldescription", inline=False, value="Prompts you to add a description to one of your character's skills. \n"
"__*Usage:*__ \n"
"`!skilldescription [character name] [skill name] OR [skill index]` \n" + u'\u200b')
skillsAndActions.add_field(name="Removeskills", inline=False, value="Removes skills from your characters current list of skills. \n"
"__*Usage:*__ \n"
"`!removeskills [character name] [skill name1; skill name2; skill name3;] OR [index1; index2; index3]` \n" + u'\u200b')
skillsAndActions.add_field(name="Weaknessoverview", inline=False, value="Gives an overview of your character's current weaknesses. \n"
"__*Usage:*__ \n"
"`!weaknessoverview [character name]` \n" + u'\u200b')
skillsAndActions.add_field(name="Addweaknesses", inline=False, value="Adds weaknesses to your characters current weaknesses. \n"
"__*Usage:*__ \n"
"`!addweaknesses [character name] [weakness name1; weakness name2; weakness name3;]` \n" + u'\u200b')
skillsAndActions.add_field(name="Weaknessdescription", inline=False, value="Prompts you to add a description to one of your character's weaknesses. \n"
"__*Usage:*__ \n"
"`!weaknessdescription [character name] [weakness name] OR [weakness index]` \n" + u'\u200b')
skillsAndActions.add_field(name="Removeweaknesses", inline=False, value="Removes weaknesses from your characters current list of weaknesses. \n"
"__*Usage:*__ \n"
"`!removeweaknesses [character name] [weakness name1; weakness name2; weakness name3;] OR [index1; index2; index3]` \n" + u'\u200b')
skillsAndActions.add_field(name="Actionoverview", inline=False, value="Gives an overview of your character's current actions. \n"
"__*Usage:*__ \n"
"`!actionoverview [character name]` \n" + u'\u200b')
skillsAndActions.add_field(name="Addactions", inline=False, value="Adds actions to your characters current actions. \n"
"__*Usage:*__ \n"
"`!addactions [character name] [action name1; action name2; action name3;]` \n" + u'\u200b')
skillsAndActions.add_field(name="Actiondescription", inline=False, value="Prompts you to add a description to one of your character's actions. \n"
"__*Usage:*__ \n"
"`!actiondescription [character name] [action name] OR [action index]` \n" + u'\u200b')
skillsAndActions.add_field(name="Removeactions", inline=False, value="Removes actions from your characters current list of actions. \n"
"__*Usage:*__ \n"
"`!removeactions [character name] [action name1; action name2; action name3;] OR [index1; index2; index3]` \n" + u'\u200b')
skillsAndActions.add_field(name="Useaction", inline=False, value="Allows you to use your character's actions. The bot will narrate what happens if your action has a description. You can also overwrite your description if you want. \n"
"__*Usage:*__ \n"
"`!useaction [character name] [action name] OR [action index] [optional: {description}]` \n" + u'\u200b')
#loop through all the embeds and send them in the chat
helpembeds = [helpEmbed, skillsAndActions]
for i in helpembeds:
await ctx.send(embed=i)
#gives a character's current skills
@commands.command(aliases=["skillsoverview"])
async def skilloverview(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
#help case
if value == "help" or chrnm == "help":
await ctx.send("Gives an overview of your character's current skills. \n \tUsage: `!skilloverview [character name]`")
return
#get all skills associated with this character
self.cursor.execute("""SELECT * FROM skills WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
currentSkills = self.cursor.fetchall()
#make a new embed an add a field for each skill
skillEmbed = discord.Embed(title=chrnm + "'s skills")
for x in currentSkills:
skillEmbed.add_field(
name=x[1], value=x[4] + "\n" + u'\u200b', inline=False)
#send the embed if there are any skills
if(len(currentSkills) <= 0):
await ctx.send("You have not yet specified any skills for this character.")
else:
await ctx.send(embed=skillEmbed)
#add skill description
@commands.command(aliases=["skillsdescription"])
async def skilldescription(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#help case
if value == "help" or chrnm == "help":
await ctx.send("Prompts you to add a description to one of your character's skills. \n \tUsage: `!skilldescription [character name] [skill name] OR [skill index]`")
return
#if value is a digit, retrieve skill based on index
if(value.isdigit()):
self.cursor.execute("""SELECT * FROM skills WHERE name = :chrnm AND member = :member AND indexID = :index""", {
'chrnm': chrnm, 'member': ctx.author.id, 'index': value})
temp = self.cursor.fetchall()
if len(temp) == 0:
await ctx.send(chrnm + " does not have a skill with that index.")
#else, retrieve skill based on name
else:
self.cursor.execute("""SELECT * FROM skills WHERE name = :chrnm AND member = :member AND skill_name = :skill""", {
'chrnm': chrnm, 'member': ctx.author.id, 'skill': value})
temp = self.cursor.fetchall()
if len(temp) == 0:
await ctx.send(chrnm + " does not have a skill with that name.")
#check to see if the next message is by the same author and in the same channel
def check(m):
return m.channel.id == ctx.channel.id and m.author.id == ctx.author.id
#wait for author to send skill description
await ctx.send("You can now type and send the description of the skill: '{}'.".format(temp[0][1]))
description = await self.bot.wait_for('message', check=check)
#update table with new value
self.cursor.execute("""UPDATE skills SET skill_description = :description WHERE name = :chrnm AND member = :member AND skill_name = :skill""", {
'chrnm': chrnm, 'member': ctx.author.id, 'skill': temp[0][1], 'description': description.content})
self.chardb.commit()
await ctx.send("The description of '{}' is now: \n{}".format(temp[0][1], "'" + description.content + "'"))
#update character skills
@commands.command(aliases=["addskill"])
async def addskills(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#help case
if chrnm == "help" or value == "help":
await ctx.send("Adds skills to your characters current skills. \n \tUsage: `!addskills [character name] [skill name1; skill name2; skill name3]`")
return
#remove unnecesary ;
if value.endswith(';'):
value = value[:-1]
#make an array of all the different skills
skillArray = value.split("; ")
#get the maximum index of the skill table for this character
self.cursor.execute("""SELECT MAX(indexID) FROM skills WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
indexID = self.cursor.fetchall()
if indexID[0][0] == None:
index = 1
else:
index = indexID[0][0] + 1
#for every item in the skillarray, extract description and add to database
for x in skillArray:
#extract description by finding and then deleting substring in between {} (not completely finished yet, but functional enough)
description = None
openloc = x.find('{')
closeloc = x.find('}')
if closeloc != -1 and openloc != -1 and openloc < closeloc:
description = x[openloc + 1: closeloc]
x = x.replace(" {" + description + "} ", '')
x = x.replace("{" + description + "} ", '')
x = x.replace(" {" + description + "}", '')
x = x.replace("{" + description + "}", '')
#if one of the { is missing, notify the user and abort
elif openloc != -1:
await ctx.send("You forgot to add closing curly brackets.")
self.chardb.rollback()
return
elif closeloc != -1:
await ctx.send("You forgot to add opening curly brackets.")
self.chardb.rollback()
return
#if there is not a skillname, notify the user and abort
if x == "" or x == " " or x == " ":
await ctx.send("You forgot to enter a skill name")
self.chardb.rollback()
return
#if there is no description, do not insert an empty string
if description == "" or description == " " or description == " ":
description = None
#check if there not aleady exist a skill with that name
self.cursor.execute("""SELECT * FROM skills WHERE name = :chrnm AND member = :member AND skill_name = :skill""", {
'chrnm': chrnm, 'member': ctx.author.id, 'skill': x})
skills = self.cursor.fetchall()
if len(skills) > 0:
await ctx.send("You already have a skill with that name.")
self.chardb.rollback()
return
#insert skill and increase index for next loop
self.cursor.execute("""INSERT INTO skills VALUES(:chrnm, :skills, :member, :index, :description)""", {
'chrnm': chrnm, 'member': ctx.author.id, 'skills': x, 'index': index, 'description': description})
index += 1
self.chardb.commit()
#construct string with all current skills and send it
self.cursor.execute("""SELECT * FROM skills WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
newSkills = self.cursor.fetchall()
s = ""
for x in newSkills:
s += x[1] + ", "
s = s[:-2]
if s == "":
s = None
await ctx.send("These are {}'s skills: {}.".format(chrnm, s))
#update character skills
@commands.command(aliases=["replaceskill"])
async def replaceskills(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#help case
if value == "help" or chrnm == "help":
await ctx.send("Replaces all your character's current skills with new skills \n \tUsage: `!replaceskills [character name] [skill name1; skill name2; skill name3]`")
return
#delete all old skills
self.cursor.execute("""DELETE FROM skills WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
#add the skills given in parameter
self.addskills(ctx, chrnm, value)
#update character skills
@commands.command(aliases=["removeskill"])
async def removeskills(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#help case
if value == "help" or chrnm == "help":
await ctx.send("Removes skills from your characters current skills. \n \t\tUsage: `!removeskills [character name] [skill name1; skill name2; skill name3] OR [index1; index2; index3`")
return
#remove unnecesary ;
if value.endswith(';'):
value = value[:-1]
#araay of skills in input
skillArray = value.split("; ")
#delete skills. user can use index or name to select the skills
for x in skillArray:
if(x.isdigit()):
self.cursor.execute("""DELETE FROM skills WHERE name = :chrnm AND member = :member AND indexID = :index""", {
'chrnm': chrnm, 'member': ctx.author.id, 'index': x})
else:
self.cursor.execute("""DELETE FROM skills WHERE name = :chrnm AND member = :member AND skill_name = :skill""", {
'chrnm': chrnm, 'member': ctx.author.id, 'skill': x})
self.chardb.commit()
#the skills in the database now have the wrong index, because some of them were removed. following code fixes that
#get skills from table and save them in variable
self.cursor.execute("""SELECT * FROM skills WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
newSkills = self.cursor.fetchall()
#delete skills from table
self.cursor.execute("""DELETE FROM skills WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
#construct new skill string and reinsert them into the table with the correct index
s = ""
index = 1
for x in newSkills:
self.cursor.execute("""INSERT INTO skills VALUES(:chrnm, :skills, :member, :index)""", {
'chrnm': chrnm, 'member': ctx.author.id, 'skills': x[1], 'index': index})
index += 1
s += x[1] + ", "
self.chardb.commit()
#remove last space and comma. then send the string
s = s[:-2]
if s == "":
s = None
await ctx.send("These are {}'s new skills: {}.".format(chrnm, s))
#gives a character's current weaknesses
@commands.command(aliases=["weaknessesoverview"])
async def weaknessoverview(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
#help case
if value == "help" or chrnm == "help":
await ctx.send("Gives an overview of your character's current weaknesses. \n \tUsage: `!weaknessoverview [character name]`")
return
#get all weaknesses associated with this character
self.cursor.execute("""SELECT * FROM weaknesses WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
currentWeaknesses = self.cursor.fetchall()
#make a new embed an add a field for each weakness
weaknessEmbed = discord.Embed(title=chrnm + "'s weaknesses")
for x in currentWeaknesses:
weaknessEmbed.add_field(
name=x[1], value=x[4] + "\n" + u'\u200b', inline=False)
#send the embed if there are any weaknesses
if(len(currentWeaknesses) <= 0):
await ctx.send("You have not yet specified any weaknesses for this character.")
else:
await ctx.send(embed=weaknessEmbed)
#add weakness description
@commands.command(aliases=["weaknessesdescription"])
async def weaknessdescription(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#help case
if value == "help" or chrnm == "help":
await ctx.send("Prompts you to add a description to one of your character's weaknesses. \n \tUsage: `!weaknessdescription [character name] [weakness name] OR [weakness index]`")
return
#if value is a digit, retrieve weakness based on index
if(value.isdigit()):
self.cursor.execute("""SELECT * FROM weaknesses WHERE name = :chrnm AND member = :member AND indexID = :index""", {
'chrnm': chrnm, 'member': ctx.author.id, 'index': value})
temp = self.cursor.fetchall()
if len(temp) == 0:
await ctx.send(chrnm + " does not have a weakness with that index.")
#else, retrieve weakness based on name
else:
self.cursor.execute("""SELECT * FROM weaknesses WHERE name = :chrnm AND member = :member AND skill_name = :weakness""", {
'chrnm': chrnm, 'member': ctx.author.id, 'weakness': value})
temp = self.cursor.fetchall()
if len(temp) == 0:
await ctx.send(chrnm + " does not have a weakness with that name.")
#check to see if the next message is by the same author and in the same channel
def check(m):
return m.channel.id == ctx.channel.id and m.author.id == ctx.author.id
#wait for author to send weakness description
await ctx.send("You can now type and send the description of the weakness: '{}'.".format(temp[0][1]))
description = await self.bot.wait_for('message', check=check)
#update table with new value
self.cursor.execute("""UPDATE weaknesses SET weakness_description = :description WHERE name = :chrnm AND member = :member AND weakness_name = :weakness""", {
'chrnm': chrnm, 'member': ctx.author.id, 'weakness': temp[0][1], 'description': description.content})
self.chardb.commit()
await ctx.send("The description of '{}' is now: \n{}".format(temp[0][1], "'" + description.content + "'"))
#update character weaknesses
@commands.command(aliases=["addweakness"])
async def addweaknesses(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#help case
if chrnm == "help" or value == "help":
await ctx.send("Adds weaknesses to your characters current weaknesses. \n \tUsage: `!addweaknesses [character name] [weakness name1; weakness name2; weakness name3]`")
return
#remove unnecesary ;
if value.endswith(';'):
value = value[:-1]
#make an array of all the different weaknesses
weaknessArray = value.split("; ")
#get the maximum index of the weakness table for this character
self.cursor.execute("""SELECT MAX(indexID) FROM weaknesses WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
indexID = self.cursor.fetchall()
if indexID[0][0] == None:
index = 1
else:
index = indexID[0][0] + 1
#for every item in the skillarray, extract description and add to database
for x in weaknessArray:
#extract description by finding and then deleting substring in between {} (not completely finished yet, but functional enough)
description = None
openloc = x.find('{')
closeloc = x.find('}')
if closeloc != -1 and openloc != -1 and openloc < closeloc:
description = x[openloc + 1: closeloc]
x = x.replace(" {" + description + "} ", '')
x = x.replace("{" + description + "} ", '')
x = x.replace(" {" + description + "}", '')
x = x.replace("{" + description + "}", '')
#if one of the { is missing, notify the user and abort
elif openloc != -1:
await ctx.send("You forgot to add closing curly brackets.")
self.chardb.rollback()
return
elif closeloc != -1:
await ctx.send("You forgot to add opening curly brackets.")
self.chardb.rollback()
return
#if there is not a skillname, notify the user and abort
if x == "" or x == " " or x == " ":
await ctx.send("You forgot to enter a weakness name")
self.chardb.rollback()
return
#if there is no description, do not insert an empty string
if description == "" or description == " " or description == " ":
description = None
#check if there not aleady exist a weakness with that name
self.cursor.execute("""SELECT * FROM weaknesses WHERE name = :chrnm AND member = :member AND weakness_name = :weakness""", {
'chrnm': chrnm, 'member': ctx.author.id, 'weakness': x})
weaknesses = self.cursor.fetchall()
if len(weaknesses) > 0:
await ctx.send("You already have a weakness with that name.")
self.chardb.rollback()
return
#insert weakness and increase index for next loop
self.cursor.execute("""INSERT INTO weaknesses VALUES(:chrnm, :weaknesses, :member, :index, :description)""", {
'chrnm': chrnm, 'member': ctx.author.id, 'weaknesses': x, 'index': index, 'description': description})
index += 1
self.chardb.commit()
#construct string with all current weaknesses and send it
self.cursor.execute("""SELECT * FROM weaknesses WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
newWeaknesses = self.cursor.fetchall()
s = ""
for x in newWeaknesses:
s += x[1] + ", "
s = s[:-2]
if s == "":
s = None
await ctx.send("These are {}'s weaknesses: {}.".format(chrnm, s))
#update character weaknesses
@commands.command(aliases=["replaceweakness"])
async def replaceweaknesses(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#help case
if value == "help" or chrnm == "help":
await ctx.send("Replaces all your character's current weaknesses with new weaknesses. \n \tUsage: `!replaceweaknesses [character name] [weakness name1; weakness name2; weakness name3]`")
return
#delete all old weaknesses
self.cursor.execute("""DELETE FROM weaknesses WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
#add the weaknesses given in parameter
self.addweaknesses(ctx, chrnm, value)
#update character weaknesses
@commands.command(aliases=["removeweakness"])
async def removeweaknesses(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#help case
if value == "help" or chrnm == "help":
await ctx.send("Removes weaknesses from your characters current weaknesses. \n \t\tUsage: `!removeweaknesses [character name] [weakness name1; weakness name2; weakness name3] OR [index1; index2; index3`")
return
#remove unnecesary ;
if value.endswith(';'):
value = value[:-1]
#aray of weaknesses in input
weaknessArray = value.split("; ")
#delete weaknesses. user can use index or name to select the weaknesses
for x in weaknessArray:
if(x.isdigit()):
self.cursor.execute("""DELETE FROM weaknesses WHERE name = :chrnm AND member = :member AND indexID = :index""", {
'chrnm': chrnm, 'member': ctx.author.id, 'index': x})
else:
self.cursor.execute("""DELETE FROM weaknesses WHERE name = :chrnm AND member = :member AND weakness_name = :weakness""", {
'chrnm': chrnm, 'member': ctx.author.id, 'weakness': x})
self.chardb.commit()
#the weaknesses in the database now have the wrong index, because some of them were removed. following code fixes that
#get weaknesses from table and save them in variable
self.cursor.execute("""SELECT * FROM weaknesses WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
newWeaknesses = self.cursor.fetchall()
#delete weaknesses from table
self.cursor.execute("""DELETE FROM weaknesses WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
#construct new weakness string and reinsert them into the table with the correct index
s = ""
index = 1
for x in newWeaknesses:
self.cursor.execute("""INSERT INTO weaknesses VALUES(:chrnm, :weaknesses, :member, :index)""", {
'chrnm': chrnm, 'member': ctx.author.id, 'weaknesses': x[1], 'index': index})
index += 1
s += x[1] + ", "
self.chardb.commit()
#remove last space and comma. then send the string
s = s[:-2]
if s == "":
s = None
await ctx.send("These are {}'s new weaknesses: {}.".format(chrnm, s))
#gives a character's current special actions
@commands.command(aliases=["actionsoverview"])
async def actionoverview(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
#help case
if value == "help" or chrnm == "help":
await ctx.send("Gives an overview of your character's current special actions. \n \tUsage: `!actionoverview [character name]`")
return
#get all special actions associated with this character
self.cursor.execute("""SELECT * FROM special_actions WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
currentActions = self.cursor.fetchall()
#make a new embed an add a field for each special action
actionEmbed = discord.Embed(title=chrnm + "'s special actions")
for x in currentActions:
actionEmbed.add_field(
name=x[1], value=x[4] + "\n" + u'\u200b', inline=False)
#send the embed if there are any special actions
if(len(currentActions) <= 0):
await ctx.send("You have not yet specified any special actions for this character.")
else:
await ctx.send(embed=actionEmbed)
#add special action description
@commands.command(aliases=["actionsdescription"])
async def actiondescription(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#help case
if value == "help" or chrnm == "help":
await ctx.send("Prompts you to add a description to one of your character's special actions. \n \tUsage: `!actiondescription [character name] [special action name] OR [special action index]`")
return
#if value is a digit, retrieve special action based on index
if(value.isdigit()):
self.cursor.execute("""SELECT * FROM special_actions WHERE name = :chrnm AND member = :member AND indexID = :index""", {
'chrnm': chrnm, 'member': ctx.author.id, 'index': value})
temp = self.cursor.fetchall()
if len(temp) == 0:
await ctx.send(chrnm + " does not have a special action with that index.")
#else, retrieve special action based on name
else:
self.cursor.execute("""SELECT * FROM special_actions WHERE name = :chrnm AND member = :member AND action_name = :specialAction""", {
'chrnm': chrnm, 'member': ctx.author.id, 'specialAction': value})
temp = self.cursor.fetchall()
if len(temp) == 0:
await ctx.send(chrnm + " does not have a special action with that name.")
#check to see if the next message is by the same author and in the same channel
def check(m):
return m.channel.id == ctx.channel.id and m.author.id == ctx.author.id
#wait for author to send special action description
await ctx.send("You can now type and send the description of the special action: '{}'.".format(temp[0][1]))
description = await self.bot.wait_for('message', check=check)
#update table with new value
self.cursor.execute("""UPDATE special_actions SET action_description = :description WHERE name = :chrnm AND member = :member AND action_name = :specialAction""", {
'chrnm': chrnm, 'member': ctx.author.id, 'specialAction': temp[0][1], 'description': description.content})
self.chardb.commit()
await ctx.send("The description of '{}' is now: \n{}".format(temp[0][1], "'" + description.content + "'"))
#update character special actions
@commands.command(aliases=["addaction"])
async def addactions(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#help case
if chrnm == "help" or value == "help":
await ctx.send("Adds special actions to your characters current special actions. \n \tUsage: `!addactions [character name] [action name1; action name2; action name3]`")
return
#remove unnecesary ;
if value.endswith(';'):
value = value[:-1]
#make an array of all the different special actions
actionArray = value.split("; ")
#get the maximum index of the special action table for this character
self.cursor.execute("""SELECT MAX(indexID) FROM special_actions WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
indexID = self.cursor.fetchall()
if indexID[0][0] == None:
index = 1
else:
index = indexID[0][0] + 1
#for every item in the actionarray, extract description and add to database
for x in actionArray:
#extract description by finding and then deleting substring in between {} (not completely finished yet, but functional enough)
description = None
openloc = x.find('{')
closeloc = x.find('}')
if closeloc != -1 and openloc != -1 and openloc < closeloc:
description = x[openloc + 1: closeloc]
x = x.replace(" {" + description + "} ", '')
x = x.replace("{" + description + "} ", '')
x = x.replace(" {" + description + "}", '')
x = x.replace("{" + description + "}", '')
#if one of the { is missing, notify the user and abort
elif openloc != -1:
await ctx.send("You forgot to add closing curly brackets.")
self.chardb.rollback()
return
elif closeloc != -1:
await ctx.send("You forgot to add opening curly brackets.")
self.chardb.rollback()
return
#if there is not a special action name, notify the user and abort
if x == "" or x == " " or x == " ":
await ctx.send("You forgot to enter a special action name")
self.chardb.rollback()
return
#if there is no description, do not insert an empty string
if description == "" or description == " " or description == " ":
description = None
#check if there not aleady exist a special action with that name
self.cursor.execute("""SELECT * FROM special_actions WHERE name = :chrnm AND member = :member AND action_name = :specialAction""", {
'chrnm': chrnm, 'member': ctx.author.id, 'specialAction': x})
actions = self.cursor.fetchall()
if len(actions) > 0:
await ctx.send("You already have a special action with that name.")
self.chardb.rollback()
return
#insert special action and increase index for next loop
self.cursor.execute("""INSERT INTO special_actions VALUES(:chrnm, :specialActions, :member, :index, :description)""", {
'chrnm': chrnm, 'member': ctx.author.id, 'specialActions': x, 'index': index, 'description': description})
index += 1
self.chardb.commit()
#construct string with all current special actions and send it
self.cursor.execute("""SELECT * FROM special_actions WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
newActions = self.cursor.fetchall()
s = ""
for x in newActions:
s += x[1] + ", "
s = s[:-2]
if s == "":
s = None
await ctx.send("These are {}'s special actions: {}.".format(chrnm, s))
#update character special actions
@commands.command(aliases=["replaceaction"])
async def replaceactions(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#help case
if value == "help" or chrnm == "help":
await ctx.send("Replaces all your character's current special actions with new special actions \n \tUsage: `!replaceactions [character name] [action name1; action name2; action name3]`")
return
#delete all old special actions
self.cursor.execute("""DELETE FROM special_actions WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
#add the special actions given in parameter
self.addactions(ctx, chrnm, value)
#update character special actions
@commands.command(aliases=["removeaction"])
async def removeactions(self, ctx, chrnm="", value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#help case
if value == "help" or chrnm == "help":
await ctx.send("Removes special actions from your characters current special actions. \n \t\tUsage: `!removeactions [character name] [action name1; action name2; action name3] OR [index1; index2; index3`")
return
#remove unnecesary ;
if value.endswith(';'):
value = value[:-1]
#araay of special actions in input
actionArray = value.split("; ")
#delete special actions. user can use index or name to select the special actions
for x in actionArray:
if(x.isdigit()):
self.cursor.execute("""DELETE FROM special_actions WHERE name = :chrnm AND member = :member AND indexID = :index""", {
'chrnm': chrnm, 'member': ctx.author.id, 'index': x})
else:
self.cursor.execute("""DELETE FROM special_actions WHERE name = :chrnm AND member = :member AND action_name = :specialAction""", {
'chrnm': chrnm, 'member': ctx.author.id, 'specialAction': x})
self.chardb.commit()
#the special actions in the database now have the wrong index, because some of them were removed. following code fixes that
#get special actions from table and save them in variable
self.cursor.execute("""SELECT * FROM special_actions WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
newActions = self.cursor.fetchall()
#delete special actions from table
self.cursor.execute("""DELETE FROM special_actions WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
#construct new special action string and reinsert them into the table with the correct index
s = ""
index = 1
for x in newActions:
self.cursor.execute("""INSERT INTO special_actions VALUES(:chrnm, :specialActions, :member, :index)""", {
'chrnm': chrnm, 'member': ctx.author.id, 'specialActions': x[1], 'index': index})
index += 1
s += x[1] + ", "
self.chardb.commit()
#remove last space and comma. then send the string
s = s[:-2]
if s == "":
s = None
await ctx.send("These are {}'s new special actions: {}.".format(chrnm, s))
#this command allows users to use their special actions during roleplay
@commands.command(aliases=["useactions"])
async def useaction(self, ctx, chrnm="", *, value=""):
self.cursor.execute("""SELECT * FROM characters WHERE name = :chrnm AND member = :member""", {
'chrnm': chrnm, 'member': ctx.author.id})
oldValueCheck = self.cursor.fetchall()
#help case
if chrnm == "help" or value == "help":
await ctx.send("Allows you to use your character's actions. The bot will narrate what happens if your action has a description. You can also overwrite your description if you want. \n \t\t Usage: `!useaction [character name] [action name] OR [action index] [optional: {description}]`")
return
if chrnm == "":
await ctx.send("You forgot to enter a name")
return
elif chrnm[0] == "'":
await ctx.send("Please use double quotes for characters that have a space in their name.")
return
elif len(oldValueCheck) == 0 and chrnm != "help":
await ctx.send("You do not have a character named '{}'.".format(chrnm))
return
elif value == "":
await ctx.send("You forgot to enter a value.")
return
#extract description by finding and then deleting substring in between {} (not completely finished yet, but functional enough)
description = None
openloc = value.find('{')
closeloc = value.find('}')
if closeloc != -1 and openloc != -1 and openloc < closeloc:
description = value[openloc + 1: closeloc]
value = value.replace(" {" + description + "} ", '')
value = value.replace("{" + description + "} ", '')
value = value.replace(" {" + description + "}", '')
value = value.replace("{" + description + "}", '')
#if one of the { is missing, notify the user and abort
elif openloc != -1:
await ctx.send("You forgot to add closing curly brackets.")
return
elif closeloc != -1:
await ctx.send("You forgot to add opening curly brackets.")
return
if value.isdigit():
self.cursor.execute("""SELECT * FROM special_actions WHERE name = :chrnm AND indexID = :index AND member = :member""", {
'chrnm': chrnm, 'index': value, 'member': ctx.author.id})
action = self.cursor.fetchall()
if len(action) == 0:
await ctx.send("You do not have an action with that index")
return
else:
self.cursor.execute("""SELECT * FROM special_actions WHERE name = :chrnm AND action_name = :action AND member = :member""", {
'chrnm': chrnm, 'action': value, 'member': ctx.author.id})
action = self.cursor.fetchall()
if len(action) == 0:
await ctx.send("You do not have an action with that name")
return
#find if the current channel has one of our webhooks
self.cursor.execute(
"""SELECT * FROM webhooks WHERE channelID = :channelID;""", {'channelID': ctx.channel.id})
webhooks = self.cursor.fetchall()
#if this channel does already have a webhook, use that one
if(len(webhooks) > 0):
rpHook = webhook.Webhook.from_url(
webhooks[0][1], adapter=discord.RequestsWebhookAdapter())
#if this channel does not already have a webhook, create one and store it in the db
else:
rpHook = await ctx.channel.create_webhook(name='rpHook')
self.cursor.execute("""INSERT INTO channels VALUES(:channelID, :webhook_url)""", {
'channelID': ctx.channel.id, 'webhook_url': rpHook.url})
if description == None and action[0][4] != None:
embed = discord.Embed(title="{} used {}".format(
chrnm, action[0][1]), description=action[0][4])
rpHook.send(embed=embed, username="Narrator",
avatar_url="https://cdn.discordapp.com/attachments/854722556475867159/868834924029956146/th.jpeg")
elif action[0][4] == None:
embed = discord.Embed(
title="{} used {}".format(chrnm, action[0][1]))
rpHook.send(embed=embed, username="Narrator",
avatar_url="https://cdn.discordapp.com/attachments/854722556475867159/868834924029956146/th.jpeg")
else:
embed = discord.Embed(title="{} used {}".format(
chrnm, action[0][1]), description=description)
rpHook.send(embed=embed, username="Narrator",
avatar_url="https://cdn.discordapp.com/attachments/854722556475867159/868834924029956146/th.jpeg")
await ctx.message.delete()
| 49.976125 | 297 | 0.567452 | 6,272 | 54,424 | 4.905612 | 0.060906 | 0.031201 | 0.046022 | 0.032664 | 0.829303 | 0.799304 | 0.781266 | 0.773368 | 0.727314 | 0.690783 | 0 | 0.009994 | 0.316037 | 54,424 | 1,088 | 298 | 50.022059 | 0.81657 | 0.10782 | 0 | 0.729834 | 0 | 0.038412 | 0.349059 | 0.001302 | 0 | 0 | 0 | 0 | 0 | 1 | 0.005122 | false | 0 | 0.003841 | 0.003841 | 0.133163 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
b0d30a41b869071f8aee78b47f947787395a08c2 | 1,076 | py | Python | pit_package/pit_poland/data_import/tests/test_import.py | Qertan/NYPD21Z | 24ad8f22b6cdc6f424470d00e3528ca49c8fd213 | [
"BSD-2-Clause"
] | 1 | 2022-02-22T15:15:27.000Z | 2022-02-22T15:15:27.000Z | pit_package/pit_poland/data_import/tests/test_import.py | Qertan/NYPD21Z | 24ad8f22b6cdc6f424470d00e3528ca49c8fd213 | [
"BSD-2-Clause"
] | null | null | null | pit_package/pit_poland/data_import/tests/test_import.py | Qertan/NYPD21Z | 24ad8f22b6cdc6f424470d00e3528ca49c8fd213 | [
"BSD-2-Clause"
] | null | null | null | import pandas as pd
from .. import gminy_import_pit, powiaty_import_pit, wojewodztwa_import_pit
from .. import gminy_import_ppl, powiaty_import_ppl, wojewodztwa_import_ppl
def test_import_pit():
# Sprawdzenie blednej sciezki
expected_df = pd.DataFrame()
pd.testing.assert_frame_equal(gminy_import_pit("not even a path"), expected_df, check_column_type=True)
pd.testing.assert_frame_equal(powiaty_import_pit("not even a path", "not even a path too"), expected_df, check_column_type=True)
pd.testing.assert_frame_equal(wojewodztwa_import_pit("not even a path"), expected_df, check_column_type=True)
def test_import_ppl():
# Sprawdzenie blednej sciezki
expected_df = pd.DataFrame()
pd.testing.assert_frame_equal(gminy_import_ppl("not even a path"), expected_df, check_column_type=True)
pd.testing.assert_frame_equal(powiaty_import_ppl("not even a path"), expected_df,
check_column_type=True)
pd.testing.assert_frame_equal(wojewodztwa_import_ppl("not even a path"), expected_df, check_column_type=True)
| 53.8 | 132 | 0.772305 | 161 | 1,076 | 4.78882 | 0.192547 | 0.103761 | 0.072633 | 0.108949 | 0.762646 | 0.762646 | 0.743191 | 0.743191 | 0.743191 | 0.743191 | 0 | 0 | 0.141264 | 1,076 | 19 | 133 | 56.631579 | 0.834416 | 0.051115 | 0 | 0.142857 | 0 | 0 | 0.107073 | 0 | 0 | 0 | 0 | 0 | 0.428571 | 1 | 0.142857 | false | 0 | 0.785714 | 0 | 0.928571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
b0e22fe696d15dd2118ce2546e24fcd48a1f03ad | 138 | py | Python | superglue/__init__.py | gzhch/fairseq | ea13f0838fad9e8898afeac21e171f1f316eeb0e | [
"MIT"
] | null | null | null | superglue/__init__.py | gzhch/fairseq | ea13f0838fad9e8898afeac21e171f1f316eeb0e | [
"MIT"
] | null | null | null | superglue/__init__.py | gzhch/fairseq | ea13f0838fad9e8898afeac21e171f1f316eeb0e | [
"MIT"
] | null | null | null | from . import wic_criterion, wic_task # noqa
from . import wsc_criterion, wsc_task, wsc_utils # noqa
from . import multirc_task # noqa
| 34.5 | 56 | 0.76087 | 21 | 138 | 4.714286 | 0.428571 | 0.30303 | 0.282828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.173913 | 138 | 3 | 57 | 46 | 0.868421 | 0.101449 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
9fe7adf60475ebaab7b3cdd0fc591187fba67949 | 116 | py | Python | app/constants.py | jlehker/pitutor | d58186cb1c743ee5dac2a5152ddc3035efcec3ca | [
"MIT"
] | null | null | null | app/constants.py | jlehker/pitutor | d58186cb1c743ee5dac2a5152ddc3035efcec3ca | [
"MIT"
] | null | null | null | app/constants.py | jlehker/pitutor | d58186cb1c743ee5dac2a5152ddc3035efcec3ca | [
"MIT"
] | null | null | null | PETTUTOR_UUID = "B0E6A4BF-CCCC-FFFF-330C-0000000000F0"
FEED_CHARACTERISTIC = "B0E6A4BF-CCCC-FFFF-330C-0000000000F1"
| 38.666667 | 60 | 0.827586 | 14 | 116 | 6.714286 | 0.714286 | 0.255319 | 0.340426 | 0.425532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.309091 | 0.051724 | 116 | 2 | 61 | 58 | 0.545455 | 0 | 0 | 0 | 0 | 0 | 0.62069 | 0.62069 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
b010abf4ce247b9f1d293e31ac88255fa467646c | 2,047 | py | Python | parsers/parser_interface.py | BojanaZ/SeamlessMDD | 94302cc1d253eb26794e906c9e648c2ea569f851 | [
"MIT"
] | null | null | null | parsers/parser_interface.py | BojanaZ/SeamlessMDD | 94302cc1d253eb26794e906c9e648c2ea569f851 | [
"MIT"
] | 1 | 2021-12-13T20:56:06.000Z | 2021-12-13T20:56:06.000Z | parsers/parser_interface.py | BojanaZ/SeamlessMDD | 94302cc1d253eb26794e906c9e648c2ea569f851 | [
"MIT"
] | null | null | null | from abc import ABCMeta, abstractmethod
class IParser:
__metaclass__ = ABCMeta
@abstractmethod
def get_element_by_id(self, id):
raise NotImplementedError
@abstractmethod
def get_elements_by_name(self, name):
raise NotImplementedError
@abstractmethod
def get_element_by_path(self, path):
raise NotImplementedError
@abstractmethod
def replace_element_by_id(self, id, new_element):
raise NotImplementedError
@abstractmethod
def replace_element_by_name(self, name, new_element):
raise NotImplementedError
@abstractmethod
def replace_element_by_path(self, path, new_element):
raise NotImplementedError
@abstractmethod
def update_element_by_id(self, id, attribute_name, new_value):
raise NotImplementedError
@abstractmethod
def update_element_by_name(self, name, attribute_name, new_value):
raise NotImplementedError
@abstractmethod
def update_element_by_path(self, path, attribute_name, new_value):
raise NotImplementedError
@abstractmethod
def remove_element_by_id(self, id):
raise NotImplementedError
@abstractmethod
def remove_element_by_name(self, name):
raise NotImplementedError
@abstractmethod
def remove_element_by_path(self, path):
raise NotImplementedError
@abstractmethod
def add_child_by_parent_id(self, id, child_node):
raise NotImplementedError
@abstractmethod
def add_child_by_parent_name(self, name, child_node):
raise NotImplementedError
@abstractmethod
def add_child_by_parent_path(self, path, child_node):
raise NotImplementedError
@abstractmethod
def remove_all_child_nodes_by_parent_id(self, id):
raise NotImplementedError
@abstractmethod
def remove_all_child_nodes_by_parent_name(self, name):
raise NotImplementedError
@abstractmethod
def remove_all_child_nodes_by_parent_path(self, path):
raise NotImplementedError
| 25.5875 | 70 | 0.731803 | 225 | 2,047 | 6.293333 | 0.137778 | 0.216102 | 0.456215 | 0.492232 | 0.950565 | 0.838983 | 0.825565 | 0.765537 | 0.610169 | 0.346045 | 0 | 0 | 0.216903 | 2,047 | 79 | 71 | 25.911392 | 0.883344 | 0 | 0 | 0.631579 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.315789 | false | 0 | 0.017544 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
b01dd75ca1781e48bdbb2173a6c89d9173abf7f8 | 1,940 | py | Python | data_loader.py | JudePark96/speech-recognition-pytorch | 5d7375249fd5f1c53ddbedc2dd9c48dd38bbb8ea | [
"MIT"
] | 2 | 2020-07-01T09:52:31.000Z | 2020-08-02T04:38:50.000Z | data_loader.py | JudePark96/speech-recognition-pytorch | 5d7375249fd5f1c53ddbedc2dd9c48dd38bbb8ea | [
"MIT"
] | null | null | null | data_loader.py | JudePark96/speech-recognition-pytorch | 5d7375249fd5f1c53ddbedc2dd9c48dd38bbb8ea | [
"MIT"
] | 1 | 2020-08-02T04:38:51.000Z | 2020-08-02T04:38:51.000Z | __author__ = 'JudePark'
__email__ = 'judepark@kookmin.ac.kr'
import h5py
from torch.utils.data import Dataset, DataLoader
import torch
import pandas as pd
class SpeechDataset(Dataset):
def __init__(self, file_path) -> None:
self.file_path = file_path
with h5py.File(file_path, 'r') as features_hdf:
self.feature_keys = list(features_hdf.keys())
self.num_instances = features_hdf.get(self.feature_keys[0]).shape[0]
print(f'total instances is {self.num_instances}')
def __getitem__(self, index: int) -> dict:
features = self._read_hdf_features(index)
return features
def __len__(self) -> int:
return self.num_instances
def _read_hdf_features(self, index):
features = {}
with h5py.File(self.file_path, 'r') as features_hdf:
features['feature'] = features_hdf['feature'][index]
features['label'] = features_hdf['label'][index]
return features
class SpeechTestDataset(Dataset):
def __init__(self, file_path) -> None:
self.file_path = file_path
with h5py.File(file_path, 'r') as features_hdf:
self.feature_keys = list(features_hdf.keys())
self.num_instances = features_hdf.get(self.feature_keys[0]).shape[0]
print(f'total instances is {self.num_instances}')
def __getitem__(self, index: int) -> dict:
features = self._read_hdf_features(index)
return features
def __len__(self) -> int:
return self.num_instances
def _read_hdf_features(self, index):
features = {}
with h5py.File(self.file_path, 'r') as features_hdf:
features['feature'] = features_hdf['feature'][index]
return features
def get_data_loader(dataset, bs, shuffle, num_workers, pin_memory):
return DataLoader(dataset, batch_size=bs, shuffle=shuffle, num_workers=num_workers, pin_memory=pin_memory)
| 28.955224 | 110 | 0.665464 | 249 | 1,940 | 4.84739 | 0.232932 | 0.100249 | 0.059652 | 0.036454 | 0.705882 | 0.705882 | 0.705882 | 0.705882 | 0.705882 | 0.705882 | 0 | 0.005988 | 0.225258 | 1,940 | 66 | 111 | 29.393939 | 0.797073 | 0 | 0 | 0.744186 | 0 | 0 | 0.07732 | 0.01134 | 0 | 0 | 0 | 0 | 0 | 1 | 0.209302 | false | 0 | 0.093023 | 0.069767 | 0.511628 | 0.046512 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
b02f948b03dc3c4fdd3f74e92e62cebe91286898 | 5,420 | py | Python | data_science/example.py | phiratio/lpthw | a32240d4355fb331805d515f96e1d009914e5c47 | [
"MIT"
] | 1 | 2021-04-21T09:38:38.000Z | 2021-04-21T09:38:38.000Z | data_science/example.py | phiratio/lpthw | a32240d4355fb331805d515f96e1d009914e5c47 | [
"MIT"
] | 34 | 2019-12-16T16:53:24.000Z | 2022-01-13T02:29:30.000Z | data_science/example.py | phiratio/lpthw | a32240d4355fb331805d515f96e1d009914e5c47 | [
"MIT"
] | null | null | null | # 1
import json
import random
from datetime import date, timedelta
import faker
# 2 -- single shared Faker instance, reused by every generator helper below
fake = faker.Faker()
# 3 -- build a pool of unique usernames; the set silently drops Faker
# collisions, so keep drawing until we have `usernames_no` distinct values
usernames = set()
usernames_no = 1000
while len(usernames) < usernames_no:
    usernames.add(fake.user_name())
# 4
def get_random_name_and_gender():
    """Return a (full name, gender code) pair, skewed toward female.

    `random.random() > 0.6` is true ~40% of the time, so roughly 40% of
    generated users come out male ('M') and 60% female ('F').
    """
    is_male = random.random() > .6
    if is_male:
        return fake.name_male(), 'M'
    return fake.name_female(), 'F'
def get_users(usernames):
    """Build one JSON-serialized profile per username.

    Each profile carries the username plus a Faker-generated name,
    gender, email, age (18-90) and address. Returns a list of JSON
    strings, one per input username.
    """
    users = []
    for username in usernames:
        name, gender = get_random_name_and_gender()
        user = {
            'username': username,
            'name': name,
            'gender': gender,
            'email': fake.email(),
            'age': fake.random_int(min=18, max=90),
            'address': fake.address(),
        }
        # Fix: serialize the whole profile dict. The original dumped only
        # `username`, silently discarding the `user` record built above.
        users.append(json.dumps(user))
    return users
users = get_users(usernames)
users[:3]  # bare expression: previews the first three records in a notebook/REPL cell
# 5
def get_type():
    """Return one of the four campaign type codes at random."""
    campaign_types = ('AKX', 'BYU', 'GRZ', 'KTR')
    return random.choice(campaign_types)
def get_start_end_dates():
    """Return random (start, end) campaign dates as 'YYYYMMDD' strings.

    The campaign lasts 1 day to 2 years and starts up to a year before
    or after today, so a good share of campaigns lie in the past.
    """
    duration = random.randint(1, 2 * 365)
    offset = random.randint(-365, 365)
    start = date.today() - timedelta(days=offset)
    end = start + timedelta(days=duration)
    return start.strftime("%Y%m%d"), end.strftime("%Y%m%d")
def get_age():
    """Return a target age bracket such as '20-35'.

    Both bounds are multiples of 5; the bracket width is 5 to 25 years.
    """
    lower = random.randint(20, 45) // 5 * 5  # snap down to a multiple of 5
    width = random.randint(5, 25) // 5 * 5
    return '{}-{}'.format(lower, lower + width)
def get_gender():
    """Return a random gender code among 'M', 'F' and 'B'."""
    options = ('M', 'F', 'B')
    return random.choice(options)
def get_currency():
    """Return a random currency code among 'GBP', 'EUR' and 'USD'."""
    currencies = ('GBP', 'EUR', 'USD')
    return random.choice(currencies)
def get_campaign_name():
    """Build a campaign name: '<type>_<start>_<end>_<age>_<gender>_<currency>'.

    Every field is drawn at random by its dedicated helper; start/end
    come back from get_start_end_dates() as YYYYMMDD strings.
    """
    type_ = get_type()
    start, end = get_start_end_dates()
    # Left-to-right evaluation keeps the helper call order identical:
    # type, dates, age, gender, currency.
    fields = (type_, start, end, get_age(), get_gender(), get_currency())
    return '_'.join(fields)
# 6
def get_campaign_data():
    """Generate one synthetic campaign record.

    Keys: cmp_name, cmp_bgt (budget), cmp_spent, cmp_clicks, cmp_impr.
    """
    campaign = {'cmp_name': get_campaign_name()}
    campaign['cmp_bgt'] = random.randint(10 ** 3, 10 ** 6)
    # Spend is bounded above by the budget drawn just before it.
    campaign['cmp_spent'] = random.randint(10 ** 2, campaign['cmp_bgt'])
    # Triangular draw: clicks cluster around the 0.2e5 mode.
    campaign['cmp_clicks'] = int(random.triangular(10 ** 2, 10 ** 5, 0.2 * 10 ** 5))
    # Impressions: roughly half a million with a tiny Gaussian spread.
    campaign['cmp_impr'] = int(random.gauss(0.5 * 10 ** 6, 2))
    return campaign
# 7
def get_data(users):
    """Attach a random batch of 2-8 campaigns to every user record."""
    records = []
    for user in users:
        n_campaigns = random.randint(2, 8)
        records.append({
            'user': user,
            'campaigns': [get_campaign_data() for _ in range(n_campaigns)],
        })
    return records
# 8
rough_data = get_data(users)
rough_data[:2]
# Duplicated cell (same definitions appear earlier in the file).
while len(usernames) < usernames_no:
    usernames.add(fake.user_name())
# 4
def get_random_name_and_gender():
    """Return a (full_name, gender_code) pair, skewed ~60% female."""
    skew = .6
    male = random.random() > skew
    if male:
        return fake.name_male(), 'M'
    return fake.name_female(), 'F'
def get_users(usernames):
    """Serialize one fake user profile per username into JSON strings."""
    users = []
    for username in usernames:
        name, gender = get_random_name_and_gender()
        user = {
            'username': username,
            'name': name,
            'gender': gender,
            'email': fake.email(),
            'age': fake.random_int(min=18, max=90),
            'address': fake.address(),
        }
        # Bug fix: serialize the full profile dict, not just the username.
        users.append(json.dumps(user))
    return users
users = get_users(usernames)
users[:3]
# 5
def get_type():
    """Return one of the four synthetic campaign type codes."""
    types = ['AKX', 'BYU', 'GRZ', 'KTR']
    return random.choice(types)
def get_start_end_dates():
    """Return campaign (start, end) dates as YYYYMMDD strings.

    Start is within +/- one year of today; duration is 1 day to 2 years.
    """
    duration = random.randint(1, 2 * 365)
    offset = random.randint(-365, 365)
    start = date.today() - timedelta(days=offset)
    end = start + timedelta(days=duration)
    def _format_date(date_):
        return date_.strftime("%Y%m%d")
    return _format_date(start), _format_date(end)
def get_age():
    """Return an age bracket string such as '20-35' (multiples of 5)."""
    age = random.randint(20, 45)
    age -= age % 5
    diff = random.randint(5, 25)
    diff -= diff % 5
    return '{}-{}'.format(age, age + diff)
def get_gender():
    """Return a target gender code ('M', 'F', or 'B' for both)."""
    return random.choice(('M', 'F', 'B'))
def get_currency():
    """Return a campaign currency code."""
    return random.choice(('GBP', 'EUR', 'USD'))
def get_campaign_name():
    """Join type/dates/age/gender/currency into one underscore-separated name."""
    separator = '_'
    type_ = get_type()
    start, end = get_start_end_dates()
    age = get_age()
    gender = get_gender()
    currency = get_currency()
    return separator.join(
        (type_, start, end, age, gender, currency)
    )
# 6
def get_campaign_data():
    """Build one synthetic campaign record (budget/spend/clicks/impressions)."""
    name = get_campaign_name()
    budget = random.randint(10 ** 3, 10 ** 6)
    spent = random.randint(10 ** 2, budget)  # spend never exceeds budget
    clicks = int(random.triangular(10 ** 2, 10 ** 5, 0.2 * 10 ** 5))
    impressions = int(random.gauss(0.5 * 10 ** 6, 2))
    return {
        'cmp_name': name,
        'cmp_bgt': budget,
        'cmp_spent': spent,
        'cmp_clicks': clicks,
        'cmp_impr': impressions
    }
# 7
def get_data(users):
    """Pair every user with 2-8 randomly generated campaigns."""
    data = []
    for user in users:
        campaigns = [get_campaign_data()
                     for _ in range(random.randint(2, 8))
                     ]
        data.append({
            'user': user,
            'campaigns': campaigns
        })
    return data
# 8
rough_data = get_data(users)
rough_data[:2]
# 9 -- flatten: one record per campaign, with its owner attached.
data = []
for datum in rough_data:  # bug fix: was misspelled `roug_data` (NameError)
    for campaign in datum['campaigns']:
        campaign.update({
            'user': datum['user']
        })
        data.append(campaign)
data[:2]
# 10 -- persist the flattened records as a single JSON array.
with open('data.json', 'w') as stream:
    stream.write(json.dumps(data))
| 21.089494 | 68 | 0.571033 | 685 | 5,420 | 4.345985 | 0.159124 | 0.040309 | 0.036278 | 0.021498 | 0.913 | 0.913 | 0.913 | 0.913 | 0.913 | 0.913 | 0 | 0.033737 | 0.283579 | 5,420 | 256 | 69 | 21.171875 | 0.732938 | 0.005535 | 0 | 0.872928 | 0 | 0 | 0.051917 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121547 | false | 0 | 0.022099 | 0.033149 | 0.276243 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
b05347888ff5485965b77095d74e2c6f98689ca9 | 10,770 | py | Python | src/plots/synthetic.py | dballesteros7/master-thesis-2015 | 8c0bf9a6eef172fc8167a30780ae0666f8ea2d88 | [
"MIT"
] | null | null | null | src/plots/synthetic.py | dballesteros7/master-thesis-2015 | 8c0bf9a6eef172fc8167a30780ae0666f8ea2d88 | [
"MIT"
] | null | null | null | src/plots/synthetic.py | dballesteros7/master-thesis-2015 | 8c0bf9a6eef172fc8167a30780ae0666f8ea2d88 | [
"MIT"
] | null | null | null | import os
import itertools
import math
import numpy as np
import constants
import plots
from models.general_features import GeneralFeatures
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import seaborn as sns
def plot_weights_synthetic_2():
    """Render the hand-set FLDC toy-example weights (W^b, W^e) for the
    4-item synthetic problem and save the figure as an EPS file.

    NOTE(review): assumes ``constants.IMAGE_PATH`` exists and is
    writable — confirm before running headless.
    """
    model = GeneralFeatures(n_items=4, features=np.identity(4),
                            l_dims=2, k_dims=2)
    # Hand-picked weights: items {1,3} and {2,4} share diversity
    # dimensions, items {1,2} and {3,4} share coherence dimensions.
    model.a_weights = np.array([0, 0, 0, 0])
    model.b_weights = np.array([[20, 0], [0, 20], [20, 0], [0, 20]])
    model.c_weights = np.array([[20, 0], [20, 0], [0, 20], [0, 20]])
    model.update_composite_parameters()
    model.full_distribution()
    print(model.distribution)
    fig = plt.figure(figsize=(10, 4))
    gs = gridspec.GridSpec(1, 2, height_ratios=[1], width_ratios=[1, 1])
    cmap = sns.cubehelix_palette(8, start=1.8, light=.8, as_cmap=True)
    # Left panel: diversity weights W^b.
    ax1 = fig.add_subplot(gs[0])
    color = ax1.matshow(model.diversity_weights, interpolation='none',
                        cmap=cmap, vmin=0, vmax=20)
    ax1.grid(None)
    ax1.set_yticks([0, 1, 2, 3])
    ax1.set_yticklabels(['1', '2', '3', '4'])
    ax1.set_ylabel('Item')
    ax1.set_xticks([0, 1])
    ax1.set_xticklabels([])
    ax1.set_xlabel('$d$')
    ax1.set_title(r'$\mathbf{W}^{b}$')
    # White dashed separator between the two item groups.
    ax1.plot([-.5,1.5], [1.5, 1.5], color='white', linestyle='--', linewidth=2)
    # Right panel: coherence weights W^e (shares the item axis).
    ax2 = fig.add_subplot(gs[1], sharey=ax1)
    color_set = ax2.matshow(model.coherence_weights, interpolation='none',
                            cmap=cmap, vmin=0, vmax=10)
    ax2.grid(None)
    ax2.set_yticks([0, 1, 2, 3])
    ax2.set_xticks([0, 1])
    ax2.set_xticklabels([])
    ax2.set_xlabel('$c$')
    ax2.set_title(r'$\mathbf{W}^{e}$')
    #plt.setp(ax2.get_yticklabels(), visible=False)
    ax2.plot([-.5,1.5], [1.5, 1.5], color='white', linestyle='--', linewidth=2)
    ax1.set_adjustable('box-forced')
    ax2.set_adjustable('box-forced')
    #plt.colorbar(color)
    plt.savefig(os.path.join(
        constants.IMAGE_PATH, 'fldc_toy_example_mixed_weights_pres.eps'),
        bbox_inches='tight')
    plt.show()
def distribution_error_1(other_distribution):
    """Compare a distribution over subsets of 4 items with the ground
    truth P({0,1}) = P({2,3}) = 0.5 (zero everywhere else).

    ``other_distribution`` must map every frozenset of {0..3} to its
    probability.  Returns ``(rmse_percent, total_variation_distance)``.
    """
    target = {
        frozenset(combo): 0.0
        for k in range(5)
        for combo in itertools.combinations(range(4), k)
    }
    target[frozenset({0, 1})] = 0.5
    target[frozenset({2, 3})] = 0.5
    squared_total = 0.0
    abs_total = 0.0
    for subset, truth in target.items():
        gap = abs(other_distribution[subset] - truth)
        abs_total += gap
        squared_total += gap ** 2
    rmse = 100 * math.sqrt(squared_total / len(target))
    # Half the L1 distance is the total variation distance.
    return rmse, abs_total / 2
def plot_weights_synthetic_2_learned():
    """Load the learned FLDC model for synthetic problem 2, print its
    error against the ground-truth distribution, and (currently
    disabled) plot the learned u / W^b / W^e weights.

    NOTE(review): the bare ``return`` after the print makes the entire
    plotting section below dead code — presumably left in on purpose
    while only the error numbers were needed; confirm before removing.
    """
    model = GeneralFeatures(n_items=4, features=np.identity(4),
                            l_dims=2, k_dims=2)
    model.load_from_file(constants.NCE_OUT_GENERAL_PATH_OLD_TPL.format(
        dataset='path_set_synthetic_2', fold=1, l_dim=2, k_dim=2,
        index='0'))
    model.update_composite_parameters()
    model.full_distribution()
    print(distribution_error_1(model.distribution))
    return
    # --- dead code below (see note in docstring) ---
    fig = plt.figure(figsize=(10, 4))
    gs = gridspec.GridSpec(1, 3, height_ratios=[1], width_ratios=[1, 2, 2])
    cmap = sns.cubehelix_palette(8, start=1.8, light=.8, as_cmap=True)
    # Panel 1: learned per-item utilities u.
    ax1 = fig.add_subplot(gs[0])
    color_set = ax1.matshow(model.utilities.reshape(4, 1), interpolation='none',
                            cmap=cmap, vmin=-3, vmax=6)
    ax1.grid(None)
    ax1.set_xticks([0])
    ax1.set_yticks([0, 1, 2, 3])
    ax1.set_xticklabels([])
    ax1.set_yticklabels(['1', '2', '3', '4'])
    ax1.set_ylabel('Item ($i$)')
    ax1.set_title(r'$\mathbf{u}$')
    # Panel 2: learned diversity weights W^b.
    ax2 = fig.add_subplot(gs[1], sharey=ax1)
    color_set = ax2.matshow(model.diversity_weights, interpolation='none',
                            cmap=cmap, vmin=-3, vmax=6)
    ax2.grid(None)
    ax2.set_yticks([0, 1, 2, 3])
    ax2.set_xticks([0, 1])
    ax2.set_xticklabels([])
    ax2.set_xlabel('$d$')
    ax2.set_title(r'$\mathbf{W}^{b}$')
    plt.setp(ax2.get_yticklabels(), visible=False)
    ax2.plot([-.5,1.5], [1.5, 1.5], color='white', linestyle='--', linewidth=2)
    # Panel 3: learned coherence weights W^e.
    ax3 = fig.add_subplot(gs[2], sharey=ax1)
    color_set = ax3.matshow(model.coherence_weights, interpolation='none',
                            cmap=cmap, vmin=-3, vmax=6)
    ax3.grid(None)
    ax3.set_yticks([0, 1, 2, 3])
    ax3.set_xticks([0, 1])
    ax3.set_xticklabels([])
    ax3.set_xlabel('$c$')
    ax3.set_title(r'$\mathbf{W}^{e}$')
    plt.setp(ax3.get_yticklabels(), visible=False)
    ax3.plot([-.5,1.5], [1.5, 1.5], color='white', linestyle='--', linewidth=2)
    ax1.set_adjustable('box-forced')
    ax2.set_adjustable('box-forced')
    ax3.set_adjustable('box-forced')
    plt.colorbar(color_set)
    plt.savefig(os.path.join(
        constants.IMAGE_PATH, 'fldc_toy_example_learned_weights.eps'),
        bbox_inches='tight')
    plt.show()
def plot_weights_synthetic_3():
    """Render the hand-set FFLDC toy-example weights (a, B, E) for the
    6-item / 3-feature synthetic problem and save them as an EPS file.

    NOTE(review): assumes ``constants.IMAGE_PATH`` exists and is
    writable.
    """
    features = np.array([
        [4., 1., 0.],
        [4., 1., 1.],
        [3., 0., 1.],
        [3., 1., 0.],
        [2., 1., 1.],
        [2., 1., 0.],
        # [5., 0., 1.],
    ])
    model = GeneralFeatures(n_items=6, features=features,
                            l_dims=2, k_dims=1)
    # Hand-picked weights: strong diversity on features 2 and 3, small
    # modulating and coherence weights on feature 1.
    model.a_weights = np.array([0.1, 0, 0])
    model.b_weights = np.array([[0, 0], [10, 0], [0, 10]])
    model.c_weights = np.array([[0.5], [0], [0]])
    model.update_composite_parameters()
    fig = plt.figure(figsize=(10, 4))
    gs = gridspec.GridSpec(1, 3, height_ratios=[1], width_ratios=[1, 1, 2])
    cmap = sns.cubehelix_palette(8, start=1.8, light=.8, as_cmap=True)
    # Panel 1 (left): modulating weights a, one per feature.
    ax1 = fig.add_subplot(gs[0])
    color_set = ax1.matshow(model.a_weights.reshape(3, 1), interpolation='none',
                            cmap=cmap, vmin=0, vmax=1)
    ax1.grid(None)
    ax1.set_xticks([0])
    ax1.set_yticks([0, 1, 2])
    ax1.set_xticklabels([])
    ax1.set_yticklabels(['1', '2', '3'])
    ax1.set_xlabel('')
    ax1.set_ylabel('Feature ($m$)')
    ax1.set_title(r'$\mathbf{a}$')
    # Panel placed in grid slot 2 (rightmost): diversity weights B.
    ax2 = fig.add_subplot(gs[2], sharey=ax1)
    color_set = ax2.matshow(model.b_weights, interpolation='none',
                            cmap=cmap, vmin=0, vmax=10)
    ax2.grid(None)
    ax2.set_yticks([0, 1, 2])
    ax2.set_xticks([0, 1])
    ax2.set_xticklabels([])
    ax2.set_xlabel('$d$')
    ax2.set_title(r'$\mathbf{B}$')
    plt.setp(ax2.get_yticklabels(), visible=False)
    plt.colorbar(color_set, ax=ax2)
    # Panel placed in grid slot 1 (middle): coherence weights E.
    ax3 = fig.add_subplot(gs[1], sharey=ax1)
    color_set = ax3.matshow(model.c_weights, interpolation='none',
                            cmap=cmap, vmin=0, vmax=1)
    ax3.grid(None)
    ax3.set_yticks([0, 1, 2])
    ax3.set_xticks([0])
    ax3.set_xticklabels([])
    ax3.set_xlabel('$c$')
    ax3.set_title(r'$\mathbf{E}$')
    plt.setp(ax3.get_yticklabels(), visible=False)
    plt.colorbar(color_set, ax=ax3)
    ax1.set_adjustable('box-forced')
    ax2.set_adjustable('box-forced')
    ax3.set_adjustable('box-forced')
    plt.savefig(os.path.join(
        constants.IMAGE_PATH, 'ffldc_toy_example.eps'),
        bbox_inches='tight')
    plt.show()
def distribution_error(other_distribution):
    """Compare a distribution over subsets of 6 items with the
    hand-specified ground truth for the synthetic-4 experiment.

    ``other_distribution`` must map every frozenset of {0..5} to its
    probability.  Returns ``(rmse_percent, total_variation_distance)``.
    """
    target = {
        frozenset(combo): 0.0
        for k in range(7)
        for combo in itertools.combinations(range(6), k)
    }
    target.update({
        frozenset({0, 2}): 0.3,
        frozenset({2, 3}): 0.25,
        frozenset({2, 5}): 0.15,
        frozenset({1}): 0.1,
        frozenset({0}): 0.06,
        frozenset({2}): 0.04,
        frozenset({3}): 0.04,
        frozenset({4}): 0.03,
        frozenset({5}): 0.03,
    })
    squared_total = 0.0
    abs_total = 0.0
    for subset, truth in target.items():
        gap = abs(other_distribution[subset] - truth)
        abs_total += gap
        squared_total += gap ** 2
    rmse = 100 * math.sqrt(squared_total / len(target))
    # Half the L1 distance is the total variation distance.
    return rmse, abs_total / 2
def plot_weights_synthetic_3_learned():
    """Load the learned FFLDC model for synthetic problem 4, print its
    error against the ground-truth distribution, and (currently
    disabled) plot the learned a / B / E weights.

    NOTE(review): the bare ``return`` after the print makes the entire
    plotting section below dead code — confirm before removing.
    """
    features = np.array([
        [4., 1., 0.],
        [4., 1., 1.],
        [3., 0., 1.],
        [3., 1., 0.],
        [2., 1., 1.],
        [2., 1., 0.],
        # [5., 0., 1.],
    ])
    model = GeneralFeatures(n_items=6, features=features,
                            l_dims=2, k_dims=1)
    model.load_from_file(constants.NCE_OUT_GENERAL_PATH_OLD_TPL.format(
        dataset='path_set_synthetic_4', fold=1, l_dim=2, k_dim=1,
        index='1'))
    model.update_composite_parameters()
    model.full_distribution()
    print(distribution_error(model.distribution))
    return
    # --- dead code below (see note in docstring) ---
    fig = plt.figure(figsize=(10, 4))
    gs = gridspec.GridSpec(1, 3, height_ratios=[1], width_ratios=[1, 1, 2])
    cmap = sns.cubehelix_palette(8, start=1.8, light=0.8, as_cmap=True)
    # Panel 1 (left): learned modulating weights a.
    ax1 = fig.add_subplot(gs[0])
    color_set = ax1.matshow(model.a_weights.reshape(3, 1), interpolation='none',
                            cmap=cmap, vmin=-1, vmax=1)
    ax1.grid(None)
    ax1.set_xticks([0])
    ax1.set_yticks([0, 1, 2])
    ax1.set_xticklabels([])
    ax1.set_yticklabels(['1', '2', '3'])
    ax1.set_xlabel('')
    ax1.set_ylabel('Feature ($m$)')
    ax1.set_title(r'$\mathbf{a}$')
    # Grid slot 2 (rightmost): learned diversity weights B.
    ax2 = fig.add_subplot(gs[2], sharey=ax1)
    color_set = ax2.matshow(model.b_weights, interpolation='none',
                            cmap=cmap, vmin=0, vmax=8)
    ax2.grid(None)
    ax2.set_yticks([0, 1, 2])
    ax2.set_xticks([0, 1])
    ax2.set_xticklabels([])
    ax2.set_xlabel('$d$')
    ax2.set_title(r'$\mathbf{B}$')
    plt.setp(ax2.get_yticklabels(), visible=False)
    plt.colorbar(color_set, ax=ax2)
    # Grid slot 1 (middle): learned coherence weights E.
    ax3 = fig.add_subplot(gs[1], sharey=ax1)
    color_set = ax3.matshow(model.c_weights, interpolation='none',
                            cmap=cmap, vmin=-1, vmax=1)
    ax3.grid(None)
    ax3.set_yticks([0, 1, 2])
    ax3.set_xticks([0])
    ax3.set_xticklabels([])
    ax3.set_xlabel('$c$')
    ax3.set_title(r'$\mathbf{E}$')
    plt.setp(ax3.get_yticklabels(), visible=False)
    plt.colorbar(color_set, ax=ax3)
    ax1.set_adjustable('box-forced')
    ax2.set_adjustable('box-forced')
    ax3.set_adjustable('box-forced')
    plt.savefig(os.path.join(
        constants.IMAGE_PATH, 'ffldc_toy_example_learned_weights.eps'),
        bbox_inches='tight')
    plt.show()
if __name__ == '__main__':
    # Apply shared plot styling, then render the hand-set toy weights.
    plots.setup()
    plot_weights_synthetic_2()
    #plot_weights_synthetic_2_learned()
| 32.053571 | 80 | 0.605942 | 1,555 | 10,770 | 4.01672 | 0.109325 | 0.029779 | 0.005764 | 0.026417 | 0.902177 | 0.85879 | 0.838136 | 0.802754 | 0.777778 | 0.694044 | 0 | 0.061364 | 0.22377 | 10,770 | 335 | 81 | 32.149254 | 0.685766 | 0.011792 | 0 | 0.709559 | 0 | 0 | 0.057436 | 0.012502 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022059 | false | 0 | 0.036765 | 0 | 0.073529 | 0.011029 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
b05d2e574c5b5af0c650b27db74838550e0868ee | 120 | py | Python | application/config/__init__.py | crossgovernmentservices/csl-my-learning-plan | a22b76b5ba0327e426d91dce073c0e0f887b400e | [
"MIT"
] | null | null | null | application/config/__init__.py | crossgovernmentservices/csl-my-learning-plan | a22b76b5ba0327e426d91dce073c0e0f887b400e | [
"MIT"
] | null | null | null | application/config/__init__.py | crossgovernmentservices/csl-my-learning-plan | a22b76b5ba0327e426d91dce073c0e0f887b400e | [
"MIT"
] | null | null | null | from .config import Config # NOQA
from .config import DevelopmentConfig # NOQA
from .config import TestConfig # NOQA
| 30 | 45 | 0.775 | 15 | 120 | 6.2 | 0.4 | 0.322581 | 0.516129 | 0.430108 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.175 | 120 | 3 | 46 | 40 | 0.939394 | 0.116667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
c6c6f41bd844e52b3fff2ada1210dca9bac3b1e5 | 3,330 | py | Python | BoManifolds/Riemannian_utils/spd_constraints_utils.py | NoemieJaquier/MaternGaBO | 1841e24cef6310bb1bdf7a4d51295b6d593feb41 | [
"MIT"
] | 3 | 2021-11-15T16:43:51.000Z | 2022-03-29T16:46:13.000Z | BoManifolds/Riemannian_utils/spd_constraints_utils.py | NoemieJaquier/MaternGaBO | 1841e24cef6310bb1bdf7a4d51295b6d593feb41 | [
"MIT"
] | null | null | null | BoManifolds/Riemannian_utils/spd_constraints_utils.py | NoemieJaquier/MaternGaBO | 1841e24cef6310bb1bdf7a4d51295b6d593feb41 | [
"MIT"
] | null | null | null | """
This file is part of the MaternGaBO library.
Authors: Noemie Jaquier and Leonel Rozo, 2021
License: MIT
Contact: noemie.jaquier@kit.edu, leonel.rozo@de.bosch.com
"""
import numpy as np
from BoManifolds.Riemannian_utils.spd_utils import vector_to_symmetric_matrix_mandel
def min_eigenvalue_constraint(x_vec, min_eigenvalue):
    """Inequality constraint: the smallest eigenvalue of the SPD matrix
    must be at least ``min_eigenvalue``.

    Parameters
    ----------
    :param x_vec: SPD matrix in Mandel vector form
    :param min_eigenvalue: minimum eigenvalue to satisfy the constraint

    Returns
    -------
    :return: smallest eigenvalue of x minus the tolerated minimum;
        non-negative iff the constraint is satisfied
    """
    spd_matrix = vector_to_symmetric_matrix_mandel(x_vec)
    smallest = np.min(np.linalg.eigvals(spd_matrix))
    return smallest - min_eigenvalue
def max_eigenvalue_constraint(x_vec, max_eigenvalue):
    """Inequality constraint: the largest eigenvalue of the SPD matrix
    must not exceed ``max_eigenvalue``.

    Parameters
    ----------
    :param x_vec: SPD matrix in Mandel vector form
    :param max_eigenvalue: maximum eigenvalue to satisfy the constraint

    Returns
    -------
    :return: tolerated maximum minus the largest eigenvalue of x;
        non-negative iff the constraint is satisfied
    """
    spd_matrix = vector_to_symmetric_matrix_mandel(x_vec)
    largest = np.max(np.linalg.eigvals(spd_matrix))
    return max_eigenvalue - largest
def min_eigenvalue_constraint_cholesky(x_chol, min_eigenvalue):
    """Inequality constraint on the minimum eigenvalue of a SPD matrix
    given via its vectorized lower-triangular Cholesky factor.

    The value returned is non-negative iff the constraint is satisfied.

    Parameters
    ----------
    :param x_chol: cholesky decomposition of a SPD matrix in vector form
    :param min_eigenvalue: minimum eigenvalue to satisfy the constraint

    Returns
    -------
    :return: difference between minimum eigenvalue of x and minimum tolerated eigenvalue
    """
    dim_vec = x_chol.shape[0]
    # Recover the matrix dimension n from n*(n+1)/2 = dim_vec.
    dim = int((-1.0 + (1.0 + 8.0 * dim_vec) ** 0.5) / 2.0)
    indices = np.tril_indices(dim)
    xL = np.zeros((dim, dim))
    xL[indices] = x_chol
    x_mat = np.dot(xL, xL.T)
    eigenvalues = np.linalg.eigvals(x_mat)
    # Bug fix: this constraint bounds the *smallest* eigenvalue, so use
    # np.min — the original used np.max (copy-paste from the max variant),
    # contradicting its own docstring.
    return np.min(eigenvalues) - min_eigenvalue
def max_eigenvalue_constraint_cholesky(x_chol, max_eigenvalue):
    """Inequality constraint on the maximum eigenvalue of a SPD matrix
    given via its vectorized lower-triangular Cholesky factor.

    The value returned is non-negative iff the constraint is satisfied.

    Parameters
    ----------
    :param x_chol: cholesky decomposition of a SPD matrix in vector form
    :param max_eigenvalue: maximum eigenvalue to satisfy the constraint

    Returns
    -------
    :return: difference between maximum tolerated eigenvalue and maximum eigenvalue of x
    """
    n_entries = x_chol.shape[0]
    # Solve n*(n+1)/2 = n_entries for the matrix dimension n.
    n = int((-1.0 + (1.0 + 8.0 * n_entries) ** 0.5) / 2.0)
    lower = np.zeros((n, n))
    lower[np.tril_indices(n)] = x_chol
    spd_matrix = lower.dot(lower.T)
    largest = np.max(np.linalg.eigvals(spd_matrix))
    return max_eigenvalue - largest
| 32.970297 | 94 | 0.711411 | 463 | 3,330 | 4.987041 | 0.194384 | 0.045041 | 0.015591 | 0.031182 | 0.896925 | 0.864443 | 0.839324 | 0.796016 | 0.796016 | 0.796016 | 0 | 0.009782 | 0.201802 | 3,330 | 100 | 95 | 33.3 | 0.858916 | 0.567267 | 0 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.071429 | 0 | 0.357143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
a552142a16e38f6a285d3e54d6fec79a13f6c067 | 2,084 | py | Python | amrrt/cost.py | dan-armstrong/amrrt | 84d063a45adafc317c141bbad7306aa1dbca9b69 | [
"Apache-2.0"
] | 2 | 2021-07-15T09:49:02.000Z | 2022-03-27T07:44:20.000Z | amrrt/cost.py | dan-armstrong/amrrt | 84d063a45adafc317c141bbad7306aa1dbca9b69 | [
"Apache-2.0"
] | null | null | null | amrrt/cost.py | dan-armstrong/amrrt | 84d063a45adafc317c141bbad7306aa1dbca9b69 | [
"Apache-2.0"
] | 1 | 2022-03-03T13:42:59.000Z | 2022-03-03T13:42:59.000Z | #Copyright (c) 2020 Ocado. All Rights Reserved.
import numpy as np
class Cost:
    """A traversal cost: a numeric ``value`` plus a ``blocked`` flag.

    A blocked cost behaves like positive infinity: it compares greater
    than every unblocked cost, and against plain numbers it compares as
    ``np.inf``.  ``+``/``-`` shift ``value`` by a plain number and keep
    the flag; combining two ``Cost`` objects that way raises ValueError.
    """

    def __init__(self, value, blocked=False):
        self.value = value
        self.blocked = blocked

    def __repr__(self):
        return 'Cost({!r}, blocked={!r})'.format(self.value, self.blocked)

    def to_float(self):
        """Collapse to a plain float: ``np.inf`` when blocked."""
        if self.blocked:
            return np.inf
        return self.value

    def __add__(self, other):
        # Only plain numbers may be added; Cost + Cost is ambiguous.
        if isinstance(other, Cost):
            raise ValueError
        return Cost(self.value + other, self.blocked)

    def __sub__(self, other):
        if isinstance(other, Cost):
            raise ValueError
        return Cost(self.value - other, self.blocked)

    def __hash__(self):
        # Bug fix: defining __eq__ suppressed the inherited __hash__,
        # making Cost unhashable (unusable in sets/dict keys).  Hash the
        # float collapse so equal objects — including Cost(v) == v —
        # hash identically.
        return hash(self.to_float())

    def __lt__(self, other):
        if isinstance(other, Cost):
            # Blocked vs unblocked: only the unblocked side is smaller.
            if self.blocked != other.blocked:
                return not self.blocked
            return self.value < other.value
        if self.blocked:
            return np.inf < other
        return self.value < other

    def __le__(self, other):
        if isinstance(other, Cost):
            if self.blocked != other.blocked:
                return not self.blocked
            return self.value <= other.value
        if self.blocked:
            return np.inf <= other
        return self.value <= other

    def __eq__(self, other):
        if isinstance(other, Cost):
            return self.blocked == other.blocked and self.value == other.value
        if self.blocked:
            return np.inf == other
        return self.value == other

    def __ne__(self, other):
        if isinstance(other, Cost):
            return self.blocked != other.blocked or self.value != other.value
        if self.blocked:
            return np.inf != other
        return self.value != other

    def __gt__(self, other):
        if isinstance(other, Cost):
            if self.blocked != other.blocked:
                return self.blocked
            return self.value > other.value
        if self.blocked:
            return np.inf > other
        return self.value > other

    def __ge__(self, other):
        if isinstance(other, Cost):
            if self.blocked != other.blocked:
                return self.blocked
            return self.value >= other.value
        if self.blocked:
            return np.inf >= other
        return self.value >= other
| 32.5625 | 78 | 0.605086 | 260 | 2,084 | 4.707692 | 0.15 | 0.179739 | 0.160131 | 0.163399 | 0.857026 | 0.857026 | 0.837418 | 0.837418 | 0.837418 | 0.837418 | 0 | 0.002743 | 0.300384 | 2,084 | 63 | 79 | 33.079365 | 0.836763 | 0.022073 | 0 | 0.28 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.02 | 0 | 0.54 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 8 |
36fff3cb027e98b8f2713aa17ddd4e4295f776b2 | 76,982 | py | Python | awx/main/south_migrations/0037_v148_changes.py | alexander-bauer/awx | d1319b739406dad988f97c41cb92093f180ba822 | [
"Apache-2.0"
] | 1 | 2021-06-11T20:01:06.000Z | 2021-06-11T20:01:06.000Z | awx/main/south_migrations/0037_v148_changes.py | alexander-bauer/awx | d1319b739406dad988f97c41cb92093f180ba822 | [
"Apache-2.0"
] | 4 | 2020-04-29T23:03:16.000Z | 2022-03-01T23:56:09.000Z | awx/main/south_migrations/0037_v148_changes.py | alexander-bauer/awx | d1319b739406dad988f97c41cb92093f180ba822 | [
"Apache-2.0"
] | 1 | 2018-06-06T08:47:22.000Z | 2018-06-06T08:47:22.000Z | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Drop the legacy job/project/inventory tables and columns.

    Removes the old JobTemplate/InventorySource/Project/ProjectUpdate/
    InventoryUpdate/Job models plus every column and M2M join table
    that referenced them; the surviving ``new_job`` foreign keys become
    mandatory (null no longer allowed).
    """
    # Drop the (job, host) uniqueness first so the job_id column can be
    # removed further down without a dangling constraint.
    db.delete_unique(u'main_jobhostsummary', ['job_id', 'host_id'])
    # Deleting model 'JobTemplate'
    db.delete_table(u'main_jobtemplate')
    # Deleting model 'InventorySource'
    db.delete_table(u'main_inventorysource')
    # Deleting model 'Project'
    db.delete_table(u'main_project')
    # Deleting model 'ProjectUpdate'
    db.delete_table(u'main_projectupdate')
    # Deleting model 'InventoryUpdate'
    db.delete_table(u'main_inventoryupdate')
    # Deleting model 'Job'
    db.delete_table(u'main_job')
    # Deleting field 'Host.last_job'
    db.delete_column(u'main_host', 'last_job_id')
    # Removing M2M table for field inventory_sources on 'Host'
    db.delete_table(db.shorten_name(u'main_host_inventory_sources'))
    # Removing M2M table for field projects on 'Organization'
    db.delete_table(db.shorten_name(u'main_organization_projects'))
    # Removing M2M table for field projects on 'Team'
    db.delete_table(db.shorten_name(u'main_team_projects'))
    # Deleting field 'Permission.project'
    db.delete_column(u'main_permission', 'project_id')
    # Deleting field 'JobHostSummary.job'
    db.delete_column(u'main_jobhostsummary', 'job_id')
    # Changing field 'JobHostSummary.new_job': the replacement FK to
    # JobNew becomes non-nullable (default=None forces NOT NULL here).
    db.alter_column(u'main_jobhostsummary', 'new_job_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['main.JobNew']))
    # Removing M2M table for field inventory_sources on 'Group'
    db.delete_table(db.shorten_name(u'main_group_inventory_sources'))
    # Deleting field 'JobEvent.job'
    db.delete_column(u'main_jobevent', 'job_id')
    # Changing field 'JobEvent.new_job': same NOT NULL tightening.
    db.alter_column(u'main_jobevent', 'new_job_id', self.gf('django.db.models.fields.related.ForeignKey')(default=None, to=orm['main.JobNew']))
    # Removing M2M table for field inventory_update on 'ActivityStream'
    db.delete_table(db.shorten_name(u'main_activitystream_inventory_update'))
    # Removing M2M table for field project_update on 'ActivityStream'
    db.delete_table(db.shorten_name(u'main_activitystream_project_update'))
    # Removing M2M table for field inventory_source on 'ActivityStream'
    db.delete_table(db.shorten_name(u'main_activitystream_inventory_source'))
    # Removing M2M table for field job_template on 'ActivityStream'
    db.delete_table(db.shorten_name(u'main_activitystream_job_template'))
    # Removing M2M table for field job on 'ActivityStream'
    db.delete_table(db.shorten_name(u'main_activitystream_job'))
    # Removing M2M table for field project on 'ActivityStream'
    db.delete_table(db.shorten_name(u'main_activitystream_project'))
def backwards(self, orm):
    """Reverse this migration.

    Re-creates every table, column, M2M join table and unique constraint
    that forwards() dropped: the legacy JobTemplate / InventorySource /
    Project / ProjectUpdate / InventoryUpdate / Job models, the
    Host.last_job and Permission.project / JobHostSummary.job /
    JobEvent.job foreign keys, and the ActivityStream M2M join tables.
    Auto-generated by South; the order of operations is significant.

    NOTE(review): the odd related_name values below
    ("{'class': ..., 'app_label': ...}(class)s_created+") are South's
    frozen rendering of the '%(class)s' placeholder — left exactly as
    generated; presumably harmless for schema DDL, but verify before
    reusing these strings anywhere else.
    """
    # Adding model 'JobTemplate'
    db.create_table(u'main_jobtemplate', (
        ('credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobtemplates', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
        ('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'jobtemplate', 'app_label': 'main'}(class)s_modified+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
        ('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=None)),
        ('extra_vars', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('verbosity', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, blank=True)),
        ('job_tags', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
        ('job_type', self.gf('django.db.models.fields.CharField')(max_length=64)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=None)),
        ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'jobtemplate', 'app_label': 'main'}(class)s_created+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='job_templates', null=True, on_delete=models.SET_NULL, to=orm['main.Project'])),
        ('host_config_key', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
        ('limit', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
        ('inventory', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobtemplates', null=True, on_delete=models.SET_NULL, to=orm['main.Inventory'])),
        ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('forks', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, blank=True)),
        ('playbook', self.gf('django.db.models.fields.CharField')(default='', max_length=1024)),
        ('cloud_credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobtemplates_as_cloud_credential+', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=512, unique=True)),
    ))
    db.send_create_signal('main', ['JobTemplate'])
    # Adding model 'InventorySource'
    db.create_table(u'main_inventorysource', (
        ('last_updated', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
        ('source_regions', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
        ('current_update', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='inventory_source_as_current_update+', null=True, on_delete=models.SET_NULL, to=orm['main.InventoryUpdate'])),
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('overwrite', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('source_vars', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('group', self.gf('awx.main.fields.AutoOneToOneField')(default=None, related_name='inventory_source', unique=True, null=True, to=orm['main.Group'])),
        ('last_update_failed', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'inventorysource', 'app_label': 'main'}(class)s_created+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
        ('last_update', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='inventory_source_as_last_update+', null=True, on_delete=models.SET_NULL, to=orm['main.InventoryUpdate'])),
        ('source', self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True)),
        ('inventory', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='inventory_sources', null=True, to=orm['main.Inventory'])),
        ('update_cache_timeout', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
        ('status', self.gf('django.db.models.fields.CharField')(default='none', max_length=32)),
        ('credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='inventorysources', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
        ('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('overwrite_vars', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'inventorysource', 'app_label': 'main'}(class)s_modified+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
        ('update_on_launch', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=None)),
        ('source_path', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=None)),
    ))
    db.send_create_signal('main', ['InventorySource'])
    # Adding model 'Project'
    db.create_table(u'main_project', (
        ('scm_branch', self.gf('django.db.models.fields.CharField')(default='', max_length=256, blank=True)),
        ('scm_update_cache_timeout', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
        ('scm_clean', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('scm_delete_on_update', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('current_update', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='project_as_current_update+', null=True, on_delete=models.SET_NULL, to=orm['main.ProjectUpdate'])),
        ('last_updated', self.gf('django.db.models.fields.DateTimeField')(default=None, null=True)),
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'project', 'app_label': 'main'}(class)s_modified+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
        ('last_update_failed', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'project', 'app_label': 'main'}(class)s_created+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
        ('last_update', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='project_as_last_update+', null=True, on_delete=models.SET_NULL, to=orm['main.ProjectUpdate'])),
        ('local_path', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)),
        ('scm_delete_on_next_update', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('status', self.gf('django.db.models.fields.CharField')(default='ok', max_length=32, null=True)),
        ('credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='projects', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
        ('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('scm_type', self.gf('django.db.models.fields.CharField')(default='', max_length=8, blank=True)),
        ('scm_update_on_launch', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('name', self.gf('django.db.models.fields.CharField')(max_length=512, unique=True)),
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=None)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=None)),
        ('scm_url', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
    ))
    db.send_create_signal('main', ['Project'])
    # Adding model 'ProjectUpdate'
    db.create_table(u'main_projectupdate', (
        ('cancel_flag', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('scm_branch', self.gf('django.db.models.fields.CharField')(default='', max_length=256, blank=True)),
        ('scm_clean', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('scm_delete_on_update', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('start_args', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('celery_task_id', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'projectupdate', 'app_label': 'main'}(class)s_modified+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
        ('job_cwd', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
        ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'projectupdate', 'app_label': 'main'}(class)s_created+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
        ('failed', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('local_path', self.gf('django.db.models.fields.CharField')(max_length=1024, blank=True)),
        ('status', self.gf('django.db.models.fields.CharField')(default='new', max_length=20)),
        ('credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='projectupdates', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
        ('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('result_traceback', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('scm_type', self.gf('django.db.models.fields.CharField')(default='', max_length=8, blank=True)),
        ('job_env', self.gf('jsonfield.fields.JSONField')(default={}, blank=True)),
        ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('result_stdout_file', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=None)),
        ('job_args', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=None)),
        ('scm_url', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
        ('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='project_updates', to=orm['main.Project'])),
        ('_result_stdout', self.gf('django.db.models.fields.TextField')(default='', db_column='result_stdout', blank=True)),
    ))
    db.send_create_signal('main', ['ProjectUpdate'])
    # Adding model 'InventoryUpdate'
    db.create_table(u'main_inventoryupdate', (
        ('cancel_flag', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('source_regions', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
        ('license_error', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('start_args', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('celery_task_id', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('overwrite', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('source_vars', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'inventoryupdate', 'app_label': 'main'}(class)s_modified+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
        ('job_cwd', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
        ('source', self.gf('django.db.models.fields.CharField')(default='', max_length=32, blank=True)),
        ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'inventoryupdate', 'app_label': 'main'}(class)s_created+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
        ('failed', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('status', self.gf('django.db.models.fields.CharField')(default='new', max_length=20)),
        ('credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='inventoryupdates', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
        ('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('overwrite_vars', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('result_traceback', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('job_env', self.gf('jsonfield.fields.JSONField')(default={}, blank=True)),
        ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('result_stdout_file', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('inventory_source', self.gf('django.db.models.fields.related.ForeignKey')(related_name='inventory_updates', to=orm['main.InventorySource'])),
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=None)),
        ('job_args', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('source_path', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=None)),
        ('_result_stdout', self.gf('django.db.models.fields.TextField')(default='', db_column='result_stdout', blank=True)),
    ))
    db.send_create_signal('main', ['InventoryUpdate'])
    # Adding model 'Job'
    db.create_table(u'main_job', (
        ('cancel_flag', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobs', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
        ('description', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('result_traceback', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('job_type', self.gf('django.db.models.fields.CharField')(max_length=64)),
        ('start_args', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('job_tags', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
        ('celery_task_id', self.gf('django.db.models.fields.CharField')(default='', max_length=100, blank=True)),
        ('playbook', self.gf('django.db.models.fields.CharField')(default='', max_length=1024)),
        ('job_env', self.gf('jsonfield.fields.JSONField')(default={}, blank=True)),
        ('active', self.gf('django.db.models.fields.BooleanField')(default=True)),
        ('result_stdout_file', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('status', self.gf('django.db.models.fields.CharField')(default='new', max_length=20)),
        ('modified_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'job', 'app_label': 'main'}(class)s_modified+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
        ('job_cwd', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
        ('job_template', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobs', on_delete=models.SET_NULL, default=None, to=orm['main.JobTemplate'], blank=True, null=True)),
        ('created', self.gf('django.db.models.fields.DateTimeField')(default=None)),
        ('extra_vars', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('verbosity', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, blank=True)),
        ('job_args', self.gf('django.db.models.fields.TextField')(default='', blank=True)),
        ('modified', self.gf('django.db.models.fields.DateTimeField')(default=None)),
        ('created_by', self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name="{'class': 'job', 'app_label': 'main'}(class)s_created+", null=True, on_delete=models.SET_NULL, to=orm['auth.User'])),
        ('project', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobs', null=True, on_delete=models.SET_NULL, to=orm['main.Project'])),
        ('failed', self.gf('django.db.models.fields.BooleanField')(default=False)),
        ('inventory', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobs', null=True, on_delete=models.SET_NULL, to=orm['main.Inventory'])),
        ('_result_stdout', self.gf('django.db.models.fields.TextField')(default='', db_column='result_stdout', blank=True)),
        ('limit', self.gf('django.db.models.fields.CharField')(default='', max_length=1024, blank=True)),
        ('forks', self.gf('django.db.models.fields.PositiveIntegerField')(default=0, blank=True)),
        ('cloud_credential', self.gf('django.db.models.fields.related.ForeignKey')(related_name='jobs_as_cloud_credential+', on_delete=models.SET_NULL, default=None, to=orm['main.Credential'], blank=True, null=True)),
        ('launch_type', self.gf('django.db.models.fields.CharField')(default='manual', max_length=20)),
    ))
    db.send_create_signal('main', ['Job'])
    # Adding field 'Host.last_job'
    db.add_column(u'main_host', 'last_job',
                  self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='hosts_as_last_job+', null=True, on_delete=models.SET_NULL, to=orm['main.Job']),
                  keep_default=False)
    # Adding M2M table for field inventory_sources on 'Host'
    m2m_table_name = db.shorten_name(u'main_host_inventory_sources')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('host', models.ForeignKey(orm['main.host'], null=False)),
        ('inventorysource', models.ForeignKey(orm['main.inventorysource'], null=False))
    ))
    db.create_unique(m2m_table_name, ['host_id', 'inventorysource_id'])
    # Adding M2M table for field projects on 'Organization'
    m2m_table_name = db.shorten_name(u'main_organization_projects')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('organization', models.ForeignKey(orm['main.organization'], null=False)),
        ('project', models.ForeignKey(orm['main.project'], null=False))
    ))
    db.create_unique(m2m_table_name, ['organization_id', 'project_id'])
    # Adding M2M table for field projects on 'Team'
    m2m_table_name = db.shorten_name(u'main_team_projects')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('team', models.ForeignKey(orm['main.team'], null=False)),
        ('project', models.ForeignKey(orm['main.project'], null=False))
    ))
    db.create_unique(m2m_table_name, ['team_id', 'project_id'])
    # Adding field 'Permission.project'
    db.add_column(u'main_permission', 'project',
                  self.gf('django.db.models.fields.related.ForeignKey')(related_name='permissions', null=True, to=orm['main.Project'], on_delete=models.SET_NULL, blank=True),
                  keep_default=False)
    # Adding field 'JobHostSummary.job'
    db.add_column(u'main_jobhostsummary', 'job',
                  self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='job_host_summaries', null=True, to=orm['main.Job']),
                  keep_default=False)
    # Changing field 'JobHostSummary.new_job'
    db.alter_column(u'main_jobhostsummary', 'new_job_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['main.JobNew']))
    # Adding unique constraint on 'JobHostSummary', fields ['job', 'host']
    db.create_unique(u'main_jobhostsummary', ['job_id', 'host_id'])
    # Adding M2M table for field inventory_sources on 'Group'
    m2m_table_name = db.shorten_name(u'main_group_inventory_sources')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('group', models.ForeignKey(orm['main.group'], null=False)),
        ('inventorysource', models.ForeignKey(orm['main.inventorysource'], null=False))
    ))
    db.create_unique(m2m_table_name, ['group_id', 'inventorysource_id'])
    # Adding field 'JobEvent.job'
    db.add_column(u'main_jobevent', 'job',
                  self.gf('django.db.models.fields.related.ForeignKey')(default=None, related_name='job_events', null=True, to=orm['main.Job']),
                  keep_default=False)
    # Changing field 'JobEvent.new_job'
    db.alter_column(u'main_jobevent', 'new_job_id', self.gf('django.db.models.fields.related.ForeignKey')(null=True, to=orm['main.JobNew']))
    # Adding M2M table for field inventory_update on 'ActivityStream'
    m2m_table_name = db.shorten_name(u'main_activitystream_inventory_update')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('activitystream', models.ForeignKey(orm['main.activitystream'], null=False)),
        ('inventoryupdate', models.ForeignKey(orm['main.inventoryupdate'], null=False))
    ))
    db.create_unique(m2m_table_name, ['activitystream_id', 'inventoryupdate_id'])
    # Adding M2M table for field project_update on 'ActivityStream'
    m2m_table_name = db.shorten_name(u'main_activitystream_project_update')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('activitystream', models.ForeignKey(orm['main.activitystream'], null=False)),
        ('projectupdate', models.ForeignKey(orm['main.projectupdate'], null=False))
    ))
    db.create_unique(m2m_table_name, ['activitystream_id', 'projectupdate_id'])
    # Adding M2M table for field inventory_source on 'ActivityStream'
    m2m_table_name = db.shorten_name(u'main_activitystream_inventory_source')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('activitystream', models.ForeignKey(orm['main.activitystream'], null=False)),
        ('inventorysource', models.ForeignKey(orm['main.inventorysource'], null=False))
    ))
    db.create_unique(m2m_table_name, ['activitystream_id', 'inventorysource_id'])
    # Adding M2M table for field job_template on 'ActivityStream'
    m2m_table_name = db.shorten_name(u'main_activitystream_job_template')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('activitystream', models.ForeignKey(orm['main.activitystream'], null=False)),
        ('jobtemplate', models.ForeignKey(orm['main.jobtemplate'], null=False))
    ))
    db.create_unique(m2m_table_name, ['activitystream_id', 'jobtemplate_id'])
    # Adding M2M table for field job on 'ActivityStream'
    m2m_table_name = db.shorten_name(u'main_activitystream_job')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('activitystream', models.ForeignKey(orm['main.activitystream'], null=False)),
        ('job', models.ForeignKey(orm['main.job'], null=False))
    ))
    db.create_unique(m2m_table_name, ['activitystream_id', 'job_id'])
    # Adding M2M table for field project on 'ActivityStream'
    m2m_table_name = db.shorten_name(u'main_activitystream_project')
    db.create_table(m2m_table_name, (
        ('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
        ('activitystream', models.ForeignKey(orm['main.activitystream'], null=False)),
        ('project', models.ForeignKey(orm['main.project'], null=False))
    ))
    db.create_unique(m2m_table_name, ['activitystream_id', 'project_id'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'main.activitystream': {
'Meta': {'object_name': 'ActivityStream'},
'actor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'activity_stream'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'changes': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'credential': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Credential']", 'symmetrical': 'False', 'blank': 'True'}),
'group': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'host': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Host']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Inventory']", 'symmetrical': 'False', 'blank': 'True'}),
'new_inventory_source': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.InventorySourceNew']", 'symmetrical': 'False', 'blank': 'True'}),
'new_inventory_update': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.InventoryUpdateNew']", 'symmetrical': 'False', 'blank': 'True'}),
'new_job': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.JobNew']", 'symmetrical': 'False', 'blank': 'True'}),
'new_job_template': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.JobTemplateNew']", 'symmetrical': 'False', 'blank': 'True'}),
'new_project': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.ProjectNew']", 'symmetrical': 'False', 'blank': 'True'}),
'new_project_update': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.ProjectUpdateNew']", 'symmetrical': 'False', 'blank': 'True'}),
'object1': ('django.db.models.fields.TextField', [], {}),
'object2': ('django.db.models.fields.TextField', [], {}),
'object_relationship_type': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'operation': ('django.db.models.fields.CharField', [], {'max_length': '13'}),
'organization': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Organization']", 'symmetrical': 'False', 'blank': 'True'}),
'permission': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'schedule': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Schedule']", 'symmetrical': 'False', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['main.Team']", 'symmetrical': 'False', 'blank': 'True'}),
'timestamp': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'unified_job': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'activity_stream_as_unified_job+'", 'blank': 'True', 'to': "orm['main.UnifiedJob']"}),
'unified_job_template': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'activity_stream_as_unified_job_template+'", 'blank': 'True', 'to': "orm['main.UnifiedJobTemplate']"}),
'user': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.User']", 'symmetrical': 'False', 'blank': 'True'})
},
'main.authtoken': {
'Meta': {'object_name': 'AuthToken'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'expires': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40', 'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'request_hash': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'auth_tokens'", 'to': u"orm['auth.User']"})
},
'main.credential': {
'Meta': {'unique_together': "[('user', 'team', 'kind', 'name')]", 'object_name': 'Credential'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cloud': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'credential\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kind': ('django.db.models.fields.CharField', [], {'default': "'ssh'", 'max_length': '32'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'credential\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'ssh_key_data': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'ssh_key_path': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'ssh_key_unlock': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'sudo_password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'sudo_username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'credentials'", 'null': 'True', 'blank': 'True', 'to': "orm['main.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'credentials'", 'null': 'True', 'blank': 'True', 'to': u"orm['auth.User']"}),
'username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'vault_password': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'})
},
'main.group': {
'Meta': {'unique_together': "(('name', 'inventory'),)", 'object_name': 'Group'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'group\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'groups_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'has_active_failures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_inventory_sources': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hosts': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'groups'", 'blank': 'True', 'to': "orm['main.Host']"}),
'hosts_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'groups'", 'to': "orm['main.Inventory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'group\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'new_inventory_sources': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'groups'", 'symmetrical': 'False', 'to': "orm['main.InventorySourceNew']"}),
'parents': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'children'", 'blank': 'True', 'to': "orm['main.Group']"}),
'total_groups': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_hosts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'variables': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'main.host': {
'Meta': {'unique_together': "(('name', 'inventory'),)", 'object_name': 'Host'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'host\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'has_active_failures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_inventory_sources': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'instance_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hosts'", 'to': "orm['main.Inventory']"}),
'last_job_host_summary': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'hosts_as_last_job_summary+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.JobHostSummary']", 'blank': 'True', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'host\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'new_inventory_sources': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'hosts'", 'symmetrical': 'False', 'to': "orm['main.InventorySourceNew']"}),
'new_last_job': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'hosts_as_last_job+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.JobNew']"}),
'variables': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'main.inventory': {
'Meta': {'unique_together': "[('name', 'organization')]", 'object_name': 'Inventory'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'inventory\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'groups_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'has_active_failures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'has_inventory_sources': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hosts_with_active_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory_sources_with_failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'inventory\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventories'", 'to': "orm['main.Organization']"}),
'total_groups': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_hosts': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'total_inventory_sources': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'variables': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'})
},
'main.inventorysourcenew': {
'Meta': {'object_name': 'InventorySourceNew', '_ormbases': ['main.UnifiedJobTemplate']},
'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventorysourcenews'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'group': ('awx.main.fields.AutoOneToOneField', [], {'default': 'None', 'related_name': "'new_inventory_source'", 'unique': 'True', 'null': 'True', 'to': "orm['main.Group']"}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'new_inventory_sources'", 'null': 'True', 'to': "orm['main.Inventory']"}),
'overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'overwrite_vars': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'source_path': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'source_regions': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'source_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'unifiedjobtemplate_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJobTemplate']", 'unique': 'True', 'primary_key': 'True'}),
'update_cache_timeout': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'update_on_launch': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
'main.inventoryupdatenew': {
'Meta': {'object_name': 'InventoryUpdateNew', '_ormbases': ['main.UnifiedJob']},
'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventoryupdatenews'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'inventory_source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'inventory_updates'", 'to': "orm['main.InventorySourceNew']"}),
'license_error': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'overwrite_vars': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'source': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'blank': 'True'}),
'source_path': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'source_regions': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'source_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'unifiedjob_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJob']", 'unique': 'True', 'primary_key': 'True'})
},
'main.jobevent': {
'Meta': {'ordering': "('pk',)", 'object_name': 'JobEvent'},
'changed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'event': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'event_data': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'job_events_as_primary_host'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Host']"}),
'hosts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'job_events'", 'symmetrical': 'False', 'to': "orm['main.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'new_job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'new_job_events'", 'to': "orm['main.JobNew']"}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'children'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.JobEvent']"}),
'play': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'role': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'task': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'})
},
'main.jobhostsummary': {
'Meta': {'ordering': "('-pk',)", 'unique_together': "[('new_job', 'host')]", 'object_name': 'JobHostSummary'},
'changed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'dark': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'failures': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'host': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'job_host_summaries'", 'to': "orm['main.Host']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'new_job': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'new_job_host_summaries'", 'to': "orm['main.JobNew']"}),
'ok': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'processed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'skipped': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'main.jobnew': {
'Meta': {'object_name': 'JobNew', '_ormbases': ['main.UnifiedJob']},
'cloud_credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobnews_as_cloud_credential+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobnews'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'extra_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'forks': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'hosts': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'jobnews'", 'symmetrical': 'False', 'through': "orm['main.JobHostSummary']", 'to': "orm['main.Host']"}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobnews'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Inventory']"}),
'job_tags': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'job_template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.JobTemplateNew']", 'blank': 'True', 'null': 'True'}),
'job_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'limit': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'playbook': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobs'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.ProjectNew']"}),
u'unifiedjob_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJob']", 'unique': 'True', 'primary_key': 'True'}),
'verbosity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'})
},
'main.jobtemplatenew': {
'Meta': {'object_name': 'JobTemplateNew', '_ormbases': ['main.UnifiedJobTemplate']},
'cloud_credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobtemplatenews_as_cloud_credential+'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobtemplatenews'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'extra_vars': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'forks': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'}),
'host_config_key': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'jobtemplatenews'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Inventory']"}),
'job_tags': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'job_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'limit': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'playbook': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'job_templates'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.ProjectNew']"}),
u'unifiedjobtemplate_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJobTemplate']", 'unique': 'True', 'primary_key': 'True'}),
'verbosity': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'blank': 'True'})
},
'main.organization': {
'Meta': {'object_name': 'Organization'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'admins': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'admin_of_organizations'", 'blank': 'True', 'to': u"orm['auth.User']"}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'organization\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'organization\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'new_projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': "orm['main.ProjectNew']"}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'organizations'", 'blank': 'True', 'to': u"orm['auth.User']"})
},
'main.permission': {
'Meta': {'object_name': 'Permission'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'permission\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'inventory': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Inventory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'permission\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'new_project': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.ProjectNew']"}),
'permission_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'team': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Team']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'permissions'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"})
},
'main.profile': {
'Meta': {'object_name': 'Profile'},
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ldap_dn': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'user': ('awx.main.fields.AutoOneToOneField', [], {'related_name': "'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
'main.projectnew': {
'Meta': {'object_name': 'ProjectNew', '_ormbases': ['main.UnifiedJobTemplate']},
'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projectnews'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'local_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'scm_branch': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
'scm_clean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_delete_on_next_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_delete_on_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'blank': 'True'}),
'scm_update_cache_timeout': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'scm_update_on_launch': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
u'unifiedjobtemplate_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJobTemplate']", 'unique': 'True', 'primary_key': 'True'})
},
'main.projectupdatenew': {
'Meta': {'object_name': 'ProjectUpdateNew', '_ormbases': ['main.UnifiedJob']},
'credential': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projectupdatenews'", 'on_delete': 'models.SET_NULL', 'default': 'None', 'to': "orm['main.Credential']", 'blank': 'True', 'null': 'True'}),
'local_path': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'blank': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'project_updates'", 'to': "orm['main.ProjectNew']"}),
'scm_branch': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'blank': 'True'}),
'scm_clean': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_delete_on_update': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'scm_type': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '8', 'blank': 'True'}),
'scm_url': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
u'unifiedjob_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['main.UnifiedJob']", 'unique': 'True', 'primary_key': 'True'})
},
'main.schedule': {
'Meta': {'object_name': 'Schedule'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'schedule\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'dtend': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'dtstart': ('django.db.models.fields.DateTimeField', [], {}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'schedule\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '512'}),
'next_run': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'rrule': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'unified_job_template': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'schedules'", 'to': "orm['main.UnifiedJobTemplate']"})
},
'main.team': {
'Meta': {'unique_together': "[('organization', 'name')]", 'object_name': 'Team'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'team\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'team\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'new_projects': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'teams'", 'blank': 'True', 'to': "orm['main.ProjectNew']"}),
'organization': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'teams'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Organization']"}),
'users': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'teams'", 'blank': 'True', 'to': u"orm['auth.User']"})
},
'main.unifiedjob': {
'Meta': {'object_name': 'UnifiedJob'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'cancel_flag': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'celery_task_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'unifiedjob\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'dependent_jobs': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'dependent_jobs_rel_+'", 'to': "orm['main.UnifiedJob']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'elapsed': ('django.db.models.fields.DecimalField', [], {'max_digits': '12', 'decimal_places': '3'}),
'failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'finished': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job_args': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'job_cwd': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'blank': 'True'}),
'job_env': ('jsonfield.fields.JSONField', [], {'default': '{}', 'blank': 'True'}),
'launch_type': ('django.db.models.fields.CharField', [], {'default': "'manual'", 'max_length': '20'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'unifiedjob\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'old_pk': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_main.unifiedjob_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'result_stdout_file': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'result_stdout_text': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'result_traceback': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'schedule': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['main.Schedule']", 'null': 'True', 'on_delete': 'models.SET_NULL'}),
'start_args': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'started': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'new'", 'max_length': '20'}),
'unified_job_template': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'unifiedjob_unified_jobs'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.UnifiedJobTemplate']"})
},
'main.unifiedjobtemplate': {
'Meta': {'unique_together': "[('polymorphic_ctype', 'name')]", 'object_name': 'UnifiedJobTemplate'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'unifiedjobtemplate\', \'app_label\': \'main\'}(class)s_created+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'current_job': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'unifiedjobtemplate_as_current_job+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.UnifiedJob']"}),
'description': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'has_schedules': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_job': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'unifiedjobtemplate_as_last_job+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.UnifiedJob']"}),
'last_job_failed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_job_run': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'default': 'None'}),
'modified_by': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': '"{\'class\': \'unifiedjobtemplate\', \'app_label\': \'main\'}(class)s_modified+"', 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': u"orm['auth.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
'next_job_run': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'next_schedule': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'related_name': "'unifiedjobtemplate_as_next_schedule+'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['main.Schedule']"}),
'old_pk': ('django.db.models.fields.PositiveIntegerField', [], {'default': 'None', 'null': 'True'}),
'polymorphic_ctype': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'polymorphic_main.unifiedjobtemplate_set'", 'null': 'True', 'to': u"orm['contenttypes.ContentType']"}),
'status': ('django.db.models.fields.CharField', [], {'default': "'ok'", 'max_length': '32'})
},
u'taggit.tag': {
'Meta': {'object_name': 'Tag'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100'})
},
u'taggit.taggeditem': {
'Meta': {'object_name': 'TaggedItem'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_tagged_items'", 'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'taggit_taggeditem_items'", 'to': u"orm['taggit.Tag']"})
}
}
# South migration attribute: apps whose model definitions are fully frozen
# in the `models` dict above (used by South to detect schema drift).
complete_apps = ['main']
| 96.711055 | 274 | 0.613403 | 8,557 | 76,982 | 5.369522 | 0.034942 | 0.083923 | 0.14656 | 0.209372 | 0.902975 | 0.884476 | 0.867957 | 0.844081 | 0.783098 | 0.741811 | 0 | 0.006405 | 0.15831 | 76,982 | 795 | 275 | 96.832704 | 0.702709 | 0.026669 | 0 | 0.431232 | 0 | 0 | 0.540311 | 0.295723 | 0 | 0 | 0 | 0 | 0 | 1 | 0.002865 | false | 0.005731 | 0.005731 | 0 | 0.012894 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
3c48a1377c475042053bc5d6b388e0ca58a48af7 | 1,300 | py | Python | bucket_34/gnatstudio/patches/patch-share_plug-ins_dispatching.py | jrmarino/ravensource | 91d599fd1f2af55270258d15e72c62774f36033e | [
"FTL"
] | 17 | 2017-04-22T21:53:52.000Z | 2021-01-21T16:57:55.000Z | bucket_34/gnatstudio/patches/patch-share_plug-ins_dispatching.py | jrmarino/ravensource | 91d599fd1f2af55270258d15e72c62774f36033e | [
"FTL"
] | 186 | 2017-09-12T20:46:52.000Z | 2021-11-27T18:15:14.000Z | bucket_34/gnatstudio/patches/patch-share_plug-ins_dispatching.py | jrmarino/ravensource | 91d599fd1f2af55270258d15e72c62774f36033e | [
"FTL"
] | 74 | 2017-09-06T14:48:01.000Z | 2021-08-28T02:48:27.000Z | --- share/plug-ins/dispatching.py.orig 2021-06-15 05:19:41 UTC
+++ share/plug-ins/dispatching.py
@@ -39,11 +39,8 @@ class Dispatching_Highlighter(Location_H
GPS.Hook("file_edited").add(self.__on_file_edited)
GPS.Hook("file_changed_on_disk").add(self.__on_file_edited)
- if GPS.Logger("ENTITIES.SQLITE").active:
- GPS.Hook("xref_updated").add(self.__on_compilation_finished)
- else:
- GPS.Hook("compilation_finished").add(
- self.__on_compilation_finished)
+ GPS.Hook("compilation_finished").add(
+ self.__on_compilation_finished)
def __del__(self):
Location_Highlighter.__del__(self)
@@ -51,11 +48,8 @@ class Dispatching_Highlighter(Location_H
GPS.Hook("file_edited").remove(self.__on_file_edited)
GPS.Hook("file_changed_on_disk").remove(self.__on_file_edited)
- if GPS.Logger("ENTITIES.SQLITE").active:
- GPS.Hook("xref_updated").remove(self.__on_compilation_finished)
- else:
- GPS.Hook("compilation_finished").remove(
- self.__on_compilation_finished)
+ GPS.Hook("compilation_finished").remove(
+ self.__on_compilation_finished)
def __on_preferences_changed(self, hook):
changed = False
| 41.935484 | 76 | 0.66 | 159 | 1,300 | 4.949686 | 0.289308 | 0.088945 | 0.129606 | 0.190597 | 0.865311 | 0.759848 | 0.759848 | 0.759848 | 0.759848 | 0.404066 | 0 | 0.027237 | 0.209231 | 1,300 | 30 | 77 | 43.333333 | 0.738327 | 0 | 0 | 0.307692 | 0 | 0 | 0.150769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
3c5b6d71bc1913ed1bb6afa217d2deb0e7169cdf | 8,172 | py | Python | tests/test_main.py | bradfox2/nrc-scrape | a4b564c7230cee55984576564ef699d82b0e6744 | [
"MIT"
] | 1 | 2020-02-25T05:05:05.000Z | 2020-02-25T05:05:05.000Z | tests/test_main.py | bradfox2/nrc-scrape | a4b564c7230cee55984576564ef699d82b0e6744 | [
"MIT"
] | null | null | null | tests/test_main.py | bradfox2/nrc-scrape | a4b564c7230cee55984576564ef699d82b0e6744 | [
"MIT"
] | null | null | null | import main
from main import build_nrc_event_report_url, generate_nrc_event_report_urls, EventNotificationReport, get_text_after_tag, get_text_without_tag
import calendar
import re
import typing
from typing import List
import requests
from bs4 import BeautifulSoup, NavigableString, Tag
from requests.exceptions import HTTPError
import pytest
# Known-good daily NRC event notification report pages; used both directly in
# individual tests and as parametrize inputs for the report-parsing test.
urls = ['https://www.nrc.gov/reading-rm/doc-collections/event-status/event/2005/20050606en.html',
        'https://www.nrc.gov/reading-rm/doc-collections/event-status/event/2017/20171129en.html',
        'https://www.nrc.gov/reading-rm/doc-collections/event-status/event/2018/20181112en.html']
@pytest.fixture
def headers():
    """Browser-like request headers so nrc.gov serves pages to the scraper."""
    return {
        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36'}
def test_build_nrc_event_report_url():
    """The builder embeds the year twice and zero-pads month and day."""
    cases = [
        ((2004, 12, 30),
         'https://www.nrc.gov/reading-rm/doc-collections/event-status/event/2004/20041230en.html'),
        ((2004, 2, 3),
         'https://www.nrc.gov/reading-rm/doc-collections/event-status/event/2004/20040203en.html'),
    ]
    for args, expected in cases:
        assert build_nrc_event_report_url(*args) == expected
@pytest.mark.parametrize('url', urls)
def test_event_notification_report_whole(url, headers):
    """Every known-good report URL parses into an EventNotificationReport."""
    report = EventNotificationReport.from_url(url, headers)
    assert isinstance(report, EventNotificationReport)
def test_event_notification_report_class_2(headers):
    """The 2017-11-29 report yields exactly four events with a stable str()."""
    report_url = 'https://www.nrc.gov/reading-rm/doc-collections/event-status/event/2017/20171129en.html'
    report = EventNotificationReport.from_url(report_url, headers)
    assert len(report.events) == 4
    expected = "Event Notification Report from 2017-11-29. 4 events, numbers 53079, 53080, 53082, 53095."
    assert str(report) == expected
def test_generate_er_urls():
    """Generating all report URLs for 2003-2019 yields the expected count.

    Bug fix: the original body evaluated ``len(...) == 5263`` and discarded
    the result, so the test passed unconditionally. The comparison is now
    asserted so a count regression actually fails the test.
    """
    assert len(generate_nrc_event_report_urls(2003, 2019)) == 5263
def test_generate_er_urls_smart():
    """With only_known=True the generator still returns a plain list."""
    known_only = generate_nrc_event_report_urls(2003, 2019, only_known=True)
    assert isinstance(known_only, list)
def test_get_text_without_tag():
    """Splitting a <td>'s text on <br><br> separators yields one string per
    paragraph, with single in-text newlines preserved inside a paragraph."""
    # make sure we can parse double br tags as in
    # <html>
    # <br>
    # <br>
    # text
    # <br>
    # <br>
    # Fixture: a real NRC event-report table cell whose narrative paragraphs
    # are separated by <br><br> pairs (including a retraction section).
    html = """<table border="1" cellpadding="3" cellspacing="0" width="98%">
    <tr>
    <td align="left" scope="row"> CONTROL ROOM ENVELOPE INOPERABLE DUE TO DOOR HANDLE DETACHING<br><br>"On April 11, 2019, at 0200 CDT the shift operating crew declared the control room envelope inoperable in accordance with Technical Specification (TS) 3.7.6.1 due to the door handle for Door 86 (H&V Airlock Access Door) being detached. Operations entered TS 3.7.6.1 action b, which requires that with one or more control room emergency air filtration trains inoperable due to inoperable control room envelope boundary in MODES 1, 2, 3, or 4, then: 1. Immediately initiate action to implement mitigating actions; 2. Within 24 hours, verify mitigating actions ensure control room envelope occupant exposures to radiological, chemical, and smoke hazards will not exceed limits; and 3. Within 90 days, restore the control room envelope boundary to OPERABLE status. Action b.1 was completed by sealing the hole in Door 86 at 0232 CDT. This event is reportable pursuant to 10 CFR 50.72(b)(3)(v)(D), 'event or condition that could have prevented fulfilment of a safety function of structures or systems that are needed to (D) mitigate the consequences of an accident,' due to the control room envelope being inoperable. <br><br>"The licensee notified the NRC Resident."<br><br>* * * RETRACTION ON 5/17/19 AT 1620 EDT FROM MARIA ZAMBER TO BETHANY CECERE * * *<br><br>"This is a Non-Emergency Notification from Waterford 3. This is a retraction of EN 53991. This event was evaluated in accordance with the corrective action process. The original operability determination of inoperable was made based on a conservative evaluation that with the door handle for Door 86 (Heating and Ventilation Airlock Access Door) being detached, the control room envelope boundary could not perform its safety function. A more detailed engineering evaluation was subsequently performed. 
This shows that the condition of the door handle being detached is bounded by the most recently performed non-pressurized radiological tracer gas test, as the control room envelope differential pressure was maintained more positive with the detached door handle as compared to that observed during the test. Additionally, the control room envelope differential pressure trends showed no discernable change between the two conditions of the door handle detached or with the opening taped over (resulting in an air tight seal). This information supports the conclusion that with the door handle for Door 86 being detached, the control room envelope boundary remained operable and did not constitute a condition that could have prevented fulfillment of a safety function of structures or systems that are needed to mitigate the consequences of an accident; therefore, this event is not reportable per 10 CFR 50.72(b)(3)(v)(D).<br><br>"The licensee notified the NRC Resident Inspector."<br><br>Notified R4DO (Proulx). </br></br></br></br></br></br></br></br></br></br></br></br></td>
    </tr>
    </table>"""
    html = BeautifulSoup(html, 'html.parser')
    # Expected output: the cell's text split into one entry per <br><br>
    # paragraph; the newline inside the last retraction paragraph survives.
    assert get_text_without_tag(html, 'br') == ['CONTROL ROOM ENVELOPE INOPERABLE DUE TO DOOR HANDLE DETACHING', '"On April 11, 2019, at 0200 CDT the shift operating crew declared the control room envelope inoperable in accordance with Technical Specification (TS) 3.7.6.1 due to the door handle for Door 86 (H&V Airlock Access Door) being detached. Operations entered TS 3.7.6.1 action b, which requires that with one or more control room emergency air filtration trains inoperable due to inoperable control room envelope boundary in MODES 1, 2, 3, or 4, then: 1. Immediately initiate action to implement mitigating actions; 2. Within 24 hours, verify mitigating actions ensure control room envelope occupant exposures to radiological, chemical, and smoke hazards will not exceed limits; and 3. Within 90 days, restore the control room envelope boundary to OPERABLE status. Action b.1 was completed by sealing the hole in Door 86 at 0232 CDT. This event is reportable pursuant to 10 CFR 50.72(b)(3)(v)(D), \'event or condition that could have prevented fulfilment of a safety function of structures or systems that are needed to (D) mitigate the consequences of an accident,\' due to the control room envelope being inoperable.', '"The licensee notified the NRC Resident."', '* * * RETRACTION ON 5/17/19 AT 1620 EDT FROM MARIA ZAMBER TO BETHANY CECERE * * *', '"This is a Non-Emergency Notification from Waterford 3. This is a retraction of EN 53991. This event was evaluated in accordance with the corrective action process. The original operability determination of inoperable was made based on a conservative evaluation that with the door handle for Door 86 (Heating and Ventilation Airlock Access Door) being detached, the control room envelope boundary could not perform its safety function. A more detailed engineering evaluation was subsequently performed. 
This shows that the condition of the door handle being detached is bounded by the most recently performed non-pressurized radiological tracer gas test, as the control room envelope differential pressure was maintained more positive with the detached door handle as compared to that observed during the test. Additionally, the control room envelope differential pressure trends showed no discernable change between the two conditions of the door handle detached or with the opening taped over (resulting in an air tight seal). This information supports the conclusion that with the door handle for Door 86 being detached, the control room envelope boundary remained operable and did not constitute a condition that could have prevented fulfillment of a safety function of structures or systems that are needed to mitigate the consequences of an accident; therefore, this event is not reportable per 10 CFR 50.72(b)(3)(v)(D).', '"The licensee notified the NRC Resident Inspector."', 'Notified R4DO (Proulx).']
| 111.945205 | 2,963 | 0.77374 | 1,264 | 8,172 | 4.952532 | 0.246835 | 0.038658 | 0.060703 | 0.049201 | 0.832268 | 0.804792 | 0.799521 | 0.770288 | 0.758466 | 0.741534 | 0 | 0.045777 | 0.155286 | 8,172 | 72 | 2,964 | 113.5 | 0.861075 | 0.009422 | 0 | 0.044444 | 1 | 0.266667 | 0.800668 | 0.015086 | 0 | 0 | 0 | 0 | 0.155556 | 1 | 0.155556 | false | 0 | 0.222222 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 |
3c77f4011b9975da909dcb64038fc6a3a1656ad0 | 120 | py | Python | texasholdem/texasholdem/environment.py | stricoff92/games-hub | 23bbd308fc12e214abd8813607ce92fd0a20fa8c | [
"MIT"
] | null | null | null | texasholdem/texasholdem/environment.py | stricoff92/games-hub | 23bbd308fc12e214abd8813607ce92fd0a20fa8c | [
"MIT"
] | 5 | 2021-03-19T04:38:06.000Z | 2021-09-22T19:10:42.000Z | texasholdem/texasholdem/environment.py | stricoff92/games-hub | 23bbd308fc12e214abd8813607ce92fd0a20fa8c | [
"MIT"
] | null | null | null |
from django.conf import settings
def is_testing():
    """Return the settings.IS_TESTING flag value, or False if it is unset."""
    return getattr(settings, "IS_TESTING", False)
| 20 | 66 | 0.775 | 17 | 120 | 5.294118 | 0.647059 | 0.3 | 0.377778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.141667 | 120 | 5 | 67 | 24 | 0.873786 | 0 | 0 | 0 | 0 | 0 | 0.084034 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 8 |
3c92f0e6b7451147dc2451fc581e07754885c560 | 7,176 | py | Python | tests/ampligraph/latent_features/test_models.py | vindex10/AmpliGraph | 1aa91a4b32081bcaad4e7386032b9ac85deb99d6 | [
"Apache-2.0"
] | 2 | 2019-06-24T23:21:28.000Z | 2020-10-28T02:57:59.000Z | tests/ampligraph/latent_features/test_models.py | vindex10/AmpliGraph | 1aa91a4b32081bcaad4e7386032b9ac85deb99d6 | [
"Apache-2.0"
] | null | null | null | tests/ampligraph/latent_features/test_models.py | vindex10/AmpliGraph | 1aa91a4b32081bcaad4e7386032b9ac85deb99d6 | [
"Apache-2.0"
] | null | null | null | import numpy as np
import pytest
from ampligraph.latent_features import TransE, DistMult, ComplEx, HolE
from ampligraph.datasets import load_wn18
def test_fit_predict_TransE_early_stopping_with_filter():
    """TransE trains on WN18 with MRR early stopping filtered on all splits."""
    X = load_wn18()
    model = TransE(batches_count=1, seed=555, epochs=7, k=50,
                   loss='pairwise', loss_params={'margin': 5},
                   verbose=True, optimizer='adagrad', optimizer_params={'lr': 0.1})
    # Filter against every known triple so valid/test facts are not treated
    # as corruptions when computing the early-stopping MRR.
    X_filter = np.concatenate((X['train'], X['valid'], X['test']))
    early_stopping_params = {
        'x_valid': X['valid'][::100],
        'criteria': 'mrr',
        'x_filter': X_filter,
        'stop_interval': 2,
        'burn_in': 1,
        'check_interval': 2,
    }
    model.fit(X['train'], True, early_stopping_params)
    y, _ = model.predict(X['test'][:1], get_ranks=True)
    print(y)
def test_fit_predict_TransE_early_stopping_without_filter():
    """TransE trains on WN18 with raw (unfiltered) MRR early stopping."""
    X = load_wn18()
    model = TransE(batches_count=1, seed=555, epochs=7, k=50,
                   loss='pairwise', loss_params={'margin': 5},
                   verbose=True, optimizer='adagrad', optimizer_params={'lr': 0.1})
    early_stopping_params = {
        'x_valid': X['valid'][::100],
        'criteria': 'mrr',
        'stop_interval': 2,
        'burn_in': 1,
        'check_interval': 2,
    }
    model.fit(X['train'], True, early_stopping_params)
    y, _ = model.predict(X['test'][:1], get_ranks=True)
    print(y)
def test_fit_predict_transE():
    """A trained positive ('f','y','e') must outscore an unseen negative."""
    model = TransE(batches_count=1, seed=555, epochs=20, k=10,
                   loss='pairwise', loss_params={'margin': 5},
                   optimizer='adagrad', optimizer_params={'lr': 0.1})
    pairs = [('a', 'b'), ('b', 'a'), ('a', 'c'), ('c', 'a'),
             ('a', 'd'), ('c', 'd'), ('b', 'c'), ('f', 'e')]
    X = np.array([[s, 'y', o] for s, o in pairs])
    model.fit(X)
    y_pred, _ = model.predict(np.array([['f', 'y', 'e'], ['b', 'y', 'd']]),
                              get_ranks=True)
    print(y_pred)
    assert y_pred[0] > y_pred[1]
def test_fit_predict_DistMult():
    """DistMult: a trained positive must outscore an unseen negative."""
    model = DistMult(batches_count=2, seed=555, epochs=20, k=10,
                     loss='pairwise', loss_params={'margin': 5},
                     optimizer='adagrad', optimizer_params={'lr': 0.1})
    pairs = [('a', 'b'), ('b', 'a'), ('a', 'c'), ('c', 'a'),
             ('a', 'd'), ('c', 'd'), ('b', 'c'), ('f', 'e')]
    X = np.array([[s, 'y', o] for s, o in pairs])
    model.fit(X)
    y_pred, _ = model.predict(np.array([['f', 'y', 'e'], ['b', 'y', 'd']]),
                              get_ranks=True)
    print(y_pred)
    assert y_pred[0] > y_pred[1]
def test_fit_predict_CompleEx():
    """ComplEx with LP regularization: trained positive outscores a negative."""
    model = ComplEx(batches_count=1, seed=555, epochs=20, k=10,
                    loss='pairwise', loss_params={'margin': 1}, regularizer='LP',
                    regularizer_params={'lambda': 0.1, 'p': 2},
                    optimizer='adagrad', optimizer_params={'lr': 0.1})
    pairs = [('a', 'b'), ('b', 'a'), ('a', 'c'), ('c', 'a'),
             ('a', 'd'), ('c', 'd'), ('b', 'c'), ('f', 'e')]
    X = np.array([[s, 'y', o] for s, o in pairs])
    model.fit(X)
    y_pred, _ = model.predict(np.array([['f', 'y', 'e'], ['b', 'y', 'd']]),
                              get_ranks=True)
    print(y_pred)
    assert y_pred[0] > y_pred[1]
def test_fit_predict_HolE():
    """HolE with LP regularization: trained positive outscores a negative."""
    model = HolE(batches_count=1, seed=555, epochs=20, k=10,
                 loss='pairwise', loss_params={'margin': 1}, regularizer='LP',
                 regularizer_params={'lambda': 0.1, 'p': 2},
                 optimizer='adagrad', optimizer_params={'lr': 0.1})
    pairs = [('a', 'b'), ('b', 'a'), ('a', 'c'), ('c', 'a'),
             ('a', 'd'), ('c', 'd'), ('b', 'c'), ('f', 'e')]
    X = np.array([[s, 'y', o] for s, o in pairs])
    model.fit(X)
    y_pred, _ = model.predict(np.array([['f', 'y', 'e'], ['b', 'y', 'd']]),
                              get_ranks=True)
    print(y_pred)
    assert y_pred[0] > y_pred[1]
def test_retrain():
    """Refitting the same seeded model on the same data reproduces scores."""
    model = ComplEx(batches_count=1, seed=555, epochs=20, k=10,
                    loss='pairwise', loss_params={'margin': 1}, regularizer='LP',
                    regularizer_params={'lambda': 0.1, 'p': 2},
                    optimizer='adagrad', optimizer_params={'lr': 0.1})
    pairs = [('a', 'b'), ('b', 'a'), ('a', 'c'), ('c', 'a'),
             ('a', 'd'), ('c', 'd'), ('b', 'c'), ('f', 'e')]
    X = np.array([[s, 'y', o] for s, o in pairs])
    queries = np.array([['f', 'y', 'e'], ['b', 'y', 'd']])
    model.fit(X)
    y_pred_1st, _ = model.predict(queries, get_ranks=True)
    model.fit(X)
    y_pred_2nd, _ = model.predict(queries, get_ranks=True)
    np.testing.assert_array_equal(y_pred_1st, y_pred_2nd)
def test_fit_predict_wn18_TransE():
    """TransE trains for a few epochs on WN18 and scores one test triple."""
    X = load_wn18()
    model = TransE(batches_count=1, seed=555, epochs=5, k=100,
                   loss='pairwise', loss_params={'margin': 5},
                   verbose=True, optimizer='adagrad', optimizer_params={'lr': 0.1})
    model.fit(X['train'])
    y, _ = model.predict(X['test'][:1], get_ranks=True)
    print(y)
def test_missing_entity_ComplEx():
    """predict() raises ValueError for entities/relations unseen at fit time."""
    pairs = [('a', 'b'), ('b', 'a'), ('a', 'c'), ('c', 'a'),
             ('a', 'd'), ('c', 'd'), ('b', 'c'), ('f', 'e')]
    X = np.array([[s, 'y', o] for s, o in pairs])
    model = ComplEx(batches_count=1, seed=555, epochs=2, k=5)
    model.fit(X)
    # Unknown object, unknown relation, unknown subject — all must raise.
    for bad_triple in (['a', 'y', 'zzzzzzzzzzz'],
                       ['a', 'xxxxxxxxxx', 'e'],
                       ['zzzzzzzz', 'y', 'e']):
        with pytest.raises(ValueError):
            model.predict(bad_triple)
def test_fit_predict_wn18_ComplEx():
    """ComplEx trains for a few epochs on WN18 and scores one test triple."""
    X = load_wn18()
    model = ComplEx(batches_count=1, seed=555, epochs=5, k=100,
                    loss='pairwise', loss_params={'margin': 1}, regularizer='LP',
                    regularizer_params={'lambda': 0.1, 'p': 2},
                    optimizer='adagrad', optimizer_params={'lr': 0.1})
    model.fit(X['train'])
    # NOTE: unlike the sibling tests, the original keeps the full return
    # value here rather than unpacking (scores, ranks); preserved as-is.
    y = model.predict(X['test'][:1], get_ranks=True)
    print(y)
def test_lookup_embeddings():
    """get_embeddings accepts entity labels seen during fit without raising."""
    model = DistMult(batches_count=2, seed=555, epochs=20, k=10,
                     loss='pairwise', loss_params={'margin': 5},
                     optimizer='adagrad', optimizer_params={'lr': 0.1})
    pairs = [('a', 'b'), ('b', 'a'), ('a', 'c'), ('c', 'a'),
             ('a', 'd'), ('c', 'd'), ('b', 'c'), ('f', 'e')]
    model.fit(np.array([[s, 'y', o] for s, o in pairs]))
    model.get_embeddings(['a', 'b'], embedding_type='entity')
| 38.789189 | 109 | 0.443144 | 887 | 7,176 | 3.42841 | 0.107103 | 0.014469 | 0.013811 | 0.018415 | 0.865834 | 0.847419 | 0.834923 | 0.79316 | 0.774416 | 0.774416 | 0 | 0.035058 | 0.332219 | 7,176 | 184 | 110 | 39 | 0.599541 | 0 | 0 | 0.807692 | 0 | 0 | 0.09547 | 0 | 0 | 0 | 0 | 0 | 0.032051 | 1 | 0.070513 | false | 0 | 0.025641 | 0 | 0.096154 | 0.051282 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
3c996e54959e8ed1f25c71c4949c09230cfbbd5d | 2,059 | py | Python | semver.py | brandonstevens/compare-semantic-versions | 37a5ba08d550521d5810c022fbe1f98b4baee142 | [
"MIT"
] | null | null | null | semver.py | brandonstevens/compare-semantic-versions | 37a5ba08d550521d5810c022fbe1f98b4baee142 | [
"MIT"
] | null | null | null | semver.py | brandonstevens/compare-semantic-versions | 37a5ba08d550521d5810c022fbe1f98b4baee142 | [
"MIT"
] | null | null | null | class version:
major = 0
minor = 0
patch = 0
def __init__(self, version):
try:
versions = version.split('.', 3)
self.major = int(versions[0])
self.minor = int(versions[1])
self.patch = int(versions[2])
except ValueError as e:
raise RuntimeError("Versions must be in the form of X.Y.Z") from e
except IndexError as e:
raise RuntimeError("Versions must be in the form of X.Y.Z") from e
def equals(self, v2):
if self.major != v2.major:
return False
if self.minor != v2.minor:
return False
if self.patch != v2.patch:
return False
return True
def lt(self, v2):
if self.major > v2.major:
return True
elif self.major == v2.major:
if self.minor > v2.minor:
return True
elif self.minor == v2.minor:
if self.patch > v2.patch:
return True
else:
return False
else:
return False
else:
return False
def lte(self, v2):
if self.major > v2.major:
return True
elif self.major == v2.major:
if self.minor > v2.minor:
return True
elif self.minor == v2.minor:
if self.patch > v2.patch:
return True
elif self.patch == v2.patch:
return True
else:
return False
else:
return False
else:
return False
def satisfies(self, v2):
if self.major != v2.major:
return False
if self.minor != v2.minor:
return False
if v2.patch >= self.patch:
return True
else:
return False
def gt(self, v2):
if self.major < v2.major:
return True
elif self.major == v2.major:
if self.minor < v2.minor:
return True
elif self.minor == v2.minor:
if self.patch < v2.patch:
return True
else:
return False
else:
return False
else:
return False
def gte(self, v2):
if self.major < v2.major:
return True
elif self.major == v2.major:
if self.minor < v2.minor:
return True
elif self.minor == v2.minor:
if self.patch < v2.patch:
return True
elif self.patch == v2.patch:
return True
else:
return False
else:
return False
else:
return False
| 20.186275 | 69 | 0.626518 | 313 | 2,059 | 4.108626 | 0.146965 | 0.153966 | 0.151633 | 0.124417 | 0.823484 | 0.823484 | 0.783048 | 0.783048 | 0.783048 | 0.783048 | 0 | 0.027261 | 0.269548 | 2,059 | 101 | 70 | 20.386139 | 0.827793 | 0 | 0 | 0.789474 | 0 | 0 | 0.036425 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073684 | false | 0 | 0 | 0 | 0.473684 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
3c9e489a7690a8b051b3cf1704131c160840d6c7 | 133 | py | Python | utils.py | entwanne/pythonrpc | 611e76f2fe8d8d213859a67f30fcf3086988ce70 | [
"MIT"
] | 1 | 2017-11-16T10:06:53.000Z | 2017-11-16T10:06:53.000Z | utils.py | entwanne/pythonrpc | 611e76f2fe8d8d213859a67f30fcf3086988ce70 | [
"MIT"
] | null | null | null | utils.py | entwanne/pythonrpc | 611e76f2fe8d8d213859a67f30fcf3086988ce70 | [
"MIT"
] | null | null | null | def rpc_owner(rpc):
return object.__getattribute__(rpc, 'owner')
def rpc_id(rpc):
    """Fetch the raw 'id' attribute, bypassing any __getattribute__ override."""
    raw_get = object.__getattribute__
    return raw_get(rpc, 'id')
| 22.166667 | 48 | 0.721805 | 18 | 133 | 4.777778 | 0.388889 | 0.139535 | 0.348837 | 0.627907 | 0.697674 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142857 | 133 | 5 | 49 | 26.6 | 0.754386 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
b1ee1a9b8690b75118fab1e5924695c59d0b06f3 | 12,168 | py | Python | core/forms.py | EzekielCarvalho/costas_commerce_store | d7f00ffd5d5c7bbd0c1dcec9adaa4ddd70d19ea8 | [
"MIT"
] | null | null | null | core/forms.py | EzekielCarvalho/costas_commerce_store | d7f00ffd5d5c7bbd0c1dcec9adaa4ddd70d19ea8 | [
"MIT"
] | null | null | null | core/forms.py | EzekielCarvalho/costas_commerce_store | d7f00ffd5d5c7bbd0c1dcec9adaa4ddd70d19ea8 | [
"MIT"
] | null | null | null | # This is a python file having the main structure of the form used in the project. It is a Django form
# ref https://docs.djangoproject.com/en/3.2/topics/forms/
# ref https://docs.djangoproject.com/en/3.2/topics/forms/
from django import forms
from django.http import request
from django_countries.fields import CountryField
from django_countries.widgets import CountrySelectWidget
# Payment backends offered at checkout: (stored value, human-readable label).
# Bug fix: this was a set literal, so the radio options rendered in a
# nondeterministic order between processes; a tuple keeps the order stable.
PAYMENT_OPTIONS = (
    ('S', 'Stripe'),
    ('P', 'PayPal'),
)
class CheckoutForm(forms.Form):
    """Checkout form: shipping and billing address blocks plus payment choice.

    Address fields are required=False because the user may instead tick one
    of the "use default ..." checkboxes; the view is expected to validate
    whichever address source is actually used.
    """
    first_name = forms.CharField(required=True)
    last_name = forms.CharField(required=True)
    # Widget.attrs injects the placeholder into the rendered <input>.
    username = forms.CharField(widget=forms.TextInput(attrs={
        'placeholder': 'Enter your username'}))
    shipping_address = forms.CharField(required=False)
    shipping_address2 = forms.CharField(required=False)
    # CountryField/CountrySelectWidget come from django-countries; the CSS
    # classes match the Bootstrap markup previously inlined in the template.
    shipping_country = CountryField(blank_label='(select country)').formfield(required=False, widget=CountrySelectWidget(attrs={'class': 'custom-select d-block w-100'}))
    shipping_zip = forms.CharField(required=False)
    # NOTE(review): first_name/last_name/username are re-declared below; the
    # later declarations replace the earlier ones and affect field ordering.
    # Left untouched — confirm templates/views before deduplicating.
    first_name = forms.CharField(required=True)
    last_name = forms.CharField(required=True)
    username = forms.CharField(widget=forms.TextInput(attrs={
        'placeholder': 'Enter your username'}))
    billing_address = forms.CharField(required=False)
    billing_address2 = forms.CharField(required=False)
    billing_country = CountryField(blank_label='(select country)').formfield(required=False, widget=CountrySelectWidget(attrs={'class': 'custom-select d-block w-100'}))
    billing_zip = forms.CharField(required=False)
    # Checkbox flags consumed by the checkout view (semantics per field name).
    same_billing_address = forms.BooleanField(required=False)
    set_default_shipping = forms.BooleanField(required=False)
    use_default_shipping = forms.BooleanField(required=False)
    set_default_billing = forms.BooleanField(required=False)
    use_default_billing = forms.BooleanField(required=False)
    # Rendered as radio buttons; values come from PAYMENT_OPTIONS ('S'/'P').
    payment_option = forms.ChoiceField(widget=forms.RadioSelect, choices=PAYMENT_OPTIONS)
class CouponForm(forms.Form):
    """Promo-code entry form rendered inside the order summary snippet."""
    # Widget attrs mirror the Bootstrap input-group markup in
    # order_snippet.html (line 37): form-control class plus aria attributes.
    # NOTE(review): the aria-label "Recipient's username" looks copy-pasted
    # from a Bootstrap example — confirm before changing the rendered markup.
    code = forms.CharField(widget=forms.TextInput(attrs={
        'class': 'form-control',
        'placeholder': 'Promo code',
        'aria-label': 'Recipient\'s username',
        'aria-describedby': 'basic-addon2'
    }))
class RefundForm(forms.Form):
    """Refund request: order reference code, free-text reason, contact email."""
    reference_code = forms.CharField()
    # Textarea widget keeps the message box at 4 visible rows.
    message = forms.CharField(widget=forms.Textarea(attrs={
        'rows': 4
    }))
    email = forms.EmailField()
class PaymentForm(forms.Form):
    """Form posted by the Stripe payment flow.

    All fields are optional (required=False) because the client may pay
    with a previously saved card instead of submitting a new token.
    """

    stripeToken = forms.CharField(required=False)     # token from Stripe.js
    save = forms.BooleanField(required=False)         # save this card for later
    use_default = forms.BooleanField(required=False)  # charge the saved default card
5928dd141bef1d6f5501e41deea6b3784cdb94da | 658 | py | Python | models/RelationNetworks/relation_rcnn/symbols/__init__.py | SurajK7/kaggle-rsna18 | 0572708b503edf95f8304df2f92473b5a99c8cba | [
"MIT"
] | 1,091 | 2018-06-14T06:30:11.000Z | 2022-03-29T09:33:03.000Z | models/RelationNetworks/relation_rcnn/symbols/__init__.py | SurajK7/kaggle-rsna18 | 0572708b503edf95f8304df2f92473b5a99c8cba | [
"MIT"
] | 44 | 2018-06-15T09:27:16.000Z | 2021-12-15T08:02:06.000Z | models/RelationNetworks/relation_rcnn/symbols/__init__.py | SurajK7/kaggle-rsna18 | 0572708b503edf95f8304df2f92473b5a99c8cba | [
"MIT"
] | 206 | 2018-06-14T16:10:25.000Z | 2022-03-16T12:08:15.000Z | import resnet_v1_101_rcnn_attention_1024_pairwise_position_multi_head_16
import resnet_v1_101_rcnn_dcn_attention_1024_pairwise_position_multi_head_16
import resnet_v1_101_rcnn_attention_1024_pairwise_position_multi_head_16_learn_nms
import resnet_v1_101_rcnn_dcn_attention_1024_pairwise_position_multi_head_16_learn_nms
import resnet_v1_101_rcnn_fpn_attention_1024_pairwise_position_multi_head_16
import resnet_v1_101_rcnn_fpn_attention_1024_pairwise_position_multi_head_16_learn_nms
import resnet_v1_101_rcnn_learn_nms_1024_attention_1024_pairwise_position_multi_head_16
import resnet_v1_101_rcnn_dcn
import resnet_v1_101_rcnn_fpn
import resnet_v1_101_rcnn
| 59.818182 | 87 | 0.969605 | 114 | 658 | 4.77193 | 0.140351 | 0.220588 | 0.257353 | 0.3125 | 0.977941 | 0.939338 | 0.895221 | 0.895221 | 0.895221 | 0.895221 | 0 | 0.134796 | 0.030395 | 658 | 10 | 88 | 65.8 | 0.717868 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 12 |
3cd7027922a06e63e626f566eb31f1a7d15a041d | 196 | py | Python | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/sol/profiles/frame_profile_inputs_common.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 69 | 2021-12-16T01:34:09.000Z | 2022-03-31T08:27:39.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/sol/profiles/frame_profile_inputs_common.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 6 | 2022-01-12T18:22:08.000Z | 2022-03-25T10:19:27.000Z | platform/radio/efr32_multiphy_configurator/pyradioconfig/parts/sol/profiles/frame_profile_inputs_common.py | lmnotran/gecko_sdk | 2e82050dc8823c9fe0e8908c1b2666fb83056230 | [
"Zlib"
] | 21 | 2021-12-20T09:05:45.000Z | 2022-03-28T02:52:28.000Z | from pyradioconfig.parts.ocelot.profiles.frame_profile_inputs_common import frame_profile_inputs_common_ocelot
class frame_profile_inputs_common_sol(frame_profile_inputs_common_ocelot):
    """Sol-part frame profile inputs; inherits the Ocelot behavior unchanged."""
    pass
3ce52fc915bd92dda811181eaece96ab54eb4f08 | 71,853 | py | Python | pynos/versions/ver_7/ver_7_1_0/yang/brocade_port_profile.py | bdeetz/pynos | bd8a34e98f322de3fc06750827d8bbc3a0c00380 | [
"Apache-2.0"
] | 12 | 2015-09-21T23:56:09.000Z | 2018-03-30T04:35:32.000Z | pynos/versions/ver_7/ver_7_1_0/yang/brocade_port_profile.py | bdeetz/pynos | bd8a34e98f322de3fc06750827d8bbc3a0c00380 | [
"Apache-2.0"
] | 10 | 2016-09-15T19:03:27.000Z | 2017-07-17T23:38:01.000Z | pynos/versions/ver_7/ver_7_1_0/yang/brocade_port_profile.py | bdeetz/pynos | bd8a34e98f322de3fc06750827d8bbc3a0c00380 | [
"Apache-2.0"
] | 6 | 2015-08-14T08:05:23.000Z | 2022-02-03T15:33:54.000Z | #!/usr/bin/env python
import xml.etree.ElementTree as ET
class brocade_port_profile(object):
"""Auto generated class.
"""
def __init__(self, **kwargs):
    # Transport callback used to send every generated <config> document;
    # required — a missing 'callback' kwarg raises KeyError here.
    self._callback = kwargs.pop('callback')
def port_profile_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
name = ET.SubElement(port_profile, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def port_profile_allow_nonprofiledmacs(self, **kwargs):
    """Build <config> enabling port-profile/<name>/allow/nonprofiledmacs
    (empty leaf) and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    allow = ET.SubElement(port_profile, "allow")
    nonprofiledmacs = ET.SubElement(allow, "nonprofiledmacs")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_basic_basic(self, **kwargs):
    """Build <config> for port-profile/<name>/vlan-profile/switchport-basic/basic
    (empty leaf) and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport_basic = ET.SubElement(vlan_profile, "switchport-basic")
    basic = ET.SubElement(switchport_basic, "basic")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_mode_vlan_mode(self, **kwargs):
    """Build <config> setting .../vlan-profile/switchport/mode/vlan-mode
    from kwargs['vlan_mode'] and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    mode = ET.SubElement(switchport, "mode")
    vlan_mode = ET.SubElement(mode, "vlan-mode")
    vlan_mode.text = kwargs.pop('vlan_mode')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_access_vlan_name(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
vlan_profile = ET.SubElement(port_profile, "vlan-profile")
switchport = ET.SubElement(vlan_profile, "switchport")
access = ET.SubElement(switchport, "access")
vlan = ET.SubElement(access, "vlan")
name = ET.SubElement(vlan, "name")
name.text = kwargs.pop('name')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def port_profile_vlan_profile_switchport_access_mac_vlan_classification_access_vlan_access_vlan_id(self, **kwargs):
    """Build <config> for .../switchport/access-mac-vlan-classification/access/vlan,
    keyed by access-mac-address, setting access-vlan-id; return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    access_mac_vlan_classification = ET.SubElement(switchport, "access-mac-vlan-classification")
    access = ET.SubElement(access_mac_vlan_classification, "access")
    vlan = ET.SubElement(access, "vlan")
    access_mac_address_key = ET.SubElement(vlan, "access-mac-address")
    access_mac_address_key.text = kwargs.pop('access_mac_address')
    access_vlan_id = ET.SubElement(vlan, "access-vlan-id")
    access_vlan_id.text = kwargs.pop('access_vlan_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_access_mac_vlan_classification_access_vlan_access_mac_address(self, **kwargs):
    """Build <config> for .../switchport/access-mac-vlan-classification/access/vlan,
    keyed by access-vlan-id, setting access-mac-address; return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    access_mac_vlan_classification = ET.SubElement(switchport, "access-mac-vlan-classification")
    access = ET.SubElement(access_mac_vlan_classification, "access")
    vlan = ET.SubElement(access, "vlan")
    access_vlan_id_key = ET.SubElement(vlan, "access-vlan-id")
    access_vlan_id_key.text = kwargs.pop('access_vlan_id')
    access_mac_address = ET.SubElement(vlan, "access-mac-address")
    access_mac_address.text = kwargs.pop('access_mac_address')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_access_mac_group_vlan_classification_access_vlan_access_vlan_id(self, **kwargs):
    """Build <config> for .../switchport/access-mac-group-vlan-classification/access/vlan,
    keyed by access-mac-group, setting access-vlan-id; return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    access_mac_group_vlan_classification = ET.SubElement(switchport, "access-mac-group-vlan-classification")
    access = ET.SubElement(access_mac_group_vlan_classification, "access")
    vlan = ET.SubElement(access, "vlan")
    access_mac_group_key = ET.SubElement(vlan, "access-mac-group")
    access_mac_group_key.text = kwargs.pop('access_mac_group')
    access_vlan_id = ET.SubElement(vlan, "access-vlan-id")
    access_vlan_id.text = kwargs.pop('access_vlan_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_access_mac_group_vlan_classification_access_vlan_access_mac_group(self, **kwargs):
    """Build <config> for .../switchport/access-mac-group-vlan-classification/access/vlan,
    keyed by access-vlan-id, setting access-mac-group; return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    access_mac_group_vlan_classification = ET.SubElement(switchport, "access-mac-group-vlan-classification")
    access = ET.SubElement(access_mac_group_vlan_classification, "access")
    vlan = ET.SubElement(access, "vlan")
    access_vlan_id_key = ET.SubElement(vlan, "access-vlan-id")
    access_vlan_id_key.text = kwargs.pop('access_vlan_id')
    access_mac_group = ET.SubElement(vlan, "access-mac-group")
    access_mac_group.text = kwargs.pop('access_mac_group')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_trunk_allowed_vlan_all(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
vlan_profile = ET.SubElement(port_profile, "vlan-profile")
switchport = ET.SubElement(vlan_profile, "switchport")
trunk = ET.SubElement(switchport, "trunk")
allowed = ET.SubElement(trunk, "allowed")
vlan = ET.SubElement(allowed, "vlan")
all = ET.SubElement(vlan, "all")
callback = kwargs.pop('callback', self._callback)
return callback(config)
def port_profile_vlan_profile_switchport_trunk_allowed_vlan_none(self, **kwargs):
    """Build <config> for the empty leaf .../trunk/allowed/vlan/none
    (allow no VLANs) and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    trunk = ET.SubElement(switchport, "trunk")
    allowed = ET.SubElement(trunk, "allowed")
    vlan = ET.SubElement(allowed, "vlan")
    none = ET.SubElement(vlan, "none")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_trunk_allowed_vlan_add(self, **kwargs):
    """Build <config> setting .../trunk/allowed/vlan/add from kwargs['add']
    and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    trunk = ET.SubElement(switchport, "trunk")
    allowed = ET.SubElement(trunk, "allowed")
    vlan = ET.SubElement(allowed, "vlan")
    add = ET.SubElement(vlan, "add")
    add.text = kwargs.pop('add')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_trunk_allowed_vlan_excpt(self, **kwargs):
    """Build <config> setting .../trunk/allowed/vlan/except from
    kwargs['excpt'] and return callback(config).  The local is spelled
    'excpt' because 'except' is a Python keyword; the XML tag is 'except'."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    trunk = ET.SubElement(switchport, "trunk")
    allowed = ET.SubElement(trunk, "allowed")
    vlan = ET.SubElement(allowed, "vlan")
    excpt = ET.SubElement(vlan, "except")
    excpt.text = kwargs.pop('excpt')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_trunk_allowed_vlan_remove(self, **kwargs):
    """Build <config> setting .../trunk/allowed/vlan/remove from
    kwargs['remove'] and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    trunk = ET.SubElement(switchport, "trunk")
    allowed = ET.SubElement(trunk, "allowed")
    vlan = ET.SubElement(allowed, "vlan")
    remove = ET.SubElement(vlan, "remove")
    remove.text = kwargs.pop('remove')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_trunk_trunk_vlan_classification_allowed_vlan_add_trunk_vlan_id(self, **kwargs):
    """Build <config> for .../trunk/trunk-vlan-classification/allowed/vlan/add,
    keyed by trunk-ctag-id, setting trunk-vlan-id; return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    trunk = ET.SubElement(switchport, "trunk")
    trunk_vlan_classification = ET.SubElement(trunk, "trunk-vlan-classification")
    allowed = ET.SubElement(trunk_vlan_classification, "allowed")
    vlan = ET.SubElement(allowed, "vlan")
    add = ET.SubElement(vlan, "add")
    trunk_ctag_id_key = ET.SubElement(add, "trunk-ctag-id")
    trunk_ctag_id_key.text = kwargs.pop('trunk_ctag_id')
    trunk_vlan_id = ET.SubElement(add, "trunk-vlan-id")
    trunk_vlan_id.text = kwargs.pop('trunk_vlan_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_trunk_trunk_vlan_classification_allowed_vlan_add_trunk_ctag_id(self, **kwargs):
    """Build <config> for .../trunk/trunk-vlan-classification/allowed/vlan/add,
    keyed by trunk-vlan-id, setting trunk-ctag-id; return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    trunk = ET.SubElement(switchport, "trunk")
    trunk_vlan_classification = ET.SubElement(trunk, "trunk-vlan-classification")
    allowed = ET.SubElement(trunk_vlan_classification, "allowed")
    vlan = ET.SubElement(allowed, "vlan")
    add = ET.SubElement(vlan, "add")
    trunk_vlan_id_key = ET.SubElement(add, "trunk-vlan-id")
    trunk_vlan_id_key.text = kwargs.pop('trunk_vlan_id')
    trunk_ctag_id = ET.SubElement(add, "trunk-ctag-id")
    trunk_ctag_id.text = kwargs.pop('trunk_ctag_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_trunk_trunk_vlan_classification_allowed_vlan_remove_trunk_vlan_id(self, **kwargs):
    """Build <config> for .../trunk/trunk-vlan-classification/allowed/vlan/remove,
    keyed by trunk-ctag-id, setting trunk-vlan-id; return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    trunk = ET.SubElement(switchport, "trunk")
    trunk_vlan_classification = ET.SubElement(trunk, "trunk-vlan-classification")
    allowed = ET.SubElement(trunk_vlan_classification, "allowed")
    vlan = ET.SubElement(allowed, "vlan")
    remove = ET.SubElement(vlan, "remove")
    trunk_ctag_id_key = ET.SubElement(remove, "trunk-ctag-id")
    trunk_ctag_id_key.text = kwargs.pop('trunk_ctag_id')
    trunk_vlan_id = ET.SubElement(remove, "trunk-vlan-id")
    trunk_vlan_id.text = kwargs.pop('trunk_vlan_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_trunk_trunk_vlan_classification_allowed_vlan_remove_trunk_ctag_id(self, **kwargs):
    """Build <config> for .../trunk/trunk-vlan-classification/allowed/vlan/remove,
    keyed by trunk-vlan-id, setting trunk-ctag-id; return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    trunk = ET.SubElement(switchport, "trunk")
    trunk_vlan_classification = ET.SubElement(trunk, "trunk-vlan-classification")
    allowed = ET.SubElement(trunk_vlan_classification, "allowed")
    vlan = ET.SubElement(allowed, "vlan")
    remove = ET.SubElement(vlan, "remove")
    trunk_vlan_id_key = ET.SubElement(remove, "trunk-vlan-id")
    trunk_vlan_id_key.text = kwargs.pop('trunk_vlan_id')
    trunk_ctag_id = ET.SubElement(remove, "trunk-ctag-id")
    trunk_ctag_id.text = kwargs.pop('trunk_ctag_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_trunk_native_vlan_classification_native_vlan_id(self, **kwargs):
    """Build <config> setting .../trunk/native-vlan-classification/native-vlan-id
    from kwargs['native_vlan_id'] and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    trunk = ET.SubElement(switchport, "trunk")
    native_vlan_classification = ET.SubElement(trunk, "native-vlan-classification")
    native_vlan_id = ET.SubElement(native_vlan_classification, "native-vlan-id")
    native_vlan_id.text = kwargs.pop('native_vlan_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_trunk_native_vlan_classification_native_vlan_ctag_id(self, **kwargs):
    """Build <config> setting .../trunk/native-vlan-classification/native-vlan-ctag-id
    from kwargs['native_vlan_ctag_id'] and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    trunk = ET.SubElement(switchport, "trunk")
    native_vlan_classification = ET.SubElement(trunk, "native-vlan-classification")
    native_vlan_ctag_id = ET.SubElement(native_vlan_classification, "native-vlan-ctag-id")
    native_vlan_ctag_id.text = kwargs.pop('native_vlan_ctag_id')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_trunk_native_vlan(self, **kwargs):
    """Build <config> setting .../vlan-profile/switchport/trunk/native-vlan
    from kwargs['native_vlan'] and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    trunk = ET.SubElement(switchport, "trunk")
    native_vlan = ET.SubElement(trunk, "native-vlan")
    native_vlan.text = kwargs.pop('native_vlan')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_fcoe_profile_fcoeport_fcoe_map_name(self, **kwargs):
    """Build <config> setting .../fcoe-profile/fcoeport/fcoe-map-name
    from kwargs['fcoe_map_name'] and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    fcoe_profile = ET.SubElement(port_profile, "fcoe-profile")
    fcoeport = ET.SubElement(fcoe_profile, "fcoeport")
    fcoe_map_name = ET.SubElement(fcoeport, "fcoe-map-name")
    fcoe_map_name.text = kwargs.pop('fcoe_map_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_qos_profile_cee(self, **kwargs):
"""Auto Generated Code
"""
config = ET.Element("config")
port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
name_key = ET.SubElement(port_profile, "name")
name_key.text = kwargs.pop('name')
qos_profile = ET.SubElement(port_profile, "qos-profile")
cee = ET.SubElement(qos_profile, "cee")
cee.text = kwargs.pop('cee')
callback = kwargs.pop('callback', self._callback)
return callback(config)
def port_profile_qos_profile_qos_cos(self, **kwargs):
    """Build <config> setting .../qos-profile/qos/cos from kwargs['cos']
    and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    qos_profile = ET.SubElement(port_profile, "qos-profile")
    qos = ET.SubElement(qos_profile, "qos")
    cos = ET.SubElement(qos, "cos")
    cos.text = kwargs.pop('cos')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_qos_profile_qos_trust_trust_cos(self, **kwargs):
    """Build <config> for the empty leaf .../qos-profile/qos/trust/trust-cos
    and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    qos_profile = ET.SubElement(port_profile, "qos-profile")
    qos = ET.SubElement(qos_profile, "qos")
    trust = ET.SubElement(qos, "trust")
    trust_cos = ET.SubElement(trust, "trust-cos")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_qos_profile_qos_cos_mutation(self, **kwargs):
    """Build <config> setting .../qos-profile/qos/cos-mutation from
    kwargs['cos_mutation'] and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    qos_profile = ET.SubElement(port_profile, "qos-profile")
    qos = ET.SubElement(qos_profile, "qos")
    cos_mutation = ET.SubElement(qos, "cos-mutation")
    cos_mutation.text = kwargs.pop('cos_mutation')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_qos_profile_qos_cos_traffic_class(self, **kwargs):
    """Build <config> setting .../qos-profile/qos/cos-traffic-class from
    kwargs['cos_traffic_class'] and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    qos_profile = ET.SubElement(port_profile, "qos-profile")
    qos = ET.SubElement(qos_profile, "qos")
    cos_traffic_class = ET.SubElement(qos, "cos-traffic-class")
    cos_traffic_class.text = kwargs.pop('cos_traffic_class')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_qos_profile_qos_flowcontrol_flowcontrolglobal_tx(self, **kwargs):
    """Build <config> setting .../qos/flowcontrol/flowcontrolglobal/tx from
    kwargs['tx'] and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    qos_profile = ET.SubElement(port_profile, "qos-profile")
    qos = ET.SubElement(qos_profile, "qos")
    flowcontrol = ET.SubElement(qos, "flowcontrol")
    flowcontrolglobal = ET.SubElement(flowcontrol, "flowcontrolglobal")
    tx = ET.SubElement(flowcontrolglobal, "tx")
    tx.text = kwargs.pop('tx')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_qos_profile_qos_flowcontrol_flowcontrolglobal_rx(self, **kwargs):
    """Build <config> setting .../qos/flowcontrol/flowcontrolglobal/rx from
    kwargs['rx'] and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    qos_profile = ET.SubElement(port_profile, "qos-profile")
    qos = ET.SubElement(qos_profile, "qos")
    flowcontrol = ET.SubElement(qos, "flowcontrol")
    flowcontrolglobal = ET.SubElement(flowcontrol, "flowcontrolglobal")
    rx = ET.SubElement(flowcontrolglobal, "rx")
    rx.text = kwargs.pop('rx')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_qos_profile_qos_flowcontrol_pfc_pfc_cos(self, **kwargs):
    """Build <config> setting .../qos/flowcontrol/pfc/pfc-cos from
    kwargs['pfc_cos'] and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    qos_profile = ET.SubElement(port_profile, "qos-profile")
    qos = ET.SubElement(qos_profile, "qos")
    flowcontrol = ET.SubElement(qos, "flowcontrol")
    pfc = ET.SubElement(flowcontrol, "pfc")
    pfc_cos = ET.SubElement(pfc, "pfc-cos")
    pfc_cos.text = kwargs.pop('pfc_cos')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_qos_profile_qos_flowcontrol_pfc_pfc_tx(self, **kwargs):
    """Build <config> for .../qos/flowcontrol/pfc, keyed by pfc-cos, setting
    pfc-tx from kwargs['pfc_tx']; return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    qos_profile = ET.SubElement(port_profile, "qos-profile")
    qos = ET.SubElement(qos_profile, "qos")
    flowcontrol = ET.SubElement(qos, "flowcontrol")
    pfc = ET.SubElement(flowcontrol, "pfc")
    pfc_cos_key = ET.SubElement(pfc, "pfc-cos")
    pfc_cos_key.text = kwargs.pop('pfc_cos')
    pfc_tx = ET.SubElement(pfc, "pfc-tx")
    pfc_tx.text = kwargs.pop('pfc_tx')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_qos_profile_qos_flowcontrol_pfc_pfc_rx(self, **kwargs):
    """Build <config> for .../qos/flowcontrol/pfc, keyed by pfc-cos, setting
    pfc-rx from kwargs['pfc_rx']; return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    qos_profile = ET.SubElement(port_profile, "qos-profile")
    qos = ET.SubElement(qos_profile, "qos")
    flowcontrol = ET.SubElement(qos, "flowcontrol")
    pfc = ET.SubElement(flowcontrol, "pfc")
    pfc_cos_key = ET.SubElement(pfc, "pfc-cos")
    pfc_cos_key.text = kwargs.pop('pfc_cos')
    pfc_rx = ET.SubElement(pfc, "pfc-rx")
    pfc_rx.text = kwargs.pop('pfc_rx')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_security_profile_mac_access_group_access_group_name(self, **kwargs):
    """Build <config> setting .../security-profile/mac/access-group/access-group-name
    from kwargs['access_group_name'] and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    security_profile = ET.SubElement(port_profile, "security-profile")
    mac = ET.SubElement(security_profile, "mac")
    access_group = ET.SubElement(mac, "access-group")
    access_group_name = ET.SubElement(access_group, "access-group-name")
    access_group_name.text = kwargs.pop('access_group_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_security_profile_mac_access_group_in_cg(self, **kwargs):
    """Build <config> for the empty leaf .../security-profile/mac/access-group/in
    and return callback(config).  The local is named in_cg because 'in' is a
    Python keyword; the XML tag is 'in'."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    security_profile = ET.SubElement(port_profile, "security-profile")
    mac = ET.SubElement(security_profile, "mac")
    access_group = ET.SubElement(mac, "access-group")
    in_cg = ET.SubElement(access_group, "in")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_security_profile_ip_access_group_ipv4_access_group_name(self, **kwargs):
    """Build <config> setting .../security-profile/ip/access-group/ipv4-access-group-name
    from kwargs['ipv4_access_group_name'] and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    security_profile = ET.SubElement(port_profile, "security-profile")
    ip = ET.SubElement(security_profile, "ip")
    access_group = ET.SubElement(ip, "access-group")
    ipv4_access_group_name = ET.SubElement(access_group, "ipv4-access-group-name")
    ipv4_access_group_name.text = kwargs.pop('ipv4_access_group_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_security_profile_ip_access_group_ipv4_in(self, **kwargs):
    """Build <config> for the empty leaf .../security-profile/ip/access-group/ipv4-in
    and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    security_profile = ET.SubElement(port_profile, "security-profile")
    ip = ET.SubElement(security_profile, "ip")
    access_group = ET.SubElement(ip, "access-group")
    ipv4_in = ET.SubElement(access_group, "ipv4-in")
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_security_profile_ipv6_access_group_ipv6_access_group_name(self, **kwargs):
    """Build <config> setting .../security-profile/ipv6/access-group/ipv6-access-group-name
    from kwargs['ipv6_access_group_name'] and return callback(config)."""
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile", xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    security_profile = ET.SubElement(port_profile, "security-profile")
    ipv6 = ET.SubElement(security_profile, "ipv6")
    access_group = ET.SubElement(ipv6, "access-group")
    ipv6_access_group_name = ET.SubElement(access_group, "ipv6-access-group-name")
    ipv6_access_group_name.text = kwargs.pop('ipv6_access_group_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_security_profile_ipv6_access_group_ipv6_in(self, **kwargs):
    """Create the empty security-profile/ipv6/access-group/ipv6-in node.

    Pops 'name' (port-profile key); the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("security-profile", "ipv6", "access-group", "ipv6-in"):
        node = ET.SubElement(node, tag)
    return kwargs.pop('callback', self._callback)(config)
def port_profile_restrict_flooding_container_restrict_flooding(self, **kwargs):
    """Create the empty restrict-flooding-container/restrict-flooding node.

    Pops 'name' (port-profile key); the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    container = ET.SubElement(node, "restrict-flooding-container")
    ET.SubElement(container, "restrict-flooding")
    return kwargs.pop('callback', self._callback)(config)
def port_profile_global_port_profile_name(self, **kwargs):
    """Set port-profile-global/port-profile/name.

    Pops 'name'; the <config> tree goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile-global",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    node = ET.SubElement(node, "port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_global_port_profile_activate(self, **kwargs):
    """Create the empty port-profile-global/port-profile/activate node.

    Pops 'name' (port-profile key); the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile-global",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    node = ET.SubElement(node, "port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    ET.SubElement(node, "activate")
    return kwargs.pop('callback', self._callback)(config)
def port_profile_global_port_profile_static_mac_address(self, **kwargs):
    """Set port-profile-global/port-profile/static/mac-address.

    Pops 'name' (port-profile key) and 'mac_address'; the <config> tree
    goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile-global",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    node = ET.SubElement(node, "port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    static = ET.SubElement(node, "static")
    ET.SubElement(static, "mac-address").text = kwargs.pop('mac_address')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_domain_port_profile_domain_name(self, **kwargs):
    """Set port-profile-domain/port-profile-domain-name.

    Pops 'port_profile_domain_name'; the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    domain = ET.SubElement(config, "port-profile-domain",
                           xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(domain, "port-profile-domain-name").text = kwargs.pop('port_profile_domain_name')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_domain_profile_profile_name(self, **kwargs):
    """Set port-profile-domain/profile/profile-name.

    Pops 'port_profile_domain_name' (domain key) and 'profile_name'; the
    <config> tree goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    domain = ET.SubElement(config, "port-profile-domain",
                           xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(domain, "port-profile-domain-name").text = kwargs.pop('port_profile_domain_name')
    profile = ET.SubElement(domain, "profile")
    ET.SubElement(profile, "profile-name").text = kwargs.pop('profile_name')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_name(self, **kwargs):
    """Set port-profile/name.

    Pops 'name'; the <config> tree goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    profile = ET.SubElement(config, "port-profile",
                            xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(profile, "name").text = kwargs.pop('name')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_allow_nonprofiledmacs(self, **kwargs):
    """Create the empty allow/nonprofiledmacs node under a port-profile.

    Pops 'name' (port-profile key); the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    allow = ET.SubElement(node, "allow")
    ET.SubElement(allow, "nonprofiledmacs")
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_basic_basic(self, **kwargs):
    """Create the empty vlan-profile/switchport-basic/basic node.

    Pops 'name' (port-profile key); the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport-basic", "basic"):
        node = ET.SubElement(node, tag)
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_mode_vlan_mode(self, **kwargs):
    """Set vlan-profile/switchport/mode/vlan-mode on a port-profile.

    Pops 'name' (port-profile key) and 'vlan_mode'; the <config> tree goes
    to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "mode"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "vlan-mode").text = kwargs.pop('vlan_mode')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_access_vlan_name(self, **kwargs):
    """Set vlan-profile/switchport/access/vlan/name on a port-profile.

    Bug fix: the auto-generated original popped 'name' twice — once for the
    port-profile key and again for the access-vlan leaf — so the second
    pop always raised KeyError. The access-vlan name now comes from its
    own 'vlan_name' keyword; 'name' remains the port-profile key.

    Keyword Args:
        name (str): port-profile key.
        vlan_name (str): value for the access vlan <name> leaf.
        callback: receives the built <config>; defaults to self._callback.
    """
    config = ET.Element("config")
    port_profile = ET.SubElement(config, "port-profile",
                                 xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    name_key = ET.SubElement(port_profile, "name")
    name_key.text = kwargs.pop('name')
    vlan_profile = ET.SubElement(port_profile, "vlan-profile")
    switchport = ET.SubElement(vlan_profile, "switchport")
    access = ET.SubElement(switchport, "access")
    vlan = ET.SubElement(access, "vlan")
    name = ET.SubElement(vlan, "name")
    name.text = kwargs.pop('vlan_name')
    callback = kwargs.pop('callback', self._callback)
    return callback(config)
def port_profile_vlan_profile_switchport_access_mac_vlan_classification_access_vlan_access_vlan_id(self, **kwargs):
    """Set access-vlan-id for an access-mac-vlan-classification entry.

    Pops 'name' (port-profile key), 'access_mac_address' (list key) and
    'access_vlan_id'; the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "access-mac-vlan-classification",
                "access", "vlan"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "access-mac-address").text = kwargs.pop('access_mac_address')
    ET.SubElement(node, "access-vlan-id").text = kwargs.pop('access_vlan_id')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_access_mac_vlan_classification_access_vlan_access_mac_address(self, **kwargs):
    """Set access-mac-address for an access-mac-vlan-classification entry.

    Pops 'name' (port-profile key), 'access_vlan_id' (list key) and
    'access_mac_address'; the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "access-mac-vlan-classification",
                "access", "vlan"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "access-vlan-id").text = kwargs.pop('access_vlan_id')
    ET.SubElement(node, "access-mac-address").text = kwargs.pop('access_mac_address')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_access_mac_group_vlan_classification_access_vlan_access_vlan_id(self, **kwargs):
    """Set access-vlan-id for an access-mac-group-vlan-classification entry.

    Pops 'name' (port-profile key), 'access_mac_group' (list key) and
    'access_vlan_id'; the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "access-mac-group-vlan-classification",
                "access", "vlan"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "access-mac-group").text = kwargs.pop('access_mac_group')
    ET.SubElement(node, "access-vlan-id").text = kwargs.pop('access_vlan_id')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_access_mac_group_vlan_classification_access_vlan_access_mac_group(self, **kwargs):
    """Set access-mac-group for an access-mac-group-vlan-classification entry.

    Pops 'name' (port-profile key), 'access_vlan_id' (list key) and
    'access_mac_group'; the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "access-mac-group-vlan-classification",
                "access", "vlan"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "access-vlan-id").text = kwargs.pop('access_vlan_id')
    ET.SubElement(node, "access-mac-group").text = kwargs.pop('access_mac_group')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_trunk_allowed_vlan_all(self, **kwargs):
    """Create the empty trunk/allowed/vlan/all node on a port-profile.

    Pops 'name' (port-profile key); the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "trunk", "allowed", "vlan", "all"):
        node = ET.SubElement(node, tag)
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_trunk_allowed_vlan_none(self, **kwargs):
    """Create the empty trunk/allowed/vlan/none node on a port-profile.

    Pops 'name' (port-profile key); the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "trunk", "allowed", "vlan", "none"):
        node = ET.SubElement(node, tag)
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_trunk_allowed_vlan_add(self, **kwargs):
    """Set trunk/allowed/vlan/add on a port-profile.

    Pops 'name' (port-profile key) and 'add' (vlan range to allow); the
    <config> tree goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "trunk", "allowed", "vlan"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "add").text = kwargs.pop('add')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_trunk_allowed_vlan_excpt(self, **kwargs):
    """Set trunk/allowed/vlan/except on a port-profile.

    The kwarg is spelled 'excpt' because 'except' is a Python keyword;
    the emitted XML tag is still "except". Pops 'name' (port-profile key)
    and 'excpt'; the <config> tree goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "trunk", "allowed", "vlan"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "except").text = kwargs.pop('excpt')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_trunk_allowed_vlan_remove(self, **kwargs):
    """Set trunk/allowed/vlan/remove on a port-profile.

    Pops 'name' (port-profile key) and 'remove'; the <config> tree goes
    to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "trunk", "allowed", "vlan"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "remove").text = kwargs.pop('remove')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_trunk_trunk_vlan_classification_allowed_vlan_add_trunk_vlan_id(self, **kwargs):
    """Set trunk-vlan-id for a trunk-vlan-classification allowed/vlan/add entry.

    Pops 'name' (port-profile key), 'trunk_ctag_id' (list key) and
    'trunk_vlan_id'; the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "trunk", "trunk-vlan-classification",
                "allowed", "vlan", "add"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "trunk-ctag-id").text = kwargs.pop('trunk_ctag_id')
    ET.SubElement(node, "trunk-vlan-id").text = kwargs.pop('trunk_vlan_id')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_trunk_trunk_vlan_classification_allowed_vlan_add_trunk_ctag_id(self, **kwargs):
    """Set trunk-ctag-id for a trunk-vlan-classification allowed/vlan/add entry.

    Pops 'name' (port-profile key), 'trunk_vlan_id' (list key) and
    'trunk_ctag_id'; the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "trunk", "trunk-vlan-classification",
                "allowed", "vlan", "add"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "trunk-vlan-id").text = kwargs.pop('trunk_vlan_id')
    ET.SubElement(node, "trunk-ctag-id").text = kwargs.pop('trunk_ctag_id')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_trunk_trunk_vlan_classification_allowed_vlan_remove_trunk_vlan_id(self, **kwargs):
    """Set trunk-vlan-id for a trunk-vlan-classification allowed/vlan/remove entry.

    Pops 'name' (port-profile key), 'trunk_ctag_id' (list key) and
    'trunk_vlan_id'; the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "trunk", "trunk-vlan-classification",
                "allowed", "vlan", "remove"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "trunk-ctag-id").text = kwargs.pop('trunk_ctag_id')
    ET.SubElement(node, "trunk-vlan-id").text = kwargs.pop('trunk_vlan_id')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_trunk_trunk_vlan_classification_allowed_vlan_remove_trunk_ctag_id(self, **kwargs):
    """Set trunk-ctag-id for a trunk-vlan-classification allowed/vlan/remove entry.

    Pops 'name' (port-profile key), 'trunk_vlan_id' (list key) and
    'trunk_ctag_id'; the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "trunk", "trunk-vlan-classification",
                "allowed", "vlan", "remove"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "trunk-vlan-id").text = kwargs.pop('trunk_vlan_id')
    ET.SubElement(node, "trunk-ctag-id").text = kwargs.pop('trunk_ctag_id')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_trunk_native_vlan_classification_native_vlan_id(self, **kwargs):
    """Set trunk/native-vlan-classification/native-vlan-id on a port-profile.

    Pops 'name' (port-profile key) and 'native_vlan_id'; the <config>
    tree goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "trunk", "native-vlan-classification"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "native-vlan-id").text = kwargs.pop('native_vlan_id')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_trunk_native_vlan_classification_native_vlan_ctag_id(self, **kwargs):
    """Set trunk/native-vlan-classification/native-vlan-ctag-id on a port-profile.

    Pops 'name' (port-profile key) and 'native_vlan_ctag_id'; the
    <config> tree goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "trunk", "native-vlan-classification"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "native-vlan-ctag-id").text = kwargs.pop('native_vlan_ctag_id')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_vlan_profile_switchport_trunk_native_vlan(self, **kwargs):
    """Set vlan-profile/switchport/trunk/native-vlan on a port-profile.

    Pops 'name' (port-profile key) and 'native_vlan'; the <config> tree
    goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("vlan-profile", "switchport", "trunk"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "native-vlan").text = kwargs.pop('native_vlan')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_fcoe_profile_fcoeport_fcoe_map_name(self, **kwargs):
    """Set fcoe-profile/fcoeport/fcoe-map-name on a port-profile.

    Pops 'name' (port-profile key) and 'fcoe_map_name'; the <config> tree
    goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("fcoe-profile", "fcoeport"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "fcoe-map-name").text = kwargs.pop('fcoe_map_name')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_qos_profile_cee(self, **kwargs):
    """Set qos-profile/cee on a port-profile.

    Pops 'name' (port-profile key) and 'cee'; the <config> tree goes to
    'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    qos_profile = ET.SubElement(node, "qos-profile")
    ET.SubElement(qos_profile, "cee").text = kwargs.pop('cee')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_qos_profile_qos_cos(self, **kwargs):
    """Set qos-profile/qos/cos on a port-profile.

    Pops 'name' (port-profile key) and 'cos'; the <config> tree goes to
    'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("qos-profile", "qos"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "cos").text = kwargs.pop('cos')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_qos_profile_qos_trust_trust_cos(self, **kwargs):
    """Create the empty qos-profile/qos/trust/trust-cos node.

    Pops 'name' (port-profile key); the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("qos-profile", "qos", "trust", "trust-cos"):
        node = ET.SubElement(node, tag)
    return kwargs.pop('callback', self._callback)(config)
def port_profile_qos_profile_qos_cos_mutation(self, **kwargs):
    """Set qos-profile/qos/cos-mutation on a port-profile.

    Pops 'name' (port-profile key) and 'cos_mutation'; the <config> tree
    goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("qos-profile", "qos"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "cos-mutation").text = kwargs.pop('cos_mutation')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_qos_profile_qos_cos_traffic_class(self, **kwargs):
    """Set qos-profile/qos/cos-traffic-class on a port-profile.

    Pops 'name' (port-profile key) and 'cos_traffic_class'; the <config>
    tree goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("qos-profile", "qos"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "cos-traffic-class").text = kwargs.pop('cos_traffic_class')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_qos_profile_qos_flowcontrol_flowcontrolglobal_tx(self, **kwargs):
    """Set qos/flowcontrol/flowcontrolglobal/tx on a port-profile.

    Pops 'name' (port-profile key) and 'tx'; the <config> tree goes to
    'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("qos-profile", "qos", "flowcontrol", "flowcontrolglobal"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "tx").text = kwargs.pop('tx')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_qos_profile_qos_flowcontrol_flowcontrolglobal_rx(self, **kwargs):
    """Set qos/flowcontrol/flowcontrolglobal/rx on a port-profile.

    Pops 'name' (port-profile key) and 'rx'; the <config> tree goes to
    'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("qos-profile", "qos", "flowcontrol", "flowcontrolglobal"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "rx").text = kwargs.pop('rx')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_qos_profile_qos_flowcontrol_pfc_pfc_cos(self, **kwargs):
    """Set qos/flowcontrol/pfc/pfc-cos on a port-profile.

    Pops 'name' (port-profile key) and 'pfc_cos'; the <config> tree goes
    to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("qos-profile", "qos", "flowcontrol", "pfc"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "pfc-cos").text = kwargs.pop('pfc_cos')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_qos_profile_qos_flowcontrol_pfc_pfc_tx(self, **kwargs):
    """Set pfc-tx for a qos/flowcontrol/pfc entry keyed by pfc-cos.

    Pops 'name' (port-profile key), 'pfc_cos' (list key) and 'pfc_tx';
    the <config> tree goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("qos-profile", "qos", "flowcontrol", "pfc"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "pfc-cos").text = kwargs.pop('pfc_cos')
    ET.SubElement(node, "pfc-tx").text = kwargs.pop('pfc_tx')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_qos_profile_qos_flowcontrol_pfc_pfc_rx(self, **kwargs):
    """Set pfc-rx for a qos/flowcontrol/pfc entry keyed by pfc-cos.

    Pops 'name' (port-profile key), 'pfc_cos' (list key) and 'pfc_rx';
    the <config> tree goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("qos-profile", "qos", "flowcontrol", "pfc"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "pfc-cos").text = kwargs.pop('pfc_cos')
    ET.SubElement(node, "pfc-rx").text = kwargs.pop('pfc_rx')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_security_profile_mac_access_group_access_group_name(self, **kwargs):
    """Set security-profile/mac/access-group/access-group-name on a port-profile.

    Pops 'name' (port-profile key) and 'access_group_name'; the <config>
    tree goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("security-profile", "mac", "access-group"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "access-group-name").text = kwargs.pop('access_group_name')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_security_profile_mac_access_group_in_cg(self, **kwargs):
    """Create the empty security-profile/mac/access-group/in node.

    The method name uses 'in_cg' because 'in' is a Python keyword; the
    emitted XML tag is "in". Pops 'name' (port-profile key); the <config>
    tree goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("security-profile", "mac", "access-group", "in"):
        node = ET.SubElement(node, tag)
    return kwargs.pop('callback', self._callback)(config)
def port_profile_security_profile_ip_access_group_ipv4_access_group_name(self, **kwargs):
    """Set security-profile/ip/access-group/ipv4-access-group-name on a port-profile.

    NOTE: this auto-generated file defines this method more than once;
    Python keeps the later definition. Pops 'name' (port-profile key) and
    'ipv4_access_group_name'; the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("security-profile", "ip", "access-group"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "ipv4-access-group-name").text = kwargs.pop('ipv4_access_group_name')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_security_profile_ip_access_group_ipv4_in(self, **kwargs):
    """Create the empty security-profile/ip/access-group/ipv4-in node.

    NOTE: this auto-generated file defines this method more than once;
    Python keeps the later definition. Pops 'name' (port-profile key);
    the <config> tree goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("security-profile", "ip", "access-group", "ipv4-in"):
        node = ET.SubElement(node, tag)
    return kwargs.pop('callback', self._callback)(config)
def port_profile_security_profile_ipv6_access_group_ipv6_access_group_name(self, **kwargs):
    """Set security-profile/ipv6/access-group/ipv6-access-group-name on a port-profile.

    NOTE: this auto-generated file defines this method more than once;
    Python keeps the later definition. Pops 'name' (port-profile key) and
    'ipv6_access_group_name'; the <config> tree goes to 'callback'
    (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("security-profile", "ipv6", "access-group"):
        node = ET.SubElement(node, tag)
    ET.SubElement(node, "ipv6-access-group-name").text = kwargs.pop('ipv6_access_group_name')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_security_profile_ipv6_access_group_ipv6_in(self, **kwargs):
    """Create the empty security-profile/ipv6/access-group/ipv6-in node.

    NOTE: this auto-generated file defines this method more than once;
    Python keeps the later definition. Pops 'name' (port-profile key);
    the <config> tree goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    for tag in ("security-profile", "ipv6", "access-group", "ipv6-in"):
        node = ET.SubElement(node, tag)
    return kwargs.pop('callback', self._callback)(config)
def port_profile_restrict_flooding_container_restrict_flooding(self, **kwargs):
    """Create the empty restrict-flooding-container/restrict-flooding node.

    NOTE: this auto-generated file defines this method more than once;
    Python keeps the later definition. Pops 'name' (port-profile key);
    the <config> tree goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    container = ET.SubElement(node, "restrict-flooding-container")
    ET.SubElement(container, "restrict-flooding")
    return kwargs.pop('callback', self._callback)(config)
def port_profile_global_port_profile_name(self, **kwargs):
    """Set port-profile-global/port-profile/name.

    NOTE: this auto-generated file defines this method more than once;
    Python keeps the later definition. Pops 'name'; the <config> tree
    goes to 'callback' (default: self._callback).
    """
    config = ET.Element("config")
    node = ET.SubElement(config, "port-profile-global",
                         xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    node = ET.SubElement(node, "port-profile")
    ET.SubElement(node, "name").text = kwargs.pop('name')
    return kwargs.pop('callback', self._callback)(config)
def port_profile_global_port_profile_activate(self, **kwargs):
    """Auto Generated Code

    Build the config element for port-profile-global/port-profile/activate
    and pass it to the result callback (``callback`` kwarg, default
    ``self._callback``).
    """
    root = ET.Element("config")
    profile_global = ET.SubElement(
        root, "port-profile-global",
        xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    profile = ET.SubElement(profile_global, "port-profile")
    # The profile name is the list key for this config node.
    ET.SubElement(profile, "name").text = kwargs.pop('name')
    ET.SubElement(profile, "activate")
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def port_profile_global_port_profile_static_mac_address(self, **kwargs):
    """Auto Generated Code

    Build the config element for
    port-profile-global/port-profile/static/mac-address and pass it to
    the result callback (``callback`` kwarg, default ``self._callback``).
    """
    root = ET.Element("config")
    profile_global = ET.SubElement(
        root, "port-profile-global",
        xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    profile = ET.SubElement(profile_global, "port-profile")
    # The profile name is the list key for this config node.
    ET.SubElement(profile, "name").text = kwargs.pop('name')
    static = ET.SubElement(profile, "static")
    ET.SubElement(static, "mac-address").text = kwargs.pop('mac_address')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def port_profile_domain_port_profile_domain_name(self, **kwargs):
    """Auto Generated Code

    Build the config element for port-profile-domain/port-profile-domain-name
    and pass it to the result callback (``callback`` kwarg, default
    ``self._callback``).
    """
    root = ET.Element("config")
    domain = ET.SubElement(
        root, "port-profile-domain",
        xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    ET.SubElement(domain, "port-profile-domain-name").text = \
        kwargs.pop('port_profile_domain_name')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
def port_profile_domain_profile_profile_name(self, **kwargs):
    """Auto Generated Code

    Build the config element for
    port-profile-domain/profile/profile-name and pass it to the result
    callback (``callback`` kwarg, default ``self._callback``).
    """
    root = ET.Element("config")
    domain = ET.SubElement(
        root, "port-profile-domain",
        xmlns="urn:brocade.com:mgmt:brocade-port-profile")
    # The domain name is the list key for this config node.
    ET.SubElement(domain, "port-profile-domain-name").text = \
        kwargs.pop('port_profile_domain_name')
    profile = ET.SubElement(domain, "profile")
    ET.SubElement(profile, "profile-name").text = kwargs.pop('profile_name')
    handler = kwargs.pop('callback', self._callback)
    return handler(root)
| 49.113465 | 127 | 0.664579 | 8,527 | 71,853 | 5.364724 | 0.009851 | 0.151623 | 0.065406 | 0.088491 | 0.997005 | 0.997005 | 0.997005 | 0.997005 | 0.997005 | 0.997005 | 0 | 0.000846 | 0.210249 | 71,853 | 1,463 | 128 | 49.113465 | 0.80529 | 0.036171 | 0 | 0.996377 | 1 | 0 | 0.17146 | 0.066823 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080616 | false | 0 | 0.000906 | 0 | 0.162138 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
a70dbfbdd213005ca6157eb51af007ea5db762f4 | 306 | py | Python | src/IceRayPy/core/material/instruction/label/spot/__init__.py | dmilos/IceRay | 4e01f141363c0d126d3c700c1f5f892967e3d520 | [
"MIT-0"
] | 2 | 2020-09-04T12:27:15.000Z | 2022-01-17T14:49:40.000Z | src/IceRayPy/core/material/instruction/label/spot/__init__.py | dmilos/IceRay | 4e01f141363c0d126d3c700c1f5f892967e3d520 | [
"MIT-0"
] | null | null | null | src/IceRayPy/core/material/instruction/label/spot/__init__.py | dmilos/IceRay | 4e01f141363c0d126d3c700c1f5f892967e3d520 | [
"MIT-0"
# Trace module loading: announce start, import the spot label submodules,
# then announce completion.
print("<" + __name__ + " name='" + __file__ + "'>")

import IceRayPy.core.material.instruction.label.spot.const
import IceRayPy.core.material.instruction.label.spot.dynamic
import IceRayPy.core.material.instruction.label.spot.temp

print("</" + __name__ + " name='" + __file__ + "'>")
| 34 | 61 | 0.666667 | 32 | 306 | 5.875 | 0.40625 | 0.223404 | 0.287234 | 0.414894 | 0.734043 | 0.734043 | 0.734043 | 0 | 0 | 0 | 0 | 0 | 0.147059 | 306 | 8 | 62 | 38.25 | 0.720307 | 0 | 0 | 0 | 0 | 0 | 0.171141 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.6 | 0 | 0.6 | 0.4 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 9 |
a7369191efec0e62f445d3d2b03261494d9e470a | 423 | py | Python | COGor/__init__.py | xpolak37/the-COG-or | 85cddf196d3aacdb42a2673567cd7016beed1cd0 | [
"MIT"
] | 1 | 2022-03-21T15:15:59.000Z | 2022-03-21T15:15:59.000Z | COGor/__init__.py | xpolak37/the-COG-or | 85cddf196d3aacdb42a2673567cd7016beed1cd0 | [
"MIT"
] | null | null | null | COGor/__init__.py | xpolak37/the-COG-or | 85cddf196d3aacdb42a2673567cd7016beed1cd0 | [
"MIT"
] | null | null | null | from COGor.program_processor import em_processor
from COGor.program_processor import om_processor
from COGor.program_processor import batch_merger
from COGor.program_processor import batch_splitter
from COGor.program_processor import batch_processor
from COGor.consensus import consensus
from COGor.track_manager import get_track_template
from COGor.consensus import get_features
from COGor.track_manager import get_legend
| 42.3 | 51 | 0.893617 | 61 | 423 | 5.934426 | 0.278689 | 0.223757 | 0.220994 | 0.345304 | 0.685083 | 0.599448 | 0 | 0 | 0 | 0 | 0 | 0 | 0.085106 | 423 | 9 | 52 | 47 | 0.935401 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
59b528056f3eafd9a30681a70c5a9cd0986b2318 | 62,187 | py | Python | elements_sdk/api/auth_api.py | elements-storage/elements-sdk-python | 39c365fe079dcd5928c5fe1bbaa67389bd5a3d81 | [
"MIT"
] | 6 | 2020-11-16T23:15:18.000Z | 2022-03-14T03:56:12.000Z | elements_sdk/api/auth_api.py | elements-storage/elements-sdk-python | 39c365fe079dcd5928c5fe1bbaa67389bd5a3d81 | [
"MIT"
] | 1 | 2021-07-28T13:03:49.000Z | 2021-08-25T12:24:01.000Z | elements_sdk/api/auth_api.py | elements-storage/elements-sdk-python | 39c365fe079dcd5928c5fe1bbaa67389bd5a3d81 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
ELEMENTS API
The version of the OpenAPI document: 2
Generated by: https://openapi-generator.tech
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from elements_sdk.api_client import ApiClient
from elements_sdk.exceptions import (
ApiTypeError,
ApiValueError
)
class AuthApi(object):
"""NOTE: This class is auto generated by OpenAPI Generator
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
def __init__(self, api_client=None):
    """Create the API facade.

    :param api_client: transport used for all calls; a default
        ``ApiClient`` is constructed when none is supplied.
    """
    self.api_client = api_client if api_client is not None else ApiClient()
def check_auth_ticket(self, ticket, **kwargs):  # noqa: E501
    """check_auth_ticket  # noqa: E501

    Validate an auth ticket and return the matching user details.
    Required permissions: <class 'rest_framework.permissions.AllowAny'>.
    Synchronous by default; pass ``async_req=True`` to get a request thread.

    :param async_req bool: execute request asynchronously
    :param Ticket ticket: (required)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: ElementsUserDetail
    """
    # Delegate to the *_with_http_info variant, asking for payload only.
    return self.check_auth_ticket_with_http_info(
        ticket, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def check_auth_ticket_with_http_info(self, ticket, **kwargs):  # noqa: E501
    """check_auth_ticket  # noqa: E501

    Validate an auth ticket and return the matching user details.
    Required permissions: <class 'rest_framework.permissions.AllowAny'>.
    Synchronous by default; pass ``async_req=True`` to get a request thread.

    :param async_req bool: execute request asynchronously
    :param Ticket ticket: (required)
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(ElementsUserDetail, status_code(int), headers(HTTPHeaderDict))
    """
    params = locals()

    # Reject keyword arguments this endpoint does not understand.
    accepted = ['ticket', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout']
    for key, val in six.iteritems(params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s' to method check_auth_ticket" % key
            )
        params[key] = val
    del params['kwargs']

    # Client-side validation of the mandatory body parameter.
    if self.api_client.client_side_validation and params.get('ticket') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `ticket` when calling `check_auth_ticket`")  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(  # noqa: E501
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/2/auth/ticket/check', 'POST',
        {},     # path params
        [],     # query params
        header_params,
        body=params.get('ticket'),
        post_params=[],
        files={},
        response_type='ElementsUserDetail',  # noqa: E501
        auth_settings=['Bearer'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def create_auth_ticket(self, **kwargs):  # noqa: E501
    """create_auth_ticket  # noqa: E501

    Create a new auth ticket for the current user.
    Required permissions: Authenticated user.
    Synchronous by default; pass ``async_req=True`` to get a request thread.

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: Ticket
    """
    # Delegate to the *_with_http_info variant, asking for payload only.
    return self.create_auth_ticket_with_http_info(
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def create_auth_ticket_with_http_info(self, **kwargs):  # noqa: E501
    """create_auth_ticket  # noqa: E501

    Create a new auth ticket for the current user.
    Required permissions: Authenticated user.
    Synchronous by default; pass ``async_req=True`` to get a request thread.

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(Ticket, status_code(int), headers(HTTPHeaderDict))
    """
    params = locals()

    # Reject keyword arguments this endpoint does not understand.
    accepted = ['async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout']
    for key, val in six.iteritems(params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s' to method create_auth_ticket" % key
            )
        params[key] = val
    del params['kwargs']

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/2/auth/ticket', 'POST',
        {},     # path params
        [],     # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='Ticket',  # noqa: E501
        auth_settings=['Bearer'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def delete_access_token(self, id, **kwargs):  # noqa: E501
    """delete_access_token  # noqa: E501

    Delete a one-time access token.
    Required permissions: Authenticated user.
    Synchronous by default; pass ``async_req=True`` to get a request thread.

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this one time access token. (required)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: None
    """
    # Delegate to the *_with_http_info variant, asking for payload only.
    return self.delete_access_token_with_http_info(
        id, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def delete_access_token_with_http_info(self, id, **kwargs):  # noqa: E501
    """delete_access_token  # noqa: E501

    Delete a one-time access token.
    Required permissions: Authenticated user.
    Synchronous by default; pass ``async_req=True`` to get a request thread.

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this one time access token. (required)
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: None
    """
    params = locals()

    # Reject keyword arguments this endpoint does not understand.
    accepted = ['id', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout']
    for key, val in six.iteritems(params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s' to method delete_access_token" % key
            )
        params[key] = val
    del params['kwargs']

    # Client-side check for the mandatory path parameter.
    if self.api_client.client_side_validation and params.get('id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `delete_access_token`")  # noqa: E501

    path_params = {'id': params['id']}  # noqa: E501

    return self.api_client.call_api(
        '/api/2/auth/access-tokens/{id}', 'DELETE',
        path_params,
        [],     # query params
        {},     # header params
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['Bearer'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def generate_password(self, **kwargs):  # noqa: E501
    """generate_password  # noqa: E501

    Ask the server to generate a password.
    Required permissions: Authenticated user.
    Synchronous by default; pass ``async_req=True`` to get a request thread.

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: GeneratePasswordEndpointResponse
    """
    # Delegate to the *_with_http_info variant, asking for payload only.
    return self.generate_password_with_http_info(
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def generate_password_with_http_info(self, **kwargs):  # noqa: E501
    """generate_password  # noqa: E501

    Ask the server to generate a password.
    Required permissions: Authenticated user.
    Synchronous by default; pass ``async_req=True`` to get a request thread.

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(GeneratePasswordEndpointResponse, status_code(int), headers(HTTPHeaderDict))
    """
    params = locals()

    # Reject keyword arguments this endpoint does not understand.
    accepted = ['async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout']
    for key, val in six.iteritems(params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s' to method generate_password" % key
            )
        params[key] = val
    del params['kwargs']

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/2/auth/generate-password', 'POST',
        {},     # path params
        [],     # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='GeneratePasswordEndpointResponse',  # noqa: E501
        auth_settings=['Bearer'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_access_token(self, id, **kwargs):  # noqa: E501
    """get_access_token  # noqa: E501

    Fetch a single one-time access token by id.
    Required permissions: Authenticated user.
    Synchronous by default; pass ``async_req=True`` to get a request thread.

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this one time access token. (required)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: OneTimeAccessToken
    """
    # Delegate to the *_with_http_info variant, asking for payload only.
    return self.get_access_token_with_http_info(
        id, **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def get_access_token_with_http_info(self, id, **kwargs):  # noqa: E501
    """get_access_token  # noqa: E501

    Fetch a single one-time access token by id.
    Required permissions: Authenticated user.
    Synchronous by default; pass ``async_req=True`` to get a request thread.

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this one time access token. (required)
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(OneTimeAccessToken, status_code(int), headers(HTTPHeaderDict))
    """
    params = locals()

    # Reject keyword arguments this endpoint does not understand.
    accepted = ['id', 'async_req', '_return_http_data_only',
                '_preload_content', '_request_timeout']
    for key, val in six.iteritems(params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s' to method get_access_token" % key
            )
        params[key] = val
    del params['kwargs']

    # Client-side check for the mandatory path parameter.
    if self.api_client.client_side_validation and params.get('id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `get_access_token`")  # noqa: E501

    path_params = {'id': params['id']}  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/2/auth/access-tokens/{id}', 'GET',
        path_params,
        [],     # query params
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='OneTimeAccessToken',  # noqa: E501
        auth_settings=['Bearer'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def get_all_access_tokens(self, **kwargs):  # noqa: E501
    """get_all_access_tokens  # noqa: E501

    List one-time access tokens, optionally filtered and paginated.
    Required permissions: Authenticated user.
    Synchronous by default; pass ``async_req=True`` to get a request thread.

    :param async_req bool: execute request asynchronously
    :param str shared_bundles: filter by `shared_bundles`
    :param str shared_directories: filter by `shared_directories`
    :param str shared_bundles__asset: filter by `shared_bundles__asset`
    :param str user: filter by `user`
    :param str created_by: filter by `created_by`
    :param str ordering: which field to use when ordering the results
    :param int limit: number of results per page
    :param int offset: initial index from which to return the results
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: list[OneTimeAccessToken]
    """
    # Delegate to the *_with_http_info variant, asking for payload only.
    return self.get_all_access_tokens_with_http_info(
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def get_all_access_tokens_with_http_info(self, **kwargs):  # noqa: E501
    """get_all_access_tokens  # noqa: E501

    List one-time access tokens, optionally filtered and paginated.
    Required permissions: Authenticated user.
    Synchronous by default; pass ``async_req=True`` to get a request thread.

    :param async_req bool: execute request asynchronously
    :param str shared_bundles: filter by `shared_bundles`
    :param str shared_directories: filter by `shared_directories`
    :param str shared_bundles__asset: filter by `shared_bundles__asset`
    :param str user: filter by `user`
    :param str created_by: filter by `created_by`
    :param str ordering: which field to use when ordering the results
    :param int limit: number of results per page
    :param int offset: initial index from which to return the results
    :param _return_http_data_only: response data without head status code and headers
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: tuple(list[OneTimeAccessToken], status_code(int), headers(HTTPHeaderDict))
    """
    params = locals()

    filters = ['shared_bundles', 'shared_directories', 'shared_bundles__asset',
               'user', 'created_by', 'ordering', 'limit', 'offset']

    # Reject keyword arguments this endpoint does not understand.
    accepted = filters + ['async_req', '_return_http_data_only',
                          '_preload_content', '_request_timeout']
    for key, val in six.iteritems(params['kwargs']):
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s' to method get_all_access_tokens" % key
            )
        params[key] = val
    del params['kwargs']

    # Forward only the filters the caller actually supplied, in a fixed order.
    query_params = [(arg, params[arg])
                    for arg in filters
                    if params.get(arg) is not None]  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(
            ['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/2/auth/access-tokens', 'GET',
        {},     # path params
        query_params,
        header_params,
        body=None,
        post_params=[],
        files={},
        response_type='list[OneTimeAccessToken]',  # noqa: E501
        auth_settings=['Bearer'],  # noqa: E501
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def login(self, auth_login_endpoint_request, **kwargs):  # noqa: E501
    """login  # noqa: E501

    Authenticate with credentials and obtain a session/token.
    Required permissions: <class 'rest_framework.permissions.AllowAny'>.
    Synchronous by default; pass ``async_req=True`` to get a request thread.

    :param async_req bool: execute request asynchronously
    :param AuthLoginEndpointRequest auth_login_endpoint_request: (required)
    :param _preload_content: if False, return the raw urllib3.HTTPResponse
    :param _request_timeout: total timeout, or a (connection, read) tuple
    :return: AuthLoginEndpointResponse
    """
    # Delegate to the *_with_http_info variant, asking for payload only.
    return self.login_with_http_info(
        auth_login_endpoint_request,
        **dict(kwargs, _return_http_data_only=True))  # noqa: E501
def login_with_http_info(self, auth_login_endpoint_request, **kwargs):  # noqa: E501
    """login  # noqa: E501

    ### Required permissions * <class 'rest_framework.permissions.AllowAny'>  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run asynchronously
    and read the result from the returned thread via ``thread.get()``.

    >>> thread = api.login_with_http_info(auth_login_endpoint_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param AuthLoginEndpointRequest auth_login_endpoint_request: (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: tuple(AuthLoginEndpointResponse, status_code(int), headers(HTTPHeaderDict))
             If the method is called asynchronously,
             returns the request thread.
    """
    # Recognised keyword options; anything else is rejected up front.
    accepted = (
        'auth_login_endpoint_request', 'async_req',
        '_return_http_data_only', '_preload_content', '_request_timeout',
    )
    params = {'auth_login_endpoint_request': auth_login_endpoint_request}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method login" % key
            )
        params[key] = val
    # Client-side check of the required body parameter.
    if self.api_client.client_side_validation and params.get('auth_login_endpoint_request') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `auth_login_endpoint_request` when calling `login`")  # noqa: E501

    header_params = {
        'Accept': self.api_client.select_header_accept(['application/json']),  # noqa: E501
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/2/auth/login', 'POST',
        {},   # no path parameters
        [],   # no query parameters
        header_params,
        body=params['auth_login_endpoint_request'],
        post_params=[],
        files={},
        response_type='AuthLoginEndpointResponse',  # noqa: E501
        auth_settings=['Bearer'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def logout(self, **kwargs):  # noqa: E501
    """logout  # noqa: E501

    ### Required permissions * <class 'rest_framework.permissions.AllowAny'>  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run asynchronously,
    in which case the request thread is returned and the value is read via
    ``thread.get()``.

    >>> thread = api.logout(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the body
    # (which is None for this endpoint).
    kwargs.update(_return_http_data_only=True)
    return self.logout_with_http_info(**kwargs)  # noqa: E501
def logout_with_http_info(self, **kwargs):  # noqa: E501
    """logout  # noqa: E501

    ### Required permissions * <class 'rest_framework.permissions.AllowAny'>  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run asynchronously
    and read the result from the returned thread via ``thread.get()``.

    >>> thread = api.logout_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Recognised keyword options; anything else is rejected up front.
    accepted = (
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    )
    params = {}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method logout" % key
            )
        params[key] = val

    # This endpoint takes no parameters and returns no body.
    return self.api_client.call_api(
        '/api/2/auth/logout', 'POST',
        {},   # no path parameters
        [],   # no query parameters
        {},   # no extra headers
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['Bearer'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def reset_password(self, password_reset_endpoint_request, **kwargs):  # noqa: E501
    """reset_password  # noqa: E501

    ### Required permissions * <class 'rest_framework.permissions.AllowAny'>  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run asynchronously,
    in which case the request thread is returned and the value is read via
    ``thread.get()``.

    >>> thread = api.reset_password(password_reset_endpoint_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param PasswordResetEndpointRequest password_reset_endpoint_request: (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the body.
    kwargs.update(_return_http_data_only=True)
    return self.reset_password_with_http_info(password_reset_endpoint_request, **kwargs)  # noqa: E501
def reset_password_with_http_info(self, password_reset_endpoint_request, **kwargs):  # noqa: E501
    """reset_password  # noqa: E501

    ### Required permissions * <class 'rest_framework.permissions.AllowAny'>  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run asynchronously
    and read the result from the returned thread via ``thread.get()``.

    >>> thread = api.reset_password_with_http_info(password_reset_endpoint_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param PasswordResetEndpointRequest password_reset_endpoint_request: (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Recognised keyword options; anything else is rejected up front.
    accepted = (
        'password_reset_endpoint_request', 'async_req',
        '_return_http_data_only', '_preload_content', '_request_timeout',
    )
    params = {'password_reset_endpoint_request': password_reset_endpoint_request}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method reset_password" % key
            )
        params[key] = val
    # Client-side check of the required body parameter.
    if self.api_client.client_side_validation and params.get('password_reset_endpoint_request') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `password_reset_endpoint_request` when calling `reset_password`")  # noqa: E501

    # Only the request Content-Type is negotiated; the endpoint returns no body.
    header_params = {
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/2/auth/reset-password', 'POST',
        {},   # no path parameters
        [],   # no query parameters
        header_params,
        body=params['password_reset_endpoint_request'],
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['Bearer'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def send_access_token_email_notification(self, id, send_link_email_request, **kwargs):  # noqa: E501
    """send_access_token_email_notification  # noqa: E501

    ### Required permissions * Authenticated user  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run asynchronously,
    in which case the request thread is returned and the value is read via
    ``thread.get()``.

    >>> thread = api.send_access_token_email_notification(id, send_link_email_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this one time access token. (required)
    :param SendLinkEmailRequest send_link_email_request: (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the body.
    kwargs.update(_return_http_data_only=True)
    return self.send_access_token_email_notification_with_http_info(id, send_link_email_request, **kwargs)  # noqa: E501
def send_access_token_email_notification_with_http_info(self, id, send_link_email_request, **kwargs):  # noqa: E501
    """send_access_token_email_notification  # noqa: E501

    ### Required permissions * Authenticated user  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run asynchronously
    and read the result from the returned thread via ``thread.get()``.

    >>> thread = api.send_access_token_email_notification_with_http_info(id, send_link_email_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param int id: A unique integer value identifying this one time access token. (required)
    :param SendLinkEmailRequest send_link_email_request: (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Recognised keyword options; anything else is rejected up front.
    accepted = (
        'id', 'send_link_email_request', 'async_req',
        '_return_http_data_only', '_preload_content', '_request_timeout',
    )
    params = {'id': id, 'send_link_email_request': send_link_email_request}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method send_access_token_email_notification" % key
            )
        params[key] = val
    # Client-side checks of the required parameters.
    if self.api_client.client_side_validation and params.get('id') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `id` when calling `send_access_token_email_notification`")  # noqa: E501
    if self.api_client.client_side_validation and params.get('send_link_email_request') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `send_link_email_request` when calling `send_access_token_email_notification`")  # noqa: E501

    # `id` is substituted into the URL template; the request body carries
    # the e-mail payload.
    path_params = {'id': params['id']}
    header_params = {
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/2/auth/access-tokens/{id}/email', 'POST',
        path_params,
        [],   # no query parameters
        header_params,
        body=params['send_link_email_request'],
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['Bearer'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def start_impersonation(self, impersonation_endpoint_request, **kwargs):  # noqa: E501
    """start_impersonation  # noqa: E501

    ### Required permissions * User account permission: `system:admin-access`  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run asynchronously,
    in which case the request thread is returned and the value is read via
    ``thread.get()``.

    >>> thread = api.start_impersonation(impersonation_endpoint_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param ImpersonationEndpointRequest impersonation_endpoint_request: (required)
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the body.
    kwargs.update(_return_http_data_only=True)
    return self.start_impersonation_with_http_info(impersonation_endpoint_request, **kwargs)  # noqa: E501
def start_impersonation_with_http_info(self, impersonation_endpoint_request, **kwargs):  # noqa: E501
    """start_impersonation  # noqa: E501

    ### Required permissions * User account permission: `system:admin-access`  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run asynchronously
    and read the result from the returned thread via ``thread.get()``.

    >>> thread = api.start_impersonation_with_http_info(impersonation_endpoint_request, async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param ImpersonationEndpointRequest impersonation_endpoint_request: (required)
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Recognised keyword options; anything else is rejected up front.
    accepted = (
        'impersonation_endpoint_request', 'async_req',
        '_return_http_data_only', '_preload_content', '_request_timeout',
    )
    params = {'impersonation_endpoint_request': impersonation_endpoint_request}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method start_impersonation" % key
            )
        params[key] = val
    # Client-side check of the required body parameter.
    if self.api_client.client_side_validation and params.get('impersonation_endpoint_request') is None:  # noqa: E501
        raise ApiValueError("Missing the required parameter `impersonation_endpoint_request` when calling `start_impersonation`")  # noqa: E501

    header_params = {
        'Content-Type': self.api_client.select_header_content_type(['application/json']),  # noqa: E501
    }

    return self.api_client.call_api(
        '/api/2/auth/impersonation', 'POST',
        {},   # no path parameters
        [],   # no query parameters
        header_params,
        body=params['impersonation_endpoint_request'],
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['Bearer'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
def stop_impersonation(self, **kwargs):  # noqa: E501
    """stop_impersonation  # noqa: E501

    ### Required permissions * Authenticated user  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run asynchronously,
    in which case the request thread is returned and the value is read via
    ``thread.get()``.

    >>> thread = api.stop_impersonation(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Delegate to the *_with_http_info variant, keeping only the body
    # (which is None for this endpoint).
    kwargs.update(_return_http_data_only=True)
    return self.stop_impersonation_with_http_info(**kwargs)  # noqa: E501
def stop_impersonation_with_http_info(self, **kwargs):  # noqa: E501
    """stop_impersonation  # noqa: E501

    ### Required permissions * Authenticated user  # noqa: E501

    Synchronous by default; pass ``async_req=True`` to run asynchronously
    and read the result from the returned thread via ``thread.get()``.

    >>> thread = api.stop_impersonation_with_http_info(async_req=True)
    >>> result = thread.get()

    :param async_req bool: execute request asynchronously
    :param _return_http_data_only: response data without head status code
                                   and headers
    :param _preload_content: if False, the urllib3.HTTPResponse object will
                             be returned without reading/decoding response
                             data. Default is True.
    :param _request_timeout: timeout setting for this request. If one
                             number provided, it will be total request
                             timeout. It can also be a pair (tuple) of
                             (connection, read) timeouts.
    :return: None
             If the method is called asynchronously,
             returns the request thread.
    """
    # Recognised keyword options; anything else is rejected up front.
    accepted = (
        'async_req', '_return_http_data_only',
        '_preload_content', '_request_timeout',
    )
    params = {}
    for key, val in kwargs.items():
        if key not in accepted:
            raise ApiTypeError(
                "Got an unexpected keyword argument '%s'"
                " to method stop_impersonation" % key
            )
        params[key] = val

    # This endpoint takes no parameters and returns no body.
    return self.api_client.call_api(
        '/api/2/auth/impersonation/stop', 'POST',
        {},   # no path parameters
        [],   # no query parameters
        {},   # no extra headers
        body=None,
        post_params=[],
        files={},
        response_type=None,  # noqa: E501
        auth_settings=['Bearer'],
        async_req=params.get('async_req'),
        _return_http_data_only=params.get('_return_http_data_only'),  # noqa: E501
        _preload_content=params.get('_preload_content', True),
        _request_timeout=params.get('_request_timeout'),
        collection_formats={})
| 46.82756 | 157 | 0.608182 | 6,892 | 62,187 | 5.218369 | 0.032792 | 0.042486 | 0.059169 | 0.030029 | 0.954817 | 0.945697 | 0.939302 | 0.923982 | 0.909634 | 0.90221 | 0 | 0.014538 | 0.318636 | 62,187 | 1,327 | 158 | 46.862849 | 0.834254 | 0.470146 | 0 | 0.758148 | 1 | 0 | 0.182722 | 0.072091 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042882 | false | 0.02916 | 0.008576 | 0 | 0.09434 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
ab7b4606986da2aaa5e9779783141d77254b60fc | 7,235 | py | Python | loldib/getratings/models/NA/na_twistedfate/na_twistedfate_bot.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_twistedfate/na_twistedfate_bot.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_twistedfate/na_twistedfate_bot.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | from getratings.models.ratings import Ratings
# One empty ``Ratings`` subclass per opposing champion for Twisted Fate
# played bot lane in the NA region.  The classes carry no behaviour of
# their own; only their names (``NA_TwistedFate_Bot_<Champion>``) are used
# by the rating lookup machinery.
_OPPONENTS = (
    'Aatrox', 'Ahri', 'Akali', 'Alistar', 'Amumu', 'Anivia', 'Annie',
    'Ashe', 'AurelionSol', 'Azir', 'Bard', 'Blitzcrank', 'Brand', 'Braum',
    'Caitlyn', 'Camille', 'Cassiopeia', 'Chogath', 'Corki', 'Darius',
    'Diana', 'Draven', 'DrMundo', 'Ekko', 'Elise', 'Evelynn', 'Ezreal',
    'Fiddlesticks', 'Fiora', 'Fizz', 'Galio', 'Gangplank', 'Garen', 'Gnar',
    'Gragas', 'Graves', 'Hecarim', 'Heimerdinger', 'Illaoi', 'Irelia',
    'Ivern', 'Janna', 'JarvanIV', 'Jax', 'Jayce', 'Jhin', 'Jinx',
    'Kalista', 'Karma', 'Karthus', 'Kassadin', 'Katarina', 'Kayle', 'Kayn',
    'Kennen', 'Khazix', 'Kindred', 'Kled', 'KogMaw', 'Leblanc', 'LeeSin',
    'Leona', 'Lissandra', 'Lucian', 'Lulu', 'Lux', 'Malphite', 'Malzahar',
    'Maokai', 'MasterYi', 'MissFortune', 'MonkeyKing', 'Mordekaiser',
    'Morgana', 'Nami', 'Nasus', 'Nautilus', 'Nidalee', 'Nocturne', 'Nunu',
    'Olaf', 'Orianna', 'Ornn', 'Pantheon', 'Poppy', 'Quinn', 'Rakan',
    'Rammus', 'RekSai', 'Renekton', 'Rengar', 'Riven', 'Rumble', 'Ryze',
    'Sejuani', 'Shaco', 'Shen', 'Shyvana', 'Singed', 'Sion', 'Sivir',
    'Skarner', 'Sona', 'Soraka', 'Swain', 'Syndra', 'TahmKench', 'Taliyah',
    'Talon', 'Taric', 'Teemo', 'Thresh', 'Tristana', 'Trundle',
    'Tryndamere', 'TwistedFate', 'Twitch', 'Udyr', 'Urgot', 'Varus',
    'Vayne', 'Veigar', 'Velkoz', 'Vi', 'Viktor', 'Vladimir', 'Volibear',
    'Warwick', 'Xayah', 'Xerath', 'XinZhao', 'Yasuo', 'Yorick', 'Zac',
    'Zed', 'Ziggs', 'Zilean', 'Zyra',
)

for _opponent in _OPPONENTS:
    _cls_name = 'NA_TwistedFate_Bot_' + _opponent
    # ``type(Ratings)(...)`` goes through Ratings' own metaclass, so the
    # generated classes behave exactly like a ``class`` statement would.
    globals()[_cls_name] = type(Ratings)(
        _cls_name, (Ratings,),
        {'__module__': __name__, '__qualname__': _cls_name},
    )

del _opponent, _cls_name, _OPPONENTS
| 17.35012 | 48 | 0.788943 | 972 | 7,235 | 5.446502 | 0.151235 | 0.182471 | 0.46921 | 0.547412 | 0.828107 | 0.828107 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153559 | 7,235 | 416 | 49 | 17.391827 | 0.864468 | 0 | 0 | 0.498195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.498195 | 0.00361 | 0 | 0.501805 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 7 |
ab8310cee8941406c6e0bf018ead2315721a8ca5 | 41,220 | py | Python | tensorlayerx/nn/layers/pooling.py | tensorlayer/TensorLayerX | 4e3e6f13687309dda7787f0b86e35a62bb3adbad | [
"Apache-2.0"
] | 34 | 2021-12-03T08:19:23.000Z | 2022-03-13T08:34:34.000Z | tensorlayerx/nn/layers/pooling.py | tensorlayer/TensorLayerX | 4e3e6f13687309dda7787f0b86e35a62bb3adbad | [
"Apache-2.0"
] | null | null | null | tensorlayerx/nn/layers/pooling.py | tensorlayer/TensorLayerX | 4e3e6f13687309dda7787f0b86e35a62bb3adbad | [
"Apache-2.0"
] | 3 | 2021-12-28T16:57:20.000Z | 2022-03-18T02:23:14.000Z | #! /usr/bin/python
# -*- coding: utf-8 -*-
import tensorlayerx as tlx
from tensorlayerx import logging
from tensorlayerx.nn.core import Module
# Public names exported by ``from tensorlayerx.nn.layers.pooling import *``.
__all__ = [
    'PoolLayer',
    'MaxPool1d',
    'MeanPool1d',
    'MaxPool2d',
    'MeanPool2d',
    'MaxPool3d',
    'MeanPool3d',
    'GlobalMaxPool1d',
    'GlobalMeanPool1d',
    'GlobalMaxPool2d',
    'GlobalMeanPool2d',
    'GlobalMaxPool3d',
    'GlobalMeanPool3d',
    'AdaptiveMeanPool1d',
    'AdaptiveMeanPool2d',
    'AdaptiveMeanPool3d',
    'AdaptiveMaxPool1d',
    'AdaptiveMaxPool2d',
    'AdaptiveMaxPool3d',
    'CornerPool2d',
]
class PoolLayer(Module):
    """
    The :class:`PoolLayer` class is a generic pooling layer.
    You can choose ``tlx.ops.max_pool`` and ``tlx.ops.avg_pool`` for 2D input or
    ``tlx.ops.max_pool3d`` and ``tlx.ops.avg_pool3d`` for 3D input.

    Parameters
    ----------
    kernel_size : tuple of int
        The size of the window for each dimension of the input tensor.
        Note that: len(kernel_size) >= 4.
    stride : tuple of int
        The stride of the sliding window for each dimension of the input tensor.
        Note that: len(stride) >= 4.
    padding : str
        The padding algorithm type: "SAME" or "VALID".
    pool : pooling function
        One of ``tlx.ops.max_pool``, ``tlx.ops.avg_pool``, ``tlx.ops.max_pool3d`` and ``f.ops.avg_pool3d``.
        See `TensorFlow pooling APIs <https://tensorflow.google.cn/versions/r2.0/api_docs/python/tf/nn/>`__
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 50, 50, 32], name='input')
    >>> net = tlx.nn.PoolLayer()(net)
    >>> output shape : [10, 25, 25, 32]

    """

    def __init__(
        self,
        kernel_size=(1, 2, 2, 1),
        stride=(1, 2, 2, 1),
        padding='SAME',
        pool=tlx.ops.MaxPool,
        name=None  # 'pool_pro',
    ):
        super().__init__(name)
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.pool = pool

        # The layer has no shape-dependent state, so it can be built eagerly.
        self.build()
        self._built = True

        logging.info(
            "PoolLayer %s: kernel_size: %s stride: %s padding: %s pool: %s" %
            (self.name, str(self.kernel_size), str(self.stride), self.padding, pool.__name__)
        )

    def __repr__(self):
        # Fixed: the previous template rendered the *stride* value under the
        # ``kernel_size`` label (``kernel_size={stride}``) and never showed
        # the actual kernel size.
        s = '{classname}(pool={poolname}, kernel_size={kernel_size}, stride={stride}, padding={padding}'
        if self.name is not None:
            s += ', name=\'{name}\''
        s += ')'
        return s.format(classname=self.__class__.__name__, poolname=self.pool.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # Instantiate the concrete pooling op once; it is reused on every
        # forward pass.
        self._pool = self.pool(ksize=self.kernel_size, strides=self.stride, padding=self.padding)

    def forward(self, inputs):
        outputs = self._pool(inputs)

        if not self._nodes_fixed and self._build_graph:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class MaxPool1d(Module):
    """Max pooling over a 1D input signal.

    Parameters
    ----------
    kernel_size : int
        Size of the pooling window.
    stride : int
        Step between successive pooling windows.
    padding : str or int
        The padding method: 'VALID' or 'SAME'.
    data_format : str
        Either 'channels_last' (default, [batch, length, channel]) or
        'channels_first'. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 50, 32], name='input')
    >>> net = tlx.nn.MaxPool1d(kernel_size=3, stride=2, padding='SAME', name='maxpool1d')(net)
    >>> output shape : [10, 25, 32]

    """

    def __init__(
        self,
        kernel_size=3,
        stride=2,
        padding='SAME',
        data_format='channels_last',
        name=None  # 'maxpool1d'
    ):
        super().__init__(name)
        self._filter_size = kernel_size
        self.kernel_size = kernel_size
        self._stride = stride
        self.stride = stride
        self.padding = padding
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info(
            "MaxPool1d %s: kernel_size: %s stride: %s padding: %s" %
            (self.name, str(kernel_size), str(stride), str(padding))
        )

    def __repr__(self):
        rep = ('{classname}(kernel_size={kernel_size}' ', stride={stride}, padding={padding}')
        if self.name is not None:
            rep += ', name=\'{name}\''
        rep += ')'
        return rep.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # The backend 1D pooling op expects list-valued kernel and stride.
        self._filter_size = [self.kernel_size]
        self._stride = [self.stride]
        self.max_pool = tlx.ops.MaxPool1d(
            ksize=self._filter_size, strides=self._stride, padding=self.padding, data_format=self.data_format
        )

    def forward(self, inputs):
        outputs = self.max_pool(inputs)

        if self._build_graph and not self._nodes_fixed:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class MeanPool1d(Module):
    """Mean pooling for 1D signal.

    Parameters
    ------------
    kernel_size : int
        Pooling window size.
    stride : int
        Strides of the pooling operation.
    padding : int, tuple or str
        The padding method: 'VALID' or 'SAME'.
    data_format : str
        One of channels_last (default, [batch, length, channel]) or channels_first. The ordering of the dimensions in the inputs.
    dilation_rate : int
        Accepted for backward API compatibility only; the backend
        ``AvgPool1d`` op does not support dilation, so this value is
        stored but has no effect on the computation.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 50, 32], name='input')
    >>> net = tlx.nn.MeanPool1d(kernel_size=3, stride=2, padding='SAME')(net)
    >>> output shape : [10, 25, 32]

    """

    def __init__(
        self,
        kernel_size=3,
        stride=2,
        padding='SAME',
        data_format='channels_last',
        dilation_rate=1,
        name=None  # 'meanpool1d'
    ):
        super().__init__(name)
        self.kernel_size = self._filter_size = kernel_size
        self.stride = self._stride = stride
        self.padding = padding
        self.data_format = data_format
        # Previously this argument was silently discarded; keep it on the
        # instance so the configuration is at least inspectable.
        self.dilation_rate = dilation_rate
        self.build()
        self._built = True

        logging.info(
            "MeanPool1d %s: kernel_size: %s stride: %s padding: %s" %
            (self.name, str(kernel_size), str(stride), str(padding))
        )

    def __repr__(self):
        s = ('{classname}(kernel_size={kernel_size}' ', stride={stride}, padding={padding}')
        if self.name is not None:
            s += ', name=\'{name}\''
        s += ')'
        return s.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # The backend 1D pooling op expects list-valued kernel and stride.
        self._filter_size = [self.kernel_size]
        self._stride = [self.stride]
        self.avg_pool = tlx.ops.AvgPool1d(
            ksize=self._filter_size, strides=self._stride, padding=self.padding, data_format=self.data_format
        )

    def forward(self, inputs):
        outputs = self.avg_pool(inputs)

        if not self._nodes_fixed and self._build_graph:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class MaxPool2d(Module):
    """Max pooling over a 2D image.

    Parameters
    -----------
    kernel_size : tuple or int
        (height, width) of the pooling window.
    stride : tuple or int
        (height, width) step of the window; defaults to ``kernel_size``
        when ``None`` is given.
    padding : int, tuple or str
        The padding method: 'VALID' or 'SAME'.
    data_format : str
        Either 'channels_last' (default, [batch, height, width, channel])
        or 'channels_first'. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 50, 50, 32], name='input')
    >>> net = tlx.nn.MaxPool2d(kernel_size=(3, 3), stride=(2, 2), padding='SAME')(net)
    >>> output shape : [10, 25, 25, 32]

    """

    def __init__(
        self,
        kernel_size=(3, 3),
        stride=(2, 2),
        padding='SAME',
        data_format='channels_last',
        name=None  # 'maxpool2d'
    ):
        super().__init__(name)
        self.kernel_size = self.check_param(kernel_size)
        stride = self.kernel_size if stride is None else self.check_param(stride)
        self.stride = stride
        self._stride = stride
        self.padding = padding
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info(
            "MaxPool2d %s: kernel_size: %s stride: %s padding: %s" %
            (self.name, str(kernel_size), str(stride), str(padding))
        )

    def __repr__(self):
        rep = ('{classname}(kernel_size={kernel_size}' ', stride={stride}, padding={padding}')
        if self.name is not None:
            rep += ', name=\'{name}\''
        rep += ')'
        return rep.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # Expand the 2-element stride into the rank-4 stride the backend expects.
        if self.data_format == 'channels_first':
            self._stride = [1, 1, self.stride[0], self.stride[1]]
        elif self.data_format == 'channels_last':
            self._stride = [1, self.stride[0], self.stride[1], 1]
        else:
            raise Exception("unsupported data format")
        self.max_pool = tlx.ops.MaxPool(
            ksize=self.kernel_size, strides=self._stride, padding=self.padding, data_format=self.data_format
        )

    def forward(self, inputs):
        outputs = self.max_pool(inputs)

        if self._build_graph and not self._nodes_fixed:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class MeanPool2d(Module):
    """Mean pooling over a 2D image [batch, height, width, channel].

    Parameters
    -----------
    kernel_size : tuple or int
        (height, width) of the pooling window.
    stride : tuple or int
        (height, width) step of the window; defaults to ``kernel_size``
        when ``None`` is given.
    padding : int, tuple or str
        The padding method: 'VALID' or 'SAME'.
    data_format : str
        Either 'channels_last' (default, [batch, height, width, channel])
        or 'channels_first'. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 50, 50, 32], name='input')
    >>> net = tlx.nn.MeanPool2d(kernel_size=(3, 3), stride=(2, 2), padding='SAME')(net)
    >>> output shape : [10, 25, 25, 32]

    """

    def __init__(
        self,
        kernel_size=(3, 3),
        stride=(2, 2),
        padding='SAME',
        data_format='channels_last',
        name=None  # 'meanpool2d'
    ):
        super().__init__(name)
        self.kernel_size = self.check_param(kernel_size)
        stride = self.kernel_size if stride is None else self.check_param(stride)
        self.stride = stride
        self._stride = stride
        self.padding = padding
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info(
            "MeanPool2d %s: kernel_size: %s stride: %s padding: %s" %
            (self.name, str(kernel_size), str(stride), str(padding))
        )

    def __repr__(self):
        rep = ('{classname}(kernel_size={kernel_size}' ', stride={stride}, padding={padding}')
        if self.name is not None:
            rep += ', name=\'{name}\''
        rep += ')'
        return rep.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # Expand the 2-element stride into the rank-4 stride the backend expects.
        if self.data_format == 'channels_first':
            self._stride = [1, 1, self.stride[0], self.stride[1]]
        elif self.data_format == 'channels_last':
            self._stride = [1, self.stride[0], self.stride[1], 1]
        else:
            raise Exception("unsupported data format")
        self.avg_pool = tlx.ops.AvgPool(
            ksize=self.kernel_size, strides=self._stride, padding=self.padding, data_format=self.data_format
        )

    def forward(self, inputs):
        outputs = self.avg_pool(inputs)

        if self._build_graph and not self._nodes_fixed:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class MaxPool3d(Module):
    """Max pooling over a 3D volume.

    Parameters
    ------------
    kernel_size : tuple or int
        Pooling window size.
    stride : tuple or int
        Strides of the pooling operation.
    padding : int, tuple or str
        The padding method: 'VALID' or 'SAME'.
    data_format : str
        Either 'channels_last' (default, [batch, depth, height, width, channel])
        or 'channels_first'. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Returns
    -------
    :class:`tf.Tensor`
        A max pooling 3-D layer with a output rank as 5.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 50, 50, 50, 32], name='input')
    >>> net = tlx.nn.MaxPool3d(kernel_size=(3, 3, 3), stride=(2, 2, 2), padding='SAME')(net)
    >>> output shape : [10, 25, 25, 25, 32]

    """

    def __init__(
        self,
        kernel_size=(3, 3, 3),
        stride=(2, 2, 2),
        padding='VALID',
        data_format='channels_last',
        name=None  # 'maxpool3d'
    ):
        super().__init__(name)
        self.kernel_size = self.check_param(kernel_size, '3d')
        self._stride = self.check_param(stride, '3d')
        self.stride = self._stride
        self.padding = padding
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info(
            "MaxPool3d %s: kernel_size: %s stride: %s padding: %s" %
            (self.name, str(kernel_size), str(stride), str(padding))
        )

    def __repr__(self):
        rep = ('{classname}(kernel_size={kernel_size}' ', stride={stride}, padding={padding}')
        if self.name is not None:
            rep += ', name=\'{name}\''
        rep += ')'
        return rep.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # Expand the 3-element stride into the rank-5 stride the backend expects.
        if self.data_format == 'channels_first':
            self._stride = [1, 1, self.stride[0], self.stride[1], self.stride[2]]
        elif self.data_format == 'channels_last':
            self._stride = [1, self.stride[0], self.stride[1], self.stride[2], 1]
        else:
            raise Exception("unsupported data format")
        self.max_pool3d = tlx.ops.MaxPool3d(
            ksize=self.kernel_size, strides=self._stride, padding=self.padding, data_format=self.data_format
        )

    def forward(self, inputs):
        outputs = self.max_pool3d(inputs)

        if self._build_graph and not self._nodes_fixed:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class MeanPool3d(Module):
    """Mean pooling for 3D volume.

    Parameters
    ------------
    kernel_size : tuple or int
        Pooling window size.
    stride : tuple or int
        Strides of the pooling operation.
    padding : int, tuple or str
        The padding method: 'VALID' or 'SAME'.
    data_format : str
        One of channels_last (default, [batch, depth, height, width, channel]) or channels_first. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Returns
    -------
    :class:`tf.Tensor`
        A mean pooling 3-D layer with a output rank as 5.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 50, 50, 50, 32], name='input')
    >>> net = tlx.nn.MeanPool3d(kernel_size=(3, 3, 3), stride=(2, 2, 2), padding='SAME')(net)
    >>> output shape : [10, 25, 25, 25, 32]

    """

    def __init__(
        self,
        kernel_size=(3, 3, 3),
        stride=(2, 2, 2),
        padding='VALID',
        data_format='channels_last',
        name=None  # 'meanpool3d'
    ):
        super().__init__(name)
        self.kernel_size = self.check_param(kernel_size, '3d')
        self.stride = self._stride = self.check_param(stride, '3d')
        self.padding = padding
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info(
            "MeanPool3d %s: kernel_size: %s stride: %s padding: %s" %
            (self.name, str(kernel_size), str(stride), str(padding))
        )

    def __repr__(self):
        s = ('{classname}(kernel_size={kernel_size}' ', stride={stride}, padding={padding}')
        if self.name is not None:
            s += ', name=\'{name}\''
        s += ')'
        return s.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # Bug fix: previously the stride list was always built in channels_last
        # order regardless of data_format, which is wrong for channels_first
        # inputs. Mirror the handling used by MaxPool3d.
        if self.data_format == 'channels_last':
            self._stride = [1, self.stride[0], self.stride[1], self.stride[2], 1]
        elif self.data_format == 'channels_first':
            self._stride = [1, 1, self.stride[0], self.stride[1], self.stride[2]]
        else:
            raise Exception("unsupported data format")
        self.avg_pool3d = tlx.ops.AvgPool3d(
            ksize=self.kernel_size, strides=self._stride, padding=self.padding, data_format=self.data_format
        )

    def forward(self, inputs):
        outputs = self.avg_pool3d(inputs)

        if not self._nodes_fixed and self._build_graph:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class GlobalMaxPool1d(Module):
    """The :class:`GlobalMaxPool1d` class is a 1D Global Max Pooling layer.

    Parameters
    ------------
    data_format : str
        Either 'channels_last' (default, [batch, length, channel]) or
        'channels_first'. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 100, 30], name='input')
    >>> net = tlx.nn.GlobalMaxPool1d()(net)
    >>> output shape : [10, 30]

    """

    def __init__(
        self,
        data_format="channels_last",
        name=None  # 'globalmaxpool1d'
    ):
        super().__init__(name)
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info("GlobalMaxPool1d %s" % self.name)

    def __repr__(self):
        rep = '{classname}('
        if self.name is not None:
            rep += 'name=\'{name}\''
        rep += ')'
        return rep.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # Reduce over the length axis: axis 1 for NLC, axis 2 for NCL.
        axis_by_format = {'channels_last': 1, 'channels_first': 2}
        if self.data_format not in axis_by_format:
            raise ValueError(
                "`data_format` should have one of the following values: [`channels_last`, `channels_first`]"
            )
        self.reduce_max = tlx.ReduceMax(axis=axis_by_format[self.data_format])

    def forward(self, inputs):
        outputs = self.reduce_max(inputs)

        if self._build_graph and not self._nodes_fixed:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class GlobalMeanPool1d(Module):
    """The :class:`GlobalMeanPool1d` class is a 1D Global Mean Pooling layer.

    Parameters
    ------------
    data_format : str
        Either 'channels_last' (default, [batch, length, channel]) or
        'channels_first'. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 100, 30], name='input')
    >>> net = tlx.nn.GlobalMeanPool1d()(net)
    >>> output shape : [10, 30]

    """

    def __init__(
        self,
        data_format='channels_last',
        name=None  # 'globalmeanpool1d'
    ):
        super().__init__(name)
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info("GlobalMeanPool1d %s" % self.name)

    def __repr__(self):
        rep = '{classname}('
        if self.name is not None:
            rep += 'name=\'{name}\''
        rep += ')'
        return rep.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # Average over the length axis: axis 1 for NLC, axis 2 for NCL.
        axis_by_format = {'channels_last': 1, 'channels_first': 2}
        if self.data_format not in axis_by_format:
            raise ValueError(
                "`data_format` should have one of the following values: [`channels_last`, `channels_first`]"
            )
        self.reduce_mean = tlx.ReduceMean(axis=axis_by_format[self.data_format])

    def forward(self, inputs):
        outputs = self.reduce_mean(inputs)

        if self._build_graph and not self._nodes_fixed:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class GlobalMaxPool2d(Module):
    """The :class:`GlobalMaxPool2d` class is a 2D Global Max Pooling layer.

    Parameters
    ------------
    data_format : str
        Either 'channels_last' (default, [batch, height, width, channel]) or
        'channels_first'. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 100, 100, 30], name='input')
    >>> net = tlx.nn.GlobalMaxPool2d()(net)
    >>> output shape : [10, 30]

    """

    def __init__(
        self,
        data_format='channels_last',
        name=None  # 'globalmaxpool2d'
    ):
        super().__init__(name)
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info("GlobalMaxPool2d %s" % self.name)

    def __repr__(self):
        rep = '{classname}('
        if self.name is not None:
            rep += 'name=\'{name}\''
        rep += ')'
        return rep.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # Reduce over the spatial axes: (1, 2) for NHWC, (2, 3) for NCHW.
        axes_by_format = {'channels_last': [1, 2], 'channels_first': [2, 3]}
        if self.data_format not in axes_by_format:
            raise ValueError(
                "`data_format` should have one of the following values: [`channels_last`, `channels_first`]"
            )
        self.reduce_max = tlx.ReduceMax(axis=axes_by_format[self.data_format])

    def forward(self, inputs):
        outputs = self.reduce_max(inputs)

        if self._build_graph and not self._nodes_fixed:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class GlobalMeanPool2d(Module):
    """The :class:`GlobalMeanPool2d` class is a 2D Global Mean Pooling layer.

    Parameters
    ------------
    data_format : str
        Either 'channels_last' (default, [batch, height, width, channel]) or
        'channels_first'. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 100, 100, 30], name='input')
    >>> net = tlx.nn.GlobalMeanPool2d()(net)
    >>> output shape : [10, 30]

    """

    def __init__(
        self,
        data_format='channels_last',
        name=None  # 'globalmeanpool2d'
    ):
        super().__init__(name)
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info("GlobalMeanPool2d %s" % self.name)

    def __repr__(self):
        rep = '{classname}('
        if self.name is not None:
            rep += 'name=\'{name}\''
        rep += ')'
        return rep.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # Average over the spatial axes: (1, 2) for NHWC, (2, 3) for NCHW.
        axes_by_format = {'channels_last': [1, 2], 'channels_first': [2, 3]}
        if self.data_format not in axes_by_format:
            raise ValueError(
                "`data_format` should have one of the following values: [`channels_last`, `channels_first`]"
            )
        self.reduce_mean = tlx.ReduceMean(axis=axes_by_format[self.data_format])

    def forward(self, inputs):
        outputs = self.reduce_mean(inputs)

        if self._build_graph and not self._nodes_fixed:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class GlobalMaxPool3d(Module):
    """The :class:`GlobalMaxPool3d` class is a 3D Global Max Pooling layer.

    Parameters
    ------------
    data_format : str
        Either 'channels_last' (default, [batch, depth, height, width, channel])
        or 'channels_first'. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 100, 100, 100, 30], name='input')
    >>> net = tlx.nn.GlobalMaxPool3d()(net)
    >>> output shape : [10, 30]

    """

    def __init__(
        self,
        data_format='channels_last',
        name=None  # 'globalmaxpool3d'
    ):
        super().__init__(name)
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info("GlobalMaxPool3d %s" % self.name)

    def __repr__(self):
        rep = '{classname}('
        if self.name is not None:
            rep += 'name=\'{name}\''
        rep += ')'
        return rep.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # Reduce over depth/height/width: (1, 2, 3) for NDHWC, (2, 3, 4) for NCDHW.
        axes_by_format = {'channels_last': [1, 2, 3], 'channels_first': [2, 3, 4]}
        if self.data_format not in axes_by_format:
            raise ValueError(
                "`data_format` should have one of the following values: [`channels_last`, `channels_first`]"
            )
        self.reduce_max = tlx.ReduceMax(axis=axes_by_format[self.data_format])

    def forward(self, inputs):
        outputs = self.reduce_max(inputs)

        if self._build_graph and not self._nodes_fixed:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class GlobalMeanPool3d(Module):
    """The :class:`GlobalMeanPool3d` class is a 3D Global Mean Pooling layer.

    Parameters
    ------------
    data_format : str
        Either 'channels_last' (default, [batch, depth, height, width, channel])
        or 'channels_first'. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 100, 100, 100, 30], name='input')
    >>> net = tlx.nn.GlobalMeanPool3d()(net)
    >>> output shape : [10, 30]

    """

    def __init__(
        self,
        data_format='channels_last',
        name=None  # 'globalmeanpool3d'
    ):
        super().__init__(name)
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info("GlobalMeanPool3d %s" % self.name)

    def __repr__(self):
        rep = '{classname}('
        if self.name is not None:
            rep += 'name=\'{name}\''
        rep += ')'
        return rep.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # Average over depth/height/width: (1, 2, 3) for NDHWC, (2, 3, 4) for NCDHW.
        axes_by_format = {'channels_last': [1, 2, 3], 'channels_first': [2, 3, 4]}
        if self.data_format not in axes_by_format:
            raise ValueError(
                "`data_format` should have one of the following values: [`channels_last`, `channels_first`]"
            )
        self.reduce_mean = tlx.ReduceMean(axis=axes_by_format[self.data_format])

    def forward(self, inputs):
        outputs = self.reduce_mean(inputs)

        if self._build_graph and not self._nodes_fixed:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class CornerPool2d(Module):
    """Corner pooling for 2D image [batch, height, width, channel], see `here <https://arxiv.org/abs/1808.01244>`__.

    Parameters
    ----------
    mode : str
        TopLeft for the top left corner,
        Bottomright for the bottom right corner.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 32, 32, 8], name='input')
    >>> net = tlx.nn.CornerPool2d(mode='TopLeft',name='cornerpool2d')(net)
    >>> output shape : [10, 32, 32, 8]

    """

    def __init__(
        self,
        mode='TopLeft',
        name=None  # 'cornerpool2d'
    ):
        super().__init__(name)
        self.mode = mode
        self.build()
        self._built = True
        logging.info("CornerPool2d %s : mode: %s" % (self.name, str(mode)))

    def __repr__(self):
        s = ('{classname}(mode={mode}')
        if self.name is not None:
            s += ', name=\'{name}\''
        s += ')'
        return s.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        pass

    def forward(self, inputs):
        # Bug fix: inputs are NHWC ([batch, height, width, channel]), so the
        # shape tuple unpacks as (batch, height, width, channel). The previous
        # code unpacked `_, input_width, input_height, _`, swapping the two and
        # producing wrong padding/pooling extents for non-square inputs.
        _, input_height, input_width, _ = tlx.get_tensor_shape(inputs)
        # Pad with the global minimum so the max-pool over the padded region
        # never picks up the padding value.
        batch_min = tlx.reduce_min(inputs)

        if self.mode == 'TopLeft':
            # Propagate maxima upward (over height) and leftward (over width).
            temp_bottom = tlx.pad(
                inputs, tlx.constant([[0, 0], [0, input_height - 1], [0, 0], [0, 0]]), constant_values=batch_min
            )
            temp_right = tlx.pad(
                inputs, tlx.constant([[0, 0], [0, 0], [0, input_width - 1], [0, 0]]), constant_values=batch_min
            )
            temp_bottom = tlx.ops.max_pool(temp_bottom, ksize=(input_height, 1), strides=(1, 1), padding='VALID')
            temp_right = tlx.ops.max_pool(temp_right, ksize=(1, input_width), strides=(1, 1), padding='VALID')
            outputs = tlx.add(temp_bottom, temp_right)  #, name=self.name)
        elif self.mode == 'BottomRight':
            # Propagate maxima downward (over height) and rightward (over width).
            temp_top = tlx.pad(
                inputs, tlx.constant([[0, 0], [input_height - 1, 0], [0, 0], [0, 0]]), constant_values=batch_min
            )
            temp_left = tlx.pad(
                inputs, tlx.constant([[0, 0], [0, 0], [input_width - 1, 0], [0, 0]]), constant_values=batch_min
            )
            temp_top = tlx.ops.max_pool(temp_top, ksize=(input_height, 1), strides=(1, 1), padding='VALID')
            temp_left = tlx.ops.max_pool(temp_left, ksize=(1, input_width), strides=(1, 1), padding='VALID')
            outputs = tlx.add(temp_top, temp_left)
        else:
            # Unknown mode: pass the input through unchanged.
            outputs = tlx.identity(inputs)

        if not self._nodes_fixed and self._build_graph:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class AdaptiveMeanPool1d(Module):
    """The :class:`AdaptiveMeanPool1d` class is a 1D Adaptive Mean Pooling layer.

    Parameters
    ------------
    output_size : int
        The target output size. It must be an integer.
    data_format : str
        Either 'channels_last' (default, [batch, width, channel]) or
        'channels_first'. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 32, 3], name='input')
    >>> net = tlx.nn.AdaptiveMeanPool1d(output_size=16)(net)
    >>> output shape : [10, 16, 3]

    """

    def __init__(self, output_size, data_format='channels_last', name=None):
        super(AdaptiveMeanPool1d, self).__init__(name)
        self.output_size = output_size
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info("AdaptiveMeanPool1d %s: output_size: %s " % (self.name, str(output_size)))

    def __repr__(self):
        rep = ('{classname}(output_size={output_size}')
        if self.name is not None:
            rep += ', name=\'{name}\''
        rep += ')'
        return rep.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # The backend op computes the window geometry from output_size itself.
        self.adaptivemeanpool1d = tlx.ops.AdaptiveMeanPool1D(output_size=self.output_size, data_format=self.data_format)

    def forward(self, inputs):
        outputs = self.adaptivemeanpool1d(inputs)

        if self._build_graph and not self._nodes_fixed:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class AdaptiveMeanPool2d(Module):
    """The :class:`AdaptiveMeanPool2d` class is a 2D Adaptive Mean Pooling layer.

    Parameters
    ------------
    output_size : int or list or tuple
        The target output size. It could be an int, [int, int] or (int, int).
    data_format : str
        Either 'channels_last' (default, [batch, height, width, channel]) or
        'channels_first'. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10,32, 32, 3], name='input')
    >>> net = tlx.nn.AdaptiveMeanPool2d(output_size=16)(net)
    >>> output shape : [10,16, 16, 3]

    """

    def __init__(self, output_size, data_format='channels_last', name=None):
        super(AdaptiveMeanPool2d, self).__init__(name)
        self.output_size = output_size
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info("AdaptiveMeanPool2d %s: output_size: %s " % (self.name, str(output_size)))

    def __repr__(self):
        rep = ('{classname}(output_size={output_size}')
        if self.name is not None:
            rep += ', name=\'{name}\''
        rep += ')'
        return rep.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # Broadcast a scalar size to both spatial dimensions.
        if isinstance(self.output_size, int):
            self.output_size = tuple([self.output_size] * 2)
        self.adaptivemeanpool2d = tlx.ops.AdaptiveMeanPool2D(output_size=self.output_size, data_format=self.data_format)

    def forward(self, inputs):
        outputs = self.adaptivemeanpool2d(inputs)

        if self._build_graph and not self._nodes_fixed:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class AdaptiveMeanPool3d(Module):
    """The :class:`AdaptiveMeanPool3d` class is a 3D Adaptive Mean Pooling layer.

    Parameters
    ------------
    output_size : int or list or tuple
        The target output size. It could be an int, [int, int, int] or (int, int, int).
    data_format : str
        Either 'channels_last' (default, [batch, depth, height, width, channel])
        or 'channels_first'. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10,32, 32, 32, 3], name='input')
    >>> net = tlx.nn.AdaptiveMeanPool3d(output_size=16)(net)
    >>> output shape : [10, 16, 16, 16, 3]

    """

    def __init__(self, output_size, data_format='channels_last', name=None):
        super(AdaptiveMeanPool3d, self).__init__(name)
        self.output_size = output_size
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info("AdaptiveMeanPool3d %s: output_size: %s " % (self.name, str(output_size)))

    def __repr__(self):
        rep = ('{classname}(output_size={output_size}')
        if self.name is not None:
            rep += ', name=\'{name}\''
        rep += ')'
        return rep.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # Broadcast a scalar size to all three spatial dimensions.
        if isinstance(self.output_size, int):
            self.output_size = tuple([self.output_size] * 3)
        self.adaptivemeanpool3d = tlx.ops.AdaptiveMeanPool3D(output_size=self.output_size, data_format=self.data_format)

    def forward(self, inputs):
        outputs = self.adaptivemeanpool3d(inputs)

        if self._build_graph and not self._nodes_fixed:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class AdaptiveMaxPool1d(Module):
    """The :class:`AdaptiveMaxPool1d` class is a 1D Adaptive Max Pooling layer.

    Parameters
    ------------
    output_size : int
        The target output size. It must be an integer.
    data_format : str
        Either 'channels_last' (default, [batch, width, channel]) or
        'channels_first'. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 32, 3], name='input')
    >>> net = tlx.nn.AdaptiveMaxPool1d(output_size=16)(net)
    >>> output shape : [10, 16, 3]

    """

    def __init__(self, output_size, data_format='channels_last', name=None):
        super(AdaptiveMaxPool1d, self).__init__(name)
        self.output_size = output_size
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info("AdaptiveMaxPool1d %s: output_size: %s " % (self.name, str(output_size)))

    def __repr__(self):
        rep = ('{classname}(output_size={output_size}')
        if self.name is not None:
            rep += ', name=\'{name}\''
        rep += ')'
        return rep.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # The backend op computes the window geometry from output_size itself.
        self.adaptivemaxpool1d = tlx.ops.AdaptiveMaxPool1D(output_size=self.output_size, data_format=self.data_format)

    def forward(self, inputs):
        outputs = self.adaptivemaxpool1d(inputs)

        if self._build_graph and not self._nodes_fixed:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class AdaptiveMaxPool2d(Module):
    """The :class:`AdaptiveMaxPool2d` class is a 2D Adaptive Max Pooling layer.

    Parameters
    ------------
    output_size : int or list or tuple
        The target output size. It could be an int, [int, int] or (int, int).
    data_format : str
        One of channels_last (default, [batch, height, width, channel]) or channels_first. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10, 32, 32, 3], name='input')
    >>> net = tlx.nn.AdaptiveMaxPool2d(output_size=16)(net)
    >>> output shape : [10, 16, 16, 3]

    """

    def __init__(self, output_size, data_format='channels_last', name=None):
        super(AdaptiveMaxPool2d, self).__init__(name)
        self.output_size = output_size
        self.data_format = data_format
        self.build()
        self._built = True

        # Bug fix: the log message previously said "AdaptiveMaxPool1d".
        logging.info("AdaptiveMaxPool2d %s: output_size: %s " % (self.name, str(output_size)))

    def __repr__(self):
        s = ('{classname}(output_size={output_size}')
        if self.name is not None:
            s += ', name=\'{name}\''
        s += ')'
        return s.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # Broadcast a scalar size to both spatial dimensions.
        if isinstance(self.output_size, int):
            self.output_size = (self.output_size, ) * 2
        self.adaptivemaxpool2d = tlx.ops.AdaptiveMaxPool2D(output_size=self.output_size, data_format=self.data_format)

    def forward(self, inputs):
        outputs = self.adaptivemaxpool2d(inputs)

        if not self._nodes_fixed and self._build_graph:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
class AdaptiveMaxPool3d(Module):
    """The :class:`AdaptiveMaxPool3d` class is a 3D Adaptive Max Pooling layer.

    Parameters
    ------------
    output_size : int or list or tuple
        The target output size. It could be an int, [int, int, int] or (int, int, int).
    data_format : str
        Either 'channels_last' (default, [batch, depth, height, width, channel])
        or 'channels_first'. The ordering of the dimensions in the inputs.
    name : None or str
        A unique layer name.

    Examples
    ---------
    With TensorLayerX

    >>> net = tlx.nn.Input([10,32, 32, 32, 3], name='input')
    >>> net = tlx.nn.AdaptiveMaxPool3d(output_size=16)(net)
    >>> output shape : [10, 16, 16, 16, 3]

    """

    def __init__(self, output_size, data_format='channels_last', name=None):
        super(AdaptiveMaxPool3d, self).__init__(name)
        self.output_size = output_size
        self.data_format = data_format
        self.build()
        self._built = True

        logging.info("AdaptiveMaxPool3d %s: output_size: %s " % (self.name, str(output_size)))

    def __repr__(self):
        rep = ('{classname}(output_size={output_size}')
        if self.name is not None:
            rep += ', name=\'{name}\''
        rep += ')'
        return rep.format(classname=self.__class__.__name__, **self.__dict__)

    def build(self, inputs_shape=None):
        # Broadcast a scalar size to all three spatial dimensions.
        if isinstance(self.output_size, int):
            self.output_size = tuple([self.output_size] * 3)
        self.adaptivemaxpool3d = tlx.ops.AdaptiveMaxPool3D(output_size=self.output_size, data_format=self.data_format)

    def forward(self, inputs):
        outputs = self.adaptivemaxpool3d(inputs)

        if self._build_graph and not self._nodes_fixed:
            self._add_node(inputs, outputs)
            self._nodes_fixed = True
        return outputs
e64e664cf4f541019f6b56a369cc2758c81c72f8 | 103 | py | Python | det3d/ops/align_aggregation/alignfeature/check.py | PatrickChoDev/CSA-3D | 34aa87194d653a93f16834d485738255f55112f0 | [
"Apache-2.0"
] | 1,318 | 2019-09-13T06:57:32.000Z | 2022-03-31T07:01:21.000Z | det3d/ops/align_aggregation/alignfeature/check.py | PatrickChoDev/CSA-3D | 34aa87194d653a93f16834d485738255f55112f0 | [
"Apache-2.0"
] | 142 | 2019-09-26T04:25:57.000Z | 2022-03-29T17:21:03.000Z | det3d/ops/align_aggregation/alignfeature/check.py | PatrickChoDev/CSA-3D | 34aa87194d653a93f16834d485738255f55112f0 | [
"Apache-2.0"
] | 309 | 2019-09-21T09:21:38.000Z | 2022-03-31T02:06:56.000Z | from __future__ import division
from __future__ import print_function
import numpy as np
import torch
| 17.166667 | 37 | 0.854369 | 15 | 103 | 5.266667 | 0.666667 | 0.253165 | 0.405063 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.145631 | 103 | 5 | 38 | 20.6 | 0.897727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0.25 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
053beece1f2c0966292c44b8400948bfa5acaf2e | 187 | py | Python | anamic/fitter/__init__.py | brouhardlab/anamic | 0e61e4aeb999ba91fdf0e21b55f2e132e94f94bc | [
"BSD-3-Clause"
] | 4 | 2019-03-27T09:49:42.000Z | 2021-06-09T12:42:03.000Z | anamic/fitter/__init__.py | hadim/anamic | 0e61e4aeb999ba91fdf0e21b55f2e132e94f94bc | [
"BSD-3-Clause"
] | 7 | 2019-03-03T16:46:23.000Z | 2019-03-03T17:01:59.000Z | anamic/fitter/__init__.py | hadim/anamic | 0e61e4aeb999ba91fdf0e21b55f2e132e94f94bc | [
"BSD-3-Clause"
] | 1 | 2019-03-27T09:49:46.000Z | 2019-03-27T09:49:46.000Z | from ._utils import get_thick_line
from ._utils import line_profile
from ._utils import perpendicular_line_fit
from ._utils import tip_line_fit
from ._utils import microtubule_tip_fitter
| 31.166667 | 42 | 0.86631 | 29 | 187 | 5.103448 | 0.413793 | 0.304054 | 0.506757 | 0.216216 | 0.297297 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.106952 | 187 | 5 | 43 | 37.4 | 0.886228 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
0557317790fe2399e04245c23c2941b7cda1aa26 | 25,042 | py | Python | plot/box/paths_final.py | architsakhadeo/Offline-Hyperparameter-Tuning-for-RL | 94b8f205b12f0cc59ae8e19b2e6099f34be929d6 | [
"MIT"
] | null | null | null | plot/box/paths_final.py | architsakhadeo/Offline-Hyperparameter-Tuning-for-RL | 94b8f205b12f0cc59ae8e19b2e6099f34be929d6 | [
"MIT"
] | null | null | null | plot/box/paths_final.py | architsakhadeo/Offline-Hyperparameter-Tuning-for-RL | 94b8f205b12f0cc59ae8e19b2e6099f34be929d6 | [
"MIT"
] | null | null | null | ac_true_env = ["../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/online_learning/esarsa/step50k/gridsearch_realenv/"]
# ac_cm = [
# "../../data/hyperparam_randomStart/acrobot/offline_learning/knn-ensemble/k3/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
# "../../data/hyperparam_randomStart/acrobot/offline_learning/knn-ensemble/k3/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
# "../../data/hyperparam_randomStart/acrobot/offline_learning/knn-ensemble/k3/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
# "../../data/hyperparam_randomStart/acrobot/offline_learning/knn-ensemble/k3/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
# "../../data/hyperparam_randomStart/acrobot/offline_learning/knn-ensemble/k3/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/",
# ]
ac_cm = [
"../../data/hyperparam_randomStart/acrobot/offline_learning/knn-ensemble/k3_50k_timeout200_randinit/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../data/hyperparam_randomStart/acrobot/offline_learning/knn-ensemble/k3_50k_timeout200_randinit/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../data/hyperparam_randomStart/acrobot/offline_learning/knn-ensemble/k3_50k_timeout200_randinit/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../data/hyperparam_randomStart/acrobot/offline_learning/knn-ensemble/k3_50k_timeout200_randinit/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../data/hyperparam_randomStart/acrobot/offline_learning/knn-ensemble/k3_50k_timeout200_randinit/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/",
]
ac_fqi = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/fqi/fqi-adam/alpha_hidden_epsilon/step10k_env/data_eps0/lockat_baseline_online/"
]
ac_rnd = [2, 0, 29, 10, 28, 30, 0, 29, 14, 24, 9, 8, 29, 11, 28, 9, 7, 5, 6, 8, 9, 7, 9, 3, 21, 2, 8, 21, 6, 20, 2, 16]
"""
cartpole action noise 1%
"""
cpn1_true_env = ["../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/online_learning/esarsa-adam/step50k/sweep/"]
# Without random start
distStart_farTrans_time200 = [
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/farStart/farTrans/k3/timeout200/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed1",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/farStart/farTrans/k3/timeout200/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed2",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/farStart/farTrans/k3/timeout200/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed3",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/farStart/farTrans/k3/timeout200/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed4",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/farStart/farTrans/k3/timeout200/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed5",
]
distStart_closeTrans_time200 = [
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/farStart/closeTrans/k3/timeout200/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed1",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/farStart/closeTrans/k3/timeout200/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed2",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/farStart/closeTrans/k3/timeout200/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed3",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/farStart/closeTrans/k3/timeout200/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed4",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/farStart/closeTrans/k3/timeout200/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed5",
]
trueStart_farTrans_time1000 = [
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed1",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed2",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed3",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed4",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed5",
]
cpn1_fqi = [
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/fqi/fqi-adam/alpha_hidden_epsilon/step10k_env/data_eps0/lockat_baseline_online/"
]
# With random start
RS_distStart_closeTrans_time200 = [
"../../data/hyperparam_rs_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/farStart/closeTrans/k3/timeout200/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed1",
"../../data/hyperparam_rs_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/farStart/closeTrans/k3/timeout200/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed2",
"../../data/hyperparam_rs_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/farStart/closeTrans/k3/timeout200/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed3",
"../../data/hyperparam_rs_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/farStart/closeTrans/k3/timeout200/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed4",
"../../data/hyperparam_rs_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/farStart/closeTrans/k3/timeout200/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed5",
]
RS_trueStart_farTrans_time1000 = [
"../../data/hyperparam_rs_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed1",
"../../data/hyperparam_rs_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed2",
"../../data/hyperparam_rs_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed3",
"../../data/hyperparam_rs_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed4",
"../../data/hyperparam_rs_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed5",
]
RS_cpn1_fqi = [
"../../data/hyperparam_randomStart/cartpole-noisy-action/noise_1perc/offline_learning/fqi/fqi-adam/alpha_hidden_epsilon/step10k_env/data_eps0/lockat_baseline_online/"
]
cpn1_rnd = [2, 0, 29, 10, 28, 30, 0, 29, 14, 24, 9, 8, 29, 11, 28, 9, 7, 5, 6, 8, 9, 7, 9, 3, 21, 2, 8, 21, 6, 20, 2, 16]
AcrobotdistantStart_regularTrans_timeout200 = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout200_distantStart_regularTrans/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout200_distantStart_regularTrans/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout200_distantStart_regularTrans/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout200_distantStart_regularTrans/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout200_distantStart_regularTrans/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/"
]
AcrobottrueStart_adversarialTrans_timeout1000 = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/randomrestarts/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/",
]
k1_notimeout = ["../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k1_notimeout/esarsa/step10k/optimalfixed_eps0/"]
k1_timeout1000 = ["../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k1_timeout1000/esarsa/step10k/optimalfixed_eps0/"]
k3ensemble_notimeout = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_notimeout/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_notimeout/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_notimeout/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_notimeout/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_notimeout/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/"
]
k3ensemble_timeout1000 = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_timeout1000/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_timeout1000/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_timeout1000/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_timeout1000/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_timeout1000/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/"
]
k3ensemble_adversarial_notimeout = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_adversarial_notimeout/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_adversarial_notimeout/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_adversarial_notimeout/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_adversarial_notimeout/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3ensemble_adversarial_notimeout/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/"
]
k3ensemble_adverarial_timeout1000 = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed1/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed2/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed3/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed4/optimalfixed_eps0/",
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_50k_timeout1000_trueStart_adversarialTrans/esarsa/step10k/drop0.2/ensembleseed5/optimalfixed_eps0/"
]
k3_adversarial_timeout1000_subruns = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/acrobot/offline_learning/knn-ensemble/k3_adversarial_timeout1000_subruns/esarsa/step10k/optimalfixed_eps0/"
]
ac_CEM = [
"../../../../../../Downloads/data_timeoutsAndtransitions_acrobot/list/acrobot/online_learning/esarsa/step50k/list_realenv/"
]
"""
Ablation study - Cartpole
"""
trueStart_farTrans_time0 = [
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout0/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed1",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout0/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed2",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout0/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed3",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout0/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed4",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout0/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed5",
]
trueStart_closeTrans_time1000 = [
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed1",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed2",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed3",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed4",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed5",
]
trueStart_closeTrans_time0 = [
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k3/timeout0/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed1",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k3/timeout0/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed2",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k3/timeout0/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed3",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k3/timeout0/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed4",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k3/timeout0/esarsa/step10k_env/data_eps0/drop0.2/ensembleseed5",
]
trueStart_noEnsemble_time1000 = [
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k1/timeout1000/esarsa/step10k_env/data_eps0/drop0/ensembleseed0",
]
trueStart_noEnsemble_time0 = [
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k1/timeout0/esarsa/step10k_env/data_eps0/drop0/ensembleseed0",
]
"""
Dataset size study - Cartpole
"""
trueStart_farTrans_time1000_5k = [
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step5k_env/data_eps0/drop0.2/ensembleseed1",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step5k_env/data_eps0/drop0.2/ensembleseed2",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step5k_env/data_eps0/drop0.2/ensembleseed3",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step5k_env/data_eps0/drop0.2/ensembleseed4",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step5k_env/data_eps0/drop0.2/ensembleseed5",
]
trueStart_farTrans_time1000_2k = [
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step2k_env/data_eps0/drop0.2/ensembleseed1",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step2k_env/data_eps0/drop0.2/ensembleseed2",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step2k_env/data_eps0/drop0.2/ensembleseed3",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step2k_env/data_eps0/drop0.2/ensembleseed4",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step2k_env/data_eps0/drop0.2/ensembleseed5",
]
trueStart_farTrans_time1000_1k = [
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step1k_env/data_eps0/drop0.2/ensembleseed1",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step1k_env/data_eps0/drop0.2/ensembleseed2",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step1k_env/data_eps0/drop0.2/ensembleseed3",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step1k_env/data_eps0/drop0.2/ensembleseed4",
"../../data/hyperparam_v1/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step1k_env/data_eps0/drop0.2/ensembleseed5",
]
"""
hyperparam v2: average calibration model performance with runs in inner loop
"""
v2_cpn1_true_env = ["../../data/hyperparam_v2/cartpole-noisy-action/noise_1perc/online_learning/esarsa-adam/step50k/sweep/"]
v2_trueStart_farTrans_time1000 = [
"../../data/hyperparam_v2/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.3/ensembleseed0",
]
v2_RSt200_trueStart_farTrans_time1000 = [
"../../data/hyperparam_rs_t200_v2/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.3/ensembleseed0",
]
v2_RS_trueStart_farTrans_time1000 = [
"../../data/hyperparam_rs_v2/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step10k_env/data_eps0/drop0.3/ensembleseed0",
]
v2_fqi = [
"../../data/hyperparam_v2/cartpole-noisy-action/noise_1perc/offline_learning/fqi-linear/fqi-adam/step20k_env/data_eps0.1/lockat_baseline_online/"
]
"""
v3
"""
v3_RSA_trueStart_farTrans_time1000_eps01 = [
"../../data/hyperparam_rs_v3/cartpole-noisy-action/noise_0.1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step10k_env/data_eps0.1/drop0.3/ensembleseed0/",
]
v3_RSA_trueStart_farTrans_time1000_eps1 = [
"../../data/hyperparam_rs_v3/cartpole-noisy-action/noise_0.1perc/offline_learning/knn-ens/randomInit/farTrans/k3/timeout1000/esarsa/step10k_env/data_eps1/drop0.3/ensembleseed0/",
]
v3_trueStart_closeTrans_time1000_eps1_noEns = [
"../../data/hyperparam_v3/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k3/timeout1000/esarsa/step10k_env/data_eps1/drop0/ensembleseed0/",
]
AC_true_env = ["../../data/hyperparam_v2/acrobot/online_learning/esarsa/step50k/gridsearch_realenv"]
AC_trueStart_farTrans_time1000 = [
"../../data/hyperparam_v2/acrobot/offline_learning/knn-ensemble/k3_adversarial_timeout1000_subruns/esarsa/step10k/optimalfixed_eps0/",
]
AC_RSt200_trueStart_farTrans_time1000 = [
"../../data/hyperparam_rs_t200_v2/acrobot/offline_learning/knn-ensemble/k3_adversarial_timeout1000_subruns/esarsa/step10k/optimalfixed_eps0/",
]
AC_RS_trueStart_farTrans_time1000 = [
"../../data/hyperparam_rs_v2/acrobot/offline_learning/knn-ensemble/k3_adversarial_timeout1000_subruns/esarsa/step10k/optimalfixed_eps0/",
]
"""
final
"""
AC_eps1 = ["../../data/hyperparam_v3/acrobot/offline_learning/knn-ensemble/final/k3/esarsa/step5k/optimalfixed_eps100/"]
AC_eps0 = ["../../data/hyperparam_v3/acrobot/offline_learning/knn-ensemble/final/k3/esarsa/step5k/optimalfixed_eps0/"]
AC_eps025 = ["../../data/hyperparam_v3/acrobot/offline_learning/knn-ensemble/final/k3/esarsa/step5k/optimalfixed_eps25/"]
AC_fqi_eps1 = ["../../data/hyperparam_v3/acrobot/offline_learning/fqi-linear/fqi-adam/step5k_env/data_eps1/lockat_baseline_online/"]
AC_fqi_eps0 = ["../../data/hyperparam_v3/acrobot/offline_learning/fqi-linear/fqi-adam/step5k_env/data_eps0/lockat_baseline_online/"]
AC_fqi_eps025 = ["../../data/hyperparam_v3/acrobot/offline_learning/fqi-linear/fqi-adam/step5k_env/data_eps0.25/lockat_baseline_online/"]
AC_true = ["../../data/hyperparam_v3/acrobot/online_learning/esarsa/step50k/gridsearch_realenv/"]
AC_cem = ["../../data/hyperparam_v3/acrobot/online_learning/cem/esarsa/step50k/acrobot_list_online/"]
CP_eps1 = ["../../data/hyperparam_v3/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k3/timeout1000/esarsa/step5k_env/data_eps1/drop0/ensembleseed0"]
CP_eps0 = ["../../data/hyperparam_v3/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k3/timeout1000/esarsa/step5k_env/data_eps0/drop0/ensembleseed0"]
CP_eps025 = ["../../data/hyperparam_v3/cartpole-noisy-action/noise_1perc/offline_learning/knn-ens/randomInit/closeTrans/k3/timeout1000/esarsa/step5k_env/data_eps0.25/drop0/ensembleseed0"]
CP_fqi_eps1 = ["../../data/hyperparam_v3/cartpole-noisy-action/noise_1perc/offline_learning/fqi-linear/fqi-adam/step5k_env/data_eps1/lockat_baseline_online"]
CP_fqi_eps0 = ["../../data/hyperparam_v3/cartpole-noisy-action/noise_1perc/offline_learning/fqi-linear/fqi-adam/step5k_env/data_eps0/lockat_baseline_online"]
CP_fqi_eps025 = ["../../data/hyperparam_v3/cartpole-noisy-action/noise_1perc/offline_learning/fqi-linear/fqi-adam/step5k_env/data_eps0.25/lockat_baseline_online"]
CP_true = ["../../data/hyperparam_v3/cartpole-noisy-action/noise_1perc/online_learning/esarsa-adam/step50k/sweep/"]
CP_cem = ["../../data/hyperparam_v3/cartpole-noisy-action/noise_1perc/online_learning/cem/esarsa/step50k/cartpole_list_online/"] | 98.590551 | 225 | 0.806685 | 3,107 | 25,042 | 6.223045 | 0.043128 | 0.096974 | 0.10706 | 0.094337 | 0.957021 | 0.93985 | 0.932558 | 0.920248 | 0.899716 | 0.885389 | 0 | 0.065846 | 0.036938 | 25,042 | 254 | 226 | 98.590551 | 0.735871 | 0.02955 | 0 | 0 | 0 | 0.616915 | 0.886398 | 0.886398 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
057fd5222ef48def99c5eb67f6dd8fdc8740e96d | 15,631 | py | Python | src/gan/load_data.py | louis-r/colorization | f80eedff385c6cc407c0919d741956375372a9b1 | [
"MIT"
] | null | null | null | src/gan/load_data.py | louis-r/colorization | f80eedff385c6cc407c0919d741956375372a9b1 | [
"MIT"
] | 2 | 2018-03-12T19:02:24.000Z | 2018-05-02T02:14:33.000Z | src/gan/load_data.py | louis-r/colorization | f80eedff385c6cc407c0919d741956375372a9b1 | [
"MIT"
] | 1 | 2018-03-12T17:12:21.000Z | 2018-03-12T17:12:21.000Z | import numpy as np
import csv
import glob
import time

import scipy.io as io
import scipy.misc as misc
import torch
from PIL import Image
from scipy.spatial import cKDTree
from skimage import color
from torch.utils import data as torchdata
from torchvision import transforms
def pil_loader(path):
    """Open the image file at *path* with PIL and return it converted to RGB."""
    with open(path, 'rb') as stream, Image.open(stream) as picture:
        return picture.convert('RGB')
class lfw_Dataset(torchdata.Dataset):
def __init__(self, root,
shuffle=False,
small=False,
mode='test',
transform=None,
target_transform=None,
types='',
show_ab=False,
loader=pil_loader):
tic = time.time()
self.root = root
self.loader = loader
self.image_transform = transform
self.imgpath = glob.glob(root + 'lfw_funneled/*/*')
self.types = types
self.show_ab = show_ab # show ab channel in classify mode
# read split
self.train_people = set()
with open(self.root + 'peopleDevTrain.txt', 'r') as f:
reader = csv.reader(f, delimiter='\t')
for i, row in enumerate(reader):
if i == 0:
continue
self.train_people.add(row[0])
assert self.train_people.__len__() == 4038
self.test_people = set()
with open(self.root + 'peopleDevTest.txt', 'r') as f:
reader = csv.reader(f, delimiter='\t')
for i, row in enumerate(reader):
if i == 0:
continue
self.test_people.add(row[0])
assert self.test_people.__len__() == 1711
self.path = []
if mode == 'train':
for item in self.imgpath:
if item.split('/')[-2] in self.train_people:
self.path.append(item)
elif mode == 'test':
for item in self.imgpath:
if item.split('/')[-2] in self.test_people:
self.path.append(item)
np.random.seed(0)
if shuffle:
perm = np.random.permutation(len(self.path))
self.path = [self.path[i] for i in perm]
if types == 'classify':
ab_list = np.load('data/pts_in_hull.npy')
self.nbrs = NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(ab_list)
print('Load %d images, used %fs' % (self.path.__len__(), time.time() - tic))
def __getitem__(self, index):
mypath = self.path[index]
img = self.loader(mypath) # PIL Image
img = np.array(img)[13:13 + 224, 13:13 + 224, :]
img_lab = color.rgb2lab(np.array(img)) # np array
# img_lab = img_lab[13:13+224, 13:13+224, :]
if self.types == 'classify':
X_a = np.ravel(img_lab[:, :, 1])
X_b = np.ravel(img_lab[:, :, 2])
img_ab = np.vstack((X_a, X_b)).T
_, ind = self.nbrs.kneighbors(img_ab)
ab_class = np.reshape(ind, (224, 224))
# print(ab_class.shape, ab_class.dtype, np.amax(ab_class), np.amin(ab_class))
ab_class = torch.unsqueeze(torch.LongTensor(ab_class), 0)
img = (img - 127.5) / 127.5 # -1 to 1
img = torch.FloatTensor(np.transpose(img, (2, 0, 1)))
img_lab = torch.FloatTensor(np.transpose(img_lab, (2, 0, 1)))
img_l = torch.unsqueeze(img_lab[0], 0) / 100. # L channel 0-100
img_ab = (img_lab[1::] + 0) / 110. # ab channel -110 - 110
if self.types == 'classify':
if self.show_ab:
return img_l, ab_class, img_ab
return img_l, ab_class
elif self.types == 'raw':
return img_l, img
else:
return img_l, img_ab
def __len__(self):
    # Number of image paths in the selected split.
    return len(self.path)
class FlowerDataset(torchdata.Dataset):
    """Flower image dataset for colorization experiments.

    Loads JPEGs from ``<root>/jpg`` and splits them into train/test using
    the index lists in ``<root>/datasplits.mat`` (trn1+val1 -> train,
    tst1 -> test).

    ``__getitem__`` returns, depending on ``types``:
      - 'classify': (L channel, ab class-index map[, ab channels if show_ab])
      - 'raw':      (L channel, RGB image scaled to [-1, 1])
      - otherwise:  (L channel, ab channels scaled by 1/110)
    """

    def __init__(self, root,
                 shuffle=False,
                 small=False,             # unused here; kept for interface parity with sibling datasets
                 mode='test',
                 transform=None,
                 target_transform=None,   # unused
                 types='',
                 show_ab=False,
                 large=False,             # unused here
                 loader=pil_loader):
        tic = time.time()
        self.root = root
        self.loader = loader
        self.image_transform = transform
        self.imgpath = glob.glob(root + 'jpg/*.jpg')
        self.types = types
        self.show_ab = show_ab  # show ab channel in classify mode
        # read split: indices are zero-padded to 4 digits to match the digits
        # extracted from the file names below
        split_file = io.loadmat(root + 'datasplits.mat')
        self.train_file = set([str(i).zfill(4) for i in np.hstack((split_file['trn1'][0], split_file['val1'][0]))])
        self.test_file = set([str(i).zfill(4) for i in split_file['tst1'][0]])
        assert self.train_file.__len__() == 1020
        assert self.test_file.__len__() == 340
        self.path = []
        if mode == 'train':
            for item in self.imgpath:
                # file names look like image_NNNN.jpg; chars [6:10] are the index
                if item.split('/')[-1][6:6 + 4] in self.train_file:
                    self.path.append(item)
        elif mode == 'test':
            for item in self.imgpath:
                if item.split('/')[-1][6:6 + 4] in self.test_file:
                    self.path.append(item)
        self.path = sorted(self.path)
        np.random.seed(0)  # fixed seed: shuffled order is reproducible across runs
        if shuffle:
            perm = np.random.permutation(len(self.path))
            self.path = [self.path[i] for i in perm]
        if types == 'classify':
            # quantized ab bins; a 1-NN lookup maps each pixel's ab pair to a class index
            ab_list = np.load('data/pts_in_hull.npy')
            self.nbrs = NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(ab_list)
        print('Load %d images, used %fs' % (self.path.__len__(), time.time() - tic))

    def __getitem__(self, index):
        """Return one sample; output layout depends on ``self.types`` (see class docstring)."""
        mypath = self.path[index]
        img = self.loader(mypath)  # PIL Image
        img = np.array(img)
        # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 — confirm
        # the pinned SciPy version, or migrate to PIL/skimage resize
        img = misc.imresize(img, (224, 224))
        img_lab = color.rgb2lab(np.array(img))  # np array
        # img_lab = img_lab[13:13+224, 13:13+224, :]
        if self.types == 'classify':
            # nearest quantized ab bin per pixel -> class-index map
            X_a = np.ravel(img_lab[:, :, 1])
            X_b = np.ravel(img_lab[:, :, 2])
            img_ab = np.vstack((X_a, X_b)).T
            _, ind = self.nbrs.kneighbors(img_ab)
            ab_class = np.reshape(ind, (224, 224))
            ab_class = torch.unsqueeze(torch.LongTensor(ab_class), 0)
        img = (img - 127.5) / 127.5  # -1 to 1
        img = torch.FloatTensor(np.transpose(img, (2, 0, 1)))
        img_lab = torch.FloatTensor(np.transpose(img_lab, (2, 0, 1)))
        img_l = torch.unsqueeze(img_lab[0], 0) / 100.  # L channel 0-100
        img_ab = (img_lab[1::] + 0) / 110.  # ab channel -110 - 110
        if self.types == 'classify':
            if self.show_ab:
                return img_l, ab_class, img_ab
            return img_l, ab_class
        elif self.types == 'raw':
            return img_l, img
        # if self.show_ab:
        #     return img_l, img_ab, None
        else:
            return img_l, img_ab

    def __len__(self):
        # Number of images in the selected split.
        return len(self.path)
class BobDataset(torchdata.Dataset):
    """PNG image dataset with CSV-defined train/test splits.

    Reads images from ``<root>/img`` (224px) or ``<root>/img_480`` (480px when
    ``large``), and splits them according to ``train_split.csv`` /
    ``test_split.csv`` (tab-delimited, header row skipped).

    ``__getitem__`` returns, depending on ``types``:
      - 'classify': (L channel, ab class-index map[, ab channels if show_ab])
      - 'raw':      (L channel, RGB image scaled to [-1, 1])
      - otherwise:  (L channel, ab channels scaled by 1/110)
    """

    def __init__(self, root,
                 shuffle=False,
                 small=False,             # unused; kept for interface parity with sibling datasets
                 mode='test',
                 transform=None,
                 target_transform=None,   # unused
                 types='',
                 show_ab=False,
                 large=False,
                 loader=pil_loader):
        tic = time.time()
        self.root = root
        self.loader = loader
        self.image_transform = transform
        if large:
            self.size = 480
            self.imgpath = glob.glob(root + 'img_480/*.png')
        else:
            self.size = 224
            self.imgpath = glob.glob(root + 'img/*.png')
        self.types = types
        self.show_ab = show_ab  # show ab channel in classify mode
        # read split: first CSV column is a numeric id, zero-padded to 4 digits
        # to match the digits extracted from the file names below
        self.train_file = set()
        with open(self.root + 'train_split.csv', 'r') as f:
            reader = csv.reader(f, delimiter='\t')
            for i, row in enumerate(reader):
                if i == 0:
                    continue  # skip header row
                self.train_file.add(str(row[0]).zfill(4))
        assert self.train_file.__len__() == 1392
        self.test_file = set()
        with open(self.root + 'test_split.csv', 'r') as f:
            reader = csv.reader(f, delimiter='\t')
            for i, row in enumerate(reader):
                if i == 0:
                    continue  # skip header row
                self.test_file.add(str(row[0]).zfill(4))
        assert self.test_file.__len__() == 348
        self.path = []
        if mode == 'train':
            for item in self.imgpath:
                # chars [6:10] of the file name are the numeric id
                if item.split('/')[-1][6:6 + 4] in self.train_file:
                    self.path.append(item)
        elif mode == 'test':
            for item in self.imgpath:
                if item.split('/')[-1][6:6 + 4] in self.test_file:
                    self.path.append(item)
        self.path = sorted(self.path)
        np.random.seed(0)  # fixed seed: shuffled order is reproducible across runs
        if shuffle:
            perm = np.random.permutation(len(self.path))
            self.path = [self.path[i] for i in perm]
        if types == 'classify':
            # quantized ab bins; a 1-NN lookup maps each pixel's ab pair to a class index
            ab_list = np.load('data/pts_in_hull.npy')
            self.nbrs = NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(ab_list)
        print('Load %d images, used %fs' % (self.path.__len__(), time.time() - tic))

    def __getitem__(self, index):
        """Return one sample; output layout depends on ``self.types`` (see class docstring)."""
        mypath = self.path[index]
        img = self.loader(mypath)  # PIL Image
        img = np.array(img)
        if (img.shape[0] != self.size) or (img.shape[1] != self.size):
            # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 —
            # confirm the pinned SciPy version
            img = misc.imresize(img, (self.size, self.size))
        img_lab = color.rgb2lab(np.array(img))  # np array
        # img_lab = img_lab[13:13+224, 13:13+224, :]
        if self.types == 'classify':
            # nearest quantized ab bin per pixel -> class-index map
            X_a = np.ravel(img_lab[:, :, 1])
            X_b = np.ravel(img_lab[:, :, 2])
            img_ab = np.vstack((X_a, X_b)).T
            _, ind = self.nbrs.kneighbors(img_ab)
            ab_class = np.reshape(ind, (self.size, self.size))
            # print(ab_class.shape, ab_class.dtype, np.amax(ab_class), np.amin(ab_class))
            ab_class = torch.unsqueeze(torch.LongTensor(ab_class), 0)
        img = (img - 127.5) / 127.5  # -1 to 1
        img = torch.FloatTensor(np.transpose(img, (2, 0, 1)))
        img_lab = torch.FloatTensor(np.transpose(img_lab, (2, 0, 1)))
        img_l = torch.unsqueeze(img_lab[0], 0) / 100.  # L channel 0-100
        img_ab = (img_lab[1::] + 0) / 110.  # ab channel -110 - 110
        if self.types == 'classify':
            if self.show_ab:
                return img_l, ab_class, img_ab
            return img_l, ab_class
        elif self.types == 'raw':
            return img_l, img
        # if self.show_ab:
        #     return img_l, img_ab, None
        else:
            return img_l, img_ab

    def __len__(self):
        # Number of images in the selected split.
        return len(self.path)
class SC2Dataset(torchdata.Dataset):
    """StarCraft II replay-frame dataset with CSV-defined train/test splits.

    Structurally identical to BobDataset: images come from ``<root>/img``
    (224px) or ``<root>/img_480`` (480px when ``large``); splits come from
    ``train_split.csv`` / ``test_split.csv`` (tab-delimited, header skipped).

    ``__getitem__`` returns, depending on ``types``:
      - 'classify': (L channel, ab class-index map[, ab channels if show_ab])
      - 'raw':      (L channel, RGB image scaled to [-1, 1])
      - otherwise:  (L channel, ab channels scaled by 1/110)
    """

    def __init__(self, root,
                 shuffle=False,
                 small=False,             # unused; kept for interface parity with sibling datasets
                 mode='test',
                 transform=None,
                 target_transform=None,   # unused
                 types='',
                 show_ab=False,
                 large=False,
                 loader=pil_loader):
        tic = time.time()
        self.root = root
        self.loader = loader
        self.image_transform = transform
        if large:
            self.size = 480
            self.imgpath = glob.glob(root + 'img_480/*.png')
        else:
            self.size = 224
            self.imgpath = glob.glob(root + 'img/*.png')
        self.types = types
        self.show_ab = show_ab  # show ab channel in classify mode
        # read split: first CSV column is a numeric id, zero-padded to 4 digits
        # to match the digits extracted from the file names below
        self.train_file = set()
        with open(self.root + 'train_split.csv', 'r') as f:
            reader = csv.reader(f, delimiter='\t')
            for i, row in enumerate(reader):
                if i == 0:
                    continue  # skip header row
                self.train_file.add(str(row[0]).zfill(4))
        assert self.train_file.__len__() == 1383
        self.test_file = set()
        with open(self.root + 'test_split.csv', 'r') as f:
            reader = csv.reader(f, delimiter='\t')
            for i, row in enumerate(reader):
                if i == 0:
                    continue  # skip header row
                self.test_file.add(str(row[0]).zfill(4))
        assert self.test_file.__len__() == 345
        self.path = []
        if mode == 'train':
            for item in self.imgpath:
                # chars [6:10] of the file name are the numeric id
                if item.split('/')[-1][6:6 + 4] in self.train_file:
                    self.path.append(item)
        elif mode == 'test':
            for item in self.imgpath:
                if item.split('/')[-1][6:6 + 4] in self.test_file:
                    self.path.append(item)
        self.path = sorted(self.path)
        np.random.seed(0)  # fixed seed: shuffled order is reproducible across runs
        if shuffle:
            perm = np.random.permutation(len(self.path))
            self.path = [self.path[i] for i in perm]
        if types == 'classify':
            # quantized ab bins; a 1-NN lookup maps each pixel's ab pair to a class index
            ab_list = np.load('data/pts_in_hull.npy')
            self.nbrs = NearestNeighbors(n_neighbors=1, algorithm='ball_tree').fit(ab_list)
        print('Load %d images, used %fs' % (self.path.__len__(), time.time() - tic))

    def __getitem__(self, index):
        """Return one sample; output layout depends on ``self.types`` (see class docstring)."""
        mypath = self.path[index]
        img = self.loader(mypath)  # PIL Image
        img = np.array(img)
        if (img.shape[0] != self.size) or (img.shape[1] != self.size):
            # NOTE(review): scipy.misc.imresize was removed in SciPy 1.3 —
            # confirm the pinned SciPy version
            img = misc.imresize(img, (self.size, self.size))
        img_lab = color.rgb2lab(np.array(img))  # np array
        # img_lab = img_lab[13:13+224, 13:13+224, :]
        if self.types == 'classify':
            # nearest quantized ab bin per pixel -> class-index map
            X_a = np.ravel(img_lab[:, :, 1])
            X_b = np.ravel(img_lab[:, :, 2])
            img_ab = np.vstack((X_a, X_b)).T
            _, ind = self.nbrs.kneighbors(img_ab)
            ab_class = np.reshape(ind, (self.size, self.size))
            # print(ab_class.shape, ab_class.dtype, np.amax(ab_class), np.amin(ab_class))
            ab_class = torch.unsqueeze(torch.LongTensor(ab_class), 0)
        img = (img - 127.5) / 127.5  # -1 to 1
        img = torch.FloatTensor(np.transpose(img, (2, 0, 1)))
        img_lab = torch.FloatTensor(np.transpose(img_lab, (2, 0, 1)))
        img_l = torch.unsqueeze(img_lab[0], 0) / 100.  # L channel 0-100
        img_ab = (img_lab[1::] + 0) / 110.  # ab channel -110 - 110
        if self.types == 'classify':
            if self.show_ab:
                return img_l, ab_class, img_ab
            return img_l, ab_class
        elif self.types == 'raw':
            # debug aid: flag images that came out one pixel short of 480
            # (presumably a resize rounding issue — TODO confirm)
            if img.size(1) == 479 or img.size(2) == 479:
                print(mypath)
            return img_l, img
        # if self.show_ab:
        #     return img_l, img_ab, None
        else:
            return img_l, img_ab

    def __len__(self):
        # Number of images in the selected split.
        return len(self.path)
if __name__ == '__main__':
    # Smoke test: iterate the SC2 training split once, printing progress.
    data_root = '/home/users/u5612799/DATA/SCReplay/'
    image_transform = transforms.Compose([
        transforms.CenterCrop(224),
        transforms.ToTensor(),
    ])
    dataset = SC2Dataset(data_root, mode='train',
                         transform=image_transform, large=True, types='raw')
    loader = torchdata.DataLoader(dataset,
                                  batch_size=1,
                                  shuffle=False,
                                  num_workers=4)
    total = len(dataset)
    for batch_idx, (data, target) in enumerate(loader):
        print(batch_idx, total)
| 35.047085 | 115 | 0.520568 | 2,037 | 15,631 | 3.825724 | 0.093274 | 0.043116 | 0.024381 | 0.016938 | 0.877454 | 0.868087 | 0.854356 | 0.854356 | 0.854356 | 0.848454 | 0 | 0.039564 | 0.348346 | 15,631 | 445 | 116 | 35.125843 | 0.725506 | 0.070373 | 0 | 0.836676 | 0 | 0 | 0.044316 | 0.002416 | 0 | 0 | 0 | 0 | 0.022923 | 1 | 0.037249 | false | 0 | 0.031519 | 0.011461 | 0.140401 | 0.017192 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
058e7f45fe4fe442f0f06d1cdba5ec523edf150c | 96 | py | Python | SBaaS_LIMS/lims_sample_execute.py | dmccloskey/SBaaS_LIMS | 5dfd73689674953345d523178a67b8dda10e6d47 | [
"MIT"
] | null | null | null | SBaaS_LIMS/lims_sample_execute.py | dmccloskey/SBaaS_LIMS | 5dfd73689674953345d523178a67b8dda10e6d47 | [
"MIT"
] | null | null | null | SBaaS_LIMS/lims_sample_execute.py | dmccloskey/SBaaS_LIMS | 5dfd73689674953345d523178a67b8dda10e6d47 | [
"MIT"
] | null | null | null | from .lims_sample_io import lims_sample_io
class lims_sample_execute(lims_sample_io):
    """Execution-layer LIMS sample class; currently adds nothing beyond lims_sample_io."""
    pass
e958a46e14e5d3630d39d9bd2f6c1669392a4296 | 6,168 | py | Python | baseline/cnn/networks.py | DenseAI/deep-learning-and-fashion-mnist | 49e21d821bbbcc6c9b33f5ee440a61a166efed15 | [
"Apache-2.0"
] | null | null | null | baseline/cnn/networks.py | DenseAI/deep-learning-and-fashion-mnist | 49e21d821bbbcc6c9b33f5ee440a61a166efed15 | [
"Apache-2.0"
] | null | null | null | baseline/cnn/networks.py | DenseAI/deep-learning-and-fashion-mnist | 49e21d821bbbcc6c9b33f5ee440a61a166efed15 | [
"Apache-2.0"
] | null | null | null |
import keras
import tensorflow as tf
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten, BatchNormalization, Input, add, Embedding, multiply, subtract, add, dot, Dot
from keras.layers import Conv2D, MaxPooling2D
from keras.models import Model
from keras.activations import softmax
def create_base_cnn_model(input_shape, num_classes=10):
    """Build and compile the baseline CNN classifier.

    Architecture: two conv blocks (32 then 64 filters, 3x3 kernels, each
    followed by 2x2 max-pooling and 25% dropout), then a 128-unit dense layer
    with batch-norm and 50% dropout, and a softmax output.

    Args:
        input_shape: shape of a single input sample, e.g. (28, 28, 1).
        num_classes: number of softmax outputs (default 10).

    Returns:
        A compiled keras Sequential model (categorical cross-entropy loss,
        Adadelta optimizer with lr=1, accuracy metric).
    """
    lr = 1
    layers = [
        Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape, padding='same'),
        Conv2D(32, (3, 3), activation='relu', padding='same'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Conv2D(64, (3, 3), activation='relu', padding='same'),
        Conv2D(64, (3, 3), activation='relu', padding='same'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
        Flatten(),
        Dense(128, activation='relu'),
        BatchNormalization(),
        Dropout(0.5),
        Dense(num_classes, activation='softmax'),
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=keras.optimizers.Adadelta(lr=lr),
                  metrics=['accuracy'])
    return model
def create_base_cnn_model_with_optimizer(input_shape, optimizer="sgd", num_classes=10):
    """Baseline CNN identical to create_base_cnn_model, but compiled with a
    caller-supplied optimizer instead of the fixed Adadelta.

    Args:
        input_shape: shape of a single input sample, e.g. (28, 28, 1).
        optimizer: keras optimizer name or instance passed to ``compile``.
        num_classes: number of softmax outputs (default 10).

    Returns:
        A compiled keras Sequential model (categorical cross-entropy loss,
        accuracy metric).
    """
    # Fix: dropped the dead ``learn_rate`` local — it was never used, since
    # the learning rate is controlled by the optimizer the caller supplies.
    model = Sequential()
    model.add(Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape, padding='same'))
    model.add(Conv2D(32, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(Conv2D(64, (3, 3), activation='relu', padding='same'))
    model.add(MaxPooling2D(pool_size=(2, 2)))
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dense(128, activation='relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.5))
    model.add(Dense(num_classes, activation='softmax'))
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
def create_base_cnn_model_with_kernel(input_shape, kernel=3, optimizer="sgd", num_classes=10):
    """Baseline CNN whose first conv block uses a configurable kernel size.

    The first two convolutions use ``kernel`` x ``kernel`` filters; the rest
    of the network matches create_base_cnn_model.

    Args:
        input_shape: shape of a single input sample, e.g. (28, 28, 1).
        kernel: side length of the first-block convolution kernels.
        optimizer: keras optimizer name or instance used at compile time.
        num_classes: number of softmax outputs (default 10).

    Returns:
        A compiled keras Sequential model.
    """
    first_block = [
        Conv2D(32, kernel_size=(kernel, kernel), activation='relu', input_shape=input_shape, padding='same'),
        Conv2D(32, (kernel, kernel), activation='relu', padding='same'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
    ]
    second_block = [
        Conv2D(64, (3, 3), activation='relu', padding='same'),
        Conv2D(64, (3, 3), activation='relu', padding='same'),
        MaxPooling2D(pool_size=(2, 2)),
        Dropout(0.25),
    ]
    head = [
        Flatten(),
        Dense(128, activation='relu'),
        BatchNormalization(),
        Dropout(0.5),
        Dense(num_classes, activation='softmax'),
    ]
    model = Sequential()
    for layer in first_block + second_block + head:
        model.add(layer)
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
def create_base_cnn_model_with_kernels(input_shape, kernels=(3, 3, 3, 3), optimizer="adamax", droprate=0.25, factor=1, num_classes=10):
    """Multi-branch CNN: one conv branch per entry in ``kernels``, merged by addition.

    Each branch: two convs (32 filters, sizes ``kernel`` and ``kernel*factor``),
    pool, batch-norm, optional dropout; then two 64*factor-filter 3x3 convs,
    pool, batch-norm, optional dropout; flatten and a 128-unit dense layer.
    Branch outputs are summed, normalized, and classified with softmax.

    Args:
        input_shape: unused — the input is hard-coded to (28, 28, 1).
            TODO(review): wire this through or drop the parameter.
        kernels: iterable of kernel sizes, one branch per entry. Default is a
            tuple (was a mutable list default — harmless here but a known
            pitfall); any iterable of ints works, so this is backward-compatible.
        optimizer: keras optimizer name or instance.
        droprate: dropout rate; values <= 0 disable dropout entirely.
        factor: width/kernel multiplier for the deeper layers.
        num_classes: number of softmax outputs.

    Returns:
        A compiled keras Model (categorical cross-entropy, accuracy metric).
    """
    input_img = Input(shape=(28, 28, 1))
    outs = []
    for kernel in kernels:
        x = Conv2D(32, (kernel, kernel), activation='relu', padding='same')(input_img)
        x = Conv2D(32, (kernel * factor, kernel * factor), activation='relu', padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = BatchNormalization()(x)
        if droprate > 0:
            x = Dropout(droprate)(x)
        x = Conv2D(64 * factor, (3, 3), activation='relu', padding='same')(x)
        x = Conv2D(64 * factor, (3, 3), activation='relu', padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = BatchNormalization()(x)
        if droprate > 0:
            x = Dropout(droprate)(x)
        x = Flatten()(x)
        x = Dense(128, activation='relu')(x)
        outs.append(x)
    # Fix: keras ``add`` requires at least two tensors, so a single-entry
    # ``kernels`` used to crash; fall back to the lone branch output (same
    # guard the _with_label variant already has).
    if len(outs) > 1:
        x = add(outs)
    else:
        x = outs[0]
    x = BatchNormalization()(x)
    if droprate > 0:
        x = Dropout(droprate)(x)
    output = Dense(num_classes, activation="softmax")(x)
    model = Model(input_img, output)
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
def create_base_cnn_model_with_kernels_with_label(input_shape, kernels=(3, 3, 3, 3), optimizer="adamax", droprate=0.25, factor=1, num_classes=10):
    """Multi-branch CNN conditioned on an integer class label.

    The image branches mirror create_base_cnn_model_with_kernels (except the
    first conv pair does not scale with ``factor`` here). The label is
    embedded into a 128-d vector and fused with the summed branch features via
    element-wise multiply, subtract and add before classification.

    Args:
        input_shape: unused — the input is hard-coded to (28, 28, 1).
            TODO(review): wire this through or drop the parameter.
        kernels: iterable of kernel sizes, one image branch per entry.
            Default is a tuple (was a mutable list default — a known pitfall);
            backward-compatible since it is only iterated.
        optimizer: keras optimizer name or instance.
        droprate: dropout rate; values <= 0 disable dropout.
        factor: width multiplier for the deeper conv layers.
        num_classes: label vocabulary size and softmax width.

    Returns:
        A compiled keras Model taking [image, label] inputs.
    """
    input_img = Input(shape=(28, 28, 1))
    input_label = Input(shape=(1,), dtype='int32')
    label_embedding = Flatten()(Embedding(num_classes, 128)(input_label))
    outs = []
    for kernel in kernels:
        x = Conv2D(32, (kernel, kernel), activation='relu', padding='same')(input_img)
        x = Conv2D(32, (kernel, kernel), activation='relu', padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = BatchNormalization()(x)
        if droprate > 0:
            x = Dropout(droprate)(x)
        x = Conv2D(64 * factor, (3, 3), activation='relu', padding='same')(x)
        x = Conv2D(64 * factor, (3, 3), activation='relu', padding='same')(x)
        x = MaxPooling2D(pool_size=(2, 2))(x)
        x = BatchNormalization()(x)
        if droprate > 0:
            x = Dropout(droprate)(x)
        x = Flatten()(x)
        x = Dense(128, activation='relu')(x)
        outs.append(x)
    # keras ``add`` needs at least two tensors; single branch passes through
    if (len(outs) > 1):
        x = add(outs)
    else:
        x = outs[0]
    # fuse image features with the label embedding three ways, then sum
    mul = multiply([x, label_embedding])
    sub = subtract([x, label_embedding])
    add_ = add([x, label_embedding])
    x = add([mul, sub, add_])
    x = BatchNormalization()(x)
    if droprate > 0:
        x = Dropout(droprate)(x)
    output = Dense(num_classes, activation='softmax')(x)
    model = Model([input_img, input_label], output)
    model.compile(loss=keras.losses.categorical_crossentropy,
                  optimizer=optimizer,
                  metrics=['accuracy'])
    return model
e9b2056ac95525d9f3581acf6a171121dd16df59 | 450 | py | Python | priactice_test/test2/str_test.py | KongYun-Mrs/practice | 72bffdf889ce36e4fcbb12f2456c03d45dec681e | [
"MIT"
] | null | null | null | priactice_test/test2/str_test.py | KongYun-Mrs/practice | 72bffdf889ce36e4fcbb12f2456c03d45dec681e | [
"MIT"
] | 1 | 2020-07-11T05:18:06.000Z | 2020-07-11T05:18:06.000Z | priactice_test/test2/str_test.py | KongYun-Mrs/practice | 72bffdf889ce36e4fcbb12f2456c03d45dec681e | [
"MIT"
] | null | null | null | # coding: utf-8
# Two sample tilde-delimited TDPS syslog records; their lengths are printed
# for comparison.
# Fix: the Python-2-only ``print x`` statements were a SyntaxError under
# Python 3; ``print(x)`` with a single argument behaves identically on both.
s1 = u'UdpSource~~~ST_TDPS~~~V1.0~~~tdps~~~1565318476640~~~10.214.9.10~~~~~~572d96f0-fef4-4cd1-aee6-e80186cfdd231565318476640~~~NULL~~~///<1>2019-08-09T10:40:39+08:00 cebbank-tdps-02 Threatbook[43381]:'
print(len(s1))
s2 = u'UdpSource~~~ST_TDPS~~~V1.0~~~tdps~~~1565319084407~~~10.214.9.10~~~~~~6b1680aa-fdd0-4fb9-911f-a4d13c804fa51565319084407~~~NULL~~~///<1>2019-08-09T10:50:46+08:00 cebbank-tdps-02 Threatbook[43381]:'
print(len(s2))
f9d2dc97b2cb5d2ad8d9c8bd64f0261665080850 | 133 | py | Python | home/tests/__init__.py | balgrat/CID19 | ed27f177a1ff8d5fe9cad1fde7da6ec3611e52ce | [
"MIT"
] | null | null | null | home/tests/__init__.py | balgrat/CID19 | ed27f177a1ff8d5fe9cad1fde7da6ec3611e52ce | [
"MIT"
] | 12 | 2020-02-12T00:00:40.000Z | 2022-03-11T23:44:47.000Z | home/tests/__init__.py | lucvd/CID19 | 826c1ff75e0e33154c48a5b386b2718306a239db | [
"MIT"
] | null | null | null | from home.tests.test_models_home import *
from home.tests.test_models_chat import *
from home.tests.test_models_successstory import * | 44.333333 | 49 | 0.849624 | 21 | 133 | 5.095238 | 0.380952 | 0.224299 | 0.364486 | 0.476636 | 0.757009 | 0.542056 | 0 | 0 | 0 | 0 | 0 | 0 | 0.082707 | 133 | 3 | 49 | 44.333333 | 0.877049 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
ddb7cda367d7611a5d5ad66fb41dbbfaa304a61b | 8,902 | py | Python | tests/test_nectar.py | polyswarm/polyswarm-transaction | 3f6125757d2830d46c2d46aa823ae2c532f513cf | [
"MIT"
] | null | null | null | tests/test_nectar.py | polyswarm/polyswarm-transaction | 3f6125757d2830d46c2d46aa823ae2c532f513cf | [
"MIT"
] | 1 | 2020-07-27T19:43:39.000Z | 2020-07-27T19:43:39.000Z | tests/test_nectar.py | polyswarm/polyswarm-transaction | 3f6125757d2830d46c2d46aa823ae2c532f513cf | [
"MIT"
] | 1 | 2021-04-26T10:58:29.000Z | 2021-04-26T10:58:29.000Z | import json
from deepdiff import DeepDiff
from eth_keys.datatypes import PrivateKey
from web3 import Web3
from polyswarmtransaction.transaction import SignedTransaction
from polyswarmtransaction.nectar import WithdrawalTransaction, ApproveNectarReleaseTransaction
def test_recover_release_when_computed(ethereum_accounts):
    """Signing an ApproveNectarReleaseTransaction yields the same ECDSA
    signature as hand-signing keccak(json.dumps(payload))."""
    # canonical payload the library is expected to serialize
    data = {
        'name': 'polyswarmtransaction.nectar:ApproveNectarReleaseTransaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {
            'destination': '0x0000000000000000000000000000000000000001',
            'amount': '200000000000000000',
            'transaction_hash': '0x0000000000000000000000000000000',
            'block_hash': '0x0000000000000000000000000000000',
            'block_number': '0x1',
        }
    }
    transaction = ApproveNectarReleaseTransaction(destination='0x0000000000000000000000000000000000000001',
                                                  amount='200000000000000000',
                                                  transaction_hash='0x0000000000000000000000000000000',
                                                  block_hash='0x0000000000000000000000000000000',
                                                  block_number='0x1')
    signed = transaction.sign(ethereum_accounts[0].key)
    # hand-computed signature over the keccak of the canonical JSON must match
    assert signed.signature == PrivateKey(ethereum_accounts[0].key).sign_msg_hash(Web3.keccak(text=json.dumps(data)))


def test_recover_withdrawal_when_computed(ethereum_accounts):
    """Same hand-computed signature check for WithdrawalTransaction."""
    data = {
        'name': 'polyswarmtransaction.nectar:WithdrawalTransaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {
            'amount': '2000000000000000000'
        }
    }
    transaction = WithdrawalTransaction('2000000000000000000')
    signed = transaction.sign(ethereum_accounts[0].key)
    assert signed.signature == PrivateKey(ethereum_accounts[0].key).sign_msg_hash(Web3.keccak(text=json.dumps(data)))
def test_sign_release_transaction(ethereum_accounts):
    """Signing a release produces the exact canonical JSON payload and a
    known signature (pinned to the first test account's key)."""
    signature = ('0x7f9f551b25397ed66c7aa0dbb5ad92a09ce5c976fa5492d0c3324fbd46b09cba7325d13b74038ab63cbcc369216983b94b'
                 '507a876ee13eac8b95ff05a69de36200')
    data = {
        'name': 'polyswarmtransaction.nectar:ApproveNectarReleaseTransaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {
            'destination': '0x0000000000000000000000000000000000000001',
            'amount': '200000000000000000',
            'transaction_hash': '0x0000000000000000000000000000000',
            'block_hash': '0x0000000000000000000000000000000',
            'block_number': '0x1',
        }
    }
    transaction = ApproveNectarReleaseTransaction(destination='0x0000000000000000000000000000000000000001',
                                                  amount='200000000000000000',
                                                  transaction_hash='0x0000000000000000000000000000000',
                                                  block_hash='0x0000000000000000000000000000000',
                                                  block_number='0x1')
    signed = transaction.sign(ethereum_accounts[0].key)
    # raw_transaction must serialize exactly like json.dumps(data); the
    # hard-coded signature pins the test to that precise byte sequence
    assert signed.raw_transaction == json.dumps(data)
    assert signed.signature.hex() == signature


def test_sign_withdrawal_transaction(ethereum_accounts):
    """Same payload/signature pinning for WithdrawalTransaction."""
    signature = ('0x92787e13b8b556e24d5316e58a344b3feca1b5559571de63eb64160e874bfb78710848fd20e6fe45c5cb19b0adee22ccf1'
                 '248eefe3fd8ad39e47281ee25b2b8d01')
    data = {
        'name': 'polyswarmtransaction.nectar:WithdrawalTransaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {
            'amount': '2000000000000000000'
        }
    }
    transaction = WithdrawalTransaction('2000000000000000000')
    signed = transaction.sign(ethereum_accounts[0].key)
    assert signed.raw_transaction == json.dumps(data)
    assert signed.signature.hex() == signature
def test_recover_release_signed_transaction(ethereum_accounts):
    """ecrecover on a freshly signed release returns the signer's address."""
    transaction = ApproveNectarReleaseTransaction(destination='0x0000000000000000000000000000000000000001',
                                                  amount='200000000000000000',
                                                  transaction_hash='0x0000000000000000000000000000000',
                                                  block_hash='0x0000000000000000000000000000000',
                                                  block_number='0x1')
    signed = transaction.sign(ethereum_accounts[0].key)
    assert signed.ecrecover() == '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5'


def test_recover_withdrawal_signed_transaction(ethereum_accounts):
    """ecrecover on a freshly signed withdrawal returns the signer's address."""
    transaction = WithdrawalTransaction('2000000000000000000')
    signed = transaction.sign(ethereum_accounts[0].key)
    assert signed.ecrecover() == '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5'


def test_recover_release_signed_transaction_from_parts():
    """A SignedTransaction rebuilt from (raw JSON, hard-coded signature)
    recovers the original signer — no signing step involved."""
    signature = ('0x7f9f551b25397ed66c7aa0dbb5ad92a09ce5c976fa5492d0c3324fbd46b09cba7325d13b74038ab63cbcc369216983b94b'
                 '507a876ee13eac8b95ff05a69de36200')
    data = {
        'name': 'polyswarmtransaction.nectar:ApproveNectarReleaseTransaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {
            'destination': '0x0000000000000000000000000000000000000001',
            'amount': '200000000000000000',
            'transaction_hash': '0x0000000000000000000000000000000',
            'block_hash': '0x0000000000000000000000000000000',
            'block_number': '0x1',
        }
    }
    signed = SignedTransaction(json.dumps(data), signature)
    assert signed.ecrecover() == '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5'


def test_recover_withdrawal_signed_transaction_from_parts():
    """Same rebuild-from-parts recovery check for a withdrawal payload."""
    signature = ('0x92787e13b8b556e24d5316e58a344b3feca1b5559571de63eb64160e874bfb78710848fd20e6fe45c5cb19b0adee22ccf1'
                 '248eefe3fd8ad39e47281ee25b2b8d01')
    data = {
        'name': 'polyswarmtransaction.nectar:WithdrawalTransaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {
            'amount': '2000000000000000000'
        }
    }
    signed = SignedTransaction(json.dumps(data), signature)
    assert signed.ecrecover() == '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5'
def test_recover_release_signed_transaction_from_signed_output(ethereum_accounts):
    """Round trip: a SignedTransaction rebuilt from another's ``payload``
    dict still recovers the signer."""
    transaction = ApproveNectarReleaseTransaction(destination='0x0000000000000000000000000000000000000001',
                                                  amount='200000000000000000',
                                                  transaction_hash='0x0000000000000000000000000000000',
                                                  block_hash='0x0000000000000000000000000000000',
                                                  block_number='0x1')
    signed = transaction.sign(ethereum_accounts[0].key)
    signed = SignedTransaction(**signed.payload)
    assert signed.ecrecover() == '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5'


def test_recover_withdrawal_signed_transaction_from_signed_output(ethereum_accounts):
    """Same payload round trip for a withdrawal."""
    transaction = WithdrawalTransaction('2000000000000000000')
    signed = transaction.sign(ethereum_accounts[0].key)
    signed = SignedTransaction(**signed.payload)
    assert signed.ecrecover() == '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5'


def test_load_approve_release():
    """SignedTransaction.transaction() reconstructs the concrete release
    transaction type and data from raw JSON (signature not validated here,
    hence the 65 zero bytes)."""
    data = {
        'name': 'polyswarmtransaction.nectar:ApproveNectarReleaseTransaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {
            'destination': '0x0000000000000000000000000000000000000001',
            'amount': '200000000000000000',
            'transaction_hash': '0x0000000000000000000000000000000',
            'block_hash': '0x0000000000000000000000000000000',
            'block_number': '0x1',
        }
    }
    signed = SignedTransaction(json.dumps(data), bytes([0] * 65))
    approve_nct_release = ApproveNectarReleaseTransaction(destination='0x0000000000000000000000000000000000000001',
                                                          amount='200000000000000000',
                                                          transaction_hash='0x0000000000000000000000000000000',
                                                          block_hash='0x0000000000000000000000000000000',
                                                          block_number='0x1')
    assert isinstance(signed.transaction(), ApproveNectarReleaseTransaction)
    # DeepDiff returns an empty (falsy) diff when the data matches
    assert not DeepDiff(signed.transaction().data, approve_nct_release.data, ignore_order=True)


def test_load_withdrawal():
    """transaction() reconstructs a WithdrawalTransaction from raw JSON."""
    data = {
        'name': 'polyswarmtransaction.nectar:WithdrawalTransaction',
        'from': '0x3f17f1962B36e491b30A40b2405849e597Ba5FB5',
        'data': {
            'amount': '200000000000000000'
        }
    }
    signed = SignedTransaction(json.dumps(data), bytes([0] * 65))
    assert isinstance(signed.transaction(), WithdrawalTransaction)
    assert not DeepDiff(signed.transaction().data, WithdrawalTransaction('200000000000000000').data, ignore_order=True)
| 48.118919 | 120 | 0.68479 | 554 | 8,902 | 10.803249 | 0.120939 | 0.04812 | 0.126316 | 0.033417 | 0.893233 | 0.891562 | 0.873684 | 0.873684 | 0.851629 | 0.833083 | 0 | 0.297928 | 0.230061 | 8,902 | 184 | 121 | 48.380435 | 0.575285 | 0 | 0 | 0.745223 | 0 | 0 | 0.361492 | 0.283082 | 0 | 0 | 0.223208 | 0 | 0.101911 | 1 | 0.076433 | false | 0 | 0.038217 | 0 | 0.11465 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.