hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0b0ec7f756e9143e4706db7455cf43759101ab6f | 3,991 | py | Python | models/multitask/data_loader.py | nghuyong/mtl-duolingo | bd7c9bf1f3d54b23928ba3af2b460e5e34eb8375 | [
"MIT"
] | null | null | null | models/multitask/data_loader.py | nghuyong/mtl-duolingo | bd7c9bf1f3d54b23928ba3af2b460e5e34eb8375 | [
"MIT"
] | null | null | null | models/multitask/data_loader.py | nghuyong/mtl-duolingo | bd7c9bf1f3d54b23928ba3af2b460e5e34eb8375 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
from models.base.data_loader import DataLoader
class MultiTaskDataLoader(DataLoader):
def __init__(self, is_training):
super().__init__(is_training)
def generate_train_batch_data(self, one_data_set_batch_size):
data_sets = ['en_es', 'es_en', 'fr_en']
data_generator_dic = {}
context_embedding_dic = {}
for data_set in data_sets:
data_generator_dic[data_set] = self.batch_data_generator(self.get_data_path(data_set),
one_data_set_batch_size)
context_embedding_dic[data_set] = self.load_context_embedding(data_set)
country2id, user2id, client2id, format2id, session2id = self.load_meta_embedding()
while True:
yield_batch_data = {}
for data_set in data_sets:
one_batch_data = data_generator_dic[data_set].__next__()
prepared_batch_data = self.prepare_one_batch_meta_data(one_batch_data, user2id, country2id,
client2id, session2id, format2id)
word2id, word_embedded_dic, char2id = context_embedding_dic[data_set]
prepared_batch_data.update(
self.prepare_one_batch_context_and_label_data(one_batch_data, self.char_max_len, word2id,
word_embedded_dic, char2id))
yield_batch_data[data_set] = prepared_batch_data
yield yield_batch_data
def generate_test_batch_data(self, one_data_set_batch_size, test_data_set):
data_sets = ['en_es', 'es_en', 'fr_en']
data_generator_dic = {}
context_embedding_dic = {}
for data_set in data_sets:
data_generator_dic[data_set] = self.batch_data_generator(self.get_data_path(data_set),
one_data_set_batch_size)
context_embedding_dic[data_set] = self.load_context_embedding(data_set)
country2id, user2id, client2id, format2id, session2id = self.load_meta_embedding()
yield_batch_data = {}
for data_set in data_sets:
one_batch_data = data_generator_dic[data_set].__next__()
prepared_batch_data = self.prepare_one_batch_meta_data(one_batch_data, user2id, country2id,
client2id, session2id, format2id)
word2id, word_embedded_dic, char2id = context_embedding_dic[data_set]
prepared_batch_data.update(
self.prepare_one_batch_context_and_label_data(one_batch_data, self.char_max_len, word2id,
word_embedded_dic,
char2id))
yield_batch_data[data_set] = prepared_batch_data
yield yield_batch_data
test_data_generator = data_generator_dic[test_data_set]
for one_batch_data in test_data_generator:
prepared_batch_data = self.prepare_one_batch_meta_data(one_batch_data, user2id, country2id,
client2id, session2id, format2id)
word2id, word_embedded_dic, char2id = context_embedding_dic[test_data_set]
prepared_batch_data.update(
self.prepare_one_batch_context_and_label_data(one_batch_data, self.char_max_len, word2id,
word_embedded_dic,
char2id))
yield_batch_data[test_data_set] = prepared_batch_data
yield yield_batch_data
if __name__ == "__main__":
g = MultiTaskDataLoader(is_training=True).generate_train_batch_data(3)
for batch in g:
print(batch['en_es']['client_input'])
| 56.211268 | 109 | 0.598096 | 440 | 3,991 | 4.877273 | 0.163636 | 0.130009 | 0.050326 | 0.053122 | 0.829916 | 0.826188 | 0.82246 | 0.82246 | 0.792637 | 0.7726 | 0 | 0.014806 | 0.340015 | 3,991 | 70 | 110 | 57.014286 | 0.799924 | 0.00902 | 0 | 0.737705 | 0 | 0 | 0.013913 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04918 | false | 0 | 0.016393 | 0 | 0.081967 | 0.016393 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
505e1c9469c04c9105f3156450bba50d5ef25217 | 11,706 | py | Python | direcao.py | MarcoAntonioGomes/IAwork_IFMG | 4803413a905afc0fd253a04146505c6e1c9e5cd5 | [
"MIT"
] | null | null | null | direcao.py | MarcoAntonioGomes/IAwork_IFMG | 4803413a905afc0fd253a04146505c6e1c9e5cd5 | [
"MIT"
] | null | null | null | direcao.py | MarcoAntonioGomes/IAwork_IFMG | 4803413a905afc0fd253a04146505c6e1c9e5cd5 | [
"MIT"
] | null | null | null | from dbz import DragonballZ
def escolhedirecao(agente):
"""
algoritmo passa a direção e o peso
(dir,peso). Ex : [(1,10), (2,35)]
uma lista ordena pelo menor peso e vai passando
----------------------------------------
1 = norte
2 = nordeste
3 = leste
4 = suldeste
5 = sul
6 = suldoeste
7 = oeste
8 = noroeste
-----------------------------------------
norte pega as 3 casas a cima da pos atual
nordeste pega em L ou seja 3 para a direita e 3 para cima 3 ou 3 para cima e 3 para o lado
leste 3 para direita
suldeste em L 3 para direita e 3 para baixo ou 3 p baixo 3 p direita
sul 3 p baixo
suldoeste 3 p esquerda 3 p baixo ou 3 p baixo e 3 p esquerda
oeste 3 p esquerda
noroeste 3 p esquerda e 3 p cima, ou 3 p cima e 3 p esquerda
"""
# lista de tuplas
ranking = list()
# norte
if (radar_direcao["norte" ]== 1):
for i in range(3):
if mapa[agente_pos[0]][agente_pos[1]-i] == 1
custo += 1
elif mapa[agente_pos[0] - i][agente_pos[1]-i] == 2
custo += 10
else:
custo += 35
ranking.append((1, custo))
# reseta custo
custo = 0
# nordeste
if (radar_direcao["nordeste" ]== 1):
for i in range(3):
if mapa[agente_pos[0]+i][agente_pos[1]-i] == 1
custo += 1
elif mapa[agente_pos[0]+i][agente_pos[1]-i] == 2
custo += 10
else:
custo += 35
ranking.append((2, custo))
# reseta custo
custo = 0
# leste
if (radar_direcao["leste" ]== 1):
for i in range(3):
if mapa[agente_pos[0]][agente_pos[1] + i] == 1
custo += 1
elif mapa[agente_pos[0]][agente_pos[1] + i] == 2
custo += 10
else:
custo += 35
ranking.append((3, custo))
# reseta custo
custo = 0
# suldeste
if (radar_direcao["suldeste" ]== 1):
for i in range(3):
if mapa[agente_pos[0]+i][agente_pos[1] + i] == 1
custo += 1
elif mapa[agente_pos[0]+i][agente_pos[1] + i] == 2
custo += 10
else:
custo += 35
ranking.append((4, custo))
# reseta custo
custo = 0
# sul
if (radar_direcao["sul" ]== 1):
for i in range(3):
if mapa[agente_pos[0]][agente_pos[1] +i] == 1
custo += 1
elif mapa[agente_pos[0]][agente_pos[1] +i] == 2
custo += 10
else:
custo += 35
ranking.append((5, custo))
# reseta custo
custo = 0
# suldoeste
if (radar_direcao["suldoeste" ]== 1):
for i in range(3):
if mapa[agente_pos[0]-i][agente_pos[1] +i] == 1
custo += 1
elif mapa[agente_pos[0]-i][agente_pos[1] +i] == 2
custo += 10
else:
custo += 35
ranking.append((6, custo))
# reseta custo
custo = 0
# oeste
if (radar_direcao["oeste" ]== 1):
for i in range(3):
if mapa[agente_pos[0] - i][agente_pos[1]] == 1
custo += 1
elif mapa[agente_pos[0] - i][agente_pos[1]] == 2
custo += 10
else:
custo += 35
ranking.append((7, custo))
# reseta custo
custo = 0
# noroeste
if (radar_direcao["noroeste" ]== 1):
for i in range(3):
if mapa[agente_pos[0] - i][agente_pos[1] - i] == 1
custo += 1
elif mapa[agente_pos[0] - i][agente_pos[1] - i] == 2
custo += 10
else:
custo += 35
ranking.append((8, custo))
# reseta custo
custo = 0
# ordena as direções pelo menor peso
ranking.sort(key=lambda x: x[1])
#escolhe direcao
if ranking[0][0] == 1 :
direcao = "norte"
elif ranking [0][0] == 2
direcao = "nordeste"
elif ranking [0][0] == 3
direcao = "leste"
elif ranking [0][0] == 4
direcao = "suldeste"
elif ranking [0][0] == 5
direcao = "sul"
elif ranking [0][0] == 6
direcao = "suldoeste"
elif ranking [0][0] == 7
direcao = "oeste"
elif ranking [0][0] == 8
direcao = "nordeste"
else:
direcao = "erro"
#retorna direcao escolhida
return direcao
def anda(agente):
#escolhe direcao
direcao=escolhedirecao(agente)
#anda a direcao
if direcao == "norte":
pos=agente_pos[0], agente_pos[1] - 3
elif direcao == "sul":
pos=agente_pos[0], agente_pos[1] + 3
elif direcao == "leste":
pos=agente_pos[0] + 3, agente_pos[1]
elif direcao == "oeste":
pos=agente_pos[0] - 3, agente_pos[1]
elif direcao == "nordeste":
pos=agente_pos[0] + 3, agente_pos[1] - 3
elif direcao == "noroeste":
pos=agente_pos[0] - 3, agente_pos[1] - 3
elif direcao == "suldeste":
pos=agent_pos[0] + 3, agente_pos[1] + 3
elif direcao == "suldoeste":
pos=agente_pos[0] - 3, agente_pos[1] + 3
else direcao == "erro":
raise Exception("Erro ao passar a direcao com menor caminho")
posfinal = pos
return posfinal
"""
def escolhedirecao(mapa,radar_direcao,agente_pos):
"""
"""
algoritmo passa a direção e o peso
(dir,peso). Ex : [(1,10), (2,35)]
uma lista ordena pelo menor peso e vai passando
----------------------------------------
1 = norte
2 = nordeste
3 = leste
4 = suldeste
5 = sul
6 = suldoeste
7 = oeste
8 = noroeste
-----------------------------------------
norte pega as 3 casas a cima da pos atual
nordeste pega em L ou seja 3 para a direita e 3 para cima 3 ou 3 para cima e 3 para o lado
leste 3 para direita
suldeste em L 3 para direita e 3 para baixo ou 3 p baixo 3 p direita
sul 3 p baixo
suldoeste 3 p esquerda 3 p baixo ou 3 p baixo e 3 p esquerda
oeste 3 p esquerda
noroeste 3 p esquerda e 3 p cima, ou 3 p cima e 3 p esquerda
"""
"""
# lista de tuplas
ranking = list()
custo = 0
# norte
if (radar_direcao["norte"]== 1):
for i in range(3):
if mapa[agente_pos[0]][agente_pos[1]-i] == 1 :
custo += 1
elif mapa[agente_pos[0] - i][agente_pos[1]-i] == 2:
custo += 10
else:
custo += 35
ranking.append((1, custo))
# reseta custo
custo = 0
# nordeste
if (radar_direcao["nordeste" ]== 1):
for i in range(3):
if mapa[agente_pos[0]+i][agente_pos[1]-i] == 1:
custo += 1
elif mapa[agente_pos[0]+i][agente_pos[1]-i] == 2:
custo += 10
else:
custo += 35
ranking.append((2, custo))
# reseta custo
custo = 0
# leste
if (radar_direcao["leste" ]== 1):
for i in range(3):
if mapa[agente_pos[0]][agente_pos[1] + i] == 1:
custo += 1
elif mapa[agente_pos[0]][agente_pos[1] + i] == 2:
custo += 10
else:
custo += 35
ranking.append((3, custo))
# reseta custo
custo = 0
# suldeste
if (radar_direcao["suldeste" ]== 1):
for i in range(3):
if mapa[agente_pos[0]+i][agente_pos[1] + i] == 1:
custo += 1
elif mapa[agente_pos[0]+i][agente_pos[1] + i] == 2:
custo += 10
else:
custo += 35
ranking.append((4, custo))
# reseta custo
custo = 0
# sul
if (radar_direcao["sul" ]== 1):
for i in range(3):
if mapa[agente_pos[0]][agente_pos[1] +i] == 1:
custo += 1
elif mapa[agente_pos[0]][agente_pos[1] +i] == 2:
custo += 10
else:
custo += 35
ranking.append((5, custo))
# reseta custo
custo = 0
# suldoeste
if (radar_direcao["suldoeste" ]== 1):
for i in range(3):
if mapa[agente_pos[0]-i][agente_pos[1] +i] == 1:
custo += 1
elif mapa[agente_pos[0]-i][agente_pos[1] +i] == 2:
custo += 10
else:
custo += 35
ranking.append((6, custo))
# reseta custo
custo = 0
# oeste
if (radar_direcao["oeste" ]== 1):
for i in range(3):
if mapa[agente_pos[0] - i][agente_pos[1]] == 1:
custo += 1
elif mapa[agente_pos[0] - i][agente_pos[1]] == 2:
custo += 10
else:
custo += 35
ranking.append((7, custo))
# reseta custo
custo = 0
# noroeste
if (radar_direcao["noroeste" ]== 1):
for i in range(3):
if mapa[agente_pos[0] - i][agente_pos[1] - i] == 1:
custo += 1
elif mapa[agente_pos[0] - i][agente_pos[1] - i] == 2:
custo += 10
else:
custo += 35
ranking.append((8, custo))
# reseta custo
custo = 0
# ordena as direções pelo menor peso
ranking.sort(key=lambda x: x[1])
print("RRRRRRANKINNNNG:", ranking)
#escolhe direcao
if ranking[0][0] == 1 :
direcao = "norte"
elif ranking [0][0] == 2:
direcao = "nordeste"
elif ranking [0][0] == 3:
direcao = "leste"
elif ranking [0][0] == 4:
direcao = "suldeste"
elif ranking [0][0] == 5:
direcao = "sul"
elif ranking [0][0] == 6:
direcao = "suldoeste"
elif ranking [0][0] == 7:
direcao = "oeste"
elif ranking [0][0] == 8:
direcao = "nordeste"
else:
direcao = "erro"
#retorna direcao escolhida
return direcao
def anda(radar_direcao,mapa,agente_pos):
while(True):
#escolhe direcao
direcao = escolhedirecao(mapa,radar_direcao,agente_pos)
print("Direcao=", direcao)
print("agente=", agente_pos)
#anda a direcao
if direcao == "norte":
pos=agente_pos[0], agente_pos[1] - 3
elif direcao == "sul":
pos=agente_pos[0], agente_pos[1] + 3
elif direcao == "leste":
pos=agente_pos[0] + 3, agente_pos[1]
elif direcao == "oeste":
pos=agente_pos[0] - 3, agente_pos[1]
elif direcao == "nordeste":
pos=agente_pos[0] + 3, agente_pos[1] - 3
elif direcao == "noroeste":
pos=agente_pos[0] - 3, agente_pos[1] - 3
elif direcao == "suldeste":
pos=agente_pos[0] + 3, agente_pos[1] + 3
elif direcao == "suldoeste":
pos=agente_pos[0] - 3, agente_pos[1] + 3
else:
raise Exception("Erro ao passar a direcao com menor caminho")
posfinal = pos
print("posFinal=", posfinal)
if (aceito([0,0],[len(mapa),len(mapa)],posfinal) == True):
#norte
if(posfinal[1] <0):
posfinal = posfinal[0],posfinal[1]+3
elif(posfinal [0] < 0):
posFinal = posfinal[0]+3,posfinal[1]
elif(posfinal [1]> len(mapa)-1):
posfinal = posfinal[0],posfinal[1]-3,
elif(posfinal [0]> len(mapa)-1):
posfinal = posfinal[0] -3,posfinal[1]
return posfinal
print("brecou")
# break
break
return posfinal
"""
| 28.139423 | 94 | 0.484367 | 1,526 | 11,706 | 3.637615 | 0.067497 | 0.160512 | 0.086471 | 0.080706 | 0.929562 | 0.929562 | 0.9029 | 0.9029 | 0.9029 | 0.888489 | 0 | 0.060294 | 0.378011 | 11,706 | 415 | 95 | 28.207229 | 0.702101 | 0.02409 | 0 | 0.456 | 0 | 0 | 0.046645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.008 | 0.008 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
acc399cdef833bd0af98a7c63fc6a8a54dc9c946 | 13,299 | py | Python | pytorch/test-files/test.py | alexandrosstergiou/SoftPool | 2d2ec6dca10b7683ffd41061a27910d67816bfa5 | [
"MIT"
] | 226 | 2021-01-05T03:19:42.000Z | 2022-03-16T17:02:20.000Z | pytorch/test-files/test.py | yisun98/SoftPool | 70e7b3b51640b9abd34103f7b68990c5ef6aa2f8 | [
"MIT"
] | 44 | 2021-01-10T10:15:46.000Z | 2022-02-22T09:49:41.000Z | pytorch/test-files/test.py | yisun98/SoftPool | 70e7b3b51640b9abd34103f7b68990c5ef6aa2f8 | [
"MIT"
] | 42 | 2021-01-11T06:17:24.000Z | 2022-03-30T04:30:48.000Z | import torch
import softpool_cuda
from SoftPool import soft_pool1d, soft_pool2d, soft_pool3d, SoftPool1d, SoftPool2d, SoftPool3d
import timeit
def check_close_enough(a, check):
a = a.cpu()
check = check.cpu()
residual = (a-check).data.abs().mean().cpu().item()
assert torch.isnan(check).sum() == 0, 'meet NaN(s) in `check`'
assert residual < .2, 'residual is not small: {}'.format(residual)
x_1d = torch.rand((20, 32, 128)).float()
x_2d = torch.rand((20, 32, 128, 128)).float()
x_3d = torch.rand((20, 32, 16, 128, 128)).float()
print('\033[95m' + '--- Initial checks for forward ---' + '\033[0m')
################## 1D FORWARD ##################
print('\033[93m' + '> Checking 1D CPU ...' + '\033[0m')
try:
pl_1d_cpu = soft_pool1d(x_1d)
print('\033[92m' + '> PASSED' + '\033[0m')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e)
print('\033[93m' + '> Checking 1D GPU ...' + '\033[0m')
try:
pl_1d_gpu = soft_pool1d(x_1d.cuda())
print('\033[92m' + '> PASSED' + '\033[0m')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e)
print('\033[93m' + '> Checking 1D CPU-GPU output similarities ...' + '\033[0m')
try:
check_close_enough(pl_1d_cpu.data, pl_1d_gpu.data)
print('\033[92m' + '> PASSED' + '\033[0m'+'\n')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e,'\n')
################## 2D FORWARD ##################
print('\033[93m' + '> Checking 2D CPU ...' + '\033[0m')
try:
pl_2d_cpu = soft_pool2d(x_2d)
print('\033[92m' + '> PASSED' + '\033[0m')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e)
print('\033[93m' + '> Checking 2D GPU ...' + '\033[0m')
try:
pl_2d_gpu = soft_pool2d(x_2d.cuda())
print('\033[92m' + '> PASSED' + '\033[0m')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e)
print('\033[93m' + '> Checking 2D CPU-GPU output similarities ...' + '\033[0m')
try:
check_close_enough(pl_2d_cpu.data, pl_2d_gpu.data)
print('\033[92m' + '> PASSED' + '\033[0m'+'\n')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e,'\n')
################## 3D FORWARD ##################
print('\033[93m' + '> Checking 3D CPU ...' + '\033[0m')
try:
pl_3d_cpu = soft_pool3d(x_3d)
print('\033[92m' + '> PASSED' + '\033[0m')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e)
print('\033[93m' + '> Checking 3D GPU ...' + '\033[0m')
try:
pl_3d_gpu = soft_pool3d(x_3d.cuda())
print('\033[92m' + '> PASSED' + '\033[0m')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e)
print('\033[93m' + '> Checking 3D CPU-GPU output similarities ...' + '\033[0m')
try:
check_close_enough(pl_3d_cpu.data, pl_3d_gpu.data)
print('\033[92m' + '> PASSED' + '\033[0m'+'\n')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e,'\n')
print('\033[95m' + '--- Initial checks for backward ---' + '\033[0m')
a_1d = torch.rand((20, 32, 128)).float()
b_1d = a_1d.clone().cuda()
a_2d = torch.rand((20, 32, 128, 128)).float()
b_2d = a_2d.clone().cuda()
a_3d = torch.rand((20, 32, 16, 128, 128)).float()
b_3d = a_3d.clone().cuda()
a_1d.requires_grad = True
a_2d.requires_grad = True
a_3d.requires_grad = True
b_1d.requires_grad = True
b_2d.requires_grad = True
b_3d.requires_grad = True
print('\033[93m' + '> Checking 1D CPU ...' + '\033[0m')
try:
soft_pool1d(a_1d).pow(2).mean().backward()
print('\033[92m' + '> PASSED' + '\033[0m')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e)
print('\033[93m' + '> Checking 1D GPU ...' + '\033[0m')
try:
soft_pool1d(b_1d).pow(2).mean().backward()
print('\033[92m' + '> PASSED' + '\033[0m')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e)
print('\033[93m' + '> Checking 1D grad similarities ...' + '\033[0m')
try:
check_close_enough(a_1d.grad.data, b_1d.grad.data)
print('\033[92m' + '> PASSED' + '\033[0m'+'\n')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e,'\n')
print('\033[93m' + '> Checking 2D CPU ...' + '\033[0m')
try:
soft_pool2d(a_2d).pow(2).mean().backward()
print('\033[92m' + '> PASSED' + '\033[0m')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e)
print('\033[93m' + '> Checking 2D GPU ...' + '\033[0m')
try:
soft_pool2d(b_2d).pow(2).mean().backward()
print('\033[92m' + '> PASSED' + '\033[0m')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e)
print('\033[93m' + '> Checking 2D grad similarities ...' + '\033[0m')
try:
check_close_enough(a_2d.grad.data, b_2d.grad.data)
print('\033[92m' + '> PASSED' + '\033[0m'+'\n')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e,'\n')
print('\033[93m' + '> Checking 3D CPU ...' + '\033[0m')
try:
soft_pool3d(a_3d).pow(2).mean().backward()
print('\033[92m' + '> PASSED' + '\033[0m')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e)
print('\033[93m' + '> Checking 3D GPU ...' + '\033[0m')
try:
soft_pool3d(b_3d).pow(2).mean().backward()
print('\033[92m' + '> PASSED' + '\033[0m')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e)
print('\033[93m' + '> Checking 3D grad similarities ...' + '\033[0m')
try:
check_close_enough(a_3d.grad.data, b_3d.grad.data)
print('\033[92m' + '> PASSED' + '\033[0m'+'\n')
except Exception as e:
print('\033[91m' + '> FAILED' + '\033[0m')
print(e,'\n')
print('\n'+'\033[92m' + 'TESTS COMPLETED' + '\033[0m'+'\n')
print('\033[95m' + '--- Profiling checks ---' + '\033[0m')
a_1d = torch.rand((10, 32, 80)).float()
b_1d = a_1d.clone().cuda()
c_1d = a_1d.clone().cuda()
a_2d = torch.rand((10, 32, 80, 80)).float()
b_2d = a_2d.clone().cuda()
c_2d = a_2d.clone().cuda()
a_3d = torch.rand((10, 32, 8, 80, 80)).float()
b_3d = a_3d.clone().cuda()
c_3d = a_3d.clone().cuda()
a_1d.requires_grad = True
a_2d.requires_grad = True
a_3d.requires_grad = True
b_1d.requires_grad = True
b_2d.requires_grad = True
b_3d.requires_grad = True
c_1d.requires_grad = True
c_2d.requires_grad = True
c_3d.requires_grad = True
with torch.autograd.profiler.profile(use_cuda=False) as prof:
for i in range(100):
soft_pool1d(a_1d)
print('\033[93m' +'SoftPool1d (CPU) [foward]'+ '\033[0m')
print(prof.key_averages().table(sort_by="self_cpu_time_total"))
time_f_1d_cpu = ''.join(str(prof).split('\n')[-2:])
_tt = soft_pool1d(a_1d)
with torch.autograd.profiler.profile(use_cuda=False) as prof:
for i in range(100):
soft_pool1d(a_1d).backward(_tt)
print('\033[93m' +'SoftPool1d (CPU) [forward + backward]'+ '\033[0m')
print(prof.key_averages().table(sort_by="self_cpu_time_total"))
time_b_1d_cpu = ''.join(str(prof).split('\n')[-2:])
with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(100):
soft_pool1d(b_1d,force_inplace=True)
print('\033[93m' +'SoftPool1d (CUDA-inplace) [foward]'+ '\033[0m')
print(prof.key_averages())
time_f_1d_cuda_forced = ''.join(str(prof).split('\n')[-3:])
_tt = soft_pool1d(b_1d,force_inplace=True)
with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(100):
soft_pool1d(b_1d,force_inplace=True).backward(_tt)
print('\033[93m' +'SoftPool1d (CUDA-inplace) [forward + backward]'+ '\033[0m')
print(prof.key_averages())
time_b_1d_cuda_forced = ''.join(str(prof).split('\n')[-3:])
with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(100):
soft_pool1d(c_1d)
print('\033[93m' +'SoftPool1d (CUDA) [foward]'+ '\033[0m')
print(prof.key_averages())
time_f_1d_cuda = ''.join(str(prof).split('\n')[-3:])
_tt = soft_pool1d(c_1d)
with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(100):
soft_pool1d(c_1d).backward(_tt)
print('\033[93m' +'SoftPool1d (CUDA) [forward + backward]'+ '\033[0m')
print(prof.key_averages())
time_b_1d_cuda = ''.join(str(prof).split('\n')[-3:])
with torch.autograd.profiler.profile(use_cuda=False) as prof:
for i in range(100):
soft_pool2d(a_2d)
print('\033[93m' +'SoftPool2d (CPU) [foward]'+ '\033[0m')
print(prof.key_averages().table(sort_by="self_cpu_time_total"))
time_f_2d_cpu = ''.join(str(prof).split('\n')[-2:])
_tt = soft_pool2d(a_2d)
with torch.autograd.profiler.profile(use_cuda=False) as prof:
for i in range(100):
soft_pool2d(a_2d).backward(_tt)
print('\033[93m' +'SoftPool2d (CPU) [forward + backward]'+ '\033[0m')
print(prof.key_averages().table(sort_by="self_cpu_time_total"))
time_b_2d_cpu = ''.join(str(prof).split('\n')[-2:])
with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(100):
soft_pool2d(b_2d,force_inplace=True)
print('\033[93m' +'SoftPool2d (CUDA-inplace) [foward]'+ '\033[0m')
print(prof.key_averages())
time_f_2d_cuda_forced = ''.join(str(prof).split('\n')[-3:])
_tt = soft_pool2d(b_2d,force_inplace=True)
with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(100):
soft_pool2d(b_2d,force_inplace=True).backward(_tt)
print('\033[93m' +'SoftPool2d (CUDA-inplace) [forward + backward]'+ '\033[0m')
print(prof.key_averages())
time_b_2d_cuda_forced = ''.join(str(prof).split('\n')[-3:])
with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(100):
soft_pool2d(c_2d)
print('\033[93m' +'SoftPool2d (CUDA) [foward]'+ '\033[0m')
time_f_2d_cuda = ''.join(str(prof).split('\n')[-3:])
print(prof.key_averages())
_tt = soft_pool2d(c_2d)
with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(100):
soft_pool2d(c_2d).backward(_tt)
print('\033[93m' +'SoftPool2d (CUDA) [forward + backward]'+ '\033[0m')
print(prof.key_averages())
time_b_2d_cuda = ''.join(str(prof).split('\n')[-3:])
with torch.autograd.profiler.profile(use_cuda=False) as prof:
for i in range(100):
soft_pool3d(a_3d)
print('\033[93m' +'SoftPool3d (CPU) [foward]'+ '\033[0m')
print(prof.key_averages().table(sort_by="self_cpu_time_total"))
time_f_3d_cpu = ''.join(str(prof).split('\n')[-2:])
_tt = soft_pool3d(a_3d)
with torch.autograd.profiler.profile(use_cuda=False) as prof:
for i in range(100):
soft_pool3d(a_3d).backward(_tt)
print('\033[93m' +'SoftPool3d (CPU) [forward + backward]'+ '\033[0m')
print(prof.key_averages().table(sort_by="self_cpu_time_total"))
time_b_3d_cpu = ''.join(str(prof).split('\n')[-2:])
with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(100):
soft_pool3d(b_3d,force_inplace=True)
print('\033[93m' +'SoftPool3d (CUDA-inplace) [foward]'+ '\033[0m')
print(prof.key_averages())
time_f_3d_cuda_forced = ''.join(str(prof).split('\n')[-3:])
_tt = soft_pool3d(b_3d,force_inplace=True)
with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(100):
soft_pool3d(b_3d,force_inplace=True).backward(_tt)
print('\033[93m' +'SoftPool3d (CUDA-inplace) [forward + backward]'+ '\033[0m')
print(prof.key_averages())
time_b_3d_cuda_forced = ''.join(str(prof).split('\n')[-3:])
with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(100):
soft_pool3d(c_3d)
print('\033[93m' +'SoftPool3d (CUDA) [foward]'+ '\033[0m')
print(prof.key_averages())
time_f_3d_cuda = ''.join(str(prof).split('\n')[-3:])
_tt = soft_pool3d(c_3d)
with torch.autograd.profiler.profile(use_cuda=True) as prof:
for i in range(100):
soft_pool3d(c_3d).backward(_tt)
print('\033[93m' +'SoftPool3d (CUDA) [forward + backward]'+ '\033[0m')
print(prof.key_averages())
time_b_3d_cuda = ''.join(str(prof).split('\n')[-3:])
print('\n'+'\033[93m' +'-------------------------------'+ '\033[0m')
print('\033[93m' +'SoftPool1d [forward + backward]'+ '\033[0m')
print('\n'+'\033[93m' +'----------- C P U ------------'+ '\033[0m')
print(time_b_1d_cpu)
print('\n'+'\033[93m' +'-- C U D A - I N P L A C E ---'+ '\033[0m')
print(time_b_1d_cuda_forced)
print('\n'+'\033[93m' +'---------- C U D A -----------'+ '\033[0m')
print(time_b_1d_cuda)
print('\n'+'\033[93m' +'-------------------------------'+ '\033[0m')
print('\n'+'\033[93m' +'-------------------------------'+ '\033[0m')
print('\033[93m' +'SoftPool2d [forward + backward]'+ '\033[0m')
print('\n'+'\033[93m' +'----------- C P U ------------'+ '\033[0m')
print(time_b_2d_cpu)
print('\n'+'\033[93m' +'-- C U D A - I N P L A C E ---'+ '\033[0m')
print(time_b_2d_cuda_forced)
print('\n'+'\033[93m' +'---------- C U D A -----------'+ '\033[0m')
print(time_b_2d_cuda)
print('\n'+'\033[93m' +'-------------------------------'+ '\033[0m')
print('\n'+'\033[93m' +'-------------------------------'+ '\033[0m')
print('\033[93m' +'SoftPool3d [forward + backward]'+ '\033[0m')
print('\n'+'\033[93m' +'----------- C P U ------------'+ '\033[0m')
print(time_b_3d_cpu)
print('\n'+'\033[93m' +'-- C U D A - I N P L A C E ---'+ '\033[0m')
print(time_b_3d_cuda_forced)
print('\n'+'\033[93m' +'---------- C U D A -----------'+ '\033[0m')
print(time_b_3d_cuda)
print('\n'+'\033[93m' +'-------------------------------'+ '\033[0m')
print('\n'+'\033[95m' + '--- Tests finished ---' + '\033[0m')
| 35.275862 | 94 | 0.610196 | 2,093 | 13,299 | 3.705208 | 0.056378 | 0.061251 | 0.068343 | 0.044101 | 0.906125 | 0.874275 | 0.85648 | 0.81109 | 0.788008 | 0.709478 | 0 | 0.107261 | 0.138431 | 13,299 | 376 | 95 | 35.369681 | 0.569558 | 0.002557 | 0 | 0.59434 | 0 | 0 | 0.285747 | 0.014139 | 0 | 0 | 0 | 0 | 0.006289 | 1 | 0.003145 | false | 0.056604 | 0.012579 | 0 | 0.015723 | 0.440252 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 8 |
accc10426adf9843bc001d91b514dd9eb7917f1b | 246,554 | py | Python | sdk/python/pulumi_vsphere/virtual_machine.py | pulumi/pulumi-vsphere | a4536cd49860323bd57cbf2a127c5b57c9f9b60c | [
"ECL-2.0",
"Apache-2.0"
] | 38 | 2018-09-17T18:56:29.000Z | 2022-03-26T03:07:20.000Z | sdk/python/pulumi_vsphere/virtual_machine.py | pulumi/pulumi-vsphere | a4536cd49860323bd57cbf2a127c5b57c9f9b60c | [
"ECL-2.0",
"Apache-2.0"
] | 75 | 2018-09-17T13:18:24.000Z | 2022-03-31T21:32:30.000Z | sdk/python/pulumi_vsphere/virtual_machine.py | pulumi/pulumi-vsphere | a4536cd49860323bd57cbf2a127c5b57c9f9b60c | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2019-10-05T10:30:01.000Z | 2020-09-30T11:16:59.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
from . import outputs
from ._inputs import *
__all__ = ['VirtualMachineArgs', 'VirtualMachine']
@pulumi.input_type
class VirtualMachineArgs:
def __init__(__self__, *,
resource_pool_id: pulumi.Input[str],
alternate_guest_name: Optional[pulumi.Input[str]] = None,
annotation: Optional[pulumi.Input[str]] = None,
boot_delay: Optional[pulumi.Input[int]] = None,
boot_retry_delay: Optional[pulumi.Input[int]] = None,
boot_retry_enabled: Optional[pulumi.Input[bool]] = None,
cdrom: Optional[pulumi.Input['VirtualMachineCdromArgs']] = None,
clone: Optional[pulumi.Input['VirtualMachineCloneArgs']] = None,
cpu_hot_add_enabled: Optional[pulumi.Input[bool]] = None,
cpu_hot_remove_enabled: Optional[pulumi.Input[bool]] = None,
cpu_limit: Optional[pulumi.Input[int]] = None,
cpu_performance_counters_enabled: Optional[pulumi.Input[bool]] = None,
cpu_reservation: Optional[pulumi.Input[int]] = None,
cpu_share_count: Optional[pulumi.Input[int]] = None,
cpu_share_level: Optional[pulumi.Input[str]] = None,
custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
datacenter_id: Optional[pulumi.Input[str]] = None,
datastore_cluster_id: Optional[pulumi.Input[str]] = None,
datastore_id: Optional[pulumi.Input[str]] = None,
disks: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualMachineDiskArgs']]]] = None,
efi_secure_boot_enabled: Optional[pulumi.Input[bool]] = None,
enable_disk_uuid: Optional[pulumi.Input[bool]] = None,
enable_logging: Optional[pulumi.Input[bool]] = None,
ept_rvi_mode: Optional[pulumi.Input[str]] = None,
extra_config: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
firmware: Optional[pulumi.Input[str]] = None,
folder: Optional[pulumi.Input[str]] = None,
force_power_off: Optional[pulumi.Input[bool]] = None,
guest_id: Optional[pulumi.Input[str]] = None,
hardware_version: Optional[pulumi.Input[int]] = None,
host_system_id: Optional[pulumi.Input[str]] = None,
hv_mode: Optional[pulumi.Input[str]] = None,
ide_controller_count: Optional[pulumi.Input[int]] = None,
ignored_guest_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
latency_sensitivity: Optional[pulumi.Input[str]] = None,
memory: Optional[pulumi.Input[int]] = None,
memory_hot_add_enabled: Optional[pulumi.Input[bool]] = None,
memory_limit: Optional[pulumi.Input[int]] = None,
memory_reservation: Optional[pulumi.Input[int]] = None,
memory_share_count: Optional[pulumi.Input[int]] = None,
memory_share_level: Optional[pulumi.Input[str]] = None,
migrate_wait_timeout: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
nested_hv_enabled: Optional[pulumi.Input[bool]] = None,
network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualMachineNetworkInterfaceArgs']]]] = None,
num_cores_per_socket: Optional[pulumi.Input[int]] = None,
num_cpus: Optional[pulumi.Input[int]] = None,
ovf_deploy: Optional[pulumi.Input['VirtualMachineOvfDeployArgs']] = None,
pci_device_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
poweron_timeout: Optional[pulumi.Input[int]] = None,
replace_trigger: Optional[pulumi.Input[str]] = None,
run_tools_scripts_after_power_on: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_after_resume: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_before_guest_reboot: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_before_guest_shutdown: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_before_guest_standby: Optional[pulumi.Input[bool]] = None,
sata_controller_count: Optional[pulumi.Input[int]] = None,
scsi_bus_sharing: Optional[pulumi.Input[str]] = None,
scsi_controller_count: Optional[pulumi.Input[int]] = None,
scsi_type: Optional[pulumi.Input[str]] = None,
shutdown_wait_timeout: Optional[pulumi.Input[int]] = None,
storage_policy_id: Optional[pulumi.Input[str]] = None,
swap_placement_policy: Optional[pulumi.Input[str]] = None,
sync_time_with_host: Optional[pulumi.Input[bool]] = None,
sync_time_with_host_periodically: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
vapp: Optional[pulumi.Input['VirtualMachineVappArgs']] = None,
vbs_enabled: Optional[pulumi.Input[bool]] = None,
vvtd_enabled: Optional[pulumi.Input[bool]] = None,
wait_for_guest_ip_timeout: Optional[pulumi.Input[int]] = None,
wait_for_guest_net_routable: Optional[pulumi.Input[bool]] = None,
wait_for_guest_net_timeout: Optional[pulumi.Input[int]] = None):
"""
The set of arguments for constructing a VirtualMachine resource.
:param pulumi.Input[str] resource_pool_id: The managed object reference
ID of the resource pool to put this virtual machine in.
See the section on virtual machine migration
for details on changing this value.
:param pulumi.Input[str] alternate_guest_name: The guest name for the operating system
when `guest_id` is `other` or `other-64`.
:param pulumi.Input[str] annotation: A user-provided description of the virtual machine.
The default is no annotation.
:param pulumi.Input[int] boot_delay: The number of milliseconds to wait before starting
the boot sequence. The default is no delay.
:param pulumi.Input[int] boot_retry_delay: The number of milliseconds to wait before
retrying the boot sequence. This only valid if `boot_retry_enabled` is true.
Default: `10000` (10 seconds).
:param pulumi.Input[bool] boot_retry_enabled: If set to true, a virtual machine that
fails to boot will try again after the delay defined in `boot_retry_delay`.
Default: `false`.
:param pulumi.Input['VirtualMachineCdromArgs'] cdrom: A specification for a CDROM device on this virtual
machine. See CDROM options below.
:param pulumi.Input['VirtualMachineCloneArgs'] clone: When specified, the VM will be created as a clone of a
specified template. Optional customization options can be submitted as well.
See creating a virtual machine from a
template for more details.
:param pulumi.Input[bool] cpu_hot_add_enabled: Allow CPUs to be added to this virtual
machine while it is running.
:param pulumi.Input[bool] cpu_hot_remove_enabled: Allow CPUs to be removed to this
virtual machine while it is running.
:param pulumi.Input[int] cpu_limit: The maximum amount of CPU (in MHz) that this virtual
machine can consume, regardless of available resources. The default is no
limit.
:param pulumi.Input[bool] cpu_performance_counters_enabled: Enable CPU performance
counters on this virtual machine. Default: `false`.
:param pulumi.Input[int] cpu_reservation: The amount of CPU (in MHz) that this virtual
machine is guaranteed. The default is no reservation.
:param pulumi.Input[int] cpu_share_count: The number of CPU shares allocated to the
virtual machine when the `cpu_share_level` is `custom`.
:param pulumi.Input[str] cpu_share_level: The allocation level for CPU resources. Can be
one of `high`, `low`, `normal`, or `custom`. Default: `custom`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: Map of custom attribute ids to attribute
value strings to set for virtual machine.
:param pulumi.Input[str] datacenter_id: The datacenter id. Required only when deploying
an ovf template.
:param pulumi.Input[str] datastore_cluster_id: The managed object reference
ID of the datastore cluster ID to use. This setting
applies to entire virtual machine and implies that you wish to use Storage
DRS with this virtual machine. See the section on virtual machine
migration for details on changing this value.
:param pulumi.Input[str] datastore_id: The datastore ID that the ISO is located in.
Requried for using a datastore ISO. Conflicts with `client_device`.
:param pulumi.Input[Sequence[pulumi.Input['VirtualMachineDiskArgs']]] disks: A specification for a virtual disk device on this virtual
machine. See disk options below.
:param pulumi.Input[bool] efi_secure_boot_enabled: When the `firmware` type is set to is
`efi`, this enables EFI secure boot. Default: `false`.
:param pulumi.Input[bool] enable_disk_uuid: Expose the UUIDs of attached virtual disks to
the virtual machine, allowing access to them in the guest. Default: `false`.
:param pulumi.Input[bool] enable_logging: Enable logging of virtual machine events to a
log file stored in the virtual machine directory. Default: `false`.
:param pulumi.Input[str] ept_rvi_mode: The EPT/RVI (hardware memory virtualization)
setting for this virtual machine. Can be one of `automatic`, `on`, or `off`.
Default: `automatic`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] extra_config: Extra configuration data for this virtual
machine. Can be used to supply advanced parameters not normally in
configuration, such as instance metadata.
:param pulumi.Input[str] firmware: The firmware interface to use on the virtual machine.
Can be one of `bios` or `EFI`. Default: `bios`.
:param pulumi.Input[str] folder: The path to the folder to put this virtual machine in,
relative to the datacenter that the resource pool is in.
:param pulumi.Input[bool] force_power_off: If a guest shutdown failed or timed out while
updating or destroying (see
`shutdown_wait_timeout`), force the power-off of
the virtual machine. Default: `true`.
:param pulumi.Input[str] guest_id: The guest ID for the operating system type. For a
full list of possible values, see [here][vmware-docs-guest-ids]. Default: `other-64`.
:param pulumi.Input[int] hardware_version: The hardware version number. Valid range
is from 4 to 15. The hardware version cannot be downgraded. See [virtual
machine hardware compatibility][virtual-machine-hardware-compatibility] for
more details.
:param pulumi.Input[str] host_system_id: An optional managed object reference
ID of a host to put this virtual machine on. See the
section on virtual machine migration for
details on changing this value. If a `host_system_id` is not supplied,
vSphere will select a host in the resource pool to place the virtual machine,
according to any defaults or DRS policies in place.
:param pulumi.Input[str] hv_mode: The (non-nested) hardware virtualization setting for
this virtual machine. Can be one of `hvAuto`, `hvOn`, or `hvOff`. Default:
`hvAuto`.
:param pulumi.Input[int] ide_controller_count: The number of IDE controllers that Terraform manages on this virtual machine. This directly affects the amount of disks
you can add to the virtual machine and the maximum disk unit number. Note that lowering this value does not remove
controllers.
:param pulumi.Input[Sequence[pulumi.Input[str]]] ignored_guest_ips: List of IP addresses and CIDR networks to
ignore while waiting for an available IP address using either of the waiters.
Any IP addresses in this list will be ignored if they show up so that the
waiter will continue to wait for a real IP address. Default: [].
:param pulumi.Input[str] latency_sensitivity: Controls the scheduling delay of the
virtual machine. Use a higher sensitivity for applications that require lower
latency, such as VOIP, media player applications, or applications that
require frequent access to mouse or keyboard devices. Can be one of `low`,
`normal`, `medium`, or `high`.
:param pulumi.Input[int] memory: The size of the virtual machine's memory, in MB.
Default: `1024` (1 GB).
:param pulumi.Input[bool] memory_hot_add_enabled: Allow memory to be added to this
virtual machine while it is running.
:param pulumi.Input[int] memory_limit: The maximum amount of memory (in MB) that this
virtual machine can consume, regardless of available resources. The default
is no limit.
:param pulumi.Input[int] memory_reservation: The amount of memory (in MB) that this
virtual machine is guaranteed. The default is no reservation.
:param pulumi.Input[int] memory_share_count: The number of memory shares allocated to
the virtual machine when the `memory_share_level` is `custom`.
:param pulumi.Input[str] memory_share_level: The allocation level for memory resources.
Can be one of `high`, `low`, `normal`, or `custom`. Default: `custom`.
:param pulumi.Input[int] migrate_wait_timeout: The amount of time, in minutes, to wait
for a virtual machine migration to complete before failing. Default: 10
minutes. Also see the section on virtual machine
migration.
:param pulumi.Input[str] name: The name of the virtual machine.
:param pulumi.Input[bool] nested_hv_enabled: Enable nested hardware virtualization on
this virtual machine, facilitating nested virtualization in the guest.
Default: `false`.
:param pulumi.Input[Sequence[pulumi.Input['VirtualMachineNetworkInterfaceArgs']]] network_interfaces: A specification for a virtual NIC on this
virtual machine. See network interface options
below.
:param pulumi.Input[int] num_cores_per_socket: The number of cores per socket in this
virtual machine. The number of vCPUs on the virtual machine will be
`num_cpus` divided by `num_cores_per_socket`. If specified, the value
supplied to `num_cpus` must be evenly divisible by this value. Default: `1`.
:param pulumi.Input[int] num_cpus: The total number of virtual processor cores to assign
to this virtual machine. Default: `1`.
:param pulumi.Input['VirtualMachineOvfDeployArgs'] ovf_deploy: When specified, the VM will be deployed from the
provided ovf/ova template. See creating a virtual machine from a
ovf/ova template for more details.
:param pulumi.Input[Sequence[pulumi.Input[str]]] pci_device_ids: List of host PCI device IDs to create PCI
passthroughs for.
:param pulumi.Input[int] poweron_timeout: The amount of time, in seconds, that we will be trying to power on a VM
:param pulumi.Input[str] replace_trigger: Triggers replacement of resource whenever it changes.
`replace_trigger = sha256(format("%s-%s",data.template_file.cloud_init_metadata.rendered,data.template_file.cloud_init_userdata.rendered))`
will fingerprint the changes in cloud_init metadata and userdata templates. This will enable a replacement
of the resource whenever the dependant template renders a new configuration. (Forces a replacement)
:param pulumi.Input[bool] run_tools_scripts_after_power_on: Enable the execution of
post-power-on scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[bool] run_tools_scripts_after_resume: Enable the execution of
post-resume scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[bool] run_tools_scripts_before_guest_reboot: Enable the execution of
pre-reboot scripts when VMware tools is installed. Default: `false`.
:param pulumi.Input[bool] run_tools_scripts_before_guest_shutdown: Enable the execution
of pre-shutdown scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[bool] run_tools_scripts_before_guest_standby: Enable the execution of
pre-standby scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[int] sata_controller_count: The number of SATA controllers that Terraform manages on this virtual machine. This directly affects the amount of disks
you can add to the virtual machine and the maximum disk unit number. Note that lowering this value does not remove
controllers.
:param pulumi.Input[str] scsi_bus_sharing: Mode for sharing the SCSI bus. The modes are
physicalSharing, virtualSharing, and noSharing. Default: `noSharing`.
:param pulumi.Input[int] scsi_controller_count: The number of SCSI controllers that
this provider manages on this virtual machine. This directly affects the amount
of disks you can add to the virtual machine and the maximum disk unit number.
Note that lowering this value does not remove controllers. Default: `1`.
:param pulumi.Input[str] scsi_type: The type of SCSI bus this virtual machine will have.
Can be one of lsilogic (LSI Logic Parallel), lsilogic-sas (LSI Logic SAS) or
pvscsi (VMware Paravirtual). Defualt: `pvscsi`.
:param pulumi.Input[int] shutdown_wait_timeout: The amount of time, in minutes, to wait
for a graceful guest shutdown when making necessary updates to the virtual
machine. If `force_power_off` is set to true, the VM will be force powered-off
after this timeout, otherwise an error is returned. Default: 3 minutes.
:param pulumi.Input[str] storage_policy_id: The UUID of the storage policy to assign to this disk.
:param pulumi.Input[str] swap_placement_policy: The swap file placement policy for this
virtual machine. Can be one of `inherit`, `hostLocal`, or `vmDirectory`.
Default: `inherit`.
:param pulumi.Input[bool] sync_time_with_host: Enable guest clock synchronization with the host.
On vSphere 7 U1 and above, with only this setting the clock is synchronized on
startup and resume so consider also setting `sync_time_with_host_periodically`.
Requires VMware tools to be installed. Default: `false`.
:param pulumi.Input[bool] sync_time_with_host_periodically: Enable periodic clock
synchronization with the host. Supported only on vSphere 7 U1 and above.
On older versions setting `sync_time_with_host` is enough for periodic
synchronization. Requires VMware tools to be installed. Default: `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
:param pulumi.Input['VirtualMachineVappArgs'] vapp: Optional vApp configuration. The only sub-key available
is `properties`, which is a key/value map of properties for virtual machines
imported from OVF or OVA files. See Using vApp properties to supply OVF/OVA
configuration for
more details.
:param pulumi.Input[bool] vbs_enabled: Enable Virtualization Based Security. Requires
`firmware` to be `efi`, and `vvtd_enabled`, `nested_hv_enabled` and
`efi_secure_boot_enabled` must all have a value of `true`. Supported on
vSphere 6.7 and higher. Default: `false`.
:param pulumi.Input[bool] vvtd_enabled: Flag to specify if Intel Virtualization Technology
for Directed I/O is enabled for this virtual machine (_I/O MMU_ in the
vSphere Client). Supported on vSphere 6.7 and higher. Default: `false`.
:param pulumi.Input[int] wait_for_guest_ip_timeout: The amount of time, in minutes, to
wait for an available guest IP address on this virtual machine. This should
only be used if your version of VMware Tools does not allow the
`wait_for_guest_net_timeout` waiter to be
used. A value less than 1 disables the waiter. Default: 0.
:param pulumi.Input[bool] wait_for_guest_net_routable: Controls whether or not the guest
network waiter waits for a routable address. When `false`, the waiter does
not wait for a default gateway, nor are IP addresses checked against any
discovered default gateways as part of its success criteria. This property is
ignored if the `wait_for_guest_ip_timeout`
waiter is used. Default: `true`.
:param pulumi.Input[int] wait_for_guest_net_timeout: The amount of time, in minutes, to
wait for an available IP address on this virtual machine's NICs. Older
versions of VMware Tools do not populate this property. In those cases, this
waiter can be disabled and the
`wait_for_guest_ip_timeout` waiter can be used
instead. A value less than 1 disables the waiter. Default: 5 minutes.
"""
pulumi.set(__self__, "resource_pool_id", resource_pool_id)
if alternate_guest_name is not None:
pulumi.set(__self__, "alternate_guest_name", alternate_guest_name)
if annotation is not None:
pulumi.set(__self__, "annotation", annotation)
if boot_delay is not None:
pulumi.set(__self__, "boot_delay", boot_delay)
if boot_retry_delay is not None:
pulumi.set(__self__, "boot_retry_delay", boot_retry_delay)
if boot_retry_enabled is not None:
pulumi.set(__self__, "boot_retry_enabled", boot_retry_enabled)
if cdrom is not None:
pulumi.set(__self__, "cdrom", cdrom)
if clone is not None:
pulumi.set(__self__, "clone", clone)
if cpu_hot_add_enabled is not None:
pulumi.set(__self__, "cpu_hot_add_enabled", cpu_hot_add_enabled)
if cpu_hot_remove_enabled is not None:
pulumi.set(__self__, "cpu_hot_remove_enabled", cpu_hot_remove_enabled)
if cpu_limit is not None:
pulumi.set(__self__, "cpu_limit", cpu_limit)
if cpu_performance_counters_enabled is not None:
pulumi.set(__self__, "cpu_performance_counters_enabled", cpu_performance_counters_enabled)
if cpu_reservation is not None:
pulumi.set(__self__, "cpu_reservation", cpu_reservation)
if cpu_share_count is not None:
pulumi.set(__self__, "cpu_share_count", cpu_share_count)
if cpu_share_level is not None:
pulumi.set(__self__, "cpu_share_level", cpu_share_level)
if custom_attributes is not None:
pulumi.set(__self__, "custom_attributes", custom_attributes)
if datacenter_id is not None:
pulumi.set(__self__, "datacenter_id", datacenter_id)
if datastore_cluster_id is not None:
pulumi.set(__self__, "datastore_cluster_id", datastore_cluster_id)
if datastore_id is not None:
pulumi.set(__self__, "datastore_id", datastore_id)
if disks is not None:
pulumi.set(__self__, "disks", disks)
if efi_secure_boot_enabled is not None:
pulumi.set(__self__, "efi_secure_boot_enabled", efi_secure_boot_enabled)
if enable_disk_uuid is not None:
pulumi.set(__self__, "enable_disk_uuid", enable_disk_uuid)
if enable_logging is not None:
pulumi.set(__self__, "enable_logging", enable_logging)
if ept_rvi_mode is not None:
pulumi.set(__self__, "ept_rvi_mode", ept_rvi_mode)
if extra_config is not None:
pulumi.set(__self__, "extra_config", extra_config)
if firmware is not None:
pulumi.set(__self__, "firmware", firmware)
if folder is not None:
pulumi.set(__self__, "folder", folder)
if force_power_off is not None:
pulumi.set(__self__, "force_power_off", force_power_off)
if guest_id is not None:
pulumi.set(__self__, "guest_id", guest_id)
if hardware_version is not None:
pulumi.set(__self__, "hardware_version", hardware_version)
if host_system_id is not None:
pulumi.set(__self__, "host_system_id", host_system_id)
if hv_mode is not None:
pulumi.set(__self__, "hv_mode", hv_mode)
if ide_controller_count is not None:
pulumi.set(__self__, "ide_controller_count", ide_controller_count)
if ignored_guest_ips is not None:
pulumi.set(__self__, "ignored_guest_ips", ignored_guest_ips)
if latency_sensitivity is not None:
pulumi.set(__self__, "latency_sensitivity", latency_sensitivity)
if memory is not None:
pulumi.set(__self__, "memory", memory)
if memory_hot_add_enabled is not None:
pulumi.set(__self__, "memory_hot_add_enabled", memory_hot_add_enabled)
if memory_limit is not None:
pulumi.set(__self__, "memory_limit", memory_limit)
if memory_reservation is not None:
pulumi.set(__self__, "memory_reservation", memory_reservation)
if memory_share_count is not None:
pulumi.set(__self__, "memory_share_count", memory_share_count)
if memory_share_level is not None:
pulumi.set(__self__, "memory_share_level", memory_share_level)
if migrate_wait_timeout is not None:
pulumi.set(__self__, "migrate_wait_timeout", migrate_wait_timeout)
if name is not None:
pulumi.set(__self__, "name", name)
if nested_hv_enabled is not None:
pulumi.set(__self__, "nested_hv_enabled", nested_hv_enabled)
if network_interfaces is not None:
pulumi.set(__self__, "network_interfaces", network_interfaces)
if num_cores_per_socket is not None:
pulumi.set(__self__, "num_cores_per_socket", num_cores_per_socket)
if num_cpus is not None:
pulumi.set(__self__, "num_cpus", num_cpus)
if ovf_deploy is not None:
pulumi.set(__self__, "ovf_deploy", ovf_deploy)
if pci_device_ids is not None:
pulumi.set(__self__, "pci_device_ids", pci_device_ids)
if poweron_timeout is not None:
pulumi.set(__self__, "poweron_timeout", poweron_timeout)
if replace_trigger is not None:
pulumi.set(__self__, "replace_trigger", replace_trigger)
if run_tools_scripts_after_power_on is not None:
pulumi.set(__self__, "run_tools_scripts_after_power_on", run_tools_scripts_after_power_on)
if run_tools_scripts_after_resume is not None:
pulumi.set(__self__, "run_tools_scripts_after_resume", run_tools_scripts_after_resume)
if run_tools_scripts_before_guest_reboot is not None:
pulumi.set(__self__, "run_tools_scripts_before_guest_reboot", run_tools_scripts_before_guest_reboot)
if run_tools_scripts_before_guest_shutdown is not None:
pulumi.set(__self__, "run_tools_scripts_before_guest_shutdown", run_tools_scripts_before_guest_shutdown)
if run_tools_scripts_before_guest_standby is not None:
pulumi.set(__self__, "run_tools_scripts_before_guest_standby", run_tools_scripts_before_guest_standby)
if sata_controller_count is not None:
pulumi.set(__self__, "sata_controller_count", sata_controller_count)
if scsi_bus_sharing is not None:
pulumi.set(__self__, "scsi_bus_sharing", scsi_bus_sharing)
if scsi_controller_count is not None:
pulumi.set(__self__, "scsi_controller_count", scsi_controller_count)
if scsi_type is not None:
pulumi.set(__self__, "scsi_type", scsi_type)
if shutdown_wait_timeout is not None:
pulumi.set(__self__, "shutdown_wait_timeout", shutdown_wait_timeout)
if storage_policy_id is not None:
pulumi.set(__self__, "storage_policy_id", storage_policy_id)
if swap_placement_policy is not None:
pulumi.set(__self__, "swap_placement_policy", swap_placement_policy)
if sync_time_with_host is not None:
pulumi.set(__self__, "sync_time_with_host", sync_time_with_host)
if sync_time_with_host_periodically is not None:
pulumi.set(__self__, "sync_time_with_host_periodically", sync_time_with_host_periodically)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if vapp is not None:
pulumi.set(__self__, "vapp", vapp)
if vbs_enabled is not None:
pulumi.set(__self__, "vbs_enabled", vbs_enabled)
if vvtd_enabled is not None:
pulumi.set(__self__, "vvtd_enabled", vvtd_enabled)
if wait_for_guest_ip_timeout is not None:
pulumi.set(__self__, "wait_for_guest_ip_timeout", wait_for_guest_ip_timeout)
if wait_for_guest_net_routable is not None:
pulumi.set(__self__, "wait_for_guest_net_routable", wait_for_guest_net_routable)
if wait_for_guest_net_timeout is not None:
pulumi.set(__self__, "wait_for_guest_net_timeout", wait_for_guest_net_timeout)
    @property
    @pulumi.getter(name="resourcePoolId")
    def resource_pool_id(self) -> pulumi.Input[str]:
        """
        The managed object reference ID of the resource pool to put this
        virtual machine in. See the section on virtual machine migration
        for details on changing this value.
        """
        return pulumi.get(self, "resource_pool_id")

    @resource_pool_id.setter
    def resource_pool_id(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_pool_id", value)
    @property
    @pulumi.getter(name="alternateGuestName")
    def alternate_guest_name(self) -> Optional[pulumi.Input[str]]:
        """
        The guest name for the operating system when `guest_id` is
        `other` or `other-64`.
        """
        return pulumi.get(self, "alternate_guest_name")

    @alternate_guest_name.setter
    def alternate_guest_name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "alternate_guest_name", value)
    @property
    @pulumi.getter
    def annotation(self) -> Optional[pulumi.Input[str]]:
        """
        A user-provided description of the virtual machine.
        The default is no annotation.
        """
        return pulumi.get(self, "annotation")

    @annotation.setter
    def annotation(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "annotation", value)
    @property
    @pulumi.getter(name="bootDelay")
    def boot_delay(self) -> Optional[pulumi.Input[int]]:
        """
        The number of milliseconds to wait before starting the boot
        sequence. The default is no delay.
        """
        return pulumi.get(self, "boot_delay")

    @boot_delay.setter
    def boot_delay(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "boot_delay", value)
    @property
    @pulumi.getter(name="bootRetryDelay")
    def boot_retry_delay(self) -> Optional[pulumi.Input[int]]:
        """
        The number of milliseconds to wait before retrying the boot
        sequence. This is only valid if `boot_retry_enabled` is true.
        Default: `10000` (10 seconds).
        """
        return pulumi.get(self, "boot_retry_delay")

    @boot_retry_delay.setter
    def boot_retry_delay(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "boot_retry_delay", value)
    @property
    @pulumi.getter(name="bootRetryEnabled")
    def boot_retry_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        If set to true, a virtual machine that fails to boot will try
        again after the delay defined in `boot_retry_delay`.
        Default: `false`.
        """
        return pulumi.get(self, "boot_retry_enabled")

    @boot_retry_enabled.setter
    def boot_retry_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "boot_retry_enabled", value)
    @property
    @pulumi.getter
    def cdrom(self) -> Optional[pulumi.Input['VirtualMachineCdromArgs']]:
        """
        A specification for a CDROM device on this virtual machine.
        See CDROM options below.
        """
        return pulumi.get(self, "cdrom")

    @cdrom.setter
    def cdrom(self, value: Optional[pulumi.Input['VirtualMachineCdromArgs']]):
        pulumi.set(self, "cdrom", value)
    @property
    @pulumi.getter
    def clone(self) -> Optional[pulumi.Input['VirtualMachineCloneArgs']]:
        """
        When specified, the VM will be created as a clone of a specified
        template. Optional customization options can be submitted as well.
        See creating a virtual machine from a template for more details.
        """
        return pulumi.get(self, "clone")

    @clone.setter
    def clone(self, value: Optional[pulumi.Input['VirtualMachineCloneArgs']]):
        pulumi.set(self, "clone", value)
    @property
    @pulumi.getter(name="cpuHotAddEnabled")
    def cpu_hot_add_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Allow CPUs to be added to this virtual machine while it is running.
        """
        return pulumi.get(self, "cpu_hot_add_enabled")

    @cpu_hot_add_enabled.setter
    def cpu_hot_add_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "cpu_hot_add_enabled", value)
    @property
    @pulumi.getter(name="cpuHotRemoveEnabled")
    def cpu_hot_remove_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Allow CPUs to be removed from this virtual machine while it is
        running.
        """
        return pulumi.get(self, "cpu_hot_remove_enabled")

    @cpu_hot_remove_enabled.setter
    def cpu_hot_remove_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "cpu_hot_remove_enabled", value)
    @property
    @pulumi.getter(name="cpuLimit")
    def cpu_limit(self) -> Optional[pulumi.Input[int]]:
        """
        The maximum amount of CPU (in MHz) that this virtual machine can
        consume, regardless of available resources. The default is no
        limit.
        """
        return pulumi.get(self, "cpu_limit")

    @cpu_limit.setter
    def cpu_limit(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cpu_limit", value)
    @property
    @pulumi.getter(name="cpuPerformanceCountersEnabled")
    def cpu_performance_counters_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        Enable CPU performance counters on this virtual machine.
        Default: `false`.
        """
        return pulumi.get(self, "cpu_performance_counters_enabled")

    @cpu_performance_counters_enabled.setter
    def cpu_performance_counters_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "cpu_performance_counters_enabled", value)
    @property
    @pulumi.getter(name="cpuReservation")
    def cpu_reservation(self) -> Optional[pulumi.Input[int]]:
        """
        The amount of CPU (in MHz) that this virtual machine is
        guaranteed. The default is no reservation.
        """
        return pulumi.get(self, "cpu_reservation")

    @cpu_reservation.setter
    def cpu_reservation(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cpu_reservation", value)
    @property
    @pulumi.getter(name="cpuShareCount")
    def cpu_share_count(self) -> Optional[pulumi.Input[int]]:
        """
        The number of CPU shares allocated to the virtual machine when
        the `cpu_share_level` is `custom`.
        """
        return pulumi.get(self, "cpu_share_count")

    @cpu_share_count.setter
    def cpu_share_count(self, value: Optional[pulumi.Input[int]]):
        pulumi.set(self, "cpu_share_count", value)
    @property
    @pulumi.getter(name="cpuShareLevel")
    def cpu_share_level(self) -> Optional[pulumi.Input[str]]:
        """
        The allocation level for CPU resources. Can be one of `high`,
        `low`, `normal`, or `custom`. Default: `custom`.
        """
        return pulumi.get(self, "cpu_share_level")

    @cpu_share_level.setter
    def cpu_share_level(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "cpu_share_level", value)
    @property
    @pulumi.getter(name="customAttributes")
    def custom_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        Map of custom attribute ids to attribute value strings to set
        for the virtual machine.
        """
        return pulumi.get(self, "custom_attributes")

    @custom_attributes.setter
    def custom_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "custom_attributes", value)
@property
@pulumi.getter(name="datacenterId")
def datacenter_id(self) -> Optional[pulumi.Input[str]]:
"""
The datacenter id. Required only when deploying
an ovf template.
"""
return pulumi.get(self, "datacenter_id")
@datacenter_id.setter
def datacenter_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "datacenter_id", value)
@property
@pulumi.getter(name="datastoreClusterId")
def datastore_cluster_id(self) -> Optional[pulumi.Input[str]]:
"""
The managed object reference
ID of the datastore cluster ID to use. This setting
applies to entire virtual machine and implies that you wish to use Storage
DRS with this virtual machine. See the section on virtual machine
migration for details on changing this value.
"""
return pulumi.get(self, "datastore_cluster_id")
@datastore_cluster_id.setter
def datastore_cluster_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "datastore_cluster_id", value)
    @property
    @pulumi.getter(name="datastoreId")
    def datastore_id(self) -> Optional[pulumi.Input[str]]:
        """
        The datastore ID that the ISO is located in.
        Required for using a datastore ISO. Conflicts with `client_device`.
        """
        return pulumi.get(self, "datastore_id")
    @datastore_id.setter
    def datastore_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "datastore_id", value)
@property
@pulumi.getter
def disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualMachineDiskArgs']]]]:
"""
A specification for a virtual disk device on this virtual
machine. See disk options below.
"""
return pulumi.get(self, "disks")
@disks.setter
def disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualMachineDiskArgs']]]]):
pulumi.set(self, "disks", value)
    @property
    @pulumi.getter(name="efiSecureBootEnabled")
    def efi_secure_boot_enabled(self) -> Optional[pulumi.Input[bool]]:
        """
        When the `firmware` type is set to
        `efi`, this enables EFI secure boot. Default: `false`.
        """
        return pulumi.get(self, "efi_secure_boot_enabled")
    @efi_secure_boot_enabled.setter
    def efi_secure_boot_enabled(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "efi_secure_boot_enabled", value)
@property
@pulumi.getter(name="enableDiskUuid")
def enable_disk_uuid(self) -> Optional[pulumi.Input[bool]]:
"""
Expose the UUIDs of attached virtual disks to
the virtual machine, allowing access to them in the guest. Default: `false`.
"""
return pulumi.get(self, "enable_disk_uuid")
@enable_disk_uuid.setter
def enable_disk_uuid(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_disk_uuid", value)
@property
@pulumi.getter(name="enableLogging")
def enable_logging(self) -> Optional[pulumi.Input[bool]]:
"""
Enable logging of virtual machine events to a
log file stored in the virtual machine directory. Default: `false`.
"""
return pulumi.get(self, "enable_logging")
@enable_logging.setter
def enable_logging(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_logging", value)
@property
@pulumi.getter(name="eptRviMode")
def ept_rvi_mode(self) -> Optional[pulumi.Input[str]]:
"""
The EPT/RVI (hardware memory virtualization)
setting for this virtual machine. Can be one of `automatic`, `on`, or `off`.
Default: `automatic`.
"""
return pulumi.get(self, "ept_rvi_mode")
@ept_rvi_mode.setter
def ept_rvi_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ept_rvi_mode", value)
@property
@pulumi.getter(name="extraConfig")
def extra_config(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
Extra configuration data for this virtual
machine. Can be used to supply advanced parameters not normally in
configuration, such as instance metadata.
"""
return pulumi.get(self, "extra_config")
@extra_config.setter
def extra_config(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "extra_config", value)
    @property
    @pulumi.getter
    def firmware(self) -> Optional[pulumi.Input[str]]:
        """
        The firmware interface to use on the virtual machine.
        Can be one of `bios` or `efi`. Default: `bios`.
        """
        return pulumi.get(self, "firmware")
    @firmware.setter
    def firmware(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "firmware", value)
@property
@pulumi.getter
def folder(self) -> Optional[pulumi.Input[str]]:
"""
The path to the folder to put this virtual machine in,
relative to the datacenter that the resource pool is in.
"""
return pulumi.get(self, "folder")
@folder.setter
def folder(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "folder", value)
@property
@pulumi.getter(name="forcePowerOff")
def force_power_off(self) -> Optional[pulumi.Input[bool]]:
"""
If a guest shutdown failed or timed out while
updating or destroying (see
`shutdown_wait_timeout`), force the power-off of
the virtual machine. Default: `true`.
"""
return pulumi.get(self, "force_power_off")
@force_power_off.setter
def force_power_off(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "force_power_off", value)
@property
@pulumi.getter(name="guestId")
def guest_id(self) -> Optional[pulumi.Input[str]]:
"""
The guest ID for the operating system type. For a
full list of possible values, see [here][vmware-docs-guest-ids]. Default: `other-64`.
"""
return pulumi.get(self, "guest_id")
@guest_id.setter
def guest_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "guest_id", value)
@property
@pulumi.getter(name="hardwareVersion")
def hardware_version(self) -> Optional[pulumi.Input[int]]:
"""
The hardware version number. Valid range
is from 4 to 15. The hardware version cannot be downgraded. See [virtual
machine hardware compatibility][virtual-machine-hardware-compatibility] for
more details.
"""
return pulumi.get(self, "hardware_version")
@hardware_version.setter
def hardware_version(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "hardware_version", value)
@property
@pulumi.getter(name="hostSystemId")
def host_system_id(self) -> Optional[pulumi.Input[str]]:
"""
An optional managed object reference
ID of a host to put this virtual machine on. See the
section on virtual machine migration for
details on changing this value. If a `host_system_id` is not supplied,
vSphere will select a host in the resource pool to place the virtual machine,
according to any defaults or DRS policies in place.
"""
return pulumi.get(self, "host_system_id")
@host_system_id.setter
def host_system_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "host_system_id", value)
@property
@pulumi.getter(name="hvMode")
def hv_mode(self) -> Optional[pulumi.Input[str]]:
"""
The (non-nested) hardware virtualization setting for
this virtual machine. Can be one of `hvAuto`, `hvOn`, or `hvOff`. Default:
`hvAuto`.
"""
return pulumi.get(self, "hv_mode")
@hv_mode.setter
def hv_mode(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "hv_mode", value)
@property
@pulumi.getter(name="ideControllerCount")
def ide_controller_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of IDE controllers that Terraform manages on this virtual machine. This directly affects the amount of disks
you can add to the virtual machine and the maximum disk unit number. Note that lowering this value does not remove
controllers.
"""
return pulumi.get(self, "ide_controller_count")
@ide_controller_count.setter
def ide_controller_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "ide_controller_count", value)
@property
@pulumi.getter(name="ignoredGuestIps")
def ignored_guest_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of IP addresses and CIDR networks to
ignore while waiting for an available IP address using either of the waiters.
Any IP addresses in this list will be ignored if they show up so that the
waiter will continue to wait for a real IP address. Default: [].
"""
return pulumi.get(self, "ignored_guest_ips")
@ignored_guest_ips.setter
def ignored_guest_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "ignored_guest_ips", value)
@property
@pulumi.getter(name="latencySensitivity")
def latency_sensitivity(self) -> Optional[pulumi.Input[str]]:
"""
Controls the scheduling delay of the
virtual machine. Use a higher sensitivity for applications that require lower
latency, such as VOIP, media player applications, or applications that
require frequent access to mouse or keyboard devices. Can be one of `low`,
`normal`, `medium`, or `high`.
"""
return pulumi.get(self, "latency_sensitivity")
@latency_sensitivity.setter
def latency_sensitivity(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "latency_sensitivity", value)
@property
@pulumi.getter
def memory(self) -> Optional[pulumi.Input[int]]:
"""
The size of the virtual machine's memory, in MB.
Default: `1024` (1 GB).
"""
return pulumi.get(self, "memory")
@memory.setter
def memory(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "memory", value)
@property
@pulumi.getter(name="memoryHotAddEnabled")
def memory_hot_add_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Allow memory to be added to this
virtual machine while it is running.
"""
return pulumi.get(self, "memory_hot_add_enabled")
@memory_hot_add_enabled.setter
def memory_hot_add_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "memory_hot_add_enabled", value)
@property
@pulumi.getter(name="memoryLimit")
def memory_limit(self) -> Optional[pulumi.Input[int]]:
"""
The maximum amount of memory (in MB) that this
virtual machine can consume, regardless of available resources. The default
is no limit.
"""
return pulumi.get(self, "memory_limit")
@memory_limit.setter
def memory_limit(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "memory_limit", value)
@property
@pulumi.getter(name="memoryReservation")
def memory_reservation(self) -> Optional[pulumi.Input[int]]:
"""
The amount of memory (in MB) that this
virtual machine is guaranteed. The default is no reservation.
"""
return pulumi.get(self, "memory_reservation")
@memory_reservation.setter
def memory_reservation(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "memory_reservation", value)
@property
@pulumi.getter(name="memoryShareCount")
def memory_share_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of memory shares allocated to
the virtual machine when the `memory_share_level` is `custom`.
"""
return pulumi.get(self, "memory_share_count")
@memory_share_count.setter
def memory_share_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "memory_share_count", value)
@property
@pulumi.getter(name="memoryShareLevel")
def memory_share_level(self) -> Optional[pulumi.Input[str]]:
"""
The allocation level for memory resources.
Can be one of `high`, `low`, `normal`, or `custom`. Default: `custom`.
"""
return pulumi.get(self, "memory_share_level")
@memory_share_level.setter
def memory_share_level(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "memory_share_level", value)
@property
@pulumi.getter(name="migrateWaitTimeout")
def migrate_wait_timeout(self) -> Optional[pulumi.Input[int]]:
"""
The amount of time, in minutes, to wait
for a virtual machine migration to complete before failing. Default: 10
minutes. Also see the section on virtual machine
migration.
"""
return pulumi.get(self, "migrate_wait_timeout")
@migrate_wait_timeout.setter
def migrate_wait_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "migrate_wait_timeout", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the virtual machine.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="nestedHvEnabled")
def nested_hv_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Enable nested hardware virtualization on
this virtual machine, facilitating nested virtualization in the guest.
Default: `false`.
"""
return pulumi.get(self, "nested_hv_enabled")
@nested_hv_enabled.setter
def nested_hv_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "nested_hv_enabled", value)
@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualMachineNetworkInterfaceArgs']]]]:
"""
A specification for a virtual NIC on this
virtual machine. See network interface options
below.
"""
return pulumi.get(self, "network_interfaces")
@network_interfaces.setter
def network_interfaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualMachineNetworkInterfaceArgs']]]]):
pulumi.set(self, "network_interfaces", value)
@property
@pulumi.getter(name="numCoresPerSocket")
def num_cores_per_socket(self) -> Optional[pulumi.Input[int]]:
"""
The number of cores per socket in this
virtual machine. The number of vCPUs on the virtual machine will be
`num_cpus` divided by `num_cores_per_socket`. If specified, the value
supplied to `num_cpus` must be evenly divisible by this value. Default: `1`.
"""
return pulumi.get(self, "num_cores_per_socket")
@num_cores_per_socket.setter
def num_cores_per_socket(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "num_cores_per_socket", value)
@property
@pulumi.getter(name="numCpus")
def num_cpus(self) -> Optional[pulumi.Input[int]]:
"""
The total number of virtual processor cores to assign
to this virtual machine. Default: `1`.
"""
return pulumi.get(self, "num_cpus")
@num_cpus.setter
def num_cpus(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "num_cpus", value)
@property
@pulumi.getter(name="ovfDeploy")
def ovf_deploy(self) -> Optional[pulumi.Input['VirtualMachineOvfDeployArgs']]:
"""
When specified, the VM will be deployed from the
provided ovf/ova template. See creating a virtual machine from a
ovf/ova template for more details.
"""
return pulumi.get(self, "ovf_deploy")
@ovf_deploy.setter
def ovf_deploy(self, value: Optional[pulumi.Input['VirtualMachineOvfDeployArgs']]):
pulumi.set(self, "ovf_deploy", value)
@property
@pulumi.getter(name="pciDeviceIds")
def pci_device_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
List of host PCI device IDs to create PCI
passthroughs for.
"""
return pulumi.get(self, "pci_device_ids")
@pci_device_ids.setter
def pci_device_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "pci_device_ids", value)
@property
@pulumi.getter(name="poweronTimeout")
def poweron_timeout(self) -> Optional[pulumi.Input[int]]:
"""
The amount of time, in seconds, that we will be trying to power on a VM
"""
return pulumi.get(self, "poweron_timeout")
@poweron_timeout.setter
def poweron_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "poweron_timeout", value)
    @property
    @pulumi.getter(name="replaceTrigger")
    def replace_trigger(self) -> Optional[pulumi.Input[str]]:
        """
        Triggers replacement of resource whenever it changes.
        `replace_trigger = sha256(format("%s-%s",data.template_file.cloud_init_metadata.rendered,data.template_file.cloud_init_userdata.rendered))`
        will fingerprint the changes in cloud_init metadata and userdata templates. This will enable a replacement
        of the resource whenever the dependent template renders a new configuration. (Forces a replacement)
        """
        return pulumi.get(self, "replace_trigger")
    @replace_trigger.setter
    def replace_trigger(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "replace_trigger", value)
@property
@pulumi.getter(name="runToolsScriptsAfterPowerOn")
def run_tools_scripts_after_power_on(self) -> Optional[pulumi.Input[bool]]:
"""
Enable the execution of
post-power-on scripts when VMware tools is installed. Default: `true`.
"""
return pulumi.get(self, "run_tools_scripts_after_power_on")
@run_tools_scripts_after_power_on.setter
def run_tools_scripts_after_power_on(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "run_tools_scripts_after_power_on", value)
@property
@pulumi.getter(name="runToolsScriptsAfterResume")
def run_tools_scripts_after_resume(self) -> Optional[pulumi.Input[bool]]:
"""
Enable the execution of
post-resume scripts when VMware tools is installed. Default: `true`.
"""
return pulumi.get(self, "run_tools_scripts_after_resume")
@run_tools_scripts_after_resume.setter
def run_tools_scripts_after_resume(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "run_tools_scripts_after_resume", value)
@property
@pulumi.getter(name="runToolsScriptsBeforeGuestReboot")
def run_tools_scripts_before_guest_reboot(self) -> Optional[pulumi.Input[bool]]:
"""
Enable the execution of
pre-reboot scripts when VMware tools is installed. Default: `false`.
"""
return pulumi.get(self, "run_tools_scripts_before_guest_reboot")
@run_tools_scripts_before_guest_reboot.setter
def run_tools_scripts_before_guest_reboot(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "run_tools_scripts_before_guest_reboot", value)
@property
@pulumi.getter(name="runToolsScriptsBeforeGuestShutdown")
def run_tools_scripts_before_guest_shutdown(self) -> Optional[pulumi.Input[bool]]:
"""
Enable the execution
of pre-shutdown scripts when VMware tools is installed. Default: `true`.
"""
return pulumi.get(self, "run_tools_scripts_before_guest_shutdown")
@run_tools_scripts_before_guest_shutdown.setter
def run_tools_scripts_before_guest_shutdown(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "run_tools_scripts_before_guest_shutdown", value)
@property
@pulumi.getter(name="runToolsScriptsBeforeGuestStandby")
def run_tools_scripts_before_guest_standby(self) -> Optional[pulumi.Input[bool]]:
"""
Enable the execution of
pre-standby scripts when VMware tools is installed. Default: `true`.
"""
return pulumi.get(self, "run_tools_scripts_before_guest_standby")
@run_tools_scripts_before_guest_standby.setter
def run_tools_scripts_before_guest_standby(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "run_tools_scripts_before_guest_standby", value)
@property
@pulumi.getter(name="sataControllerCount")
def sata_controller_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of SATA controllers that Terraform manages on this virtual machine. This directly affects the amount of disks
you can add to the virtual machine and the maximum disk unit number. Note that lowering this value does not remove
controllers.
"""
return pulumi.get(self, "sata_controller_count")
@sata_controller_count.setter
def sata_controller_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "sata_controller_count", value)
@property
@pulumi.getter(name="scsiBusSharing")
def scsi_bus_sharing(self) -> Optional[pulumi.Input[str]]:
"""
Mode for sharing the SCSI bus. The modes are
physicalSharing, virtualSharing, and noSharing. Default: `noSharing`.
"""
return pulumi.get(self, "scsi_bus_sharing")
@scsi_bus_sharing.setter
def scsi_bus_sharing(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "scsi_bus_sharing", value)
@property
@pulumi.getter(name="scsiControllerCount")
def scsi_controller_count(self) -> Optional[pulumi.Input[int]]:
"""
The number of SCSI controllers that
this provider manages on this virtual machine. This directly affects the amount
of disks you can add to the virtual machine and the maximum disk unit number.
Note that lowering this value does not remove controllers. Default: `1`.
"""
return pulumi.get(self, "scsi_controller_count")
@scsi_controller_count.setter
def scsi_controller_count(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "scsi_controller_count", value)
    @property
    @pulumi.getter(name="scsiType")
    def scsi_type(self) -> Optional[pulumi.Input[str]]:
        """
        The type of SCSI bus this virtual machine will have.
        Can be one of lsilogic (LSI Logic Parallel), lsilogic-sas (LSI Logic SAS) or
        pvscsi (VMware Paravirtual). Default: `pvscsi`.
        """
        return pulumi.get(self, "scsi_type")
    @scsi_type.setter
    def scsi_type(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "scsi_type", value)
@property
@pulumi.getter(name="shutdownWaitTimeout")
def shutdown_wait_timeout(self) -> Optional[pulumi.Input[int]]:
"""
The amount of time, in minutes, to wait
for a graceful guest shutdown when making necessary updates to the virtual
machine. If `force_power_off` is set to true, the VM will be force powered-off
after this timeout, otherwise an error is returned. Default: 3 minutes.
"""
return pulumi.get(self, "shutdown_wait_timeout")
@shutdown_wait_timeout.setter
def shutdown_wait_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "shutdown_wait_timeout", value)
@property
@pulumi.getter(name="storagePolicyId")
def storage_policy_id(self) -> Optional[pulumi.Input[str]]:
"""
The UUID of the storage policy to assign to this disk.
"""
return pulumi.get(self, "storage_policy_id")
@storage_policy_id.setter
def storage_policy_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "storage_policy_id", value)
@property
@pulumi.getter(name="swapPlacementPolicy")
def swap_placement_policy(self) -> Optional[pulumi.Input[str]]:
"""
The swap file placement policy for this
virtual machine. Can be one of `inherit`, `hostLocal`, or `vmDirectory`.
Default: `inherit`.
"""
return pulumi.get(self, "swap_placement_policy")
@swap_placement_policy.setter
def swap_placement_policy(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "swap_placement_policy", value)
@property
@pulumi.getter(name="syncTimeWithHost")
def sync_time_with_host(self) -> Optional[pulumi.Input[bool]]:
"""
Enable guest clock synchronization with the host.
On vSphere 7 U1 and above, with only this setting the clock is synchronized on
startup and resume so consider also setting `sync_time_with_host_periodically`.
Requires VMware tools to be installed. Default: `false`.
"""
return pulumi.get(self, "sync_time_with_host")
@sync_time_with_host.setter
def sync_time_with_host(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "sync_time_with_host", value)
@property
@pulumi.getter(name="syncTimeWithHostPeriodically")
def sync_time_with_host_periodically(self) -> Optional[pulumi.Input[bool]]:
"""
Enable periodic clock
synchronization with the host. Supported only on vSphere 7 U1 and above.
On older versions setting `sync_time_with_host` is enough for periodic
synchronization. Requires VMware tools to be installed. Default: `false`.
"""
return pulumi.get(self, "sync_time_with_host_periodically")
@sync_time_with_host_periodically.setter
def sync_time_with_host_periodically(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "sync_time_with_host_periodically", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
"""
The IDs of any tags to attach to this resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
@property
@pulumi.getter
def vapp(self) -> Optional[pulumi.Input['VirtualMachineVappArgs']]:
"""
Optional vApp configuration. The only sub-key available
is `properties`, which is a key/value map of properties for virtual machines
imported from OVF or OVA files. See Using vApp properties to supply OVF/OVA
configuration for
more details.
"""
return pulumi.get(self, "vapp")
@vapp.setter
def vapp(self, value: Optional[pulumi.Input['VirtualMachineVappArgs']]):
pulumi.set(self, "vapp", value)
@property
@pulumi.getter(name="vbsEnabled")
def vbs_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Enable Virtualization Based Security. Requires
`firmware` to be `efi`, and `vvtd_enabled`, `nested_hv_enabled` and
`efi_secure_boot_enabled` must all have a value of `true`. Supported on
vSphere 6.7 and higher. Default: `false`.
"""
return pulumi.get(self, "vbs_enabled")
@vbs_enabled.setter
def vbs_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "vbs_enabled", value)
@property
@pulumi.getter(name="vvtdEnabled")
def vvtd_enabled(self) -> Optional[pulumi.Input[bool]]:
"""
Flag to specify if Intel Virtualization Technology
for Directed I/O is enabled for this virtual machine (_I/O MMU_ in the
vSphere Client). Supported on vSphere 6.7 and higher. Default: `false`.
"""
return pulumi.get(self, "vvtd_enabled")
@vvtd_enabled.setter
def vvtd_enabled(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "vvtd_enabled", value)
@property
@pulumi.getter(name="waitForGuestIpTimeout")
def wait_for_guest_ip_timeout(self) -> Optional[pulumi.Input[int]]:
"""
The amount of time, in minutes, to
wait for an available guest IP address on this virtual machine. This should
only be used if your version of VMware Tools does not allow the
`wait_for_guest_net_timeout` waiter to be
used. A value less than 1 disables the waiter. Default: 0.
"""
return pulumi.get(self, "wait_for_guest_ip_timeout")
@wait_for_guest_ip_timeout.setter
def wait_for_guest_ip_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "wait_for_guest_ip_timeout", value)
@property
@pulumi.getter(name="waitForGuestNetRoutable")
def wait_for_guest_net_routable(self) -> Optional[pulumi.Input[bool]]:
"""
Controls whether or not the guest
network waiter waits for a routable address. When `false`, the waiter does
not wait for a default gateway, nor are IP addresses checked against any
discovered default gateways as part of its success criteria. This property is
ignored if the `wait_for_guest_ip_timeout`
waiter is used. Default: `true`.
"""
return pulumi.get(self, "wait_for_guest_net_routable")
@wait_for_guest_net_routable.setter
def wait_for_guest_net_routable(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "wait_for_guest_net_routable", value)
@property
@pulumi.getter(name="waitForGuestNetTimeout")
def wait_for_guest_net_timeout(self) -> Optional[pulumi.Input[int]]:
"""
The amount of time, in minutes, to
wait for an available IP address on this virtual machine's NICs. Older
versions of VMware Tools do not populate this property. In those cases, this
waiter can be disabled and the
`wait_for_guest_ip_timeout` waiter can be used
instead. A value less than 1 disables the waiter. Default: 5 minutes.
"""
return pulumi.get(self, "wait_for_guest_net_timeout")
@wait_for_guest_net_timeout.setter
def wait_for_guest_net_timeout(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "wait_for_guest_net_timeout", value)
@pulumi.input_type
class _VirtualMachineState:
def __init__(__self__, *,
alternate_guest_name: Optional[pulumi.Input[str]] = None,
annotation: Optional[pulumi.Input[str]] = None,
boot_delay: Optional[pulumi.Input[int]] = None,
boot_retry_delay: Optional[pulumi.Input[int]] = None,
boot_retry_enabled: Optional[pulumi.Input[bool]] = None,
cdrom: Optional[pulumi.Input['VirtualMachineCdromArgs']] = None,
change_version: Optional[pulumi.Input[str]] = None,
clone: Optional[pulumi.Input['VirtualMachineCloneArgs']] = None,
cpu_hot_add_enabled: Optional[pulumi.Input[bool]] = None,
cpu_hot_remove_enabled: Optional[pulumi.Input[bool]] = None,
cpu_limit: Optional[pulumi.Input[int]] = None,
cpu_performance_counters_enabled: Optional[pulumi.Input[bool]] = None,
cpu_reservation: Optional[pulumi.Input[int]] = None,
cpu_share_count: Optional[pulumi.Input[int]] = None,
cpu_share_level: Optional[pulumi.Input[str]] = None,
custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
datacenter_id: Optional[pulumi.Input[str]] = None,
datastore_cluster_id: Optional[pulumi.Input[str]] = None,
datastore_id: Optional[pulumi.Input[str]] = None,
default_ip_address: Optional[pulumi.Input[str]] = None,
disks: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualMachineDiskArgs']]]] = None,
efi_secure_boot_enabled: Optional[pulumi.Input[bool]] = None,
enable_disk_uuid: Optional[pulumi.Input[bool]] = None,
enable_logging: Optional[pulumi.Input[bool]] = None,
ept_rvi_mode: Optional[pulumi.Input[str]] = None,
extra_config: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
firmware: Optional[pulumi.Input[str]] = None,
folder: Optional[pulumi.Input[str]] = None,
force_power_off: Optional[pulumi.Input[bool]] = None,
guest_id: Optional[pulumi.Input[str]] = None,
guest_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
hardware_version: Optional[pulumi.Input[int]] = None,
host_system_id: Optional[pulumi.Input[str]] = None,
hv_mode: Optional[pulumi.Input[str]] = None,
ide_controller_count: Optional[pulumi.Input[int]] = None,
ignored_guest_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
imported: Optional[pulumi.Input[bool]] = None,
latency_sensitivity: Optional[pulumi.Input[str]] = None,
memory: Optional[pulumi.Input[int]] = None,
memory_hot_add_enabled: Optional[pulumi.Input[bool]] = None,
memory_limit: Optional[pulumi.Input[int]] = None,
memory_reservation: Optional[pulumi.Input[int]] = None,
memory_share_count: Optional[pulumi.Input[int]] = None,
memory_share_level: Optional[pulumi.Input[str]] = None,
migrate_wait_timeout: Optional[pulumi.Input[int]] = None,
moid: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
nested_hv_enabled: Optional[pulumi.Input[bool]] = None,
network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualMachineNetworkInterfaceArgs']]]] = None,
num_cores_per_socket: Optional[pulumi.Input[int]] = None,
num_cpus: Optional[pulumi.Input[int]] = None,
ovf_deploy: Optional[pulumi.Input['VirtualMachineOvfDeployArgs']] = None,
pci_device_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
poweron_timeout: Optional[pulumi.Input[int]] = None,
reboot_required: Optional[pulumi.Input[bool]] = None,
replace_trigger: Optional[pulumi.Input[str]] = None,
resource_pool_id: Optional[pulumi.Input[str]] = None,
run_tools_scripts_after_power_on: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_after_resume: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_before_guest_reboot: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_before_guest_shutdown: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_before_guest_standby: Optional[pulumi.Input[bool]] = None,
sata_controller_count: Optional[pulumi.Input[int]] = None,
scsi_bus_sharing: Optional[pulumi.Input[str]] = None,
scsi_controller_count: Optional[pulumi.Input[int]] = None,
scsi_type: Optional[pulumi.Input[str]] = None,
shutdown_wait_timeout: Optional[pulumi.Input[int]] = None,
storage_policy_id: Optional[pulumi.Input[str]] = None,
swap_placement_policy: Optional[pulumi.Input[str]] = None,
sync_time_with_host: Optional[pulumi.Input[bool]] = None,
sync_time_with_host_periodically: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
uuid: Optional[pulumi.Input[str]] = None,
vapp: Optional[pulumi.Input['VirtualMachineVappArgs']] = None,
vapp_transports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
vbs_enabled: Optional[pulumi.Input[bool]] = None,
vmware_tools_status: Optional[pulumi.Input[str]] = None,
vmx_path: Optional[pulumi.Input[str]] = None,
vvtd_enabled: Optional[pulumi.Input[bool]] = None,
wait_for_guest_ip_timeout: Optional[pulumi.Input[int]] = None,
wait_for_guest_net_routable: Optional[pulumi.Input[bool]] = None,
wait_for_guest_net_timeout: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering VirtualMachine resources.
:param pulumi.Input[str] alternate_guest_name: The guest name for the operating system
when `guest_id` is `other` or `other-64`.
:param pulumi.Input[str] annotation: A user-provided description of the virtual machine.
The default is no annotation.
:param pulumi.Input[int] boot_delay: The number of milliseconds to wait before starting
the boot sequence. The default is no delay.
:param pulumi.Input[int] boot_retry_delay: The number of milliseconds to wait before
retrying the boot sequence. This only valid if `boot_retry_enabled` is true.
Default: `10000` (10 seconds).
:param pulumi.Input[bool] boot_retry_enabled: If set to true, a virtual machine that
fails to boot will try again after the delay defined in `boot_retry_delay`.
Default: `false`.
:param pulumi.Input['VirtualMachineCdromArgs'] cdrom: A specification for a CDROM device on this virtual
machine. See CDROM options below.
:param pulumi.Input[str] change_version: A unique identifier for a given version of the last
configuration applied, such the timestamp of the last update to the
configuration.
:param pulumi.Input['VirtualMachineCloneArgs'] clone: When specified, the VM will be created as a clone of a
specified template. Optional customization options can be submitted as well.
See creating a virtual machine from a
template for more details.
:param pulumi.Input[bool] cpu_hot_add_enabled: Allow CPUs to be added to this virtual
machine while it is running.
:param pulumi.Input[bool] cpu_hot_remove_enabled: Allow CPUs to be removed from this
virtual machine while it is running.
:param pulumi.Input[int] cpu_limit: The maximum amount of CPU (in MHz) that this virtual
machine can consume, regardless of available resources. The default is no
limit.
:param pulumi.Input[bool] cpu_performance_counters_enabled: Enable CPU performance
counters on this virtual machine. Default: `false`.
:param pulumi.Input[int] cpu_reservation: The amount of CPU (in MHz) that this virtual
machine is guaranteed. The default is no reservation.
:param pulumi.Input[int] cpu_share_count: The number of CPU shares allocated to the
virtual machine when the `cpu_share_level` is `custom`.
:param pulumi.Input[str] cpu_share_level: The allocation level for CPU resources. Can be
one of `high`, `low`, `normal`, or `custom`. Default: `custom`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: Map of custom attribute ids to attribute
value strings to set for virtual machine.
:param pulumi.Input[str] datacenter_id: The datacenter id. Required only when deploying
an ovf template.
:param pulumi.Input[str] datastore_cluster_id: The managed object reference
ID of the datastore cluster ID to use. This setting
applies to entire virtual machine and implies that you wish to use Storage
DRS with this virtual machine. See the section on virtual machine
migration for details on changing this value.
:param pulumi.Input[str] datastore_id: The datastore ID that the ISO is located in.
Required for using a datastore ISO. Conflicts with `client_device`.
:param pulumi.Input[str] default_ip_address: The IP address selected by the provider to be used with
any provisioners configured on this resource.
Whenever possible, this is the first IPv4 address that is reachable through
the default gateway configured on the machine, then the first reachable IPv6
address, and then the first general discovered address if neither exist. If
VMware tools is not running on the virtual machine, or if the VM is powered
off, this value will be blank.
:param pulumi.Input[Sequence[pulumi.Input['VirtualMachineDiskArgs']]] disks: A specification for a virtual disk device on this virtual
machine. See disk options below.
:param pulumi.Input[bool] efi_secure_boot_enabled: When the `firmware` type is set to is
`efi`, this enables EFI secure boot. Default: `false`.
:param pulumi.Input[bool] enable_disk_uuid: Expose the UUIDs of attached virtual disks to
the virtual machine, allowing access to them in the guest. Default: `false`.
:param pulumi.Input[bool] enable_logging: Enable logging of virtual machine events to a
log file stored in the virtual machine directory. Default: `false`.
:param pulumi.Input[str] ept_rvi_mode: The EPT/RVI (hardware memory virtualization)
setting for this virtual machine. Can be one of `automatic`, `on`, or `off`.
Default: `automatic`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] extra_config: Extra configuration data for this virtual
machine. Can be used to supply advanced parameters not normally in
configuration, such as instance metadata.
:param pulumi.Input[str] firmware: The firmware interface to use on the virtual machine.
Can be one of `bios` or `EFI`. Default: `bios`.
:param pulumi.Input[str] folder: The path to the folder to put this virtual machine in,
relative to the datacenter that the resource pool is in.
:param pulumi.Input[bool] force_power_off: If a guest shutdown failed or timed out while
updating or destroying (see
`shutdown_wait_timeout`), force the power-off of
the virtual machine. Default: `true`.
:param pulumi.Input[str] guest_id: The guest ID for the operating system type. For a
full list of possible values, see [here][vmware-docs-guest-ids]. Default: `other-64`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] guest_ip_addresses: The current list of IP addresses on this machine,
including the value of `default_ip_address`. If VMware tools is not running
on the virtual machine, or if the VM is powered off, this list will be empty.
* `moid`: The managed object reference ID of the created
virtual machine.
:param pulumi.Input[int] hardware_version: The hardware version number. Valid range
is from 4 to 15. The hardware version cannot be downgraded. See [virtual
machine hardware compatibility][virtual-machine-hardware-compatibility] for
more details.
:param pulumi.Input[str] host_system_id: An optional managed object reference
ID of a host to put this virtual machine on. See the
section on virtual machine migration for
details on changing this value. If a `host_system_id` is not supplied,
vSphere will select a host in the resource pool to place the virtual machine,
according to any defaults or DRS policies in place.
:param pulumi.Input[str] hv_mode: The (non-nested) hardware virtualization setting for
this virtual machine. Can be one of `hvAuto`, `hvOn`, or `hvOff`. Default:
`hvAuto`.
:param pulumi.Input[int] ide_controller_count: The number of IDE controllers that Terraform manages on this virtual machine. This directly affects the amount of disks
you can add to the virtual machine and the maximum disk unit number. Note that lowering this value does not remove
controllers.
:param pulumi.Input[Sequence[pulumi.Input[str]]] ignored_guest_ips: List of IP addresses and CIDR networks to
ignore while waiting for an available IP address using either of the waiters.
Any IP addresses in this list will be ignored if they show up so that the
waiter will continue to wait for a real IP address. Default: [].
:param pulumi.Input[bool] imported: This is flagged if the virtual machine has been imported, or the
state has been migrated from a previous version of the resource. It
influences the behavior of the first post-import apply operation. See the
section on importing below.
:param pulumi.Input[str] latency_sensitivity: Controls the scheduling delay of the
virtual machine. Use a higher sensitivity for applications that require lower
latency, such as VOIP, media player applications, or applications that
require frequent access to mouse or keyboard devices. Can be one of `low`,
`normal`, `medium`, or `high`.
:param pulumi.Input[int] memory: The size of the virtual machine's memory, in MB.
Default: `1024` (1 GB).
:param pulumi.Input[bool] memory_hot_add_enabled: Allow memory to be added to this
virtual machine while it is running.
:param pulumi.Input[int] memory_limit: The maximum amount of memory (in MB) that this
virtual machine can consume, regardless of available resources. The default
is no limit.
:param pulumi.Input[int] memory_reservation: The amount of memory (in MB) that this
virtual machine is guaranteed. The default is no reservation.
:param pulumi.Input[int] memory_share_count: The number of memory shares allocated to
the virtual machine when the `memory_share_level` is `custom`.
:param pulumi.Input[str] memory_share_level: The allocation level for memory resources.
Can be one of `high`, `low`, `normal`, or `custom`. Default: `custom`.
:param pulumi.Input[int] migrate_wait_timeout: The amount of time, in minutes, to wait
for a virtual machine migration to complete before failing. Default: 10
minutes. Also see the section on virtual machine
migration.
:param pulumi.Input[str] moid: The machine object ID from VMWare
:param pulumi.Input[str] name: The name of the virtual machine.
:param pulumi.Input[bool] nested_hv_enabled: Enable nested hardware virtualization on
this virtual machine, facilitating nested virtualization in the guest.
Default: `false`.
:param pulumi.Input[Sequence[pulumi.Input['VirtualMachineNetworkInterfaceArgs']]] network_interfaces: A specification for a virtual NIC on this
virtual machine. See network interface options
below.
:param pulumi.Input[int] num_cores_per_socket: The number of cores per socket in this
virtual machine. The number of vCPUs on the virtual machine will be
`num_cpus` divided by `num_cores_per_socket`. If specified, the value
supplied to `num_cpus` must be evenly divisible by this value. Default: `1`.
:param pulumi.Input[int] num_cpus: The total number of virtual processor cores to assign
to this virtual machine. Default: `1`.
:param pulumi.Input['VirtualMachineOvfDeployArgs'] ovf_deploy: When specified, the VM will be deployed from the
provided ovf/ova template. See creating a virtual machine from a
ovf/ova template for more details.
:param pulumi.Input[Sequence[pulumi.Input[str]]] pci_device_ids: List of host PCI device IDs to create PCI
passthroughs for.
:param pulumi.Input[int] poweron_timeout: The amount of time, in seconds, that we will be trying to power on a VM
:param pulumi.Input[bool] reboot_required: Value internal to the provider used to determine if a
configuration set change requires a reboot. This value is only useful during
an update process and gets reset on refresh.
:param pulumi.Input[str] replace_trigger: Triggers replacement of resource whenever it changes.
`replace_trigger = sha256(format("%s-%s",data.template_file.cloud_init_metadata.rendered,data.template_file.cloud_init_userdata.rendered))`
will fingerprint the changes in cloud_init metadata and userdata templates. This will enable a replacement
of the resource whenever the dependant template renders a new configuration. (Forces a replacement)
:param pulumi.Input[str] resource_pool_id: The managed object reference
ID of the resource pool to put this virtual machine in.
See the section on virtual machine migration
for details on changing this value.
:param pulumi.Input[bool] run_tools_scripts_after_power_on: Enable the execution of
post-power-on scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[bool] run_tools_scripts_after_resume: Enable the execution of
post-resume scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[bool] run_tools_scripts_before_guest_reboot: Enable the execution of
pre-reboot scripts when VMware tools is installed. Default: `false`.
:param pulumi.Input[bool] run_tools_scripts_before_guest_shutdown: Enable the execution
of pre-shutdown scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[bool] run_tools_scripts_before_guest_standby: Enable the execution of
pre-standby scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[int] sata_controller_count: The number of SATA controllers that Terraform manages on this virtual machine. This directly affects the amount of disks
you can add to the virtual machine and the maximum disk unit number. Note that lowering this value does not remove
controllers.
:param pulumi.Input[str] scsi_bus_sharing: Mode for sharing the SCSI bus. The modes are
physicalSharing, virtualSharing, and noSharing. Default: `noSharing`.
:param pulumi.Input[int] scsi_controller_count: The number of SCSI controllers that
this provider manages on this virtual machine. This directly affects the amount
of disks you can add to the virtual machine and the maximum disk unit number.
Note that lowering this value does not remove controllers. Default: `1`.
:param pulumi.Input[str] scsi_type: The type of SCSI bus this virtual machine will have.
Can be one of lsilogic (LSI Logic Parallel), lsilogic-sas (LSI Logic SAS) or
pvscsi (VMware Paravirtual). Default: `pvscsi`.
:param pulumi.Input[int] shutdown_wait_timeout: The amount of time, in minutes, to wait
for a graceful guest shutdown when making necessary updates to the virtual
machine. If `force_power_off` is set to true, the VM will be force powered-off
after this timeout, otherwise an error is returned. Default: 3 minutes.
:param pulumi.Input[str] storage_policy_id: The UUID of the storage policy to assign to this disk.
:param pulumi.Input[str] swap_placement_policy: The swap file placement policy for this
virtual machine. Can be one of `inherit`, `hostLocal`, or `vmDirectory`.
Default: `inherit`.
:param pulumi.Input[bool] sync_time_with_host: Enable guest clock synchronization with the host.
On vSphere 7 U1 and above, with only this setting the clock is synchronized on
startup and resume so consider also setting `sync_time_with_host_periodically`.
Requires VMware tools to be installed. Default: `false`.
:param pulumi.Input[bool] sync_time_with_host_periodically: Enable periodic clock
synchronization with the host. Supported only on vSphere 7 U1 and above.
On older versions setting `sync_time_with_host` is enough for periodic
synchronization. Requires VMware tools to be installed. Default: `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
:param pulumi.Input[str] uuid: The UUID of the virtual disk's VMDK file. This is used to track the
virtual disk on the virtual machine.
:param pulumi.Input['VirtualMachineVappArgs'] vapp: Optional vApp configuration. The only sub-key available
is `properties`, which is a key/value map of properties for virtual machines
imported from OVF or OVA files. See Using vApp properties to supply OVF/OVA
configuration for
more details.
:param pulumi.Input[Sequence[pulumi.Input[str]]] vapp_transports: Computed value which is only valid for cloned virtual
machines. A list of vApp transport methods supported by the source virtual
machine or template.
:param pulumi.Input[bool] vbs_enabled: Enable Virtualization Based Security. Requires
`firmware` to be `efi`, and `vvtd_enabled`, `nested_hv_enabled` and
`efi_secure_boot_enabled` must all have a value of `true`. Supported on
vSphere 6.7 and higher. Default: `false`.
:param pulumi.Input[str] vmware_tools_status: The state of VMware tools in the guest. This will
determine the proper course of action for some device operations.
:param pulumi.Input[str] vmx_path: The path of the virtual machine's configuration file in the VM's
datastore.
:param pulumi.Input[bool] vvtd_enabled: Flag to specify if Intel Virtualization Technology
for Directed I/O is enabled for this virtual machine (_I/O MMU_ in the
vSphere Client). Supported on vSphere 6.7 and higher. Default: `false`.
:param pulumi.Input[int] wait_for_guest_ip_timeout: The amount of time, in minutes, to
wait for an available guest IP address on this virtual machine. This should
only be used if your version of VMware Tools does not allow the
`wait_for_guest_net_timeout` waiter to be
used. A value less than 1 disables the waiter. Default: 0.
:param pulumi.Input[bool] wait_for_guest_net_routable: Controls whether or not the guest
network waiter waits for a routable address. When `false`, the waiter does
not wait for a default gateway, nor are IP addresses checked against any
discovered default gateways as part of its success criteria. This property is
ignored if the `wait_for_guest_ip_timeout`
waiter is used. Default: `true`.
:param pulumi.Input[int] wait_for_guest_net_timeout: The amount of time, in minutes, to
wait for an available IP address on this virtual machine's NICs. Older
versions of VMware Tools do not populate this property. In those cases, this
waiter can be disabled and the
`wait_for_guest_ip_timeout` waiter can be used
instead. A value less than 1 disables the waiter. Default: 5 minutes.
"""
if alternate_guest_name is not None:
pulumi.set(__self__, "alternate_guest_name", alternate_guest_name)
if annotation is not None:
pulumi.set(__self__, "annotation", annotation)
if boot_delay is not None:
pulumi.set(__self__, "boot_delay", boot_delay)
if boot_retry_delay is not None:
pulumi.set(__self__, "boot_retry_delay", boot_retry_delay)
if boot_retry_enabled is not None:
pulumi.set(__self__, "boot_retry_enabled", boot_retry_enabled)
if cdrom is not None:
pulumi.set(__self__, "cdrom", cdrom)
if change_version is not None:
pulumi.set(__self__, "change_version", change_version)
if clone is not None:
pulumi.set(__self__, "clone", clone)
if cpu_hot_add_enabled is not None:
pulumi.set(__self__, "cpu_hot_add_enabled", cpu_hot_add_enabled)
if cpu_hot_remove_enabled is not None:
pulumi.set(__self__, "cpu_hot_remove_enabled", cpu_hot_remove_enabled)
if cpu_limit is not None:
pulumi.set(__self__, "cpu_limit", cpu_limit)
if cpu_performance_counters_enabled is not None:
pulumi.set(__self__, "cpu_performance_counters_enabled", cpu_performance_counters_enabled)
if cpu_reservation is not None:
pulumi.set(__self__, "cpu_reservation", cpu_reservation)
if cpu_share_count is not None:
pulumi.set(__self__, "cpu_share_count", cpu_share_count)
if cpu_share_level is not None:
pulumi.set(__self__, "cpu_share_level", cpu_share_level)
if custom_attributes is not None:
pulumi.set(__self__, "custom_attributes", custom_attributes)
if datacenter_id is not None:
pulumi.set(__self__, "datacenter_id", datacenter_id)
if datastore_cluster_id is not None:
pulumi.set(__self__, "datastore_cluster_id", datastore_cluster_id)
if datastore_id is not None:
pulumi.set(__self__, "datastore_id", datastore_id)
if default_ip_address is not None:
pulumi.set(__self__, "default_ip_address", default_ip_address)
if disks is not None:
pulumi.set(__self__, "disks", disks)
if efi_secure_boot_enabled is not None:
pulumi.set(__self__, "efi_secure_boot_enabled", efi_secure_boot_enabled)
if enable_disk_uuid is not None:
pulumi.set(__self__, "enable_disk_uuid", enable_disk_uuid)
if enable_logging is not None:
pulumi.set(__self__, "enable_logging", enable_logging)
if ept_rvi_mode is not None:
pulumi.set(__self__, "ept_rvi_mode", ept_rvi_mode)
if extra_config is not None:
pulumi.set(__self__, "extra_config", extra_config)
if firmware is not None:
pulumi.set(__self__, "firmware", firmware)
if folder is not None:
pulumi.set(__self__, "folder", folder)
if force_power_off is not None:
pulumi.set(__self__, "force_power_off", force_power_off)
if guest_id is not None:
pulumi.set(__self__, "guest_id", guest_id)
if guest_ip_addresses is not None:
pulumi.set(__self__, "guest_ip_addresses", guest_ip_addresses)
if hardware_version is not None:
pulumi.set(__self__, "hardware_version", hardware_version)
if host_system_id is not None:
pulumi.set(__self__, "host_system_id", host_system_id)
if hv_mode is not None:
pulumi.set(__self__, "hv_mode", hv_mode)
if ide_controller_count is not None:
pulumi.set(__self__, "ide_controller_count", ide_controller_count)
if ignored_guest_ips is not None:
pulumi.set(__self__, "ignored_guest_ips", ignored_guest_ips)
if imported is not None:
pulumi.set(__self__, "imported", imported)
if latency_sensitivity is not None:
pulumi.set(__self__, "latency_sensitivity", latency_sensitivity)
if memory is not None:
pulumi.set(__self__, "memory", memory)
if memory_hot_add_enabled is not None:
pulumi.set(__self__, "memory_hot_add_enabled", memory_hot_add_enabled)
if memory_limit is not None:
pulumi.set(__self__, "memory_limit", memory_limit)
if memory_reservation is not None:
pulumi.set(__self__, "memory_reservation", memory_reservation)
if memory_share_count is not None:
pulumi.set(__self__, "memory_share_count", memory_share_count)
if memory_share_level is not None:
pulumi.set(__self__, "memory_share_level", memory_share_level)
if migrate_wait_timeout is not None:
pulumi.set(__self__, "migrate_wait_timeout", migrate_wait_timeout)
if moid is not None:
pulumi.set(__self__, "moid", moid)
if name is not None:
pulumi.set(__self__, "name", name)
if nested_hv_enabled is not None:
pulumi.set(__self__, "nested_hv_enabled", nested_hv_enabled)
if network_interfaces is not None:
pulumi.set(__self__, "network_interfaces", network_interfaces)
if num_cores_per_socket is not None:
pulumi.set(__self__, "num_cores_per_socket", num_cores_per_socket)
if num_cpus is not None:
pulumi.set(__self__, "num_cpus", num_cpus)
if ovf_deploy is not None:
pulumi.set(__self__, "ovf_deploy", ovf_deploy)
if pci_device_ids is not None:
pulumi.set(__self__, "pci_device_ids", pci_device_ids)
if poweron_timeout is not None:
pulumi.set(__self__, "poweron_timeout", poweron_timeout)
if reboot_required is not None:
pulumi.set(__self__, "reboot_required", reboot_required)
if replace_trigger is not None:
pulumi.set(__self__, "replace_trigger", replace_trigger)
if resource_pool_id is not None:
pulumi.set(__self__, "resource_pool_id", resource_pool_id)
if run_tools_scripts_after_power_on is not None:
pulumi.set(__self__, "run_tools_scripts_after_power_on", run_tools_scripts_after_power_on)
if run_tools_scripts_after_resume is not None:
pulumi.set(__self__, "run_tools_scripts_after_resume", run_tools_scripts_after_resume)
if run_tools_scripts_before_guest_reboot is not None:
pulumi.set(__self__, "run_tools_scripts_before_guest_reboot", run_tools_scripts_before_guest_reboot)
if run_tools_scripts_before_guest_shutdown is not None:
pulumi.set(__self__, "run_tools_scripts_before_guest_shutdown", run_tools_scripts_before_guest_shutdown)
if run_tools_scripts_before_guest_standby is not None:
pulumi.set(__self__, "run_tools_scripts_before_guest_standby", run_tools_scripts_before_guest_standby)
if sata_controller_count is not None:
pulumi.set(__self__, "sata_controller_count", sata_controller_count)
if scsi_bus_sharing is not None:
pulumi.set(__self__, "scsi_bus_sharing", scsi_bus_sharing)
if scsi_controller_count is not None:
pulumi.set(__self__, "scsi_controller_count", scsi_controller_count)
if scsi_type is not None:
pulumi.set(__self__, "scsi_type", scsi_type)
if shutdown_wait_timeout is not None:
pulumi.set(__self__, "shutdown_wait_timeout", shutdown_wait_timeout)
if storage_policy_id is not None:
pulumi.set(__self__, "storage_policy_id", storage_policy_id)
if swap_placement_policy is not None:
pulumi.set(__self__, "swap_placement_policy", swap_placement_policy)
if sync_time_with_host is not None:
pulumi.set(__self__, "sync_time_with_host", sync_time_with_host)
if sync_time_with_host_periodically is not None:
pulumi.set(__self__, "sync_time_with_host_periodically", sync_time_with_host_periodically)
if tags is not None:
pulumi.set(__self__, "tags", tags)
if uuid is not None:
pulumi.set(__self__, "uuid", uuid)
if vapp is not None:
pulumi.set(__self__, "vapp", vapp)
if vapp_transports is not None:
pulumi.set(__self__, "vapp_transports", vapp_transports)
if vbs_enabled is not None:
pulumi.set(__self__, "vbs_enabled", vbs_enabled)
if vmware_tools_status is not None:
pulumi.set(__self__, "vmware_tools_status", vmware_tools_status)
if vmx_path is not None:
pulumi.set(__self__, "vmx_path", vmx_path)
if vvtd_enabled is not None:
pulumi.set(__self__, "vvtd_enabled", vvtd_enabled)
if wait_for_guest_ip_timeout is not None:
pulumi.set(__self__, "wait_for_guest_ip_timeout", wait_for_guest_ip_timeout)
if wait_for_guest_net_routable is not None:
pulumi.set(__self__, "wait_for_guest_net_routable", wait_for_guest_net_routable)
if wait_for_guest_net_timeout is not None:
pulumi.set(__self__, "wait_for_guest_net_timeout", wait_for_guest_net_timeout)
@property
@pulumi.getter(name="alternateGuestName")
def alternate_guest_name(self) -> Optional[pulumi.Input[str]]:
    """
    The guest operating-system name that applies when `guest_id`
    is `other` or `other-64`.
    """
    stored = pulumi.get(self, "alternate_guest_name")
    return stored

@alternate_guest_name.setter
def alternate_guest_name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "alternate_guest_name", value)
@property
@pulumi.getter
def annotation(self) -> Optional[pulumi.Input[str]]:
    """
    A user-supplied description for the virtual machine; by default
    no annotation is set.
    """
    stored = pulumi.get(self, "annotation")
    return stored

@annotation.setter
def annotation(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "annotation", value)
@property
@pulumi.getter(name="bootDelay")
def boot_delay(self) -> Optional[pulumi.Input[int]]:
    """
    Milliseconds to wait before the boot sequence begins; the
    default is no delay.
    """
    stored = pulumi.get(self, "boot_delay")
    return stored

@boot_delay.setter
def boot_delay(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "boot_delay", value)
@property
@pulumi.getter(name="bootRetryDelay")
def boot_retry_delay(self) -> Optional[pulumi.Input[int]]:
    """
    Milliseconds to wait before retrying the boot sequence. Only
    honored when `boot_retry_enabled` is true. Default: `10000`
    (10 seconds).
    """
    stored = pulumi.get(self, "boot_retry_delay")
    return stored

@boot_retry_delay.setter
def boot_retry_delay(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "boot_retry_delay", value)
@property
@pulumi.getter(name="bootRetryEnabled")
def boot_retry_enabled(self) -> Optional[pulumi.Input[bool]]:
    """
    When true, a virtual machine that fails to boot retries after
    the delay given by `boot_retry_delay`. Default: `false`.
    """
    stored = pulumi.get(self, "boot_retry_enabled")
    return stored

@boot_retry_enabled.setter
def boot_retry_enabled(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "boot_retry_enabled", value)
@property
@pulumi.getter
def cdrom(self) -> Optional[pulumi.Input['VirtualMachineCdromArgs']]:
    """
    Specification of a CDROM device for this virtual machine; see
    the CDROM options below.
    """
    stored = pulumi.get(self, "cdrom")
    return stored

@cdrom.setter
def cdrom(self, value: Optional[pulumi.Input['VirtualMachineCdromArgs']]):
    pulumi.set(self, "cdrom", value)
@property
@pulumi.getter(name="changeVersion")
def change_version(self) -> Optional[pulumi.Input[str]]:
    """
    Unique identifier for a given version of the last applied
    configuration, such as the timestamp of the most recent
    configuration update.
    """
    stored = pulumi.get(self, "change_version")
    return stored

@change_version.setter
def change_version(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "change_version", value)
@property
@pulumi.getter
def clone(self) -> Optional[pulumi.Input['VirtualMachineCloneArgs']]:
    """
    When set, the VM is created as a clone of the specified
    template, with optional customization options. See creating a
    virtual machine from a template for more details.
    """
    stored = pulumi.get(self, "clone")
    return stored

@clone.setter
def clone(self, value: Optional[pulumi.Input['VirtualMachineCloneArgs']]):
    pulumi.set(self, "clone", value)
@property
@pulumi.getter(name="cpuHotAddEnabled")
def cpu_hot_add_enabled(self) -> Optional[pulumi.Input[bool]]:
    """
    Whether CPUs may be added to this virtual machine while it is
    running.
    """
    stored = pulumi.get(self, "cpu_hot_add_enabled")
    return stored

@cpu_hot_add_enabled.setter
def cpu_hot_add_enabled(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "cpu_hot_add_enabled", value)
@property
@pulumi.getter(name="cpuHotRemoveEnabled")
def cpu_hot_remove_enabled(self) -> Optional[pulumi.Input[bool]]:
    """
    Whether CPUs may be removed from this virtual machine while it
    is running.
    """
    stored = pulumi.get(self, "cpu_hot_remove_enabled")
    return stored

@cpu_hot_remove_enabled.setter
def cpu_hot_remove_enabled(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "cpu_hot_remove_enabled", value)
@property
@pulumi.getter(name="cpuLimit")
def cpu_limit(self) -> Optional[pulumi.Input[int]]:
    """
    Upper bound on CPU usage (in MHz) for this virtual machine,
    regardless of available resources. The default is no limit.
    """
    stored = pulumi.get(self, "cpu_limit")
    return stored

@cpu_limit.setter
def cpu_limit(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "cpu_limit", value)
@property
@pulumi.getter(name="cpuPerformanceCountersEnabled")
def cpu_performance_counters_enabled(self) -> Optional[pulumi.Input[bool]]:
    """
    Turns on CPU performance counters for this virtual machine.
    Default: `false`.
    """
    stored = pulumi.get(self, "cpu_performance_counters_enabled")
    return stored

@cpu_performance_counters_enabled.setter
def cpu_performance_counters_enabled(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "cpu_performance_counters_enabled", value)
@property
@pulumi.getter(name="cpuReservation")
def cpu_reservation(self) -> Optional[pulumi.Input[int]]:
    """
    CPU (in MHz) guaranteed to this virtual machine; the default is
    no reservation.
    """
    stored = pulumi.get(self, "cpu_reservation")
    return stored

@cpu_reservation.setter
def cpu_reservation(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "cpu_reservation", value)
@property
@pulumi.getter(name="cpuShareCount")
def cpu_share_count(self) -> Optional[pulumi.Input[int]]:
    """
    Number of CPU shares given to the virtual machine when
    `cpu_share_level` is `custom`.
    """
    stored = pulumi.get(self, "cpu_share_count")
    return stored

@cpu_share_count.setter
def cpu_share_count(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "cpu_share_count", value)
@property
@pulumi.getter(name="cpuShareLevel")
def cpu_share_level(self) -> Optional[pulumi.Input[str]]:
    """
    CPU resource allocation level: one of `high`, `low`, `normal`,
    or `custom`. Default: `custom`.
    """
    stored = pulumi.get(self, "cpu_share_level")
    return stored

@cpu_share_level.setter
def cpu_share_level(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "cpu_share_level", value)
@property
@pulumi.getter(name="customAttributes")
def custom_attributes(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
    """
    Mapping of custom attribute IDs to attribute value strings to
    set for the virtual machine.
    """
    stored = pulumi.get(self, "custom_attributes")
    return stored

@custom_attributes.setter
def custom_attributes(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
    pulumi.set(self, "custom_attributes", value)
@property
@pulumi.getter(name="datacenterId")
def datacenter_id(self) -> Optional[pulumi.Input[str]]:
    """
    The datacenter ID. Required only when deploying an OVF
    template.
    """
    stored = pulumi.get(self, "datacenter_id")
    return stored

@datacenter_id.setter
def datacenter_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "datacenter_id", value)
@property
@pulumi.getter(name="datastoreClusterId")
def datastore_cluster_id(self) -> Optional[pulumi.Input[str]]:
    """
    Managed object reference ID of the datastore cluster to use.
    Applies to the entire virtual machine and implies the use of
    Storage DRS. See the section on virtual machine migration for
    details on changing this value.
    """
    stored = pulumi.get(self, "datastore_cluster_id")
    return stored

@datastore_cluster_id.setter
def datastore_cluster_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "datastore_cluster_id", value)
@property
@pulumi.getter(name="datastoreId")
def datastore_id(self) -> Optional[pulumi.Input[str]]:
    """
    ID of the datastore in which the ISO is located. Required when
    using a datastore ISO; conflicts with `client_device`.
    """
    stored = pulumi.get(self, "datastore_id")
    return stored

@datastore_id.setter
def datastore_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "datastore_id", value)
@property
@pulumi.getter(name="defaultIpAddress")
def default_ip_address(self) -> Optional[pulumi.Input[str]]:
    """
    IP address the provider selects for use with any provisioners
    configured on this resource. Whenever possible this is the
    first IPv4 address reachable through the machine's default
    gateway, then the first reachable IPv6 address, then the first
    general discovered address if neither exists. Blank when VMware
    tools is not running in the guest or the VM is powered off.
    """
    stored = pulumi.get(self, "default_ip_address")
    return stored

@default_ip_address.setter
def default_ip_address(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "default_ip_address", value)
@property
@pulumi.getter
def disks(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualMachineDiskArgs']]]]:
    """
    Specifications of virtual disk devices on this virtual machine;
    see the disk options below.
    """
    stored = pulumi.get(self, "disks")
    return stored

@disks.setter
def disks(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualMachineDiskArgs']]]]):
    pulumi.set(self, "disks", value)
@property
@pulumi.getter(name="efiSecureBootEnabled")
def efi_secure_boot_enabled(self) -> Optional[pulumi.Input[bool]]:
    """
    Enables EFI secure boot when the `firmware` type is set to
    `efi`. Default: `false`.
    """
    stored = pulumi.get(self, "efi_secure_boot_enabled")
    return stored

@efi_secure_boot_enabled.setter
def efi_secure_boot_enabled(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "efi_secure_boot_enabled", value)
@property
@pulumi.getter(name="enableDiskUuid")
def enable_disk_uuid(self) -> Optional[pulumi.Input[bool]]:
"""
Expose the UUIDs of attached virtual disks to
the virtual machine, allowing access to them in the guest. Default: `false`.
"""
return pulumi.get(self, "enable_disk_uuid")
@enable_disk_uuid.setter
def enable_disk_uuid(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_disk_uuid", value)
@property
@pulumi.getter(name="enableLogging")
def enable_logging(self) -> Optional[pulumi.Input[bool]]:
"""
Enable logging of virtual machine events to a
log file stored in the virtual machine directory. Default: `false`.
"""
return pulumi.get(self, "enable_logging")
@enable_logging.setter
def enable_logging(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "enable_logging", value)
@property
@pulumi.getter(name="eptRviMode")
def ept_rvi_mode(self) -> Optional[pulumi.Input[str]]:
    """
    The EPT/RVI (hardware memory virtualization)
    setting for this virtual machine. Can be one of `automatic`, `on`, or `off`.
    Default: `automatic`.
    """
    return pulumi.get(self, "ept_rvi_mode")

@ept_rvi_mode.setter
def ept_rvi_mode(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "ept_rvi_mode", value)

@property
@pulumi.getter(name="extraConfig")
def extra_config(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
    """
    Extra configuration data for this virtual
    machine. Can be used to supply advanced parameters not normally in
    configuration, such as instance metadata.
    """
    return pulumi.get(self, "extra_config")

@extra_config.setter
def extra_config(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
    pulumi.set(self, "extra_config", value)

@property
@pulumi.getter
def firmware(self) -> Optional[pulumi.Input[str]]:
    """
    The firmware interface to use on the virtual machine.
    Can be one of `bios` or `efi`. Default: `bios`.
    """
    return pulumi.get(self, "firmware")

@firmware.setter
def firmware(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "firmware", value)

@property
@pulumi.getter
def folder(self) -> Optional[pulumi.Input[str]]:
    """
    The path to the folder to put this virtual machine in,
    relative to the datacenter that the resource pool is in.
    """
    return pulumi.get(self, "folder")

@folder.setter
def folder(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "folder", value)

@property
@pulumi.getter(name="forcePowerOff")
def force_power_off(self) -> Optional[pulumi.Input[bool]]:
    """
    If a guest shutdown failed or timed out while
    updating or destroying (see
    `shutdown_wait_timeout`), force the power-off of
    the virtual machine. Default: `true`.
    """
    return pulumi.get(self, "force_power_off")

@force_power_off.setter
def force_power_off(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "force_power_off", value)

@property
@pulumi.getter(name="guestId")
def guest_id(self) -> Optional[pulumi.Input[str]]:
    """
    The guest ID for the operating system type. For a
    full list of possible values, see [here][vmware-docs-guest-ids]. Default: `other-64`.
    """
    return pulumi.get(self, "guest_id")

@guest_id.setter
def guest_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "guest_id", value)
@property
@pulumi.getter(name="guestIpAddresses")
def guest_ip_addresses(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    The current list of IP addresses on this machine,
    including the value of `default_ip_address`. If VMware tools is not running
    on the virtual machine, or if the VM is powered off, this list will be empty.
    """
    return pulumi.get(self, "guest_ip_addresses")

@guest_ip_addresses.setter
def guest_ip_addresses(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "guest_ip_addresses", value)

@property
@pulumi.getter(name="hardwareVersion")
def hardware_version(self) -> Optional[pulumi.Input[int]]:
    """
    The hardware version number. Valid range
    is from 4 to 15. The hardware version cannot be downgraded. See [virtual
    machine hardware compatibility][virtual-machine-hardware-compatibility] for
    more details.
    """
    return pulumi.get(self, "hardware_version")

@hardware_version.setter
def hardware_version(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "hardware_version", value)

@property
@pulumi.getter(name="hostSystemId")
def host_system_id(self) -> Optional[pulumi.Input[str]]:
    """
    An optional managed object reference
    ID of a host to put this virtual machine on. See the
    section on virtual machine migration for
    details on changing this value. If a `host_system_id` is not supplied,
    vSphere will select a host in the resource pool to place the virtual machine,
    according to any defaults or DRS policies in place.
    """
    return pulumi.get(self, "host_system_id")

@host_system_id.setter
def host_system_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "host_system_id", value)

@property
@pulumi.getter(name="hvMode")
def hv_mode(self) -> Optional[pulumi.Input[str]]:
    """
    The (non-nested) hardware virtualization setting for
    this virtual machine. Can be one of `hvAuto`, `hvOn`, or `hvOff`. Default:
    `hvAuto`.
    """
    return pulumi.get(self, "hv_mode")

@hv_mode.setter
def hv_mode(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "hv_mode", value)

@property
@pulumi.getter(name="ideControllerCount")
def ide_controller_count(self) -> Optional[pulumi.Input[int]]:
    """
    The number of IDE controllers that Terraform manages on this virtual machine. This directly affects the amount of disks
    you can add to the virtual machine and the maximum disk unit number. Note that lowering this value does not remove
    controllers.
    """
    return pulumi.get(self, "ide_controller_count")

@ide_controller_count.setter
def ide_controller_count(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "ide_controller_count", value)

@property
@pulumi.getter(name="ignoredGuestIps")
def ignored_guest_ips(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    List of IP addresses and CIDR networks to
    ignore while waiting for an available IP address using either of the waiters.
    Any IP addresses in this list will be ignored if they show up so that the
    waiter will continue to wait for a real IP address. Default: [].
    """
    return pulumi.get(self, "ignored_guest_ips")

@ignored_guest_ips.setter
def ignored_guest_ips(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "ignored_guest_ips", value)

@property
@pulumi.getter
def imported(self) -> Optional[pulumi.Input[bool]]:
    """
    This is flagged if the virtual machine has been imported, or the
    state has been migrated from a previous version of the resource. It
    influences the behavior of the first post-import apply operation. See the
    section on importing below.
    """
    return pulumi.get(self, "imported")

@imported.setter
def imported(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "imported", value)
@property
@pulumi.getter(name="latencySensitivity")
def latency_sensitivity(self) -> Optional[pulumi.Input[str]]:
    """
    Controls the scheduling delay of the
    virtual machine. Use a higher sensitivity for applications that require lower
    latency, such as VOIP, media player applications, or applications that
    require frequent access to mouse or keyboard devices. Can be one of `low`,
    `normal`, `medium`, or `high`.
    """
    return pulumi.get(self, "latency_sensitivity")

@latency_sensitivity.setter
def latency_sensitivity(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "latency_sensitivity", value)

@property
@pulumi.getter
def memory(self) -> Optional[pulumi.Input[int]]:
    """
    The size of the virtual machine's memory, in MB.
    Default: `1024` (1 GB).
    """
    return pulumi.get(self, "memory")

@memory.setter
def memory(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "memory", value)

@property
@pulumi.getter(name="memoryHotAddEnabled")
def memory_hot_add_enabled(self) -> Optional[pulumi.Input[bool]]:
    """
    Allow memory to be added to this
    virtual machine while it is running.
    """
    return pulumi.get(self, "memory_hot_add_enabled")

@memory_hot_add_enabled.setter
def memory_hot_add_enabled(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "memory_hot_add_enabled", value)

@property
@pulumi.getter(name="memoryLimit")
def memory_limit(self) -> Optional[pulumi.Input[int]]:
    """
    The maximum amount of memory (in MB) that this
    virtual machine can consume, regardless of available resources. The default
    is no limit.
    """
    return pulumi.get(self, "memory_limit")

@memory_limit.setter
def memory_limit(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "memory_limit", value)

@property
@pulumi.getter(name="memoryReservation")
def memory_reservation(self) -> Optional[pulumi.Input[int]]:
    """
    The amount of memory (in MB) that this
    virtual machine is guaranteed. The default is no reservation.
    """
    return pulumi.get(self, "memory_reservation")

@memory_reservation.setter
def memory_reservation(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "memory_reservation", value)

@property
@pulumi.getter(name="memoryShareCount")
def memory_share_count(self) -> Optional[pulumi.Input[int]]:
    """
    The number of memory shares allocated to
    the virtual machine when the `memory_share_level` is `custom`.
    """
    return pulumi.get(self, "memory_share_count")

@memory_share_count.setter
def memory_share_count(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "memory_share_count", value)

@property
@pulumi.getter(name="memoryShareLevel")
def memory_share_level(self) -> Optional[pulumi.Input[str]]:
    """
    The allocation level for memory resources.
    Can be one of `high`, `low`, `normal`, or `custom`. Default: `custom`.
    """
    return pulumi.get(self, "memory_share_level")

@memory_share_level.setter
def memory_share_level(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "memory_share_level", value)

@property
@pulumi.getter(name="migrateWaitTimeout")
def migrate_wait_timeout(self) -> Optional[pulumi.Input[int]]:
    """
    The amount of time, in minutes, to wait
    for a virtual machine migration to complete before failing. Default: 10
    minutes. Also see the section on virtual machine
    migration.
    """
    return pulumi.get(self, "migrate_wait_timeout")

@migrate_wait_timeout.setter
def migrate_wait_timeout(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "migrate_wait_timeout", value)
@property
@pulumi.getter
def moid(self) -> Optional[pulumi.Input[str]]:
    """
    The machine object ID from VMware.
    """
    return pulumi.get(self, "moid")

@moid.setter
def moid(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "moid", value)

@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
    """
    The name of the virtual machine.
    """
    return pulumi.get(self, "name")

@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "name", value)

@property
@pulumi.getter(name="nestedHvEnabled")
def nested_hv_enabled(self) -> Optional[pulumi.Input[bool]]:
    """
    Enable nested hardware virtualization on
    this virtual machine, facilitating nested virtualization in the guest.
    Default: `false`.
    """
    return pulumi.get(self, "nested_hv_enabled")

@nested_hv_enabled.setter
def nested_hv_enabled(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "nested_hv_enabled", value)

@property
@pulumi.getter(name="networkInterfaces")
def network_interfaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['VirtualMachineNetworkInterfaceArgs']]]]:
    """
    A specification for a virtual NIC on this
    virtual machine. See network interface options
    below.
    """
    return pulumi.get(self, "network_interfaces")

@network_interfaces.setter
def network_interfaces(self, value: Optional[pulumi.Input[Sequence[pulumi.Input['VirtualMachineNetworkInterfaceArgs']]]]):
    pulumi.set(self, "network_interfaces", value)

@property
@pulumi.getter(name="numCoresPerSocket")
def num_cores_per_socket(self) -> Optional[pulumi.Input[int]]:
    """
    The number of cores per socket in this
    virtual machine. The number of vCPUs on the virtual machine will be
    `num_cpus` divided by `num_cores_per_socket`. If specified, the value
    supplied to `num_cpus` must be evenly divisible by this value. Default: `1`.
    """
    return pulumi.get(self, "num_cores_per_socket")

@num_cores_per_socket.setter
def num_cores_per_socket(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "num_cores_per_socket", value)

@property
@pulumi.getter(name="numCpus")
def num_cpus(self) -> Optional[pulumi.Input[int]]:
    """
    The total number of virtual processor cores to assign
    to this virtual machine. Default: `1`.
    """
    return pulumi.get(self, "num_cpus")

@num_cpus.setter
def num_cpus(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "num_cpus", value)

@property
@pulumi.getter(name="ovfDeploy")
def ovf_deploy(self) -> Optional[pulumi.Input['VirtualMachineOvfDeployArgs']]:
    """
    When specified, the VM will be deployed from the
    provided ovf/ova template. See creating a virtual machine from a
    ovf/ova template for more details.
    """
    return pulumi.get(self, "ovf_deploy")

@ovf_deploy.setter
def ovf_deploy(self, value: Optional[pulumi.Input['VirtualMachineOvfDeployArgs']]):
    pulumi.set(self, "ovf_deploy", value)

@property
@pulumi.getter(name="pciDeviceIds")
def pci_device_ids(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    List of host PCI device IDs to create PCI
    passthroughs for.
    """
    return pulumi.get(self, "pci_device_ids")

@pci_device_ids.setter
def pci_device_ids(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "pci_device_ids", value)
@property
@pulumi.getter(name="poweronTimeout")
def poweron_timeout(self) -> Optional[pulumi.Input[int]]:
    """
    The amount of time, in seconds, that we will be trying to power on a VM
    """
    return pulumi.get(self, "poweron_timeout")

@poweron_timeout.setter
def poweron_timeout(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "poweron_timeout", value)

@property
@pulumi.getter(name="rebootRequired")
def reboot_required(self) -> Optional[pulumi.Input[bool]]:
    """
    Value internal to the provider used to determine if a
    configuration set change requires a reboot. This value is only useful during
    an update process and gets reset on refresh.
    """
    return pulumi.get(self, "reboot_required")

@reboot_required.setter
def reboot_required(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "reboot_required", value)

@property
@pulumi.getter(name="replaceTrigger")
def replace_trigger(self) -> Optional[pulumi.Input[str]]:
    """
    Triggers replacement of resource whenever it changes.
    `replace_trigger = sha256(format("%s-%s",data.template_file.cloud_init_metadata.rendered,data.template_file.cloud_init_userdata.rendered))`
    will fingerprint the changes in cloud_init metadata and userdata templates. This will enable a replacement
    of the resource whenever the dependent template renders a new configuration. (Forces a replacement)
    """
    return pulumi.get(self, "replace_trigger")

@replace_trigger.setter
def replace_trigger(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "replace_trigger", value)

@property
@pulumi.getter(name="resourcePoolId")
def resource_pool_id(self) -> Optional[pulumi.Input[str]]:
    """
    The managed object reference
    ID of the resource pool to put this virtual machine in.
    See the section on virtual machine migration
    for details on changing this value.
    """
    return pulumi.get(self, "resource_pool_id")

@resource_pool_id.setter
def resource_pool_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "resource_pool_id", value)

@property
@pulumi.getter(name="runToolsScriptsAfterPowerOn")
def run_tools_scripts_after_power_on(self) -> Optional[pulumi.Input[bool]]:
    """
    Enable the execution of
    post-power-on scripts when VMware tools is installed. Default: `true`.
    """
    return pulumi.get(self, "run_tools_scripts_after_power_on")

@run_tools_scripts_after_power_on.setter
def run_tools_scripts_after_power_on(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "run_tools_scripts_after_power_on", value)

@property
@pulumi.getter(name="runToolsScriptsAfterResume")
def run_tools_scripts_after_resume(self) -> Optional[pulumi.Input[bool]]:
    """
    Enable the execution of
    post-resume scripts when VMware tools is installed. Default: `true`.
    """
    return pulumi.get(self, "run_tools_scripts_after_resume")

@run_tools_scripts_after_resume.setter
def run_tools_scripts_after_resume(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "run_tools_scripts_after_resume", value)

@property
@pulumi.getter(name="runToolsScriptsBeforeGuestReboot")
def run_tools_scripts_before_guest_reboot(self) -> Optional[pulumi.Input[bool]]:
    """
    Enable the execution of
    pre-reboot scripts when VMware tools is installed. Default: `false`.
    """
    return pulumi.get(self, "run_tools_scripts_before_guest_reboot")

@run_tools_scripts_before_guest_reboot.setter
def run_tools_scripts_before_guest_reboot(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "run_tools_scripts_before_guest_reboot", value)

@property
@pulumi.getter(name="runToolsScriptsBeforeGuestShutdown")
def run_tools_scripts_before_guest_shutdown(self) -> Optional[pulumi.Input[bool]]:
    """
    Enable the execution
    of pre-shutdown scripts when VMware tools is installed. Default: `true`.
    """
    return pulumi.get(self, "run_tools_scripts_before_guest_shutdown")

@run_tools_scripts_before_guest_shutdown.setter
def run_tools_scripts_before_guest_shutdown(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "run_tools_scripts_before_guest_shutdown", value)

@property
@pulumi.getter(name="runToolsScriptsBeforeGuestStandby")
def run_tools_scripts_before_guest_standby(self) -> Optional[pulumi.Input[bool]]:
    """
    Enable the execution of
    pre-standby scripts when VMware tools is installed. Default: `true`.
    """
    return pulumi.get(self, "run_tools_scripts_before_guest_standby")

@run_tools_scripts_before_guest_standby.setter
def run_tools_scripts_before_guest_standby(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "run_tools_scripts_before_guest_standby", value)
@property
@pulumi.getter(name="sataControllerCount")
def sata_controller_count(self) -> Optional[pulumi.Input[int]]:
    """
    The number of SATA controllers that Terraform manages on this virtual machine. This directly affects the amount of disks
    you can add to the virtual machine and the maximum disk unit number. Note that lowering this value does not remove
    controllers.
    """
    return pulumi.get(self, "sata_controller_count")

@sata_controller_count.setter
def sata_controller_count(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "sata_controller_count", value)

@property
@pulumi.getter(name="scsiBusSharing")
def scsi_bus_sharing(self) -> Optional[pulumi.Input[str]]:
    """
    Mode for sharing the SCSI bus. The modes are
    physicalSharing, virtualSharing, and noSharing. Default: `noSharing`.
    """
    return pulumi.get(self, "scsi_bus_sharing")

@scsi_bus_sharing.setter
def scsi_bus_sharing(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "scsi_bus_sharing", value)

@property
@pulumi.getter(name="scsiControllerCount")
def scsi_controller_count(self) -> Optional[pulumi.Input[int]]:
    """
    The number of SCSI controllers that
    this provider manages on this virtual machine. This directly affects the amount
    of disks you can add to the virtual machine and the maximum disk unit number.
    Note that lowering this value does not remove controllers. Default: `1`.
    """
    return pulumi.get(self, "scsi_controller_count")

@scsi_controller_count.setter
def scsi_controller_count(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "scsi_controller_count", value)

@property
@pulumi.getter(name="scsiType")
def scsi_type(self) -> Optional[pulumi.Input[str]]:
    """
    The type of SCSI bus this virtual machine will have.
    Can be one of lsilogic (LSI Logic Parallel), lsilogic-sas (LSI Logic SAS) or
    pvscsi (VMware Paravirtual). Default: `pvscsi`.
    """
    return pulumi.get(self, "scsi_type")

@scsi_type.setter
def scsi_type(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "scsi_type", value)

@property
@pulumi.getter(name="shutdownWaitTimeout")
def shutdown_wait_timeout(self) -> Optional[pulumi.Input[int]]:
    """
    The amount of time, in minutes, to wait
    for a graceful guest shutdown when making necessary updates to the virtual
    machine. If `force_power_off` is set to true, the VM will be force powered-off
    after this timeout, otherwise an error is returned. Default: 3 minutes.
    """
    return pulumi.get(self, "shutdown_wait_timeout")

@shutdown_wait_timeout.setter
def shutdown_wait_timeout(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "shutdown_wait_timeout", value)

@property
@pulumi.getter(name="storagePolicyId")
def storage_policy_id(self) -> Optional[pulumi.Input[str]]:
    """
    The UUID of the storage policy to assign to this disk.
    """
    return pulumi.get(self, "storage_policy_id")

@storage_policy_id.setter
def storage_policy_id(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "storage_policy_id", value)
@property
@pulumi.getter(name="swapPlacementPolicy")
def swap_placement_policy(self) -> Optional[pulumi.Input[str]]:
    """
    The swap file placement policy for this
    virtual machine. Can be one of `inherit`, `hostLocal`, or `vmDirectory`.
    Default: `inherit`.
    """
    return pulumi.get(self, "swap_placement_policy")

@swap_placement_policy.setter
def swap_placement_policy(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "swap_placement_policy", value)

@property
@pulumi.getter(name="syncTimeWithHost")
def sync_time_with_host(self) -> Optional[pulumi.Input[bool]]:
    """
    Enable guest clock synchronization with the host.
    On vSphere 7 U1 and above, with only this setting the clock is synchronized on
    startup and resume so consider also setting `sync_time_with_host_periodically`.
    Requires VMware tools to be installed. Default: `false`.
    """
    return pulumi.get(self, "sync_time_with_host")

@sync_time_with_host.setter
def sync_time_with_host(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "sync_time_with_host", value)

@property
@pulumi.getter(name="syncTimeWithHostPeriodically")
def sync_time_with_host_periodically(self) -> Optional[pulumi.Input[bool]]:
    """
    Enable periodic clock
    synchronization with the host. Supported only on vSphere 7 U1 and above.
    On older versions setting `sync_time_with_host` is enough for periodic
    synchronization. Requires VMware tools to be installed. Default: `false`.
    """
    return pulumi.get(self, "sync_time_with_host_periodically")

@sync_time_with_host_periodically.setter
def sync_time_with_host_periodically(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "sync_time_with_host_periodically", value)

@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    The IDs of any tags to attach to this resource.
    """
    return pulumi.get(self, "tags")

@tags.setter
def tags(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "tags", value)

@property
@pulumi.getter
def uuid(self) -> Optional[pulumi.Input[str]]:
    """
    The UUID of the virtual disk's VMDK file. This is used to track the
    virtual disk on the virtual machine.
    """
    return pulumi.get(self, "uuid")

@uuid.setter
def uuid(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "uuid", value)

@property
@pulumi.getter
def vapp(self) -> Optional[pulumi.Input['VirtualMachineVappArgs']]:
    """
    Optional vApp configuration. The only sub-key available
    is `properties`, which is a key/value map of properties for virtual machines
    imported from OVF or OVA files. See Using vApp properties to supply OVF/OVA
    configuration for
    more details.
    """
    return pulumi.get(self, "vapp")

@vapp.setter
def vapp(self, value: Optional[pulumi.Input['VirtualMachineVappArgs']]):
    pulumi.set(self, "vapp", value)

@property
@pulumi.getter(name="vappTransports")
def vapp_transports(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
    """
    Computed value which is only valid for cloned virtual
    machines. A list of vApp transport methods supported by the source virtual
    machine or template.
    """
    return pulumi.get(self, "vapp_transports")

@vapp_transports.setter
def vapp_transports(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
    pulumi.set(self, "vapp_transports", value)
@property
@pulumi.getter(name="vbsEnabled")
def vbs_enabled(self) -> Optional[pulumi.Input[bool]]:
    """
    Enable Virtualization Based Security. Requires
    `firmware` to be `efi`, and `vvtd_enabled`, `nested_hv_enabled` and
    `efi_secure_boot_enabled` must all have a value of `true`. Supported on
    vSphere 6.7 and higher. Default: `false`.
    """
    return pulumi.get(self, "vbs_enabled")

@vbs_enabled.setter
def vbs_enabled(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "vbs_enabled", value)

@property
@pulumi.getter(name="vmwareToolsStatus")
def vmware_tools_status(self) -> Optional[pulumi.Input[str]]:
    """
    The state of VMware tools in the guest. This will
    determine the proper course of action for some device operations.
    """
    return pulumi.get(self, "vmware_tools_status")

@vmware_tools_status.setter
def vmware_tools_status(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "vmware_tools_status", value)

@property
@pulumi.getter(name="vmxPath")
def vmx_path(self) -> Optional[pulumi.Input[str]]:
    """
    The path of the virtual machine's configuration file in the VM's
    datastore.
    """
    return pulumi.get(self, "vmx_path")

@vmx_path.setter
def vmx_path(self, value: Optional[pulumi.Input[str]]):
    pulumi.set(self, "vmx_path", value)

@property
@pulumi.getter(name="vvtdEnabled")
def vvtd_enabled(self) -> Optional[pulumi.Input[bool]]:
    """
    Flag to specify if Intel Virtualization Technology
    for Directed I/O is enabled for this virtual machine (_I/O MMU_ in the
    vSphere Client). Supported on vSphere 6.7 and higher. Default: `false`.
    """
    return pulumi.get(self, "vvtd_enabled")

@vvtd_enabled.setter
def vvtd_enabled(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "vvtd_enabled", value)

@property
@pulumi.getter(name="waitForGuestIpTimeout")
def wait_for_guest_ip_timeout(self) -> Optional[pulumi.Input[int]]:
    """
    The amount of time, in minutes, to
    wait for an available guest IP address on this virtual machine. This should
    only be used if your version of VMware Tools does not allow the
    `wait_for_guest_net_timeout` waiter to be
    used. A value less than 1 disables the waiter. Default: 0.
    """
    return pulumi.get(self, "wait_for_guest_ip_timeout")

@wait_for_guest_ip_timeout.setter
def wait_for_guest_ip_timeout(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "wait_for_guest_ip_timeout", value)

@property
@pulumi.getter(name="waitForGuestNetRoutable")
def wait_for_guest_net_routable(self) -> Optional[pulumi.Input[bool]]:
    """
    Controls whether or not the guest
    network waiter waits for a routable address. When `false`, the waiter does
    not wait for a default gateway, nor are IP addresses checked against any
    discovered default gateways as part of its success criteria. This property is
    ignored if the `wait_for_guest_ip_timeout`
    waiter is used. Default: `true`.
    """
    return pulumi.get(self, "wait_for_guest_net_routable")

@wait_for_guest_net_routable.setter
def wait_for_guest_net_routable(self, value: Optional[pulumi.Input[bool]]):
    pulumi.set(self, "wait_for_guest_net_routable", value)

@property
@pulumi.getter(name="waitForGuestNetTimeout")
def wait_for_guest_net_timeout(self) -> Optional[pulumi.Input[int]]:
    """
    The amount of time, in minutes, to
    wait for an available IP address on this virtual machine's NICs. Older
    versions of VMware Tools do not populate this property. In those cases, this
    waiter can be disabled and the
    `wait_for_guest_ip_timeout` waiter can be used
    instead. A value less than 1 disables the waiter. Default: 5 minutes.
    """
    return pulumi.get(self, "wait_for_guest_net_timeout")

@wait_for_guest_net_timeout.setter
def wait_for_guest_net_timeout(self, value: Optional[pulumi.Input[int]]):
    pulumi.set(self, "wait_for_guest_net_timeout", value)
class VirtualMachine(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alternate_guest_name: Optional[pulumi.Input[str]] = None,
annotation: Optional[pulumi.Input[str]] = None,
boot_delay: Optional[pulumi.Input[int]] = None,
boot_retry_delay: Optional[pulumi.Input[int]] = None,
boot_retry_enabled: Optional[pulumi.Input[bool]] = None,
cdrom: Optional[pulumi.Input[pulumi.InputType['VirtualMachineCdromArgs']]] = None,
clone: Optional[pulumi.Input[pulumi.InputType['VirtualMachineCloneArgs']]] = None,
cpu_hot_add_enabled: Optional[pulumi.Input[bool]] = None,
cpu_hot_remove_enabled: Optional[pulumi.Input[bool]] = None,
cpu_limit: Optional[pulumi.Input[int]] = None,
cpu_performance_counters_enabled: Optional[pulumi.Input[bool]] = None,
cpu_reservation: Optional[pulumi.Input[int]] = None,
cpu_share_count: Optional[pulumi.Input[int]] = None,
cpu_share_level: Optional[pulumi.Input[str]] = None,
custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
datacenter_id: Optional[pulumi.Input[str]] = None,
datastore_cluster_id: Optional[pulumi.Input[str]] = None,
datastore_id: Optional[pulumi.Input[str]] = None,
disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualMachineDiskArgs']]]]] = None,
efi_secure_boot_enabled: Optional[pulumi.Input[bool]] = None,
enable_disk_uuid: Optional[pulumi.Input[bool]] = None,
enable_logging: Optional[pulumi.Input[bool]] = None,
ept_rvi_mode: Optional[pulumi.Input[str]] = None,
extra_config: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
firmware: Optional[pulumi.Input[str]] = None,
folder: Optional[pulumi.Input[str]] = None,
force_power_off: Optional[pulumi.Input[bool]] = None,
guest_id: Optional[pulumi.Input[str]] = None,
hardware_version: Optional[pulumi.Input[int]] = None,
host_system_id: Optional[pulumi.Input[str]] = None,
hv_mode: Optional[pulumi.Input[str]] = None,
ide_controller_count: Optional[pulumi.Input[int]] = None,
ignored_guest_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
latency_sensitivity: Optional[pulumi.Input[str]] = None,
memory: Optional[pulumi.Input[int]] = None,
memory_hot_add_enabled: Optional[pulumi.Input[bool]] = None,
memory_limit: Optional[pulumi.Input[int]] = None,
memory_reservation: Optional[pulumi.Input[int]] = None,
memory_share_count: Optional[pulumi.Input[int]] = None,
memory_share_level: Optional[pulumi.Input[str]] = None,
migrate_wait_timeout: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
nested_hv_enabled: Optional[pulumi.Input[bool]] = None,
network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualMachineNetworkInterfaceArgs']]]]] = None,
num_cores_per_socket: Optional[pulumi.Input[int]] = None,
num_cpus: Optional[pulumi.Input[int]] = None,
ovf_deploy: Optional[pulumi.Input[pulumi.InputType['VirtualMachineOvfDeployArgs']]] = None,
pci_device_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
poweron_timeout: Optional[pulumi.Input[int]] = None,
replace_trigger: Optional[pulumi.Input[str]] = None,
resource_pool_id: Optional[pulumi.Input[str]] = None,
run_tools_scripts_after_power_on: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_after_resume: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_before_guest_reboot: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_before_guest_shutdown: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_before_guest_standby: Optional[pulumi.Input[bool]] = None,
sata_controller_count: Optional[pulumi.Input[int]] = None,
scsi_bus_sharing: Optional[pulumi.Input[str]] = None,
scsi_controller_count: Optional[pulumi.Input[int]] = None,
scsi_type: Optional[pulumi.Input[str]] = None,
shutdown_wait_timeout: Optional[pulumi.Input[int]] = None,
storage_policy_id: Optional[pulumi.Input[str]] = None,
swap_placement_policy: Optional[pulumi.Input[str]] = None,
sync_time_with_host: Optional[pulumi.Input[bool]] = None,
sync_time_with_host_periodically: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
vapp: Optional[pulumi.Input[pulumi.InputType['VirtualMachineVappArgs']]] = None,
vbs_enabled: Optional[pulumi.Input[bool]] = None,
vvtd_enabled: Optional[pulumi.Input[bool]] = None,
wait_for_guest_ip_timeout: Optional[pulumi.Input[int]] = None,
wait_for_guest_net_routable: Optional[pulumi.Input[bool]] = None,
wait_for_guest_net_timeout: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Create a VirtualMachine resource with the given unique name, props, and options.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] alternate_guest_name: The guest name for the operating system
when `guest_id` is `other` or `other-64`.
:param pulumi.Input[str] annotation: A user-provided description of the virtual machine.
The default is no annotation.
:param pulumi.Input[int] boot_delay: The number of milliseconds to wait before starting
the boot sequence. The default is no delay.
:param pulumi.Input[int] boot_retry_delay: The number of milliseconds to wait before
retrying the boot sequence. This only valid if `boot_retry_enabled` is true.
Default: `10000` (10 seconds).
:param pulumi.Input[bool] boot_retry_enabled: If set to true, a virtual machine that
fails to boot will try again after the delay defined in `boot_retry_delay`.
Default: `false`.
:param pulumi.Input[pulumi.InputType['VirtualMachineCdromArgs']] cdrom: A specification for a CDROM device on this virtual
machine. See CDROM options below.
:param pulumi.Input[pulumi.InputType['VirtualMachineCloneArgs']] clone: When specified, the VM will be created as a clone of a
specified template. Optional customization options can be submitted as well.
See creating a virtual machine from a
template for more details.
:param pulumi.Input[bool] cpu_hot_add_enabled: Allow CPUs to be added to this virtual
machine while it is running.
:param pulumi.Input[bool] cpu_hot_remove_enabled: Allow CPUs to be removed to this
virtual machine while it is running.
:param pulumi.Input[int] cpu_limit: The maximum amount of CPU (in MHz) that this virtual
machine can consume, regardless of available resources. The default is no
limit.
:param pulumi.Input[bool] cpu_performance_counters_enabled: Enable CPU performance
counters on this virtual machine. Default: `false`.
:param pulumi.Input[int] cpu_reservation: The amount of CPU (in MHz) that this virtual
machine is guaranteed. The default is no reservation.
:param pulumi.Input[int] cpu_share_count: The number of CPU shares allocated to the
virtual machine when the `cpu_share_level` is `custom`.
:param pulumi.Input[str] cpu_share_level: The allocation level for CPU resources. Can be
one of `high`, `low`, `normal`, or `custom`. Default: `custom`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: Map of custom attribute ids to attribute
value strings to set for virtual machine.
:param pulumi.Input[str] datacenter_id: The datacenter id. Required only when deploying
an ovf template.
:param pulumi.Input[str] datastore_cluster_id: The managed object reference
ID of the datastore cluster ID to use. This setting
applies to entire virtual machine and implies that you wish to use Storage
DRS with this virtual machine. See the section on virtual machine
migration for details on changing this value.
:param pulumi.Input[str] datastore_id: The datastore ID that the ISO is located in.
               Required for using a datastore ISO. Conflicts with `client_device`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualMachineDiskArgs']]]] disks: A specification for a virtual disk device on this virtual
machine. See disk options below.
:param pulumi.Input[bool] efi_secure_boot_enabled: When the `firmware` type is set to is
`efi`, this enables EFI secure boot. Default: `false`.
:param pulumi.Input[bool] enable_disk_uuid: Expose the UUIDs of attached virtual disks to
the virtual machine, allowing access to them in the guest. Default: `false`.
:param pulumi.Input[bool] enable_logging: Enable logging of virtual machine events to a
log file stored in the virtual machine directory. Default: `false`.
:param pulumi.Input[str] ept_rvi_mode: The EPT/RVI (hardware memory virtualization)
setting for this virtual machine. Can be one of `automatic`, `on`, or `off`.
Default: `automatic`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] extra_config: Extra configuration data for this virtual
machine. Can be used to supply advanced parameters not normally in
configuration, such as instance metadata.
:param pulumi.Input[str] firmware: The firmware interface to use on the virtual machine.
Can be one of `bios` or `EFI`. Default: `bios`.
:param pulumi.Input[str] folder: The path to the folder to put this virtual machine in,
relative to the datacenter that the resource pool is in.
:param pulumi.Input[bool] force_power_off: If a guest shutdown failed or timed out while
updating or destroying (see
`shutdown_wait_timeout`), force the power-off of
the virtual machine. Default: `true`.
:param pulumi.Input[str] guest_id: The guest ID for the operating system type. For a
full list of possible values, see [here][vmware-docs-guest-ids]. Default: `other-64`.
:param pulumi.Input[int] hardware_version: The hardware version number. Valid range
is from 4 to 15. The hardware version cannot be downgraded. See [virtual
machine hardware compatibility][virtual-machine-hardware-compatibility] for
more details.
:param pulumi.Input[str] host_system_id: An optional managed object reference
ID of a host to put this virtual machine on. See the
section on virtual machine migration for
details on changing this value. If a `host_system_id` is not supplied,
vSphere will select a host in the resource pool to place the virtual machine,
according to any defaults or DRS policies in place.
:param pulumi.Input[str] hv_mode: The (non-nested) hardware virtualization setting for
this virtual machine. Can be one of `hvAuto`, `hvOn`, or `hvOff`. Default:
`hvAuto`.
:param pulumi.Input[int] ide_controller_count: The number of IDE controllers that Terraform manages on this virtual machine. This directly affects the amount of disks
you can add to the virtual machine and the maximum disk unit number. Note that lowering this value does not remove
controllers.
:param pulumi.Input[Sequence[pulumi.Input[str]]] ignored_guest_ips: List of IP addresses and CIDR networks to
ignore while waiting for an available IP address using either of the waiters.
Any IP addresses in this list will be ignored if they show up so that the
waiter will continue to wait for a real IP address. Default: [].
:param pulumi.Input[str] latency_sensitivity: Controls the scheduling delay of the
virtual machine. Use a higher sensitivity for applications that require lower
latency, such as VOIP, media player applications, or applications that
require frequent access to mouse or keyboard devices. Can be one of `low`,
`normal`, `medium`, or `high`.
:param pulumi.Input[int] memory: The size of the virtual machine's memory, in MB.
Default: `1024` (1 GB).
:param pulumi.Input[bool] memory_hot_add_enabled: Allow memory to be added to this
virtual machine while it is running.
:param pulumi.Input[int] memory_limit: The maximum amount of memory (in MB) that this
virtual machine can consume, regardless of available resources. The default
is no limit.
:param pulumi.Input[int] memory_reservation: The amount of memory (in MB) that this
virtual machine is guaranteed. The default is no reservation.
:param pulumi.Input[int] memory_share_count: The number of memory shares allocated to
the virtual machine when the `memory_share_level` is `custom`.
:param pulumi.Input[str] memory_share_level: The allocation level for memory resources.
Can be one of `high`, `low`, `normal`, or `custom`. Default: `custom`.
:param pulumi.Input[int] migrate_wait_timeout: The amount of time, in minutes, to wait
for a virtual machine migration to complete before failing. Default: 10
minutes. Also see the section on virtual machine
migration.
:param pulumi.Input[str] name: The name of the virtual machine.
:param pulumi.Input[bool] nested_hv_enabled: Enable nested hardware virtualization on
this virtual machine, facilitating nested virtualization in the guest.
Default: `false`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualMachineNetworkInterfaceArgs']]]] network_interfaces: A specification for a virtual NIC on this
virtual machine. See network interface options
below.
:param pulumi.Input[int] num_cores_per_socket: The number of cores per socket in this
virtual machine. The number of vCPUs on the virtual machine will be
`num_cpus` divided by `num_cores_per_socket`. If specified, the value
supplied to `num_cpus` must be evenly divisible by this value. Default: `1`.
:param pulumi.Input[int] num_cpus: The total number of virtual processor cores to assign
to this virtual machine. Default: `1`.
:param pulumi.Input[pulumi.InputType['VirtualMachineOvfDeployArgs']] ovf_deploy: When specified, the VM will be deployed from the
provided ovf/ova template. See creating a virtual machine from a
ovf/ova template for more details.
:param pulumi.Input[Sequence[pulumi.Input[str]]] pci_device_ids: List of host PCI device IDs to create PCI
passthroughs for.
:param pulumi.Input[int] poweron_timeout: The amount of time, in seconds, that we will be trying to power on a VM
:param pulumi.Input[str] replace_trigger: Triggers replacement of resource whenever it changes.
`replace_trigger = sha256(format("%s-%s",data.template_file.cloud_init_metadata.rendered,data.template_file.cloud_init_userdata.rendered))`
will fingerprint the changes in cloud_init metadata and userdata templates. This will enable a replacement
               of the resource whenever the dependent template renders a new configuration. (Forces a replacement)
:param pulumi.Input[str] resource_pool_id: The managed object reference
ID of the resource pool to put this virtual machine in.
See the section on virtual machine migration
for details on changing this value.
:param pulumi.Input[bool] run_tools_scripts_after_power_on: Enable the execution of
post-power-on scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[bool] run_tools_scripts_after_resume: Enable the execution of
post-resume scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[bool] run_tools_scripts_before_guest_reboot: Enable the execution of
pre-reboot scripts when VMware tools is installed. Default: `false`.
:param pulumi.Input[bool] run_tools_scripts_before_guest_shutdown: Enable the execution
of pre-shutdown scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[bool] run_tools_scripts_before_guest_standby: Enable the execution of
pre-standby scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[int] sata_controller_count: The number of SATA controllers that Terraform manages on this virtual machine. This directly affects the amount of disks
you can add to the virtual machine and the maximum disk unit number. Note that lowering this value does not remove
controllers.
:param pulumi.Input[str] scsi_bus_sharing: Mode for sharing the SCSI bus. The modes are
physicalSharing, virtualSharing, and noSharing. Default: `noSharing`.
:param pulumi.Input[int] scsi_controller_count: The number of SCSI controllers that
this provider manages on this virtual machine. This directly affects the amount
of disks you can add to the virtual machine and the maximum disk unit number.
Note that lowering this value does not remove controllers. Default: `1`.
:param pulumi.Input[str] scsi_type: The type of SCSI bus this virtual machine will have.
Can be one of lsilogic (LSI Logic Parallel), lsilogic-sas (LSI Logic SAS) or
               pvscsi (VMware Paravirtual). Default: `pvscsi`.
:param pulumi.Input[int] shutdown_wait_timeout: The amount of time, in minutes, to wait
for a graceful guest shutdown when making necessary updates to the virtual
machine. If `force_power_off` is set to true, the VM will be force powered-off
after this timeout, otherwise an error is returned. Default: 3 minutes.
:param pulumi.Input[str] storage_policy_id: The UUID of the storage policy to assign to this disk.
:param pulumi.Input[str] swap_placement_policy: The swap file placement policy for this
virtual machine. Can be one of `inherit`, `hostLocal`, or `vmDirectory`.
Default: `inherit`.
:param pulumi.Input[bool] sync_time_with_host: Enable guest clock synchronization with the host.
On vSphere 7 U1 and above, with only this setting the clock is synchronized on
startup and resume so consider also setting `sync_time_with_host_periodically`.
Requires VMware tools to be installed. Default: `false`.
:param pulumi.Input[bool] sync_time_with_host_periodically: Enable periodic clock
synchronization with the host. Supported only on vSphere 7 U1 and above.
On older versions setting `sync_time_with_host` is enough for periodic
synchronization. Requires VMware tools to be installed. Default: `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
:param pulumi.Input[pulumi.InputType['VirtualMachineVappArgs']] vapp: Optional vApp configuration. The only sub-key available
is `properties`, which is a key/value map of properties for virtual machines
imported from OVF or OVA files. See Using vApp properties to supply OVF/OVA
configuration for
more details.
:param pulumi.Input[bool] vbs_enabled: Enable Virtualization Based Security. Requires
`firmware` to be `efi`, and `vvtd_enabled`, `nested_hv_enabled` and
`efi_secure_boot_enabled` must all have a value of `true`. Supported on
vSphere 6.7 and higher. Default: `false`.
:param pulumi.Input[bool] vvtd_enabled: Flag to specify if Intel Virtualization Technology
for Directed I/O is enabled for this virtual machine (_I/O MMU_ in the
vSphere Client). Supported on vSphere 6.7 and higher. Default: `false`.
:param pulumi.Input[int] wait_for_guest_ip_timeout: The amount of time, in minutes, to
wait for an available guest IP address on this virtual machine. This should
only be used if your version of VMware Tools does not allow the
`wait_for_guest_net_timeout` waiter to be
used. A value less than 1 disables the waiter. Default: 0.
:param pulumi.Input[bool] wait_for_guest_net_routable: Controls whether or not the guest
network waiter waits for a routable address. When `false`, the waiter does
not wait for a default gateway, nor are IP addresses checked against any
discovered default gateways as part of its success criteria. This property is
ignored if the `wait_for_guest_ip_timeout`
waiter is used. Default: `true`.
:param pulumi.Input[int] wait_for_guest_net_timeout: The amount of time, in minutes, to
wait for an available IP address on this virtual machine's NICs. Older
versions of VMware Tools do not populate this property. In those cases, this
waiter can be disabled and the
`wait_for_guest_ip_timeout` waiter can be used
instead. A value less than 1 disables the waiter. Default: 5 minutes.
"""
...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: VirtualMachineArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Create a VirtualMachine resource with the given unique name, props, and options.

        This overload accepts a single, fully-typed ``VirtualMachineArgs`` object
        instead of individual keyword arguments; the real dispatch happens in the
        non-overloaded ``__init__`` below.

        :param str resource_name: The name of the resource.
        :param VirtualMachineArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(VirtualMachineArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
alternate_guest_name: Optional[pulumi.Input[str]] = None,
annotation: Optional[pulumi.Input[str]] = None,
boot_delay: Optional[pulumi.Input[int]] = None,
boot_retry_delay: Optional[pulumi.Input[int]] = None,
boot_retry_enabled: Optional[pulumi.Input[bool]] = None,
cdrom: Optional[pulumi.Input[pulumi.InputType['VirtualMachineCdromArgs']]] = None,
clone: Optional[pulumi.Input[pulumi.InputType['VirtualMachineCloneArgs']]] = None,
cpu_hot_add_enabled: Optional[pulumi.Input[bool]] = None,
cpu_hot_remove_enabled: Optional[pulumi.Input[bool]] = None,
cpu_limit: Optional[pulumi.Input[int]] = None,
cpu_performance_counters_enabled: Optional[pulumi.Input[bool]] = None,
cpu_reservation: Optional[pulumi.Input[int]] = None,
cpu_share_count: Optional[pulumi.Input[int]] = None,
cpu_share_level: Optional[pulumi.Input[str]] = None,
custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
datacenter_id: Optional[pulumi.Input[str]] = None,
datastore_cluster_id: Optional[pulumi.Input[str]] = None,
datastore_id: Optional[pulumi.Input[str]] = None,
disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualMachineDiskArgs']]]]] = None,
efi_secure_boot_enabled: Optional[pulumi.Input[bool]] = None,
enable_disk_uuid: Optional[pulumi.Input[bool]] = None,
enable_logging: Optional[pulumi.Input[bool]] = None,
ept_rvi_mode: Optional[pulumi.Input[str]] = None,
extra_config: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
firmware: Optional[pulumi.Input[str]] = None,
folder: Optional[pulumi.Input[str]] = None,
force_power_off: Optional[pulumi.Input[bool]] = None,
guest_id: Optional[pulumi.Input[str]] = None,
hardware_version: Optional[pulumi.Input[int]] = None,
host_system_id: Optional[pulumi.Input[str]] = None,
hv_mode: Optional[pulumi.Input[str]] = None,
ide_controller_count: Optional[pulumi.Input[int]] = None,
ignored_guest_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
latency_sensitivity: Optional[pulumi.Input[str]] = None,
memory: Optional[pulumi.Input[int]] = None,
memory_hot_add_enabled: Optional[pulumi.Input[bool]] = None,
memory_limit: Optional[pulumi.Input[int]] = None,
memory_reservation: Optional[pulumi.Input[int]] = None,
memory_share_count: Optional[pulumi.Input[int]] = None,
memory_share_level: Optional[pulumi.Input[str]] = None,
migrate_wait_timeout: Optional[pulumi.Input[int]] = None,
name: Optional[pulumi.Input[str]] = None,
nested_hv_enabled: Optional[pulumi.Input[bool]] = None,
network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualMachineNetworkInterfaceArgs']]]]] = None,
num_cores_per_socket: Optional[pulumi.Input[int]] = None,
num_cpus: Optional[pulumi.Input[int]] = None,
ovf_deploy: Optional[pulumi.Input[pulumi.InputType['VirtualMachineOvfDeployArgs']]] = None,
pci_device_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
poweron_timeout: Optional[pulumi.Input[int]] = None,
replace_trigger: Optional[pulumi.Input[str]] = None,
resource_pool_id: Optional[pulumi.Input[str]] = None,
run_tools_scripts_after_power_on: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_after_resume: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_before_guest_reboot: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_before_guest_shutdown: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_before_guest_standby: Optional[pulumi.Input[bool]] = None,
sata_controller_count: Optional[pulumi.Input[int]] = None,
scsi_bus_sharing: Optional[pulumi.Input[str]] = None,
scsi_controller_count: Optional[pulumi.Input[int]] = None,
scsi_type: Optional[pulumi.Input[str]] = None,
shutdown_wait_timeout: Optional[pulumi.Input[int]] = None,
storage_policy_id: Optional[pulumi.Input[str]] = None,
swap_placement_policy: Optional[pulumi.Input[str]] = None,
sync_time_with_host: Optional[pulumi.Input[bool]] = None,
sync_time_with_host_periodically: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
vapp: Optional[pulumi.Input[pulumi.InputType['VirtualMachineVappArgs']]] = None,
vbs_enabled: Optional[pulumi.Input[bool]] = None,
vvtd_enabled: Optional[pulumi.Input[bool]] = None,
wait_for_guest_ip_timeout: Optional[pulumi.Input[int]] = None,
wait_for_guest_net_routable: Optional[pulumi.Input[bool]] = None,
wait_for_guest_net_timeout: Optional[pulumi.Input[int]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = VirtualMachineArgs.__new__(VirtualMachineArgs)
__props__.__dict__["alternate_guest_name"] = alternate_guest_name
__props__.__dict__["annotation"] = annotation
__props__.__dict__["boot_delay"] = boot_delay
__props__.__dict__["boot_retry_delay"] = boot_retry_delay
__props__.__dict__["boot_retry_enabled"] = boot_retry_enabled
__props__.__dict__["cdrom"] = cdrom
__props__.__dict__["clone"] = clone
__props__.__dict__["cpu_hot_add_enabled"] = cpu_hot_add_enabled
__props__.__dict__["cpu_hot_remove_enabled"] = cpu_hot_remove_enabled
__props__.__dict__["cpu_limit"] = cpu_limit
__props__.__dict__["cpu_performance_counters_enabled"] = cpu_performance_counters_enabled
__props__.__dict__["cpu_reservation"] = cpu_reservation
__props__.__dict__["cpu_share_count"] = cpu_share_count
__props__.__dict__["cpu_share_level"] = cpu_share_level
__props__.__dict__["custom_attributes"] = custom_attributes
__props__.__dict__["datacenter_id"] = datacenter_id
__props__.__dict__["datastore_cluster_id"] = datastore_cluster_id
__props__.__dict__["datastore_id"] = datastore_id
__props__.__dict__["disks"] = disks
__props__.__dict__["efi_secure_boot_enabled"] = efi_secure_boot_enabled
__props__.__dict__["enable_disk_uuid"] = enable_disk_uuid
__props__.__dict__["enable_logging"] = enable_logging
__props__.__dict__["ept_rvi_mode"] = ept_rvi_mode
__props__.__dict__["extra_config"] = extra_config
__props__.__dict__["firmware"] = firmware
__props__.__dict__["folder"] = folder
__props__.__dict__["force_power_off"] = force_power_off
__props__.__dict__["guest_id"] = guest_id
__props__.__dict__["hardware_version"] = hardware_version
__props__.__dict__["host_system_id"] = host_system_id
__props__.__dict__["hv_mode"] = hv_mode
__props__.__dict__["ide_controller_count"] = ide_controller_count
__props__.__dict__["ignored_guest_ips"] = ignored_guest_ips
__props__.__dict__["latency_sensitivity"] = latency_sensitivity
__props__.__dict__["memory"] = memory
__props__.__dict__["memory_hot_add_enabled"] = memory_hot_add_enabled
__props__.__dict__["memory_limit"] = memory_limit
__props__.__dict__["memory_reservation"] = memory_reservation
__props__.__dict__["memory_share_count"] = memory_share_count
__props__.__dict__["memory_share_level"] = memory_share_level
__props__.__dict__["migrate_wait_timeout"] = migrate_wait_timeout
__props__.__dict__["name"] = name
__props__.__dict__["nested_hv_enabled"] = nested_hv_enabled
__props__.__dict__["network_interfaces"] = network_interfaces
__props__.__dict__["num_cores_per_socket"] = num_cores_per_socket
__props__.__dict__["num_cpus"] = num_cpus
__props__.__dict__["ovf_deploy"] = ovf_deploy
__props__.__dict__["pci_device_ids"] = pci_device_ids
__props__.__dict__["poweron_timeout"] = poweron_timeout
__props__.__dict__["replace_trigger"] = replace_trigger
if resource_pool_id is None and not opts.urn:
raise TypeError("Missing required property 'resource_pool_id'")
__props__.__dict__["resource_pool_id"] = resource_pool_id
__props__.__dict__["run_tools_scripts_after_power_on"] = run_tools_scripts_after_power_on
__props__.__dict__["run_tools_scripts_after_resume"] = run_tools_scripts_after_resume
__props__.__dict__["run_tools_scripts_before_guest_reboot"] = run_tools_scripts_before_guest_reboot
__props__.__dict__["run_tools_scripts_before_guest_shutdown"] = run_tools_scripts_before_guest_shutdown
__props__.__dict__["run_tools_scripts_before_guest_standby"] = run_tools_scripts_before_guest_standby
__props__.__dict__["sata_controller_count"] = sata_controller_count
__props__.__dict__["scsi_bus_sharing"] = scsi_bus_sharing
__props__.__dict__["scsi_controller_count"] = scsi_controller_count
__props__.__dict__["scsi_type"] = scsi_type
__props__.__dict__["shutdown_wait_timeout"] = shutdown_wait_timeout
__props__.__dict__["storage_policy_id"] = storage_policy_id
__props__.__dict__["swap_placement_policy"] = swap_placement_policy
__props__.__dict__["sync_time_with_host"] = sync_time_with_host
__props__.__dict__["sync_time_with_host_periodically"] = sync_time_with_host_periodically
__props__.__dict__["tags"] = tags
__props__.__dict__["vapp"] = vapp
__props__.__dict__["vbs_enabled"] = vbs_enabled
__props__.__dict__["vvtd_enabled"] = vvtd_enabled
__props__.__dict__["wait_for_guest_ip_timeout"] = wait_for_guest_ip_timeout
__props__.__dict__["wait_for_guest_net_routable"] = wait_for_guest_net_routable
__props__.__dict__["wait_for_guest_net_timeout"] = wait_for_guest_net_timeout
__props__.__dict__["change_version"] = None
__props__.__dict__["default_ip_address"] = None
__props__.__dict__["guest_ip_addresses"] = None
__props__.__dict__["imported"] = None
__props__.__dict__["moid"] = None
__props__.__dict__["reboot_required"] = None
__props__.__dict__["uuid"] = None
__props__.__dict__["vapp_transports"] = None
__props__.__dict__["vmware_tools_status"] = None
__props__.__dict__["vmx_path"] = None
super(VirtualMachine, __self__).__init__(
'vsphere:index/virtualMachine:VirtualMachine',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
alternate_guest_name: Optional[pulumi.Input[str]] = None,
annotation: Optional[pulumi.Input[str]] = None,
boot_delay: Optional[pulumi.Input[int]] = None,
boot_retry_delay: Optional[pulumi.Input[int]] = None,
boot_retry_enabled: Optional[pulumi.Input[bool]] = None,
cdrom: Optional[pulumi.Input[pulumi.InputType['VirtualMachineCdromArgs']]] = None,
change_version: Optional[pulumi.Input[str]] = None,
clone: Optional[pulumi.Input[pulumi.InputType['VirtualMachineCloneArgs']]] = None,
cpu_hot_add_enabled: Optional[pulumi.Input[bool]] = None,
cpu_hot_remove_enabled: Optional[pulumi.Input[bool]] = None,
cpu_limit: Optional[pulumi.Input[int]] = None,
cpu_performance_counters_enabled: Optional[pulumi.Input[bool]] = None,
cpu_reservation: Optional[pulumi.Input[int]] = None,
cpu_share_count: Optional[pulumi.Input[int]] = None,
cpu_share_level: Optional[pulumi.Input[str]] = None,
custom_attributes: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
datacenter_id: Optional[pulumi.Input[str]] = None,
datastore_cluster_id: Optional[pulumi.Input[str]] = None,
datastore_id: Optional[pulumi.Input[str]] = None,
default_ip_address: Optional[pulumi.Input[str]] = None,
disks: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualMachineDiskArgs']]]]] = None,
efi_secure_boot_enabled: Optional[pulumi.Input[bool]] = None,
enable_disk_uuid: Optional[pulumi.Input[bool]] = None,
enable_logging: Optional[pulumi.Input[bool]] = None,
ept_rvi_mode: Optional[pulumi.Input[str]] = None,
extra_config: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
firmware: Optional[pulumi.Input[str]] = None,
folder: Optional[pulumi.Input[str]] = None,
force_power_off: Optional[pulumi.Input[bool]] = None,
guest_id: Optional[pulumi.Input[str]] = None,
guest_ip_addresses: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
hardware_version: Optional[pulumi.Input[int]] = None,
host_system_id: Optional[pulumi.Input[str]] = None,
hv_mode: Optional[pulumi.Input[str]] = None,
ide_controller_count: Optional[pulumi.Input[int]] = None,
ignored_guest_ips: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
imported: Optional[pulumi.Input[bool]] = None,
latency_sensitivity: Optional[pulumi.Input[str]] = None,
memory: Optional[pulumi.Input[int]] = None,
memory_hot_add_enabled: Optional[pulumi.Input[bool]] = None,
memory_limit: Optional[pulumi.Input[int]] = None,
memory_reservation: Optional[pulumi.Input[int]] = None,
memory_share_count: Optional[pulumi.Input[int]] = None,
memory_share_level: Optional[pulumi.Input[str]] = None,
migrate_wait_timeout: Optional[pulumi.Input[int]] = None,
moid: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
nested_hv_enabled: Optional[pulumi.Input[bool]] = None,
network_interfaces: Optional[pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualMachineNetworkInterfaceArgs']]]]] = None,
num_cores_per_socket: Optional[pulumi.Input[int]] = None,
num_cpus: Optional[pulumi.Input[int]] = None,
ovf_deploy: Optional[pulumi.Input[pulumi.InputType['VirtualMachineOvfDeployArgs']]] = None,
pci_device_ids: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
poweron_timeout: Optional[pulumi.Input[int]] = None,
reboot_required: Optional[pulumi.Input[bool]] = None,
replace_trigger: Optional[pulumi.Input[str]] = None,
resource_pool_id: Optional[pulumi.Input[str]] = None,
run_tools_scripts_after_power_on: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_after_resume: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_before_guest_reboot: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_before_guest_shutdown: Optional[pulumi.Input[bool]] = None,
run_tools_scripts_before_guest_standby: Optional[pulumi.Input[bool]] = None,
sata_controller_count: Optional[pulumi.Input[int]] = None,
scsi_bus_sharing: Optional[pulumi.Input[str]] = None,
scsi_controller_count: Optional[pulumi.Input[int]] = None,
scsi_type: Optional[pulumi.Input[str]] = None,
shutdown_wait_timeout: Optional[pulumi.Input[int]] = None,
storage_policy_id: Optional[pulumi.Input[str]] = None,
swap_placement_policy: Optional[pulumi.Input[str]] = None,
sync_time_with_host: Optional[pulumi.Input[bool]] = None,
sync_time_with_host_periodically: Optional[pulumi.Input[bool]] = None,
tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
uuid: Optional[pulumi.Input[str]] = None,
vapp: Optional[pulumi.Input[pulumi.InputType['VirtualMachineVappArgs']]] = None,
vapp_transports: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
vbs_enabled: Optional[pulumi.Input[bool]] = None,
vmware_tools_status: Optional[pulumi.Input[str]] = None,
vmx_path: Optional[pulumi.Input[str]] = None,
vvtd_enabled: Optional[pulumi.Input[bool]] = None,
wait_for_guest_ip_timeout: Optional[pulumi.Input[int]] = None,
wait_for_guest_net_routable: Optional[pulumi.Input[bool]] = None,
wait_for_guest_net_timeout: Optional[pulumi.Input[int]] = None) -> 'VirtualMachine':
"""
Get an existing VirtualMachine resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] alternate_guest_name: The guest name for the operating system
when `guest_id` is `other` or `other-64`.
:param pulumi.Input[str] annotation: A user-provided description of the virtual machine.
The default is no annotation.
:param pulumi.Input[int] boot_delay: The number of milliseconds to wait before starting
the boot sequence. The default is no delay.
:param pulumi.Input[int] boot_retry_delay: The number of milliseconds to wait before
retrying the boot sequence. This only valid if `boot_retry_enabled` is true.
Default: `10000` (10 seconds).
:param pulumi.Input[bool] boot_retry_enabled: If set to true, a virtual machine that
fails to boot will try again after the delay defined in `boot_retry_delay`.
Default: `false`.
:param pulumi.Input[pulumi.InputType['VirtualMachineCdromArgs']] cdrom: A specification for a CDROM device on this virtual
machine. See CDROM options below.
:param pulumi.Input[str] change_version: A unique identifier for a given version of the last
configuration applied, such the timestamp of the last update to the
configuration.
:param pulumi.Input[pulumi.InputType['VirtualMachineCloneArgs']] clone: When specified, the VM will be created as a clone of a
specified template. Optional customization options can be submitted as well.
See creating a virtual machine from a
template for more details.
:param pulumi.Input[bool] cpu_hot_add_enabled: Allow CPUs to be added to this virtual
machine while it is running.
:param pulumi.Input[bool] cpu_hot_remove_enabled: Allow CPUs to be removed from this
virtual machine while it is running.
:param pulumi.Input[int] cpu_limit: The maximum amount of CPU (in MHz) that this virtual
machine can consume, regardless of available resources. The default is no
limit.
:param pulumi.Input[bool] cpu_performance_counters_enabled: Enable CPU performance
counters on this virtual machine. Default: `false`.
:param pulumi.Input[int] cpu_reservation: The amount of CPU (in MHz) that this virtual
machine is guaranteed. The default is no reservation.
:param pulumi.Input[int] cpu_share_count: The number of CPU shares allocated to the
virtual machine when the `cpu_share_level` is `custom`.
:param pulumi.Input[str] cpu_share_level: The allocation level for CPU resources. Can be
one of `high`, `low`, `normal`, or `custom`. Default: `custom`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] custom_attributes: Map of custom attribute ids to attribute
value strings to set for virtual machine.
:param pulumi.Input[str] datacenter_id: The datacenter id. Required only when deploying
an ovf template.
:param pulumi.Input[str] datastore_cluster_id: The managed object reference
ID of the datastore cluster ID to use. This setting
applies to entire virtual machine and implies that you wish to use Storage
DRS with this virtual machine. See the section on virtual machine
migration for details on changing this value.
:param pulumi.Input[str] datastore_id: The datastore ID that the ISO is located in.
    Required for using a datastore ISO. Conflicts with `client_device`.
:param pulumi.Input[str] default_ip_address: The IP address selected by the provider to be used with
any provisioners configured on this resource.
Whenever possible, this is the first IPv4 address that is reachable through
the default gateway configured on the machine, then the first reachable IPv6
address, and then the first general discovered address if neither exist. If
VMware tools is not running on the virtual machine, or if the VM is powered
off, this value will be blank.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualMachineDiskArgs']]]] disks: A specification for a virtual disk device on this virtual
machine. See disk options below.
:param pulumi.Input[bool] efi_secure_boot_enabled: When the `firmware` type is set to
`efi`, this enables EFI secure boot. Default: `false`.
:param pulumi.Input[bool] enable_disk_uuid: Expose the UUIDs of attached virtual disks to
the virtual machine, allowing access to them in the guest. Default: `false`.
:param pulumi.Input[bool] enable_logging: Enable logging of virtual machine events to a
log file stored in the virtual machine directory. Default: `false`.
:param pulumi.Input[str] ept_rvi_mode: The EPT/RVI (hardware memory virtualization)
setting for this virtual machine. Can be one of `automatic`, `on`, or `off`.
Default: `automatic`.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] extra_config: Extra configuration data for this virtual
machine. Can be used to supply advanced parameters not normally in
configuration, such as instance metadata.
:param pulumi.Input[str] firmware: The firmware interface to use on the virtual machine.
Can be one of `bios` or `EFI`. Default: `bios`.
:param pulumi.Input[str] folder: The path to the folder to put this virtual machine in,
relative to the datacenter that the resource pool is in.
:param pulumi.Input[bool] force_power_off: If a guest shutdown failed or timed out while
updating or destroying (see
`shutdown_wait_timeout`), force the power-off of
the virtual machine. Default: `true`.
:param pulumi.Input[str] guest_id: The guest ID for the operating system type. For a
full list of possible values, see [here][vmware-docs-guest-ids]. Default: `other-64`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] guest_ip_addresses: The current list of IP addresses on this machine,
including the value of `default_ip_address`. If VMware tools is not running
on the virtual machine, or if the VM is powered off, this list will be empty.
* `moid`: The managed object reference ID of the created
virtual machine.
:param pulumi.Input[int] hardware_version: The hardware version number. Valid range
is from 4 to 15. The hardware version cannot be downgraded. See [virtual
machine hardware compatibility][virtual-machine-hardware-compatibility] for
more details.
:param pulumi.Input[str] host_system_id: An optional managed object reference
ID of a host to put this virtual machine on. See the
section on virtual machine migration for
details on changing this value. If a `host_system_id` is not supplied,
vSphere will select a host in the resource pool to place the virtual machine,
according to any defaults or DRS policies in place.
:param pulumi.Input[str] hv_mode: The (non-nested) hardware virtualization setting for
this virtual machine. Can be one of `hvAuto`, `hvOn`, or `hvOff`. Default:
`hvAuto`.
:param pulumi.Input[int] ide_controller_count: The number of IDE controllers that Terraform manages on this virtual machine. This directly affects the amount of disks
you can add to the virtual machine and the maximum disk unit number. Note that lowering this value does not remove
controllers.
:param pulumi.Input[Sequence[pulumi.Input[str]]] ignored_guest_ips: List of IP addresses and CIDR networks to
ignore while waiting for an available IP address using either of the waiters.
Any IP addresses in this list will be ignored if they show up so that the
waiter will continue to wait for a real IP address. Default: [].
:param pulumi.Input[bool] imported: This is flagged if the virtual machine has been imported, or the
state has been migrated from a previous version of the resource. It
influences the behavior of the first post-import apply operation. See the
section on importing below.
:param pulumi.Input[str] latency_sensitivity: Controls the scheduling delay of the
virtual machine. Use a higher sensitivity for applications that require lower
latency, such as VOIP, media player applications, or applications that
require frequent access to mouse or keyboard devices. Can be one of `low`,
`normal`, `medium`, or `high`.
:param pulumi.Input[int] memory: The size of the virtual machine's memory, in MB.
Default: `1024` (1 GB).
:param pulumi.Input[bool] memory_hot_add_enabled: Allow memory to be added to this
virtual machine while it is running.
:param pulumi.Input[int] memory_limit: The maximum amount of memory (in MB) that this
virtual machine can consume, regardless of available resources. The default
is no limit.
:param pulumi.Input[int] memory_reservation: The amount of memory (in MB) that this
virtual machine is guaranteed. The default is no reservation.
:param pulumi.Input[int] memory_share_count: The number of memory shares allocated to
the virtual machine when the `memory_share_level` is `custom`.
:param pulumi.Input[str] memory_share_level: The allocation level for memory resources.
Can be one of `high`, `low`, `normal`, or `custom`. Default: `custom`.
:param pulumi.Input[int] migrate_wait_timeout: The amount of time, in minutes, to wait
for a virtual machine migration to complete before failing. Default: 10
minutes. Also see the section on virtual machine
migration.
:param pulumi.Input[str] moid: The machine object ID from VMWare
:param pulumi.Input[str] name: The name of the virtual machine.
:param pulumi.Input[bool] nested_hv_enabled: Enable nested hardware virtualization on
this virtual machine, facilitating nested virtualization in the guest.
Default: `false`.
:param pulumi.Input[Sequence[pulumi.Input[pulumi.InputType['VirtualMachineNetworkInterfaceArgs']]]] network_interfaces: A specification for a virtual NIC on this
virtual machine. See network interface options
below.
:param pulumi.Input[int] num_cores_per_socket: The number of cores per socket in this
virtual machine. The number of vCPUs on the virtual machine will be
`num_cpus` divided by `num_cores_per_socket`. If specified, the value
supplied to `num_cpus` must be evenly divisible by this value. Default: `1`.
:param pulumi.Input[int] num_cpus: The total number of virtual processor cores to assign
to this virtual machine. Default: `1`.
:param pulumi.Input[pulumi.InputType['VirtualMachineOvfDeployArgs']] ovf_deploy: When specified, the VM will be deployed from the
provided ovf/ova template. See creating a virtual machine from a
ovf/ova template for more details.
:param pulumi.Input[Sequence[pulumi.Input[str]]] pci_device_ids: List of host PCI device IDs to create PCI
passthroughs for.
:param pulumi.Input[int] poweron_timeout: The amount of time, in seconds, that we will be trying to power on a VM
:param pulumi.Input[bool] reboot_required: Value internal to the provider used to determine if a
configuration set change requires a reboot. This value is only useful during
an update process and gets reset on refresh.
:param pulumi.Input[str] replace_trigger: Triggers replacement of resource whenever it changes.
`replace_trigger = sha256(format("%s-%s",data.template_file.cloud_init_metadata.rendered,data.template_file.cloud_init_userdata.rendered))`
will fingerprint the changes in cloud_init metadata and userdata templates. This will enable a replacement
    of the resource whenever the dependent template renders a new configuration. (Forces a replacement)
:param pulumi.Input[str] resource_pool_id: The managed object reference
ID of the resource pool to put this virtual machine in.
See the section on virtual machine migration
for details on changing this value.
:param pulumi.Input[bool] run_tools_scripts_after_power_on: Enable the execution of
post-power-on scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[bool] run_tools_scripts_after_resume: Enable the execution of
post-resume scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[bool] run_tools_scripts_before_guest_reboot: Enable the execution of
pre-reboot scripts when VMware tools is installed. Default: `false`.
:param pulumi.Input[bool] run_tools_scripts_before_guest_shutdown: Enable the execution
of pre-shutdown scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[bool] run_tools_scripts_before_guest_standby: Enable the execution of
pre-standby scripts when VMware tools is installed. Default: `true`.
:param pulumi.Input[int] sata_controller_count: The number of SATA controllers that Terraform manages on this virtual machine. This directly affects the amount of disks
you can add to the virtual machine and the maximum disk unit number. Note that lowering this value does not remove
controllers.
:param pulumi.Input[str] scsi_bus_sharing: Mode for sharing the SCSI bus. The modes are
physicalSharing, virtualSharing, and noSharing. Default: `noSharing`.
:param pulumi.Input[int] scsi_controller_count: The number of SCSI controllers that
this provider manages on this virtual machine. This directly affects the amount
of disks you can add to the virtual machine and the maximum disk unit number.
Note that lowering this value does not remove controllers. Default: `1`.
:param pulumi.Input[str] scsi_type: The type of SCSI bus this virtual machine will have.
Can be one of lsilogic (LSI Logic Parallel), lsilogic-sas (LSI Logic SAS) or
    pvscsi (VMware Paravirtual). Default: `pvscsi`.
:param pulumi.Input[int] shutdown_wait_timeout: The amount of time, in minutes, to wait
for a graceful guest shutdown when making necessary updates to the virtual
machine. If `force_power_off` is set to true, the VM will be force powered-off
after this timeout, otherwise an error is returned. Default: 3 minutes.
:param pulumi.Input[str] storage_policy_id: The UUID of the storage policy to assign to this disk.
:param pulumi.Input[str] swap_placement_policy: The swap file placement policy for this
virtual machine. Can be one of `inherit`, `hostLocal`, or `vmDirectory`.
Default: `inherit`.
:param pulumi.Input[bool] sync_time_with_host: Enable guest clock synchronization with the host.
On vSphere 7 U1 and above, with only this setting the clock is synchronized on
startup and resume so consider also setting `sync_time_with_host_periodically`.
Requires VMware tools to be installed. Default: `false`.
:param pulumi.Input[bool] sync_time_with_host_periodically: Enable periodic clock
synchronization with the host. Supported only on vSphere 7 U1 and above.
On older versions setting `sync_time_with_host` is enough for periodic
synchronization. Requires VMware tools to be installed. Default: `false`.
:param pulumi.Input[Sequence[pulumi.Input[str]]] tags: The IDs of any tags to attach to this resource.
:param pulumi.Input[str] uuid: The UUID of the virtual disk's VMDK file. This is used to track the
virtual disk on the virtual machine.
:param pulumi.Input[pulumi.InputType['VirtualMachineVappArgs']] vapp: Optional vApp configuration. The only sub-key available
is `properties`, which is a key/value map of properties for virtual machines
imported from OVF or OVA files. See Using vApp properties to supply OVF/OVA
configuration for
more details.
:param pulumi.Input[Sequence[pulumi.Input[str]]] vapp_transports: Computed value which is only valid for cloned virtual
machines. A list of vApp transport methods supported by the source virtual
machine or template.
:param pulumi.Input[bool] vbs_enabled: Enable Virtualization Based Security. Requires
`firmware` to be `efi`, and `vvtd_enabled`, `nested_hv_enabled` and
`efi_secure_boot_enabled` must all have a value of `true`. Supported on
vSphere 6.7 and higher. Default: `false`.
:param pulumi.Input[str] vmware_tools_status: The state of VMware tools in the guest. This will
determine the proper course of action for some device operations.
:param pulumi.Input[str] vmx_path: The path of the virtual machine's configuration file in the VM's
datastore.
:param pulumi.Input[bool] vvtd_enabled: Flag to specify if Intel Virtualization Technology
for Directed I/O is enabled for this virtual machine (_I/O MMU_ in the
vSphere Client). Supported on vSphere 6.7 and higher. Default: `false`.
:param pulumi.Input[int] wait_for_guest_ip_timeout: The amount of time, in minutes, to
wait for an available guest IP address on this virtual machine. This should
only be used if your version of VMware Tools does not allow the
`wait_for_guest_net_timeout` waiter to be
used. A value less than 1 disables the waiter. Default: 0.
:param pulumi.Input[bool] wait_for_guest_net_routable: Controls whether or not the guest
network waiter waits for a routable address. When `false`, the waiter does
not wait for a default gateway, nor are IP addresses checked against any
discovered default gateways as part of its success criteria. This property is
ignored if the `wait_for_guest_ip_timeout`
waiter is used. Default: `true`.
:param pulumi.Input[int] wait_for_guest_net_timeout: The amount of time, in minutes, to
wait for an available IP address on this virtual machine's NICs. Older
versions of VMware Tools do not populate this property. In those cases, this
waiter can be disabled and the
`wait_for_guest_ip_timeout` waiter can be used
instead. A value less than 1 disables the waiter. Default: 5 minutes.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _VirtualMachineState.__new__(_VirtualMachineState)
__props__.__dict__["alternate_guest_name"] = alternate_guest_name
__props__.__dict__["annotation"] = annotation
__props__.__dict__["boot_delay"] = boot_delay
__props__.__dict__["boot_retry_delay"] = boot_retry_delay
__props__.__dict__["boot_retry_enabled"] = boot_retry_enabled
__props__.__dict__["cdrom"] = cdrom
__props__.__dict__["change_version"] = change_version
__props__.__dict__["clone"] = clone
__props__.__dict__["cpu_hot_add_enabled"] = cpu_hot_add_enabled
__props__.__dict__["cpu_hot_remove_enabled"] = cpu_hot_remove_enabled
__props__.__dict__["cpu_limit"] = cpu_limit
__props__.__dict__["cpu_performance_counters_enabled"] = cpu_performance_counters_enabled
__props__.__dict__["cpu_reservation"] = cpu_reservation
__props__.__dict__["cpu_share_count"] = cpu_share_count
__props__.__dict__["cpu_share_level"] = cpu_share_level
__props__.__dict__["custom_attributes"] = custom_attributes
__props__.__dict__["datacenter_id"] = datacenter_id
__props__.__dict__["datastore_cluster_id"] = datastore_cluster_id
__props__.__dict__["datastore_id"] = datastore_id
__props__.__dict__["default_ip_address"] = default_ip_address
__props__.__dict__["disks"] = disks
__props__.__dict__["efi_secure_boot_enabled"] = efi_secure_boot_enabled
__props__.__dict__["enable_disk_uuid"] = enable_disk_uuid
__props__.__dict__["enable_logging"] = enable_logging
__props__.__dict__["ept_rvi_mode"] = ept_rvi_mode
__props__.__dict__["extra_config"] = extra_config
__props__.__dict__["firmware"] = firmware
__props__.__dict__["folder"] = folder
__props__.__dict__["force_power_off"] = force_power_off
__props__.__dict__["guest_id"] = guest_id
__props__.__dict__["guest_ip_addresses"] = guest_ip_addresses
__props__.__dict__["hardware_version"] = hardware_version
__props__.__dict__["host_system_id"] = host_system_id
__props__.__dict__["hv_mode"] = hv_mode
__props__.__dict__["ide_controller_count"] = ide_controller_count
__props__.__dict__["ignored_guest_ips"] = ignored_guest_ips
__props__.__dict__["imported"] = imported
__props__.__dict__["latency_sensitivity"] = latency_sensitivity
__props__.__dict__["memory"] = memory
__props__.__dict__["memory_hot_add_enabled"] = memory_hot_add_enabled
__props__.__dict__["memory_limit"] = memory_limit
__props__.__dict__["memory_reservation"] = memory_reservation
__props__.__dict__["memory_share_count"] = memory_share_count
__props__.__dict__["memory_share_level"] = memory_share_level
__props__.__dict__["migrate_wait_timeout"] = migrate_wait_timeout
__props__.__dict__["moid"] = moid
__props__.__dict__["name"] = name
__props__.__dict__["nested_hv_enabled"] = nested_hv_enabled
__props__.__dict__["network_interfaces"] = network_interfaces
__props__.__dict__["num_cores_per_socket"] = num_cores_per_socket
__props__.__dict__["num_cpus"] = num_cpus
__props__.__dict__["ovf_deploy"] = ovf_deploy
__props__.__dict__["pci_device_ids"] = pci_device_ids
__props__.__dict__["poweron_timeout"] = poweron_timeout
__props__.__dict__["reboot_required"] = reboot_required
__props__.__dict__["replace_trigger"] = replace_trigger
__props__.__dict__["resource_pool_id"] = resource_pool_id
__props__.__dict__["run_tools_scripts_after_power_on"] = run_tools_scripts_after_power_on
__props__.__dict__["run_tools_scripts_after_resume"] = run_tools_scripts_after_resume
__props__.__dict__["run_tools_scripts_before_guest_reboot"] = run_tools_scripts_before_guest_reboot
__props__.__dict__["run_tools_scripts_before_guest_shutdown"] = run_tools_scripts_before_guest_shutdown
__props__.__dict__["run_tools_scripts_before_guest_standby"] = run_tools_scripts_before_guest_standby
__props__.__dict__["sata_controller_count"] = sata_controller_count
__props__.__dict__["scsi_bus_sharing"] = scsi_bus_sharing
__props__.__dict__["scsi_controller_count"] = scsi_controller_count
__props__.__dict__["scsi_type"] = scsi_type
__props__.__dict__["shutdown_wait_timeout"] = shutdown_wait_timeout
__props__.__dict__["storage_policy_id"] = storage_policy_id
__props__.__dict__["swap_placement_policy"] = swap_placement_policy
__props__.__dict__["sync_time_with_host"] = sync_time_with_host
__props__.__dict__["sync_time_with_host_periodically"] = sync_time_with_host_periodically
__props__.__dict__["tags"] = tags
__props__.__dict__["uuid"] = uuid
__props__.__dict__["vapp"] = vapp
__props__.__dict__["vapp_transports"] = vapp_transports
__props__.__dict__["vbs_enabled"] = vbs_enabled
__props__.__dict__["vmware_tools_status"] = vmware_tools_status
__props__.__dict__["vmx_path"] = vmx_path
__props__.__dict__["vvtd_enabled"] = vvtd_enabled
__props__.__dict__["wait_for_guest_ip_timeout"] = wait_for_guest_ip_timeout
__props__.__dict__["wait_for_guest_net_routable"] = wait_for_guest_net_routable
__props__.__dict__["wait_for_guest_net_timeout"] = wait_for_guest_net_timeout
return VirtualMachine(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="alternateGuestName")
def alternate_guest_name(self) -> pulumi.Output[Optional[str]]:
    """The guest OS name used when `guest_id` is `other` or `other-64`."""
    value = pulumi.get(self, "alternate_guest_name")
    return value
@property
@pulumi.getter
def annotation(self) -> pulumi.Output[Optional[str]]:
    """A user-provided description of the virtual machine; unset by default."""
    value = pulumi.get(self, "annotation")
    return value
@property
@pulumi.getter(name="bootDelay")
def boot_delay(self) -> pulumi.Output[Optional[int]]:
    """Milliseconds to wait before starting the boot sequence (default: no delay)."""
    value = pulumi.get(self, "boot_delay")
    return value
@property
@pulumi.getter(name="bootRetryDelay")
def boot_retry_delay(self) -> pulumi.Output[Optional[int]]:
    """Milliseconds to wait before retrying the boot sequence.

    Only effective when `boot_retry_enabled` is true. Default: `10000`
    (10 seconds).
    """
    value = pulumi.get(self, "boot_retry_delay")
    return value
@property
@pulumi.getter(name="bootRetryEnabled")
def boot_retry_enabled(self) -> pulumi.Output[Optional[bool]]:
    """Whether a VM that fails to boot retries after the `boot_retry_delay`.

    Default: `false`.
    """
    value = pulumi.get(self, "boot_retry_enabled")
    return value
@property
@pulumi.getter
def cdrom(self) -> pulumi.Output[Optional['outputs.VirtualMachineCdrom']]:
    """The CDROM device specification for this virtual machine (see CDROM options below)."""
    value = pulumi.get(self, "cdrom")
    return value
@property
@pulumi.getter(name="changeVersion")
def change_version(self) -> pulumi.Output[str]:
    """A unique identifier for the last applied configuration version.

    For example, the timestamp of the most recent configuration update.
    """
    value = pulumi.get(self, "change_version")
    return value
@property
@pulumi.getter
def clone(self) -> pulumi.Output[Optional['outputs.VirtualMachineClone']]:
    """Clone specification for this virtual machine.

    When set, the VM is created as a clone of the specified template,
    optionally with customization options. See creating a virtual machine
    from a template for details.
    """
    value = pulumi.get(self, "clone")
    return value
@property
@pulumi.getter(name="cpuHotAddEnabled")
def cpu_hot_add_enabled(self) -> pulumi.Output[Optional[bool]]:
    """Whether CPUs may be added while the virtual machine is running."""
    value = pulumi.get(self, "cpu_hot_add_enabled")
    return value
@property
@pulumi.getter(name="cpuHotRemoveEnabled")
def cpu_hot_remove_enabled(self) -> pulumi.Output[Optional[bool]]:
    """Whether CPUs may be removed while the virtual machine is running."""
    value = pulumi.get(self, "cpu_hot_remove_enabled")
    return value
@property
@pulumi.getter(name="cpuLimit")
def cpu_limit(self) -> pulumi.Output[Optional[int]]:
    """Maximum CPU (in MHz) this VM may consume, regardless of available resources.

    The default is no limit.
    """
    value = pulumi.get(self, "cpu_limit")
    return value
@property
@pulumi.getter(name="cpuPerformanceCountersEnabled")
def cpu_performance_counters_enabled(self) -> pulumi.Output[Optional[bool]]:
    """Whether CPU performance counters are enabled on this VM. Default: `false`."""
    value = pulumi.get(self, "cpu_performance_counters_enabled")
    return value
@property
@pulumi.getter(name="cpuReservation")
def cpu_reservation(self) -> pulumi.Output[Optional[int]]:
    """CPU (in MHz) guaranteed to this VM (default: no reservation)."""
    value = pulumi.get(self, "cpu_reservation")
    return value
@property
@pulumi.getter(name="cpuShareCount")
def cpu_share_count(self) -> pulumi.Output[int]:
    """Number of CPU shares allocated when `cpu_share_level` is `custom`."""
    value = pulumi.get(self, "cpu_share_count")
    return value
@property
@pulumi.getter(name="cpuShareLevel")
def cpu_share_level(self) -> pulumi.Output[Optional[str]]:
    """CPU allocation level: `high`, `low`, `normal`, or `custom`. Default: `custom`."""
    value = pulumi.get(self, "cpu_share_level")
    return value
@property
@pulumi.getter(name="customAttributes")
def custom_attributes(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """Map of custom attribute IDs to attribute value strings for this VM."""
    value = pulumi.get(self, "custom_attributes")
    return value
@property
@pulumi.getter(name="datacenterId")
def datacenter_id(self) -> pulumi.Output[Optional[str]]:
    """The datacenter ID; required only when deploying an OVF template."""
    value = pulumi.get(self, "datacenter_id")
    return value
@property
@pulumi.getter(name="datastoreClusterId")
def datastore_cluster_id(self) -> pulumi.Output[Optional[str]]:
    """Managed object reference ID of the datastore cluster to use.

    Applies to the entire virtual machine and implies the use of Storage
    DRS. See the section on virtual machine migration for details on
    changing this value.
    """
    value = pulumi.get(self, "datastore_cluster_id")
    return value
@property
@pulumi.getter(name="datastoreId")
def datastore_id(self) -> pulumi.Output[str]:
    """Datastore ID the ISO is located in.

    Required when using a datastore ISO. Conflicts with `client_device`.
    """
    value = pulumi.get(self, "datastore_id")
    return value
@property
@pulumi.getter(name="defaultIpAddress")
def default_ip_address(self) -> pulumi.Output[str]:
    """IP address the provider selects for provisioners configured on this resource.

    Whenever possible this is the first IPv4 address reachable through the
    machine's default gateway, then the first reachable IPv6 address, then
    the first generally discovered address if neither exists. Blank when
    VMware tools is not running or the VM is powered off.
    """
    value = pulumi.get(self, "default_ip_address")
    return value
@property
@pulumi.getter
def disks(self) -> pulumi.Output[Sequence['outputs.VirtualMachineDisk']]:
    """Virtual disk device specifications for this VM (see disk options below)."""
    value = pulumi.get(self, "disks")
    return value
@property
@pulumi.getter(name="efiSecureBootEnabled")
def efi_secure_boot_enabled(self) -> pulumi.Output[Optional[bool]]:
    """Whether EFI secure boot is enabled when `firmware` is `efi`. Default: `false`."""
    value = pulumi.get(self, "efi_secure_boot_enabled")
    return value
@property
@pulumi.getter(name="enableDiskUuid")
def enable_disk_uuid(self) -> pulumi.Output[Optional[bool]]:
    """Whether attached virtual disk UUIDs are exposed to the guest. Default: `false`."""
    value = pulumi.get(self, "enable_disk_uuid")
    return value
@property
@pulumi.getter(name="enableLogging")
def enable_logging(self) -> pulumi.Output[Optional[bool]]:
    """Whether VM events are logged to a file in the VM directory. Default: `false`."""
    value = pulumi.get(self, "enable_logging")
    return value
@property
@pulumi.getter(name="eptRviMode")
def ept_rvi_mode(self) -> pulumi.Output[Optional[str]]:
    """EPT/RVI (hardware memory virtualization) setting.

    One of `automatic`, `on`, or `off`. Default: `automatic`.
    """
    value = pulumi.get(self, "ept_rvi_mode")
    return value
@property
@pulumi.getter(name="extraConfig")
def extra_config(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
    """Extra configuration data for this VM.

    Useful for advanced parameters not normally in configuration, such as
    instance metadata.
    """
    value = pulumi.get(self, "extra_config")
    return value
@property
@pulumi.getter
def firmware(self) -> pulumi.Output[Optional[str]]:
    """Firmware interface for the VM: `bios` or `EFI`. Default: `bios`."""
    value = pulumi.get(self, "firmware")
    return value
@property
@pulumi.getter
def folder(self) -> pulumi.Output[Optional[str]]:
    """Folder path for this VM, relative to the resource pool's datacenter."""
    value = pulumi.get(self, "folder")
    return value
@property
@pulumi.getter(name="forcePowerOff")
def force_power_off(self) -> pulumi.Output[Optional[bool]]:
    """Whether to force power-off if a guest shutdown fails or times out.

    Applies while updating or destroying (see `shutdown_wait_timeout`).
    Default: `true`.
    """
    value = pulumi.get(self, "force_power_off")
    return value
@property
@pulumi.getter(name="guestId")
def guest_id(self) -> pulumi.Output[str]:
    """Guest ID for the operating system type.

    See [here][vmware-docs-guest-ids] for the full list of values.
    Default: `other-64`.
    """
    value = pulumi.get(self, "guest_id")
    return value
@property
@pulumi.getter(name="guestIpAddresses")
def guest_ip_addresses(self) -> pulumi.Output[Sequence[str]]:
    """Current list of IP addresses on this machine, including `default_ip_address`.

    Empty if VMware tools is not running on the virtual machine or the VM
    is powered off.
    """
    value = pulumi.get(self, "guest_ip_addresses")
    return value
@property
@pulumi.getter(name="hardwareVersion")
def hardware_version(self) -> pulumi.Output[int]:
    """Hardware version number (valid range 4-15); cannot be downgraded.

    See [virtual machine hardware
    compatibility][virtual-machine-hardware-compatibility] for details.
    """
    value = pulumi.get(self, "hardware_version")
    return value
@property
@pulumi.getter(name="hostSystemId")
def host_system_id(self) -> pulumi.Output[str]:
    """Optional managed object reference ID of the host for this VM.

    If not supplied, vSphere selects a host in the resource pool per any
    defaults or DRS policies in place. See the section on virtual machine
    migration for details on changing this value.
    """
    value = pulumi.get(self, "host_system_id")
    return value
@property
@pulumi.getter(name="hvMode")
def hv_mode(self) -> pulumi.Output[Optional[str]]:
    """Non-nested hardware virtualization setting.

    One of `hvAuto`, `hvOn`, or `hvOff`. Default: `hvAuto`.
    """
    value = pulumi.get(self, "hv_mode")
    return value
@property
@pulumi.getter(name="ideControllerCount")
def ide_controller_count(self) -> pulumi.Output[Optional[int]]:
    """Number of IDE controllers managed on this VM.

    Directly bounds the number of disks and the maximum disk unit number.
    Lowering this value does not remove controllers.
    """
    value = pulumi.get(self, "ide_controller_count")
    return value
@property
@pulumi.getter(name="ignoredGuestIps")
def ignored_guest_ips(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """IP addresses and CIDR networks ignored while waiting for an available IP.

    Addresses in this list are skipped by either waiter so it keeps waiting
    for a real IP address. Default: [].
    """
    value = pulumi.get(self, "ignored_guest_ips")
    return value
@property
@pulumi.getter
def imported(self) -> pulumi.Output[bool]:
    """Flag set when the VM was imported or its state migrated from an older resource version.

    Influences the behavior of the first post-import apply operation; see
    the section on importing below.
    """
    value = pulumi.get(self, "imported")
    return value
@property
@pulumi.getter(name="latencySensitivity")
def latency_sensitivity(self) -> pulumi.Output[Optional[str]]:
    """Scheduling delay control for the VM: `low`, `normal`, `medium`, or `high`.

    Use a higher sensitivity for workloads requiring lower latency, such as
    VOIP, media players, or applications needing frequent mouse/keyboard
    access.
    """
    value = pulumi.get(self, "latency_sensitivity")
    return value
@property
@pulumi.getter
def memory(self) -> pulumi.Output[Optional[int]]:
    """Memory size of the VM in MB. Default: `1024` (1 GB)."""
    value = pulumi.get(self, "memory")
    return value
    @property
    @pulumi.getter(name="memoryHotAddEnabled")
    def memory_hot_add_enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        Allow memory to be added to this virtual machine while it is
        running.
        """
        return pulumi.get(self, "memory_hot_add_enabled")
    @property
    @pulumi.getter(name="memoryLimit")
    def memory_limit(self) -> pulumi.Output[Optional[int]]:
        """
        The maximum amount of memory (in MB) that this virtual machine can
        consume, regardless of available resources. The default is no limit.
        """
        return pulumi.get(self, "memory_limit")
    @property
    @pulumi.getter(name="memoryReservation")
    def memory_reservation(self) -> pulumi.Output[Optional[int]]:
        """
        The amount of memory (in MB) that this virtual machine is guaranteed.
        The default is no reservation.
        """
        return pulumi.get(self, "memory_reservation")
    @property
    @pulumi.getter(name="memoryShareCount")
    def memory_share_count(self) -> pulumi.Output[int]:
        """
        The number of memory shares allocated to the virtual machine when
        `memory_share_level` is `custom`.
        """
        return pulumi.get(self, "memory_share_count")
    @property
    @pulumi.getter(name="memoryShareLevel")
    def memory_share_level(self) -> pulumi.Output[Optional[str]]:
        """
        The allocation level for memory resources. One of `high`, `low`,
        `normal`, or `custom`. Default: `custom`.
        """
        return pulumi.get(self, "memory_share_level")
    @property
    @pulumi.getter(name="migrateWaitTimeout")
    def migrate_wait_timeout(self) -> pulumi.Output[Optional[int]]:
        """
        The amount of time, in minutes, to wait for a virtual machine
        migration to complete before failing. Default: 10 minutes. Also see
        the section on virtual machine migration.
        """
        return pulumi.get(self, "migrate_wait_timeout")
    @property
    @pulumi.getter
    def moid(self) -> pulumi.Output[str]:
        """
        The machine object ID from VMware.
        """
        return pulumi.get(self, "moid")
    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        The name of the virtual machine.
        """
        return pulumi.get(self, "name")
    @property
    @pulumi.getter(name="nestedHvEnabled")
    def nested_hv_enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable nested hardware virtualization on this virtual machine,
        facilitating nested virtualization in the guest. Default: `false`.
        """
        return pulumi.get(self, "nested_hv_enabled")
    @property
    @pulumi.getter(name="networkInterfaces")
    def network_interfaces(self) -> pulumi.Output[Optional[Sequence['outputs.VirtualMachineNetworkInterface']]]:
        """
        A specification for a virtual NIC on this virtual machine. See the
        network interface options below.
        """
        return pulumi.get(self, "network_interfaces")
    @property
    @pulumi.getter(name="numCoresPerSocket")
    def num_cores_per_socket(self) -> pulumi.Output[Optional[int]]:
        """
        The number of cores per socket in this virtual machine. The number of
        vCPUs on the virtual machine will be `num_cpus` divided by
        `num_cores_per_socket`. If specified, the value supplied to `num_cpus`
        must be evenly divisible by this value. Default: `1`.
        """
        return pulumi.get(self, "num_cores_per_socket")
    @property
    @pulumi.getter(name="numCpus")
    def num_cpus(self) -> pulumi.Output[Optional[int]]:
        """
        The total number of virtual processor cores to assign to this virtual
        machine. Default: `1`.
        """
        return pulumi.get(self, "num_cpus")
    @property
    @pulumi.getter(name="ovfDeploy")
    def ovf_deploy(self) -> pulumi.Output[Optional['outputs.VirtualMachineOvfDeploy']]:
        """
        When specified, the VM will be deployed from the provided OVF/OVA
        template. See creating a virtual machine from an OVF/OVA template for
        more details.
        """
        return pulumi.get(self, "ovf_deploy")
    @property
    @pulumi.getter(name="pciDeviceIds")
    def pci_device_ids(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        List of host PCI device IDs for which to create PCI passthroughs.
        """
        return pulumi.get(self, "pci_device_ids")
    @property
    @pulumi.getter(name="poweronTimeout")
    def poweron_timeout(self) -> pulumi.Output[Optional[int]]:
        """
        The amount of time, in seconds, to keep trying to power on the VM.
        """
        return pulumi.get(self, "poweron_timeout")
    @property
    @pulumi.getter(name="rebootRequired")
    def reboot_required(self) -> pulumi.Output[bool]:
        """
        Value internal to the provider used to determine whether a
        configuration change requires a reboot. This value is only useful
        during an update process and gets reset on refresh.
        """
        return pulumi.get(self, "reboot_required")
    @property
    @pulumi.getter(name="replaceTrigger")
    def replace_trigger(self) -> pulumi.Output[Optional[str]]:
        """
        Triggers replacement of the resource whenever it changes. For example,
        `replace_trigger = sha256(format("%s-%s",data.template_file.cloud_init_metadata.rendered,data.template_file.cloud_init_userdata.rendered))`
        fingerprints changes in the cloud_init metadata and userdata templates,
        so the resource is replaced whenever the dependent template renders a
        new configuration. (Forces a replacement.)
        """
        return pulumi.get(self, "replace_trigger")
    @property
    @pulumi.getter(name="resourcePoolId")
    def resource_pool_id(self) -> pulumi.Output[str]:
        """
        The managed object reference ID of the resource pool to put this
        virtual machine in. See the section on virtual machine migration for
        details on changing this value.
        """
        return pulumi.get(self, "resource_pool_id")
    @property
    @pulumi.getter(name="runToolsScriptsAfterPowerOn")
    def run_tools_scripts_after_power_on(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable the execution of post-power-on scripts when VMware Tools is
        installed. Default: `true`.
        """
        return pulumi.get(self, "run_tools_scripts_after_power_on")
    @property
    @pulumi.getter(name="runToolsScriptsAfterResume")
    def run_tools_scripts_after_resume(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable the execution of post-resume scripts when VMware Tools is
        installed. Default: `true`.
        """
        return pulumi.get(self, "run_tools_scripts_after_resume")
    @property
    @pulumi.getter(name="runToolsScriptsBeforeGuestReboot")
    def run_tools_scripts_before_guest_reboot(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable the execution of pre-reboot scripts when VMware Tools is
        installed. Default: `false`.
        """
        return pulumi.get(self, "run_tools_scripts_before_guest_reboot")
    @property
    @pulumi.getter(name="runToolsScriptsBeforeGuestShutdown")
    def run_tools_scripts_before_guest_shutdown(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable the execution of pre-shutdown scripts when VMware Tools is
        installed. Default: `true`.
        """
        return pulumi.get(self, "run_tools_scripts_before_guest_shutdown")
    @property
    @pulumi.getter(name="runToolsScriptsBeforeGuestStandby")
    def run_tools_scripts_before_guest_standby(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable the execution of pre-standby scripts when VMware Tools is
        installed. Default: `true`.
        """
        return pulumi.get(self, "run_tools_scripts_before_guest_standby")
    @property
    @pulumi.getter(name="sataControllerCount")
    def sata_controller_count(self) -> pulumi.Output[Optional[int]]:
        """
        The number of SATA controllers that Terraform manages on this virtual
        machine. This bounds how many disks can be added to the virtual
        machine and the maximum disk unit number. Note that lowering this
        value does not remove controllers.
        """
        return pulumi.get(self, "sata_controller_count")
    @property
    @pulumi.getter(name="scsiBusSharing")
    def scsi_bus_sharing(self) -> pulumi.Output[Optional[str]]:
        """
        Mode for sharing the SCSI bus. One of `physicalSharing`,
        `virtualSharing`, or `noSharing`. Default: `noSharing`.
        """
        return pulumi.get(self, "scsi_bus_sharing")
    @property
    @pulumi.getter(name="scsiControllerCount")
    def scsi_controller_count(self) -> pulumi.Output[Optional[int]]:
        """
        The number of SCSI controllers that this provider manages on this
        virtual machine. This bounds how many disks can be added to the
        virtual machine and the maximum disk unit number. Note that lowering
        this value does not remove controllers. Default: `1`.
        """
        return pulumi.get(self, "scsi_controller_count")
    @property
    @pulumi.getter(name="scsiType")
    def scsi_type(self) -> pulumi.Output[Optional[str]]:
        """
        The type of SCSI bus this virtual machine will have. One of
        `lsilogic` (LSI Logic Parallel), `lsilogic-sas` (LSI Logic SAS), or
        `pvscsi` (VMware Paravirtual). Default: `pvscsi`.
        """
        return pulumi.get(self, "scsi_type")
    @property
    @pulumi.getter(name="shutdownWaitTimeout")
    def shutdown_wait_timeout(self) -> pulumi.Output[Optional[int]]:
        """
        The amount of time, in minutes, to wait for a graceful guest shutdown
        when making necessary updates to the virtual machine. If
        `force_power_off` is set to `true`, the VM is force powered-off after
        this timeout; otherwise an error is returned. Default: 3 minutes.
        """
        return pulumi.get(self, "shutdown_wait_timeout")
    @property
    @pulumi.getter(name="storagePolicyId")
    def storage_policy_id(self) -> pulumi.Output[str]:
        """
        The UUID of the storage policy to assign to this disk.
        """
        return pulumi.get(self, "storage_policy_id")
    @property
    @pulumi.getter(name="swapPlacementPolicy")
    def swap_placement_policy(self) -> pulumi.Output[Optional[str]]:
        """
        The swap file placement policy for this virtual machine. One of
        `inherit`, `hostLocal`, or `vmDirectory`. Default: `inherit`.
        """
        return pulumi.get(self, "swap_placement_policy")
    @property
    @pulumi.getter(name="syncTimeWithHost")
    def sync_time_with_host(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable guest clock synchronization with the host. On vSphere 7 U1 and
        above, with only this setting the clock is synchronized on startup and
        resume, so consider also setting `sync_time_with_host_periodically`.
        Requires VMware Tools to be installed. Default: `false`.
        """
        return pulumi.get(self, "sync_time_with_host")
    @property
    @pulumi.getter(name="syncTimeWithHostPeriodically")
    def sync_time_with_host_periodically(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable periodic clock synchronization with the host. Supported only on
        vSphere 7 U1 and above; on older versions setting
        `sync_time_with_host` is enough for periodic synchronization. Requires
        VMware Tools to be installed. Default: `false`.
        """
        return pulumi.get(self, "sync_time_with_host_periodically")
    @property
    @pulumi.getter
    def tags(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        The IDs of any tags to attach to this resource.
        """
        return pulumi.get(self, "tags")
    @property
    @pulumi.getter
    def uuid(self) -> pulumi.Output[str]:
        """
        The UUID of the virtual disk's VMDK file, used to track the virtual
        disk on the virtual machine.
        """
        return pulumi.get(self, "uuid")
    @property
    @pulumi.getter
    def vapp(self) -> pulumi.Output[Optional['outputs.VirtualMachineVapp']]:
        """
        Optional vApp configuration. The only sub-key available is
        `properties`, a key/value map of properties for virtual machines
        imported from OVF or OVA files. See Using vApp properties to supply
        OVF/OVA configuration for more details.
        """
        return pulumi.get(self, "vapp")
    @property
    @pulumi.getter(name="vappTransports")
    def vapp_transports(self) -> pulumi.Output[Sequence[str]]:
        """
        Computed value which is only valid for cloned virtual machines. A list
        of vApp transport methods supported by the source virtual machine or
        template.
        """
        return pulumi.get(self, "vapp_transports")
    @property
    @pulumi.getter(name="vbsEnabled")
    def vbs_enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        Enable Virtualization Based Security. Requires `firmware` to be `efi`,
        and `vvtd_enabled`, `nested_hv_enabled`, and `efi_secure_boot_enabled`
        must all be `true`. Supported on vSphere 6.7 and higher.
        Default: `false`.
        """
        return pulumi.get(self, "vbs_enabled")
    @property
    @pulumi.getter(name="vmwareToolsStatus")
    def vmware_tools_status(self) -> pulumi.Output[str]:
        """
        The state of VMware Tools in the guest. This determines the proper
        course of action for some device operations.
        """
        return pulumi.get(self, "vmware_tools_status")
    @property
    @pulumi.getter(name="vmxPath")
    def vmx_path(self) -> pulumi.Output[str]:
        """
        The path of the virtual machine's configuration file in the VM's
        datastore.
        """
        return pulumi.get(self, "vmx_path")
    @property
    @pulumi.getter(name="vvtdEnabled")
    def vvtd_enabled(self) -> pulumi.Output[Optional[bool]]:
        """
        Whether Intel Virtualization Technology for Directed I/O is enabled
        for this virtual machine (_I/O MMU_ in the vSphere Client). Supported
        on vSphere 6.7 and higher. Default: `false`.
        """
        return pulumi.get(self, "vvtd_enabled")
    @property
    @pulumi.getter(name="waitForGuestIpTimeout")
    def wait_for_guest_ip_timeout(self) -> pulumi.Output[Optional[int]]:
        """
        The amount of time, in minutes, to wait for an available guest IP
        address on this virtual machine. Only use this if your version of
        VMware Tools does not allow the `wait_for_guest_net_timeout` waiter to
        be used. A value less than 1 disables the waiter. Default: 0.
        """
        return pulumi.get(self, "wait_for_guest_ip_timeout")
    @property
    @pulumi.getter(name="waitForGuestNetRoutable")
    def wait_for_guest_net_routable(self) -> pulumi.Output[Optional[bool]]:
        """
        Controls whether the guest network waiter waits for a routable
        address. When `false`, the waiter does not wait for a default gateway,
        nor are IP addresses checked against any discovered default gateways
        as part of its success criteria. This property is ignored if the
        `wait_for_guest_ip_timeout` waiter is used. Default: `true`.
        """
        return pulumi.get(self, "wait_for_guest_net_routable")
    @property
    @pulumi.getter(name="waitForGuestNetTimeout")
    def wait_for_guest_net_timeout(self) -> pulumi.Output[Optional[int]]:
        """
        The amount of time, in minutes, to wait for an available IP address on
        this virtual machine's NICs. Older versions of VMware Tools do not
        populate this property; in those cases, this waiter can be disabled
        and the `wait_for_guest_ip_timeout` waiter used instead. A value less
        than 1 disables the waiter. Default: 5 minutes.
        """
        return pulumi.get(self, "wait_for_guest_net_timeout")
| 51.526437 | 176 | 0.666321 | 30,994 | 246,554 | 5.084113 | 0.021359 | 0.076997 | 0.082595 | 0.028456 | 0.983995 | 0.97822 | 0.970459 | 0.966854 | 0.965204 | 0.951922 | 0 | 0.001555 | 0.246319 | 246,554 | 4,784 | 177 | 51.537207 | 0.84644 | 0.417588 | 0 | 0.908542 | 1 | 0 | 0.130087 | 0.052002 | 0 | 0 | 0 | 0 | 0 | 1 | 0.171268 | false | 0.000431 | 0.008628 | 0 | 0.283434 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
c57f056b856ac35953a90591df05b515f8214bac | 12,924 | py | Python | ios/models.py | RainYang0925/testcase_web | 6698190c426be56bfc54e92b6f99a3de335d5e82 | [
"CC-BY-4.0"
] | 7 | 2017-08-03T08:02:11.000Z | 2021-02-22T02:25:03.000Z | ios/models.py | kian11/testcase_web | 6698190c426be56bfc54e92b6f99a3de335d5e82 | [
"CC-BY-4.0"
] | null | null | null | ios/models.py | kian11/testcase_web | 6698190c426be56bfc54e92b6f99a3de335d5e82 | [
"CC-BY-4.0"
] | null | null | null | #-*-coding:utf-8-*-
from __future__ import unicode_literals
from django.db import models
# Create your models here.
'''{'动物保护项目发布': 'i_animal_project_released',
'灾难救助项目发布': 'i_disaster_project_released',
'梦想清单项目发布': 'i_dream_project_released',
'大病救助项目发布': 'i_illness_project_released',
'扶贫助学项目发布': 'i_poverty_project_released',
'尝鲜预售项目发布': 'i_presale_project_released',
'支持已创建的项目': 'i_support_project',
'其它项目发布': 'i_other_project_released',
'个人中心': 'i_check_personal',
'项目管理':'i_check_project',
'查看个人项目发布':'i_check_build_project',
'删除银行卡':'i_delete_card',
'登录APP':'i_login',
}
'''
class IOSTestCase1(models.Model):
    """Test-case record for the iOS animal-protection project-release flow."""
    id = models.AutoField(primary_key=True, editable=False)
    case_id = models.CharField(unique=True, max_length=45)
    case_name = models.CharField(max_length=45)
    page = models.CharField(max_length=60, null=True, blank=True)
    name = models.CharField(max_length=45, null=True, blank=True)
    action = models.CharField(max_length=45, null=True, blank=True)
    value = models.TextField(null=True, blank=True)
    expected = models.TextField(null=True, blank=True)
    # Filled in by the test runner; not editable through the admin.
    actual = models.TextField(null=True, blank=True, editable=False)
    result = models.CharField(max_length=5, null=True, blank=True, editable=False)
    # An IntegerField default must be an int, not the string '1'.
    state = models.IntegerField(default=1)

    def __unicode__(self):  # Python 2
        return self.case_name

    def __str__(self):  # Python 3
        return self.case_name

    class Meta:
        verbose_name = 'IOS-动物保护项目发布'
        verbose_name_plural = 'IOS-动物保护项目发布'
        ordering = ['case_id']
        db_table = 'i_animal_project_released'
class IOSTestCase2(models.Model):
    """Test-case record for the iOS disaster-relief project-release flow."""
    id = models.AutoField(primary_key=True, editable=False)
    case_id = models.CharField(unique=True, max_length=45)
    case_name = models.CharField(max_length=45)
    page = models.CharField(max_length=60, null=True, blank=True)
    name = models.CharField(max_length=45, null=True, blank=True)
    action = models.CharField(max_length=45, null=True, blank=True)
    value = models.TextField(null=True, blank=True)
    expected = models.TextField(null=True, blank=True)
    # Filled in by the test runner; not editable through the admin.
    actual = models.TextField(null=True, blank=True, editable=False)
    result = models.CharField(max_length=5, null=True, blank=True, editable=False)
    # An IntegerField default must be an int, not the string '1'.
    state = models.IntegerField(default=1)

    def __unicode__(self):  # Python 2
        return self.case_name

    def __str__(self):  # Python 3
        return self.case_name

    class Meta:
        verbose_name = 'IOS-灾难救助项目发布'
        verbose_name_plural = 'IOS-灾难救助项目发布'
        ordering = ['case_id']
        db_table = 'i_disaster_project_released'
class IOSTestCase3(models.Model):
    """Test-case record for the iOS dream-list project-release flow."""
    id = models.AutoField(primary_key=True, editable=False)
    case_id = models.CharField(unique=True, max_length=45)
    case_name = models.CharField(max_length=45)
    page = models.CharField(max_length=60, null=True, blank=True)
    name = models.CharField(max_length=45, null=True, blank=True)
    action = models.CharField(max_length=45, null=True, blank=True)
    value = models.TextField(null=True, blank=True)
    expected = models.TextField(null=True, blank=True)
    # Filled in by the test runner; not editable through the admin.
    actual = models.TextField(null=True, blank=True, editable=False)
    result = models.CharField(max_length=5, null=True, blank=True, editable=False)
    # An IntegerField default must be an int, not the string '1'.
    state = models.IntegerField(default=1)

    def __unicode__(self):  # Python 2
        return self.case_name

    def __str__(self):  # Python 3
        return self.case_name

    class Meta:
        verbose_name = 'IOS-梦想清单项目发布'
        verbose_name_plural = 'IOS-梦想清单项目发布'
        ordering = ['case_id']
        db_table = 'i_dream_project_released'
class IOSTestCase4(models.Model):
    """Test-case record for the iOS serious-illness-relief project-release flow."""
    id = models.AutoField(primary_key=True, editable=False)
    case_id = models.CharField(unique=True, max_length=45)
    case_name = models.CharField(max_length=45)
    page = models.CharField(max_length=60, null=True, blank=True)
    name = models.CharField(max_length=45, null=True, blank=True)
    action = models.CharField(max_length=45, null=True, blank=True)
    value = models.TextField(null=True, blank=True)
    expected = models.TextField(null=True, blank=True)
    # Filled in by the test runner; not editable through the admin.
    actual = models.TextField(null=True, blank=True, editable=False)
    result = models.CharField(max_length=5, null=True, blank=True, editable=False)
    # An IntegerField default must be an int, not the string '1'.
    state = models.IntegerField(default=1)

    def __unicode__(self):  # Python 2
        return self.case_name

    def __str__(self):  # Python 3
        return self.case_name

    class Meta:
        verbose_name = 'IOS-大病救助项目发布'
        verbose_name_plural = 'IOS-大病救助项目发布'
        ordering = ['case_id']
        db_table = 'i_illness_project_released'
class IOSTestCase5(models.Model):
    """Test-case record for the iOS poverty/education-aid project-release flow."""
    id = models.AutoField(primary_key=True, editable=False)
    case_id = models.CharField(unique=True, max_length=45)
    case_name = models.CharField(max_length=45)
    page = models.CharField(max_length=60, null=True, blank=True)
    name = models.CharField(max_length=45, null=True, blank=True)
    action = models.CharField(max_length=45, null=True, blank=True)
    value = models.TextField(null=True, blank=True)
    expected = models.TextField(null=True, blank=True)
    # Filled in by the test runner; not editable through the admin.
    actual = models.TextField(null=True, blank=True, editable=False)
    result = models.CharField(max_length=5, null=True, blank=True, editable=False)
    # An IntegerField default must be an int, not the string '1'.
    state = models.IntegerField(default=1)

    def __unicode__(self):  # Python 2
        return self.case_name

    def __str__(self):  # Python 3
        return self.case_name

    class Meta:
        verbose_name = 'IOS-扶贫助学项目发布'
        verbose_name_plural = 'IOS-扶贫助学项目发布'
        ordering = ['case_id']
        db_table = 'i_poverty_project_released'
class IOSTestCase6(models.Model):
    """Test-case record for the iOS presale project-release flow."""
    id = models.AutoField(primary_key=True, editable=False)
    case_id = models.CharField(unique=True, max_length=45)
    case_name = models.CharField(max_length=45)
    page = models.CharField(max_length=60, null=True, blank=True)
    name = models.CharField(max_length=45, null=True, blank=True)
    action = models.CharField(max_length=45, null=True, blank=True)
    value = models.TextField(null=True, blank=True)
    expected = models.TextField(null=True, blank=True)
    # Filled in by the test runner; not editable through the admin.
    actual = models.TextField(null=True, blank=True, editable=False)
    result = models.CharField(max_length=5, null=True, blank=True, editable=False)
    # An IntegerField default must be an int, not the string '1'.
    state = models.IntegerField(default=1)

    def __unicode__(self):  # Python 2
        return self.case_name

    def __str__(self):  # Python 3
        return self.case_name

    class Meta:
        verbose_name = 'IOS-尝鲜预售项目发布'
        verbose_name_plural = 'IOS-尝鲜预售项目发布'
        ordering = ['case_id']
        db_table = 'i_presale_project_released'
class IOSTestCase7(models.Model):
    """Test-case record for the iOS "other project" release flow."""
    id = models.AutoField(primary_key=True, editable=False)
    case_id = models.CharField(unique=True, max_length=45)
    case_name = models.CharField(max_length=45)
    page = models.CharField(max_length=60, null=True, blank=True)
    name = models.CharField(max_length=45, null=True, blank=True)
    action = models.CharField(max_length=45, null=True, blank=True)
    value = models.TextField(null=True, blank=True)
    expected = models.TextField(null=True, blank=True)
    # Filled in by the test runner; not editable through the admin.
    actual = models.TextField(null=True, blank=True, editable=False)
    result = models.CharField(max_length=5, null=True, blank=True, editable=False)
    # An IntegerField default must be an int, not the string '1'.
    state = models.IntegerField(default=1)

    def __unicode__(self):  # Python 2
        return self.case_name

    def __str__(self):  # Python 3
        return self.case_name

    class Meta:
        verbose_name = 'IOS-其它项目发布'
        verbose_name_plural = 'IOS-其它项目发布'
        ordering = ['case_id']
        db_table = 'i_other_project_released'
class IOSTestCase8(models.Model):
    """Test-case record for supporting an already-created project on iOS."""
    id = models.AutoField(primary_key=True, editable=False)
    case_id = models.CharField(unique=True, max_length=45)
    case_name = models.CharField(max_length=45)
    page = models.CharField(max_length=60, null=True, blank=True)
    name = models.CharField(max_length=45, null=True, blank=True)
    action = models.CharField(max_length=45, null=True, blank=True)
    value = models.TextField(null=True, blank=True)
    expected = models.TextField(null=True, blank=True)
    # Filled in by the test runner; not editable through the admin.
    actual = models.TextField(null=True, blank=True, editable=False)
    result = models.CharField(max_length=5, null=True, blank=True, editable=False)
    # An IntegerField default must be an int, not the string '1'.
    state = models.IntegerField(default=1)

    def __unicode__(self):  # Python 2
        return self.case_name

    def __str__(self):  # Python 3
        return self.case_name

    class Meta:
        verbose_name = 'IOS-支持已创建的项目'
        verbose_name_plural = 'IOS-支持已创建的项目'
        ordering = ['case_id']
        db_table = 'i_support_project'
class IOSTestCase9(models.Model):
    """Test-case record for the iOS personal-center flow."""
    id = models.AutoField(primary_key=True, editable=False)
    case_id = models.CharField(unique=True, max_length=45)
    case_name = models.CharField(max_length=45)
    page = models.CharField(max_length=60, null=True, blank=True)
    name = models.CharField(max_length=45, null=True, blank=True)
    action = models.CharField(max_length=45, null=True, blank=True)
    value = models.TextField(null=True, blank=True)
    expected = models.TextField(null=True, blank=True)
    # Filled in by the test runner; not editable through the admin.
    actual = models.TextField(null=True, blank=True, editable=False)
    result = models.CharField(max_length=5, null=True, blank=True, editable=False)
    # An IntegerField default must be an int, not the string '1'.
    state = models.IntegerField(default=1)

    def __unicode__(self):  # Python 2
        return self.case_name

    def __str__(self):  # Python 3
        return self.case_name

    class Meta:
        verbose_name = 'IOS-个人中心'
        verbose_name_plural = 'IOS-个人中心'
        ordering = ['case_id']
        db_table = 'i_check_personal'
class IOSTestCase10(models.Model):
    """Test-case record for the iOS project-management flow."""
    id = models.AutoField(primary_key=True, editable=False)
    case_id = models.CharField(unique=True, max_length=45)
    case_name = models.CharField(max_length=45)
    page = models.CharField(max_length=60, null=True, blank=True)
    name = models.CharField(max_length=45, null=True, blank=True)
    action = models.CharField(max_length=45, null=True, blank=True)
    value = models.TextField(null=True, blank=True)
    expected = models.TextField(null=True, blank=True)
    # Filled in by the test runner; not editable through the admin.
    actual = models.TextField(null=True, blank=True, editable=False)
    result = models.CharField(max_length=5, null=True, blank=True, editable=False)
    # An IntegerField default must be an int, not the string '1'.
    state = models.IntegerField(default=1)

    def __unicode__(self):  # Python 2
        return self.case_name

    def __str__(self):  # Python 3
        return self.case_name

    class Meta:
        verbose_name = 'IOS-项目管理'
        verbose_name_plural = 'IOS-项目管理'
        ordering = ['case_id']
        db_table = 'i_check_project'
class IOSTestCase11(models.Model):
    """Test-case record for viewing one's own published projects on iOS."""
    id = models.AutoField(primary_key=True, editable=False)
    case_id = models.CharField(unique=True, max_length=45)
    case_name = models.CharField(max_length=45)
    page = models.CharField(max_length=60, null=True, blank=True)
    name = models.CharField(max_length=45, null=True, blank=True)
    action = models.CharField(max_length=45, null=True, blank=True)
    value = models.TextField(null=True, blank=True)
    expected = models.TextField(null=True, blank=True)
    # Filled in by the test runner; not editable through the admin.
    actual = models.TextField(null=True, blank=True, editable=False)
    result = models.CharField(max_length=5, null=True, blank=True, editable=False)
    # An IntegerField default must be an int, not the string '1'.
    state = models.IntegerField(default=1)

    def __unicode__(self):  # Python 2
        return self.case_name

    def __str__(self):  # Python 3
        return self.case_name

    class Meta:
        verbose_name = 'IOS-查看个人项目发布'
        verbose_name_plural = 'IOS-查看个人项目发布'
        ordering = ['case_id']
        db_table = 'i_check_build_project'
class IOSTestCase12(models.Model):
    """Test-case record for deleting a bank card on iOS."""
    id = models.AutoField(primary_key=True, editable=False)
    case_id = models.CharField(unique=True, max_length=45)
    case_name = models.CharField(max_length=45)
    page = models.CharField(max_length=60, null=True, blank=True)
    name = models.CharField(max_length=45, null=True, blank=True)
    action = models.CharField(max_length=45, null=True, blank=True)
    value = models.TextField(null=True, blank=True)
    expected = models.TextField(null=True, blank=True)
    # Filled in by the test runner; not editable through the admin.
    actual = models.TextField(null=True, blank=True, editable=False)
    result = models.CharField(max_length=5, null=True, blank=True, editable=False)
    # An IntegerField default must be an int, not the string '1'.
    state = models.IntegerField(default=1)

    def __unicode__(self):  # Python 2
        return self.case_name

    def __str__(self):  # Python 3
        return self.case_name

    class Meta:
        verbose_name = 'IOS-删除银行卡'
        verbose_name_plural = 'IOS-删除银行卡'
        ordering = ['case_id']
        db_table = 'i_delete_card'
class IOSTestCase13(models.Model):
    """Test-case record for logging in to the iOS app."""
    id = models.AutoField(primary_key=True, editable=False)
    case_id = models.CharField(unique=True, max_length=45)
    case_name = models.CharField(max_length=45)
    page = models.CharField(max_length=60, null=True, blank=True)
    name = models.CharField(max_length=45, null=True, blank=True)
    action = models.CharField(max_length=45, null=True, blank=True)
    value = models.TextField(null=True, blank=True)
    expected = models.TextField(null=True, blank=True)
    # Filled in by the test runner; not editable through the admin.
    actual = models.TextField(null=True, blank=True, editable=False)
    result = models.CharField(max_length=5, null=True, blank=True, editable=False)
    # An IntegerField default must be an int, not the string '1'.
    state = models.IntegerField(default=1)

    def __unicode__(self):  # Python 2
        return self.case_name

    def __str__(self):  # Python 3
        return self.case_name

    class Meta:
        verbose_name = 'IOS-登录APP'
        verbose_name_plural = 'IOS-登录APP'
        ordering = ['case_id']
        db_table = 'i_login'
c5d4bdf80f444d42bdf0dde072b72a534e3a0a69 | 3,911 | py | Python | demos/main_commands_default_shell.py | taskcontrols/py-taskcontrols | 0f3f8d64cafc77dfbf23e70b0c86a91fea858ce4 | [
"MIT"
] | 1 | 2020-04-17T07:15:14.000Z | 2020-04-17T07:15:14.000Z | demos/main_commands_default_shell.py | taskcontrols/py-taskcontrols | 0f3f8d64cafc77dfbf23e70b0c86a91fea858ce4 | [
"MIT"
] | 14 | 2021-11-26T15:18:05.000Z | 2021-12-12T03:05:31.000Z | demos/main_commands_default_shell.py | taskcontrols/py-taskcontrols | 0f3f8d64cafc77dfbf23e70b0c86a91fea858ce4 | [
"MIT"
] | 1 | 2021-11-26T15:05:52.000Z | 2021-11-26T15:05:52.000Z |
from taskcontrol.lib import CommandsBase

c = CommandsBase()

# cmd = ["ssh", "-i", "./developers.pem", "user@192.168.0.1"]
# Comment the following line out and change the above cmd with your details or commands
from ctests import cmd


def run_over_ssh(executor, command, args, stdin_input):
    """Execute `command` with `args`, feed `stdin_input` on stdin, and print stdout.

    Returns the result tuple from CommandsBase.execute; result[0] is the
    Popen-like process object whose `_stdout_buff` holds captured output.
    """
    result = executor.execute(
        command, mode="subprocess_popen", stdin_mode=True,
        options={"args": args, "stdin_input": stdin_input})
    print("RUNNING shell through subprocess_popen:\n")
    print(result[0].__dict__.get("_stdout_buff")[0])
    return result


if cmd:
    command = cmd.pop(0)
    # Same demo scripts executed via the remote default shell, then
    # explicitly through bash, then through the restricted shell rbash.
    stdin_inputs = [
        "./testssh.sh",
        "./testssh_rm.sh",
        "./testssh_all.sh",
        # RUNNING RESULT FOR BASH.SH USING BASH
        "bash ./testssh.sh",
        "bash ./testssh_rm.sh",
        "bash ./testssh_all.sh",
        # RUNNING RESULT FOR BASH.SH USING RBASH
        "rbash ./testssh.sh",
        "rbash ./testssh_rm.sh",
        "rbash ./testssh_all.sh",
        "rbash ./testssh.sh",
        "rbash ./testssh_bash.sh",
        "echo '\n\nECHO_SSH_RAN_WELL'",
    ]
    for stdin_input in stdin_inputs:
        # Access the resulting process after each command execution;
        # it will have STDOUT or STDERR.
        run_over_ssh(c, command, cmd, stdin_input)
| 50.141026 | 90 | 0.647149 | 494 | 3,911 | 4.811741 | 0.145749 | 0.111064 | 0.060581 | 0.095919 | 0.878839 | 0.878839 | 0.878839 | 0.878839 | 0.878839 | 0.878839 | 0 | 0.010621 | 0.205574 | 3,911 | 77 | 91 | 50.792208 | 0.754425 | 0.076707 | 0 | 0.716981 | 0 | 0 | 0.352289 | 0.00654 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.037736 | 0 | 0.037736 | 0.45283 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 8 |
681acecd43db748bc94add5bbaee6576efc11d20 | 1,763 | py | Python | matrx/logger/log_idle_agents.py | matrx-software/matrx | 5b36ef1018e85172dc88cd7467e3087ef94c58ba | [
"MIT"
] | 6 | 2020-03-02T10:42:34.000Z | 2021-05-16T12:21:25.000Z | matrx/logger/log_idle_agents.py | matrx-software/matrx | 5b36ef1018e85172dc88cd7467e3087ef94c58ba | [
"MIT"
] | 262 | 2020-02-27T13:37:40.000Z | 2022-03-29T11:44:57.000Z | matrx/logger/log_idle_agents.py | matrx-software/matrx | 5b36ef1018e85172dc88cd7467e3087ef94c58ba | [
"MIT"
] | 3 | 2020-02-27T12:59:22.000Z | 2021-12-10T13:53:58.000Z | from matrx.logger.logger import GridWorldLogger, GridWorldLoggerV2
class LogIdleAgents(GridWorldLogger):
    """Logs, per tick, an idle flag (1/0) for every registered agent.

    A row is only produced for ticks in which at least one agent was idle;
    fully busy ticks are not logged.
    """

    def __init__(self, log_strategy=1, save_path="", file_name_prefix="", file_extension=".csv", delimeter=";"):
        # NOTE: the misspelled parameter name `delimeter` is kept for backward
        # compatibility with existing callers; it is forwarded as `delimiter`.
        super().__init__(log_strategy=log_strategy, save_path=save_path, file_name=file_name_prefix,
                         file_extension=file_extension, delimiter=delimeter)

    def log(self, grid_world, agent_data):
        """Return a dict mapping agent id -> 1 (idle) / 0 (busy).

        An agent counts as idle when its 'current_action' property is None.
        Returns None when no agent was idle this tick, so nothing is logged.
        """
        log_statement = {
            agent_id: int(agent_obj.properties['current_action'] is None)
            for agent_id, agent_obj in grid_world.registered_agents.items()
        }

        # Only log ticks in which at least one agent was idle; the original
        # second loop over all agents reduces to this single membership test.
        return log_statement if any(log_statement.values()) else None
class LogIdleAgentsV2(GridWorldLoggerV2):
    """Logs, per tick, an idle flag (1/0) for every registered agent (V2 API).

    A row is only produced for ticks in which at least one agent was idle;
    fully busy ticks are not logged.
    """

    def __init__(self, log_strategy=1, save_path="", file_name_prefix="", file_extension=".csv", delimeter=";"):
        # NOTE: the misspelled parameter name `delimeter` is kept for backward
        # compatibility with existing callers; it is forwarded as `delimiter`.
        super().__init__(log_strategy=log_strategy, save_path=save_path, file_name=file_name_prefix,
                         file_extension=file_extension, delimiter=delimeter)

    def log(self, world_state, agent_data, grid_world):
        """Return a dict mapping agent id -> 1 (idle) / 0 (busy).

        An agent counts as idle when its 'current_action' property is None.
        Returns None when no agent was idle this tick, so nothing is logged.
        """
        log_statement = {
            agent_id: int(agent_obj.properties['current_action'] is None)
            for agent_id, agent_obj in grid_world.registered_agents.items()
        }

        # Only log ticks in which at least one agent was idle; the original
        # second loop over all agents reduces to this single membership test.
        return log_statement if any(log_statement.values()) else None
| 41.97619 | 113 | 0.660238 | 215 | 1,763 | 5.037209 | 0.251163 | 0.088643 | 0.044321 | 0.059095 | 0.838412 | 0.838412 | 0.838412 | 0.838412 | 0.838412 | 0.838412 | 0 | 0.005255 | 0.24447 | 1,763 | 41 | 114 | 43 | 0.807808 | 0.045377 | 0 | 0.814815 | 0 | 0 | 0.023342 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0.037037 | 0 | 0.407407 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
a86e0742484be93df8a984a6a6ad49e049312bf3 | 442 | py | Python | python/testData/inspections/PyStringFormatInspection/NewStyleMappingKeyWithSubscriptionFuncArgs.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2019-04-28T07:48:50.000Z | 2020-12-11T14:18:08.000Z | python/testData/inspections/PyStringFormatInspection/NewStyleMappingKeyWithSubscriptionFuncArgs.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 173 | 2018-07-05T13:59:39.000Z | 2018-08-09T01:12:03.000Z | python/testData/inspections/PyStringFormatInspection/NewStyleMappingKeyWithSubscriptionFuncArgs.py | jnthn/intellij-community | 8fa7c8a3ace62400c838e0d5926a7be106aa8557 | [
"Apache-2.0"
] | 2 | 2020-03-15T08:57:37.000Z | 2020-04-07T04:48:14.000Z | def f():
return [1, 2, 3]
"{foo[1]}".format(foo=f())
<warning descr="Too few arguments for format string">"{foo[3]}"</warning>.format(foo=f())
def g():
return 1, 2, 3
"{foo[1]:d}".format(foo=g())
<warning descr="Too few arguments for format string">"{foo[3]}"</warning>.format(foo=g())
def ff():
return g()
"{foo[1]}".format(foo=g())
<warning descr="Too few arguments for format string">"{foo[3]}"</warning>.format(foo=ff())
| 24.555556 | 90 | 0.613122 | 74 | 442 | 3.662162 | 0.243243 | 0.199262 | 0.166052 | 0.199262 | 0.856089 | 0.856089 | 0.760148 | 0.760148 | 0.760148 | 0.760148 | 0 | 0.031169 | 0.128959 | 442 | 17 | 91 | 26 | 0.672727 | 0 | 0 | 0 | 0 | 0 | 0.350679 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
4f246be5163f107a0c580138c0279d69c0565522 | 208 | py | Python | supermamas/accounts/viewmodels/__init__.py | oasalonen/supermamas | 3ab2b2370de903cea614ea9dfa10ce1c0504a715 | [
"Apache-2.0"
] | null | null | null | supermamas/accounts/viewmodels/__init__.py | oasalonen/supermamas | 3ab2b2370de903cea614ea9dfa10ce1c0504a715 | [
"Apache-2.0"
] | null | null | null | supermamas/accounts/viewmodels/__init__.py | oasalonen/supermamas | 3ab2b2370de903cea614ea9dfa10ce1c0504a715 | [
"Apache-2.0"
] | null | null | null | from supermamas.accounts.viewmodels.breadcrumbs import BubbleMamaRegistrationBreadcrumbs, HelpingMamaRegistrationBreadcrumbs
from supermamas.accounts.viewmodels.bubble_mama_list import BubbleMamaListViewModel | 104 | 124 | 0.927885 | 17 | 208 | 11.235294 | 0.705882 | 0.146597 | 0.230366 | 0.335079 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038462 | 208 | 2 | 125 | 104 | 0.955 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 1 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 9 |
4f6bcb36ce223cf1d6724552e6d94cfce3375c82 | 18,548 | py | Python | proj/hog/tests/11.py | weijiew/cs61a-sp20 | 73322b87fe40add0350e0076ad3589fbee1f28ec | [
"MIT"
] | 4 | 2020-10-15T13:39:30.000Z | 2021-06-22T09:30:16.000Z | proj/hog/tests/11.py | weijiew/cs61a-sp20 | 73322b87fe40add0350e0076ad3589fbee1f28ec | [
"MIT"
] | null | null | null | proj/hog/tests/11.py | weijiew/cs61a-sp20 | 73322b87fe40add0350e0076ad3589fbee1f28ec | [
"MIT"
] | null | null | null | test = {
'name': 'Question 11',
'points': 2,
'suites': [
{
'cases': [
{
'code': r"""
>>> swap_strategy(1, 19, 8, 6)
962aea5f59fc55bd65ccacf4603c8f22
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> swap_strategy(30, 54, 7, 6)
327b19ffebddf93982e1ad2a4a6486f4
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> swap_strategy(1, 19, 100, 6)
962aea5f59fc55bd65ccacf4603c8f22
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> swap_strategy(24, 1, 1, 6)
327b19ffebddf93982e1ad2a4a6486f4
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> swap_strategy(13, 18, 10, 6)
327b19ffebddf93982e1ad2a4a6486f4
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> swap_strategy(13, 18, 1, 6)
962aea5f59fc55bd65ccacf4603c8f22
# locked
""",
'hidden': False,
'locked': True
},
{
'code': r"""
>>> from tests.check_strategy import check_strategy
>>> check_strategy(swap_strategy)
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> from hog import *
""",
'teardown': '',
'type': 'doctest'
},
{
'cases': [
{
'code': r"""
>>> swap_strategy(9, 83, 18, 5)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(25, 50, 5, 3)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(15, 72, 11, 3)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(24, 3, 8, 1)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(46, 55, 5, 2)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(7, 76, 15, 2)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(62, 25, 1, 10)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(6, 19, 12, 6)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(14, 93, 9, 4)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(92, 54, 1, 7)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(9, 15, 18, 1)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(82, 24, 13, 1)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(27, 6, 17, 3)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(26, 66, 14, 7)
7
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(28, 10, 16, 9)
9
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(12, 5, 0, 8)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(84, 30, 12, 10)
10
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(56, 8, 18, 10)
10
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(19, 69, 17, 3)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(58, 63, 10, 5)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(78, 42, 3, 3)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(26, 44, 5, 2)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(81, 38, 19, 4)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(85, 27, 1, 9)
9
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(1, 79, 4, 4)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(62, 90, 15, 7)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(62, 6, 16, 2)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(90, 76, 7, 3)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(7, 12, 12, 2)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(24, 85, 19, 2)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(6, 62, 14, 1)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(87, 16, 2, 8)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(42, 82, 10, 5)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(7, 88, 2, 3)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(19, 93, 14, 7)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(15, 55, 1, 1)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(74, 43, 8, 9)
9
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(46, 12, 17, 9)
9
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(68, 55, 5, 4)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(94, 77, 9, 5)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(91, 7, 5, 9)
9
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(35, 12, 2, 1)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(51, 92, 14, 8)
8
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(64, 49, 16, 4)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(35, 45, 3, 1)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(14, 53, 8, 8)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(72, 32, 2, 3)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(71, 45, 17, 8)
8
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(51, 81, 4, 1)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(45, 40, 18, 6)
6
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(96, 11, 13, 2)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(57, 96, 9, 6)
6
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(28, 11, 13, 8)
8
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(29, 37, 16, 9)
9
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(69, 2, 15, 8)
8
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(94, 19, 10, 10)
10
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(33, 89, 10, 6)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(5, 78, 16, 7)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(99, 67, 0, 4)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(2, 56, 9, 9)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(15, 57, 10, 6)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(81, 38, 10, 1)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(24, 81, 9, 4)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(11, 87, 18, 1)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(38, 54, 17, 5)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(63, 40, 7, 9)
9
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(60, 51, 13, 6)
6
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(55, 56, 7, 3)
3
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(64, 10, 14, 4)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(75, 73, 11, 8)
8
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(86, 24, 0, 5)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(40, 81, 16, 6)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(19, 46, 10, 6)
6
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(18, 46, 10, 1)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(9, 28, 10, 6)
6
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(19, 81, 8, 1)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(56, 58, 9, 9)
9
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(39, 44, 19, 1)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(86, 32, 3, 7)
7
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(5, 27, 12, 10)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(24, 1, 12, 2)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(69, 8, 6, 5)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(78, 42, 1, 2)
2
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(23, 39, 0, 1)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(89, 56, 14, 1)
1
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(32, 13, 4, 4)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(96, 44, 3, 9)
9
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(77, 59, 15, 7)
7
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(71, 90, 7, 8)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(61, 64, 17, 7)
7
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(40, 87, 10, 3)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(46, 82, 14, 5)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(42, 67, 18, 7)
7
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(8, 71, 16, 9)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(50, 22, 19, 7)
7
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(41, 19, 5, 3)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(90, 38, 12, 5)
5
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(35, 51, 7, 4)
4
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(42, 52, 1, 5)
0
""",
'hidden': False,
'locked': False
},
{
'code': r"""
>>> swap_strategy(12, 96, 1, 3)
0
""",
'hidden': False,
'locked': False
}
],
'scored': True,
'setup': r"""
>>> from hog import *
""",
'teardown': '',
'type': 'doctest'
}
]
}
| 20.863892 | 61 | 0.295126 | 1,430 | 18,548 | 3.751049 | 0.07972 | 0.099739 | 0.339113 | 0.335943 | 0.910515 | 0.905108 | 0.899702 | 0.899702 | 0.899702 | 0.870992 | 0 | 0.103408 | 0.538063 | 18,548 | 888 | 62 | 20.887387 | 0.522642 | 0 | 0 | 0.516892 | 0 | 0 | 0.49752 | 0.011915 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.003378 | 0 | 0.003378 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
4f747a4afc00e0fe6e9d0c85d1ae04a841c3bac9 | 9,492 | py | Python | model/utnet.py | yhygao/UTNet | 4e9df66e5b83a33201fb3da54f6e7c121231fd04 | [
"MIT"
] | 44 | 2021-09-29T11:00:13.000Z | 2022-03-30T01:32:38.000Z | model/utnet.py | shijun18/UTNet | e4702f39a54ccb06a5fb0ea5eaee4d75f70656e5 | [
"MIT"
] | 6 | 2021-11-03T10:32:35.000Z | 2022-03-03T21:05:40.000Z | model/utnet.py | shijun18/UTNet | e4702f39a54ccb06a5fb0ea5eaee4d75f70656e5 | [
"MIT"
] | 9 | 2021-11-08T09:39:37.000Z | 2022-03-06T14:07:41.000Z | import torch
import torch.nn as nn
from .unet_utils import up_block, down_block
from .conv_trans_utils import *
import pdb
class UTNet(nn.Module):
    """U-Net-style 2D segmentation network with optional transformer blocks (UTNet).

    The encoder downsamples four times with scale ``(2, 2)``, doubling the
    channel count each time; the decoder mirrors it with skip connections.
    Every resolution level whose index appears in ``block_list`` ('0' = stem,
    '4' = coarsest) uses transformer-augmented blocks instead of plain
    convolutional ones.

    :param in_chan: number of channels of the input tensor
    :param base_chan: channel width at the finest level
    :param num_classes: number of channels of the 1x1 output head
    :param reduce_size: size the attention keys/values are reduced to
    :param block_list: string of level indices that get transformer blocks
    :param num_blocks: transformer blocks per encoder stage, indexed from the
        end (``num_blocks[-4]`` for level 1 up to ``num_blocks[-1]`` for level 4)
    :param projection: projection mode forwarded to the attention blocks
    :param num_heads: attention heads per level, indexed from the end.
        NOTE(review): enabling level '0' or '1' indexes ``num_heads[-5]`` /
        ``num_heads[-4]``, which needs a longer sequence than the 3-element
        default — confirm callers pass one in that case.
    :param attn_drop: dropout rate applied to the attention map
    :param proj_drop: dropout rate applied to the attention output projection
    :param bottleneck: use the bottleneck variant of the transformer blocks
    :param maxpool: downsample with max-pooling instead of strided convolution
    :param rel_pos: use relative positional encoding in the attention blocks
    :param aux_loss: if True, ``forward`` additionally returns three
        deep-supervision outputs upsampled to the input resolution
    """

    def __init__(self, in_chan, base_chan, num_classes=1, reduce_size=8, block_list='234',
                 num_blocks=(1, 2, 4), projection='interp', num_heads=(2, 4, 8),
                 attn_drop=0., proj_drop=0., bottleneck=False, maxpool=True,
                 rel_pos=True, aux_loss=False):
        # Defaults are tuples rather than lists: mutable default arguments are
        # shared across calls in Python; tuples index identically and are safe.
        super().__init__()

        self.aux_loss = aux_loss

        # Stem (level 0): one conv block plus either a transformer block or a
        # second conv block; the matching decoder stage up4 is chosen alike.
        self.inc = [BasicBlock(in_chan, base_chan)]
        if '0' in block_list:
            self.inc.append(BasicTransBlock(base_chan, heads=num_heads[-5], dim_head=base_chan//num_heads[-5], attn_drop=attn_drop, proj_drop=proj_drop, reduce_size=reduce_size, projection=projection, rel_pos=rel_pos))
            self.up4 = up_block_trans(2*base_chan, base_chan, num_block=0, bottleneck=bottleneck, heads=num_heads[-4], dim_head=base_chan//num_heads[-4], attn_drop=attn_drop, proj_drop=proj_drop, reduce_size=reduce_size, projection=projection, rel_pos=rel_pos)
        else:
            self.inc.append(BasicBlock(base_chan, base_chan))
            self.up4 = up_block(2*base_chan, base_chan, scale=(2,2), num_block=2)
        self.inc = nn.Sequential(*self.inc)

        # Level 1: base_chan -> 2*base_chan, with matching decoder stage up3.
        if '1' in block_list:
            self.down1 = down_block_trans(base_chan, 2*base_chan, num_block=num_blocks[-4], bottleneck=bottleneck, maxpool=maxpool, heads=num_heads[-4], dim_head=2*base_chan//num_heads[-4], attn_drop=attn_drop, proj_drop=proj_drop, reduce_size=reduce_size, projection=projection, rel_pos=rel_pos)
            self.up3 = up_block_trans(4*base_chan, 2*base_chan, num_block=0, bottleneck=bottleneck, heads=num_heads[-3], dim_head=2*base_chan//num_heads[-3], attn_drop=attn_drop, proj_drop=proj_drop, reduce_size=reduce_size, projection=projection, rel_pos=rel_pos)
        else:
            self.down1 = down_block(base_chan, 2*base_chan, (2,2), num_block=2)
            self.up3 = up_block(4*base_chan, 2*base_chan, scale=(2,2), num_block=2)

        # Level 2: 2*base_chan -> 4*base_chan, with matching decoder stage up2.
        if '2' in block_list:
            self.down2 = down_block_trans(2*base_chan, 4*base_chan, num_block=num_blocks[-3], bottleneck=bottleneck, maxpool=maxpool, heads=num_heads[-3], dim_head=4*base_chan//num_heads[-3], attn_drop=attn_drop, proj_drop=proj_drop, reduce_size=reduce_size, projection=projection, rel_pos=rel_pos)
            self.up2 = up_block_trans(8*base_chan, 4*base_chan, num_block=0, bottleneck=bottleneck, heads=num_heads[-2], dim_head=4*base_chan//num_heads[-2], attn_drop=attn_drop, proj_drop=proj_drop, reduce_size=reduce_size, projection=projection, rel_pos=rel_pos)
        else:
            self.down2 = down_block(2*base_chan, 4*base_chan, (2, 2), num_block=2)
            self.up2 = up_block(8*base_chan, 4*base_chan, scale=(2,2), num_block=2)

        # Level 3: 4*base_chan -> 8*base_chan, with matching decoder stage up1.
        if '3' in block_list:
            self.down3 = down_block_trans(4*base_chan, 8*base_chan, num_block=num_blocks[-2], bottleneck=bottleneck, maxpool=maxpool, heads=num_heads[-2], dim_head=8*base_chan//num_heads[-2], attn_drop=attn_drop, proj_drop=proj_drop, reduce_size=reduce_size, projection=projection, rel_pos=rel_pos)
            self.up1 = up_block_trans(16*base_chan, 8*base_chan, num_block=0, bottleneck=bottleneck, heads=num_heads[-1], dim_head=8*base_chan//num_heads[-1], attn_drop=attn_drop, proj_drop=proj_drop, reduce_size=reduce_size, projection=projection, rel_pos=rel_pos)
        else:
            self.down3 = down_block(4*base_chan, 8*base_chan, (2,2), num_block=2)
            self.up1 = up_block(16*base_chan, 8*base_chan, scale=(2,2), num_block=2)

        # Level 4 (bottleneck of the U): 8*base_chan -> 16*base_chan.
        if '4' in block_list:
            self.down4 = down_block_trans(8*base_chan, 16*base_chan, num_block=num_blocks[-1], bottleneck=bottleneck, maxpool=maxpool, heads=num_heads[-1], dim_head=16*base_chan//num_heads[-1], attn_drop=attn_drop, proj_drop=proj_drop, reduce_size=reduce_size, projection=projection, rel_pos=rel_pos)
        else:
            self.down4 = down_block(8*base_chan, 16*base_chan, (2,2), num_block=2)

        # 1x1 prediction head; with aux_loss, extra heads tap coarser stages.
        self.outc = nn.Conv2d(base_chan, num_classes, kernel_size=1, bias=True)
        if aux_loss:
            self.out1 = nn.Conv2d(8*base_chan, num_classes, kernel_size=1, bias=True)
            self.out2 = nn.Conv2d(4*base_chan, num_classes, kernel_size=1, bias=True)
            self.out3 = nn.Conv2d(2*base_chan, num_classes, kernel_size=1, bias=True)

    def forward(self, x):
        """Run encoder and decoder; return the segmentation logits.

        With ``aux_loss`` enabled, returns ``(out, out3, out2, out1)`` where
        the auxiliary outputs tap progressively coarser decoder stages and are
        bilinearly upsampled to the spatial size of ``x``.
        """
        # Encoder: x1 (finest) ... x5 (coarsest).
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)

        if self.aux_loss:
            # NOTE(review): `F` is not imported in this module's visible
            # imports; presumably it comes from the star import of
            # conv_trans_utils — confirm.
            out = self.up1(x5, x4)
            out1 = F.interpolate(self.out1(out), size=x.shape[-2:], mode='bilinear', align_corners=True)

            out = self.up2(out, x3)
            out2 = F.interpolate(self.out2(out), size=x.shape[-2:], mode='bilinear', align_corners=True)

            out = self.up3(out, x2)
            out3 = F.interpolate(self.out3(out), size=x.shape[-2:], mode='bilinear', align_corners=True)

            out = self.up4(out, x1)
            out = self.outc(out)

            return out, out3, out2, out1
        else:
            # Plain decoder path with skip connections.
            out = self.up1(x5, x4)
            out = self.up2(out, x3)
            out = self.up3(out, x2)
            out = self.up4(out, x1)
            out = self.outc(out)

            return out
class UTNet_Encoderonly(nn.Module):
    """UTNet variant with transformer blocks only in the encoder.

    Same encoder as :class:`UTNet`; despite the name the network still has a
    full decoder, but it is purely convolutional (``up_block`` at every stage).

    :param in_chan: number of channels of the input tensor
    :param base_chan: channel width at the finest level
    :param num_classes: number of channels of the 1x1 output head
    :param reduce_size: size the attention keys/values are reduced to
    :param block_list: string of level indices that get transformer blocks
    :param num_blocks: transformer blocks per encoder stage, indexed from the
        end (``num_blocks[-4]`` for level 1 up to ``num_blocks[-1]`` for level 4)
    :param projection: projection mode forwarded to the attention blocks
    :param num_heads: attention heads per level, indexed from the end.
        NOTE(review): enabling level '0' or '1' indexes ``num_heads[-5]`` /
        ``num_heads[-4]``, which needs a longer sequence than the 3-element
        default — confirm callers pass one in that case.
    :param attn_drop: dropout rate applied to the attention map
    :param proj_drop: dropout rate applied to the attention output projection
    :param bottleneck: use the bottleneck variant of the transformer blocks
    :param maxpool: downsample with max-pooling instead of strided convolution
    :param rel_pos: use relative positional encoding in the attention blocks
    :param aux_loss: if True, ``forward`` additionally returns three
        deep-supervision outputs upsampled to the input resolution
    """

    def __init__(self, in_chan, base_chan, num_classes=1, reduce_size=8, block_list='234',
                 num_blocks=(1, 2, 4), projection='interp', num_heads=(2, 4, 8),
                 attn_drop=0., proj_drop=0., bottleneck=False, maxpool=True,
                 rel_pos=True, aux_loss=False):
        # Defaults are tuples rather than lists: mutable default arguments are
        # shared across calls in Python; tuples index identically and are safe.
        super().__init__()

        self.aux_loss = aux_loss

        # Stem (level 0): conv block plus either a transformer or a conv block.
        self.inc = [BasicBlock(in_chan, base_chan)]
        if '0' in block_list:
            self.inc.append(BasicTransBlock(base_chan, heads=num_heads[-5], dim_head=base_chan//num_heads[-5], attn_drop=attn_drop, proj_drop=proj_drop, reduce_size=reduce_size, projection=projection, rel_pos=rel_pos))
        else:
            self.inc.append(BasicBlock(base_chan, base_chan))
        self.inc = nn.Sequential(*self.inc)

        # Encoder levels 1-4: double the channels at each downsampling stage;
        # transformer blocks only where block_list enables them.
        if '1' in block_list:
            self.down1 = down_block_trans(base_chan, 2*base_chan, num_block=num_blocks[-4], bottleneck=bottleneck, maxpool=maxpool, heads=num_heads[-4], dim_head=2*base_chan//num_heads[-4], attn_drop=attn_drop, proj_drop=proj_drop, reduce_size=reduce_size, projection=projection, rel_pos=rel_pos)
        else:
            self.down1 = down_block(base_chan, 2*base_chan, (2,2), num_block=2)

        if '2' in block_list:
            self.down2 = down_block_trans(2*base_chan, 4*base_chan, num_block=num_blocks[-3], bottleneck=bottleneck, maxpool=maxpool, heads=num_heads[-3], dim_head=4*base_chan//num_heads[-3], attn_drop=attn_drop, proj_drop=proj_drop, reduce_size=reduce_size, projection=projection, rel_pos=rel_pos)
        else:
            self.down2 = down_block(2*base_chan, 4*base_chan, (2, 2), num_block=2)

        if '3' in block_list:
            self.down3 = down_block_trans(4*base_chan, 8*base_chan, num_block=num_blocks[-2], bottleneck=bottleneck, maxpool=maxpool, heads=num_heads[-2], dim_head=8*base_chan//num_heads[-2], attn_drop=attn_drop, proj_drop=proj_drop, reduce_size=reduce_size, projection=projection, rel_pos=rel_pos)
        else:
            self.down3 = down_block(4*base_chan, 8*base_chan, (2,2), num_block=2)

        if '4' in block_list:
            self.down4 = down_block_trans(8*base_chan, 16*base_chan, num_block=num_blocks[-1], bottleneck=bottleneck, maxpool=maxpool, heads=num_heads[-1], dim_head=16*base_chan//num_heads[-1], attn_drop=attn_drop, proj_drop=proj_drop, reduce_size=reduce_size, projection=projection, rel_pos=rel_pos)
        else:
            self.down4 = down_block(8*base_chan, 16*base_chan, (2,2), num_block=2)

        # Purely convolutional decoder with skip connections.
        self.up1 = up_block(16*base_chan, 8*base_chan, scale=(2,2), num_block=2)
        self.up2 = up_block(8*base_chan, 4*base_chan, scale=(2,2), num_block=2)
        self.up3 = up_block(4*base_chan, 2*base_chan, scale=(2,2), num_block=2)
        self.up4 = up_block(2*base_chan, base_chan, scale=(2,2), num_block=2)

        # 1x1 prediction head; with aux_loss, extra heads tap coarser stages.
        self.outc = nn.Conv2d(base_chan, num_classes, kernel_size=1, bias=True)
        if aux_loss:
            self.out1 = nn.Conv2d(8*base_chan, num_classes, kernel_size=1, bias=True)
            self.out2 = nn.Conv2d(4*base_chan, num_classes, kernel_size=1, bias=True)
            self.out3 = nn.Conv2d(2*base_chan, num_classes, kernel_size=1, bias=True)

    def forward(self, x):
        """Run encoder and decoder; return the segmentation logits.

        With ``aux_loss`` enabled, returns ``(out, out3, out2, out1)`` where
        the auxiliary outputs tap progressively coarser decoder stages and are
        bilinearly upsampled to the spatial size of ``x``.
        """
        # Encoder: x1 (finest) ... x5 (coarsest).
        x1 = self.inc(x)
        x2 = self.down1(x1)
        x3 = self.down2(x2)
        x4 = self.down3(x3)
        x5 = self.down4(x4)

        if self.aux_loss:
            # NOTE(review): `F` is not imported in this module's visible
            # imports; presumably it comes from the star import of
            # conv_trans_utils — confirm.
            out = self.up1(x5, x4)
            out1 = F.interpolate(self.out1(out), size=x.shape[-2:], mode='bilinear', align_corners=True)

            out = self.up2(out, x3)
            out2 = F.interpolate(self.out2(out), size=x.shape[-2:], mode='bilinear', align_corners=True)

            out = self.up3(out, x2)
            out3 = F.interpolate(self.out3(out), size=x.shape[-2:], mode='bilinear', align_corners=True)

            out = self.up4(out, x1)
            out = self.outc(out)

            return out, out3, out2, out1
        else:
            # Plain decoder path with skip connections.
            out = self.up1(x5, x4)
            out = self.up2(out, x3)
            out = self.up3(out, x2)
            out = self.up4(out, x1)
            out = self.outc(out)

            return out
| 49.4375 | 300 | 0.657817 | 1,503 | 9,492 | 3.886893 | 0.062542 | 0.120507 | 0.067785 | 0.027388 | 0.973126 | 0.96645 | 0.963882 | 0.955152 | 0.955152 | 0.955152 | 0 | 0.044578 | 0.208281 | 9,492 | 191 | 301 | 49.696335 | 0.732801 | 0 | 0 | 0.912 | 0 | 0 | 0.008008 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032 | false | 0 | 0.04 | 0 | 0.12 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
4fc20bdb0c86df46c91f88497ec588d6bb1e69da | 34 | py | Python | tests/test_empty.py | jmwri/runehistory-cli | 67b1c56a0c6f1eeb2c2bbc55bb5e70c661bba74d | [
"MIT"
] | null | null | null | tests/test_empty.py | jmwri/runehistory-cli | 67b1c56a0c6f1eeb2c2bbc55bb5e70c661bba74d | [
"MIT"
] | 3 | 2018-06-15T07:40:10.000Z | 2018-07-11T12:45:02.000Z | tests/test_empty.py | jmwri/runehistory-cli | 67b1c56a0c6f1eeb2c2bbc55bb5e70c661bba74d | [
"MIT"
] | null | null | null | def test_empty():
return True
| 11.333333 | 17 | 0.676471 | 5 | 34 | 4.4 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.235294 | 34 | 2 | 18 | 17 | 0.846154 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 0 | 0.5 | 1 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
4fcb35f9d1424452b9afd8cd2ad63f0ae888eb82 | 9,229 | py | Python | src/genesis_api_wrapper/metadata.py | j-suchard/destatis-genesis-api | b0bbec0283ba41707d543b11e556ab0997e5f792 | [
"BSD-3-Clause"
] | null | null | null | src/genesis_api_wrapper/metadata.py | j-suchard/destatis-genesis-api | b0bbec0283ba41707d543b11e556ab0997e5f792 | [
"BSD-3-Clause"
] | null | null | null | src/genesis_api_wrapper/metadata.py | j-suchard/destatis-genesis-api | b0bbec0283ba41707d543b11e556ab0997e5f792 | [
"BSD-3-Clause"
] | null | null | null | from . import enums, tools
class MetadataAPIWrapper:
def __init__(
self, username: str, password: str, language: enums.Language = enums.Language.GERMAN
):
"""Create a new HelloWorldAPIWrapper method wrapper
:param username: The username which will be used for authenticating at the database. Due
to constraints of the database the username needs to be exactly 10 characters long and
may not contain any whitespaces
:type username: str
:param password: The password which will be used for authenticating at the database. Due
to constraints of the database the password needs to be at least 10 characters long,
may not exceed 20 characters and may not contain any whitespaces
:type password: str
:param language: The language in which the responses are returned by the database.
:py:enum:mem:`~enums.Language.GERMAN` has the most compatibility with the database
since most of the tables are on German. Therefore, this parameter defaults to
:py:enum:mem:`~enums.Language.GERMAN`
:type language: enums.Language
:raise ValueError: The username or the password did not match the constraints stated in
their description.
"""
if " " in username:
raise ValueError("The username may not contain any whitespaces")
if len(username) != 10:
raise ValueError("The username may only be 10 characters long")
if " " in password:
raise ValueError("The password may not contain any whitespaces")
if len(password) < 10:
raise ValueError(
f"The password may not be shorter than 10 characters. Current "
f"length: {len(password)}"
)
if len(password) > 20:
raise ValueError(
f"The password may not be longer that 20 characters. Current "
f"length: {len(password)}"
)
self._username = username
self._password = password
self._language = language
self._service_url = "/metadata"
self._base_parameter = {
"username": self._username,
"password": self._password,
"language": self._language.value,
}
async def cube(
self, object_name: str, storage_location: enums.ObjectStorage = enums.ObjectStorage.ALL
) -> dict:
"""
Get metadata about a data cube
:param object_name: The object's identification code
:type object_name: str
:param storage_location: The storage location of the object, defaults to
:py:enum:mem:`enums.ObjectStorage.ALL`
:type storage_location: enums.ObjectStorage
:return: The response from the database
:rtype: dict
"""
if not object_name:
raise ValueError("The object_name is a required parameter")
if not (1 <= len(object_name.strip()) <= 15):
raise ValueError("The object_name may only contain between 1 and 15 characters")
# Build the query parameter
query_parameter = self._base_parameter | {
"name": object_name,
"area": storage_location.value,
}
# Build the query path
query_path = self._service_url + "/cube"
# Get the response
return await tools.get_database_response(query_path, query_parameter)
async def statistic(
self, object_name: str, storage_location: enums.ObjectStorage = enums.ObjectStorage.ALL
) -> dict:
"""
Get metadata about a statistic
:param object_name: The object's identification code
:type object_name: str
:param storage_location: The storage location of the object, defaults to
:py:enum:mem:`enums.ObjectStorage.ALL`
:type storage_location: enums.ObjectStorage
:return: The response from the database
:rtype: dict
"""
if not object_name:
raise ValueError("The object_name is a required parameter")
if not (1 <= len(object_name.strip()) <= 15):
raise ValueError("The object_name may only contain between 1 and 15 characters")
# Build the query parameter
query_parameter = self._base_parameter | {
"name": object_name,
"area": storage_location.value,
}
# Build the query path
query_path = self._service_url + "/statistic"
# Get the response
return await tools.get_database_response(query_path, query_parameter)
async def table(
self, object_name: str, storage_location: enums.ObjectStorage = enums.ObjectStorage.ALL
) -> dict:
"""
Get metadata about a table
:param object_name: The object's identification code
:type object_name: str
:param storage_location: The storage location of the object, defaults to
:py:enum:mem:`enums.ObjectStorage.ALL`
:type storage_location: enums.ObjectStorage
:return: The response from the database
:rtype: dict
"""
if not object_name:
raise ValueError("The object_name is a required parameter")
if not (1 <= len(object_name.strip()) <= 15):
raise ValueError("The object_name may only contain between 1 and 15 characters")
# Build the query parameter
query_parameter = self._base_parameter | {
"name": object_name,
"area": storage_location.value,
}
# Build the query path
query_path = self._service_url + "/table"
# Get the response
return await tools.get_database_response(query_path, query_parameter)
async def timeseries(
self, object_name: str, storage_location: enums.ObjectStorage = enums.ObjectStorage.ALL
) -> dict:
"""
Get metadata about a table
:param object_name: The object's identification code
:type object_name: str
:param storage_location: The storage location of the object, defaults to
:py:enum:mem:`enums.ObjectStorage.ALL`
:type storage_location: enums.ObjectStorage
:return: The response from the database
:rtype: dict
"""
if not object_name:
raise ValueError("The object_name is a required parameter")
if not (1 <= len(object_name.strip()) <= 15):
raise ValueError("The object_name may only contain between 1 and 15 characters")
# Build the query parameter
query_parameter = self._base_parameter | {
"name": object_name,
"area": storage_location.value,
}
# Build the query path
query_path = self._service_url + "/timeseries"
# Get the response
return await tools.get_database_response(query_path, query_parameter)
async def value(
    self, object_name: str, storage_location: enums.ObjectStorage = enums.ObjectStorage.ALL
) -> dict:
    """
    Get metadata about a value

    :param object_name: The object's identification code
    :type object_name: str
    :param storage_location: The storage location of the object, defaults to
        :py:enum:mem:`enums.ObjectStorage.ALL`
    :type storage_location: enums.ObjectStorage
    :return: The response from the database
    :rtype: dict
    """
    if not object_name:
        raise ValueError("The object_name is a required parameter")
    # NOTE(review): the length check runs on the stripped name, but the
    # unstripped value is sent to the service — confirm surrounding
    # whitespace is acceptable to the backend
    if not (1 <= len(object_name.strip()) <= 15):
        raise ValueError("The object_name may only contain between 1 and 15 characters")
    # Build the query parameter
    query_parameter = self._base_parameter | {
        "name": object_name,
        "area": storage_location.value,
    }
    # Build the query path
    query_path = self._service_url + "/value"
    # Get the response
    return await tools.get_database_response(query_path, query_parameter)
async def variable(
    self, object_name: str, storage_location: enums.ObjectStorage = enums.ObjectStorage.ALL
) -> dict:
    """
    Get metadata about a variable

    :param object_name: The object's identification code
    :type object_name: str
    :param storage_location: The storage location of the object, defaults to
        :py:enum:mem:`enums.ObjectStorage.ALL`
    :type storage_location: enums.ObjectStorage
    :return: The response from the database
    :rtype: dict
    """
    if not object_name:
        raise ValueError("The object_name is a required parameter")
    # NOTE(review): the length check runs on the stripped name, but the
    # unstripped value is sent to the service — confirm surrounding
    # whitespace is acceptable to the backend
    if not (1 <= len(object_name.strip()) <= 15):
        raise ValueError("The object_name may only contain between 1 and 15 characters")
    # Build the query parameter
    query_parameter = self._base_parameter | {
        "name": object_name,
        "area": storage_location.value,
    }
    # Build the query path
    query_path = self._service_url + "/variable"
    # Get the response
    return await tools.get_database_response(query_path, query_parameter)
| 42.141553 | 98 | 0.630187 | 1,087 | 9,229 | 5.216191 | 0.114075 | 0.084656 | 0.050794 | 0.069841 | 0.822399 | 0.812698 | 0.788713 | 0.765432 | 0.753086 | 0.753086 | 0 | 0.008303 | 0.295265 | 9,229 | 218 | 99 | 42.334862 | 0.863469 | 0.14877 | 0 | 0.592593 | 0 | 0 | 0.191729 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009259 | false | 0.101852 | 0.009259 | 0 | 0.083333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 7 |
4fe9c15dd5ad81773451f18b8078af06dbd56117 | 113 | py | Python | src/model.py | herr-schmidt/pyomo-sudoku | 66f46f28694dabf9e13ace821f5d4dd478e491af | [
"MIT"
] | null | null | null | src/model.py | herr-schmidt/pyomo-sudoku | 66f46f28694dabf9e13ace821f5d4dd478e491af | [
"MIT"
] | null | null | null | src/model.py | herr-schmidt/pyomo-sudoku | 66f46f28694dabf9e13ace821f5d4dd478e491af | [
"MIT"
] | null | null | null | class Model():
def __init__(self):
    """Create an empty 9x9 Sudoku grid, every cell initialised to 0."""
    # Each row is built by the comprehension, so the nine rows are
    # independent lists (multiplying [0] only repeats immutable ints).
    self.sudokuGrid = [[0] * 9 for _ in range(9)]
| 28.25 | 73 | 0.584071 | 20 | 113 | 3.1 | 0.65 | 0.225806 | 0.258065 | 0.290323 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.059524 | 0.256637 | 113 | 3 | 74 | 37.666667 | 0.678571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 7 |
8b29ae36c4367124f22510980abae7e49ac87362 | 1,758 | py | Python | src/pykeen/cli/utils/__init__.py | dertilo/PyKEEN | d420aed7cd10fc883b70fcd4c920e8edec7fb6ce | [
"MIT"
] | 1 | 2019-05-31T18:00:36.000Z | 2019-05-31T18:00:36.000Z | src/pykeen/cli/utils/__init__.py | dertilo/PyKEEN | d420aed7cd10fc883b70fcd4c920e8edec7fb6ce | [
"MIT"
] | null | null | null | src/pykeen/cli/utils/__init__.py | dertilo/PyKEEN | d420aed7cd10fc883b70fcd4c920e8edec7fb6ce | [
"MIT"
] | 1 | 2019-06-05T09:10:06.000Z | 2019-06-05T09:10:06.000Z | # -*- coding: utf-8 -*-
"""Utilities for the command line interface."""
from .conv_e_cli import configure_conv_e_hpo_pipeline, configure_conv_e_training_pipeline
from .distmult_cli import configure_distmult_hpo_pipeline, configure_distmult_training_pipeline
from .ermlp_cli import configure_ermlp_hpo_pipeline, configure_ermlp_training_pipeline
from .rescal_cli import configure_rescal_hpo_pipeline, configure_rescal_training_pipeline
from .structured_embedding_cli import configure_se_hpo_pipeline, configure_se_training_pipeline
from .trans_d_cli import configure_trans_d_hpo_pipeline, configure_trans_d_training_pipeline
from .trans_e_cli import configure_trans_e_hpo_pipeline, configure_trans_e_training_pipeline
from .trans_h_cli import configure_trans_h_hpo_pipeline, configure_trans_h_training_pipeline
from .trans_r_cli import configure_trans_r_hpo_pipeline, configure_trans_r_training_pipeline
from .unstructured_model_cli import configure_um_hpo_pipeline, configure_um_training_pipeline
# Public API of this package: one HPO-pipeline and one training-pipeline
# configurator per supported model, re-exported from the per-model CLI modules.
__all__ = [
    'configure_conv_e_hpo_pipeline', 'configure_conv_e_training_pipeline',
    'configure_distmult_hpo_pipeline', 'configure_distmult_training_pipeline',
    'configure_ermlp_hpo_pipeline', 'configure_ermlp_training_pipeline',
    'configure_rescal_hpo_pipeline', 'configure_rescal_training_pipeline',
    'configure_se_hpo_pipeline', 'configure_se_training_pipeline',
    'configure_trans_d_hpo_pipeline', 'configure_trans_d_training_pipeline',
    'configure_trans_e_hpo_pipeline', 'configure_trans_e_training_pipeline',
    'configure_trans_h_hpo_pipeline', 'configure_trans_h_training_pipeline',
    'configure_trans_r_hpo_pipeline', 'configure_trans_r_training_pipeline',
    'configure_um_hpo_pipeline', 'configure_um_training_pipeline',
]
| 62.785714 | 95 | 0.870876 | 237 | 1,758 | 5.780591 | 0.135021 | 0.359854 | 0.291971 | 0.145985 | 0.808759 | 0.808759 | 0.808759 | 0.808759 | 0.413139 | 0.413139 | 0 | 0.000614 | 0.07281 | 1,758 | 27 | 96 | 65.111111 | 0.839877 | 0.036405 | 0 | 0 | 0 | 0 | 0.369668 | 0.369668 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.454545 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
8c97fcdfe79d16336e1009d3decff9f571b77b2e | 22,483 | py | Python | v6.0.5/system_dhcp/test_fortios_system_dhcp_server.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 14 | 2018-09-25T20:35:25.000Z | 2021-07-14T04:30:54.000Z | v6.0.6/system_dhcp/test_fortios_system_dhcp_server.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 32 | 2018-10-09T04:13:42.000Z | 2020-05-11T07:20:28.000Z | v6.0.5/system_dhcp/test_fortios_system_dhcp_server.py | fortinet-solutions-cse/ansible_fgt_modules | c45fba49258d7c9705e7a8fd9c2a09ea4c8a4719 | [
"Apache-2.0"
] | 11 | 2018-10-09T00:14:53.000Z | 2021-11-03T10:54:09.000Z | # Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <https://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
from mock import ANY
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
try:
from ansible.modules.network.fortios import fortios_system_dhcp_server
except ImportError:
pytest.skip("Could not load required modules for testing", allow_module_level=True)
@pytest.fixture(autouse=True)
def connection_mock(mocker):
    """Patch the Connection class of the module under test for every test."""
    return mocker.patch('ansible.modules.network.fortios.fortios_system_dhcp_server.Connection')


# NOTE(review): FortiOSHandler receives the fixture *function* here, not a
# per-test mock instance — confirm this module-level wiring is intended.
fos_instance = FortiOSHandler(connection_mock)
def test_system_dhcp_server_creation(mocker):
    """'state: present' must POST the hyphenated payload and report a change on success."""
    mock_schema = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    mock_set = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_result)

    # Playbook-style option names (underscores), exactly as the module receives them.
    server_settings = {
        'auto_configuration': 'disable',
        'conflicted_ip_timeout': '4',
        'ddns_auth': 'disable',
        'ddns_key': 'test_value_6',
        'ddns_keyname': 'test_value_7',
        'ddns_server_ip': 'test_value_8',
        'ddns_ttl': '9',
        'ddns_update': 'disable',
        'ddns_update_override': 'disable',
        'ddns_zone': 'test_value_12',
        'default_gateway': 'test_value_13',
        'dns_server1': 'test_value_14',
        'dns_server2': 'test_value_15',
        'dns_server3': 'test_value_16',
        'dns_service': 'local',
        'domain': 'test_value_18',
        'filename': 'test_value_19',
        'forticlient_on_net_status': 'disable',
        'id': '21',
        'interface': 'test_value_22',
        'ip_mode': 'range',
        'ipsec_lease_hold': '24',
        'lease_time': '25',
        'mac_acl_default_action': 'assign',
        'netmask': 'test_value_27',
        'next_server': 'test_value_28',
        'ntp_server1': 'test_value_29',
        'ntp_server2': 'test_value_30',
        'ntp_server3': 'test_value_31',
        'ntp_service': 'local',
        'server_type': 'regular',
        'status': 'disable',
        'timezone': '01',
        'timezone_option': 'disable',
        'vci_match': 'disable',
        'wifi_ac1': 'test_value_38',
        'wifi_ac2': 'test_value_39',
        'wifi_ac3': 'test_value_40',
        'wins_server1': 'test_value_41',
        'wins_server2': 'test_value_42'
    }
    task_input = {
        'username': 'admin',
        'state': 'present',
        'system_dhcp_server': server_settings,
        'vdom': 'root'}

    is_error, changed, response = fortios_system_dhcp_server.fortios_system_dhcp(task_input, fos_instance)

    # The module renames every option from underscore to hyphen form before
    # sending it to FortiOS; the values themselves pass through unchanged.
    expected_data = {name.replace('_', '-'): value for name, value in server_settings.items()}

    mock_set.assert_called_with('system.dhcp', 'server', data=expected_data, vdom='root')
    mock_schema.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_system_dhcp_server_creation_fails(mocker):
    """A failing POST (HTTP 500) must surface an error and report no change."""
    mock_schema = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    mock_set = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_result)

    # Playbook-style option names (underscores), exactly as the module receives them.
    server_settings = {
        'auto_configuration': 'disable',
        'conflicted_ip_timeout': '4',
        'ddns_auth': 'disable',
        'ddns_key': 'test_value_6',
        'ddns_keyname': 'test_value_7',
        'ddns_server_ip': 'test_value_8',
        'ddns_ttl': '9',
        'ddns_update': 'disable',
        'ddns_update_override': 'disable',
        'ddns_zone': 'test_value_12',
        'default_gateway': 'test_value_13',
        'dns_server1': 'test_value_14',
        'dns_server2': 'test_value_15',
        'dns_server3': 'test_value_16',
        'dns_service': 'local',
        'domain': 'test_value_18',
        'filename': 'test_value_19',
        'forticlient_on_net_status': 'disable',
        'id': '21',
        'interface': 'test_value_22',
        'ip_mode': 'range',
        'ipsec_lease_hold': '24',
        'lease_time': '25',
        'mac_acl_default_action': 'assign',
        'netmask': 'test_value_27',
        'next_server': 'test_value_28',
        'ntp_server1': 'test_value_29',
        'ntp_server2': 'test_value_30',
        'ntp_server3': 'test_value_31',
        'ntp_service': 'local',
        'server_type': 'regular',
        'status': 'disable',
        'timezone': '01',
        'timezone_option': 'disable',
        'vci_match': 'disable',
        'wifi_ac1': 'test_value_38',
        'wifi_ac2': 'test_value_39',
        'wifi_ac3': 'test_value_40',
        'wins_server1': 'test_value_41',
        'wins_server2': 'test_value_42'
    }
    task_input = {
        'username': 'admin',
        'state': 'present',
        'system_dhcp_server': server_settings,
        'vdom': 'root'}

    is_error, changed, response = fortios_system_dhcp_server.fortios_system_dhcp(task_input, fos_instance)

    # The module renames every option from underscore to hyphen form before
    # sending it to FortiOS; the values themselves pass through unchanged.
    expected_data = {name.replace('_', '-'): value for name, value in server_settings.items()}

    mock_set.assert_called_with('system.dhcp', 'server', data=expected_data, vdom='root')
    mock_schema.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_dhcp_server_removal(mocker):
    """'state: absent' must issue a DELETE and report a change on success."""
    mock_schema = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    mock_delete = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_result)

    # Playbook-style option names (underscores), exactly as the module receives them.
    server_settings = {
        'auto_configuration': 'disable',
        'conflicted_ip_timeout': '4',
        'ddns_auth': 'disable',
        'ddns_key': 'test_value_6',
        'ddns_keyname': 'test_value_7',
        'ddns_server_ip': 'test_value_8',
        'ddns_ttl': '9',
        'ddns_update': 'disable',
        'ddns_update_override': 'disable',
        'ddns_zone': 'test_value_12',
        'default_gateway': 'test_value_13',
        'dns_server1': 'test_value_14',
        'dns_server2': 'test_value_15',
        'dns_server3': 'test_value_16',
        'dns_service': 'local',
        'domain': 'test_value_18',
        'filename': 'test_value_19',
        'forticlient_on_net_status': 'disable',
        'id': '21',
        'interface': 'test_value_22',
        'ip_mode': 'range',
        'ipsec_lease_hold': '24',
        'lease_time': '25',
        'mac_acl_default_action': 'assign',
        'netmask': 'test_value_27',
        'next_server': 'test_value_28',
        'ntp_server1': 'test_value_29',
        'ntp_server2': 'test_value_30',
        'ntp_server3': 'test_value_31',
        'ntp_service': 'local',
        'server_type': 'regular',
        'status': 'disable',
        'timezone': '01',
        'timezone_option': 'disable',
        'vci_match': 'disable',
        'wifi_ac1': 'test_value_38',
        'wifi_ac2': 'test_value_39',
        'wifi_ac3': 'test_value_40',
        'wins_server1': 'test_value_41',
        'wins_server2': 'test_value_42'
    }
    task_input = {
        'username': 'admin',
        'state': 'absent',
        'system_dhcp_server': server_settings,
        'vdom': 'root'}

    is_error, changed, response = fortios_system_dhcp_server.fortios_system_dhcp(task_input, fos_instance)

    # The mkey is derived inside the module, so only its presence is checked.
    mock_delete.assert_called_with('system.dhcp', 'server', mkey=ANY, vdom='root')
    mock_schema.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
def test_system_dhcp_server_deletion_fails(mocker):
    """A failing DELETE (HTTP 500) must surface an error and report no change."""
    mock_schema = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    delete_result = {'status': 'error', 'http_method': 'POST', 'http_status': 500}
    mock_delete = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.delete', return_value=delete_result)

    # Playbook-style option names (underscores), exactly as the module receives them.
    server_settings = {
        'auto_configuration': 'disable',
        'conflicted_ip_timeout': '4',
        'ddns_auth': 'disable',
        'ddns_key': 'test_value_6',
        'ddns_keyname': 'test_value_7',
        'ddns_server_ip': 'test_value_8',
        'ddns_ttl': '9',
        'ddns_update': 'disable',
        'ddns_update_override': 'disable',
        'ddns_zone': 'test_value_12',
        'default_gateway': 'test_value_13',
        'dns_server1': 'test_value_14',
        'dns_server2': 'test_value_15',
        'dns_server3': 'test_value_16',
        'dns_service': 'local',
        'domain': 'test_value_18',
        'filename': 'test_value_19',
        'forticlient_on_net_status': 'disable',
        'id': '21',
        'interface': 'test_value_22',
        'ip_mode': 'range',
        'ipsec_lease_hold': '24',
        'lease_time': '25',
        'mac_acl_default_action': 'assign',
        'netmask': 'test_value_27',
        'next_server': 'test_value_28',
        'ntp_server1': 'test_value_29',
        'ntp_server2': 'test_value_30',
        'ntp_server3': 'test_value_31',
        'ntp_service': 'local',
        'server_type': 'regular',
        'status': 'disable',
        'timezone': '01',
        'timezone_option': 'disable',
        'vci_match': 'disable',
        'wifi_ac1': 'test_value_38',
        'wifi_ac2': 'test_value_39',
        'wifi_ac3': 'test_value_40',
        'wins_server1': 'test_value_41',
        'wins_server2': 'test_value_42'
    }
    task_input = {
        'username': 'admin',
        'state': 'absent',
        'system_dhcp_server': server_settings,
        'vdom': 'root'}

    is_error, changed, response = fortios_system_dhcp_server.fortios_system_dhcp(task_input, fos_instance)

    # The mkey is derived inside the module, so only its presence is checked.
    mock_delete.assert_called_with('system.dhcp', 'server', mkey=ANY, vdom='root')
    mock_schema.assert_not_called()
    assert is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 500
def test_system_dhcp_server_idempotent(mocker):
    """An HTTP 404 from the implicit DELETE path must count as 'no change, no error'."""
    mock_schema = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'error', 'http_method': 'DELETE', 'http_status': 404}
    mock_set = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_result)

    # Playbook-style option names (underscores), exactly as the module receives them.
    server_settings = {
        'auto_configuration': 'disable',
        'conflicted_ip_timeout': '4',
        'ddns_auth': 'disable',
        'ddns_key': 'test_value_6',
        'ddns_keyname': 'test_value_7',
        'ddns_server_ip': 'test_value_8',
        'ddns_ttl': '9',
        'ddns_update': 'disable',
        'ddns_update_override': 'disable',
        'ddns_zone': 'test_value_12',
        'default_gateway': 'test_value_13',
        'dns_server1': 'test_value_14',
        'dns_server2': 'test_value_15',
        'dns_server3': 'test_value_16',
        'dns_service': 'local',
        'domain': 'test_value_18',
        'filename': 'test_value_19',
        'forticlient_on_net_status': 'disable',
        'id': '21',
        'interface': 'test_value_22',
        'ip_mode': 'range',
        'ipsec_lease_hold': '24',
        'lease_time': '25',
        'mac_acl_default_action': 'assign',
        'netmask': 'test_value_27',
        'next_server': 'test_value_28',
        'ntp_server1': 'test_value_29',
        'ntp_server2': 'test_value_30',
        'ntp_server3': 'test_value_31',
        'ntp_service': 'local',
        'server_type': 'regular',
        'status': 'disable',
        'timezone': '01',
        'timezone_option': 'disable',
        'vci_match': 'disable',
        'wifi_ac1': 'test_value_38',
        'wifi_ac2': 'test_value_39',
        'wifi_ac3': 'test_value_40',
        'wins_server1': 'test_value_41',
        'wins_server2': 'test_value_42'
    }
    task_input = {
        'username': 'admin',
        'state': 'present',
        'system_dhcp_server': server_settings,
        'vdom': 'root'}

    is_error, changed, response = fortios_system_dhcp_server.fortios_system_dhcp(task_input, fos_instance)

    # The module renames every option from underscore to hyphen form before
    # sending it to FortiOS; the values themselves pass through unchanged.
    expected_data = {name.replace('_', '-'): value for name, value in server_settings.items()}

    mock_set.assert_called_with('system.dhcp', 'server', data=expected_data, vdom='root')
    mock_schema.assert_not_called()
    assert not is_error
    assert not changed
    assert response['status'] == 'error'
    assert response['http_status'] == 404
def test_system_dhcp_server_filter_foreign_attributes(mocker):
    """Attributes that are not part of the schema must be dropped from the payload."""
    mock_schema = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.schema')
    set_result = {'status': 'success', 'http_method': 'POST', 'http_status': 200}
    mock_set = mocker.patch('ansible.module_utils.network.fortios.fortios.FortiOSHandler.set', return_value=set_result)

    # Playbook-style option names (underscores) plus one attribute that does
    # not belong to the system.dhcp server schema.
    server_settings = {
        'random_attribute_not_valid': 'tag',
        'auto_configuration': 'disable',
        'conflicted_ip_timeout': '4',
        'ddns_auth': 'disable',
        'ddns_key': 'test_value_6',
        'ddns_keyname': 'test_value_7',
        'ddns_server_ip': 'test_value_8',
        'ddns_ttl': '9',
        'ddns_update': 'disable',
        'ddns_update_override': 'disable',
        'ddns_zone': 'test_value_12',
        'default_gateway': 'test_value_13',
        'dns_server1': 'test_value_14',
        'dns_server2': 'test_value_15',
        'dns_server3': 'test_value_16',
        'dns_service': 'local',
        'domain': 'test_value_18',
        'filename': 'test_value_19',
        'forticlient_on_net_status': 'disable',
        'id': '21',
        'interface': 'test_value_22',
        'ip_mode': 'range',
        'ipsec_lease_hold': '24',
        'lease_time': '25',
        'mac_acl_default_action': 'assign',
        'netmask': 'test_value_27',
        'next_server': 'test_value_28',
        'ntp_server1': 'test_value_29',
        'ntp_server2': 'test_value_30',
        'ntp_server3': 'test_value_31',
        'ntp_service': 'local',
        'server_type': 'regular',
        'status': 'disable',
        'timezone': '01',
        'timezone_option': 'disable',
        'vci_match': 'disable',
        'wifi_ac1': 'test_value_38',
        'wifi_ac2': 'test_value_39',
        'wifi_ac3': 'test_value_40',
        'wins_server1': 'test_value_41',
        'wins_server2': 'test_value_42'
    }
    task_input = {
        'username': 'admin',
        'state': 'present',
        'system_dhcp_server': server_settings,
        'vdom': 'root'}

    is_error, changed, response = fortios_system_dhcp_server.fortios_system_dhcp(task_input, fos_instance)

    # The payload must contain the hyphenated form of every valid option and
    # must NOT contain the foreign attribute.
    expected_data = {
        name.replace('_', '-'): value
        for name, value in server_settings.items()
        if name != 'random_attribute_not_valid'
    }

    mock_set.assert_called_with('system.dhcp', 'server', data=expected_data, vdom='root')
    mock_schema.assert_not_called()
    assert not is_error
    assert changed
    assert response['status'] == 'success'
    assert response['http_status'] == 200
| 38.10678 | 142 | 0.581551 | 2,504 | 22,483 | 4.848243 | 0.094649 | 0.155684 | 0.039539 | 0.026771 | 0.915157 | 0.91112 | 0.900082 | 0.900082 | 0.900082 | 0.900082 | 0 | 0.039139 | 0.270427 | 22,483 | 589 | 143 | 38.171477 | 0.700969 | 0.029533 | 0 | 0.927894 | 0 | 0 | 0.453561 | 0.07132 | 0 | 0 | 0 | 0 | 0.068311 | 1 | 0.013283 | false | 0 | 0.01518 | 0 | 0.030361 | 0.001898 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
8cb696ae872efef0f768f759f53e8ca14f56e6f4 | 126 | py | Python | brain_training/programming_challenges/euler/T016.py | kuzxnia/algoritms | eda3185f39d79a2657b7ef0da869fcc6b825889d | [
"MIT"
] | null | null | null | brain_training/programming_challenges/euler/T016.py | kuzxnia/algoritms | eda3185f39d79a2657b7ef0da869fcc6b825889d | [
"MIT"
] | null | null | null | brain_training/programming_challenges/euler/T016.py | kuzxnia/algoritms | eda3185f39d79a2657b7ef0da869fcc6b825889d | [
"MIT"
] | null | null | null | from functools import reduce
def power_digit_sum(base: int = 2, exponent: int = 1000) -> int:
    """Return the sum of the decimal digits of ``base ** exponent``.

    The defaults reproduce Project Euler problem 16 (digit sum of 2**1000);
    the parameters generalize the original hard-coded constants while keeping
    the zero-argument call backward compatible.

    :param base: base of the power, defaults to 2
    :param exponent: exponent of the power, defaults to 1000
    :return: the sum of the digits of ``base ** exponent``
    """
    # sum() over a generator replaces the original reduce(lambda x, y: x + y, ...)
    return sum(int(digit) for digit in str(base ** exponent))
| 21 | 71 | 0.65873 | 23 | 126 | 3.521739 | 0.826087 | 0.049383 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.05 | 0.206349 | 126 | 5 | 72 | 25.2 | 0.76 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0.333333 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 7 |
507ec554ed1de4e0966a90cbd5930caa533d702d | 222 | py | Python | deep_collector/compat/fields.py | tzetter/django-deep-collector | 1bd599d5362ade525cb51d6ee70713a3f58af219 | [
"BSD-3-Clause"
] | 27 | 2015-04-28T15:45:11.000Z | 2021-06-21T09:53:06.000Z | deep_collector/compat/fields.py | tzetter/django-deep-collector | 1bd599d5362ade525cb51d6ee70713a3f58af219 | [
"BSD-3-Clause"
] | 21 | 2015-07-28T12:36:23.000Z | 2022-03-20T23:53:53.000Z | deep_collector/compat/fields.py | tzetter/django-deep-collector | 1bd599d5362ade525cb51d6ee70713a3f58af219 | [
"BSD-3-Clause"
] | 13 | 2015-05-21T11:27:26.000Z | 2021-03-31T07:42:26.000Z | import django
# Django 1.7 moved the generic-relation fields from contenttypes.generic to
# contenttypes.fields; import from whichever location this Django version has
# so the rest of the package can use one stable import path.
if django.VERSION < (1, 7):
    from django.contrib.contenttypes.generic import GenericForeignKey, GenericRelation
else:
    from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
| 31.714286 | 86 | 0.810811 | 24 | 222 | 7.5 | 0.583333 | 0.111111 | 0.188889 | 0.322222 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010256 | 0.121622 | 222 | 6 | 87 | 37 | 0.912821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.6 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
508d531ad2a527788ed555f77a28f8973713ecca | 4,027 | py | Python | test.py | ho0ber/amizon_scrape | c72efef78880579adbd478f85c109f6491614fb6 | [
"MIT"
] | null | null | null | test.py | ho0ber/amizon_scrape | c72efef78880579adbd478f85c109f6491614fb6 | [
"MIT"
] | null | null | null | test.py | ho0ber/amizon_scrape | c72efef78880579adbd478f85c109f6491614fb6 | [
"MIT"
] | null | null | null | from scraper.scraper import Scraper
import json
results = ['{"1":{"Marketplace":{"id":"1","user_id":"2","sold_by":"Viridian Dynamics","item_id":"5","quantity":"25","unit_price":1,"expires":null},"Item":{"id":"5","type":"component","name":"OpAmp","description":"","categories":"[]","breakdown_into":"{\\"4\\":1}","crafted_with":"{}","base_price":"1","breakdown_description":"Useful for Technological crafting. "}},"3":{"Marketplace":{"id":"3","user_id":"2","sold_by":"Viridian Dynamics","item_id":"7","quantity":"16","unit_price":3,"expires":null},"Item":{"id":"7","type":"component","name":"Programmable FPGA","description":"","categories":"[]","breakdown_into":"{\\"4\\":3,\\"2\\":1}","crafted_with":"{}","base_price":"3","breakdown_description":"Useful for Technological crafting. Useful for Intrinsic crafting. "}},"4":{"Marketplace":{"id":"4","user_id":"2","sold_by":"Viridian Dynamics","item_id":"8","quantity":"10","unit_price":1,"expires":null},"Item":{"id":"8","type":"component","name":"Nuts and Bolts","description":"","categories":"[]","breakdown_into":"{\\"2\\":1}","crafted_with":"{}","base_price":"1","breakdown_description":"Useful for Intrinsic crafting. "}},"5":{"Marketplace":{"id":"5","user_id":"2","sold_by":"Viridian Dynamics","item_id":"9","quantity":"17","unit_price":2,"expires":null},"Item":{"id":"9","type":"component","name":"Copper Wiring","description":"","categories":"[]","breakdown_into":"{\\"2\\":2,\\"4\\":1}","crafted_with":"{}","base_price":"2","breakdown_description":"Useful for Intrinsic crafting. Useful for Technological crafting. "}},"6":{"Marketplace":{"id":"6","user_id":"2","sold_by":"Viridian Dynamics","item_id":"10","quantity":"24","unit_price":2,"expires":null},"Item":{"id":"10","type":"component","name":"Silver Wire","description":"","categories":"[]","breakdown_into":"{\\"2\\":2,\\"3\\":1}","crafted_with":"{}","base_price":"2","breakdown_description":"Useful for Intrinsic crafting. Useful for Paranormal crafting. 
"}},"7":{"Marketplace":{"id":"7","user_id":"2","sold_by":"Viridian Dynamics","item_id":"11","quantity":"3","unit_price":3,"expires":null},"Item":{"id":"11","type":"component","name":"Microchip","description":"","categories":"[]","breakdown_into":"{\\"2\\":3,\\"4\\":2}","crafted_with":"{}","base_price":"3","breakdown_description":"Useful for Intrinsic crafting. Useful for Technological crafting. "}},"8":{"Marketplace":{"id":"8","user_id":"2","sold_by":"Viridian Dynamics","item_id":"12","quantity":"10","unit_price":3,"expires":null},"Item":{"id":"12","type":"component","name":"Gold Wiring","description":"","categories":"[]","breakdown_into":"{\\"2\\":3,\\"3\\":2}","crafted_with":"{}","base_price":"3","breakdown_description":"Useful for Intrinsic crafting. Useful for Paranormal crafting. "}},"9":{"Marketplace":{"id":"9","user_id":"2","sold_by":"Viridian Dynamics","item_id":"13","quantity":"32","unit_price":1,"expires":null},"Item":{"id":"13","type":"component","name":"Incense","description":"","categories":"[]","breakdown_into":"{\\"3\\":1}","crafted_with":"{}","base_price":"1","breakdown_description":"Useful for Paranormal crafting. "}},"10":{"Marketplace":{"id":"10","user_id":"2","sold_by":"Viridian Dynamics","item_id":"14","quantity":"8","unit_price":2,"expires":null},"Item":{"id":"14","type":"component","name":"Gold Dust","description":"","categories":"[]","breakdown_into":"{\\"2\\":1,\\"3\\":2}","crafted_with":"{}","base_price":"2","breakdown_description":"Useful for Intrinsic crafting. Useful for Paranormal crafting. "}},"11":{"Marketplace":{"id":"11","user_id":"2","sold_by":"Viridian Dynamics","item_id":"15","quantity":"15","unit_price":3,"expires":null},"Item":{"id":"15","type":"component","name":"Diamond Dust","description":"","categories":"[]","breakdown_into":"{\\"2\\":1,\\"3\\":3}","crafted_with":"{}","base_price":"3","breakdown_description":"Useful for Intrinsic crafting. Useful for Paranormal crafting. "}}}']
# Parse the first fixture payload back into Python objects.
res = json.loads(results[0])
# Use the print() *function*: the original `print expr` statement is
# Python-2-only syntax and is a SyntaxError on Python 3.
print(json.dumps(res, sort_keys=False, indent=4, separators=(',', ': ')))
| 575.285714 | 3,875 | 0.649863 | 523 | 4,027 | 4.848948 | 0.151052 | 0.047319 | 0.027603 | 0.043375 | 0.712145 | 0.684937 | 0.583202 | 0.476735 | 0.444401 | 0.286672 | 0 | 0.035705 | 0.026322 | 4,027 | 6 | 3,876 | 671.166667 | 0.611069 | 0 | 0 | 0 | 0 | 0.2 | 0.959523 | 0.831388 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.4 | null | null | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
508e3eb912e7adf2f085a0f722093e10fe6a4d01 | 213 | py | Python | gitlab_release_notes/tests/test_generate.py | vuillaut/gitlab_release_notes | e9587e42c7f2b3f327d891b84c890a3e5f95abed | [
"MIT"
] | null | null | null | gitlab_release_notes/tests/test_generate.py | vuillaut/gitlab_release_notes | e9587e42c7f2b3f327d891b84c890a3e5f95abed | [
"MIT"
] | null | null | null | gitlab_release_notes/tests/test_generate.py | vuillaut/gitlab_release_notes | e9587e42c7f2b3f327d891b84c890a3e5f95abed | [
"MIT"
] | null | null | null | from gitlab_release_notes import generate_release_notes
def test_generate_release_notes():
    """Smoke-test: generating release notes for a known project must not raise."""
    # TODO: replace with a dedicated test repo
    gitlab_url = 'https://gitlab.in2p3.fr'
    notes = generate_release_notes(14117, url=gitlab_url)
| 30.428571 | 72 | 0.788732 | 30 | 213 | 5.3 | 0.633333 | 0.301887 | 0.377358 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037838 | 0.131455 | 213 | 6 | 73 | 35.5 | 0.821622 | 0.187793 | 0 | 0 | 1 | 0 | 0.134503 | 0 | 0 | 0 | 0 | 0.166667 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
509f0c14c4bc3a859c44ad2565a3bd0a9a23450f | 112 | py | Python | ex005.py | noahbarros/Python-Exercises | fafda898473bc984280e201ed11d8ad76cc8624a | [
"MIT"
] | 1 | 2021-07-13T21:41:00.000Z | 2021-07-13T21:41:00.000Z | ex005.py | noahbarros/Python-Exercises | fafda898473bc984280e201ed11d8ad76cc8624a | [
"MIT"
] | null | null | null | ex005.py | noahbarros/Python-Exercises | fafda898473bc984280e201ed11d8ad76cc8624a | [
"MIT"
] | null | null | null | n = int(input('Digite um número: '))
print(f'Seu antecessor é o número {n-1} e seu sucessor é o número {n+1}!') | 56 | 74 | 0.660714 | 23 | 112 | 3.217391 | 0.652174 | 0.054054 | 0.216216 | 0.243243 | 0.27027 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021505 | 0.169643 | 112 | 2 | 74 | 56 | 0.774194 | 0 | 0 | 0 | 0 | 0.5 | 0.732143 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.5 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 7 |
0fbfa1ffc2363cd59a8dbe6120b9287aedf63703 | 7,530 | py | Python | misago/users/tests/test_user_feeds_api.py | HenryChenV/iJiangNan | 68f156d264014939f0302222e16e3125119dd3e3 | [
"MIT"
] | 1 | 2017-07-25T03:04:36.000Z | 2017-07-25T03:04:36.000Z | misago/users/tests/test_user_feeds_api.py | HenryChenV/iJiangNan | 68f156d264014939f0302222e16e3125119dd3e3 | [
"MIT"
] | null | null | null | misago/users/tests/test_user_feeds_api.py | HenryChenV/iJiangNan | 68f156d264014939f0302222e16e3125119dd3e3 | [
"MIT"
] | null | null | null | from django.urls import reverse
from misago.threads import testutils
from misago.threads.tests.test_threads_api import ThreadsApiTestCase
class UserThreadsApiTests(ThreadsApiTestCase):
    """Tests of the per-user threads feed endpoint."""

    def setUp(self):
        super(UserThreadsApiTests, self).setUp()
        self.api_link = reverse('misago:api:user-threads', kwargs={'pk': self.user.pk})

    def test_invalid_user_id(self):
        """api validates user id"""
        bad_link = reverse('misago:api:user-threads', kwargs={'pk': 'abcd'})
        self.assertEqual(self.client.get(bad_link).status_code, 404)

    def test_nonexistant_user_id(self):
        """api validates that user for id exists"""
        bad_link = reverse('misago:api:user-threads', kwargs={'pk': self.user.pk + 1})
        self.assertEqual(self.client.get(bad_link).status_code, 404)

    def test_empty_response(self):
        """api has no showstopers on empty response"""
        resp = self.client.get(self.api_link)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json()['count'], 0)

    def test_user_post(self):
        """user post doesn't show in feed because its not first post in thread"""
        testutils.reply_thread(self.thread, poster=self.user)

        resp = self.client.get(self.api_link)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json()['count'], 0)

    def test_user_event(self):
        """events don't show in feeds at all"""
        testutils.reply_thread(self.thread, poster=self.user, is_event=True)

        resp = self.client.get(self.api_link)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json()['count'], 0)

    def test_user_thread(self):
        """user thread shows in feed"""
        thread = testutils.post_thread(category=self.category, poster=self.user)
        # replies are not first posts, so this one must stay out of the feed
        testutils.reply_thread(thread, poster=self.user)

        resp = self.client.get(self.api_link)
        self.assertEqual(resp.status_code, 200)
        payload = resp.json()
        self.assertEqual(payload['count'], 1)
        self.assertEqual(payload['results'][0]['id'], thread.first_post_id)

    def test_user_thread_anonymous(self):
        """user thread shows in feed requested by unauthenticated user"""
        thread = testutils.post_thread(category=self.category, poster=self.user)
        self.logout_user()

        resp = self.client.get(self.api_link)
        self.assertEqual(resp.status_code, 200)
        payload = resp.json()
        self.assertEqual(payload['count'], 1)
        self.assertEqual(payload['results'][0]['id'], thread.first_post_id)
class UserPostsApiTests(ThreadsApiTestCase):
    """Tests of the per-user posts feed endpoint."""

    def setUp(self):
        super(UserPostsApiTests, self).setUp()
        self.api_link = reverse('misago:api:user-posts', kwargs={'pk': self.user.pk})

    def test_invalid_user_id(self):
        """api validates user id"""
        bad_link = reverse('misago:api:user-posts', kwargs={'pk': 'abcd'})
        self.assertEqual(self.client.get(bad_link).status_code, 404)

    def test_nonexistant_user_id(self):
        """api validates that user for id exists"""
        bad_link = reverse('misago:api:user-posts', kwargs={'pk': self.user.pk + 1})
        self.assertEqual(self.client.get(bad_link).status_code, 404)

    def test_empty_response(self):
        """api has no showstopers on empty response"""
        resp = self.client.get(self.api_link)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json()['count'], 0)

    def test_user_event(self):
        """events don't show in feeds at all"""
        testutils.reply_thread(self.thread, poster=self.user, is_event=True)

        resp = self.client.get(self.api_link)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json()['count'], 0)

    def test_user_hidden_post(self):
        """hidden posts don't show in feeds at all"""
        testutils.reply_thread(self.thread, poster=self.user, is_hidden=True)

        resp = self.client.get(self.api_link)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json()['count'], 0)

    def test_user_unapproved_post(self):
        """unapproved posts don't show in feeds at all"""
        testutils.reply_thread(self.thread, poster=self.user, is_unapproved=True)

        resp = self.client.get(self.api_link)
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(resp.json()['count'], 0)

    def test_user_posts(self):
        """user posts show in feed"""
        first_post = testutils.reply_thread(self.thread, poster=self.user)
        second_post = testutils.reply_thread(self.thread, poster=self.user)

        resp = self.client.get(self.api_link)
        self.assertEqual(resp.status_code, 200)
        payload = resp.json()
        # newest post is listed first
        self.assertEqual(payload['count'], 2)
        self.assertEqual(payload['results'][0]['id'], second_post.pk)
        self.assertEqual(payload['results'][1]['id'], first_post.pk)

    def test_user_thread(self):
        """user thread shows in feed"""
        thread = testutils.post_thread(category=self.category, poster=self.user)
        post = testutils.reply_thread(thread, poster=self.user)

        resp = self.client.get(self.api_link)
        self.assertEqual(resp.status_code, 200)
        payload = resp.json()
        self.assertEqual(payload['count'], 2)
        self.assertEqual(payload['results'][0]['id'], post.pk)
        self.assertEqual(payload['results'][1]['id'], thread.first_post_id)

    def test_user_post_anonymous(self):
        """user post shows in feed requested by unauthenticated user"""
        first_post = testutils.reply_thread(self.thread, poster=self.user)
        second_post = testutils.reply_thread(self.thread, poster=self.user)
        self.logout_user()

        resp = self.client.get(self.api_link)
        self.assertEqual(resp.status_code, 200)
        payload = resp.json()
        self.assertEqual(payload['count'], 2)
        self.assertEqual(payload['results'][0]['id'], second_post.pk)
        self.assertEqual(payload['results'][1]['id'], first_post.pk)

    def test_user_thread_anonymous(self):
        """user thread shows in feed requested by unauthenticated user"""
        thread = testutils.post_thread(category=self.category, poster=self.user)
        post = testutils.reply_thread(thread, poster=self.user)
        self.logout_user()

        resp = self.client.get(self.api_link)
        self.assertEqual(resp.status_code, 200)
        payload = resp.json()
        self.assertEqual(payload['count'], 2)
        self.assertEqual(payload['results'][0]['id'], post.pk)
        self.assertEqual(payload['results'][1]['id'], thread.first_post_id)
| 34.541284 | 83 | 0.611155 | 887 | 7,530 | 5.062007 | 0.094701 | 0.13363 | 0.2049 | 0.138307 | 0.9098 | 0.894209 | 0.894209 | 0.885078 | 0.879955 | 0.872829 | 0 | 0.013679 | 0.262151 | 7,530 | 217 | 84 | 34.700461 | 0.794456 | 0.094024 | 0 | 0.775 | 0 | 0 | 0.045576 | 0.019596 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.11875 | false | 0 | 0.01875 | 0 | 0.15 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
ba23818e46f52b3e7546e249ed6a73ab09e0d32d | 8,936 | py | Python | src/all_ingest_csv.py | Langutang/Marauder.AI | fd25af74d3c9a99ed6e0cd7092cd5846041605a9 | [
"MIT"
] | null | null | null | src/all_ingest_csv.py | Langutang/Marauder.AI | fd25af74d3c9a99ed6e0cd7092cd5846041605a9 | [
"MIT"
] | null | null | null | src/all_ingest_csv.py | Langutang/Marauder.AI | fd25af74d3c9a99ed6e0cd7092cd5846041605a9 | [
"MIT"
] | null | null | null | import requests
from requests.auth import HTTPBasicAuth
import time
# --- configuration ----------------------------------------------------------
# NOTE(review): API credentials are hard-coded in source; move them into
# environment variables or a config file before sharing this script.
url = "https://api.mysportsfeeds.com/v2.1/pull/nhl/2021-2022-regular/games.csv"
token = "4e92f126-d598-4577-98e3-bb0674"
password = "MYSPORTSFEEDS"
top_directory = r"C:\Users\John Lang\Documents\Marauder\NHL\core"
years = ["2017","2018","2019","2020","2021"]
season_playoff = ["regular","playoff"]

################################################
################### CSVs #######################
################################################

############## grabbing season games
print("_____________________")
print("GRABBING SEASON GAMES")
print("_____________________")
for condition in season_playoff:
    for year in years:
        try:
            season_urls = f"https://api.mysportsfeeds.com/v2.1/pull/nhl/{year}-{condition}/games.csv"
            season_r = requests.get(season_urls, auth=HTTPBasicAuth(token, password))
            print(season_r.status_code)
            # with-statement closes the file even on error (the original
            # opened handles and never closed them)
            with open(top_directory + f"/{year}/" + f"/game_{condition}.csv", "w+") as outfile:
                outfile.write(season_r.text)
            print(f"success for the year: {year} and season type: {condition}")
        except Exception as exc:
            # `season_r` is unbound when requests.get() itself fails, so the
            # old handler's `print(season_r.status_code)` could NameError;
            # report the exception instead.
            print("something fucked up")
            print(exc)
print("Ingesting Seasons done: moving team game log")
############## seasonal team gamelogs
print("________________________________")
print("GRABBING SEASON GAMELOGS BY TEAM")
print("________________________________")
counter = 0
# NOTE(review): "PIT" appears twice in this list (index 0 and 14); kept as-is
# to preserve behavior, but the second entry just re-downloads the same data.
teams = ["PIT","MTL","CHI","VAN","STL","WSH","BOS","NYI","CAR","CBJ","CGY","SJS",
         "ANA","MIN","PIT","TOR","WPJ","BUF","ARI","NJD","COL","EDM","PHI","TBL",
         "NYR","DET","NSH","VGK","LAK","OTT","FLO","DAL"]
for team in teams:
    for condition in season_playoff:
        for year in years:
            try:
                print("----------------------------------------")
                print("Currently ingesting season gamelog:")
                print(f"TEAM: {team}")
                print(f"YEAR: {year}")
                print(f"IN SEASON TYPE: {condition}")
                # (dropped the original bare `team.lower()` call: its result
                # was discarded, so it was a no-op)
                gamelog_urls = f"https://api.mysportsfeeds.com/v2.1/pull/nhl/{year}-{condition}/team_gamelogs.csv?team={team}"
                gamelog_r = requests.get(gamelog_urls, auth=HTTPBasicAuth(token, password))
                # status_code is an int; the original `"..." + status_code`
                # raised TypeError on every iteration — format it instead
                print(f"status code success: {gamelog_r.status_code}")
                with open(top_directory + f"/{year}/" + f"/{team}_{condition}_teamgamelog.csv", "w+") as outfile:
                    outfile.write(gamelog_r.text)
                counter = counter + 1
                print(f"COUNTER VALUE: {counter}")
                print(f"success for the year: {year} and season type: {condition}")
                print("------------FINALIZING SAVE-------------")
                print("------------------------------------------")
                # crude rate limiting: pause after every 30 successful pulls
                while (counter > 30):
                    print("Cooling down request sends... Go get money:")
                    time.sleep(120)
                    counter = 0
                    print("COUNTER RESET, loop restarting")
            except Exception as exc:
                # `gamelog_r` may be unbound here, and the old handler's
                # str+int concatenation raised an *uncaught* TypeError that
                # killed the script; report the exception safely instead
                print("something fucked up")
                print(f"request failed: {exc}")
############## seasonal player gamelogs
print("__________________________________")
print("GRABBING SEASON GAMELOGS BY PLAYER")
print("__________________________________")
counter = 0
teams = ["PIT","MTL","CHI","VAN","STL","WSH","BOS","NYI","CAR","CBJ","CGY","SJS",
         "ANA","MIN","PIT","TOR","WPJ","BUF","ARI","NJD","COL","EDM","PHI","TBL",
         "NYR","DET","NSH","VGK","LAK","OTT","FLO","DAL"]
for team in teams:
    for condition in season_playoff:
        for year in years:
            try:
                print("----------------------------------------")
                print("Currently ingesting season gamelog:")
                print(f"TEAM: {team}")
                print(f"YEAR: {year}")
                # fixed: the original interpolated the whole `season_playoff`
                # list here instead of the current condition
                print(f"IN SEASON TYPE: {condition}")
                # (dropped the original bare `team.lower()` call — a no-op)
                gamelog_urls = f"https://api.mysportsfeeds.com/v2.1/pull/nhl/{year}-{condition}/player_gamelogs.csv?team={team}"
                gamelog_r = requests.get(gamelog_urls, auth=HTTPBasicAuth(token, password))
                # status_code is an int; str+int concatenation raised TypeError
                print(f"status code success: {gamelog_r.status_code}")
                with open(top_directory + f"/{year}/" + f"/{team}_{condition}_playergame.csv", "w+") as outfile:
                    outfile.write(gamelog_r.text)
                counter = counter + 1
                print(f"COUNTER VALUE: {counter}")
                print(f"success for the year: {year} and season type: {condition}")
                print("------------FINALIZING SAVE-------------")
                print("------------------------------------------")
                # crude rate limiting: pause after every 30 successful pulls
                while (counter > 30):
                    print("Cooling down request sends... Go get money:")
                    time.sleep(120)
                    counter = 0
                    print("COUNTER RESET, loop restarting")
            except Exception as exc:
                # `gamelog_r` may be unbound, and the old str+int message
                # raised an uncaught TypeError inside this handler
                print("something fucked up")
                print(f"request failed: {exc}")
############## player stat projections
print("_________________________")
print("PLAYER STAT PROJECTSIONS ")
print("_________________________")
for year in years:
    for condition in season_playoff:
        try:
            print(year)
            print(condition)
            player_stat_urls = f"https://api.mysportsfeeds.com/v2.1/pull/nhl/{year}-{condition}/player_stats_totals_projections.csv"
            player_stat_r = requests.get(player_stat_urls, auth=HTTPBasicAuth(token, password))
            print(player_stat_r.status_code)
            print("writing file to " + top_directory + f"/{year[0:4]}/{year[0:4]}_playerprojections_{condition}.csv")
            # with-statement closes the file even if the write fails
            with open(top_directory + f"/{year[0:4]}/{year[0:4]}_playerprojections_{condition}.csv", "w+", encoding="utf-8") as outfile:
                outfile.write(player_stat_r.text)
            print("-------------------------------------")
            print(f"BOOM SAVED FOR {year} AND {condition}")
            print("-------------------------------------")
        except Exception as exc:
            # `player_stat_r` is unbound if the request itself failed, so the
            # old handler could NameError; report the exception instead
            print("-------------------------------------")
            print("bro something fucked up: " + str(exc))
            print("-------------------------------------")
############## seasonal player stat
print("_________________________")
print("SEASONAL PLAYER STAT")
print("_________________________")
counter = 0
for year in years:
    for condition in season_playoff:
        try:
            print(year)
            print(condition)
            seasonal_player_urls = f"https://api.mysportsfeeds.com/v2.1/pull/nhl/{year}-{condition}/player_stats_totals.csv"
            seasonal_player_r = requests.get(seasonal_player_urls, auth=HTTPBasicAuth(token, password))
            print(seasonal_player_r.status_code)
            print("writing file to " + top_directory + f"/{year[0:4]}/{year[0:4]}_playerstat_{condition}.csv")
            # with-statement closes the file even if the write fails
            with open(top_directory + f"/{year[0:4]}/{year[0:4]}_playerstat_{condition}.csv", "w+", encoding="utf-8") as outfile:
                outfile.write(seasonal_player_r.text)
            print("-------------------------------------")
            print(f"BOOM SAVED FOR {year} AND {condition}")
            print("-------------------------------------")
            counter = counter + 1
            # rate limiting: sleep after every successful pull
            while (counter > 0):
                print("Cooling down request sends... Go get money:")
                time.sleep(30)
                counter = 0
        except Exception as exc:
            # `seasonal_player_r` is unbound if the request itself failed, so
            # the old handler could NameError; report the exception instead
            print("-------------------------------------")
            print("bro something fucked up: " + str(exc))
            print("-------------------------------------")
            break
############## seasonal teams stat
print("_________________________")
print("SEASONAL TEAMS STAT")
print("_________________________")
counter = 0
for year in years:
    for condition in season_playoff:
        try:
            print(year)
            print(condition)
            seasonal_team_urls = f"https://api.mysportsfeeds.com/v2.1/pull/nhl/{year}-{condition}/team_stats_totals.csv"
            seasonal_team_r = requests.get(seasonal_team_urls, auth=HTTPBasicAuth(token, password))
            print(seasonal_team_r.status_code)
            print("writing file to " + top_directory + f"/{year[0:4]}/{year[0:4]}_teamstat_{condition}.csv")
            # with-statement closes the file even if the write fails
            with open(top_directory + f"/{year[0:4]}/{year[0:4]}_teamstat_{condition}.csv", "w+", encoding="utf-8") as outfile:
                outfile.write(seasonal_team_r.text)
            print("-------------------------------------")
            print(f"BOOM SAVED FOR {year} AND {condition}")
            print("-------------------------------------")
            counter = counter + 1
            # rate limiting: sleep after every successful pull
            while (counter > 0):
                print("Cooling down request sends... Go get money:")
                time.sleep(10)
                counter = 0
        except Exception as exc:
            # `seasonal_team_r` is unbound if the request itself failed, so
            # the old handler could NameError; report the exception instead
            print("-------------------------------------")
            print("bro something fucked up: " + str(exc))
            print("-------------------------------------")
            break
| 40.071749 | 133 | 0.542972 | 948 | 8,936 | 4.660338 | 0.166667 | 0.036215 | 0.029878 | 0.034631 | 0.830693 | 0.813717 | 0.768674 | 0.747397 | 0.725894 | 0.687868 | 0 | 0.016947 | 0.227395 | 8,936 | 222 | 134 | 40.252252 | 0.622972 | 0.015443 | 0 | 0.715116 | 0 | 0.040698 | 0.429187 | 0.172872 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.040698 | 0.017442 | null | null | 0.482558 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 9 |
e84bd9b375c64ac94f790ac90fca45aaa3d9a019 | 30,886 | py | Python | sdk/python/pulumi_azure/network/express_route_circuit.py | aangelisc/pulumi-azure | 71dd9c75403146e16f7480e5a60b08bc0329660e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/network/express_route_circuit.py | aangelisc/pulumi-azure | 71dd9c75403146e16f7480e5a60b08bc0329660e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure/network/express_route_circuit.py | aangelisc/pulumi-azure | 71dd9c75403146e16f7480e5a60b08bc0329660e | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._inputs import *
__all__ = ['ExpressRouteCircuitArgs', 'ExpressRouteCircuit']
@pulumi.input_type
class ExpressRouteCircuitArgs:
    """Input arguments for an ExpressRouteCircuit resource (auto-generated by the Pulumi Terraform Bridge; do not edit by hand)."""
    def __init__(__self__, *,
                 bandwidth_in_mbps: pulumi.Input[int],
                 peering_location: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 service_provider_name: pulumi.Input[str],
                 sku: pulumi.Input['ExpressRouteCircuitSkuArgs'],
                 allow_classic_operations: Optional[pulumi.Input[bool]] = None,
                 location: Optional[pulumi.Input[str]] = None,
                 name: Optional[pulumi.Input[str]] = None,
                 tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
        """
        The set of arguments for constructing a ExpressRouteCircuit resource.
        :param pulumi.Input[int] bandwidth_in_mbps: The bandwidth in Mbps of the circuit being created.
        :param pulumi.Input[str] peering_location: The name of the peering location and **not** the Azure resource location.
        :param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the ExpressRoute circuit. Changing this forces a new resource to be created.
        :param pulumi.Input[str] service_provider_name: The name of the ExpressRoute Service Provider.
        :param pulumi.Input['ExpressRouteCircuitSkuArgs'] sku: A `sku` block for the ExpressRoute circuit as documented below.
        :param pulumi.Input[bool] allow_classic_operations: Allow the circuit to interact with classic (RDFE) resources. The default value is `false`.
        :param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        :param pulumi.Input[str] name: The name of the ExpressRoute circuit. Changing this forces a new resource to be created.
        :param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
        """
        pulumi.set(__self__, "bandwidth_in_mbps", bandwidth_in_mbps)
        pulumi.set(__self__, "peering_location", peering_location)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "service_provider_name", service_provider_name)
        pulumi.set(__self__, "sku", sku)
        if allow_classic_operations is not None:
            pulumi.set(__self__, "allow_classic_operations", allow_classic_operations)
        if location is not None:
            pulumi.set(__self__, "location", location)
        if name is not None:
            pulumi.set(__self__, "name", name)
        if tags is not None:
            pulumi.set(__self__, "tags", tags)
    @property
    @pulumi.getter(name="bandwidthInMbps")
    def bandwidth_in_mbps(self) -> pulumi.Input[int]:
        """
        The bandwidth in Mbps of the circuit being created.
        """
        return pulumi.get(self, "bandwidth_in_mbps")
    @bandwidth_in_mbps.setter
    def bandwidth_in_mbps(self, value: pulumi.Input[int]):
        pulumi.set(self, "bandwidth_in_mbps", value)
    @property
    @pulumi.getter(name="peeringLocation")
    def peering_location(self) -> pulumi.Input[str]:
        """
        The name of the peering location and **not** the Azure resource location.
        """
        return pulumi.get(self, "peering_location")
    @peering_location.setter
    def peering_location(self, value: pulumi.Input[str]):
        pulumi.set(self, "peering_location", value)
    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group in which to create the ExpressRoute circuit. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "resource_group_name")
    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)
    @property
    @pulumi.getter(name="serviceProviderName")
    def service_provider_name(self) -> pulumi.Input[str]:
        """
        The name of the ExpressRoute Service Provider.
        """
        return pulumi.get(self, "service_provider_name")
    @service_provider_name.setter
    def service_provider_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "service_provider_name", value)
    @property
    @pulumi.getter
    def sku(self) -> pulumi.Input['ExpressRouteCircuitSkuArgs']:
        """
        A `sku` block for the ExpressRoute circuit as documented below.
        """
        return pulumi.get(self, "sku")
    @sku.setter
    def sku(self, value: pulumi.Input['ExpressRouteCircuitSkuArgs']):
        pulumi.set(self, "sku", value)
    @property
    @pulumi.getter(name="allowClassicOperations")
    def allow_classic_operations(self) -> Optional[pulumi.Input[bool]]:
        """
        Allow the circuit to interact with classic (RDFE) resources. The default value is `false`.
        """
        return pulumi.get(self, "allow_classic_operations")
    @allow_classic_operations.setter
    def allow_classic_operations(self, value: Optional[pulumi.Input[bool]]):
        pulumi.set(self, "allow_classic_operations", value)
    @property
    @pulumi.getter
    def location(self) -> Optional[pulumi.Input[str]]:
        """
        Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "location")
    @location.setter
    def location(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "location", value)
    @property
    @pulumi.getter
    def name(self) -> Optional[pulumi.Input[str]]:
        """
        The name of the ExpressRoute circuit. Changing this forces a new resource to be created.
        """
        return pulumi.get(self, "name")
    @name.setter
    def name(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "name", value)
    @property
    @pulumi.getter
    def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
        """
        A mapping of tags to assign to the resource.
        """
        return pulumi.get(self, "tags")
    @tags.setter
    def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
        pulumi.set(self, "tags", value)
@pulumi.input_type
class _ExpressRouteCircuitState:
def __init__(__self__, *,
allow_classic_operations: Optional[pulumi.Input[bool]] = None,
bandwidth_in_mbps: Optional[pulumi.Input[int]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
peering_location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_key: Optional[pulumi.Input[str]] = None,
service_provider_name: Optional[pulumi.Input[str]] = None,
service_provider_provisioning_state: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input['ExpressRouteCircuitSkuArgs']] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None):
"""
Input properties used for looking up and filtering ExpressRouteCircuit resources.
:param pulumi.Input[bool] allow_classic_operations: Allow the circuit to interact with classic (RDFE) resources. The default value is `false`.
:param pulumi.Input[int] bandwidth_in_mbps: The bandwidth in Mbps of the circuit being created.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The name of the ExpressRoute circuit. Changing this forces a new resource to be created.
:param pulumi.Input[str] peering_location: The name of the peering location and **not** the Azure resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the ExpressRoute circuit. Changing this forces a new resource to be created.
:param pulumi.Input[str] service_key: The string needed by the service provider to provision the ExpressRoute circuit.
:param pulumi.Input[str] service_provider_name: The name of the ExpressRoute Service Provider.
:param pulumi.Input[str] service_provider_provisioning_state: The ExpressRoute circuit provisioning state from your chosen service provider. Possible values are "NotProvisioned", "Provisioning", "Provisioned", and "Deprovisioning".
:param pulumi.Input['ExpressRouteCircuitSkuArgs'] sku: A `sku` block for the ExpressRoute circuit as documented below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
if allow_classic_operations is not None:
pulumi.set(__self__, "allow_classic_operations", allow_classic_operations)
if bandwidth_in_mbps is not None:
pulumi.set(__self__, "bandwidth_in_mbps", bandwidth_in_mbps)
if location is not None:
pulumi.set(__self__, "location", location)
if name is not None:
pulumi.set(__self__, "name", name)
if peering_location is not None:
pulumi.set(__self__, "peering_location", peering_location)
if resource_group_name is not None:
pulumi.set(__self__, "resource_group_name", resource_group_name)
if service_key is not None:
pulumi.set(__self__, "service_key", service_key)
if service_provider_name is not None:
pulumi.set(__self__, "service_provider_name", service_provider_name)
if service_provider_provisioning_state is not None:
pulumi.set(__self__, "service_provider_provisioning_state", service_provider_provisioning_state)
if sku is not None:
pulumi.set(__self__, "sku", sku)
if tags is not None:
pulumi.set(__self__, "tags", tags)
@property
@pulumi.getter(name="allowClassicOperations")
def allow_classic_operations(self) -> Optional[pulumi.Input[bool]]:
"""
Allow the circuit to interact with classic (RDFE) resources. The default value is `false`.
"""
return pulumi.get(self, "allow_classic_operations")
@allow_classic_operations.setter
def allow_classic_operations(self, value: Optional[pulumi.Input[bool]]):
pulumi.set(self, "allow_classic_operations", value)
@property
@pulumi.getter(name="bandwidthInMbps")
def bandwidth_in_mbps(self) -> Optional[pulumi.Input[int]]:
"""
The bandwidth in Mbps of the circuit being created.
"""
return pulumi.get(self, "bandwidth_in_mbps")
@bandwidth_in_mbps.setter
def bandwidth_in_mbps(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "bandwidth_in_mbps", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the ExpressRoute circuit. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@name.setter
def name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "name", value)
@property
@pulumi.getter(name="peeringLocation")
def peering_location(self) -> Optional[pulumi.Input[str]]:
"""
The name of the peering location and **not** the Azure resource location.
"""
return pulumi.get(self, "peering_location")
@peering_location.setter
def peering_location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "peering_location", value)
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the resource group in which to create the ExpressRoute circuit. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@resource_group_name.setter
def resource_group_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "resource_group_name", value)
@property
@pulumi.getter(name="serviceKey")
def service_key(self) -> Optional[pulumi.Input[str]]:
"""
The string needed by the service provider to provision the ExpressRoute circuit.
"""
return pulumi.get(self, "service_key")
@service_key.setter
def service_key(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_key", value)
@property
@pulumi.getter(name="serviceProviderName")
def service_provider_name(self) -> Optional[pulumi.Input[str]]:
"""
The name of the ExpressRoute Service Provider.
"""
return pulumi.get(self, "service_provider_name")
@service_provider_name.setter
def service_provider_name(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_provider_name", value)
@property
@pulumi.getter(name="serviceProviderProvisioningState")
def service_provider_provisioning_state(self) -> Optional[pulumi.Input[str]]:
"""
The ExpressRoute circuit provisioning state from your chosen service provider. Possible values are "NotProvisioned", "Provisioning", "Provisioned", and "Deprovisioning".
"""
return pulumi.get(self, "service_provider_provisioning_state")
@service_provider_provisioning_state.setter
def service_provider_provisioning_state(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "service_provider_provisioning_state", value)
@property
@pulumi.getter
def sku(self) -> Optional[pulumi.Input['ExpressRouteCircuitSkuArgs']]:
"""
A `sku` block for the ExpressRoute circuit as documented below.
"""
return pulumi.get(self, "sku")
@sku.setter
def sku(self, value: Optional[pulumi.Input['ExpressRouteCircuitSkuArgs']]):
pulumi.set(self, "sku", value)
@property
@pulumi.getter
def tags(self) -> Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
@tags.setter
def tags(self, value: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]]):
pulumi.set(self, "tags", value)
class ExpressRouteCircuit(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_classic_operations: Optional[pulumi.Input[bool]] = None,
bandwidth_in_mbps: Optional[pulumi.Input[int]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
peering_location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_provider_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitSkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
"""
Manages an ExpressRoute circuit.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_express_route_circuit = azure.network.ExpressRouteCircuit("exampleExpressRouteCircuit",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
service_provider_name="Equinix",
peering_location="Silicon Valley",
bandwidth_in_mbps=50,
sku=azure.network.ExpressRouteCircuitSkuArgs(
tier="Standard",
family="MeteredData",
),
tags={
"environment": "Production",
})
```
## Import
ExpressRoute circuits can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:network/expressRouteCircuit:ExpressRouteCircuit myExpressRoute /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/expressRouteCircuits/myExpressRoute
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] allow_classic_operations: Allow the circuit to interact with classic (RDFE) resources. The default value is `false`.
:param pulumi.Input[int] bandwidth_in_mbps: The bandwidth in Mbps of the circuit being created.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The name of the ExpressRoute circuit. Changing this forces a new resource to be created.
:param pulumi.Input[str] peering_location: The name of the peering location and **not** the Azure resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the ExpressRoute circuit. Changing this forces a new resource to be created.
:param pulumi.Input[str] service_provider_name: The name of the ExpressRoute Service Provider.
:param pulumi.Input[pulumi.InputType['ExpressRouteCircuitSkuArgs']] sku: A `sku` block for the ExpressRoute circuit as documented below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: ExpressRouteCircuitArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Manages an ExpressRoute circuit.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_express_route_circuit = azure.network.ExpressRouteCircuit("exampleExpressRouteCircuit",
resource_group_name=example_resource_group.name,
location=example_resource_group.location,
service_provider_name="Equinix",
peering_location="Silicon Valley",
bandwidth_in_mbps=50,
sku=azure.network.ExpressRouteCircuitSkuArgs(
tier="Standard",
family="MeteredData",
),
tags={
"environment": "Production",
})
```
## Import
ExpressRoute circuits can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:network/expressRouteCircuit:ExpressRouteCircuit myExpressRoute /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/mygroup1/providers/Microsoft.Network/expressRouteCircuits/myExpressRoute
```
:param str resource_name: The name of the resource.
:param ExpressRouteCircuitArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(ExpressRouteCircuitArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
allow_classic_operations: Optional[pulumi.Input[bool]] = None,
bandwidth_in_mbps: Optional[pulumi.Input[int]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
peering_location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_provider_name: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitSkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = ExpressRouteCircuitArgs.__new__(ExpressRouteCircuitArgs)
__props__.__dict__["allow_classic_operations"] = allow_classic_operations
if bandwidth_in_mbps is None and not opts.urn:
raise TypeError("Missing required property 'bandwidth_in_mbps'")
__props__.__dict__["bandwidth_in_mbps"] = bandwidth_in_mbps
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
if peering_location is None and not opts.urn:
raise TypeError("Missing required property 'peering_location'")
__props__.__dict__["peering_location"] = peering_location
if resource_group_name is None and not opts.urn:
raise TypeError("Missing required property 'resource_group_name'")
__props__.__dict__["resource_group_name"] = resource_group_name
if service_provider_name is None and not opts.urn:
raise TypeError("Missing required property 'service_provider_name'")
__props__.__dict__["service_provider_name"] = service_provider_name
if sku is None and not opts.urn:
raise TypeError("Missing required property 'sku'")
__props__.__dict__["sku"] = sku
__props__.__dict__["tags"] = tags
__props__.__dict__["service_key"] = None
__props__.__dict__["service_provider_provisioning_state"] = None
super(ExpressRouteCircuit, __self__).__init__(
'azure:network/expressRouteCircuit:ExpressRouteCircuit',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
allow_classic_operations: Optional[pulumi.Input[bool]] = None,
bandwidth_in_mbps: Optional[pulumi.Input[int]] = None,
location: Optional[pulumi.Input[str]] = None,
name: Optional[pulumi.Input[str]] = None,
peering_location: Optional[pulumi.Input[str]] = None,
resource_group_name: Optional[pulumi.Input[str]] = None,
service_key: Optional[pulumi.Input[str]] = None,
service_provider_name: Optional[pulumi.Input[str]] = None,
service_provider_provisioning_state: Optional[pulumi.Input[str]] = None,
sku: Optional[pulumi.Input[pulumi.InputType['ExpressRouteCircuitSkuArgs']]] = None,
tags: Optional[pulumi.Input[Mapping[str, pulumi.Input[str]]]] = None) -> 'ExpressRouteCircuit':
"""
Get an existing ExpressRouteCircuit resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[bool] allow_classic_operations: Allow the circuit to interact with classic (RDFE) resources. The default value is `false`.
:param pulumi.Input[int] bandwidth_in_mbps: The bandwidth in Mbps of the circuit being created.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: The name of the ExpressRoute circuit. Changing this forces a new resource to be created.
:param pulumi.Input[str] peering_location: The name of the peering location and **not** the Azure resource location.
:param pulumi.Input[str] resource_group_name: The name of the resource group in which to create the ExpressRoute circuit. Changing this forces a new resource to be created.
:param pulumi.Input[str] service_key: The string needed by the service provider to provision the ExpressRoute circuit.
:param pulumi.Input[str] service_provider_name: The name of the ExpressRoute Service Provider.
:param pulumi.Input[str] service_provider_provisioning_state: The ExpressRoute circuit provisioning state from your chosen service provider. Possible values are "NotProvisioned", "Provisioning", "Provisioned", and "Deprovisioning".
:param pulumi.Input[pulumi.InputType['ExpressRouteCircuitSkuArgs']] sku: A `sku` block for the ExpressRoute circuit as documented below.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _ExpressRouteCircuitState.__new__(_ExpressRouteCircuitState)
__props__.__dict__["allow_classic_operations"] = allow_classic_operations
__props__.__dict__["bandwidth_in_mbps"] = bandwidth_in_mbps
__props__.__dict__["location"] = location
__props__.__dict__["name"] = name
__props__.__dict__["peering_location"] = peering_location
__props__.__dict__["resource_group_name"] = resource_group_name
__props__.__dict__["service_key"] = service_key
__props__.__dict__["service_provider_name"] = service_provider_name
__props__.__dict__["service_provider_provisioning_state"] = service_provider_provisioning_state
__props__.__dict__["sku"] = sku
__props__.__dict__["tags"] = tags
return ExpressRouteCircuit(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="allowClassicOperations")
def allow_classic_operations(self) -> pulumi.Output[Optional[bool]]:
"""
Allow the circuit to interact with classic (RDFE) resources. The default value is `false`.
"""
return pulumi.get(self, "allow_classic_operations")
@property
@pulumi.getter(name="bandwidthInMbps")
def bandwidth_in_mbps(self) -> pulumi.Output[int]:
"""
The bandwidth in Mbps of the circuit being created.
"""
return pulumi.get(self, "bandwidth_in_mbps")
@property
@pulumi.getter
def location(self) -> pulumi.Output[str]:
"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "location")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
The name of the ExpressRoute circuit. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="peeringLocation")
def peering_location(self) -> pulumi.Output[str]:
"""
The name of the peering location and **not** the Azure resource location.
"""
return pulumi.get(self, "peering_location")
@property
@pulumi.getter(name="resourceGroupName")
def resource_group_name(self) -> pulumi.Output[str]:
"""
The name of the resource group in which to create the ExpressRoute circuit. Changing this forces a new resource to be created.
"""
return pulumi.get(self, "resource_group_name")
@property
@pulumi.getter(name="serviceKey")
def service_key(self) -> pulumi.Output[str]:
"""
The string needed by the service provider to provision the ExpressRoute circuit.
"""
return pulumi.get(self, "service_key")
@property
@pulumi.getter(name="serviceProviderName")
def service_provider_name(self) -> pulumi.Output[str]:
"""
The name of the ExpressRoute Service Provider.
"""
return pulumi.get(self, "service_provider_name")
@property
@pulumi.getter(name="serviceProviderProvisioningState")
def service_provider_provisioning_state(self) -> pulumi.Output[str]:
"""
The ExpressRoute circuit provisioning state from your chosen service provider. Possible values are "NotProvisioned", "Provisioning", "Provisioned", and "Deprovisioning".
"""
return pulumi.get(self, "service_provider_provisioning_state")
@property
@pulumi.getter
def sku(self) -> pulumi.Output['outputs.ExpressRouteCircuitSku']:
"""
A `sku` block for the ExpressRoute circuit as documented below.
"""
return pulumi.get(self, "sku")
@property
@pulumi.getter
def tags(self) -> pulumi.Output[Optional[Mapping[str, str]]]:
"""
A mapping of tags to assign to the resource.
"""
return pulumi.get(self, "tags")
| 47.737249 | 239 | 0.671275 | 3,552 | 30,886 | 5.614583 | 0.062218 | 0.08053 | 0.064584 | 0.048538 | 0.893296 | 0.880459 | 0.860753 | 0.844607 | 0.818683 | 0.790102 | 0 | 0.002997 | 0.233051 | 30,886 | 646 | 240 | 47.811146 | 0.838906 | 0.350223 | 0 | 0.693593 | 1 | 0 | 0.131033 | 0.061156 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16156 | false | 0.002786 | 0.019499 | 0 | 0.278552 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
e8b6fe7f2e37a8df1a2366b0f0141f288ef02b18 | 198 | py | Python | code_soup/ch5/__init__.py | gchhablani/code-soup | eec666b6cd76bad9c7133a185bb85021b4a390f0 | [
"MIT"
] | 18 | 2021-07-29T16:21:02.000Z | 2021-12-13T12:58:15.000Z | code_soup/ch5/__init__.py | gchhablani/code-soup | eec666b6cd76bad9c7133a185bb85021b4a390f0 | [
"MIT"
] | 93 | 2021-08-04T02:48:15.000Z | 2022-01-16T04:58:51.000Z | code_soup/ch5/__init__.py | gchhablani/code-soup | eec666b6cd76bad9c7133a185bb85021b4a390f0 | [
"MIT"
] | 27 | 2021-08-06T06:51:34.000Z | 2021-11-02T05:47:18.000Z | from code_soup.ch5.algorithms.gan import GAN, Discriminator, Generator
from code_soup.ch5.algorithms.one_pixel_attack import OnePixelAttack
from code_soup.ch5.algorithms.zoo_attack import ZooAttack
| 49.5 | 70 | 0.873737 | 29 | 198 | 5.758621 | 0.517241 | 0.143713 | 0.215569 | 0.269461 | 0.449102 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016304 | 0.070707 | 198 | 3 | 71 | 66 | 0.891304 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
fa163b1da2dd0a64bb391d91853c9c2c7aabcfc7 | 161 | py | Python | tests/test_imports.py | rdb/hexima | f92db577d8d28de99d99038837eefd7d734936ef | [
"CC-BY-3.0",
"MIT"
] | 12 | 2019-04-06T10:23:54.000Z | 2022-03-19T10:15:59.000Z | tests/test_imports.py | rdb/pyweek27 | f92db577d8d28de99d99038837eefd7d734936ef | [
"CC-BY-3.0",
"MIT"
] | 8 | 2019-04-22T18:58:22.000Z | 2019-11-16T21:30:18.000Z | tests/test_imports.py | rdb/pyweek27 | f92db577d8d28de99d99038837eefd7d734936ef | [
"CC-BY-3.0",
"MIT"
] | 3 | 2020-04-29T09:10:49.000Z | 2020-09-19T05:05:26.000Z | def test_import_panda3d():
import panda3d.core #pylint: disable=unused-import
def test_import_game():
import game.world #pylint: disable=unused-import
| 23 | 54 | 0.763975 | 22 | 161 | 5.409091 | 0.454545 | 0.117647 | 0.218487 | 0.420168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014388 | 0.136646 | 161 | 6 | 55 | 26.833333 | 0.841727 | 0.360248 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0 | 1 | 0 | 1.5 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 7 |
fa20d654cf5ce16166dc471c51ac24e842fca572 | 16,722 | py | Python | src/abaqus/EngineeringFeature/PointFastener.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | 7 | 2022-01-21T09:15:45.000Z | 2022-02-15T09:31:58.000Z | src/abaqus/EngineeringFeature/PointFastener.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | src/abaqus/EngineeringFeature/PointFastener.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | import typing
from abaqusConstants import *
from .Fastener import Fastener
from ..Region.Region import Region
from ..Region.RegionArray import RegionArray
class PointFastener(Fastener):
"""The PointFastener object defines a point fastener.
The PointFastener object is derived from the Fastener object.
Attributes
----------
suppressed: Boolean
A Boolean specifying whether the fastener is suppressed or not. The default value is
OFF.
Notes
-----
This object can be accessed by:
.. code-block:: python
import part
mdb.models[name].parts[name].engineeringFeatures.fasteners[name]
import assembly
mdb.models[name].rootAssembly.engineeringFeatures.fasteners[name]
The corresponding analysis keywords are:
- FASTENER
"""
# A Boolean specifying whether the fastener is suppressed or not. The default value is
# OFF.
suppressed: Boolean = OFF
def __init__(self, name: str, region: Region, physicalRadius: float, directionVector: tuple = None,
targetSurfaces: RegionArray = MODEL, ur1: Boolean = ON, ur2: Boolean = ON,
ur3: Boolean = ON, attachmentMethod: SymbolicConstant = FACETOFACE,
influenceRadius: typing.Union[SymbolicConstant, float] = DEFAULT,
searchRadius: typing.Union[SymbolicConstant, float] = DEFAULT,
maximumLayers: SymbolicConstant = ALL, coupling: SymbolicConstant = CONTINUUM,
weightingMethod: SymbolicConstant = UNIFORM, additionalMass: float = 0,
adjustOrientation: Boolean = ON, localCsys: int = None,
connectionType: SymbolicConstant = CONNECTOR, sectionName: str = '',
connectorOrientationLocalCsys1: int = None, axis1: SymbolicConstant = AXIS_1,
angle1: float = 0, orient2SameAs1: Boolean = ON,
connectorOrientationLocalCsys2: int = None, axis2: SymbolicConstant = AXIS_1,
angle2: float = 0, unsorted: Boolean = OFF):
"""This method creates a PointFastener object. Although the constructor is available both
for parts and for the assembly, PointFastener objects are currently supported only under
the assembly.
Notes
-----
This function can be accessed by:
.. code-block:: python
mdb.models[name].parts[name].engineeringFeatures.PointFastener
mdb.models[name].rootAssembly.engineeringFeatures.PointFastener
Parameters
----------
name
A String specifying the repository key.
region
A Region object specifying the region to which fasteners are applied.
physicalRadius
A Float specifying the physical fastener radius.
directionVector
A VertexArray object of length 2 specifying the direction of projection. Instead of
through a ConstrainedSketchVertex, each point may be specified through a tuple of coordinates. The
default value is None.
targetSurfaces
A RegionArray object specifying surfaces to be fastened. The default value is MODEL.
ur1
A Boolean specifying whether to constrain rotational displacement component about the
1-direction. The default value is ON.
ur2
A Boolean specifying whether to constrain rotational displacement component about the
2-direction. The default value is ON.
ur3
A Boolean specifying whether to constrain rotational displacement component about the
3-direction. The default value is ON.
attachmentMethod
A SymbolicConstant specifying the method used to locate points for attaching fasteners.
Possible values are FACETOFACE, EDGETOFACE, FACETOEDGE, and EDGETOEDGE. The default
value is FACETOFACE.
influenceRadius
The SymbolicConstant DEFAULT or a Float specifying the maximum distance from the
projection point on a connected surface within which the nodes on that surface must lie
to contribute to the motion of the projection point. If the value is DEFAULT, a radius
is computed from the fastener diameter and the surface facet lengths. The default value
is DEFAULT.
searchRadius
The SymbolicConstant DEFAULT or a Float specifying the distance from the positioning
points within which the connected points must lie. The default value is DEFAULT.
maximumLayers
The SymbolicConstant ALL or an Int specifying the maximum number of layers for each
fastener. If the value is ALL, the maximum possible number of layers within the
searchRadius will be used for each fastener. The default value is ALL.
coupling
A SymbolicConstant specifying the coupling method used to couple the displacement and
rotation of each attachment point to the average motion of the surface nodes within the
radius of influence from the fastener projection point. Possible values are CONTINUUM
and STRUCTURAL. The default value is CONTINUUM.
weightingMethod
A SymbolicConstant specifying the weighting scheme to be used to weight the contribution
of the displacements of the surface nodes within the radius of influence to the motion
of the fastener projection point. UNIFORM, LINEAR, QUADRATIC, and CUBIC indicate
uniform, linear decreasing, quadratic polynomial decreasing, and cubic polynomial
monotonic decreasing weight distributions. Possible values are UNIFORM, LINEAR,
QUADRATIC, and CUBIC. The default value is UNIFORM.
additionalMass
A Float specifying the mass that will be distributed to fastener attachment points. The
default value is 0.0.
adjustOrientation
A Boolean specifying whether to adjust localCsys such that the local z-axis for each
fastener is normal to the surface that is closest to the reference node for that
fastener. The default value is ON.
localCsys
None or a DatumCsys object specifying the local coordinate system. If *localCsys*=None,
the global coordinate system is used. When this member is queried, it returns an Int.
The default value is None.
connectionType
A SymbolicConstant specifying the fastener connection type. Possible values are
CONNECTOR and BEAM_MPC. The default value is CONNECTOR.
sectionName
A String specifying the connector section assigned to generated connectors. The default
value is an empty string.
connectorOrientationLocalCsys1
None or a DatumCsys object specifying the local coordinate system of the first connector
point in generated connectors. If *connectorOrientationLocalCsys1*=None, the degrees of
freedom are defined in the global coordinate system. When this member is queried, it
returns an Int. The default value is None.
axis1
A SymbolicConstant specifying the axis of a datum coordinate system about which an
additional rotation is applied for the first point in generated connectors. Possible
values are AXIS_1, AXIS_2, and AXIS_3. The default value is AXIS_1.
angle1
A Float specifying the angle of the additional rotation for the first point in generated
connectors. The default value is 0.0.
orient2SameAs1
A Boolean specifying whether or not the second connector point in generated connectors
is to use the same local coordinate system, axis, and angle as the first point. The
default value is ON.
connectorOrientationLocalCsys2
None or a DatumCsys object specifying the local coordinate system of the second
connector point in generated connectors. If *connectorOrientationLocalCsys2*=None, the
degrees of freedom are defined in the global coordinate system. When this member is
queried, it returns an Int. The default value is None.
axis2
A SymbolicConstant specifying the axis of a datum coordinate system about which an
additional rotation is applied for the second point in generated connectors. Possible
values are AXIS_1, AXIS_2, and AXIS_3. The default value is AXIS_1.
angle2
A Float specifying the angle of the additional rotation for the second point in
generated connectors. The default value is 0.0.
unsorted
A Boolean specifying whether the analysis product should leave targetSurfaces in the
given unsorted order, or sort them by proximity to determine the connectivity of
fastening points. The default value is OFF.
Returns
-------
A PointFastener object.
"""
super().__init__()
pass
def setValues(self, directionVector: tuple = None, targetSurfaces: RegionArray = MODEL, ur1: Boolean = ON,
ur2: Boolean = ON, ur3: Boolean = ON, attachmentMethod: SymbolicConstant = FACETOFACE,
influenceRadius: typing.Union[SymbolicConstant, float] = DEFAULT,
searchRadius: typing.Union[SymbolicConstant, float] = DEFAULT,
maximumLayers: SymbolicConstant = ALL, coupling: SymbolicConstant = CONTINUUM,
weightingMethod: SymbolicConstant = UNIFORM, additionalMass: float = 0,
adjustOrientation: Boolean = ON, localCsys: int = None,
connectionType: SymbolicConstant = CONNECTOR, sectionName: str = '',
connectorOrientationLocalCsys1: int = None, axis1: SymbolicConstant = AXIS_1,
angle1: float = 0, orient2SameAs1: Boolean = ON,
connectorOrientationLocalCsys2: int = None, axis2: SymbolicConstant = AXIS_1,
angle2: float = 0, unsorted: Boolean = OFF):
"""This method modifies the PointFastener object.
Parameters
----------
directionVector
A VertexArray object of length 2 specifying the direction of projection. Instead of
through a ConstrainedSketchVertex, each point may be specified through a tuple of coordinates. The
default value is None.
targetSurfaces
A RegionArray object specifying surfaces to be fastened. The default value is MODEL.
ur1
A Boolean specifying whether to constrain rotational displacement component about the
1-direction. The default value is ON.
ur2
A Boolean specifying whether to constrain rotational displacement component about the
2-direction. The default value is ON.
ur3
A Boolean specifying whether to constrain rotational displacement component about the
3-direction. The default value is ON.
attachmentMethod
A SymbolicConstant specifying the method used to locate points for attaching fasteners.
Possible values are FACETOFACE, EDGETOFACE, FACETOEDGE, and EDGETOEDGE. The default
value is FACETOFACE.
influenceRadius
The SymbolicConstant DEFAULT or a Float specifying the maximum distance from the
projection point on a connected surface within which the nodes on that surface must lie
to contribute to the motion of the projection point. If the value is DEFAULT, a radius
is computed from the fastener diameter and the surface facet lengths. The default value
is DEFAULT.
searchRadius
The SymbolicConstant DEFAULT or a Float specifying the distance from the positioning
points within which the connected points must lie. The default value is DEFAULT.
maximumLayers
The SymbolicConstant ALL or an Int specifying the maximum number of layers for each
fastener. If the value is ALL, the maximum possible number of layers within the
searchRadius will be used for each fastener. The default value is ALL.
coupling
A SymbolicConstant specifying the coupling method used to couple the displacement and
rotation of each attachment point to the average motion of the surface nodes within the
radius of influence from the fastener projection point. Possible values are CONTINUUM
and STRUCTURAL. The default value is CONTINUUM.
weightingMethod
A SymbolicConstant specifying the weighting scheme to be used to weight the contribution
of the displacements of the surface nodes within the radius of influence to the motion
of the fastener projection point. UNIFORM, LINEAR, QUADRATIC, and CUBIC indicate
uniform, linear decreasing, quadratic polynomial decreasing, and cubic polynomial
monotonic decreasing weight distributions. Possible values are UNIFORM, LINEAR,
QUADRATIC, and CUBIC. The default value is UNIFORM.
additionalMass
A Float specifying the mass that will be distributed to fastener attachment points. The
default value is 0.0.
adjustOrientation
A Boolean specifying whether to adjust localCsys such that the local z-axis for each
fastener is normal to the surface that is closest to the reference node for that
fastener. The default value is ON.
localCsys
None or a DatumCsys object specifying the local coordinate system. If *localCsys*=None,
the global coordinate system is used. When this member is queried, it returns an Int.
The default value is None.
connectionType
A SymbolicConstant specifying the fastener connection type. Possible values are
CONNECTOR and BEAM_MPC. The default value is CONNECTOR.
sectionName
A String specifying the connector section assigned to generated connectors. The default
value is an empty string.
connectorOrientationLocalCsys1
None or a DatumCsys object specifying the local coordinate system of the first connector
point in generated connectors. If *connectorOrientationLocalCsys1*=None, the degrees of
freedom are defined in the global coordinate system. When this member is queried, it
returns an Int. The default value is None.
axis1
A SymbolicConstant specifying the axis of a datum coordinate system about which an
additional rotation is applied for the first point in generated connectors. Possible
values are AXIS_1, AXIS_2, and AXIS_3. The default value is AXIS_1.
angle1
A Float specifying the angle of the additional rotation for the first point in generated
connectors. The default value is 0.0.
orient2SameAs1
A Boolean specifying whether or not the second connector point in generated connectors
is to use the same local coordinate system, axis, and angle as the first point. The
default value is ON.
connectorOrientationLocalCsys2
None or a DatumCsys object specifying the local coordinate system of the second
connector point in generated connectors. If *connectorOrientationLocalCsys2*=None, the
degrees of freedom are defined in the global coordinate system. When this member is
queried, it returns an Int. The default value is None.
axis2
A SymbolicConstant specifying the axis of a datum coordinate system about which an
additional rotation is applied for the second point in generated connectors. Possible
values are AXIS_1, AXIS_2, and AXIS_3. The default value is AXIS_1.
angle2
A Float specifying the angle of the additional rotation for the second point in
generated connectors. The default value is 0.0.
unsorted
A Boolean specifying whether the analysis product should leave targetSurfaces in the
given unsorted order, or sort them by proximity to determine the connectivity of
fastening points. The default value is OFF.
"""
pass
| 58.468531 | 110 | 0.674022 | 1,946 | 16,722 | 5.776465 | 0.123844 | 0.033627 | 0.06672 | 0.075616 | 0.918779 | 0.910951 | 0.903656 | 0.898319 | 0.898319 | 0.898319 | 0 | 0.00796 | 0.293805 | 16,722 | 285 | 111 | 58.673684 | 0.943941 | 0.746262 | 0 | 0.628571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0.057143 | 0.142857 | 0 | 0.257143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 7 |
fa6dacb5042a609ed6ddf2d835fc32f3a353786f | 5,177 | py | Python | bitbar/volume.5s.py | fenhl/syncbin | 1dd83837721e6c74ed7e564cabe592dfe3245acf | [
"MIT"
] | 3 | 2020-01-21T10:12:29.000Z | 2020-10-22T05:00:18.000Z | bitbar/volume.5s.py | fenhl/syncbin | 1dd83837721e6c74ed7e564cabe592dfe3245acf | [
"MIT"
] | 3 | 2017-09-30T05:29:11.000Z | 2021-07-16T13:39:18.000Z | bitbar/volume.5s.py | fenhl/syncbin | 1dd83837721e6c74ed7e564cabe592dfe3245acf | [
"MIT"
] | null | null | null | #!/usr/local/bin/python3
import sys
import subprocess
MUTED = 'iVBORw0KGgoAAAANSUhEUgAAACQAAAAkCAYAAADhAJiYAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAACXBIWXMAABYlAAAWJQFJUiTwAAABWWlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNS40LjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgpMwidZAAAA+0lEQVRYCc3V0Q7CMAgF0Gr88PnlWshooGMrTdbLlmxW0N4jfbCUdddWt/6t235uZ8E8AqQx6aAekwryMGmgM0wK6AoDB40wUFAEAwNFMRDQDGY5aBYzDfoUzP+NwF41z7ta/+11F9YkWEeYGhpEEA3Qa0bSCA9F7tz3kGMa5fDnkBMSmPdTWw8JIkgLVipTQ4O8YzM1JMgEqwnRsvVQoBa4Q+iYzFEJCgXaHfyiIXrNTTToAKgKU6M3/ThZeuPDBI72pQnRF6L3d7RhRn+roTTV6A0xzqAgIAqJomCgKAoKiqDgoBEqBXSFSgOdoVJBHiod1KMeAdKoKdAf1w2rln5sYTMAAAAASUVORK5CYII='
LOW = 'iVBORw0KGgoAAAANSUhEUgAAACQAAAAkCAYAAADhAJiYAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAACXBIWXMAABYlAAAWJQFJUiTwAAABWWlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNS40LjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgpMwidZAAAA2UlEQVRYCe3V0QqAIAwFUIs+3D8vfbgwxrSN8q6HhCh1bKdFVcq6UVvqc136WGZgPgGSmHSQxqSCLEwaaIRJAc0wdNAdhgryYGggL4YCimCWg6KYMOgonP8NYFurNx37dPe9TUAAG2ZmgYYAvcEEubrEBOlmmPMfZLZFLP4dEs0wL5kdwjcIb1s6yAToRVaHXN3ROM+8tqCePHJ48j6KqV8D9buJoHo8ZdRWxfPoKBgU8aAQSzvfoWgQWWiGknHU6xGKitDFLJSOoc81ig6wCkqUtZ+yBlSo+AWz9q1+ipbe+wAAAABJRU5ErkJggg=='
HIGH = 'iVBORw0KGgoAAAANSUhEUgAAACQAAAAkCAYAAADhAJiYAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAACXBIWXMAABYlAAAWJQFJUiTwAAABWWlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNS40LjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgpMwidZAAABEUlEQVRYCe2UjQ7DIAiEuz05b75h6zVOq6D402Q12RDwzm803baNW8TWn3H2dc6AuQVQCLMcKIZZCnQFswwoB7MEqAQzHUiCmQqkgZkGpIUpAbleqc9t3SI+BjNNDF1xHrU4R10diU/CRBtjc+hQ/8lf/gI0R8TwDrd3y0G4leTvoz78O7nY35iAzQJS/+KZQNKUduiZQKopPUDSmJ4J3WlCyX+Oh8Pbt6d/+8ik6cR96cmefeKdE9d8TrHfQIt6nKOujsQnYaKJoTHOoxbnqFdHYgXMpJgzhy7Xr64TK2BaitXGFoEGyuLfpJWgmkytohKU1btZn4NqNuwhvILq4WvyiKFMZr3EIVQvT7MPoKqMvgccyWgfMQBrAAAAAElFTkSuQmCC'
UNKNOWN = 'iVBORw0KGgoAAAANSUhEUgAAACYAAAAmCAYAAACoPemuAAAABGdBTUEAALGPC/xhBQAAACBjSFJNAAB6JgAAgIQAAPoAAACA6AAAdTAAAOpgAAA6mAAAF3CculE8AAAACXBIWXMAABYlAAAWJQFJUiTwAAABWWlUWHRYTUw6Y29tLmFkb2JlLnhtcAAAAAAAPHg6eG1wbWV0YSB4bWxuczp4PSJhZG9iZTpuczptZXRhLyIgeDp4bXB0az0iWE1QIENvcmUgNS40LjAiPgogICA8cmRmOlJERiB4bWxuczpyZGY9Imh0dHA6Ly93d3cudzMub3JnLzE5OTkvMDIvMjItcmRmLXN5bnRheC1ucyMiPgogICAgICA8cmRmOkRlc2NyaXB0aW9uIHJkZjphYm91dD0iIgogICAgICAgICAgICB4bWxuczp0aWZmPSJodHRwOi8vbnMuYWRvYmUuY29tL3RpZmYvMS4wLyI+CiAgICAgICAgIDx0aWZmOk9yaWVudGF0aW9uPjE8L3RpZmY6T3JpZW50YXRpb24+CiAgICAgIDwvcmRmOkRlc2NyaXB0aW9uPgogICA8L3JkZjpSREY+CjwveDp4bXBtZXRhPgpMwidZAAACV0lEQVRYCWNgGAWDKwT+k+scJnI1EqGPbEeBzKaVwyhyFK0cRrGjaOEwqjiK2g6jmqNADmMBEVQARDvq4P79/2/cf8HwB2wpOwOfuDSDuroSg5mKKCMV3IFiBMhR+DBY8elDG/Cp+Z8zcRfRnkOxHQcHr2VQBzOc3DGVGHX/tdOXUcVxRFkG8lC2uRmK2ooZW//vPH7h//o1C//HO6HKZS+5TJHjUCyChgxWsZM7+1HEl118jmFxubcpXI1p8WawPKyAhUvgswRJDsgkDnx88hCu0LblAEOUviRGIg/OCIWreXv2BphNrVwJNxidIW2VyrB4hSPDr9//GcR1VBgO16CrYGD48eMTQpCTFcEGskgNMVLUo1iEjVMTZAI3z65+N4gNB3AJoAi12XBL0BlXrp35X47kKJDdvUcfDazDdi/rwgiAuGnHUBwF8giGIiqKgcxHAYdWt2DYV770NIajQJowFFJRDMVRV+9fQLFLyavt/45bL7A6iq4Ou3xxK9xhyjrF/7fgcBSsHEPxFS05n+6dhxv/T0WZwUdNAqNcAymgu8M4hWTgDtO11oaz0Rkw1+KMY3QNZPBhdpCkle4hRqzryPIN0HBSQhjDjqVzp/+/+ZqRQcXKmSHOThVDnljH41IHz11Qh+Lio+jfOjETRd2Oe9iLCkqikiyfvnv1AsWhLz9jD3xKHAaygGTHqdt5oThMh58ZhQ/jkGwwTCMajd3bEEVk2UFpiMHcR5blMM3YaGo5DGQ2VR1HTYdR1XHUdhjVHEcLh1HNcSCDaAXw5VZa2TlCzQUAJ4GCI3Dm3PYAAAAASUVORK5CYII='
# Query macOS via AppleScript for the current output-mute flag and volume.
# 'osascript -e' prints the value followed by a newline, hence the exact
# string comparisons below.
muted = subprocess.check_output(['osascript', '-e', 'output muted of (get volume settings)'], stderr=subprocess.DEVNULL).decode('utf-8') == 'true\n'
volume_str = subprocess.check_output(['osascript', '-e', 'output volume of (get volume settings)'], stderr=subprocess.DEVNULL).decode('utf-8')
# 'missing value' is reported when no output volume is available (e.g. some
# audio devices) -- show the "unknown" icon and stop.
if volume_str == 'missing value\n':
    print(f'|templateImage={UNKNOWN}')
    sys.exit()
else:
    # int() tolerates the trailing newline in volume_str
    volume = int(volume_str)
#TODO check whether headphones are being used, expect 6 if yes, 25 if no
# Pick the BitBar menu icon: muted wins over the volume thresholds.
# Note: a volume in the 6..25 range with sound on prints nothing, i.e. no
# icon is shown for "normal" volume.
if muted:
    print(f'|templateImage={MUTED}')
elif volume < 6:
    print(f'|templateImage={LOW}')
elif volume > 25:
    print(f'|templateImage={HIGH}')
| 191.740741 | 1,472 | 0.939347 | 170 | 5,177 | 28.576471 | 0.617647 | 0.401811 | 0.443804 | 0.370523 | 0.036641 | 0.036641 | 0.021408 | 0.021408 | 0.021408 | 0.021408 | 0 | 0.105346 | 0.020862 | 5,177 | 26 | 1,473 | 199.115385 | 0.853028 | 0.018157 | 0 | 0 | 0 | 0.210526 | 0.914584 | 0.885456 | 0 | 1 | 0 | 0.038462 | 0 | 1 | 0 | false | 0 | 0.105263 | 0 | 0.105263 | 0.210526 | 0 | 0 | 1 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 |
d783bd0a28ffe99832c67723a6440032b477a03b | 9,584 | py | Python | FlightBooking/tests/booking_web/test_booking_model.py | davewalker5/FlightBooking | 6e5d1bb83789415edb2f9f35404eb0e5bf773111 | [
"MIT"
] | null | null | null | FlightBooking/tests/booking_web/test_booking_model.py | davewalker5/FlightBooking | 6e5d1bb83789415edb2f9f35404eb0e5bf773111 | [
"MIT"
] | null | null | null | FlightBooking/tests/booking_web/test_booking_model.py | davewalker5/FlightBooking | 6e5d1bb83789415edb2f9f35404eb0e5bf773111 | [
"MIT"
] | null | null | null | import unittest
import datetime
import os
from flight_booking.utils import get_flight_file_path
from src.booking_web.model import FlightBookingModel
class TestFlightBookingModel(unittest.TestCase):
    """Unit tests for FlightBookingModel.

    Every flight created in these tests is the same dummy flight
    (EasyJet U28549, LGW -> RMU, departing 20/11/2021, duration 2:25),
    so the shared expectations are factored into _assert_flight_details().
    """

    def setUp(self) -> None:
        self._model = FlightBookingModel()

    def _assert_flight_details(self, passengers=0, aircraft=None, layout=None,
                               capacity=0, available_capacity=0,
                               seat_allocations=None):
        """Assert the common properties of the model's current flight.

        :param passengers: expected number of passengers on the flight
        :param aircraft: expected aircraft model, or None if not set
        :param layout: expected seating layout, or None if not set
        :param capacity: expected total seating capacity
        :param available_capacity: expected number of unallocated seats
        :param seat_allocations: expected number of seat allocations, or
                                 None when allocations are unavailable
                                 (no aircraft layout)
        """
        flight = self._model.flight
        self.assertIsNotNone(flight)
        self.assertEqual("EasyJet", flight.airline)
        self.assertEqual("U28549", flight.number)
        self.assertEqual("LGW", flight.embarkation_airport_code)
        self.assertEqual("RMU", flight.destination_airport_code)
        self.assertEqual(datetime.date(2021, 11, 20), flight.departure_date)
        self.assertEqual((2, 25), flight.duration)
        self.assertEqual(passengers, len(flight.passengers))
        # The originals used assertIsNone(None, flight.aircraft), which only
        # asserted that None is None (the second argument is the failure
        # message) -- assert the aircraft/layout values properly here.
        self.assertEqual(aircraft, flight.aircraft)
        self.assertEqual(layout, flight.layout)
        self.assertEqual(capacity, flight.capacity)
        self.assertEqual(available_capacity, flight.available_capacity)
        if seat_allocations is None:
            self.assertIsNone(flight.get_all_seat_allocations())
        else:
            self.assertEqual(seat_allocations,
                             len(flight.get_all_seat_allocations()))

    def _first_passenger_with_seat(self):
        """Return the first passenger from the seat-allocation-aware view."""
        passenger_details = \
            self._model.get_passengers_including_seat_allocations()
        passenger_id = list(passenger_details.keys())[0]
        return passenger_details[passenger_id]

    def test_create_flight(self):
        """A flight created from raw strings exposes the parsed properties."""
        self._model.create_flight(
            airline="EasyJet",
            number="U28549",
            embarkation="LGW",
            destination="RMU",
            departure_date="20/11/2021",
            departure_time="10:45",
            duration="2:25")
        self._assert_flight_details()

    def test_create_dummy_flight(self):
        """A dummy flight with no passengers, aircraft or layout."""
        self._model.create_dummy_flight(number_of_passengers=0,
                                        aircraft=None,
                                        layout=None,
                                        perform_seat_allocations=False)
        self._assert_flight_details()

    def test_create_dummy_flight_with_passengers(self):
        """A dummy flight carrying passengers but no aircraft or layout."""
        self._model.create_dummy_flight(number_of_passengers=10,
                                        aircraft=None,
                                        layout=None,
                                        perform_seat_allocations=False)
        self._assert_flight_details(passengers=10)

    def test_create_dummy_flight_with_passengers_and_layout(self):
        """With an aircraft layout the capacity figures become available."""
        self._model.create_dummy_flight(number_of_passengers=10,
                                        aircraft="A321",
                                        layout="neo",
                                        perform_seat_allocations=False)
        self._assert_flight_details(passengers=10, aircraft="A321",
                                    layout="neo", capacity=235,
                                    available_capacity=225,
                                    seat_allocations=0)

    def test_create_dummy_flight_with_passengers_layout_and_allocations(self):
        """Seat allocations are created when requested."""
        self._model.create_dummy_flight(number_of_passengers=10,
                                        aircraft="A321",
                                        layout="neo",
                                        perform_seat_allocations=True)
        self._assert_flight_details(passengers=10, aircraft="A321",
                                    layout="neo", capacity=235,
                                    available_capacity=225,
                                    seat_allocations=10)

    def test_can_add_passenger(self):
        """Adding a passenger stores the normalised passenger details."""
        self._model.create_dummy_flight(number_of_passengers=0,
                                        aircraft=None,
                                        layout=None,
                                        perform_seat_allocations=False)
        self._model.add_passenger("Some One",
                                  "M",
                                  "01/02/1980",
                                  "UK",
                                  "UK",
                                  "1234567890")
        self.assertEqual(1, len(self._model.flight.passengers))
        passenger_id = list(self._model.flight.passengers.keys())[0]
        passenger = self._model.flight.passengers[passenger_id]
        self.assertEqual("Some One", passenger["name"])
        self.assertEqual("M", passenger["gender"])
        # The date of birth is normalised to YYYYMMDD
        self.assertEqual("19800201", passenger["dob"])
        self.assertEqual("UK", passenger["nationality"])
        self.assertEqual("UK", passenger["residency"])
        self.assertEqual("1234567890", passenger["passport_number"])

    def test_can_get_passenger_with_no_seat_allocation(self):
        """Without allocations the combined passenger view has no seat."""
        self._model.create_dummy_flight(number_of_passengers=1,
                                        aircraft="A321",
                                        layout="neo",
                                        perform_seat_allocations=False)
        passenger = self._first_passenger_with_seat()
        self.assertIsNone(passenger["seat_number"])

    def test_can_get_passenger_with_seat_allocation(self):
        """With allocations the combined passenger view carries the seat."""
        self._model.create_dummy_flight(number_of_passengers=1,
                                        aircraft="A321",
                                        layout="neo",
                                        perform_seat_allocations=True)
        passenger = self._first_passenger_with_seat()
        self.assertEqual("1A", passenger["seat_number"])

    def test_can_save_flight(self):
        """Saving the model writes the flight file to its canonical path."""
        self._model.create_dummy_flight(number_of_passengers=0,
                                        aircraft=None,
                                        layout=None,
                                        perform_seat_allocations=False)
        file_path = get_flight_file_path(self._model.flight.number,
                                         self._model.flight.departure_date)
        # Start from a clean slate so the existence check is meaningful
        if os.path.exists(file_path):
            os.remove(file_path)
        self.assertFalse(os.path.exists(file_path))
        self._model.save()
        self.assertTrue(os.path.exists(file_path))

    def test_can_close_flight(self):
        """Closing the flight clears the model's current flight."""
        self._model.create_dummy_flight(number_of_passengers=0,
                                        aircraft=None,
                                        layout=None,
                                        perform_seat_allocations=False)
        self.assertIsNotNone(self._model.flight)
        self._model.close_flight()
        self.assertIsNone(self._model.flight)
| 50.708995 | 102 | 0.630113 | 1,013 | 9,584 | 5.685094 | 0.101678 | 0.143775 | 0.197951 | 0.032992 | 0.855704 | 0.827053 | 0.811599 | 0.799965 | 0.782428 | 0.782428 | 0 | 0.028604 | 0.266799 | 9,584 | 188 | 103 | 50.978723 | 0.790949 | 0 | 0 | 0.693252 | 0 | 0 | 0.03245 | 0 | 0 | 0 | 0 | 0 | 0.503067 | 1 | 0.067485 | false | 0.239264 | 0.030675 | 0 | 0.104294 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 9 |
ad3872e0770454e38c029dcda9b5e9a1b95dc024 | 131 | py | Python | PIP/Class Program/ClassQuestion18.py | ankitrajbiswal/SEM_5 | db716e242e77149a4091e0e564356ddc724aeff0 | [
"Apache-2.0"
] | 10 | 2021-04-24T11:46:48.000Z | 2022-01-17T05:14:37.000Z | PIP/Class Program/ClassQuestion18.py | ankitrajbiswal/SEM_5 | db716e242e77149a4091e0e564356ddc724aeff0 | [
"Apache-2.0"
] | 2 | 2021-06-28T11:51:50.000Z | 2021-11-01T08:21:53.000Z | PIP/Class Program/ClassQuestion18.py | ankitrajbiswal/SEM_5 | db716e242e77149a4091e0e564356ddc724aeff0 | [
"Apache-2.0"
] | 16 | 2021-04-24T11:46:58.000Z | 2022-03-02T05:08:19.000Z | for i in range(1,11):
print(2," X ",i, " = ", 2*i)
print("\n")
for i in range(1,11):
print(3," X ",i, " = ", 3*i)
| 21.833333 | 33 | 0.412214 | 26 | 131 | 2.076923 | 0.423077 | 0.148148 | 0.222222 | 0.407407 | 0.703704 | 0.703704 | 0.703704 | 0 | 0 | 0 | 0 | 0.10989 | 0.305344 | 131 | 6 | 34 | 21.833333 | 0.483516 | 0 | 0 | 0.4 | 0 | 0 | 0.110236 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.6 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 8 |
ad71a30a2e5fa00c94b2dca2b64a338611621779 | 208 | py | Python | EasyNN/model/activation/__init__.py | danielwilczak101/EasyNN | 89319e974c324dda228c6ecff7c39d723eda3ca2 | [
"MIT"
] | 5 | 2021-01-28T21:19:02.000Z | 2022-02-03T05:47:47.000Z | EasyNN/model/activation/__init__.py | danielwilczak101/EasyNN | 89319e974c324dda228c6ecff7c39d723eda3ca2 | [
"MIT"
] | 1 | 2021-02-04T20:57:45.000Z | 2021-03-03T14:49:44.000Z | EasyNN/model/activation/__init__.py | danielwilczak101/EasyNN | 89319e974c324dda228c6ecff7c39d723eda3ca2 | [
"MIT"
] | 2 | 2021-02-12T04:27:40.000Z | 2021-12-19T20:11:20.000Z | from EasyNN.model.activation.abc import Activation
from EasyNN.model.activation.log_softmax import LogSoftMax
from EasyNN.model.activation.relu import ReLU
from EasyNN.model.activation.softmax import SoftMax
| 41.6 | 58 | 0.865385 | 29 | 208 | 6.172414 | 0.344828 | 0.223464 | 0.335196 | 0.558659 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.076923 | 208 | 4 | 59 | 52 | 0.932292 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
ad9cb44e0fef520f8e2c92745f8b0aa48b2b7790 | 2,226 | py | Python | geo/gis/grass7.py | Tamlyn78/geo | dd63372acdd1fe8b744c05eca5ad23836e6a1604 | [
"MIT"
] | null | null | null | geo/gis/grass7.py | Tamlyn78/geo | dd63372acdd1fe8b744c05eca5ad23836e6a1604 | [
"MIT"
] | null | null | null | geo/gis/grass7.py | Tamlyn78/geo | dd63372acdd1fe8b744c05eca5ad23836e6a1604 | [
"MIT"
] | null | null | null | import os
from os.path import expanduser, join
import sys
import subprocess
def gisbase7(binpath):
    """Locate a GRASS GIS 7 installation and prepare this Python session.

    Runs ``binpath --config path`` to obtain the GISBASE directory, exports
    it via the ``GISBASE`` environment variable and appends GRASS's Python
    package directory to ``sys.path`` so that ``grass`` becomes importable.

    For python3 the GRASS version must be <= 7.7.  When an OSGEO4W64
    installation is used the global dll files are not accessible; they
    either need to be moved manually or the global dll folder added to the
    system path.  Find binpath on your current system by manually opening a
    grass terminal and typing g.gisenv get='GISBASE'.

    :param binpath: path to the grass start script / binary
    :return: the GISBASE directory reported by GRASS (str)
    """
    startcmd = [binpath, '--config', 'path']
    p = subprocess.Popen(startcmd, shell=False, stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE)
    out, err = p.communicate()
    if p.returncode != 0:
        # Python 3 print function -- the original ``print >>sys.stderr, ...``
        # is Python 2 syntax and raises TypeError under Python 3.
        print("ERROR: Cannot find GRASS GIS 7 start script (%s)" % startcmd,
              file=sys.stderr)
        sys.exit(-1)
    # communicate() yields bytes; decode before stripping the line ending
    # (bytes.strip() with a str argument would raise TypeError).
    gisbase = out.decode().strip('\n\r')
    os.environ['GISBASE'] = gisbase
    py = join(gisbase, "etc", "python")
    sys.path.append(py)
    return gisbase
class GISBASE7:
    """Locate a GRASS GIS 7 installation and prepare this Python session.

    On construction, exports the ``GISBASE`` environment variable and
    appends GRASS's Python package directory to ``sys.path`` so that
    ``grass`` becomes importable (for python3 the GRASS version must be
    <= 7.7).  When an OSGEO4W64 installation is used, the global dll files
    are not accessible; they either need to be moved manually or the global
    dll folder added to the system path.
    """

    def __init__(self, binpath):
        """Find binpath on your current system by manually opening a grass
        terminal and typing g.gisenv get='GISBASE'"""
        self.gisbase = self._gisbase(binpath)

    def _gisbase(self, binpath):
        """Query ``binpath --config path`` and configure the environment.

        :param binpath: path to the grass start script / binary
        :return: the GISBASE directory reported by GRASS (str)
        """
        startcmd = [binpath, '--config', 'path']
        p = subprocess.Popen(startcmd, shell=False, stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE)
        out, err = p.communicate()
        if p.returncode != 0:
            # Python 3 print function -- the original Python 2 style
            # ``print >>sys.stderr, ...`` raises TypeError under Python 3.
            print("ERROR: Cannot find GRASS GIS 7 start script (%s)" % startcmd,
                  file=sys.stderr)
            sys.exit(-1)
        # communicate() yields bytes; decode before stripping the line
        # ending (bytes.strip() with a str argument raises TypeError).
        gisbase = out.decode().strip('\n\r')
        os.environ['GISBASE'] = gisbase
        py = join(gisbase, "etc", "python")
        sys.path.append(py)
        return gisbase
| 43.647059 | 182 | 0.631626 | 290 | 2,226 | 4.827586 | 0.351724 | 0.025714 | 0.034286 | 0.035714 | 0.884286 | 0.884286 | 0.884286 | 0.884286 | 0.884286 | 0.884286 | 0 | 0.012293 | 0.269093 | 2,226 | 50 | 183 | 44.52 | 0.848187 | 0.366128 | 0 | 0.727273 | 0 | 0 | 0.124031 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.121212 | 0 | 0.242424 | 0.060606 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
a8fbdc51ece5e6f10ff21555fa32154be54810b6 | 144 | py | Python | boa3_test/test_sc/interop_test/crypto/Hash256Bytes.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3_test/test_sc/interop_test/crypto/Hash256Bytes.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3_test/test_sc/interop_test/crypto/Hash256Bytes.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z | from boa3.builtin import public
from boa3.builtin.interop.crypto import hash256
@public
def Main() -> bytes:
    """Contract entry point: return the hash256 digest of b'unit test'."""
    return hash256(b'unit test')
| 18 | 47 | 0.75 | 21 | 144 | 5.142857 | 0.714286 | 0.148148 | 0.277778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.065574 | 0.152778 | 144 | 7 | 48 | 20.571429 | 0.819672 | 0 | 0 | 0 | 0 | 0 | 0.0625 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | true | 0 | 0.4 | 0.2 | 0.8 | 0 | 1 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 0 | 0 | 7 |
d175194bd44682d0c0f97c3ffcef9e2a7d563c08 | 382 | py | Python | boa3_test/test_sc/logical_test/AugmentedAssignmentOperators.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 25 | 2020-07-22T19:37:43.000Z | 2022-03-08T03:23:55.000Z | boa3_test/test_sc/logical_test/AugmentedAssignmentOperators.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 419 | 2020-04-23T17:48:14.000Z | 2022-03-31T13:17:45.000Z | boa3_test/test_sc/logical_test/AugmentedAssignmentOperators.py | hal0x2328/neo3-boa | 6825a3533384cb01660773050719402a9703065b | [
"Apache-2.0"
] | 15 | 2020-05-21T21:54:24.000Z | 2021-11-18T06:17:24.000Z | from boa3.builtin import public
@public
def left_shift(a: int, b: int) -> int:
    """Return ``a`` shifted left by ``b`` bits via the ``<<=`` operator."""
    a <<= b
    return a
@public
def right_shift(a: int, b: int) -> int:
    """Return ``a`` shifted right by ``b`` bits via the ``>>=`` operator."""
    a >>= b
    return a
@public
def l_and(a: int, b: int) -> int:
    """Return the bitwise AND of ``a`` and ``b`` via the ``&=`` operator."""
    a &= b
    return a
@public
def l_or(a: int, b: int) -> int:
    """Return the bitwise OR of ``a`` and ``b`` via the ``|=`` operator."""
    a |= b
    return a
@public
def xor(a: int, b: int) -> int:
    """Return the bitwise XOR of ``a`` and ``b`` via the ``^=`` operator."""
    a ^= b
    return a
| 11.9375 | 39 | 0.536649 | 69 | 382 | 2.913043 | 0.246377 | 0.223881 | 0.124378 | 0.199005 | 0.736318 | 0.736318 | 0.736318 | 0.736318 | 0.736318 | 0.636816 | 0 | 0.003802 | 0.311518 | 382 | 31 | 40 | 12.322581 | 0.760456 | 0 | 0 | 0.47619 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.238095 | false | 0 | 0.047619 | 0 | 0.52381 | 0 | 0 | 0 | 0 | null | 1 | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 9 |
0f107e44c4012774cc56749edb51a69d39e35e74 | 9,267 | py | Python | dictionary_learning.py | denisilie94/pyod-dl | 70043f545b0cf84e72e6d851aabd34a199b2f332 | [
"MIT"
] | null | null | null | dictionary_learning.py | denisilie94/pyod-dl | 70043f545b0cf84e72e6d851aabd34a199b2f332 | [
"MIT"
] | null | null | null | dictionary_learning.py | denisilie94/pyod-dl | 70043f545b0cf84e72e6d851aabd34a199b2f332 | [
"MIT"
] | null | null | null | # Copyright (c) 2019 Paul Irofti <paul@irofti.net>
# Copyright (c) 2020 Denis Ilie-Ablachim <denis.ilie_ablachim@acse.pub.ro>
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import numpy as np
def dictionary_learning(Y, D, n_nonzero_coefs, n_iterations,
                        coding_method, learning_method, params):
    """Alternate sparse coding and dictionary update for a fixed number
    of iterations.

    Each iteration computes the representation ``X`` of the signals ``Y``
    over the current dictionary ``D`` via ``coding_method``, then refines
    ``D`` (and possibly ``X``) via ``learning_method``, recording the RMSE
    of the reconstruction ``Y - D @ X``.

    Returns the tuple ``(D, X, rmse, error_extra)``.
    """
    rmse = np.zeros(n_iterations)
    error_extra = np.zeros(n_iterations)

    # Let the learning method set up any state it keeps on ``params``
    params._safe_init(learning_method.__name__)

    # Constant normalisation factor, hoisted out of the loop
    rmse_denominator = np.sqrt(Y.size)

    for it in range(n_iterations):
        # Sparse coding step: representation for the fixed dictionary
        X, _ = coding_method(Y, D, n_nonzero_coefs, params)
        # Dictionary update step
        D, X = learning_method(Y, D, X, params)
        # Root-mean-square reconstruction error of this iteration
        rmse[it] = np.linalg.norm(Y - D @ X, 'fro') / rmse_denominator
        # Per-iteration bookkeeping on ``params``
        params._safe_update(it)

    return D, X, rmse, error_extra
def dictionary_learning_0(Y, D, n_nonzero_coefs, n_iterations,
                          coding_method, learning_method, params):
    """Dictionary learning with an offset ``y0`` subtracted from the
    signals, updated each iteration as the mean residual of ``Y - D @ X``.

    Returns ``(y0, D, X, rmse, error_extra)``.
    """
    # Dictionary learning iterations
    rmse = np.zeros(n_iterations)
    error_extra = np.zeros(n_iterations)
    # Safe initialization of params
    params._safe_init(learning_method.__name__)
    # init reference (column vector of zeros, one entry per signal row)
    y0 = np.zeros((Y.shape[0], 1))
    for iter in range(n_iterations):
        # Update coefs on the offset-corrected signals
        X, _ = coding_method(Y - y0, D, n_nonzero_coefs, params)
        # Update dictionaries
        D, X = learning_method(Y - y0, D, X, params)
        # update reference
        # NOTE(review): np.mean with no axis returns a scalar, so after the
        # first iteration y0 is no longer the (n, 1) column vector it was
        # initialised as -- confirm whether a per-row mean (axis=1,
        # keepdims=True) was intended.
        y0 = np.mean(Y - D @ X)
        # Compute error (RMSE of the offset-corrected reconstruction)
        rmse[iter] = (np.linalg.norm(Y - y0 - D @ X, 'fro') /
                      np.sqrt(Y.size))
        # Update params
        params._safe_update(iter)
    return y0, D, X, rmse, error_extra
def selective_dictionary_learning(Y, D, n_nonzero_coefs, n_iterations,
                                  coding_method, learning_method, params):
    """Dictionary learning on a random, residual-filtered subset of signals.

    Each iteration codes a random ``params.train_proc`` fraction of the
    columns of ``Y``, drops the signals with the largest residual norms
    (keeping ``train_proc - train_drop_proc`` of the columns), and updates
    the dictionary on the retained signals only.

    Returns ``(D, X, rmse, error_extra)``.
    """
    # Dictionary learning iterations
    rmse = np.zeros(n_iterations)
    error_extra = np.zeros(n_iterations)
    # Safe initialization of params
    params._safe_init(learning_method.__name__)
    for iter in range(n_iterations):
        # Update coefs on a random subset of the training signals
        rp = np.random.permutation(Y.shape[1])
        Y_tmp = Y[:, rp[:int(Y.shape[1] * params.train_proc)]]
        X, _ = coding_method(Y_tmp, D, n_nonzero_coefs, params)
        # get best training signals (smallest per-column residual norm)
        err = np.linalg.norm((Y_tmp - D @ X), axis=0)
        err_index = np.argsort(err)
        selection = err_index[:int(Y.shape[1] *
                              (params.train_proc - params.train_drop_proc))]
        Y_tmp = Y_tmp[:, selection]
        X = X[:, selection]
        # Update dictionaries on the retained subset
        D, X = learning_method(Y_tmp, D, X, params)
        # Compute error (RMSE over the retained subset)
        rmse[iter] = (np.linalg.norm(Y_tmp - D @ X, 'fro') /
                      np.sqrt(Y_tmp.size))
        # Update params
        params._safe_update(iter)
    return D, X, rmse, error_extra
def selective_dictionary_learning_0(Y, D, n_nonzero_coefs, n_iterations,
                                    coding_method, learning_method, params):
    """Selective dictionary learning (random subset + residual filtering)
    combined with the mean-residual offset ``y0`` of dictionary_learning_0.

    Returns ``(y0, D, X, rmse, error_extra)``.
    """
    # Dictionary learning iterations
    rmse = np.zeros(n_iterations)
    error_extra = np.zeros(n_iterations)
    # Safe initialization of params
    params._safe_init(learning_method.__name__)
    # init reference (column vector of zeros, one entry per signal row)
    y0 = np.zeros((Y.shape[0], 1))
    for iter in range(n_iterations):
        # Update coefs on a random subset of the offset-corrected signals
        rp = np.random.permutation(Y.shape[1])
        Y_tmp = Y[:, rp[:int(Y.shape[1] * params.train_proc)]]
        X, _ = coding_method(Y_tmp - y0, D, n_nonzero_coefs, params)
        # get best training signals (smallest per-column residual norm)
        err = np.linalg.norm((Y_tmp - D @ X), axis=0)
        err_index = np.argsort(err)
        selection = err_index[:int(Y.shape[1] *
                              (params.train_proc - params.train_drop_proc))]
        Y_tmp = Y_tmp[:, selection]
        X = X[:, selection]
        # Update dictionaries on the retained subset
        D, X = learning_method(Y_tmp - y0, D, X, params)
        # update reference
        # NOTE(review): as in dictionary_learning_0, np.mean with no axis
        # collapses y0 to a scalar after the first iteration -- confirm.
        y0 = np.mean(Y_tmp - D @ X)
        # Compute error
        # NOTE(review): the residual is over the subset Y_tmp but the
        # normalisation uses the full Y.size, unlike
        # selective_dictionary_learning which uses Y_tmp.size -- confirm
        # which normalisation is intended.
        rmse[iter] = (np.linalg.norm(Y_tmp - y0 - D @ X, 'fro') /
                      np.sqrt(Y.size))
        # Update params
        params._safe_update(iter)
    return y0, D, X, rmse, error_extra
def selective_incoherent_dictionary_learning(Y, D, n_nonzero_coefs,
                                             n_iterations, coding_method,
                                             learning_method, params):
    """Selective dictionary learning where the learning method receives two
    copies of the retained subset (``Y_tmp`` and ``Y_tmp_incoh``), as
    expected by incoherence-promoting update rules.

    Returns ``(D, X, rmse, error_extra)``.
    """
    # Dictionary learning iterations
    rmse = np.zeros(n_iterations)
    error_extra = np.zeros(n_iterations)
    # Safe initialization of params
    params._safe_init(learning_method.__name__)
    for iter in range(n_iterations):
        # Update coefs on a random subset of the training signals
        rp = np.random.permutation(Y.shape[1])
        Y_tmp_origin = Y[:, rp[:int(Y.shape[1] * params.train_proc)]]
        X, _ = coding_method(Y_tmp_origin, D, n_nonzero_coefs, params)
        # get best training signals (smallest per-column residual norm)
        err = np.linalg.norm((Y_tmp_origin - D @ X), axis=0)
        err_index = np.argsort(err)
        selection = err_index[:int(Y.shape[1] *
                              (params.train_proc - params.train_drop_proc))]
        Y_tmp = Y_tmp_origin[:, selection]
        Y_tmp_incoh = Y_tmp_origin[:, selection]
        X = X[:, selection]
        # Update dictionaries (signature takes both subset copies)
        D, X = learning_method(Y_tmp, Y_tmp_incoh, D, X, params)
        # Compute error (RMSE over the retained subset)
        rmse[iter] = (np.linalg.norm(Y_tmp - D @ X, 'fro') /
                      np.sqrt(Y_tmp.size))
        # Update params
        params._safe_update(iter)
    return D, X, rmse, error_extra
def kernel_dictionary_learning(Y, A, n_nonzero_coefs, n_iterations,
                               coding_method, learning_method,
                               kernel_method, params, Y_bar=None):
    """Kernel dictionary learning with dictionary coefficients ``A``.

    If ``Y_bar`` is not supplied, a random ``params.ker_proc`` fraction of
    the columns of ``Y`` is selected as the kernel anchor set.  The Gram
    matrices ``K_bar = k(Y_bar, Y_bar)`` and ``K_hat = k(Y, Y_bar)`` are
    built once and reused across iterations.

    Returns ``(K_bar, K_hat, A, X, Y_bar, rmse, error_extra)``.
    """
    # Dictionary learning iterations
    # NOTE(review): rmse and error_extra are allocated but never written in
    # this routine, so all-zero arrays are returned -- confirm whether a
    # reconstruction error should be tracked here.
    rmse = np.zeros(n_iterations)
    error_extra = np.zeros(n_iterations)
    # Safe initialization of params
    params._safe_init(learning_method.__name__)
    # Initialize kernel Matrix (random anchor subset if none was given)
    if Y_bar is None:
        n = int(Y.shape[1] * params.ker_proc)
        rp = np.random.permutation(Y.shape[1])
        Y_bar = Y[:, rp[:n]]
    K_bar = kernel_method(Y_bar.T, Y_bar.T, params)
    K_hat = kernel_method(Y.T, Y_bar.T, params)
    for iter in range(n_iterations):
        # Initialize coding coefs in the kernel-induced feature space
        X, _ = coding_method(A.T @ K_hat.T,
                             A.T @ K_bar @ A, n_nonzero_coefs, params)
        # Update dictionaries
        A, X = learning_method(K_bar, K_hat, A, X, params)
        # Update params
        params._safe_update(iter)
    return K_bar, K_hat, A, X, Y_bar, rmse, error_extra
def selective_kernel_dictionary_learning(Y, A, n_nonzero_coefs, n_iterations,
                                         coding_method, learning_method,
                                         kernel_method, params, Y_bar=None):
    """Kernel dictionary learning on a random, residual-filtered subset.

    Like :func:`kernel_dictionary_learning`, but each iteration codes a
    random ``params.train_proc`` fraction of the signals and drops the
    ones with the largest residuals before the dictionary update.

    Returns ``(K_bar, K_hat, A, X, Y_bar, rmse, error_extra)``.
    """
    # Dictionary learning iterations
    # NOTE(review): rmse and error_extra are allocated but never written in
    # this routine, so all-zero arrays are returned -- confirm whether a
    # reconstruction error should be tracked here.
    rmse = np.zeros(n_iterations)
    error_extra = np.zeros(n_iterations)
    # Safe initialization of params
    params._safe_init(learning_method.__name__)
    # Initialize kernel Matrix (random anchor subset if none was given)
    if Y_bar is None:
        n = int(Y.shape[1] * params.ker_proc)
        rp = np.random.permutation(Y.shape[1])
        Y_bar = Y[:, rp[:n]]
    K_bar = kernel_method(Y_bar.T, Y_bar.T, params)
    for iter in range(n_iterations):
        # Update coefs on a random subset of the training signals
        rp = np.random.permutation(Y.shape[1])
        Y_tmp = Y[:, rp[:int(Y.shape[1] * params.train_proc)]]
        K_hat = kernel_method(Y_tmp.T, Y_bar.T, params)
        X, _ = coding_method(A.T @ K_hat.T, A.T @ K_bar @ A,
                             n_nonzero_coefs, params)
        # get best training signals (smallest per-column residual norm)
        err = np.linalg.norm((A.T @ K_hat.T - A.T @ K_bar @ A @ X), axis=0)
        err_index = np.argsort(err)
        selection = err_index[:int(Y.shape[1] *
                              (params.train_proc - params.train_drop_proc))]
        K_hat = K_hat[selection, :]
        X = X[:, selection]
        # Update dictionaries on the retained subset
        A, X = learning_method(K_bar, K_hat, A, X, params)
        # Update params
        params._safe_update(iter)
    return K_bar, K_hat, A, X, Y_bar, rmse, error_extra
| 34.838346 | 78 | 0.590267 | 1,221 | 9,267 | 4.244881 | 0.123669 | 0.022381 | 0.021609 | 0.04862 | 0.84835 | 0.83716 | 0.832529 | 0.832529 | 0.832529 | 0.830214 | 0 | 0.007213 | 0.311859 | 9,267 | 265 | 79 | 34.969811 | 0.805551 | 0.202007 | 0 | 0.725926 | 0 | 0 | 0.002123 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051852 | false | 0 | 0.007407 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
0f90efc57a093ea577e92adb503d40c749b7c7f3 | 18,233 | py | Python | tests/unit_tests/logic/camera/test_cameraIndi.py | mworion/MountWizzard4 | 4e06b29ec2ef70be40e114b911b7bdf2f858a4b1 | [
"Apache-2.0"
] | 16 | 2020-01-11T22:32:26.000Z | 2022-03-31T15:18:14.000Z | tests/unit_tests/logic/camera/test_cameraIndi.py | mworion/MountWizzard4 | 4e06b29ec2ef70be40e114b911b7bdf2f858a4b1 | [
"Apache-2.0"
] | 196 | 2020-01-16T13:56:01.000Z | 2022-03-29T02:06:51.000Z | tests/unit_tests/logic/camera/test_cameraIndi.py | mworion/MountWizzard4 | 4e06b29ec2ef70be40e114b911b7bdf2f858a4b1 | [
"Apache-2.0"
] | 6 | 2019-12-01T19:39:33.000Z | 2021-05-27T13:14:20.000Z | ############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2021 by mworion
#
# Licence APL2.0
#
###########################################################
# standard libraries
import pytest
import unittest.mock as mock
import zlib
# external packages
from astropy.io import fits
from PyQt5.QtCore import QThreadPool, QObject, pyqtSignal
from indibase.indiBase import Device, Client
from skyfield.api import Angle, load
# local import
from logic.camera.cameraIndi import CameraIndi
from base.driverDataClass import Signals
from base.indiClass import IndiClass
@pytest.fixture(autouse=True, scope='function')
def module_setup_teardown():
    """Create a fresh CameraIndi instance (module-global ``app``) per test.

    The camera is wired to lightweight stub objects standing in for the
    main application: a mount whose ``obsSite`` carries JNow coordinates
    and a Julian-date timestamp, and a Qt object exposing a thread pool, a
    message signal and a device status dict -- attributes CameraIndi is
    expected to read (see the CameraIndi implementation for details).
    """
    class Test2:
        # Stub observation site
        raJNow = Angle(hours=0)
        decJNow = Angle(degrees=0)
        timeJD = load.timescale().tt_jd(23456789.5)

    class Test1:
        # Stub mount holding the observation site
        obsSite = Test2()

    class Test(QObject):
        # Stub main application object
        threadPool = QThreadPool()
        message = pyqtSignal(str, int)
        deviceStat = {'mount': True}
        mount = Test1()

    global app
    app = CameraIndi(app=Test(), signals=Signals(), data={})
    yield
def test_setUpdateConfig_1():
app.deviceName = ''
suc = app.setUpdateConfig('test')
assert not suc
def test_setUpdateConfig_2():
app.deviceName = 'test'
app.device = None
suc = app.setUpdateConfig('test')
assert not suc
def test_setUpdateConfig_3():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app.device,
'getNumber',
return_value={'Test': 1}):
suc = app.setUpdateConfig('test')
assert not suc
def test_setUpdateConfig_4():
app.deviceName = 'test'
app.device = Device()
app.UPDATE_RATE = 1
with mock.patch.object(app.device,
'getNumber',
return_value={'PERIOD_MS': 1}):
suc = app.setUpdateConfig('test')
assert suc
def test_setUpdateConfig_5():
app.deviceName = 'test'
app.device = Device()
app.client = Client()
app.UPDATE_RATE = 0
with mock.patch.object(app.device,
'getNumber',
return_value={'PERIOD_MS': 1}):
with mock.patch.object(app.client,
'sendNewNumber',
return_value=False):
suc = app.setUpdateConfig('test')
assert not suc
def test_setUpdateConfig_6():
app.deviceName = 'test'
app.device = Device()
app.client = Client()
app.UPDATE_RATE = 0
with mock.patch.object(app.device,
'getNumber',
return_value={'PERIOD_MS': 1}):
with mock.patch.object(app.client,
'sendNewNumber',
return_value=True):
suc = app.setUpdateConfig('test')
assert suc
def test_setExposureState_1():
app.device = Device()
setattr(app.device, 'CCD_EXPOSURE', {'state': 'Busy'})
app.data = {'CCD_EXPOSURE.CCD_EXPOSURE_VALUE': 0.0000001}
app.isDownloading = False
suc = app.setExposureState()
assert suc
assert app.isDownloading
def test_setExposureState_2():
app.device = Device()
setattr(app.device, 'CCD_EXPOSURE', {'state': 'Busy'})
app.data = {'CCD_EXPOSURE.CCD_EXPOSURE_VALUE': 0.0000001}
app.isDownloading = True
suc = app.setExposureState()
assert suc
assert app.isDownloading
def test_setExposureState_3():
app.device = Device()
setattr(app.device, 'CCD_EXPOSURE', {'state': 'Busy'})
app.data = {'CCD_EXPOSURE.CCD_EXPOSURE_VALUE': 1}
app.isDownloading = True
suc = app.setExposureState()
assert suc
assert app.isDownloading
def test_setExposureState_4():
app.device = Device()
setattr(app.device, 'CCD_EXPOSURE', {'state': 'Busy'})
app.data = {'CCD_EXPOSURE.CCD_EXPOSURE_VALUE': None}
app.isDownloading = True
suc = app.setExposureState()
assert not suc
assert app.isDownloading
def test_setExposureState_5():
app.device = Device()
setattr(app.device, 'CCD_EXPOSURE', {'state': 'Ok'})
app.data = {'CCD_EXPOSURE.CCD_EXPOSURE_VALUE': None}
app.isDownloading = True
suc = app.setExposureState()
assert suc
assert not app.isDownloading
def test_setExposureState_6():
app.device = Device()
setattr(app.device, 'CCD_EXPOSURE', {'state': 'test'})
app.data = {'CCD_EXPOSURE.CCD_EXPOSURE_VALUE': None}
app.isDownloading = True
suc = app.setExposureState()
assert suc
assert app.isDownloading
def test_sendDownloadMode_1():
app.deviceName = 'test'
app.device = None
suc = app.sendDownloadMode()
assert not suc
def test_sendDownloadMode_2():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app.device,
'getSwitch',
return_value={'Test': 1}):
suc = app.sendDownloadMode()
assert not suc
def test_updateNumber_1():
suc = app.updateNumber('test', 'test')
assert not suc
def test_updateNumber_2():
app.device = Device()
setattr(app.device, 'CCD_EXPOSURE', {'state': 'Busy'})
app.data = {'AUTO_DEW.DEW_C': 1,
'VERSION.UPB': 1}
with mock.patch.object(IndiClass,
'updateNumber',
return_value=True):
suc = app.updateNumber('test', 'CCD_EXPOSURE')
assert suc
def test_updateNumber_3():
app.data = {'AUTO_DEW.DEW_C': 1,
'VERSION.UPB': 1}
with mock.patch.object(IndiClass,
'updateNumber',
return_value=True):
suc = app.updateNumber('test', 'CCD_TEMPERATURE')
assert suc
def test_updateHeaderInfo_1():
header = {}
app.ra = Angle(hours=12)
app.dec = Angle(degrees=180)
h = app.updateHeaderInfo(header)
assert 'RA' in h
assert 'DEC' in h
def test_updateHeaderInfo_2():
header = {'RA': 90,
'DEC': 90}
app.ra = Angle(hours=12)
app.dec = Angle(degrees=180)
h = app.updateHeaderInfo(header)
assert 'RA' in h
assert 'DEC' in h
assert h['RA'] == 90
assert h['DEC'] == 90
def test_updateHeaderInfo_3():
header = {'RA': 90}
app.ra = Angle(hours=12)
app.dec = Angle(degrees=180)
h = app.updateHeaderInfo(header)
assert 'RA' in h
assert 'DEC' in h
assert h['RA'] == 180
assert h['DEC'] == 180
def test_updateHeaderInfo_4():
header = {}
app.ra = None
app.dec = None
h = app.updateHeaderInfo(header)
assert 'RA' not in h
assert 'DEC' not in h
def test_workerSaveBLOB_1():
app.imagePath = 'tests/workDir/image/test.fit'
hdu = fits.HDUList()
hdu.append(fits.PrimaryHDU())
data = {'value': '1',
'name': 'CCD1',
'format': '.fits.fz'}
with mock.patch.object(fits.HDUList,
'fromstring',
return_value=hdu):
suc = app.workerSaveBLOB(data)
assert suc
def test_workerSaveBLOB_2():
app.imagePath = 'tests/workDir/image/test.fit'
hdu = fits.HDUList()
hdu.append(fits.PrimaryHDU())
data = {'value': zlib.compress(b'1'),
'name': 'CCD1',
'format': '.fits.z'}
with mock.patch.object(fits.HDUList,
'fromstring',
return_value=hdu):
suc = app.workerSaveBLOB(data)
assert suc
def test_workerSaveBLOB_3():
app.imagePath = 'tests/workDir/image/test.fit'
hdu = fits.HDUList()
hdu.append(fits.PrimaryHDU())
data = {'value': '1',
'name': 'CCD1',
'format': '.fits'}
with mock.patch.object(fits.HDUList,
'fromstring',
return_value=hdu):
suc = app.workerSaveBLOB(data)
assert suc
def test_workerSaveBLOB_4():
app.imagePath = 'tests/workDir/image/test.fit'
hdu = fits.HDUList()
hdu.append(fits.PrimaryHDU())
data = {'value': '1',
'name': 'CCD1',
'format': '.test'}
with mock.patch.object(fits.HDUList,
'fromstring',
return_value=hdu):
suc = app.workerSaveBLOB(data)
assert suc
def test_updateBLOB_1():
app.device = Device()
with mock.patch.object(IndiClass,
'updateBLOB',
return_value=False):
suc = app.updateBLOB('test', 'test')
assert not suc
def test_updateBLOB_2():
app.device = Device()
with mock.patch.object(IndiClass,
'updateBLOB',
return_value=True):
with mock.patch.object(app.device,
'getBlob',
return_value={}):
suc = app.updateBLOB('test', 'test')
assert not suc
def test_updateBLOB_3():
app.device = Device()
with mock.patch.object(IndiClass,
'updateBLOB',
return_value=True):
with mock.patch.object(app.device,
'getBlob',
return_value={'value': 1}):
suc = app.updateBLOB('test', 'test')
assert not suc
def test_updateBLOB_4():
app.device = Device()
with mock.patch.object(IndiClass,
'updateBLOB',
return_value=True):
with mock.patch.object(app.device,
'getBlob',
return_value={'value': 1,
'name': 'test'}):
suc = app.updateBLOB('test', 'test')
assert not suc
def test_updateBLOB_5():
app.device = Device()
with mock.patch.object(IndiClass,
'updateBLOB',
return_value=True):
with mock.patch.object(app.device,
'getBlob',
return_value={'value': 1,
'name': 'CCD2',
'format': 'test'}):
suc = app.updateBLOB('test', 'test')
assert not suc
def test_updateBLOB_6():
app.device = Device()
with mock.patch.object(IndiClass,
'updateBLOB',
return_value=True):
with mock.patch.object(app.device,
'getBlob',
return_value={'value': 1,
'name': 'CCD1',
'format': 'test'}):
suc = app.updateBLOB('test', 'test')
assert not suc
def test_updateBLOB_7():
app.device = Device()
app.imagePath = 'tests/dummy/test.txt'
with mock.patch.object(IndiClass,
'updateBLOB',
return_value=True):
with mock.patch.object(app.device,
'getBlob',
return_value={'value': 1,
'name': 'CCD1',
'format': 'test'}):
suc = app.updateBLOB('test', 'test')
assert not suc
def test_updateBLOB_8():
app.device = Device()
app.imagePath = 'tests/workDir/image/test.fit'
hdu = fits.HDUList()
hdu.append(fits.PrimaryHDU())
with mock.patch.object(IndiClass,
'updateBLOB',
return_value=True):
with mock.patch.object(app.device,
'getBlob',
return_value={'value': 1,
'name': 'CCD1',
'format': '.fits.fz'}):
with mock.patch.object(fits.HDUList,
'fromstring',
return_value=hdu):
suc = app.updateBLOB('test', 'test')
assert suc
def test_expose_1():
app.deviceName = 'test'
app.device = None
suc = app.expose()
assert not suc
def test_expose_2():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app,
'sendDownloadMode',
return_value=False):
suc = app.expose()
assert not suc
def test_expose_3():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app.device,
'getNumber',
return_value={}):
with mock.patch.object(app.client,
'sendNewNumber',
return_value=False):
suc = app.expose()
assert not suc
def test_expose_4():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app.device,
'getNumber',
return_value={}):
with mock.patch.object(app.client,
'sendNewNumber',
return_value=True):
suc = app.expose()
assert suc
def test_expose_5():
app.deviceName = 'test'
app.app.deviceStat['mount'] = False
app.device = Device()
with mock.patch.object(app.device,
'getNumber',
return_value={}):
with mock.patch.object(app.client,
'sendNewNumber',
return_value=True):
suc = app.expose()
assert suc
def test_abort_1():
app.deviceName = 'test'
app.device = None
suc = app.abort()
assert not suc
def test_abort_2():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app.device,
'getSwitch',
return_value={'Test': 1}):
suc = app.abort()
assert not suc
def test_abort_3():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app.device,
'getSwitch',
return_value={'ABORT': 1}):
with mock.patch.object(app.client,
'sendNewSwitch',
return_value=True):
suc = app.abort()
assert suc
def test_sendCoolerSwitch_1():
app.deviceName = 'test'
app.device = None
suc = app.sendCoolerSwitch()
assert not suc
def test_sendCoolerSwitch_2():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app.device,
'getSwitch',
return_value={'Test': 1}):
suc = app.sendCoolerSwitch()
assert not suc
def test_sendCoolerSwitch_3():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app.device,
'getSwitch',
return_value={'COOLER_ON': True}):
with mock.patch.object(app.client,
'sendNewSwitch',
return_value=True):
suc = app.sendCoolerSwitch(True)
assert suc
def test_sendCoolerTemp_1():
app.deviceName = 'test'
app.device = None
suc = app.sendCoolerTemp()
assert not suc
def test_sendCoolerTemp_2():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app.device,
'getNumber',
return_value={'Test': 1}):
suc = app.sendCoolerTemp()
assert not suc
def test_sendCoolerTemp_3():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app.device,
'getNumber',
return_value={'CCD_TEMPERATURE_VALUE': 1}):
with mock.patch.object(app.client,
'sendNewNumber',
return_value=True):
suc = app.sendCoolerTemp()
assert suc
def test_sendOffset_1():
app.deviceName = 'test'
app.device = None
suc = app.sendOffset()
assert not suc
def test_sendOffset_2():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app.device,
'getNumber',
return_value={'Test': 1}):
suc = app.sendOffset()
assert not suc
def test_sendOffset_3():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app.device,
'getNumber',
return_value={'OFFSET': 1}):
with mock.patch.object(app.client,
'sendNewNumber',
return_value=True):
suc = app.sendOffset()
assert suc
def test_sendGain_1():
app.deviceName = 'test'
app.device = None
suc = app.sendGain()
assert not suc
def test_sendGain_2():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app.device,
'getNumber',
return_value={'Test': 1}):
suc = app.sendGain()
assert not suc
def test_sendGain_3():
app.deviceName = 'test'
app.device = Device()
with mock.patch.object(app.device,
'getNumber',
return_value={'GAIN': 1}):
with mock.patch.object(app.client,
'sendNewNumber',
return_value=True):
suc = app.sendGain()
assert suc
| 28.668239 | 70 | 0.512423 | 1,808 | 18,233 | 5.056969 | 0.096239 | 0.072843 | 0.072514 | 0.105983 | 0.834628 | 0.822925 | 0.807722 | 0.795363 | 0.776113 | 0.690145 | 0 | 0.014826 | 0.367411 | 18,233 | 635 | 71 | 28.713386 | 0.777874 | 0.017112 | 0 | 0.762475 | 0 | 0 | 0.093866 | 0.019563 | 0 | 0 | 0 | 0 | 0.133733 | 1 | 0.107784 | false | 0 | 0.01996 | 0 | 0.149701 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
7e37dc67ec67dbe2d9e7c900033321b9e6f3953c | 6,322 | py | Python | StatisticalDisclosureControl/Microaggregation filter.py | bitmapup/privacyAlgorithms | fa8ff1d8fd903b31f78dfad5152ca55bc84b37bd | [
"Apache-2.0"
] | null | null | null | StatisticalDisclosureControl/Microaggregation filter.py | bitmapup/privacyAlgorithms | fa8ff1d8fd903b31f78dfad5152ca55bc84b37bd | [
"Apache-2.0"
] | null | null | null | StatisticalDisclosureControl/Microaggregation filter.py | bitmapup/privacyAlgorithms | fa8ff1d8fd903b31f78dfad5152ca55bc84b37bd | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# coding: utf-8
# In[29]:
# https://geoffboeing.com/2014/08/clustering-to-reduce-spatial-data-set-size/
# Essentials
import pandas as pd
import numpy as np
import time
import sys
import math
# Clustering
from sklearn.cluster import DBSCAN
# Ignorar ciertos warnings
import warnings
warnings.filterwarnings(action="ignore")
pd.options.display.max_seq_items = 8000
pd.options.display.max_rows = 8000
#arguments of the console
first_arg = sys.argv[1]
first_arg = float(first_arg)
second_arg = sys.argv[2]
second_arg = float(second_arg)
def AplicarMicroaggregationFilter(kilometers, minimunSamples):
df = pd.read_csv('dataParaPrivacidad.csv')
df.head()
# Pre-processing dataset
coords = df.as_matrix(columns=['LONG','LAT'])
# Setting up the algorithm
kms_per_radian = 6371.0088
kms= kilometers # 1
epsilon = kilometers/kms_per_radian
min_samples = minimunSamples # 50
# Gogo, power rangers
clustering = DBSCAN(eps = epsilon, min_samples = min_samples, algorithm = 'ball_tree', metric = 'haversine').fit(np.radians(coords))
# Inserta columna con el cluster al que pertenece cada data point
df["Cluster"] = clustering.labels_
# Selecciona las variables que se buscan realizar el m-microaggregation
a = ["SALI","TCL","CLO","TSM","Cluster"]
# Saca promedio de cada cluster e insertalas en el dataFrame original
new = pd.DataFrame(df[a].groupby("Cluster").transform('mean'))
new.columns = ["AvgSALI","AvgTCL", "AvgCLO", "AvgTSM"]
df = pd.concat([df, new], axis=1) #funca porque tienen el mismo índice (take it in consideration)
# Using new variables for the model
import pickle
with open('XGBModel.pkl', 'rb') as f:
xgbModel = pickle.load(f)
with open('RidgeModel.pkl', 'rb') as f:
ridgeModel = pickle.load(f)
with open('SVRModel.pkl', 'rb') as f:
supportVectorRegresorModel = pickle.load(f)
with open('LGBMRModel.pkl', 'rb') as f:
LGBMRModel = pickle.load(f)
with open('StackedModel.pkl', 'rb') as f:
stack_genModel = pickle.load(f)
# Setear el learner
def votingPredictions(X):
return ((0.30 * xgbModel.predict(X)) + (0.05 * ridgeModel.predict(X)) + (0.05 * supportVectorRegresorModel.predict(X)) + (0.25 * LGBMRModel.predict(X)) + (0.35 * stack_genModel.predict(np.array(X))))
# Set up the data set
variablesParaLog1p = ["AvgSALI","AvgTCL", "AvgCLO", "AvgTSM"]
for i in variablesParaLog1p:
df.loc[:,i] = np.log1p(df.loc[:,i])
porMientras = df.loc[:,["LONG","LAT","AvgSALI","AvgTCL", "AvgCLO", "AvgTSM"]]
porMientras.columns = ['LONGI', 'LATIT', 'Salinidad', 'TC', 'Clorofila', 'TSM']
# Resultados
df['MontoPescaMicroaggregated'] = votingPredictions(porMientras)
# IL (IL= sum( abs(V_i -V'_i)))
IL = sum(abs(np.expm1(df.MontoPescaOriginal) - np.expm1(df.MontoPescaMicroaggregated)))
# DR= abs(P_{verdadera}-V_{calculada})/P_{verdadera}
DR = np.mean(abs(np.expm1(df.MontoPescaOriginal)- np.expm1(df.MontoPescaMicroaggregated))/(np.expm1(df.MontoPescaOriginal)))
# Resultados
#Results = pd.read_csv('Resultados_MicroaggregationFilter.csv')
params = [kms, min_samples]
d = {'Params':[params],'IL': [IL], 'DR': [DR]}
d = pd.DataFrame(data=d)
Results = str(params)+'MicroaggregationFilter.csv'
d.to_csv(Results, index = False)
# For executing
if __name__ == "__main__":
AplicarMicroaggregationFilter(first_arg, second_arg)
# In[ ]:
# In[3]:
# Pre-processing dataset
coords = df.as_matrix(columns=['LONG','LAT'])
# In[16]:
# Setting up the algorithm
kms_per_radian = 6371.0088
kms= kilometers # 1
epsilon = kilometers/kms_per_radian
min_samples = minimunSamples # 50
# Gogo, power rangers
clustering = DBSCAN(eps = epsilon, min_samples = min_samples, algorithm = 'ball_tree', metric = 'haversine').fit(np.radians(coords))
# In[17]:
# just for knowing number of clusters
len(set(clustering.labels_))
# In[30]:
# Inserta columna con el cluster al que pertenece cada data point
df["Cluster"] = clustering.labels_
# Selecciona las variables que se buscan realizar el m-microaggregation
a = ["SALI","TCL","CLO","TSM","Cluster"]
# Saca promedio de cada cluster e insertalas en el dataFrame original
new = pd.DataFrame(df[a].groupby("Cluster").transform('mean'))
new.columns = ["AvgSALI","AvgTCL", "AvgCLO", "AvgTSM"]
df = pd.concat([df, new], axis=1) #funca porque tienen el mismo índice (take it in consideration)
df.head(5)
# In[32]:
# Using new variables for the model
import pickle
with open('XGBModel.pkl', 'rb') as f:
xgbModel = pickle.load(f)
with open('RidgeModel.pkl', 'rb') as f:
ridgeModel = pickle.load(f)
with open('SVRModel.pkl', 'rb') as f:
supportVectorRegresorModel = pickle.load(f)
with open('LGBMRModel.pkl', 'rb') as f:
LGBMRModel = pickle.load(f)
with open('StackedModel.pkl', 'rb') as f:
stack_genModel = pickle.load(f)
# Setear el learner
def votingPredictions(X):
return ((0.30 * xgbModel.predict(X)) + (0.05 * ridgeModel.predict(X)) + (0.05 * supportVectorRegresorModel.predict(X)) + (0.25 * LGBMRModel.predict(X)) + (0.35 * stack_genModel.predict(np.array(X))))
# Set up the data set
variablesParaLog1p = ["AvgSALI","AvgTCL", "AvgCLO", "AvgTSM"]
for i in variablesParaLog1p:
df.loc[:,i] = np.log1p(df.loc[:,i])
porMientras = df.loc[:,["LONG","LAT","AvgSALI","AvgTCL", "AvgCLO", "AvgTSM"]]
porMientras.columns = ['LONGI', 'LATIT', 'Salinidad', 'TC', 'Clorofila', 'TSM']
# Resultados
df['MontoPescaMicroaggregated'] = votingPredictions(porMientras)
# IL (IL= sum( abs(V_i -V'_i)))
IL = sum(abs(np.expm1(df.MontoPescaOriginal) - np.expm1(df.MontoPescaMicroaggregated)))
# DR= abs(P_{verdadera}-V_{calculada})/P_{verdadera}
DR = np.mean(abs(np.expm1(df.MontoPescaOriginal)- np.expm1(df.MontoPescaMicroaggregated))/(np.expm1(df.MontoPescaOriginal)))
# Resultados
#Results = pd.read_csv('Resultados_MicroaggregationFilter.csv')
params = [kms, min_samples]
d = {'Params':[params],'IL': [IL], 'DR': [DR]}
d = pd.DataFrame(data=d)
Results = str(params)+'MicroaggregationFilter.csv'
d.to_csv(Results, index = False)
# In[ ]:
| 28.86758 | 271 | 0.673679 | 827 | 6,322 | 5.073761 | 0.266022 | 0.019066 | 0.016683 | 0.019066 | 0.840324 | 0.840324 | 0.840324 | 0.840324 | 0.840324 | 0.840324 | 0 | 0.018962 | 0.174154 | 6,322 | 218 | 272 | 29 | 0.784716 | 0.227776 | 0 | 0.778947 | 0 | 0 | 0.138803 | 0.025689 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031579 | false | 0 | 0.094737 | 0.021053 | 0.147368 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
7e3cbc1c0cbb923f4d8bcdd95d8d04975b0e238f | 246 | py | Python | tests/unit/test_version.py | shane-breeze/AlphaTwirl | 59dbd5348af31d02e133d43fd5bfaad6b99a155e | [
"BSD-3-Clause"
] | null | null | null | tests/unit/test_version.py | shane-breeze/AlphaTwirl | 59dbd5348af31d02e133d43fd5bfaad6b99a155e | [
"BSD-3-Clause"
] | 7 | 2018-02-26T10:32:26.000Z | 2018-03-19T12:27:12.000Z | tests/unit/test_version.py | shane-breeze/AlphaTwirl | 59dbd5348af31d02e133d43fd5bfaad6b99a155e | [
"BSD-3-Clause"
] | null | null | null | # Tai Sakuma <tai.sakuma@gmail.com>
import alphatwirl
##__________________________________________________________________||
def test_version():
alphatwirl.__version__
##__________________________________________________________________||
| 24.6 | 70 | 0.853659 | 13 | 246 | 5.615385 | 0.692308 | 0.246575 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.073171 | 246 | 9 | 71 | 27.333333 | 0.320175 | 0.686992 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | true | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
7e8781bc73d17fe11f24440c9600b23c1b74ccb0 | 18,469 | py | Python | tests/test_data_store_populate.py | tahirs95/pepys-import | 05f070e97dd2f3f84b71faa3578e03398e28ca15 | [
"Apache-2.0"
] | null | null | null | tests/test_data_store_populate.py | tahirs95/pepys-import | 05f070e97dd2f3f84b71faa3578e03398e28ca15 | [
"Apache-2.0"
] | null | null | null | tests/test_data_store_populate.py | tahirs95/pepys-import | 05f070e97dd2f3f84b71faa3578e03398e28ca15 | [
"Apache-2.0"
] | null | null | null | import unittest
import os
from datetime import datetime
from unittest import TestCase
from testing.postgresql import Postgresql
from sqlalchemy.exc import OperationalError
from pepys_import.core.store.data_store import DataStore
from contextlib import redirect_stdout
from io import StringIO
FILE_PATH = os.path.dirname(__file__)
TEST_DATA_PATH = os.path.join(FILE_PATH, "sample_data", "csv_files")
NOT_IMPLEMENTED_PATH = os.path.join(
FILE_PATH, "sample_data", "csv_files", "for_not_implemented_methods"
)
class DataStorePopulateSpatiaLiteTestCase(TestCase):
def setUp(self):
self.store = DataStore("", "", "", 0, ":memory:", db_type="sqlite")
self.store.initialise()
def tearDown(self):
pass
def test_populate_reference(self):
"""Test whether CSVs successfully imported to SQLite"""
# Check tables are created but empty
with self.store.session_scope():
nationalities = self.store.session.query(
self.store.db_classes.Nationality
).all()
platform_types = self.store.session.query(
self.store.db_classes.PlatformType
).all()
# There must be no entities at the beginning
self.assertEqual(len(nationalities), 0)
self.assertEqual(len(platform_types), 0)
# Import CSVs to the related tables
with self.store.session_scope():
self.store.populate_reference()
# Check tables filled with correct data
with self.store.session_scope():
nationalities = self.store.session.query(
self.store.db_classes.Nationality
).all()
platform_types = self.store.session.query(
self.store.db_classes.PlatformType
).all()
nationality_object = self.store.search_nationality("UNITED KINGDOM")
platform_type_object = self.store.search_platform_type("TYPE-1")
# Check whether they are not empty anymore and filled with correct data
self.assertNotEqual(len(nationalities), 0)
self.assertNotEqual(len(platform_types), 0)
self.assertIn(nationality_object.name, "UNITED KINGDOM")
self.assertIn(platform_type_object.name, "TTYPE-1")
def test_populate_metadata(self):
# reference tables must be filled first
with self.store.session_scope():
self.store.populate_reference()
# get all table values
with self.store.session_scope():
platforms = self.store.session.query(self.store.db_classes.Platform).all()
datafiles = self.store.session.query(self.store.db_classes.Datafile).all()
sensors = self.store.session.query(self.store.db_classes.Sensor).all()
# There must be no entities at the beginning
self.assertEqual(len(platforms), 0)
self.assertEqual(len(datafiles), 0)
self.assertEqual(len(sensors), 0)
# Import CSVs to the related tables
with self.store.session_scope():
self.store.populate_metadata()
with self.store.session_scope():
platforms = self.store.session.query(self.store.db_classes.Platform).all()
datafiles = self.store.session.query(self.store.db_classes.Datafile).all()
sensors = self.store.session.query(self.store.db_classes.Sensor).all()
platform_object = self.store.search_platform("PLATFORM-1")
datafile_object = self.store.search_datafile("DATAFILE-1")
sensor_object = self.store.search_sensor("SENSOR-1")
# Check whether they are not empty anymore and filled with correct data
self.assertNotEqual(len(platforms), 0)
self.assertNotEqual(len(datafiles), 0)
self.assertNotEqual(len(sensors), 0)
# The following assertions filter objects by foreign key ids and
# compares values with the data from CSV
# Platform Object: PLATFORM-1, UNITED KINGDOM, TYPE-1, PRIVACY-1
nationality = (
self.store.session.query(self.store.db_classes.Nationality)
.filter_by(nationality_id=platform_object.nationality_id)
.first()
)
self.assertEqual(nationality.name, "UNITED KINGDOM")
platform_type = (
self.store.session.query(self.store.db_classes.PlatformType)
.filter_by(platform_type_id=platform_object.platform_type_id)
.first()
)
self.assertEqual(platform_type.name, "TYPE-1")
privacy = (
self.store.session.query(self.store.db_classes.Privacy)
.filter_by(privacy_id=platform_object.privacy_id)
.first()
)
self.assertEqual(privacy.name, "PRIVACY-1")
# Datafile Object: DATAFILE-1, True, PRIVACY-1, DATAFILE-TYPE-1
self.assertEqual(datafile_object.simulated, True)
privacy = (
self.store.session.query(self.store.db_classes.Privacy)
.filter_by(privacy_id=datafile_object.privacy_id)
.first()
)
self.assertEqual(privacy.name, "PRIVACY-1")
datafile_type = (
self.store.session.query(self.store.db_classes.DatafileType)
.filter_by(datafile_type_id=datafile_object.datafile_type_id)
.first()
)
self.assertEqual(datafile_type.name, "DATAFILE-TYPE-1")
# Sensor Object: SENSOR-1, SENSOR-TYPE-1, PLATFORM-1
sensor_type = (
self.store.session.query(self.store.db_classes.SensorType)
.filter_by(sensor_type_id=sensor_object.sensor_type_id)
.first()
)
self.assertEqual(sensor_type.name, "SENSOR-TYPE-1")
def test_populate_measurement(self):
# reference and metadata tables must be filled first
with self.store.session_scope():
self.store.populate_reference()
self.store.populate_metadata()
# get all table values
with self.store.session_scope():
states = self.store.session.query(self.store.db_classes.State).all()
# There must be no entities at the beginning
self.assertEqual(len(states), 0)
# Import CSVs to the related tables
with self.store.session_scope():
self.store.populate_measurement()
# Check tables filled with correct data
with self.store.session_scope():
states = self.store.session.query(self.store.db_classes.State).all()
first_state = self.store.session.query(self.store.db_classes.State).first()
# Check whether they are not empty anymore and filled with correct data
self.assertNotEqual(len(states), 0)
# The following assertions filter objects by foreign key ids and
# compares values with the data from CSV
# first_state = 2019-01-12 12:10:00, SENSOR-1, DATAFILE-1,46.000 32.000,,,,
# PRIVACY-1
self.assertEqual(
first_state.time,
datetime.strptime("2019-01-12 12:10:00", "%Y-%m-%d %H:%M:%S"),
)
privacy = (
self.store.session.query(self.store.db_classes.Privacy)
.filter_by(privacy_id=first_state.privacy_id)
.first()
)
self.assertEqual(privacy.name, "PRIVACY-1")
datafile = (
self.store.session.query(self.store.db_classes.Datafile)
.filter_by(datafile_id=first_state.source_id)
.first()
)
self.assertEqual(datafile.reference, "DATAFILE-1")
sensor = (
self.store.session.query(self.store.db_classes.Sensor)
.filter_by(sensor_id=first_state.sensor_id)
.first()
)
self.assertEqual(sensor.name, "SENSOR-1")
class DataStorePopulatePostGISTestCase(TestCase):
def setUp(self) -> None:
self.postgres = None
self.store = None
try:
self.postgres = Postgresql(
database="test",
host="localhost",
user="postgres",
password="postgres",
port=55527,
)
except RuntimeError:
print("PostgreSQL database couldn't be created! Test is skipping.")
return
try:
self.store = DataStore(
db_name="test",
db_host="localhost",
db_username="postgres",
db_password="postgres",
db_port=55527,
)
self.store.initialise()
except OperationalError:
print("Database schema and data population failed! Test is skipping.")
def tearDown(self) -> None:
try:
self.postgres.stop()
except AttributeError:
return
def test_populate_reference(self):
"""Test whether CSVs successfully imported to PostGIS"""
# Check tables are created but empty
with self.store.session_scope():
nationalities = self.store.session.query(
self.store.db_classes.Nationality
).all()
platform_types = self.store.session.query(
self.store.db_classes.PlatformType
).all()
# There must be no entities at the beginning
self.assertEqual(len(nationalities), 0)
self.assertEqual(len(platform_types), 0)
# Import CSVs to the related tables
with self.store.session_scope():
self.store.populate_reference()
# Check tables filled with correct data
with self.store.session_scope():
nationalities = self.store.session.query(
self.store.db_classes.Nationality
).all()
platform_types = self.store.session.query(
self.store.db_classes.PlatformType
).all()
nationality_object = self.store.search_nationality("UNITED KINGDOM")
platform_type_object = self.store.search_platform_type("TYPE-1")
# Check whether they are not empty anymore and filled with correct data
self.assertNotEqual(len(nationalities), 0)
self.assertNotEqual(len(platform_types), 0)
self.assertIn(nationality_object.name, "UNITED KINGDOM")
self.assertIn(platform_type_object.name, "TTYPE-1")
def test_populate_metadata(self):
# reference tables must be filled first
with self.store.session_scope():
self.store.populate_reference()
# get all table values
with self.store.session_scope():
platforms = self.store.session.query(self.store.db_classes.Platform).all()
datafiles = self.store.session.query(self.store.db_classes.Datafile).all()
sensors = self.store.session.query(self.store.db_classes.Sensor).all()
# There must be no entities at the beginning
self.assertEqual(len(platforms), 0)
self.assertEqual(len(datafiles), 0)
self.assertEqual(len(sensors), 0)
# Import CSVs to the related tables
with self.store.session_scope():
self.store.populate_metadata()
with self.store.session_scope():
platforms = self.store.session.query(self.store.db_classes.Platform).all()
datafiles = self.store.session.query(self.store.db_classes.Datafile).all()
sensors = self.store.session.query(self.store.db_classes.Sensor).all()
platform_object = self.store.search_platform("PLATFORM-1")
datafile_object = self.store.search_datafile("DATAFILE-1")
sensor_object = self.store.search_sensor("SENSOR-1")
# Check whether they are not empty anymore and filled with correct data
self.assertNotEqual(len(platforms), 0)
self.assertNotEqual(len(datafiles), 0)
self.assertNotEqual(len(sensors), 0)
# The following assertions filter objects by foreign key ids and
# compares values with the data from CSV
# Platform Object: PLATFORM-1, UNITED KINGDOM, TYPE-1, PRIVACY-1
nationality = (
self.store.session.query(self.store.db_classes.Nationality)
.filter_by(nationality_id=platform_object.nationality_id)
.first()
)
self.assertEqual(nationality.name, "UNITED KINGDOM")
platform_type = (
self.store.session.query(self.store.db_classes.PlatformType)
.filter_by(platform_type_id=platform_object.platform_type_id)
.first()
)
self.assertEqual(platform_type.name, "TYPE-1")
privacy = (
self.store.session.query(self.store.db_classes.Privacy)
.filter_by(privacy_id=platform_object.privacy_id)
.first()
)
self.assertEqual(privacy.name, "PRIVACY-1")
# Datafile Object: DATAFILE-1, True, PRIVACY-1, DATAFILE-TYPE-1
self.assertEqual(datafile_object.simulated, True)
privacy = (
self.store.session.query(self.store.db_classes.Privacy)
.filter_by(privacy_id=datafile_object.privacy_id)
.first()
)
self.assertEqual(privacy.name, "PRIVACY-1")
datafile_type = (
self.store.session.query(self.store.db_classes.DatafileType)
.filter_by(datafile_type_id=datafile_object.datafile_type_id)
.first()
)
self.assertEqual(datafile_type.name, "DATAFILE-TYPE-1")
# Sensor Object: SENSOR-1, SENSOR-TYPE-1, PLATFORM-1
sensor_type = (
self.store.session.query(self.store.db_classes.SensorType)
.filter_by(sensor_type_id=sensor_object.sensor_type_id)
.first()
)
self.assertEqual(sensor_type.name, "SENSOR-TYPE-1")
def test_populate_measurement(self):
    """populate_measurement() fills the initially-empty State table with
    the contents of the sample measurement CSV files."""
    State = self.store.db_classes.State
    # Reference and metadata tables must be filled first
    with self.store.session_scope():
        self.store.populate_reference()
        self.store.populate_metadata()
    # Before the import the State table must contain no entities
    with self.store.session_scope():
        self.assertEqual(len(self.store.session.query(State).all()), 0)
    # Import the CSVs into the measurement tables
    with self.store.session_scope():
        self.store.populate_measurement()
    # Verify the table is no longer empty and holds the CSV data.
    # The following assertions resolve foreign key ids and compare the
    # referenced rows with the data from the CSV.  Expected first row:
    # first_state = 2019-01-12 12:10:00, SENSOR-1, DATAFILE-1,46.000 32.000,,,,
    # PRIVACY-1
    with self.store.session_scope():
        states = self.store.session.query(State).all()
        self.assertNotEqual(len(states), 0)
        first_state = self.store.session.query(State).first()
        self.assertEqual(
            first_state.time,
            datetime.strptime("2019-01-12 12:10:00", "%Y-%m-%d %H:%M:%S"),
        )
        privacy = (
            self.store.session.query(self.store.db_classes.Privacy)
            .filter_by(privacy_id=first_state.privacy_id)
            .first()
        )
        self.assertEqual(privacy.name, "PRIVACY-1")
        datafile = (
            self.store.session.query(self.store.db_classes.Datafile)
            .filter_by(datafile_id=first_state.source_id)
            .first()
        )
        self.assertEqual(datafile.reference, "DATAFILE-1")
        sensor = (
            self.store.session.query(self.store.db_classes.Sensor)
            .filter_by(sensor_id=first_state.sensor_id)
            .first()
        )
        self.assertEqual(sensor.name, "SENSOR-1")
# TODO: This test case should fail when all add_to_XXX methods are implemented.
# Remove it when there are add methods for each DB table.
class DataStorePopulateNotImplementedMethodTestCase(TestCase):
    """Check that each populate method prints the expected
    'Method(...) not found!' message when the corresponding add method
    does not exist on the DataStore."""

    def setUp(self):
        self.store = DataStore("", "", "", 0, ":memory:", db_type="sqlite")
        self.store.initialise()

    def tearDown(self):
        pass

    def _run_and_capture(self, *populate_methods):
        """Run the given populate methods against the not-implemented CSV
        folder and return everything they printed to stdout."""
        with self.store.session_scope():
            captured = StringIO()
            with redirect_stdout(captured):
                for populate in populate_methods:
                    populate(NOT_IMPLEMENTED_PATH)
            return captured.getvalue()

    def test_populate_reference(self):
        output = self._run_and_capture(self.store.populate_reference)
        self.assertIn("Method(add_to_confidence_levels) not found!", output)

    def test_populate_metadata(self):
        output = self._run_and_capture(
            self.store.populate_reference,
            self.store.populate_metadata,
        )
        self.assertIn("Method(add_to_confidence_levels) not found!", output)
        self.assertIn("Method(add_to_tags) not found!", output)

    def test_populate_measurement(self):
        output = self._run_and_capture(
            self.store.populate_reference,
            self.store.populate_metadata,
            self.store.populate_measurement,
        )
        self.assertIn("Method(add_to_confidence_levels) not found!", output)
        self.assertIn("Method(add_to_tags) not found!", output)
        self.assertIn("Method(add_to_media) not found!", output)
# Allow this test module to be run directly; unittest discovers and runs
# all TestCase classes defined above.
if __name__ == "__main__":
    unittest.main()
| 40.680617 | 87 | 0.61254 | 2,072 | 18,469 | 5.303571 | 0.091699 | 0.121212 | 0.100464 | 0.084084 | 0.890436 | 0.888707 | 0.88443 | 0.88443 | 0.882337 | 0.882337 | 0 | 0.012375 | 0.291191 | 18,469 | 453 | 88 | 40.770419 | 0.827057 | 0.143159 | 0 | 0.799383 | 0 | 0 | 0.056874 | 0.007808 | 0 | 0 | 0 | 0.002208 | 0.17284 | 1 | 0.046296 | false | 0.012346 | 0.027778 | 0 | 0.089506 | 0.006173 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
0e2da0019c992682a5aeec348afb4babe2cfba5a | 176 | py | Python | load_data/load_data.py | TCC-ADS-2020/Normalizador | f0a013de91ac0aa8c1f20ccbd11f6ec353778de8 | [
"MIT"
] | null | null | null | load_data/load_data.py | TCC-ADS-2020/Normalizador | f0a013de91ac0aa8c1f20ccbd11f6ec353778de8 | [
"MIT"
] | null | null | null | load_data/load_data.py | TCC-ADS-2020/Normalizador | f0a013de91ac0aa8c1f20ccbd11f6ec353778de8 | [
"MIT"
] | null | null | null | from numpy import genfromtxt
def load_csv(path, delimiter=','):
    """Load a delimited text file (CSV by default) into a NumPy array.

    Generalized over the previously hard-coded comma so it mirrors
    load_txt; existing callers are unaffected by the new default.

    Parameters
    ----------
    path : str
        Path of the file to read.
    delimiter : str, optional
        Field separator; a comma by default.

    Returns
    -------
    numpy.ndarray
        Float array; fields that cannot be parsed become NaN
        (genfromtxt's behavior).
    """
    return genfromtxt(path, delimiter=delimiter)
def load_txt(path, separator):
    """Read a delimited text file into a NumPy array, splitting each
    line's fields on *separator*."""
    parsed = genfromtxt(path, delimiter=separator)
    return parsed
| 17.6 | 48 | 0.744318 | 22 | 176 | 5.863636 | 0.545455 | 0.108527 | 0.310078 | 0.449612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.153409 | 176 | 9 | 49 | 19.555556 | 0.865772 | 0 | 0 | 0 | 0 | 0 | 0.005682 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.4 | false | 0 | 0.2 | 0.4 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
0e7d16af168b90783a05344337186cbbdf98f9e5 | 19,100 | bzl | Python | src/io/iohk/bazel/deps/cli/3rdparty/deps.bzl | input-output-hk/bazel-deps | cee4e38bab0e0b466816cd413615c06854376cc1 | [
"MIT"
] | null | null | null | src/io/iohk/bazel/deps/cli/3rdparty/deps.bzl | input-output-hk/bazel-deps | cee4e38bab0e0b466816cd413615c06854376cc1 | [
"MIT"
] | null | null | null | src/io/iohk/bazel/deps/cli/3rdparty/deps.bzl | input-output-hk/bazel-deps | cee4e38bab0e0b466816cd413615c06854376cc1 | [
"MIT"
] | null | null | null | # This file has been autogenerated by bazel-deps.
# Please, DO NOT EDIT it by hand.
_lookup = {
"org.json4s:json4s-jackson%": ["@org___json4s_and_json4s_ds_scalap__2___12", "@com___fasterxml___jackson___core_and_jackson_ds_core", "@com___thoughtworks___paranamer_and_paranamer", "@org___json4s_and_json4s_ds_ast__2___12", "@org___scala_ds_lang_and_scala_ds_library", "@com___fasterxml___jackson___core_and_jackson_ds_annotations", "@com___fasterxml___jackson___core_and_jackson_ds_databind", "@org___json4s_and_json4s_ds_core__2___12"],
"org.sonatype.plexus:plexus-cipher": [],
"org.eclipse.aether:aether-connector-basic": ["@org___eclipse___aether_and_aether_ds_util", "@org___eclipse___aether_and_aether_ds_spi", "@org___eclipse___aether_and_aether_ds_api"],
"org.eclipse.aether:aether-util": ["@org___eclipse___aether_and_aether_ds_api"],
"com.fasterxml.jackson.core:jackson-core": [],
"org.typelevel:cats-kernel%": ["@org___scala_ds_lang_and_scala_ds_library"],
"io.circe:circe-generic%": ["@org___typelevel_and_cats_ds_core__2___12", "@io___circe_and_circe_ds_numbers__2___12", "@org___typelevel_and_cats_ds_macros__2___12", "@org___scala_ds_lang_and_scala_ds_reflect", "@org___typelevel_and_machinist__2___12", "@com___chuusai_and_shapeless__2___12", "@org___typelevel_and_cats_ds_kernel__2___12", "@org___scala_ds_lang_and_scala_ds_library", "@io___circe_and_circe_ds_core__2___12", "@org___typelevel_and_macro_ds_compat__2___12"],
"io.circe:circe-jackson25%": ["@org___typelevel_and_cats_ds_core__2___12", "@io___circe_and_circe_ds_numbers__2___12", "@org___typelevel_and_cats_ds_macros__2___12", "@org___scala_ds_lang_and_scala_ds_reflect", "@org___typelevel_and_machinist__2___12", "@com___fasterxml___jackson___core_and_jackson_ds_core", "@org___typelevel_and_cats_ds_kernel__2___12", "@org___scala_ds_lang_and_scala_ds_library", "@io___circe_and_circe_ds_core__2___12", "@com___fasterxml___jackson___core_and_jackson_ds_annotations", "@com___fasterxml___jackson___core_and_jackson_ds_databind"],
"org.scalactic:scalactic%": ["@org___scala_ds_lang_and_scala_ds_library", "@org___scala_ds_lang_and_scala_ds_reflect"],
"com.fasterxml.jackson.core:jackson-databind": ["@com___fasterxml___jackson___core_and_jackson_ds_core", "@com___fasterxml___jackson___core_and_jackson_ds_annotations"],
"org.eclipse.sisu:org.eclipse.sisu.plexus": ["@javax___enterprise_and_cdi_ds_api", "@javax___inject_and_javax___inject", "@org___eclipse___sisu_and_org___eclipse___sisu___inject", "@javax___annotation_and_jsr250_ds_api", "@org___codehaus___plexus_and_plexus_ds_component_ds_annotations", "@org___codehaus___plexus_and_plexus_ds_utils", "@org___codehaus___plexus_and_plexus_ds_classworlds"],
"org.typelevel:cats-free%": ["@org___typelevel_and_cats_ds_core__2___12", "@org___typelevel_and_cats_ds_macros__2___12", "@org___scala_ds_lang_and_scala_ds_reflect", "@org___typelevel_and_machinist__2___12", "@org___typelevel_and_cats_ds_kernel__2___12", "@org___scala_ds_lang_and_scala_ds_library"],
"org.sonatype.plexus:plexus-sec-dispatcher": ["@org___codehaus___plexus_and_plexus_ds_utils", "@org___sonatype___plexus_and_plexus_ds_cipher"],
"io.circe:circe-jawn%": ["@org___typelevel_and_cats_ds_core__2___12", "@org___spire_ds_math_and_jawn_ds_parser__2___12", "@io___circe_and_circe_ds_numbers__2___12", "@org___typelevel_and_cats_ds_macros__2___12", "@org___scala_ds_lang_and_scala_ds_reflect", "@org___typelevel_and_machinist__2___12", "@org___typelevel_and_cats_ds_kernel__2___12", "@org___scala_ds_lang_and_scala_ds_library", "@io___circe_and_circe_ds_core__2___12"],
"org.slf4j:slf4j-api": [],
"io.get-coursier:coursier%": ["@io___get_ds_coursier_and_coursier_ds_core__2___12", "@io___get_ds_coursier_and_coursier_ds_cache__2___12", "@org___scala_ds_lang_and_scala_ds_library", "@org___scala_ds_lang___modules_and_scala_ds_xml__2___12"],
"org.eclipse.aether:aether-transport-http": ["@org___apache___httpcomponents_and_httpclient", "@commons_ds_codec_and_commons_ds_codec", "@org___eclipse___aether_and_aether_ds_api", "@org___eclipse___aether_and_aether_ds_util", "@org___apache___httpcomponents_and_httpcore", "@org___slf4j_and_slf4j_ds_api", "@org___slf4j_and_jcl_ds_over_ds_slf4j", "@org___eclipse___aether_and_aether_ds_spi"],
"org.apache.maven:maven-aether-provider": ["@org___eclipse___aether_and_aether_ds_api", "@org___apache___maven_and_maven_ds_model", "@org___apache___maven_and_maven_ds_artifact", "@org___eclipse___aether_and_aether_ds_impl", "@org___codehaus___plexus_and_plexus_ds_component_ds_annotations", "@org___eclipse___aether_and_aether_ds_util", "@com___google___guava_and_guava", "@org___apache___commons_and_commons_ds_lang3", "@org___codehaus___plexus_and_plexus_ds_interpolation", "@org___codehaus___plexus_and_plexus_ds_utils", "@org___apache___maven_and_maven_ds_model_ds_builder", "@org___apache___maven_and_maven_ds_repository_ds_metadata", "@org___eclipse___aether_and_aether_ds_spi", "@org___apache___maven_and_maven_ds_builder_ds_support"],
"org.scala-lang.modules:scala-xml%": ["@org___scala_ds_lang_and_scala_ds_library"],
"org.scalacheck:scalacheck%": ["@org___scala_ds_sbt_and_test_ds_interface", "@org___scala_ds_lang_and_scala_ds_library"],
"org.eclipse.aether:aether-spi": ["@org___eclipse___aether_and_aether_ds_api"],
"org.typelevel:paiges-core%": ["@org___scala_ds_lang_and_scala_ds_library"],
"org.apache.maven:maven-settings-builder": ["@org___sonatype___plexus_and_plexus_ds_sec_ds_dispatcher", "@org___apache___maven_and_maven_ds_settings", "@org___codehaus___plexus_and_plexus_ds_component_ds_annotations", "@org___sonatype___plexus_and_plexus_ds_cipher", "@org___apache___commons_and_commons_ds_lang3", "@org___codehaus___plexus_and_plexus_ds_interpolation", "@org___codehaus___plexus_and_plexus_ds_utils", "@org___apache___maven_and_maven_ds_builder_ds_support"],
"io.get-coursier:coursier-core%": ["@org___scala_ds_lang_and_scala_ds_library", "@org___scala_ds_lang___modules_and_scala_ds_xml__2___12"],
"com.monovore:decline%": ["@org___typelevel_and_cats_ds_core__2___12", "@org___typelevel_and_cats_ds_macros__2___12", "@org___scala_ds_lang_and_scala_ds_reflect", "@org___typelevel_and_machinist__2___12", "@org___typelevel_and_cats_ds_kernel__2___12", "@org___scala_ds_lang_and_scala_ds_library"],
"org.scalatest:scalatest%": ["@org___scala_ds_lang_and_scala_ds_reflect", "@org___scalactic_and_scalactic__2___12", "@org___scala_ds_lang_and_scala_ds_library", "@org___scala_ds_lang___modules_and_scala_ds_xml__2___12"],
"org.eclipse.aether:aether-impl": ["@org___eclipse___aether_and_aether_ds_util", "@org___eclipse___aether_and_aether_ds_spi", "@org___eclipse___aether_and_aether_ds_api"],
"org.json4s:json4s-core%": ["@org___json4s_and_json4s_ds_scalap__2___12", "@com___thoughtworks___paranamer_and_paranamer", "@org___json4s_and_json4s_ds_ast__2___12", "@org___scala_ds_lang_and_scala_ds_library"],
"io.get-coursier:coursier-cache%": ["@org___scala_ds_lang_and_scala_ds_library", "@io___get_ds_coursier_and_coursier_ds_core__2___12", "@org___scala_ds_lang___modules_and_scala_ds_xml__2___12"],
"org.apache.maven:maven-settings": ["@org___codehaus___plexus_and_plexus_ds_utils"],
"com.fasterxml.jackson.dataformat:jackson-dataformat-yaml": ["@org___yaml_and_snakeyaml", "@com___fasterxml___jackson___core_and_jackson_ds_core"],
"io.circe:circe-core%": ["@org___typelevel_and_cats_ds_core__2___12", "@io___circe_and_circe_ds_numbers__2___12", "@org___typelevel_and_cats_ds_macros__2___12", "@org___scala_ds_lang_and_scala_ds_reflect", "@org___typelevel_and_machinist__2___12", "@org___typelevel_and_cats_ds_kernel__2___12", "@org___scala_ds_lang_and_scala_ds_library"],
"org.json4s:json4s-ast%": ["@org___scala_ds_lang_and_scala_ds_library"],
"org.typelevel:cats-core%": ["@org___typelevel_and_cats_ds_macros__2___12", "@org___scala_ds_lang_and_scala_ds_reflect", "@org___typelevel_and_machinist__2___12", "@org___typelevel_and_cats_ds_kernel__2___12", "@org___scala_ds_lang_and_scala_ds_library"],
"org.eclipse.aether:aether-api": [],
"org.slf4j:slf4j-simple": ["@org___slf4j_and_slf4j_ds_api"],
"org.typelevel:cats-macros%": ["@org___typelevel_and_machinist__2___12", "@org___scala_ds_lang_and_scala_ds_library", "@org___scala_ds_lang_and_scala_ds_reflect"],
"org.spire-math:kind-projector%": ["@org___scala_ds_lang_and_scala_ds_reflect", "@org___scala_ds_lang_and_scala_ds_library", "@org___scala_ds_lang___modules_and_scala_ds_xml__2___12", "@org___scala_ds_lang_and_scala_ds_compiler"],
"org.eclipse.aether:aether-transport-file": ["@org___eclipse___aether_and_aether_ds_util", "@org___eclipse___aether_and_aether_ds_spi", "@org___eclipse___aether_and_aether_ds_api"],
}
_lookup__NEVERLINK = {
"org.json4s:json4s-jackson%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@com___thoughtworks___paranamer_and_paranamer__NEVERLINK", "@org___json4s_and_json4s_ds_scalap__2___12__NEVERLINK", "@com___fasterxml___jackson___core_and_jackson_ds_databind__NEVERLINK", "@org___json4s_and_json4s_ds_ast__2___12__NEVERLINK", "@org___json4s_and_json4s_ds_core__2___12__NEVERLINK", "@com___fasterxml___jackson___core_and_jackson_ds_annotations__NEVERLINK", "@com___fasterxml___jackson___core_and_jackson_ds_core__NEVERLINK"],
"org.sonatype.plexus:plexus-cipher": [],
"org.eclipse.aether:aether-connector-basic": ["@org___eclipse___aether_and_aether_ds_util__NEVERLINK", "@org___eclipse___aether_and_aether_ds_spi__NEVERLINK", "@org___eclipse___aether_and_aether_ds_api__NEVERLINK"],
"org.eclipse.aether:aether-util": ["@org___eclipse___aether_and_aether_ds_api__NEVERLINK"],
"com.fasterxml.jackson.core:jackson-core": [],
"org.typelevel:cats-kernel%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK"],
"io.circe:circe-generic%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@io___circe_and_circe_ds_numbers__2___12__NEVERLINK", "@org___typelevel_and_cats_ds_core__2___12__NEVERLINK", "@com___chuusai_and_shapeless__2___12__NEVERLINK", "@org___typelevel_and_cats_ds_macros__2___12__NEVERLINK", "@org___typelevel_and_machinist__2___12__NEVERLINK", "@io___circe_and_circe_ds_core__2___12__NEVERLINK", "@org___typelevel_and_cats_ds_kernel__2___12__NEVERLINK", "@org___typelevel_and_macro_ds_compat__2___12__NEVERLINK", "@org___scala_ds_lang_and_scala_ds_reflect__NEVERLINK"],
"io.circe:circe-jackson25%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@io___circe_and_circe_ds_numbers__2___12__NEVERLINK", "@com___fasterxml___jackson___core_and_jackson_ds_databind__NEVERLINK", "@org___typelevel_and_cats_ds_core__2___12__NEVERLINK", "@com___fasterxml___jackson___core_and_jackson_ds_annotations__NEVERLINK", "@com___fasterxml___jackson___core_and_jackson_ds_core__NEVERLINK", "@org___typelevel_and_cats_ds_macros__2___12__NEVERLINK", "@org___typelevel_and_machinist__2___12__NEVERLINK", "@io___circe_and_circe_ds_core__2___12__NEVERLINK", "@org___typelevel_and_cats_ds_kernel__2___12__NEVERLINK", "@org___scala_ds_lang_and_scala_ds_reflect__NEVERLINK"],
"org.scalactic:scalactic%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@org___scala_ds_lang_and_scala_ds_reflect__NEVERLINK"],
"com.fasterxml.jackson.core:jackson-databind": ["@com___fasterxml___jackson___core_and_jackson_ds_core__NEVERLINK", "@com___fasterxml___jackson___core_and_jackson_ds_annotations__NEVERLINK"],
"org.eclipse.sisu:org.eclipse.sisu.plexus": ["@org___codehaus___plexus_and_plexus_ds_classworlds__NEVERLINK", "@javax___annotation_and_jsr250_ds_api__NEVERLINK", "@javax___enterprise_and_cdi_ds_api__NEVERLINK", "@javax___inject_and_javax___inject__NEVERLINK", "@org___eclipse___sisu_and_org___eclipse___sisu___inject__NEVERLINK", "@org___codehaus___plexus_and_plexus_ds_component_ds_annotations__NEVERLINK", "@org___codehaus___plexus_and_plexus_ds_utils__NEVERLINK"],
"org.typelevel:cats-free%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@org___typelevel_and_cats_ds_core__2___12__NEVERLINK", "@org___typelevel_and_cats_ds_macros__2___12__NEVERLINK", "@org___typelevel_and_machinist__2___12__NEVERLINK", "@org___typelevel_and_cats_ds_kernel__2___12__NEVERLINK", "@org___scala_ds_lang_and_scala_ds_reflect__NEVERLINK"],
"org.sonatype.plexus:plexus-sec-dispatcher": ["@org___codehaus___plexus_and_plexus_ds_utils__NEVERLINK", "@org___sonatype___plexus_and_plexus_ds_cipher__NEVERLINK"],
"io.circe:circe-jawn%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@org___spire_ds_math_and_jawn_ds_parser__2___12__NEVERLINK", "@io___circe_and_circe_ds_numbers__2___12__NEVERLINK", "@org___typelevel_and_cats_ds_core__2___12__NEVERLINK", "@org___typelevel_and_cats_ds_macros__2___12__NEVERLINK", "@org___typelevel_and_machinist__2___12__NEVERLINK", "@io___circe_and_circe_ds_core__2___12__NEVERLINK", "@org___typelevel_and_cats_ds_kernel__2___12__NEVERLINK", "@org___scala_ds_lang_and_scala_ds_reflect__NEVERLINK"],
"org.slf4j:slf4j-api": [],
"io.get-coursier:coursier%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@io___get_ds_coursier_and_coursier_ds_cache__2___12__NEVERLINK", "@io___get_ds_coursier_and_coursier_ds_core__2___12__NEVERLINK", "@org___scala_ds_lang___modules_and_scala_ds_xml__2___12__NEVERLINK"],
"org.eclipse.aether:aether-transport-http": ["@org___eclipse___aether_and_aether_ds_util__NEVERLINK", "@commons_ds_codec_and_commons_ds_codec__NEVERLINK", "@org___apache___httpcomponents_and_httpclient__NEVERLINK", "@org___slf4j_and_jcl_ds_over_ds_slf4j__NEVERLINK", "@org___slf4j_and_slf4j_ds_api__NEVERLINK", "@org___apache___httpcomponents_and_httpcore__NEVERLINK", "@org___eclipse___aether_and_aether_ds_api__NEVERLINK", "@org___eclipse___aether_and_aether_ds_spi__NEVERLINK"],
"org.apache.maven:maven-aether-provider": ["@org___apache___commons_and_commons_ds_lang3__NEVERLINK", "@org___apache___maven_and_maven_ds_artifact__NEVERLINK", "@org___eclipse___aether_and_aether_ds_util__NEVERLINK", "@com___google___guava_and_guava__NEVERLINK", "@org___eclipse___aether_and_aether_ds_impl__NEVERLINK", "@org___codehaus___plexus_and_plexus_ds_interpolation__NEVERLINK", "@org___eclipse___aether_and_aether_ds_api__NEVERLINK", "@org___codehaus___plexus_and_plexus_ds_component_ds_annotations__NEVERLINK", "@org___apache___maven_and_maven_ds_builder_ds_support__NEVERLINK", "@org___eclipse___aether_and_aether_ds_spi__NEVERLINK", "@org___apache___maven_and_maven_ds_repository_ds_metadata__NEVERLINK", "@org___apache___maven_and_maven_ds_model__NEVERLINK", "@org___codehaus___plexus_and_plexus_ds_utils__NEVERLINK", "@org___apache___maven_and_maven_ds_model_ds_builder__NEVERLINK"],
"org.scala-lang.modules:scala-xml%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK"],
"org.scalacheck:scalacheck%": ["@org___scala_ds_sbt_and_test_ds_interface__NEVERLINK", "@org___scala_ds_lang_and_scala_ds_library__NEVERLINK"],
"org.eclipse.aether:aether-spi": ["@org___eclipse___aether_and_aether_ds_api__NEVERLINK"],
"org.typelevel:paiges-core%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK"],
"org.apache.maven:maven-settings-builder": ["@org___apache___commons_and_commons_ds_lang3__NEVERLINK", "@org___sonatype___plexus_and_plexus_ds_cipher__NEVERLINK", "@org___apache___maven_and_maven_ds_settings__NEVERLINK", "@org___codehaus___plexus_and_plexus_ds_interpolation__NEVERLINK", "@org___sonatype___plexus_and_plexus_ds_sec_ds_dispatcher__NEVERLINK", "@org___codehaus___plexus_and_plexus_ds_component_ds_annotations__NEVERLINK", "@org___apache___maven_and_maven_ds_builder_ds_support__NEVERLINK", "@org___codehaus___plexus_and_plexus_ds_utils__NEVERLINK"],
"io.get-coursier:coursier-core%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@org___scala_ds_lang___modules_and_scala_ds_xml__2___12__NEVERLINK"],
"com.monovore:decline%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@org___typelevel_and_cats_ds_core__2___12__NEVERLINK", "@org___typelevel_and_cats_ds_macros__2___12__NEVERLINK", "@org___typelevel_and_machinist__2___12__NEVERLINK", "@org___typelevel_and_cats_ds_kernel__2___12__NEVERLINK", "@org___scala_ds_lang_and_scala_ds_reflect__NEVERLINK"],
"org.scalatest:scalatest%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@org___scala_ds_lang___modules_and_scala_ds_xml__2___12__NEVERLINK", "@org___scala_ds_lang_and_scala_ds_reflect__NEVERLINK", "@org___scalactic_and_scalactic__2___12__NEVERLINK"],
"org.eclipse.aether:aether-impl": ["@org___eclipse___aether_and_aether_ds_util__NEVERLINK", "@org___eclipse___aether_and_aether_ds_spi__NEVERLINK", "@org___eclipse___aether_and_aether_ds_api__NEVERLINK"],
"org.json4s:json4s-core%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@com___thoughtworks___paranamer_and_paranamer__NEVERLINK", "@org___json4s_and_json4s_ds_scalap__2___12__NEVERLINK", "@org___json4s_and_json4s_ds_ast__2___12__NEVERLINK"],
"io.get-coursier:coursier-cache%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@io___get_ds_coursier_and_coursier_ds_core__2___12__NEVERLINK", "@org___scala_ds_lang___modules_and_scala_ds_xml__2___12__NEVERLINK"],
"org.apache.maven:maven-settings": ["@org___codehaus___plexus_and_plexus_ds_utils__NEVERLINK"],
"com.fasterxml.jackson.dataformat:jackson-dataformat-yaml": ["@org___yaml_and_snakeyaml__NEVERLINK", "@com___fasterxml___jackson___core_and_jackson_ds_core__NEVERLINK"],
"io.circe:circe-core%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@io___circe_and_circe_ds_numbers__2___12__NEVERLINK", "@org___typelevel_and_cats_ds_core__2___12__NEVERLINK", "@org___typelevel_and_cats_ds_macros__2___12__NEVERLINK", "@org___typelevel_and_machinist__2___12__NEVERLINK", "@org___typelevel_and_cats_ds_kernel__2___12__NEVERLINK", "@org___scala_ds_lang_and_scala_ds_reflect__NEVERLINK"],
"org.json4s:json4s-ast%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK"],
"org.typelevel:cats-core%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@org___typelevel_and_cats_ds_macros__2___12__NEVERLINK", "@org___typelevel_and_machinist__2___12__NEVERLINK", "@org___typelevel_and_cats_ds_kernel__2___12__NEVERLINK", "@org___scala_ds_lang_and_scala_ds_reflect__NEVERLINK"],
"org.eclipse.aether:aether-api": [],
"org.slf4j:slf4j-simple": ["@org___slf4j_and_slf4j_ds_api__NEVERLINK"],
"org.typelevel:cats-macros%": ["@org___typelevel_and_machinist__2___12__NEVERLINK", "@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@org___scala_ds_lang_and_scala_ds_reflect__NEVERLINK"],
"org.spire-math:kind-projector%": ["@org___scala_ds_lang_and_scala_ds_library__NEVERLINK", "@org___scala_ds_lang_and_scala_ds_compiler__NEVERLINK", "@org___scala_ds_lang___modules_and_scala_ds_xml__2___12__NEVERLINK", "@org___scala_ds_lang_and_scala_ds_reflect__NEVERLINK"],
"org.eclipse.aether:aether-transport-file": ["@org___eclipse___aether_and_aether_ds_util__NEVERLINK", "@org___eclipse___aether_and_aether_ds_spi__NEVERLINK", "@org___eclipse___aether_and_aether_ds_api__NEVERLINK"],
}
def dependencies(of, neverlink):
    """Return the dependency labels recorded for the maven coordinate `of`.

    Looks `of` up in the autogenerated tables above: the `__NEVERLINK`
    label variants when `neverlink` is truthy, the regular labels
    otherwise.  Raises KeyError for coordinates not in the tables.
    NOTE(review): this file is autogenerated by bazel-deps (see header);
    any hand edit here will be clobbered on regeneration.
    """
    return _lookup__NEVERLINK[of] if neverlink else _lookup[of]
| 209.89011 | 899 | 0.867382 | 2,845 | 19,100 | 4.590861 | 0.045694 | 0.082536 | 0.05972 | 0.081464 | 0.980553 | 0.943649 | 0.929178 | 0.875048 | 0.824975 | 0.778654 | 0 | 0.020411 | 0.030419 | 19,100 | 90 | 900 | 212.222222 | 0.684864 | 0.004136 | 0 | 0.095238 | 1 | 0 | 0.895935 | 0.88973 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011905 | false | 0 | 0 | 0.011905 | 0.02381 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10 |
7ed13600da43bc088fec0a39e90d4fe1a57f344b | 19,582 | py | Python | pyGPs/Core/opt.py | Corentin-LF/pyGPs | b9d36777584cd53756bd4311c3c20ea52e945451 | [
"BSD-2-Clause"
] | 196 | 2015-03-02T06:42:52.000Z | 2022-03-01T03:32:00.000Z | pyGPs/Core/opt.py | Corentin-LF/pyGPs | b9d36777584cd53756bd4311c3c20ea52e945451 | [
"BSD-2-Clause"
] | 36 | 2015-01-16T12:34:46.000Z | 2022-03-14T06:32:23.000Z | pyGPs/Core/opt.py | Corentin-LF/pyGPs | b9d36777584cd53756bd4311c3c20ea52e945451 | [
"BSD-2-Clause"
] | 77 | 2015-01-30T16:25:34.000Z | 2021-01-03T18:25:24.000Z | from __future__ import division
from __future__ import absolute_import
from builtins import range
from past.utils import old_div
from builtins import object
#================================================================================
# Marion Neumann [marion dot neumann at uni-bonn dot de]
# Daniel Marthaler [dan dot marthaler at gmail dot com]
# Shan Huang [shan dot huang at iais dot fraunhofer dot de]
# Kristian Kersting [kristian dot kersting at cs dot tu-dortmund dot de]
#
# This file is part of pyGPs.
# The software package is released under the BSD 2-Clause (FreeBSD) License.
#
# Copyright (c) by
# Marion Neumann, Daniel Marthaler, Shan Huang & Kristian Kersting, 18/02/2014
#================================================================================
# This is a object-oriented python implementation of gpml functionality
# (Copyright (c) by Carl Edward Rasmussen and Hannes Nickisch, 2011-02-18).
# based on the functional-version of python implementation
# (Copyright (c) by Marion Neumann and Daniel Marthaler, 20/05/2013)
#
# Copyright (c) by Marion Neumann and Shan Huang, 30/09/2013
import numpy as np
from scipy.optimize import fmin_bfgs as bfgs
from scipy.optimize import fmin_cg as cg
from scipy.optimize import fmin as simplex
from pyGPs.Optimization import minimize, scg
from copy import deepcopy
import logging
class Optimizer(object):
    """Base class for hyperparameter optimizers.

    Wraps a GP model and exposes its negative log marginal likelihood
    (and gradients) as plain functions of one flat hyperparameter array
    laid out as meanfunc.hyp + covfunc.hyp + likfunc.hyp.
    """

    def __init__(self, model=None, searchConfig = None):
        self.model = model
        self.logger = logging.getLogger(__name__)

    def findMin(self, x, y, numIters):
        """Minimise the negative log marginal likelihood.

        optimalHyp, funcValue = findMin(x, y, numIters)

        funcValue is the smallest negative log marginal likelihood seen
        during optimization and optimalHyp the flattened numpy array of
        hyperparameters (meanfunc.hyp, covfunc.hyp, likfunc.hyp order)
        achieving it.  Subclasses implement the actual search; advanced
        restart strategies are configured by constructing the Optimizer
        with a searchConfig (an instance of pyGPs.Optimization.conf; see
        pyGPs.Core.gp.GP.setOptimizer and the online Optimizers docs).
        """
        pass

    def _nlml(self, hypInArray):
        """Negative log marginal likelihood at the given hyperparameters."""
        self._apply_in_objects(hypInArray)
        # der=False: the model returns (nlZ, <second value>); only the
        # scalar nlZ is needed here.
        nlZ, _ = self.model.getPosterior(der=False)
        return nlZ

    def _dnlml(self, hypInArray):
        """Gradient of the negative log marginal likelihood."""
        self._apply_in_objects(hypInArray)
        _, dnlZ, _post = self.model.getPosterior()
        # Gradient in the same flat layout as the input array.
        return np.array(dnlZ.mean + dnlZ.cov + dnlZ.lik)

    def _nlzAnddnlz(self, hypInArray):
        """Value and gradient of the NLML in a single pass (faster)."""
        self._apply_in_objects(hypInArray)
        nlZ, dnlZ, _post = self.model.getPosterior()
        return nlZ, np.array(dnlZ.mean + dnlZ.cov + dnlZ.lik)

    def _convert_to_array(self):
        """Flatten all model hyperparameters into one numpy array."""
        return np.array(
            self.model.meanfunc.hyp + self.model.covfunc.hyp + self.model.likfunc.hyp
        )

    def _apply_in_objects(self, hypInArray):
        """Write the values of a flat hyperparameter array back into the model."""
        values = hypInArray.tolist()
        n_mean = len(self.model.meanfunc.hyp)
        n_cov = len(self.model.covfunc.hyp)
        self.model.meanfunc.hyp = values[:n_mean]
        self.model.covfunc.hyp = values[n_mean:n_mean + n_cov]
        self.model.likfunc.hyp = values[n_mean + n_cov:]
class Simplex(Optimizer):
    '''Downhill simplex algorithm by Nelder-Mead'''

    def __init__(self, model, searchConfig = None):
        super(Simplex, self).__init__()
        self.model = model
        self.searchConfig = searchConfig
        self.trailsCounter = 0   # number of optimization trails attempted
        self.errorCounter = 0    # number of trails that raised an exception

    def findMin(self, x, y, numIters = 100):
        """Minimise the negative log marginal likelihood with Nelder-Mead.

        Returns (optimalHyp, funcValue): the flat hyperparameter array
        with the smallest NLML found and that NLML value.  When a
        searchConfig is set, restarts from random points inside the
        configured ranges until num_restarts is exhausted or funcValue
        drops below min_threshold.
        Raises Exception when the first run fails with no searchConfig,
        or when over half of the configured restarts fail.
        """
        hypInArray = self._convert_to_array()
        # Seed the best-so-far values up front.  Previously these names
        # were only bound inside the try below, so a failed first run with
        # a searchConfig left them unbound; the resulting NameError in the
        # restart loop was swallowed by the bare `except` and miscounted
        # as a failed trail.  np.inf guarantees the first successful
        # restart is always accepted.
        optimalHyp = deepcopy(hypInArray)
        funcValue = np.inf
        try:
            opt = simplex(self._nlml, hypInArray, maxiter=numIters, disp=False, full_output=True)
            optimalHyp = deepcopy(opt[0])
            funcValue = opt[1]
            warnFlag = opt[4]
            if warnFlag == 1:
                self.logger.warning("Maximum number of function evaluations made")
            elif warnFlag == 2:
                self.logger.warning("Maximum number of iterations exceeded.")
        except Exception:
            # A failed initial run is only tolerable when random restarts
            # below can still find a solution.
            self.errorCounter += 1
            if not self.searchConfig:
                raise Exception("Can not learn hyperparamters using Nelder-Mead.")
        self.trailsCounter += 1

        if self.searchConfig:
            searchRange = self.searchConfig.meanRange + self.searchConfig.covRange + self.searchConfig.likRange
            if not (self.searchConfig.num_restarts or self.searchConfig.min_threshold):
                raise Exception('Specify at least one of the stop conditions')
            while True:
                self.trailsCounter += 1  # increase counter
                # Random re-initialization of every hyperparameter inside
                # its configured search range.
                for i in range(hypInArray.shape[0]):
                    hypInArray[i] = np.random.uniform(low=searchRange[i][0], high=searchRange[i][1])
                try:
                    thisopt = simplex(self._nlml, hypInArray, maxiter=numIters, disp=False, full_output=True)
                    # Keep this trail's result if it beats the best so far.
                    if thisopt[1] < funcValue:
                        funcValue = thisopt[1]
                        optimalHyp = thisopt[0]
                except Exception:
                    self.errorCounter += 1
                    if self.searchConfig.num_restarts and self.errorCounter > old_div(self.searchConfig.num_restarts, 2):
                        self.logger.warning("[Simplex] %d out of %d trails failed during optimization", self.errorCounter, self.trailsCounter)
                        raise Exception("Over half of the trails failed for Nelder-Mead")
                if self.searchConfig.num_restarts and self.trailsCounter > self.searchConfig.num_restarts-1:
                    # Exhausted the configured number of restarts.
                    self.logger.warning("[Simplex] %d out of %d trails failed during optimization", self.errorCounter, self.trailsCounter)
                    return optimalHyp, funcValue
                if self.searchConfig.min_threshold and funcValue <= self.searchConfig.min_threshold:
                    # Reached the requested minimal value.
                    self.logger.warning("[Simplex] %d out of %d trails failed during optimization", self.errorCounter, self.trailsCounter)
                    return optimalHyp, funcValue
        return optimalHyp, funcValue
class CG(Optimizer):
'''Conjugent gradient'''
def __init__(self, model, searchConfig = None):
    """Set up a conjugate-gradient optimizer for the given GP model,
    optionally with a random-restart search configuration."""
    super(CG, self).__init__()
    # Trail bookkeeping used by findMin's restart logic.
    self.trailsCounter = 0
    self.errorCounter = 0
    self.model = model
    self.searchConfig = searchConfig
def findMin(self, x, y, numIters = 100):
meanfunc = self.model.meanfunc
covfunc = self.model.covfunc
likfunc = self.model.likfunc
inffunc = self.model.inffunc
hypInArray = self._convert_to_array()
try:
opt = cg(self._nlml, hypInArray, self._dnlml, maxiter=numIters, disp=False, full_output=True)
optimalHyp = deepcopy(opt[0])
funcValue = opt[1]
warnFlag = opt[4]
if warnFlag == 1:
self.logger.warning("Maximum number of iterations exceeded.")
elif warnFlag == 2:
self.logger.warning("Gradient and/or function calls not changing.")
except:
self.errorCounter += 1
if not self.searchConfig:
raise Exception("Can not learn hyperparamters using conjugate gradient.")
self.trailsCounter += 1
if self.searchConfig:
searchRange = self.searchConfig.meanRange + self.searchConfig.covRange + self.searchConfig.likRange
if not (self.searchConfig.num_restarts or self.searchConfig.min_threshold):
raise Exception('Specify at least one of the stop conditions')
while True:
self.trailsCounter += 1 # increase counter
for i in range(hypInArray.shape[0]): # random init of hyp
hypInArray[i]= np.random.uniform(low=searchRange[i][0], high=searchRange[i][1])
# value this time is better than optiaml min value
try:
thisopt = cg(self._nlml, hypInArray, self._dnlml, maxiter=numIters, disp=False, full_output=True)
if thisopt[1] < funcValue:
funcValue = thisopt[1]
optimalHyp = thisopt[0]
except:
self.errorCounter += 1
if self.searchConfig.num_restarts and self.errorCounter > old_div(self.searchConfig.num_restarts,2):
self.logger.warning("[CG] %d out of %d trails failed during optimization", self.errorCounter, self.trailsCounter)
raise Exception("Over half of the trails failed for conjugate gradient")
if self.searchConfig.num_restarts and self.trailsCounter > self.searchConfig.num_restarts-1: # if exceed num_restarts
self.logger.warning("[CG] %d out of %d trails failed during optimization", self.errorCounter, self.trailsCounter)
return optimalHyp, funcValue
if self.searchConfig.min_threshold and funcValue <= self.searchConfig.min_threshold: # reach provided mininal
self.logger.warning("[CG] %d out of %d trails failed during optimization", self.errorCounter, self.trailsCounter)
return optimalHyp, funcValue
return optimalHyp, funcValue
class BFGS(Optimizer):
'''quasi-Newton method of Broyden, Fletcher, Goldfarb, and Shanno (BFGS)'''
def __init__(self, model, searchConfig = None):
super(BFGS, self).__init__()
self.model = model
self.searchConfig = searchConfig
self.trailsCounter = 0
self.errorCounter = 0
def findMin(self, x, y, numIters = 100):
meanfunc = self.model.meanfunc
covfunc = self.model.covfunc
likfunc = self.model.likfunc
inffunc = self.model.inffunc
hypInArray = self._convert_to_array()
try:
opt = bfgs(self._nlml, hypInArray, self._dnlml, maxiter=numIters, disp=False, full_output=True)
optimalHyp = deepcopy(opt[0])
funcValue = opt[1]
warnFlag = opt[6]
if warnFlag == 1:
self.logger.warning("Maximum number of iterations exceeded.")
elif warnFlag == 2:
self.logger.warning("Gradient and/or function calls not changing.")
except:
self.errorCounter += 1
if not self.searchConfig:
raise Exception("Can not learn hyperparamters using BFGS.")
self.trailsCounter += 1
if self.searchConfig:
searchRange = self.searchConfig.meanRange + self.searchConfig.covRange + self.searchConfig.likRange
if not (self.searchConfig.num_restarts or self.searchConfig.min_threshold):
raise Exception('Specify at least one of the stop conditions')
while True:
self.trailsCounter += 1 # increase counter
for i in range(hypInArray.shape[0]): # random init of hyp
hypInArray[i]= np.random.uniform(low=searchRange[i][0], high=searchRange[i][1])
# value this time is better than optiaml min value
try:
thisopt = bfgs(self._nlml, hypInArray, self._dnlml, maxiter=numIters, disp=False, full_output=True)
if thisopt[1] < funcValue:
funcValue = thisopt[1]
optimalHyp = thisopt[0]
except:
self.errorCounter += 1
if self.searchConfig.num_restarts and self.errorCounter > old_div(self.searchConfig.num_restarts,2):
self.logger.warning("[BFGS] %d out of %d trails failed during optimization", self.errorCounter, self.trailsCounter)
raise Exception("Over half of the trails failed for BFGS")
if self.searchConfig.num_restarts and self.trailsCounter > self.searchConfig.num_restarts-1: # if exceed num_restarts
self.logger.warning("[BFGS] %d out of %d trails failed during optimization", self.errorCounter, self.trailsCounter)
return optimalHyp, funcValue
if self.searchConfig.min_threshold and funcValue <= self.searchConfig.min_threshold: # reach provided mininal
self.logger.warning("[BFGS] %d out of %d trails failed during optimization", self.errorCounter, self.trailsCounter)
return optimalHyp, funcValue
return optimalHyp, funcValue
class Minimize(Optimizer):
'''minimize by Carl Rasmussen (python implementation of "minimize" in GPML)'''
def __init__(self, model, searchConfig = None):
super(Minimize, self).__init__()
self.model = model
self.searchConfig = searchConfig
self.trailsCounter = 0
self.errorCounter = 0
def findMin(self, x, y, numIters = 200):
meanfunc = self.model.meanfunc
covfunc = self.model.covfunc
likfunc = self.model.likfunc
inffunc = self.model.inffunc
hypInArray = self._convert_to_array()
try:
# opt = minimize.run(self._nlzAnddnlz, hypInArray, length=-numIters)
opt = minimize.run(self._nlzAnddnlz, hypInArray, length=numIters)
optimalHyp = deepcopy(opt[0])
funcValue = opt[1][-1]
self.logger.warning("Number of line searches %g", opt[2])
except:
self.errorCounter += 1
if not self.searchConfig:
raise Exception("Can not learn hyperparamters using minimize.")
self.trailsCounter += 1
if self.searchConfig:
searchRange = self.searchConfig.meanRange + self.searchConfig.covRange + self.searchConfig.likRange
if not (self.searchConfig.num_restarts or self.searchConfig.min_threshold):
raise Exception('Specify at least one of the stop conditions')
while True:
self.trailsCounter += 1 # increase counter
for i in range(hypInArray.shape[0]): # random init of hyp
hypInArray[i]= np.random.uniform(low=searchRange[i][0], high=searchRange[i][1])
# value this time is better than optiaml min value
try:
# thisopt = minimize.run(self._nlzAnddnlz, hypInArray, length=-numIters)
thisopt = minimize.run(self._nlzAnddnlz, hypInArray, length=numIters)
self.logger.warning("Number of line searches %g", thisopt[2])
if thisopt[1][-1] < funcValue:
funcValue = thisopt[1][-1]
optimalHyp = thisopt[0]
except:
self.errorCounter += 1
if self.searchConfig.num_restarts and self.errorCounter > self.searchConfig.num_restarts/2:
self.logger.warning("[Minimize] %d out of %d trails failed during optimization", self.errorCounter, self.trailsCounter)
raise Exception("Over half of the trails failed for minimize")
if self.searchConfig.num_restarts and self.trailsCounter > self.searchConfig.num_restarts-1: # if exceed num_restarts
self.logger.warning("[Minimize] %d out of %d trails failed during optimization", self.errorCounter, self.trailsCounter)
return optimalHyp, funcValue
if self.searchConfig.min_threshold and funcValue <= self.searchConfig.min_threshold: # reach provided mininal
self.logger.warning("[Minimize] %d out of %d trails failed during optimization", self.errorCounter, self.trailsCounter)
return optimalHyp, funcValue
return optimalHyp, funcValue
class SCG(Optimizer):
'''Scaled conjugent gradient (faster than CG)'''
def __init__(self, model, searchConfig = None):
super(SCG, self).__init__()
self.model = model
self.searchConfig = searchConfig
self.trailsCounter = 0
self.errorCounter = 0
def findMin(self, x, y, numIters = 100):
meanfunc = self.model.meanfunc
covfunc = self.model.covfunc
likfunc = self.model.likfunc
inffunc = self.model.inffunc
hypInArray = self._convert_to_array()
try:
opt = scg.run(self._nlzAnddnlz, hypInArray, niters = numIters)
optimalHyp = deepcopy(opt[0])
funcValue = opt[1][-1]
except:
self.errorCounter += 1
if not self.searchConfig:
raise Exception("Can not learn hyperparamters using Scaled conjugate gradient.")
self.trailsCounter += 1
if self.searchConfig:
searchRange = self.searchConfig.meanRange + self.searchConfig.covRange + self.searchConfig.likRange
if not (self.searchConfig.num_restarts or self.searchConfig.min_threshold):
raise Exception('Specify at least one of the stop conditions')
while True:
self.trailsCounter += 1 # increase counter
for i in range(hypInArray.shape[0]): # random init of hyp
hypInArray[i]= np.random.uniform(low=searchRange[i][0], high=searchRange[i][1])
# value this time is better than optiaml min value
try:
thisopt = scg.run(self._nlzAnddnlz, hypInArray)
if thisopt[1][-1] < funcValue:
funcValue = thisopt[1][-1]
optimalHyp = thisopt[0]
except:
self.errorCounter += 1
if self.searchConfig.num_restarts and self.errorCounter > old_div(self.searchConfig.num_restarts,2):
self.logger.warning("[SCG] %d out of %d trails failed during optimization", self.errorCounter, self.trailsCounter)
raise Exception("Over half of the trails failed for Scaled conjugate gradient")
if self.searchConfig.num_restarts and self.trailsCounter > self.searchConfig.num_restarts-1: # if exceed num_restarts
self.logger.warning("[SCG] %d out of %d trails failed during optimization", self.errorCounter, self.trailsCounter)
return optimalHyp, funcValue
if self.searchConfig.min_threshold and funcValue <= self.searchConfig.min_threshold: # reach provided mininal
self.logger.warning("[SCG] %d out of %d trails failed during optimization" , self.errorCounter, self.trailsCounter)
return optimalHyp, funcValue
return optimalHyp, funcValue
| 50.994792 | 141 | 0.616331 | 2,166 | 19,582 | 5.49446 | 0.128809 | 0.09411 | 0.039913 | 0.056718 | 0.812873 | 0.791026 | 0.783211 | 0.762205 | 0.732628 | 0.725233 | 0 | 0.010454 | 0.291696 | 19,582 | 383 | 142 | 51.127937 | 0.847585 | 0.14641 | 0 | 0.761905 | 0 | 0 | 0.109289 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057823 | false | 0.003401 | 0.040816 | 0 | 0.183673 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
7edb9d69ad7794bb8c7ced78d739f546ac36e252 | 3,142 | py | Python | test/test_schedule.py | HackUCF/Alexa-Skill | 123874cc0d5bb9e1e6892d80710c956315998e6a | [
"MIT"
] | 1 | 2019-09-13T14:12:27.000Z | 2019-09-13T14:12:27.000Z | test/test_schedule.py | HackUCF/Alexa-Skill | 123874cc0d5bb9e1e6892d80710c956315998e6a | [
"MIT"
] | 1 | 2021-06-02T00:21:06.000Z | 2021-06-02T00:21:06.000Z | test/test_schedule.py | HackUCF/Alexa-Skill | 123874cc0d5bb9e1e6892d80710c956315998e6a | [
"MIT"
] | 1 | 2019-09-13T14:13:38.000Z | 2019-09-13T14:13:38.000Z | from schedule import schedule
from datetime import datetime
import arrow
def test_get_next_event():
"""
get_next_meeting() should return a python dict containing the following
information, so we are going to check typing.
{
'name': 'General Meeting'
'date': '2019-11-01T00:00:00+00:00' (python date to string)
}
"""
result = schedule.get_next_event()
if result:
assert result['name'], 'Result has no `name` key'
assert result['date'], 'Result has not `date` key'
assert isinstance(result['name'], str), 'name is not a string'
assert isinstance(result['date'], arrow.Arrow), 'date is not a date'
def test_get_next_meeting():
"""
get_next_meeting() should return a python dict containing the following
information, so we are going to check typing.
{
'name': 'General Meeting'
'date': '2019-11-01T00:00:00+00:00' (python date to string)
}
"""
result = schedule.get_next_meeting()
if result:
assert result['name'], 'Result has no `name` key'
assert result['date'], 'Result has not `date` key'
assert isinstance(result['name'], str), 'name is not a string'
assert isinstance(result['date'], arrow.Arrow), 'date is not a date'
def test_get_next_workshop():
"""
get_next_workshop() should return a python dict containing the following
information, so we are going to check typing.
{
'name': 'Red Team Workshop'
'date': '2019-11-01T00:00:00+00:00' (python date to string)
}
"""
result = schedule.get_next_workshop()
if result:
assert result['name'], 'Result has no `name` key'
assert result['date'], 'Result has not `date` key'
assert isinstance(result['name'], str), 'name is not a string'
assert isinstance(result['date'], arrow.Arrow), 'date is not a date'
def test_get_next_ops_meeting():
"""
get_next_ops_meeting() should return a python dict containing the following
information, so we are going to check typing.
{
'name': 'Operations Meeting'
'date': '2019-11-01T00:00:00+00:00' (python date to string)
}
"""
result = schedule.get_next_workshop()
if result:
assert result['name'], 'Result has no `name` key'
assert result['date'], 'Result has not `date` key'
assert isinstance(result['name'], str), 'name is not a string'
assert isinstance(result['date'], arrow.Arrow), 'date is not a date'
def test_get_next_competition():
"""
get_next_competition() should return a python dict containing the following
information, so we are going to check typing.
{
'name': 'ISTS'
'date': '2019-11-01T00:00:00+00:00' (python date to string)
}
"""
result = schedule.get_next_competition()
if result:
assert result['name'], 'Result has no `name` key'
assert result['date'], 'Result has not `date` key'
assert isinstance(result['name'], str), 'name is not a string'
assert isinstance(result['date'], arrow.Arrow), 'date is not a date'
| 31.42 | 79 | 0.6324 | 427 | 3,142 | 4.566745 | 0.12178 | 0.053846 | 0.030769 | 0.035897 | 0.890769 | 0.890769 | 0.890769 | 0.890769 | 0.890769 | 0.890769 | 0 | 0.037943 | 0.245067 | 3,142 | 99 | 80 | 31.737374 | 0.784148 | 0.345958 | 0 | 0.710526 | 0 | 0 | 0.274082 | 0 | 0 | 0 | 0 | 0 | 0.526316 | 1 | 0.131579 | false | 0 | 0.078947 | 0 | 0.210526 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
7d3832c11cea14ee21d68ffd57bc8fb65ee53c25 | 192 | py | Python | torchblocks/metrics/__init__.py | Raiselimit/TorchBlocks | a5baecb9a2470ff175087475630f2b7db3f7ef51 | [
"MIT"
] | 1 | 2020-10-19T01:24:13.000Z | 2020-10-19T01:24:13.000Z | torchblocks/metrics/__init__.py | Raiselimit/TorchBlocks | a5baecb9a2470ff175087475630f2b7db3f7ef51 | [
"MIT"
] | null | null | null | torchblocks/metrics/__init__.py | Raiselimit/TorchBlocks | a5baecb9a2470ff175087475630f2b7db3f7ef51 | [
"MIT"
] | null | null | null | from torchblocks.metrics.classification import *
from torchblocks.metrics.regression import *
from torchblocks.metrics.utils import *
from torchblocks.metrics.sequence_labeling import *
| 32 | 52 | 0.822917 | 21 | 192 | 7.47619 | 0.428571 | 0.382166 | 0.56051 | 0.535032 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.114583 | 192 | 5 | 53 | 38.4 | 0.923529 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
7d3a65c4b21b4cb55cdaf6c98b7a704145095a92 | 17,153 | py | Python | regular_language/unit_tests/test_ast_AST_pullup_only_child.py | ShoYamanishi/nlpregex | 795b36d5a2fad8bc25264b2093ffa9c3723b282b | [
"MIT"
] | 1 | 2021-12-03T07:20:18.000Z | 2021-12-03T07:20:18.000Z | regular_language/unit_tests/test_ast_AST_pullup_only_child.py | ShoYamanishi/nlpregex | 795b36d5a2fad8bc25264b2093ffa9c3723b282b | [
"MIT"
] | null | null | null | regular_language/unit_tests/test_ast_AST_pullup_only_child.py | ShoYamanishi/nlpregex | 795b36d5a2fad8bc25264b2093ffa9c3723b282b | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Unit Tests for ast.AST.pullup_only_child() """
import unittest
import nlpregex.regular_language.ast
from nlpregex.regular_language.unit_tests.test_ast_helper import test_AST_helper
class test_ast_AST_pullup_only_child( unittest.TestCase ):
def __init__( self, *args, **kwargs ):
unittest.TestCase.__init__(self, *args, **kwargs)
self.helper = test_AST_helper()
def construct_ast_from_spec( self, spec01 ):
return self.helper.construct_ast_from_spec(spec01)
def display_tree( self, ast01 ):
return self.helper.display_tree(ast01)
def compare_specs( self, spec01, spec02 ):
return self.helper.compare_specs( spec01, spec02 )
def test_0001(self):
spec01 = 'S_001:E_002'
node_spec01 = 'S_001'
node_spec02 = 'E_002'
spec_expected_01 = 'E_002'
ast01 = self.construct_ast_from_spec(spec01)
node01 = self.helper.get_node(ast01, node_spec01)
node02 = self.helper.get_node(ast01, node_spec02)
node01.append_out_token_pre ('token01')
node01.append_out_token_post('token02')
node02.append_out_token_pre ('token03')
node02.append_out_token_post('token04')
ast01.pullup_only_child(node01)
spec02 = self.display_tree(ast01)
self.assertEqual( self.compare_specs(spec_expected_01, spec02), True )
self.assertEqual( len(node01.out_token_pre), 2 )
self.assertEqual( node01.out_token_pre[0], 'token03' )
self.assertEqual( node01.out_token_pre[1], 'token01' )
self.assertEqual( len(node01.out_token_post), 2 )
self.assertEqual( node01.out_token_post[0], 'token04' )
self.assertEqual( node01.out_token_post[1], 'token02' )
def test_0002(self):
spec01 = '''
S_001:S_002
S_002:T_003
'''
node_spec01 = 'S_001'
node_spec02 = 'S_002'
spec_expected_01 = 'S_002:T_003'
ast01 = self.construct_ast_from_spec(spec01)
node01 = self.helper.get_node(ast01, node_spec01)
node02 = self.helper.get_node(ast01, node_spec02)
node01.append_out_token_pre ('token01')
node01.append_out_token_post('token02')
node02.append_out_token_pre ('token03')
node02.append_out_token_post('token04')
ast01.pullup_only_child(node01)
spec02 = self.display_tree(ast01)
self.assertEqual( self.compare_specs(spec_expected_01, spec02), True )
self.assertEqual( len(node01.out_token_pre), 2 )
self.assertEqual( node01.out_token_pre[0], 'token03' )
self.assertEqual( node01.out_token_pre[1], 'token01' )
self.assertEqual( len(node01.out_token_post), 2 )
self.assertEqual( node01.out_token_post[0], 'token04' )
self.assertEqual( node01.out_token_post[1], 'token02' )
def test_0003(self):
spec01 = '''
S_001:S_002
S_002:T_003 T_004 T_005
'''
node_spec01 = 'S_001'
node_spec02 = 'S_002'
spec_expected_01 = 'S_002:T_003 T_004 T_005'
ast01 = self.construct_ast_from_spec(spec01)
node01 = self.helper.get_node(ast01, node_spec01)
node02 = self.helper.get_node(ast01, node_spec02)
node01.append_out_token_pre ('token01')
node01.append_out_token_post('token02')
node02.append_out_token_pre ('token03')
node02.append_out_token_post('token04')
ast01.pullup_only_child(node01)
spec02 = self.display_tree(ast01)
self.assertEqual( self.compare_specs(spec_expected_01, spec02), True )
self.assertEqual( len(node01.out_token_pre), 2 )
self.assertEqual( node01.out_token_pre[0], 'token03' )
self.assertEqual( node01.out_token_pre[1], 'token01' )
self.assertEqual( len(node01.out_token_post), 2 )
self.assertEqual( node01.out_token_post[0], 'token04' )
self.assertEqual( node01.out_token_post[1], 'token02' )
def test_0004(self):
spec01 = '''
S_001:S_002
S_002:|_003 +_004 ?_005
|_003:T_006 T_007 T_008
+_004:T_009 T_010 T_011
?_005:T_012 E_013 T_014
'''
node_spec01 = 'S_001'
node_spec02 = 'S_002'
spec_expected_01 = '''
S_002:|_003 +_004 ?_005
|_003:T_006 T_007 T_008
+_004:T_009 T_010 T_011
?_005:T_012 E_013 T_014
'''
ast01 = self.construct_ast_from_spec(spec01)
node01 = self.helper.get_node(ast01, node_spec01)
node02 = self.helper.get_node(ast01, node_spec02)
node01.append_out_token_pre ('token01')
node01.append_out_token_post('token02')
node02.append_out_token_pre ('token03')
node02.append_out_token_post('token04')
ast01.pullup_only_child(node01)
spec02 = self.display_tree(ast01)
self.assertEqual( self.compare_specs(spec_expected_01, spec02), True )
self.assertEqual( len(node01.out_token_pre), 2 )
self.assertEqual( node01.out_token_pre[0], 'token03' )
self.assertEqual( node01.out_token_pre[1], 'token01' )
self.assertEqual( len(node01.out_token_post), 2 )
self.assertEqual( node01.out_token_post[0], 'token04' )
self.assertEqual( node01.out_token_post[1], 'token02' )
def test_0005(self):
spec01 = '''
S_001:S_002
S_002:E_003
'''
node_spec01 = 'S_002'
node_spec02 = 'E_003'
spec_expected_01 = '''
S_001:E_003
'''
ast01 = self.construct_ast_from_spec(spec01)
node01 = self.helper.get_node(ast01, node_spec01)
node02 = self.helper.get_node(ast01, node_spec02)
node01.append_out_token_pre ('token01')
node01.append_out_token_post('token02')
node02.append_out_token_pre ('token03')
node02.append_out_token_post('token04')
ast01.pullup_only_child(node01)
spec02 = self.display_tree(ast01)
self.assertEqual( self.compare_specs(spec_expected_01, spec02), True )
self.assertEqual( len(node01.out_token_pre), 2 )
self.assertEqual( node01.out_token_pre[0], 'token03' )
self.assertEqual( node01.out_token_pre[1], 'token01' )
self.assertEqual( len(node01.out_token_post), 2 )
self.assertEqual( node01.out_token_post[0], 'token04' )
self.assertEqual( node01.out_token_post[1], 'token02' )
def test_0006(self):
spec01 = '''
S_001:S_002
S_002:S_003
S_003:T_004
'''
node_spec01 = 'S_002'
node_spec02 = 'S_003'
spec_expected_01 = '''
S_001:S_003
S_003:T_004
'''
ast01 = self.construct_ast_from_spec(spec01)
node01 = self.helper.get_node(ast01, node_spec01)
node02 = self.helper.get_node(ast01, node_spec02)
node01.append_out_token_pre ('token01')
node01.append_out_token_post('token02')
node02.append_out_token_pre ('token03')
node02.append_out_token_post('token04')
ast01.pullup_only_child(node01)
spec02 = self.display_tree(ast01)
self.assertEqual( self.compare_specs(spec_expected_01, spec02), True )
self.assertEqual( len(node01.out_token_pre), 2 )
self.assertEqual( node01.out_token_pre[0], 'token03' )
self.assertEqual( node01.out_token_pre[1], 'token01' )
self.assertEqual( len(node01.out_token_post), 2 )
self.assertEqual( node01.out_token_post[0], 'token04' )
self.assertEqual( node01.out_token_post[1], 'token02' )
def test_0007(self):
spec01 = '''
S_001:S_002
S_002:S_003
S_003:T_004 T_005 T_006
'''
node_spec01 = 'S_002'
node_spec02 = 'S_003'
spec_expected_01 = '''
S_001:S_003
S_003:T_004 T_005 T_006
'''
ast01 = self.construct_ast_from_spec(spec01)
node01 = self.helper.get_node(ast01, node_spec01)
node02 = self.helper.get_node(ast01, node_spec02)
node01.append_out_token_pre ('token01')
node01.append_out_token_post('token02')
node02.append_out_token_pre ('token03')
node02.append_out_token_post('token04')
ast01.pullup_only_child(node01)
spec02 = self.display_tree(ast01)
self.assertEqual( self.compare_specs(spec_expected_01, spec02), True )
self.assertEqual( len(node01.out_token_pre), 2 )
self.assertEqual( node01.out_token_pre[0], 'token03' )
self.assertEqual( node01.out_token_pre[1], 'token01' )
self.assertEqual( len(node01.out_token_post), 2 )
self.assertEqual( node01.out_token_post[0], 'token04' )
self.assertEqual( node01.out_token_post[1], 'token02' )
def test_0008(self):
spec01 = '''
S_001:S_002
S_002:S_003
S_003:|_004 +_005 ?_006
|_004:T_007 T_008 T_009
+_005:T_010 T_011 T_012
?_006:T_013 E_014 T_015
'''
node_spec01 = 'S_002'
node_spec02 = 'S_003'
spec_expected_01 = '''
S_001:S_003
S_003:|_004 +_005 ?_006
|_004:T_007 T_008 T_009
+_005:T_010 T_011 T_012
?_006:T_013 E_014 T_015
'''
ast01 = self.construct_ast_from_spec(spec01)
node01 = self.helper.get_node(ast01, node_spec01)
node02 = self.helper.get_node(ast01, node_spec02)
node01.append_out_token_pre ('token01')
node01.append_out_token_post('token02')
node02.append_out_token_pre ('token03')
node02.append_out_token_post('token04')
ast01.pullup_only_child(node01)
spec02 = self.display_tree(ast01)
self.assertEqual( self.compare_specs(spec_expected_01, spec02), True )
self.assertEqual( len(node01.out_token_pre), 2 )
self.assertEqual( node01.out_token_pre[0], 'token03' )
self.assertEqual( node01.out_token_pre[1], 'token01' )
self.assertEqual( len(node01.out_token_post), 2 )
self.assertEqual( node01.out_token_post[0], 'token04' )
self.assertEqual( node01.out_token_post[1], 'token02' )
def test_0009(self):
spec01 = '''
S_001:T_002 S_003 T_004
S_003:S_004
S_004:|_005 +_006 ?_007
|_005:T_008 T_009 T_010
+_006:T_011 T_012 T_013
?_007:T_014 E_015 T_016
'''
node_spec01 = 'S_003'
node_spec02 = 'S_004'
spec_expected_01 = '''
S_001:T_002 S_004 T_004
S_004:|_005 +_006 ?_007
|_005:T_008 T_009 T_010
+_006:T_011 T_012 T_013
?_007:T_014 E_015 T_016
'''
ast01 = self.construct_ast_from_spec(spec01)
node01 = self.helper.get_node(ast01, node_spec01)
node02 = self.helper.get_node(ast01, node_spec02)
node01.append_out_token_pre ('token01')
node01.append_out_token_post('token02')
node02.append_out_token_pre ('token03')
node02.append_out_token_post('token04')
ast01.pullup_only_child(node01)
spec02 = self.display_tree(ast01)
self.assertEqual( self.compare_specs(spec_expected_01, spec02), True )
self.assertEqual( len(node01.out_token_pre), 2 )
self.assertEqual( node01.out_token_pre[0], 'token03' )
self.assertEqual( node01.out_token_pre[1], 'token01' )
self.assertEqual( len(node01.out_token_post), 2 )
self.assertEqual( node01.out_token_post[0], 'token04' )
self.assertEqual( node01.out_token_post[1], 'token02' )
def test_0010(self):
spec01 = '''
S_001:S_003 T_002 T_004
S_003:S_004
S_004:|_005 +_006 ?_007
|_005:T_008 T_009 T_010
+_006:T_011 T_012 T_013
?_007:T_014 E_015 T_016
'''
node_spec01 = 'S_003'
node_spec02 = 'S_004'
spec_expected_01 = '''
S_001:S_004 T_002 T_004
S_004:|_005 +_006 ?_007
|_005:T_008 T_009 T_010
+_006:T_011 T_012 T_013
?_007:T_014 E_015 T_016
'''
ast01 = self.construct_ast_from_spec(spec01)
node01 = self.helper.get_node(ast01, node_spec01)
node02 = self.helper.get_node(ast01, node_spec02)
node01.append_out_token_pre ('token01')
node01.append_out_token_post('token02')
node02.append_out_token_pre ('token03')
node02.append_out_token_post('token04')
ast01.pullup_only_child(node01)
spec02 = self.display_tree(ast01)
self.assertEqual( self.compare_specs(spec_expected_01, spec02), True )
self.assertEqual( len(node01.out_token_pre), 2 )
self.assertEqual( node01.out_token_pre[0], 'token03' )
self.assertEqual( node01.out_token_pre[1], 'token01' )
self.assertEqual( len(node01.out_token_post), 2 )
self.assertEqual( node01.out_token_post[0], 'token04' )
self.assertEqual( node01.out_token_post[1], 'token02' )
def test_0011(self):
spec01 = '''
S_001:T_002 T_004 S_003
S_003:S_004
S_004:|_005 +_006 ?_007
|_005:T_008 T_009 T_010
+_006:T_011 T_012 T_013
?_007:T_014 E_015 T_016
'''
node_spec01 = 'S_003'
node_spec02 = 'S_004'
spec_expected_01 = '''
S_001:T_002 T_004 S_004
S_004:|_005 +_006 ?_007
|_005:T_008 T_009 T_010
+_006:T_011 T_012 T_013
?_007:T_014 E_015 T_016
'''
ast01 = self.construct_ast_from_spec(spec01)
node01 = self.helper.get_node(ast01, node_spec01)
node02 = self.helper.get_node(ast01, node_spec02)
node01.append_out_token_pre ('token01')
node01.append_out_token_post('token02')
node02.append_out_token_pre ('token03')
node02.append_out_token_post('token04')
ast01.pullup_only_child(node01)
spec02 = self.display_tree(ast01)
self.assertEqual( self.compare_specs(spec_expected_01, spec02), True )
self.assertEqual( len(node01.out_token_pre), 2 )
self.assertEqual( node01.out_token_pre[0], 'token03' )
self.assertEqual( node01.out_token_pre[1], 'token01' )
self.assertEqual( len(node01.out_token_post), 2 )
self.assertEqual( node01.out_token_post[0], 'token04' )
self.assertEqual( node01.out_token_post[1], 'token02' )
def test_0012(self):
spec01 = '''
|_020:T_021 S_001 T_022
S_001:T_002 S_003 T_004
S_003:S_005
S_005:|_006 +_007 ?_008
|_006:T_009 T_010 T_011
+_007:T_012 T_013 T_014
?_008:T_015 E_016 T_017
'''
node_spec01 = 'S_003'
node_spec02 = 'S_005'
spec_expected_01 = '''
|_020:T_021 S_001 T_022
S_001:T_002 S_005 T_004
S_005:|_006 +_007 ?_008
|_006:T_009 T_010 T_011
+_007:T_012 T_013 T_014
?_008:T_015 E_016 T_017
'''
ast01 = self.construct_ast_from_spec(spec01)
node01 = self.helper.get_node(ast01, node_spec01)
node02 = self.helper.get_node(ast01, node_spec02)
node01.append_out_token_pre ('token01')
node01.append_out_token_post('token02')
node02.append_out_token_pre ('token03')
node02.append_out_token_post('token04')
ast01.pullup_only_child(node01)
spec02 = self.display_tree(ast01)
self.assertEqual( self.compare_specs(spec_expected_01, spec02), True )
self.assertEqual( len(node01.out_token_pre), 2 )
self.assertEqual( node01.out_token_pre[0], 'token03' )
self.assertEqual( node01.out_token_pre[1], 'token01' )
self.assertEqual( len(node01.out_token_post), 2 )
self.assertEqual( node01.out_token_post[0], 'token04' )
self.assertEqual( node01.out_token_post[1], 'token02' )
if __name__ == '__main__':
unittest.main()
| 34.101392 | 82 | 0.592083 | 2,196 | 17,153 | 4.229964 | 0.044627 | 0.103348 | 0.108515 | 0.124018 | 0.935623 | 0.92658 | 0.91646 | 0.911185 | 0.905157 | 0.905157 | 0 | 0.165376 | 0.307643 | 17,153 | 502 | 83 | 34.169323 | 0.61679 | 0.005014 | 0 | 0.878378 | 0 | 0 | 0.226732 | 0 | 0 | 0 | 0 | 0 | 0.227027 | 1 | 0.043243 | false | 0 | 0.008108 | 0.008108 | 0.062162 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
adea4b2a56a4e93269fb3015ee7858d4674c5ed0 | 256 | py | Python | bitmovin_api_sdk/encoding/manifests/dash/periods/adaptationsets/image/__init__.py | bitmovin/bitmovin-api-sdk-python | 5a85147669c84b8ca411cf2d4dbdddc92d85bbe7 | [
"MIT"
] | 11 | 2019-07-03T10:41:16.000Z | 2022-02-25T21:48:06.000Z | bitmovin_api_sdk/encoding/manifests/dash/periods/adaptationsets/image/__init__.py | bitmovin/bitmovin-api-sdk-python | 5a85147669c84b8ca411cf2d4dbdddc92d85bbe7 | [
"MIT"
] | 8 | 2019-11-23T00:01:25.000Z | 2021-04-29T12:30:31.000Z | bitmovin_api_sdk/encoding/manifests/dash/periods/adaptationsets/image/__init__.py | bitmovin/bitmovin-api-sdk-python | 5a85147669c84b8ca411cf2d4dbdddc92d85bbe7 | [
"MIT"
] | 13 | 2020-01-02T14:58:18.000Z | 2022-03-26T12:10:30.000Z | from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.image.image_api import ImageApi
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.image.image_adaptation_set_list_query_params import ImageAdaptationSetListQueryParams
| 85.333333 | 154 | 0.914063 | 32 | 256 | 7 | 0.5625 | 0.107143 | 0.133929 | 0.160714 | 0.625 | 0.625 | 0.625 | 0.625 | 0.625 | 0.625 | 0 | 0 | 0.03125 | 256 | 2 | 155 | 128 | 0.903226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
bc124a5158cb587cbae7acc1ebadea6ff496c959 | 96 | py | Python | script/theme/__init__.py | FellowHashbrown/virus.sh | 757e50fa402f63ab7161518dcec7e1441aa880dd | [
"MIT"
] | null | null | null | script/theme/__init__.py | FellowHashbrown/virus.sh | 757e50fa402f63ab7161518dcec7e1441aa880dd | [
"MIT"
] | null | null | null | script/theme/__init__.py | FellowHashbrown/virus.sh | 757e50fa402f63ab7161518dcec7e1441aa880dd | [
"MIT"
] | null | null | null | from script.theme.new_theme import new_theme
from script.theme.delete_theme import delete_theme
| 32 | 50 | 0.875 | 16 | 96 | 5 | 0.375 | 0.25 | 0.375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.083333 | 96 | 2 | 51 | 48 | 0.909091 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
cb00d86fe0f605d54d27ec2b99753ce55d312e30 | 4,663 | py | Python | archive/2016/projects/hard/todo/todo.py | YAtOff/python0 | b5af5004131d64dd52d42746eddb72b6c43a13c7 | [
"Apache-2.0"
] | 6 | 2017-11-08T14:04:39.000Z | 2019-03-24T22:11:04.000Z | archive/2016/projects/hard/todo/todo.py | YAtOff/python0 | b5af5004131d64dd52d42746eddb72b6c43a13c7 | [
"Apache-2.0"
] | null | null | null | archive/2016/projects/hard/todo/todo.py | YAtOff/python0 | b5af5004131d64dd52d42746eddb72b6c43a13c7 | [
"Apache-2.0"
] | 7 | 2015-10-27T09:04:58.000Z | 2019-03-03T14:18:26.000Z | # -*- coding: utf-8 -*-
# Known task categories a task's `tags` list may draw from.
TAGS = [
    'school',
    'home'
]

# Priority names mapped to their sort rank (lower number = more urgent).
PRIORITIES = {
    'urgent': 1,
    'normal': 2
}
def add_task(tasks, id, title, deadline, priority, tags):
    """Append a new, uncompleted task record to ``tasks`` (in place).

    ``id`` shadows the builtin of the same name; the parameter name is kept
    for backward compatibility with existing callers.

    >>> tasks = []
    >>> add_task(tasks, '1', 'Do my homework', '2016-04-30', 'normal', ['school'])
    >>> tasks == [{'id': '1', 'title': 'Do my homework', 'deadline': '2016-04-30', 'priority': 'normal', 'tags': ['school'], 'completed': False }]
    True
    """
    tasks.append({
        'id': id,
        'title': title,
        'deadline': deadline,
        'priority': priority,
        'tags': tags,
        'completed': False,
    })
def find_task(tasks, id):
    """Return the index of the task whose ``'id'`` equals *id*, else ``None``.

    >>> tasks = []
    >>> add_task(tasks, '1', 'Do my homework', '2016-04-30', 'normal', ['school'])
    >>> add_task(tasks, '2', 'Do your homework', '2016-04-30', 'normal', ['school'])
    >>> find_task(tasks, '1')
    0
    """
    for index, task in enumerate(tasks):
        if task['id'] == id:
            return index
    return None
def remove_task(tasks, id):
    """Remove the task with the given id from ``tasks`` in place.

    Removing an unknown id is a no-op.

    >>> tasks = []
    >>> add_task(tasks, '1', 'Do my homework', '2016-04-30', 'normal', ['school'])
    >>> remove_task(tasks, '1')
    >>> tasks
    []
    """
    # Slice assignment keeps the caller's list object identity.
    tasks[:] = [task for task in tasks if task['id'] != id]
def complete_task(tasks, id):
    """Mark the task with the given id as completed (in place).

    An unknown id is a no-op.

    >>> tasks = []
    >>> add_task(tasks, '1', 'Do my homework', '2016-04-30', 'normal', ['school'])
    >>> complete_task(tasks, '1')
    >>> tasks[0]['completed']
    True
    """
    for task in tasks:
        if task['id'] == id:
            task['completed'] = True
def uncomplete_task(tasks, id):
    """Mark the task with the given id as not completed (in place).

    An unknown id is a no-op.

    >>> tasks = []
    >>> add_task(tasks, '1', 'Do my homework', '2016-04-30', 'normal', ['school'])
    >>> complete_task(tasks, '1')
    >>> uncomplete_task(tasks, '1')
    >>> tasks[0]['completed']
    False
    """
    for task in tasks:
        if task['id'] == id:
            task['completed'] = False
def get_completed(tasks):
    """Return a new list with the completed tasks, in original order.

    >>> tasks = []
    >>> add_task(tasks, '1', 'Do my homework', '2016-04-30', 'normal', ['school'])
    >>> add_task(tasks, '2', 'Do your homework', '2016-04-30', 'normal', ['school'])
    >>> complete_task(tasks, '1')
    >>> get_completed(tasks) == [{'id': '1', 'title': 'Do my homework', 'deadline': '2016-04-30', 'priority': 'normal', 'tags': ['school'], 'completed': True}]
    True
    """
    return [task for task in tasks if task['completed']]
def get_uncompleted(tasks):
    """Return a new list with the not-yet-completed tasks, in original order.

    >>> tasks = []
    >>> add_task(tasks, '1', 'Do my homework', '2016-04-30', 'normal', ['school'])
    >>> get_uncompleted(tasks) == [{'id': '1', 'title': 'Do my homework', 'deadline': '2016-04-30', 'priority': 'normal', 'tags': ['school'], 'completed': False}]
    True
    """
    return [task for task in tasks if not task['completed']]
def clear_completed(tasks):
    """Remove every completed task from ``tasks`` in place.

    >>> tasks = []
    >>> add_task(tasks, '1', 'Do my homework', '2016-04-30', 'normal', ['school'])
    >>> add_task(tasks, '2', 'Do your homework', '2016-04-30', 'normal', ['school'])
    >>> complete_task(tasks, '1')
    >>> clear_completed(tasks)
    >>> tasks == [{'id': '2', 'title': 'Do your homework', 'deadline': '2016-04-30', 'priority': 'normal', 'tags': ['school'], 'completed': False}]
    True
    """
    # Slice assignment keeps the caller's list object identity.
    tasks[:] = [task for task in tasks if not task['completed']]
def filter_by_tag(tasks, tag):
    """Return a new list with the tasks carrying *tag* in their tag list.

    >>> tasks = []
    >>> add_task(tasks, '1', 'Do my homework', '2016-04-30', 'normal', ['school'])
    >>> add_task(tasks, '2', 'Do your homework', '2016-04-30', 'normal', ['work'])
    >>> filter_by_tag(tasks, 'school') == [{'id': '1', 'title': 'Do my homework', 'deadline': '2016-04-30', 'priority': 'normal', 'tags': ['school'], 'completed': False}]
    True
    """
    return [task for task in tasks if tag in task['tags']]
def filter_by_date_range(tasks, start_date, end_date):
    """Return tasks whose deadline lies in [start_date, end_date], inclusive.

    Dates are ISO ``YYYY-MM-DD`` strings, so plain lexicographic comparison
    orders them chronologically.

    >>> tasks = []
    >>> add_task(tasks, '1', 'Do my homework', '2016-04-30', 'normal', ['school'])
    >>> add_task(tasks, '2', 'Do your homework', '2016-05-10', 'normal', ['work'])
    >>> filter_by_date_range(tasks, '2016-04-20', '2016-05-01') == [{'id': '1', 'title': 'Do my homework', 'deadline': '2016-04-30', 'priority': 'normal', 'tags': ['school'], 'completed': False}]
    True
    """
    return [task for task in tasks
            if start_date <= task['deadline'] <= end_date]
def order_by_deadline(tasks):
    """Return a new list of the tasks sorted by ascending deadline.

    ISO ``YYYY-MM-DD`` strings sort chronologically; the input list is not
    modified, and the sort is stable for equal deadlines.

    >>> tasks = []
    >>> add_task(tasks, '1', 'Do your homework', '2016-05-10', 'normal', ['work'])
    >>> add_task(tasks, '2', 'Do my homework', '2016-04-30', 'normal', ['school'])
    >>> order_by_deadline(tasks) == [{'id': '2', 'title': 'Do my homework', 'deadline': '2016-04-30', 'priority': 'normal', 'tags': ['school'], 'completed': False}, {'id': '1', 'title': 'Do your homework', 'deadline': '2016-05-10', 'priority': 'normal', 'tags': ['work'], 'completed': False}]
    True
    """
    return sorted(tasks, key=lambda task: task['deadline'])
def order_by_priority(tasks):
    """Return a new list of the tasks sorted by ascending PRIORITIES rank.

    'urgent' (rank 1) sorts before 'normal' (rank 2); the input list is not
    modified, and the sort is stable within the same priority.

    >>> tasks = []
    >>> add_task(tasks, '1', 'Do your homework', '2016-05-10', 'normal', ['work'])
    >>> add_task(tasks, '2', 'Do my homework', '2016-04-30', 'urgent', ['school'])
    >>> order_by_priority(tasks) == [{'id': '2', 'title': 'Do my homework', 'deadline': '2016-04-30', 'priority': 'urgent', 'tags': ['school'], 'completed': False}, {'id': '1', 'title': 'Do your homework', 'deadline': '2016-05-10', 'priority': 'normal', 'tags': ['work'], 'completed': False}]
    True
    """
    # NOTE: the original doctest invoked order_by_deadline here, which was
    # clearly a copy/paste slip; it now exercises this function.
    return sorted(tasks, key=lambda task: PRIORITIES[task['priority']])
| 31.938356 | 292 | 0.531418 | 571 | 4,663 | 4.23993 | 0.089317 | 0.115242 | 0.079306 | 0.105741 | 0.84304 | 0.824866 | 0.8038 | 0.796778 | 0.788517 | 0.788517 | 0 | 0.078167 | 0.204375 | 4,663 | 145 | 293 | 32.158621 | 0.574394 | 0.771821 | 0 | 0.375 | 0 | 0 | 0.032211 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.375 | false | 0.375 | 0 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 8 |
cb7b682a632cb6add27448d988d6c6f063126bed | 117 | py | Python | pad/plot/__init__.py | richardangell/py-analysis-development | 4587c10c449bd083b416fd2e5b6d198997b9bf08 | [
"MIT"
] | null | null | null | pad/plot/__init__.py | richardangell/py-analysis-development | 4587c10c449bd083b416fd2e5b6d198997b9bf08 | [
"MIT"
] | 1 | 2019-04-14T18:29:58.000Z | 2019-04-14T18:29:58.000Z | pad/plot/__init__.py | richardangell/py-analysis-development | 4587c10c449bd083b416fd2e5b6d198997b9bf08 | [
"MIT"
] | null | null | null | from pad.plot import one_way
from pad.plot import two_way
from pad.plot import templates
from pad.plot import helpers | 29.25 | 30 | 0.837607 | 22 | 117 | 4.363636 | 0.409091 | 0.291667 | 0.458333 | 0.708333 | 0.416667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.128205 | 117 | 4 | 31 | 29.25 | 0.941176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
380601cea85503b0dc19c9f5c4936204c33dc795 | 6,545 | py | Python | loldib/getratings/models/NA/na_draven/na_draven_top.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_draven/na_draven_top.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_draven/na_draven_top.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | from getratings.models.ratings import Ratings
# Auto-generated placeholder classes: one empty ``Ratings`` subclass per
# opposing champion for the NA / Draven / Top matchup table.  None of them
# adds behavior; presumably the distinct class names are what downstream
# lookups key on — TODO confirm against the generator that emitted this file.
class NA_Draven_Top_Aatrox(Ratings):
    pass
class NA_Draven_Top_Ahri(Ratings):
    pass
class NA_Draven_Top_Akali(Ratings):
    pass
class NA_Draven_Top_Alistar(Ratings):
    pass
class NA_Draven_Top_Amumu(Ratings):
    pass
class NA_Draven_Top_Anivia(Ratings):
    pass
class NA_Draven_Top_Annie(Ratings):
    pass
class NA_Draven_Top_Ashe(Ratings):
    pass
class NA_Draven_Top_AurelionSol(Ratings):
    pass
class NA_Draven_Top_Azir(Ratings):
    pass
class NA_Draven_Top_Bard(Ratings):
    pass
class NA_Draven_Top_Blitzcrank(Ratings):
    pass
class NA_Draven_Top_Brand(Ratings):
    pass
class NA_Draven_Top_Braum(Ratings):
    pass
class NA_Draven_Top_Caitlyn(Ratings):
    pass
class NA_Draven_Top_Camille(Ratings):
    pass
class NA_Draven_Top_Cassiopeia(Ratings):
    pass
class NA_Draven_Top_Chogath(Ratings):
    pass
class NA_Draven_Top_Corki(Ratings):
    pass
class NA_Draven_Top_Darius(Ratings):
    pass
class NA_Draven_Top_Diana(Ratings):
    pass
class NA_Draven_Top_Draven(Ratings):
    pass
class NA_Draven_Top_DrMundo(Ratings):
    pass
class NA_Draven_Top_Ekko(Ratings):
    pass
class NA_Draven_Top_Elise(Ratings):
    pass
class NA_Draven_Top_Evelynn(Ratings):
    pass
class NA_Draven_Top_Ezreal(Ratings):
    pass
class NA_Draven_Top_Fiddlesticks(Ratings):
    pass
class NA_Draven_Top_Fiora(Ratings):
    pass
class NA_Draven_Top_Fizz(Ratings):
    pass
class NA_Draven_Top_Galio(Ratings):
    pass
class NA_Draven_Top_Gangplank(Ratings):
    pass
class NA_Draven_Top_Garen(Ratings):
    pass
class NA_Draven_Top_Gnar(Ratings):
    pass
class NA_Draven_Top_Gragas(Ratings):
    pass
class NA_Draven_Top_Graves(Ratings):
    pass
class NA_Draven_Top_Hecarim(Ratings):
    pass
class NA_Draven_Top_Heimerdinger(Ratings):
    pass
class NA_Draven_Top_Illaoi(Ratings):
    pass
class NA_Draven_Top_Irelia(Ratings):
    pass
class NA_Draven_Top_Ivern(Ratings):
    pass
class NA_Draven_Top_Janna(Ratings):
    pass
class NA_Draven_Top_JarvanIV(Ratings):
    pass
class NA_Draven_Top_Jax(Ratings):
    pass
class NA_Draven_Top_Jayce(Ratings):
    pass
class NA_Draven_Top_Jhin(Ratings):
    pass
class NA_Draven_Top_Jinx(Ratings):
    pass
class NA_Draven_Top_Kalista(Ratings):
    pass
class NA_Draven_Top_Karma(Ratings):
    pass
class NA_Draven_Top_Karthus(Ratings):
    pass
class NA_Draven_Top_Kassadin(Ratings):
    pass
class NA_Draven_Top_Katarina(Ratings):
    pass
class NA_Draven_Top_Kayle(Ratings):
    pass
class NA_Draven_Top_Kayn(Ratings):
    pass
class NA_Draven_Top_Kennen(Ratings):
    pass
class NA_Draven_Top_Khazix(Ratings):
    pass
class NA_Draven_Top_Kindred(Ratings):
    pass
class NA_Draven_Top_Kled(Ratings):
    pass
class NA_Draven_Top_KogMaw(Ratings):
    pass
class NA_Draven_Top_Leblanc(Ratings):
    pass
class NA_Draven_Top_LeeSin(Ratings):
    pass
class NA_Draven_Top_Leona(Ratings):
    pass
class NA_Draven_Top_Lissandra(Ratings):
    pass
class NA_Draven_Top_Lucian(Ratings):
    pass
class NA_Draven_Top_Lulu(Ratings):
    pass
class NA_Draven_Top_Lux(Ratings):
    pass
class NA_Draven_Top_Malphite(Ratings):
    pass
class NA_Draven_Top_Malzahar(Ratings):
    pass
class NA_Draven_Top_Maokai(Ratings):
    pass
class NA_Draven_Top_MasterYi(Ratings):
    pass
class NA_Draven_Top_MissFortune(Ratings):
    pass
class NA_Draven_Top_MonkeyKing(Ratings):
    pass
class NA_Draven_Top_Mordekaiser(Ratings):
    pass
class NA_Draven_Top_Morgana(Ratings):
    pass
class NA_Draven_Top_Nami(Ratings):
    pass
class NA_Draven_Top_Nasus(Ratings):
    pass
class NA_Draven_Top_Nautilus(Ratings):
    pass
class NA_Draven_Top_Nidalee(Ratings):
    pass
class NA_Draven_Top_Nocturne(Ratings):
    pass
class NA_Draven_Top_Nunu(Ratings):
    pass
class NA_Draven_Top_Olaf(Ratings):
    pass
class NA_Draven_Top_Orianna(Ratings):
    pass
class NA_Draven_Top_Ornn(Ratings):
    pass
class NA_Draven_Top_Pantheon(Ratings):
    pass
class NA_Draven_Top_Poppy(Ratings):
    pass
class NA_Draven_Top_Quinn(Ratings):
    pass
class NA_Draven_Top_Rakan(Ratings):
    pass
class NA_Draven_Top_Rammus(Ratings):
    pass
class NA_Draven_Top_RekSai(Ratings):
    pass
class NA_Draven_Top_Renekton(Ratings):
    pass
class NA_Draven_Top_Rengar(Ratings):
    pass
class NA_Draven_Top_Riven(Ratings):
    pass
class NA_Draven_Top_Rumble(Ratings):
    pass
class NA_Draven_Top_Ryze(Ratings):
    pass
class NA_Draven_Top_Sejuani(Ratings):
    pass
class NA_Draven_Top_Shaco(Ratings):
    pass
class NA_Draven_Top_Shen(Ratings):
    pass
class NA_Draven_Top_Shyvana(Ratings):
    pass
class NA_Draven_Top_Singed(Ratings):
    pass
class NA_Draven_Top_Sion(Ratings):
    pass
class NA_Draven_Top_Sivir(Ratings):
    pass
class NA_Draven_Top_Skarner(Ratings):
    pass
class NA_Draven_Top_Sona(Ratings):
    pass
class NA_Draven_Top_Soraka(Ratings):
    pass
class NA_Draven_Top_Swain(Ratings):
    pass
class NA_Draven_Top_Syndra(Ratings):
    pass
class NA_Draven_Top_TahmKench(Ratings):
    pass
class NA_Draven_Top_Taliyah(Ratings):
    pass
class NA_Draven_Top_Talon(Ratings):
    pass
class NA_Draven_Top_Taric(Ratings):
    pass
class NA_Draven_Top_Teemo(Ratings):
    pass
class NA_Draven_Top_Thresh(Ratings):
    pass
class NA_Draven_Top_Tristana(Ratings):
    pass
class NA_Draven_Top_Trundle(Ratings):
    pass
class NA_Draven_Top_Tryndamere(Ratings):
    pass
class NA_Draven_Top_TwistedFate(Ratings):
    pass
class NA_Draven_Top_Twitch(Ratings):
    pass
class NA_Draven_Top_Udyr(Ratings):
    pass
class NA_Draven_Top_Urgot(Ratings):
    pass
class NA_Draven_Top_Varus(Ratings):
    pass
class NA_Draven_Top_Vayne(Ratings):
    pass
class NA_Draven_Top_Veigar(Ratings):
    pass
class NA_Draven_Top_Velkoz(Ratings):
    pass
class NA_Draven_Top_Vi(Ratings):
    pass
class NA_Draven_Top_Viktor(Ratings):
    pass
class NA_Draven_Top_Vladimir(Ratings):
    pass
class NA_Draven_Top_Volibear(Ratings):
    pass
class NA_Draven_Top_Warwick(Ratings):
    pass
class NA_Draven_Top_Xayah(Ratings):
    pass
class NA_Draven_Top_Xerath(Ratings):
    pass
class NA_Draven_Top_XinZhao(Ratings):
    pass
class NA_Draven_Top_Yasuo(Ratings):
    pass
class NA_Draven_Top_Yorick(Ratings):
    pass
class NA_Draven_Top_Zac(Ratings):
    pass
class NA_Draven_Top_Zed(Ratings):
    pass
class NA_Draven_Top_Ziggs(Ratings):
    pass
class NA_Draven_Top_Zilean(Ratings):
    pass
class NA_Draven_Top_Zyra(Ratings):
    pass
| 15.695444 | 46 | 0.766692 | 972 | 6,545 | 4.736626 | 0.151235 | 0.209818 | 0.389661 | 0.479583 | 0.803432 | 0.803432 | 0 | 0 | 0 | 0 | 0 | 0 | 0.169748 | 6,545 | 416 | 47 | 15.733173 | 0.847258 | 0 | 0 | 0.498195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.498195 | 0.00361 | 0 | 0.501805 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 8 |
3822cb8115ec5ecd0ee832a05bb5d207ec2c9be3 | 17,458 | py | Python | htmd/htmd.py | Mingmingge/csdn-to-github.io | 84b6f86f536f696f695c56931e8f3b43c43d7476 | [
"Apache-2.0"
] | 3 | 2018-07-04T01:20:44.000Z | 2019-04-11T06:40:35.000Z | htmd/htmd.py | Mingmingge/csdn-to-github.io | 84b6f86f536f696f695c56931e8f3b43c43d7476 | [
"Apache-2.0"
] | null | null | null | htmd/htmd.py | Mingmingge/csdn-to-github.io | 84b6f86f536f696f695c56931e8f3b43c43d7476 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python
# -*- coding: utf-8 -*-
import re
import os
import warnings
import bs4
# HTML tag name -> (markdown prefix, markdown suffix) pair; subElements()
# wraps each matched element's text as prefix + text + suffix.
MARKDOWN = {
    'h1': ('\n# ', '\n'),
    'h2': ('\n## ', '\n'),
    'h3': ('\n### ', '\n'),
    'h4': ('\n#### ', '\n'),
    'h5': ('\n##### ', '\n'),
    'h6': ('\n###### ', '\n'),
    'em': ('*', '*'),
    'strong': ('**', '**'),
    'b': ('**', '**'),
    'i': ('*', '*'),
    'code': ('`', '`'),
    'pre': ('\n```\n', '\n```\n'),
    'td': ('|', ''),
    'th': ('|', ''),
    'tr': ('', '\n'),
    'table': ('', '\n'),
    'ul': ('', ''),
    'ol': ('', ''),
    'li': ('\n - ', '\n'),
    'blockquote': ('\n> ', '\n'),
    'block_code': ('\n```\n', '\n```\n'),
    'span': (' ', '\n'),
    'p': ('\n', '\n'),
    'inline_p': ('', ''),
    'inline_p_with_out_class': ('', ''),
    'del': ('~~', '~~'),
    'hr': ('\n---', '\n\n'),
    'thead': ('\n', '|------\n'),
    'tbody': ('\n', '\n'),
    'e_p': ('', '\n'),
    # Was the bare string '\n\n' (not a tuple); consumers only index [0] and
    # [1], which happened to yield '\n' each.  Made an explicit pair with the
    # identical element values so the entry has the same shape as the rest.
    'img': ('\n', '\n')
}
# HTML tag name -> (opening-tag regex, closing-tag regex).  Only referenced
# by the dead regex-based converter drafts kept below as string literals,
# but the obvious typos are fixed so any revival of that code works.
HTML = {
    'h1': ('<h1.*?>', '</h1>'),
    'h2': ('<h2.*?>', '</h2>'),
    'h3': ('<h3.*?>', '</h3>'),
    'h4': ('<h4.*?>', '</h4>'),
    'h5': ('<h5.*?>', '</h5>'),
    'h6': ('<h6.*?>', '</h6>'),
    # NOTE(review): the opening pattern '<hr.*?' has a trailing lazy
    # quantifier and no '>'; left as-is since the intent is unclear.
    'hr': ('<hr.*?', '<hr/>'),
    'blockquote': ('<blockquote.*?>', '</blockquote>'),
    'ul': ('<ul.*?>', '</ul>'),
    'ol': ('<ol.*?>', '</ol>'),
    # Fixed: was '<li>*?>', a malformed pattern that never matches an
    # attribute-carrying <li>; now consistent with every other entry.
    'li': ('<li.*?>', '</li>'),
    'block_code': ('<pre.*?><code.*?>', '</code></pre>'),
    'p': ('<p.*?>', '</p>'),
    'p_with_out_class': ('<p>', '</p>'),
    'thead': ('<thead.*?>', '</thead>'),
    'tr': ('<tr.*?>', '</tr>'),
    'td': ('<td.*?>', '</td>'),
    'th': ('<th.*?>', '</th>'),
    'b': ('<b.*?>', '</b>'),
    'i': ('<i.*?>', '</i>'),
    'del': ('<del.*?>', '</del>'),
    'a': ('<a.*?href="(.*?)".*?>', '</a>'),
    'em': ('<em.*?>', '</em>'),
    # Fixed: the closing pattern was '<strong>' (an opening tag).
    'strong': ('<strong.*?>', '</strong>')
}
# NOTE(review): dead code — an earlier regex-table draft kept alive only as a
# string literal (note the 'BlOCK_ELEMENTS' capitalization typo).  Superseded
# by the BeautifulSoup-based subElements() below; consider deleting.
'''BlOCK_ELEMENTS = {
'h1': '<h1.*?>(.*?)</h1>',
'h2': '<h2.*?>(.*?)</h2>',
'h3': '<h3.*?>(.*?)</h3>',
'h4': '<h4.*?>(.*?)</h4>',
'h5': '<h5.*?>(.*?)</h5>',
'h6': '<h6.*?>(.*?)</h6>',
'hr': '<hr/>',
'blockquote': '<blockquote.*?>(.*?)</blockquote>',
'ul': '<ul.*?>(.*?)</ul>',
'ol': '<ol.*?>(.*?)</ol>',
'li': '<li.*?>(.*?)<li>',
'block_code': '<pre.*?><code.*?>(.*?)</code></pre>',
'p': '<p.*?>(.*?)</p>',
'p_with_out_class': '<p>(.*?)</p>',
'thead': '<thead.*?>(.*?)</thead>',
'tr': '<tr.*?>(.*?)</tr>'
}'''
# Tag names handled by subElements()'s first (inline) pass; substitution
# follows this list order.
INLINE_ELEMENT = ['img', 'strong', 'em', 'b', 'code', 'li', 'ul', 'td', 'th', 'tr', 'span']
# Tag names handled by subElements()'s second (block) pass.
BLOCK_ELEMENTS = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6', 'pre', 'p']
# NOTE(review): more dead regex-table drafts kept as string literals.
'''INLINE_ELEMENTS = {
'strong':'<strong.*?>(.*?)</strong>',
'em': '<em.*?>(.*?)</em>',
'b': '<b.*?>(.*?)</b>',
'code': '<code.*?>(.*?)</code>',
'td': '<td.*?>((.|\n)*?)</td>',
'th': '<th.*?>(.*?)</th>',
'tr': '<tr.*?>((.|\n)*?)</tr>',
'ul': '<ul.*?>(.*?)</ul>',
'ol': '<ol.*?>(.*?)</ol>',
'li': '<li.*?>(.*?)</li>'
}'''
'''
'span': '<span.*?>(.*?)</span>',
'i': '<i.*?>(.*?)</i>',
'inline_p_with_out_class': '<p>(.*?)</p>',
'code': '<code.*?>(.*?)</code>',
'img': '<img.*?src="(.*?)".*?>(.*?)</img>',
'img_single': '<img.*?src="(.*?)".*?/>',
'img_single_no_close': '<img.*?src="(.*?)".*?>',
'strong': '<strong.*?>(\s*)(.*?)(\s*)</strong>',
'tbody': '<tbody.*?>((.|\n)*)</tbody>',
'a': '<a.*?href="(.*?)".*?>(.*?)</a>',
'''
# Patterns deleteElements() strips: '</span>' becomes a newline, everything
# else (including each whitespace char, via '\s') becomes a single space.
DELETE_ELEMENTS = ['<span.*?>', '</span>', '<div.*?>', '</div>', '<center.*?>', '</center>', '<link .*?/>','\s', '<br.*?>', '</br>', '<ol.*?>', '</ol>',
'<article.*?>', '</article>', '<ul.*?>', '</ul>', '<table.*?>', '</table>', '<thead>']
def deleteElements(context):
    """Strip layout-only markup from *context*.

    Each pattern below is removed with ``re.sub``: a closing ``</span>``
    turns into a newline, every other match — including each individual
    whitespace character, via ``\\s`` — turns into a single space.
    """
    doomed_patterns = (
        r'<span.*?>', r'</span>', r'<div.*?>', r'</div>',
        r'<center.*?>', r'</center>', r'<link .*?/>', r'\s',
        r'<br.*?>', r'</br>', r'<ol.*?>', r'</ol>',
        r'<article.*?>', r'</article>', r'<ul.*?>', r'</ul>',
        r'<table.*?>', r'</table>', r'<thead>',
    )
    for pattern in doomed_patterns:
        replacement = '\n' if pattern == r'</span>' else ' '
        context = re.sub(pattern, replacement, context)
    return context
def subElements(context):
    """Convert the HTML in *context* to markdown-ish text.

    Two passes, each parsing the current string with BeautifulSoup/lxml:
    first every tag in INLINE_ELEMENT, then every tag in BLOCK_ELEMENTS, is
    replaced *textually* (``str.replace`` on the serialized tag) by its
    MARKDOWN prefix/suffix wrapping of the tag's text.  Because replacement
    is textual, it relies on BeautifulSoup serializing each tag exactly as
    it appears in ``con``.
    """
    con = context
    soup = bs4.BeautifulSoup(con, 'lxml')
    # Pass 1: inline elements, in INLINE_ELEMENT order.
    for ele in INLINE_ELEMENT:
        if soup.find_all(ele):
            for key in soup.find_all(ele):
                print(str(key))  # debug trace left in by the author
                #con = re.sub(str(key), str(MARKDOWN[ele][0] + str(key.string) + MARKDOWN[ele][1]), con)
                if ele == 'code':
                    # Only style-attributed <code> is kept as inline code;
                    # a plain <code> wrapper is unwrapped to its bare text.
                    if re.search(r'<code style=.*?>.*?</code>', str(key)):
                        con = con.replace(str(key), str(MARKDOWN[ele][0] + str(key.string).strip() + MARKDOWN[ele][1]))
                    else:
                        con = con.replace(str(key), str(key.string))
                elif ele == 'img':
                    print(key)  # debug trace
                    # NOTE(review): assumes the img src contains a 10-20 digit
                    # id; re.search(...).group() raises AttributeError when it
                    # does not — confirm all inputs match.
                    imgpath = os.path.join('../images/',str(os.path.join(re.search(r'\d{10,20}', str(key)).group()) + '.png'))
                    con = con.replace(str(key),str(MARKDOWN[ele][0] + str(imgpath).strip() + MARKDOWN[ele][1]))
                else:
                    con = con.replace(str(key).strip(),str(MARKDOWN[ele][0] + str(key.string).strip() + MARKDOWN[ele][1]))
        else:
            continue
    # Pass 2: block elements; re-parse each iteration since pass 1 and prior
    # replacements changed the raw string.
    for ele in BLOCK_ELEMENTS:
        soup = bs4.BeautifulSoup(con, 'lxml')
        if soup.find_all(ele):
            for key in soup.find_all(ele):
                print(key)  # debug trace
                con = con.replace(str(key), str(MARKDOWN[ele][0] + str(key.string).strip() + MARKDOWN[ele][1]))
        else:
            continue
    #print(bs4.BeautifulSoup(con, 'lxml').prettify())
    return con
html = """
<article>
<div id="article_content" class="article_content clearfix csdn-tracking-statistics" data-pid="blog" data-mod="popu_307" data-dsm="post">
<link rel="stylesheet" href="https://csdnimg.cn/release/phoenix/template/css/ck_htmledit_views-e2445db1a8.css" />
<div class="htmledit_views">
<h2>sdvefvev</h2>
<h1 class="entry-title" style="border:0px;font-style:inherit;font-variant:inherit;line-height:1.2em;font-family:'PT Serif', Georgia, 'Helvetica Neue', Arial, sans-serif;font-size:2.6em;vertical-align:baseline;"> Jdbc 连接 Mysql 时的中文乱码问题</h1>
<p class="meta" style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:'PT Sans', 'Helvetica Neue', Arial, sans-serif;font-size:.9em;vertical-align:baseline;color:rgb(170,170,170);text-transform:uppercase;"> </p>
<div class="entry-content" style="border:0px;line-height:inherit;font-family:'PT Serif', Georgia, Times, 'Times New Roman', serif;font-size:18.4px;vertical-align:baseline;color:rgb(34,34,34);background-color:rgb(248,248,248);">
<p style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;"> 本文转载自http://chenyufei.info/blog/2007-06-27/post-070627-095625-802/</p>
<p style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;"> 在用 jdbc 向 mysql 数据库插入中文时出现了乱码,严格来说是通过 Hibernate。记录下搜索和查文档以后找到的解决办法。</p>
<ul style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;">
<li style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;"> 首先要告诉数据库要插入的字符串使用的字符集,mysql 默认使用的字符集是 latin1。我要保存的字符串是 UTF-8 编码的(字符集是 Unicode),所以包含这个字段的表应该使用 UTF-8 编码。这里有几种解决办法。</li>
<ol style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;">
<li style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;"> 在建立数据库的时候指定数据库的字符集编码,这样,这个数据库的所有表都会默认使用数据库的字符集编码。如 create database foo charset utf8;</li>
<li style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;"> 在建表的时候指定字符集编码。如 <code style="border:1px solid rgb(221,221,221);font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:1.5em;font-family:Menlo, Monaco, 'Andale Mono', 'lucida console', 'Courier New', monospace;font-size:.8em;vertical-align:baseline;display:inline-block;background:rgb(255,255,255);color:rgb(85,85,85);">create table foo (id char(20)) charset utf8;</code> </li>
<li style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;"> 指定某一列使用的字符集编码。如<code style="border:1px solid rgb(221,221,221);font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:1.5em;font-family:Menlo, Monaco, 'Andale Mono', 'lucida console', 'Courier New', monospace;font-size:.8em;vertical-align:baseline;display:inline-block;background:rgb(255,255,255);color:rgb(85,85,85);">create table foo (id char(20) charset utf8);</code> </li>
</ol> 如果你有需要的话还可以指定字符排序的规则,也就是指定 collation,如 <code style="border:1px solid rgb(221,221,221);font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:1.5em;font-family:Menlo, Monaco, 'Andale Mono', 'lucida console', 'Courier New', monospace;font-size:.8em;vertical-align:baseline;display:inline-block;background:rgb(255,255,255);color:rgb(85,85,85);">create database foo charset utf8 collate utf8_general_ci;</code>,同样也可以指定单独的表、列使用的 collation 规则。</li>
<li style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;"> 然后在使用 jdbc 连接数据库的时候要告知 jdbc 使用什么字符集编码来跟服务器通信。很简单,只需要在 jdbc 指定数据库路径时做一点修改就可以了。比如,<code style="border:1px solid rgb(221,221,221);font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:1.5em;font-family:Menlo, Monaco, 'Andale Mono', 'lucida console', 'Courier New', monospace;font-size:.8em;vertical-align:baseline;display:inline-block;background:rgb(255,255,255);color:rgb(85,85,85);">jdbc:mysql://localhost/test?useUnicode=true&characterEncoding=utf8</code>。注意如果在 XML 文件里面的话 “&” 要改成 “&”。</li>
</ul>
<p style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;"> 如果你使用的是 gbk 编码的话把上面所有提到 utf8 的地方改成 gbk 应该就可以了,只要服务器和客户端使用的字符集编码统一就可以了。</p>
<p style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;"> mysql 命令行客户端默认使用的字符集也是 latin1,如果你通过这个来插入中文的话也会出现乱码的情况。解决的办法是执行语句 <code style="border:1px solid rgb(221,221,221);font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:1.5em;font-family:Menlo, Monaco, 'Andale Mono', 'lucida console', 'Courier New', monospace;font-size:.8em;vertical-align:baseline;display:inline-block;background:rgb(255,255,255);color:rgb(85,85,85);">set names ‘utf8’</code> 来告诉服务器使用 UTF-8 编码来和客户端通信。你也可以使用 <code style="border:1px solid rgb(221,221,221);font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:1.5em;font-family:Menlo, Monaco, 'Andale Mono', 'lucida console', 'Courier New', monospace;font-size:.8em;vertical-align:baseline;display:inline-block;background:rgb(255,255,255);color:rgb(85,85,85);">set charset ‘utf8’</code>,它和 set names 区别只在于 collation 上。set names 和 set charset 都相当于执行了三条语句,具体的内容可以去看 mysql 文档 10.4 节。我想这个方法在使用 jdbc 的时候也是可以的,所以如果 jdbc 的指定数据库地址中没有告知使用的字符集编码的话可以通过执行上面的语句来达到相同的效果。</p>
<p style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;"> (如果对文章中字符集和字符集编码的使用感到困惑的话去看点 Unicode 方面的文章吧。)</p>
</div>
</div>
</div>
<pre class="prettyprint"><code class=" hljs bash">! /bin/bash
<span class="hljs-built_in">read</span> -t <span class="hljs-number">10</span> -p <span class="hljs-string">"请输入您的姓名: "</span> name
<span class="hljs-built_in">echo</span> <span class="hljs-variable">$name</span>
<span class="hljs-built_in">read</span> <span class="hljs-operator">-s</span> pas
<span class="hljs-built_in">echo</span> <span class="hljs-variable">$pas</span> </code></pre>
<pre class="prettyprint"><code class=" hljs bash">! /bin/bash
<span class="hljs-built_in">read</span> -t <span class="hljs-number">10</span> -p <span class="hljs-string">"请输入您的姓名: "</span> name
<span class="hljs-built_in">echo</span> <span class="hljs-variable">$name</span>
<span class="hljs-built_in">read</span> <span class="hljs-operator">-s</span> pas
<span class="hljs-built_in">echo</span> <span class="hljs-variable">$pas</span> </code></pre>
<strong>strooo
</strong>
</article>
"""
#print(deleteElements(html))
# Scratch parses of the fixture above: `soup` on the tag-stripped text,
# `soup1` on the raw HTML.  Both run at import time (side effect).
soup = bs4.BeautifulSoup(deleteElements(html), 'lxml')
soup1 = bs4.BeautifulSoup(html, 'lxml')
# NOTE(review): dead code — an earlier regex-driven converter and assorted
# debug loops, disabled by wrapping in a string literal; consider deleting.
'''print(soup.h1)
for p in soup.find_all('p'):
print(p)
for code in soup.find_all('code'):
print(code.string)
for strong in soup.find_all('strong'):
print(strong.string)
con = context
for key in INLINE_ELEMENTS:
list = re.findall(re.compile(INLINE_ELEMENTS[key]), str(con))
if not list:
continue
else:
for li in list:
if 1:
subcon = str(MARKDOWN[key][0]) + str(li) + str(MARKDOWN[key][1])
try:
con = re.sub(HTML[key][0] + str(li) + HTML[key][1], subcon, con)
except:
print("inline error")
for key in BlOCK_ELEMENTS:
list = re.findall(re.compile(BlOCK_ELEMENTS[key]), str(con))
if not list:
continue
else:
for li in list:
# print(li)
subcon = str(MARKDOWN[key][0]) + str(li) + str(MARKDOWN[key][1])
# print(subcon)
try:
con = re.sub(HTML[key][0] + str(li) + HTML[key][1], subcon, con)
except:
print('block error')
continue
con = re.sub(HTML[key][0] + str(li) + HTML[key][1], subcon, con)
#print(con)
return con
for key in soup.find_all('li'):
print(key)'''
#print(soup.pre.contents)
#print(soup.code.contents)
html2 = """# Jdbc 连接 Mysql 时的中文乱码问题
本文转载自http://chenyufei.info/blog/2007-06-27/post-070627-095625-802/
在用 jdbc 向 mysql 数据库插入中文时出现了乱码,严格来说是通过 Hibernate。记录下搜索和查文档以后找到的解决办法。
- 首先要告诉数据库要插入的字符串使用的字符集,mysql 默认使用的字符集是 latin1。我要保存的字符串是 UTF-8 编码的(字符集是 Unicode),所以包含这个字段的表应该使用 UTF-8 编码。这里有几种解决办法。
<li style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;"> 在建立数据库的时候指定数据库的字符集编码,这样,这个数据库的所有表都会默认使用数据库的字符集编码。如 create database foo charset utf8;</li> <li style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;"> 在建表的时候指定字符集编码。如 `create table foo (id char(20)) charset utf8;` </li>
- 指定某一列使用的字符集编码。如`create table foo (id char(20) charset utf8);`
如果你有需要的话还可以指定字符排序的规则,也就是指定 collation,如 `create database foo charset utf8 collate utf8_general_ci;`,同样也可以指定单独的表、列使用的 collation 规则。</li> <li style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;"> 然后在使用 jdbc 连接数据库的时候要告知 jdbc 使用什么字符集编码来跟服务器通信。很简单,只需要在 jdbc 指定数据库路径时做一点修改就可以了。比如,`jdbc:mysql://localhost/test?useUnicode=true&characterEncoding=utf8`。注意如果在 XML 文件里面的话 “&” 要改成 “&”。</li>
如果你使用的是 gbk 编码的话把上面所有提到 utf8 的地方改成 gbk 应该就可以了,只要服务器和客户端使用的字符集编码统一就可以了。
<p style="border:0px;font-style:inherit;font-variant:inherit;font-weight:inherit;line-height:inherit;font-family:inherit;font-size:18.4px;vertical-align:baseline;"> mysql 命令行客户端默认使用的字符集也是 latin1,如果你通过这个来插入中文的话也会出现乱码的情况。解决的办法是执行语句 `set names ‘utf8’` 来告诉服务器使用 UTF-8 编码来和客户端通信。你也可以使用 `set charset ‘utf8’`,它和 set names 区别只在于 collation 上。set names 和 set charset 都相当于执行了三条语句,具体的内容可以去看 mysql 文档 10.4 节。我想这个方法在使用 jdbc 的时候也是可以的,所以如果 jdbc 的指定数据库地址中没有告知使用的字符集编码的话可以通过执行上面的语句来达到相同的效果。</p>
(如果对文章中字符集和字符集编码的使用感到困惑的话去看点 Unicode 方面的文章吧。)
```
! /bin/bash read -t 10 -p "请输入您的姓名: " name echo $name read -s pas echo $pas
```
**strooo** """
# Small fixture exercising the <img> branch of subElements() (CSDN image
# URLs contain the long numeric id the function extracts).
html3 = """
<p>最后,安装验证代码正确性啦, <img src="https://img-blog.csdn.net/20180603132405509?watermark/2/text/aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L2JhYnliYWJ5dXA=/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70" alt="" title=""> 以及卸载模块后: <img src="https://img-blog.csdn.net/2018060313235489?watermark/2/text/aHR0cHM6Ly9ibG9nLmNzZG4ubmV0L2JhYnliYWJ5dXA=/font/5a6L5L2T/fontsize/400/fill/I0JBQkFCMA==/dissolve/70" alt="" title=""></p> """
soup3 = bs4.BeautifulSoup(html3, 'lxml')
#for key in soup3.find_all('img'):
#if re.search(r'\d{10,20}', str(key)):
#print(os.path.join('../images/', str(os.path.join(re.search(r'\d{10,20}', str(key)).group())+'.png')))
# NOTE(review): runs at import time and writes to a developer-specific,
# hard-coded absolute path — should be parameterized (CLI arg / env var)
# before this module is used anywhere else.
with open('/Users/hulimin/Desktop/1.md' ,'w') as file:
    file.write(subElements(deleteElements(html3)))
| 56.135048 | 1,148 | 0.603563 | 2,279 | 17,458 | 4.596314 | 0.147433 | 0.08506 | 0.050119 | 0.045823 | 0.77642 | 0.755418 | 0.736325 | 0.710072 | 0.690597 | 0.660716 | 0 | 0.037014 | 0.159698 | 17,458 | 310 | 1,149 | 56.316129 | 0.677028 | 0.024172 | 0 | 0.195122 | 0 | 0.20122 | 0.778362 | 0.462429 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012195 | false | 0 | 0.02439 | 0 | 0.04878 | 0.030488 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
69b8e501a0838dd927f081808490b8ec4f2dfc95 | 28,866 | py | Python | spark_fhir_schemas/stu3/complex_types/chargeitem.py | icanbwell/SparkFhirSchemas | 8c828313c39850b65f8676e67f526ee92b7d624e | [
"Apache-2.0"
] | 2 | 2020-10-31T23:25:01.000Z | 2021-06-09T14:12:42.000Z | spark_fhir_schemas/stu3/complex_types/chargeitem.py | icanbwell/SparkFhirSchemas | 8c828313c39850b65f8676e67f526ee92b7d624e | [
"Apache-2.0"
] | null | null | null | spark_fhir_schemas/stu3/complex_types/chargeitem.py | icanbwell/SparkFhirSchemas | 8c828313c39850b65f8676e67f526ee92b7d624e | [
"Apache-2.0"
] | null | null | null | from typing import Union, List, Optional
from pyspark.sql.types import (
StructType,
StructField,
StringType,
ArrayType,
DateType,
DataType,
TimestampType,
FloatType,
)
# This file is auto-generated by generate_schema so do not edit manually
# noinspection PyPep8Naming
class ChargeItemSchema:
"""
The resource ChargeItem describes the provision of healthcare provider
products for a certain patient, therefore referring not only to the product,
but containing in addition details of the provision, like date, time, amounts
and participating organizations and persons. Main Usage of the ChargeItem is
to enable the billing process and internal cost allocation.
"""
# noinspection PyDefaultArgument
@staticmethod
def get_schema(
max_nesting_depth: Optional[int] = 6,
nesting_depth: int = 0,
nesting_list: List[str] = [],
max_recursion_limit: Optional[int] = 2,
include_extension: Optional[bool] = False,
extension_fields: Optional[List[str]] = [
"valueBoolean",
"valueCode",
"valueDate",
"valueDateTime",
"valueDecimal",
"valueId",
"valueInteger",
"valuePositiveInt",
"valueString",
"valueTime",
"valueUnsignedInt",
"valueUri",
"valueQuantity",
],
extension_depth: int = 0,
max_extension_depth: Optional[int] = 2,
) -> Union[StructType, DataType]:
"""
The resource ChargeItem describes the provision of healthcare provider
products for a certain patient, therefore referring not only to the product,
but containing in addition details of the provision, like date, time, amounts
and participating organizations and persons. Main Usage of the ChargeItem is
to enable the billing process and internal cost allocation.
id: The logical id of the resource, as used in the URL for the resource. Once
assigned, this value never changes.
extension: May be used to represent additional information that is not part of the basic
definition of the resource. In order to make the use of extensions safe and
manageable, there is a strict set of governance applied to the definition and
use of extensions. Though any implementer is allowed to define an extension,
there is a set of requirements that SHALL be met as part of the definition of
the extension.
meta: The metadata about the resource. This is content that is maintained by the
infrastructure. Changes to the content may not always be associated with
version changes to the resource.
implicitRules: A reference to a set of rules that were followed when the resource was
constructed, and which must be understood when processing the content.
language: The base language in which the resource is written.
text: A human-readable narrative that contains a summary of the resource, and may be
used to represent the content of the resource to a human. The narrative need
not encode all the structured data, but is required to contain sufficient
detail to make it "clinically safe" for a human to just read the narrative.
Resource definitions may define what content should be represented in the
narrative to ensure clinical safety.
contained: These resources do not have an independent existence apart from the resource
that contains them - they cannot be identified independently, and nor can they
have their own independent transaction scope.
resourceType: This is a ChargeItem resource
identifier: Identifiers assigned to this event performer or other systems.
definition: References the source of pricing information, rules of application for the
code this ChargeItem uses.
status: The current state of the ChargeItem.
partOf: ChargeItems can be grouped to larger ChargeItems covering the whole set.
code: A code that identifies the charge, like a billing code.
subject: The individual or set of individuals the action is being or was performed on.
context: The encounter or episode of care that establishes the context for this event.
occurrenceDateTime: Date/time(s) or duration when the charged service was applied.
occurrencePeriod: Date/time(s) or duration when the charged service was applied.
occurrenceTiming: Date/time(s) or duration when the charged service was applied.
participant: Indicates who or what performed or participated in the charged service.
performingOrganization: The organization requesting the service.
requestingOrganization: The organization performing the service.
quantity: Quantity of which the charge item has been serviced.
bodysite: The anatomical location where the related service has been applied.
factorOverride: Factor overriding the factor determined by the rules associated with the code.
priceOverride: Total price of the charge overriding the list price associated with the code.
overrideReason: If the list price or the rule based factor associated with the code is
overridden, this attribute can capture a text to indicate the reason for this
action.
enterer: The device, practitioner, etc. who entered the charge item.
enteredDate: Date the charge item was entered.
reason: Describes why the event occurred in coded or textual form.
service: Indicated the rendered service that caused this charge.
account: Account into which this ChargeItems belongs.
note: Comments made about the event by the performer, subject or other participants.
supportingInformation: Further information supporting the this charge.
"""
from spark_fhir_schemas.stu3.complex_types.extension import ExtensionSchema
from spark_fhir_schemas.stu3.complex_types.meta import MetaSchema
from spark_fhir_schemas.stu3.complex_types.narrative import NarrativeSchema
from spark_fhir_schemas.stu3.simple_types.resourcelist import ResourceListSchema
from spark_fhir_schemas.stu3.complex_types.identifier import IdentifierSchema
from spark_fhir_schemas.stu3.complex_types.reference import ReferenceSchema
from spark_fhir_schemas.stu3.complex_types.codeableconcept import (
CodeableConceptSchema,
)
from spark_fhir_schemas.stu3.complex_types.period import PeriodSchema
from spark_fhir_schemas.stu3.complex_types.timing import TimingSchema
from spark_fhir_schemas.stu3.complex_types.chargeitem_participant import (
ChargeItem_ParticipantSchema,
)
from spark_fhir_schemas.stu3.complex_types.quantity import QuantitySchema
from spark_fhir_schemas.stu3.complex_types.money import MoneySchema
from spark_fhir_schemas.stu3.complex_types.annotation import AnnotationSchema
if (
max_recursion_limit
and nesting_list.count("ChargeItem") >= max_recursion_limit
) or (max_nesting_depth and nesting_depth >= max_nesting_depth):
return StructType([StructField("id", StringType(), True)])
# add my name to recursion list for later
my_nesting_list: List[str] = nesting_list + ["ChargeItem"]
schema = StructType(
[
# The logical id of the resource, as used in the URL for the resource. Once
# assigned, this value never changes.
StructField("id", StringType(), True),
# May be used to represent additional information that is not part of the basic
# definition of the resource. In order to make the use of extensions safe and
# manageable, there is a strict set of governance applied to the definition and
# use of extensions. Though any implementer is allowed to define an extension,
# there is a set of requirements that SHALL be met as part of the definition of
# the extension.
StructField(
"extension",
ArrayType(
ExtensionSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The metadata about the resource. This is content that is maintained by the
# infrastructure. Changes to the content may not always be associated with
# version changes to the resource.
StructField(
"meta",
MetaSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# A reference to a set of rules that were followed when the resource was
# constructed, and which must be understood when processing the content.
StructField("implicitRules", StringType(), True),
# The base language in which the resource is written.
StructField("language", StringType(), True),
# A human-readable narrative that contains a summary of the resource, and may be
# used to represent the content of the resource to a human. The narrative need
# not encode all the structured data, but is required to contain sufficient
# detail to make it "clinically safe" for a human to just read the narrative.
# Resource definitions may define what content should be represented in the
# narrative to ensure clinical safety.
StructField(
"text",
NarrativeSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# These resources do not have an independent existence apart from the resource
# that contains them - they cannot be identified independently, and nor can they
# have their own independent transaction scope.
StructField(
"contained",
ArrayType(
ResourceListSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# This is a ChargeItem resource
StructField("resourceType", StringType(), True),
# Identifiers assigned to this event performer or other systems.
StructField(
"identifier",
IdentifierSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# References the source of pricing information, rules of application for the
# code this ChargeItem uses.
StructField("definition", ArrayType(StringType()), True),
# The current state of the ChargeItem.
StructField("status", StringType(), True),
# ChargeItems can be grouped to larger ChargeItems covering the whole set.
StructField(
"partOf",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# A code that identifies the charge, like a billing code.
StructField(
"code",
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The individual or set of individuals the action is being or was performed on.
StructField(
"subject",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The encounter or episode of care that establishes the context for this event.
StructField(
"context",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Date/time(s) or duration when the charged service was applied.
StructField("occurrenceDateTime", TimestampType(), True),
# Date/time(s) or duration when the charged service was applied.
StructField(
"occurrencePeriod",
PeriodSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Date/time(s) or duration when the charged service was applied.
StructField(
"occurrenceTiming",
TimingSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Indicates who or what performed or participated in the charged service.
StructField(
"participant",
ArrayType(
ChargeItem_ParticipantSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# The organization requesting the service.
StructField(
"performingOrganization",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The organization performing the service.
StructField(
"requestingOrganization",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Quantity of which the charge item has been serviced.
StructField(
"quantity",
QuantitySchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# The anatomical location where the related service has been applied.
StructField(
"bodysite",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Factor overriding the factor determined by the rules associated with the code.
StructField("factorOverride", FloatType(), True),
# Total price of the charge overriding the list price associated with the code.
StructField(
"priceOverride",
MoneySchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# If the list price or the rule based factor associated with the code is
# overridden, this attribute can capture a text to indicate the reason for this
# action.
StructField("overrideReason", StringType(), True),
# The device, practitioner, etc. who entered the charge item.
StructField(
"enterer",
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth + 1,
max_extension_depth=max_extension_depth,
),
True,
),
# Date the charge item was entered.
StructField("enteredDate", DateType(), True),
# Describes why the event occurred in coded or textual form.
StructField(
"reason",
ArrayType(
CodeableConceptSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Indicated the rendered service that caused this charge.
StructField(
"service",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Account into which this ChargeItems belongs.
StructField(
"account",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Comments made about the event by the performer, subject or other participants.
StructField(
"note",
ArrayType(
AnnotationSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
# Further information supporting the this charge.
StructField(
"supportingInformation",
ArrayType(
ReferenceSchema.get_schema(
max_nesting_depth=max_nesting_depth,
nesting_depth=nesting_depth + 1,
nesting_list=my_nesting_list,
max_recursion_limit=max_recursion_limit,
include_extension=include_extension,
extension_fields=extension_fields,
extension_depth=extension_depth,
max_extension_depth=max_extension_depth,
)
),
True,
),
]
)
if not include_extension:
schema.fields = [
c
if c.name != "extension"
else StructField("extension", StringType(), True)
for c in schema.fields
]
return schema
| 49.091837 | 102 | 0.544828 | 2,599 | 28,866 | 5.812235 | 0.135052 | 0.077055 | 0.048656 | 0.073084 | 0.820402 | 0.803389 | 0.785648 | 0.75705 | 0.750695 | 0.724613 | 0 | 0.003244 | 0.412631 | 28,866 | 587 | 103 | 49.175468 | 0.887703 | 0.269313 | 0 | 0.70283 | 0 | 0 | 0.025471 | 0.003166 | 0 | 0 | 0 | 0 | 0 | 1 | 0.002358 | false | 0 | 0.035377 | 0 | 0.044811 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
69d9f9106186fc6e37826d51ff38c67584f6bc0f | 284 | py | Python | Curso_Guanabara/aula46.py | lucianojunnior17/Python | b216c8f567fcf9ff80aeae7441eadc99c2aad83f | [
"MIT"
] | 1 | 2020-05-21T10:54:49.000Z | 2020-05-21T10:54:49.000Z | Curso_Guanabara/aula46.py | lucianojunnior17/Python | b216c8f567fcf9ff80aeae7441eadc99c2aad83f | [
"MIT"
] | null | null | null | Curso_Guanabara/aula46.py | lucianojunnior17/Python | b216c8f567fcf9ff80aeae7441eadc99c2aad83f | [
"MIT"
] | null | null | null | <<<<<<< HEAD
from time import sleep
for cont in range(10,-1, -1):
print(cont)
sleep(1)
print('Feliz Ano Novo')
=======
from time import sleep
for cont in range(10,-1, -1):
print(cont)
sleep(1)
print('Feliz Ano Novo')
>>>>>>> bd0e1823ea061e95db29e314186751d1c072412f
| 17.75 | 48 | 0.640845 | 40 | 284 | 4.55 | 0.4 | 0.131868 | 0.153846 | 0.208791 | 0.758242 | 0.758242 | 0.758242 | 0.758242 | 0.758242 | 0.758242 | 0 | 0.16309 | 0.179577 | 284 | 15 | 49 | 18.933333 | 0.618026 | 0 | 0 | 0.769231 | 0 | 0 | 0.098592 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.153846 | null | null | 0.307692 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
3851623413f143d66446502cbdd6ee38556b8c42 | 2,095 | py | Python | dosth/myemoji.py | woailuoli993/ksxingtest | 09d861d076a1ff03152e27f5fb29e5c90144d0d1 | [
"WTFPL"
] | null | null | null | dosth/myemoji.py | woailuoli993/ksxingtest | 09d861d076a1ff03152e27f5fb29e5c90144d0d1 | [
"WTFPL"
] | 2 | 2021-03-31T18:39:09.000Z | 2021-12-13T19:44:48.000Z | dosth/myemoji.py | woailuoli993/ksxingtest | 09d861d076a1ff03152e27f5fb29e5c90144d0d1 | [
"WTFPL"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Created by vici on 16-11-14
def main():
with open('/home/vici/myem.txt', 'rb') as f:
myem = f.read()
myem = f.read()
print ''.join(map(lambda x: '[em]'+x+'[/em]', set(map(lambda x: x[:4], myem.split(r'[em]')[1:]))))
if __name__ == '__main__':
main()
# delete emoji that have exist
# like:
# [em]e248[/em][em]e248[/em][em]e251[/em][em]e251[/em][em]e253[/em][em]e253[/em][em]e246[/em][em]e246[/em][em]e245[/em][em]e245[/em][em]e246[/em][em]e245[/em][em]e244[/em][em]e243[/em][em]e251[/em][em]e253[/em][em]e248[/em][em]e248[/em][em]e248[/em][em]e251[/em][em]e251[/em][em]e253[/em][em]e253[/em][em]e246[/em][em]e246[/em][em]e245[/em][em]e245[/em][em]e246[/em][em]e245[/em][em]e244[/em][em]e243[/em][em]e251[/em][em]e253[/em][em]e248[/em] [em]e248[/em][em]e249[/em][em]e245[/em][em]e251[/em][em]e247[/em][em]e246[/em][em]e250[/em][em]e251[/em][em]e252[/em][em]e253[/em][em]e248[/em][em]e248[/em][em]e251[/em][em]e251[/em][em]e253[/em][em]e253[/em][em]e246[/em][em]e246[/em][em]e245[/em][em]e245[/em][em]e246[/em][em]e245[/em][em]e244[/em][em]e243[/em][em]e251[/em][em]e253[/em][em]e248[/em][em]e248[/em][em]e248[/em][em]e251[/em][em]e251[/em][em]e253[/em][em]e253[/em][em]e246[/em][em]e246[/em][em]e245[/em][em]e245[/em][em]e246[/em][em]e245[/em][em]e244[/em][em]e243[/em][em]e251[/em][em]e253[/em][em]e248[/em] [em]e248[/em][em]e249[/em][em]e245[/em][em]e251[/em][em]e247[/em][em]e246[/em][em]e250[/em][em]e251[/em][em]e252[/em][em]e253[/em][em]e248[/em][em]e248[/em][em]e251[/em][em]e251[/em][em]e253[/em][em]e253[/em][em]e246[/em][em]e246[/em][em]e245[/em][em]e245[/em][em]e246[/em][em]e245[/em][em]e244[/em][em]e243[/em][em]e251[/em][em]e253[/em][em]e248[/em][em]e248[/em][em]e248[/em][em]e251[/em][em]e251[/em][em]e253[/em][em]e253[/em][em]e246[/em][em]e246[/em][em]e245[/em][em]e245[/em][em]e246[/em][em]e245[/em][em]e244[/em][em]e243[/em][em]e251[/em][em]e253[/em][em]e248[/em] [em]e248[/em][em]e249[/em][em]e245[/em][em]e251[/em][em]e247[/em][em]e246[/em][em]e250[/em][em]e251[/em][em]e252[/em][em]e253[/em] | 99.761905 | 1,721 | 0.6 | 456 | 2,095 | 2.739035 | 0.120614 | 0.419536 | 0.153723 | 0.192154 | 0.845476 | 0.845476 | 0.845476 | 0.845476 | 0.845476 | 0.845476 | 0 | 0.201794 | 0.042005 | 2,095 | 21 | 1,721 | 99.761905 | 0.420528 | 0.871599 | 0 | 0.285714 | 0 | 0 | 0.159091 | 
0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.142857 | 0 | 0 | 0 | null | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 |
3894ea7210bcd127dcae72eadb946fee9568dfc3 | 345 | py | Python | pydmfet/locints/__init__.py | fishjojo/pydmfe | 93cfc655314933d3531b5733521a1f95a044f6cb | [
"MIT"
] | 3 | 2021-02-26T06:26:00.000Z | 2022-02-20T08:58:20.000Z | pydmfet/locints/__init__.py | fishjojo/pydmfet | 93cfc655314933d3531b5733521a1f95a044f6cb | [
"MIT"
] | null | null | null | pydmfet/locints/__init__.py | fishjojo/pydmfet | 93cfc655314933d3531b5733521a1f95a044f6cb | [
"MIT"
] | null | null | null | from pydmfet.locints import localintegrals
from pydmfet.locints.localintegrals import *
#from pydmfet.locints import mmn_wannier90
#from pydmfet.locints.mmn_wannier90 import *
from pydmfet.locints import locints_base
#from pydmfet.locints import locints_wf
from pydmfet.locints.locints_base import *
#from pydmfet.locints.locints_wf import *
| 38.333333 | 44 | 0.84058 | 46 | 345 | 6.173913 | 0.195652 | 0.309859 | 0.507042 | 0.338028 | 0.34507 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012903 | 0.101449 | 345 | 8 | 45 | 43.125 | 0.903226 | 0.472464 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
38c3bb628ba67247668a7f51199f42862d647f91 | 186 | py | Python | tkdet/data/datasets/fruits_nuts.py | tkhe/tkdetection | 54e6c112ef2930e755f457e38449736f5743a9ea | [
"MIT"
] | 1 | 2020-10-09T02:27:13.000Z | 2020-10-09T02:27:13.000Z | tkdet/data/datasets/fruits_nuts.py | tkhe/tkdetection | 54e6c112ef2930e755f457e38449736f5743a9ea | [
"MIT"
] | null | null | null | tkdet/data/datasets/fruits_nuts.py | tkhe/tkdetection | 54e6c112ef2930e755f457e38449736f5743a9ea | [
"MIT"
] | null | null | null | from .coco import register_coco_instances
def register_fruits_nuts_instances(name, metadata, json_file, image_root):
register_coco_instances(name, metadata, json_file, image_root)
| 31 | 74 | 0.833333 | 26 | 186 | 5.538462 | 0.538462 | 0.166667 | 0.291667 | 0.347222 | 0.527778 | 0.527778 | 0.527778 | 0 | 0 | 0 | 0 | 0 | 0.102151 | 186 | 5 | 75 | 37.2 | 0.862275 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.333333 | false | 0 | 0.333333 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
2a1fd625e2d9a070dc0ec16771761afaf6d85165 | 6,683 | py | Python | loldib/getratings/models/NA/na_drmundo/na_drmundo_mid.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_drmundo/na_drmundo_mid.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | loldib/getratings/models/NA/na_drmundo/na_drmundo_mid.py | koliupy/loldib | c9ab94deb07213cdc42b5a7c26467cdafaf81b7f | [
"Apache-2.0"
] | null | null | null | from getratings.models.ratings import Ratings
class NA_DrMundo_Mid_Aatrox(Ratings):
pass
class NA_DrMundo_Mid_Ahri(Ratings):
pass
class NA_DrMundo_Mid_Akali(Ratings):
pass
class NA_DrMundo_Mid_Alistar(Ratings):
pass
class NA_DrMundo_Mid_Amumu(Ratings):
pass
class NA_DrMundo_Mid_Anivia(Ratings):
pass
class NA_DrMundo_Mid_Annie(Ratings):
pass
class NA_DrMundo_Mid_Ashe(Ratings):
pass
class NA_DrMundo_Mid_AurelionSol(Ratings):
pass
class NA_DrMundo_Mid_Azir(Ratings):
pass
class NA_DrMundo_Mid_Bard(Ratings):
pass
class NA_DrMundo_Mid_Blitzcrank(Ratings):
pass
class NA_DrMundo_Mid_Brand(Ratings):
pass
class NA_DrMundo_Mid_Braum(Ratings):
pass
class NA_DrMundo_Mid_Caitlyn(Ratings):
pass
class NA_DrMundo_Mid_Camille(Ratings):
pass
class NA_DrMundo_Mid_Cassiopeia(Ratings):
pass
class NA_DrMundo_Mid_Chogath(Ratings):
pass
class NA_DrMundo_Mid_Corki(Ratings):
pass
class NA_DrMundo_Mid_Darius(Ratings):
pass
class NA_DrMundo_Mid_Diana(Ratings):
pass
class NA_DrMundo_Mid_Draven(Ratings):
pass
class NA_DrMundo_Mid_DrMundo(Ratings):
pass
class NA_DrMundo_Mid_Ekko(Ratings):
pass
class NA_DrMundo_Mid_Elise(Ratings):
pass
class NA_DrMundo_Mid_Evelynn(Ratings):
pass
class NA_DrMundo_Mid_Ezreal(Ratings):
pass
class NA_DrMundo_Mid_Fiddlesticks(Ratings):
pass
class NA_DrMundo_Mid_Fiora(Ratings):
pass
class NA_DrMundo_Mid_Fizz(Ratings):
pass
class NA_DrMundo_Mid_Galio(Ratings):
pass
class NA_DrMundo_Mid_Gangplank(Ratings):
pass
class NA_DrMundo_Mid_Garen(Ratings):
pass
class NA_DrMundo_Mid_Gnar(Ratings):
pass
class NA_DrMundo_Mid_Gragas(Ratings):
pass
class NA_DrMundo_Mid_Graves(Ratings):
pass
class NA_DrMundo_Mid_Hecarim(Ratings):
pass
class NA_DrMundo_Mid_Heimerdinger(Ratings):
pass
class NA_DrMundo_Mid_Illaoi(Ratings):
pass
class NA_DrMundo_Mid_Irelia(Ratings):
pass
class NA_DrMundo_Mid_Ivern(Ratings):
pass
class NA_DrMundo_Mid_Janna(Ratings):
pass
class NA_DrMundo_Mid_JarvanIV(Ratings):
pass
class NA_DrMundo_Mid_Jax(Ratings):
pass
class NA_DrMundo_Mid_Jayce(Ratings):
pass
class NA_DrMundo_Mid_Jhin(Ratings):
pass
class NA_DrMundo_Mid_Jinx(Ratings):
pass
class NA_DrMundo_Mid_Kalista(Ratings):
pass
class NA_DrMundo_Mid_Karma(Ratings):
pass
class NA_DrMundo_Mid_Karthus(Ratings):
pass
class NA_DrMundo_Mid_Kassadin(Ratings):
pass
class NA_DrMundo_Mid_Katarina(Ratings):
pass
class NA_DrMundo_Mid_Kayle(Ratings):
pass
class NA_DrMundo_Mid_Kayn(Ratings):
pass
class NA_DrMundo_Mid_Kennen(Ratings):
pass
class NA_DrMundo_Mid_Khazix(Ratings):
pass
class NA_DrMundo_Mid_Kindred(Ratings):
pass
class NA_DrMundo_Mid_Kled(Ratings):
pass
class NA_DrMundo_Mid_KogMaw(Ratings):
pass
class NA_DrMundo_Mid_Leblanc(Ratings):
pass
class NA_DrMundo_Mid_LeeSin(Ratings):
pass
class NA_DrMundo_Mid_Leona(Ratings):
pass
class NA_DrMundo_Mid_Lissandra(Ratings):
pass
class NA_DrMundo_Mid_Lucian(Ratings):
pass
class NA_DrMundo_Mid_Lulu(Ratings):
pass
class NA_DrMundo_Mid_Lux(Ratings):
pass
class NA_DrMundo_Mid_Malphite(Ratings):
pass
class NA_DrMundo_Mid_Malzahar(Ratings):
pass
class NA_DrMundo_Mid_Maokai(Ratings):
pass
class NA_DrMundo_Mid_MasterYi(Ratings):
pass
class NA_DrMundo_Mid_MissFortune(Ratings):
pass
class NA_DrMundo_Mid_MonkeyKing(Ratings):
pass
class NA_DrMundo_Mid_Mordekaiser(Ratings):
pass
class NA_DrMundo_Mid_Morgana(Ratings):
pass
class NA_DrMundo_Mid_Nami(Ratings):
pass
class NA_DrMundo_Mid_Nasus(Ratings):
pass
class NA_DrMundo_Mid_Nautilus(Ratings):
pass
class NA_DrMundo_Mid_Nidalee(Ratings):
pass
class NA_DrMundo_Mid_Nocturne(Ratings):
pass
class NA_DrMundo_Mid_Nunu(Ratings):
pass
class NA_DrMundo_Mid_Olaf(Ratings):
pass
class NA_DrMundo_Mid_Orianna(Ratings):
pass
class NA_DrMundo_Mid_Ornn(Ratings):
pass
class NA_DrMundo_Mid_Pantheon(Ratings):
pass
class NA_DrMundo_Mid_Poppy(Ratings):
pass
class NA_DrMundo_Mid_Quinn(Ratings):
pass
class NA_DrMundo_Mid_Rakan(Ratings):
pass
class NA_DrMundo_Mid_Rammus(Ratings):
pass
class NA_DrMundo_Mid_RekSai(Ratings):
pass
class NA_DrMundo_Mid_Renekton(Ratings):
pass
class NA_DrMundo_Mid_Rengar(Ratings):
pass
class NA_DrMundo_Mid_Riven(Ratings):
pass
class NA_DrMundo_Mid_Rumble(Ratings):
pass
class NA_DrMundo_Mid_Ryze(Ratings):
pass
class NA_DrMundo_Mid_Sejuani(Ratings):
pass
class NA_DrMundo_Mid_Shaco(Ratings):
pass
class NA_DrMundo_Mid_Shen(Ratings):
pass
class NA_DrMundo_Mid_Shyvana(Ratings):
pass
class NA_DrMundo_Mid_Singed(Ratings):
pass
class NA_DrMundo_Mid_Sion(Ratings):
pass
class NA_DrMundo_Mid_Sivir(Ratings):
pass
class NA_DrMundo_Mid_Skarner(Ratings):
pass
class NA_DrMundo_Mid_Sona(Ratings):
pass
class NA_DrMundo_Mid_Soraka(Ratings):
pass
class NA_DrMundo_Mid_Swain(Ratings):
pass
class NA_DrMundo_Mid_Syndra(Ratings):
pass
class NA_DrMundo_Mid_TahmKench(Ratings):
pass
class NA_DrMundo_Mid_Taliyah(Ratings):
pass
class NA_DrMundo_Mid_Talon(Ratings):
pass
class NA_DrMundo_Mid_Taric(Ratings):
pass
class NA_DrMundo_Mid_Teemo(Ratings):
pass
class NA_DrMundo_Mid_Thresh(Ratings):
pass
class NA_DrMundo_Mid_Tristana(Ratings):
pass
class NA_DrMundo_Mid_Trundle(Ratings):
pass
class NA_DrMundo_Mid_Tryndamere(Ratings):
pass
class NA_DrMundo_Mid_TwistedFate(Ratings):
pass
class NA_DrMundo_Mid_Twitch(Ratings):
pass
class NA_DrMundo_Mid_Udyr(Ratings):
pass
class NA_DrMundo_Mid_Urgot(Ratings):
pass
class NA_DrMundo_Mid_Varus(Ratings):
pass
class NA_DrMundo_Mid_Vayne(Ratings):
pass
class NA_DrMundo_Mid_Veigar(Ratings):
pass
class NA_DrMundo_Mid_Velkoz(Ratings):
pass
class NA_DrMundo_Mid_Vi(Ratings):
pass
class NA_DrMundo_Mid_Viktor(Ratings):
pass
class NA_DrMundo_Mid_Vladimir(Ratings):
pass
class NA_DrMundo_Mid_Volibear(Ratings):
pass
class NA_DrMundo_Mid_Warwick(Ratings):
pass
class NA_DrMundo_Mid_Xayah(Ratings):
pass
class NA_DrMundo_Mid_Xerath(Ratings):
pass
class NA_DrMundo_Mid_XinZhao(Ratings):
pass
class NA_DrMundo_Mid_Yasuo(Ratings):
pass
class NA_DrMundo_Mid_Yorick(Ratings):
pass
class NA_DrMundo_Mid_Zac(Ratings):
pass
class NA_DrMundo_Mid_Zed(Ratings):
pass
class NA_DrMundo_Mid_Ziggs(Ratings):
pass
class NA_DrMundo_Mid_Zilean(Ratings):
pass
class NA_DrMundo_Mid_Zyra(Ratings):
pass
| 16.026379 | 46 | 0.77151 | 972 | 6,683 | 4.878601 | 0.151235 | 0.203712 | 0.407423 | 0.494728 | 0.808941 | 0.808941 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166243 | 6,683 | 416 | 47 | 16.064904 | 0.851041 | 0 | 0 | 0.498195 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.498195 | 0.00361 | 0 | 0.501805 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 7 |
2a40d4577089fdfc4c8415f7047c7b6c45d670f0 | 154 | py | Python | quinerelay.py | hanss314/random-stuff | 606c0393292e1ff08c99cdace16fecdcd7a2203b | [
"MIT"
] | null | null | null | quinerelay.py | hanss314/random-stuff | 606c0393292e1ff08c99cdace16fecdcd7a2203b | [
"MIT"
] | null | null | null | quinerelay.py | hanss314/random-stuff | 606c0393292e1ff08c99cdace16fecdcd7a2203b | [
"MIT"
] | null | null | null | x='main=putStr "x={};y=repr(x);print(x.format(y[:13]+chr(92)+y[13:84]+chr(92)+y[84:]))"';y=repr(x);print(x.format(y[:13]+chr(92)+y[13:84]+chr(92)+y[84:])) | 154 | 154 | 0.584416 | 38 | 154 | 2.368421 | 0.289474 | 0.133333 | 0.266667 | 0.244444 | 0.866667 | 0.866667 | 0.866667 | 0.866667 | 0.866667 | 0.866667 | 0 | 0.156863 | 0.006494 | 154 | 1 | 154 | 154 | 0.431373 | 0 | 0 | 0 | 0 | 1 | 0.541935 | 0.464516 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 14 |
2a73aa95561fd992d36a8090a82c6b07feaf5e47 | 126 | py | Python | awaitwhat/wait.py | Puneethnaik/awaitwhat | 41254780191cb77a32fe420d697455bac8b14c64 | [
"MIT"
] | 45 | 2019-08-12T12:15:52.000Z | 2022-03-03T13:27:13.000Z | awaitwhat/wait.py | Puneethnaik/awaitwhat | 41254780191cb77a32fe420d697455bac8b14c64 | [
"MIT"
] | 27 | 2019-08-16T06:32:16.000Z | 2021-07-14T12:15:04.000Z | awaitwhat/wait.py | Puneethnaik/awaitwhat | 41254780191cb77a32fe420d697455bac8b14c64 | [
"MIT"
] | 9 | 2019-09-14T02:05:00.000Z | 2021-06-22T04:14:01.000Z | import asyncio.tasks
def mine(frame):
return asyncio.tasks._wait.__code__ == frame.f_code
def decode(frame):
pass
| 12.6 | 55 | 0.714286 | 18 | 126 | 4.666667 | 0.666667 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.18254 | 126 | 9 | 56 | 14 | 0.815534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.4 | false | 0.2 | 0.2 | 0.2 | 0.8 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | 0 | 7 |
aa689155e54e952042a74b1e054136c365499f85 | 69 | py | Python | nse_daily/__init__.py | v33rh0ra/get_nse_daily | c20362c149766116e52d85987f27c3d988af4965 | [
"MIT"
] | null | null | null | nse_daily/__init__.py | v33rh0ra/get_nse_daily | c20362c149766116e52d85987f27c3d988af4965 | [
"MIT"
] | null | null | null | nse_daily/__init__.py | v33rh0ra/get_nse_daily | c20362c149766116e52d85987f27c3d988af4965 | [
"MIT"
] | null | null | null | from nse_daily.nse import NSEDaily
from nse_daily.bse import BSEDaily | 34.5 | 34 | 0.869565 | 12 | 69 | 4.833333 | 0.583333 | 0.241379 | 0.413793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.101449 | 69 | 2 | 35 | 34.5 | 0.935484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
aaabdb44c3b7315555f6789cf2c94dd02a2578f1 | 19,993 | py | Python | aiohomekit/model/services/data.py | wired/aiohomekit | 6f5e1c48bf9f3b2da145c0097336b77982134402 | [
"Apache-2.0"
] | 21 | 2020-02-14T20:45:47.000Z | 2022-03-29T01:36:14.000Z | aiohomekit/model/services/data.py | roysjosh/aiohomekit | 845502b74c0db367f040a803f2ef0cf67c2969f1 | [
"Apache-2.0"
] | 78 | 2020-03-22T22:25:50.000Z | 2022-03-10T23:08:43.000Z | aiohomekit/model/services/data.py | roysjosh/aiohomekit | 845502b74c0db367f040a803f2ef0cf67c2969f1 | [
"Apache-2.0"
] | 18 | 2020-04-09T23:02:18.000Z | 2022-03-01T23:20:19.000Z | # AUTOGENERATED, DO NOT EDIT
services = {
"0000003E-0000-1000-8000-0026BB765291": {
"name": "ACCESSORY_INFORMATION",
"description": "Accessory Information",
"required": [
"00000014-0000-1000-8000-0026BB765291",
"00000020-0000-1000-8000-0026BB765291",
"00000021-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
"00000030-0000-1000-8000-0026BB765291",
"00000052-0000-1000-8000-0026BB765291",
],
"optional": [
"00000053-0000-1000-8000-0026BB765291",
"000000A6-0000-1000-8000-0026BB765291",
],
},
"000000BB-0000-1000-8000-0026BB765291": {
"name": "AIR_PURIFIER",
"description": "Air Purifier",
"required": [
"000000B0-0000-1000-8000-0026BB765291",
"000000A9-0000-1000-8000-0026BB765291",
"000000A8-0000-1000-8000-0026BB765291",
],
"optional": [
"000000A7-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
"000000B6-0000-1000-8000-0026BB765291",
"00000029-0000-1000-8000-0026BB765291",
],
},
"0000008D-0000-1000-8000-0026BB765291": {
"name": "AIR_QUALITY_SENSOR",
"description": "Air Quality Sensor",
"required": ["00000095-0000-1000-8000-0026BB765291"],
"optional": [
"00000075-0000-1000-8000-0026BB765291",
"00000077-0000-1000-8000-0026BB765291",
"0000007A-0000-1000-8000-0026BB765291",
"00000079-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
"000000C3-0000-1000-8000-0026BB765291",
"000000C4-0000-1000-8000-0026BB765291",
"000000C5-0000-1000-8000-0026BB765291",
"000000C6-0000-1000-8000-0026BB765291",
"000000C7-0000-1000-8000-0026BB765291",
"000000C8-0000-1000-8000-0026BB765291",
"00000090-0000-1000-8000-0026BB765291",
"00000093-0000-1000-8000-0026BB765291",
],
},
"00000096-0000-1000-8000-0026BB765291": {
"name": "BATTERY_SERVICE",
"description": "Battery Service",
"required": [
"00000068-0000-1000-8000-0026BB765291",
"0000008F-0000-1000-8000-0026BB765291",
"00000079-0000-1000-8000-0026BB765291",
],
"optional": ["00000023-0000-1000-8000-0026BB765291"],
},
"00000110-0000-1000-8000-0026BB765291": {
"name": "CAMERA_RTP_STREAM_MANAGEMENT",
"description": "Camera RTP Stream Management",
"required": [
"00000114-0000-1000-8000-0026BB765291",
"00000115-0000-1000-8000-0026BB765291",
"00000116-0000-1000-8000-0026BB765291",
"00000117-0000-1000-8000-0026BB765291",
"00000120-0000-1000-8000-0026BB765291",
"00000118-0000-1000-8000-0026BB765291",
],
"optional": ["00000023-0000-1000-8000-0026BB765291"],
},
"00000097-0000-1000-8000-0026BB765291": {
"name": "CARBON_DIOXIDE_SENSOR",
"description": "Carbon Dioxide Sensor",
"required": ["00000092-0000-1000-8000-0026BB765291"],
"optional": [
"00000075-0000-1000-8000-0026BB765291",
"00000077-0000-1000-8000-0026BB765291",
"00000079-0000-1000-8000-0026BB765291",
"0000007A-0000-1000-8000-0026BB765291",
"00000093-0000-1000-8000-0026BB765291",
"00000094-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"0000007F-0000-1000-8000-0026BB765291": {
"name": "CARBON_MONOXIDE_SENSOR",
"description": "Carbon Monoxide Sensor",
"required": ["00000069-0000-1000-8000-0026BB765291"],
"optional": [
"00000075-0000-1000-8000-0026BB765291",
"00000077-0000-1000-8000-0026BB765291",
"00000079-0000-1000-8000-0026BB765291",
"0000007A-0000-1000-8000-0026BB765291",
"00000090-0000-1000-8000-0026BB765291",
"00000091-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"00000080-0000-1000-8000-0026BB765291": {
"name": "CONTACT_SENSOR",
"description": "Contact Sensor",
"required": ["0000006A-0000-1000-8000-0026BB765291"],
"optional": [
"00000075-0000-1000-8000-0026BB765291",
"00000077-0000-1000-8000-0026BB765291",
"0000007A-0000-1000-8000-0026BB765291",
"00000079-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"00000081-0000-1000-8000-0026BB765291": {
"name": "DOOR",
"description": "Door",
"required": [
"0000006D-0000-1000-8000-0026BB765291",
"00000072-0000-1000-8000-0026BB765291",
"0000007C-0000-1000-8000-0026BB765291",
],
"optional": [
"0000006F-0000-1000-8000-0026BB765291",
"00000024-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"00000121-0000-1000-8000-0026BB765291": {
"name": "DOORBELL",
"description": "Doorbell",
"required": ["00000073-0000-1000-8000-0026BB765291"],
"optional": [
"00000008-0000-1000-8000-0026BB765291",
"00000119-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"00000040-0000-1000-8000-0026BB765291": {
"name": "FAN",
"description": "Fan",
"required": ["00000025-0000-1000-8000-0026BB765291"],
"optional": [
"00000028-0000-1000-8000-0026BB765291",
"00000029-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"000000B7-0000-1000-8000-0026BB765291": {
"name": "FAN_V2",
"description": "Fan v2",
"required": ["000000B0-0000-1000-8000-0026BB765291"],
"optional": [
"000000AF-0000-1000-8000-0026BB765291",
"000000BF-0000-1000-8000-0026BB765291",
"000000A7-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
"00000028-0000-1000-8000-0026BB765291",
"00000029-0000-1000-8000-0026BB765291",
"000000B6-0000-1000-8000-0026BB765291",
],
},
"000000BA-0000-1000-8000-0026BB765291": {
"name": "FILTER_MAINTENANCE",
"description": "Filter Maintenance",
"required": ["000000AC-0000-1000-8000-0026BB765291"],
"optional": [
"000000AB-0000-1000-8000-0026BB765291",
"000000AD-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"000000D7-0000-1000-8000-0026BB765291": {
"name": "FAUCET",
"description": "Faucet",
"required": ["000000B0-0000-1000-8000-0026BB765291"],
"optional": [
"00000023-0000-1000-8000-0026BB765291",
"00000077-0000-1000-8000-0026BB765291",
],
},
"00000041-0000-1000-8000-0026BB765291": {
"name": "GARAGE_DOOR_OPENER",
"description": "Garage Door Opener",
"required": [
"0000000E-0000-1000-8000-0026BB765291",
"00000032-0000-1000-8000-0026BB765291",
"00000024-0000-1000-8000-0026BB765291",
],
"optional": [
"0000001D-0000-1000-8000-0026BB765291",
"0000001E-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"000000BC-0000-1000-8000-0026BB765291": {
"name": "HEATER_COOLER",
"description": "Heater Cooler",
"required": [
"000000B0-0000-1000-8000-0026BB765291",
"000000B1-0000-1000-8000-0026BB765291",
"000000B2-0000-1000-8000-0026BB765291",
"00000011-0000-1000-8000-0026BB765291",
],
"optional": [
"000000A7-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
"000000B6-0000-1000-8000-0026BB765291",
"0000000D-0000-1000-8000-0026BB765291",
"00000012-0000-1000-8000-0026BB765291",
"00000036-0000-1000-8000-0026BB765291",
"00000029-0000-1000-8000-0026BB765291",
],
},
"000000BD-0000-1000-8000-0026BB765291": {
"name": "HUMIDIFIER_DEHUMIDIFIER",
"description": "Humidifier Dehumidifier",
"required": [
"00000010-0000-1000-8000-0026BB765291",
"000000B3-0000-1000-8000-0026BB765291",
"000000B4-0000-1000-8000-0026BB765291",
"000000B0-0000-1000-8000-0026BB765291",
],
"optional": [
"000000A7-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
"000000B6-0000-1000-8000-0026BB765291",
"000000B5-0000-1000-8000-0026BB765291",
"000000C9-0000-1000-8000-0026BB765291",
"000000CA-0000-1000-8000-0026BB765291",
"00000029-0000-1000-8000-0026BB765291",
],
},
"00000082-0000-1000-8000-0026BB765291": {
"name": "HUMIDITY_SENSOR",
"description": "Humidity Sensor",
"required": ["00000010-0000-1000-8000-0026BB765291"],
"optional": [
"00000075-0000-1000-8000-0026BB765291",
"00000077-0000-1000-8000-0026BB765291",
"0000007A-0000-1000-8000-0026BB765291",
"00000079-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"000000CF-0000-1000-8000-0026BB765291": {
"name": "IRRIGATION_SYSTEM",
"description": "Irrigation System",
"required": [
"000000B0-0000-1000-8000-0026BB765291",
"000000D1-0000-1000-8000-0026BB765291",
"000000D2-0000-1000-8000-0026BB765291",
],
"optional": [
"00000023-0000-1000-8000-0026BB765291",
"000000D4-0000-1000-8000-0026BB765291",
"00000077-0000-1000-8000-0026BB765291",
],
},
"00000083-0000-1000-8000-0026BB765291": {
"name": "LEAK_SENSOR",
"description": "Leak Sensor",
"required": ["00000070-0000-1000-8000-0026BB765291"],
"optional": [
"00000075-0000-1000-8000-0026BB765291",
"00000077-0000-1000-8000-0026BB765291",
"0000007A-0000-1000-8000-0026BB765291",
"00000079-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"00000084-0000-1000-8000-0026BB765291": {
"name": "LIGHT_SENSOR",
"description": "Light Sensor",
"required": ["0000006B-0000-1000-8000-0026BB765291"],
"optional": [
"00000023-0000-1000-8000-0026BB765291",
"00000075-0000-1000-8000-0026BB765291",
"00000077-0000-1000-8000-0026BB765291",
"0000007A-0000-1000-8000-0026BB765291",
"00000079-0000-1000-8000-0026BB765291",
],
},
"00000043-0000-1000-8000-0026BB765291": {
"name": "LIGHTBULB",
"description": "Lightbulb",
"required": ["00000025-0000-1000-8000-0026BB765291"],
"optional": [
"00000008-0000-1000-8000-0026BB765291",
"00000013-0000-1000-8000-0026BB765291",
"0000002F-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"00000044-0000-1000-8000-0026BB765291": {
"name": "LOCK_MANAGEMENT",
"description": "Lock Management",
"required": [
"00000019-0000-1000-8000-0026BB765291",
"00000037-0000-1000-8000-0026BB765291",
],
"optional": [
"0000001F-0000-1000-8000-0026BB765291",
"00000005-0000-1000-8000-0026BB765291",
"0000001A-0000-1000-8000-0026BB765291",
"00000001-0000-1000-8000-0026BB765291",
"0000001C-0000-1000-8000-0026BB765291",
"0000000E-0000-1000-8000-0026BB765291",
"00000022-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"00000045-0000-1000-8000-0026BB765291": {
"name": "LOCK_MECHANISM",
"description": "Lock Mechanism",
"required": [
"0000001D-0000-1000-8000-0026BB765291",
"0000001E-0000-1000-8000-0026BB765291",
],
"optional": ["00000023-0000-1000-8000-0026BB765291"],
},
"00000112-0000-1000-8000-0026BB765291": {
"name": "MICROPHONE",
"description": "Microphone",
"required": [
"00000119-0000-1000-8000-0026BB765291",
"0000011A-0000-1000-8000-0026BB765291",
],
"optional": ["00000023-0000-1000-8000-0026BB765291"],
},
"00000085-0000-1000-8000-0026BB765291": {
"name": "MOTION_SENSOR",
"description": "Motion Sensor",
"required": ["00000022-0000-1000-8000-0026BB765291"],
"optional": [
"00000075-0000-1000-8000-0026BB765291",
"00000077-0000-1000-8000-0026BB765291",
"0000007A-0000-1000-8000-0026BB765291",
"00000079-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"00000086-0000-1000-8000-0026BB765291": {
"name": "OCCUPANCY_SENSOR",
"description": "Occupancy Sensor",
"required": ["00000071-0000-1000-8000-0026BB765291"],
"optional": [
"00000023-0000-1000-8000-0026BB765291",
"00000075-0000-1000-8000-0026BB765291",
"00000077-0000-1000-8000-0026BB765291",
"0000007A-0000-1000-8000-0026BB765291",
"00000079-0000-1000-8000-0026BB765291",
],
},
"00000047-0000-1000-8000-0026BB765291": {
"name": "OUTLET",
"description": "Outlet",
"required": [
"00000025-0000-1000-8000-0026BB765291",
"00000026-0000-1000-8000-0026BB765291",
],
"optional": ["00000023-0000-1000-8000-0026BB765291"],
},
"0000007E-0000-1000-8000-0026BB765291": {
"name": "SECURITY_SYSTEM",
"description": "Security System",
"required": [
"00000066-0000-1000-8000-0026BB765291",
"00000067-0000-1000-8000-0026BB765291",
],
"optional": [
"00000077-0000-1000-8000-0026BB765291",
"0000007A-0000-1000-8000-0026BB765291",
"0000008E-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"000000CC-0000-1000-8000-0026BB765291": {
"name": "SERVICE_LABEL",
"description": "Service Label",
"required": ["000000CD-0000-1000-8000-0026BB765291"],
"optional": ["00000023-0000-1000-8000-0026BB765291"],
},
"000000B9-0000-1000-8000-0026BB765291": {
"name": "SLAT",
"description": "Slat",
"required": [
"000000C0-0000-1000-8000-0026BB765291",
"000000AA-0000-1000-8000-0026BB765291",
],
"optional": [
"00000023-0000-1000-8000-0026BB765291",
"000000C1-0000-1000-8000-0026BB765291",
"000000C2-0000-1000-8000-0026BB765291",
"000000B6-0000-1000-8000-0026BB765291",
],
},
"00000087-0000-1000-8000-0026BB765291": {
"name": "SMOKE_SENSOR",
"description": "Smoke Sensor",
"required": ["00000076-0000-1000-8000-0026BB765291"],
"optional": [
"00000075-0000-1000-8000-0026BB765291",
"00000077-0000-1000-8000-0026BB765291",
"0000007A-0000-1000-8000-0026BB765291",
"00000079-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"00000113-0000-1000-8000-0026BB765291": {
"name": "SPEAKER",
"description": "Speaker",
"required": ["0000011A-0000-1000-8000-0026BB765291"],
"optional": [
"00000023-0000-1000-8000-0026BB765291",
"00000119-0000-1000-8000-0026BB765291",
],
},
"00000089-0000-1000-8000-0026BB765291": {
"name": "STATELESS_PROGRAMMABLE_SWITCH",
"description": "Stateless Programmable Switch",
"required": ["00000073-0000-1000-8000-0026BB765291"],
"optional": [
"00000023-0000-1000-8000-0026BB765291",
"000000CB-0000-1000-8000-0026BB765291",
],
},
"00000049-0000-1000-8000-0026BB765291": {
"name": "SWITCH",
"description": "Switch",
"required": ["00000025-0000-1000-8000-0026BB765291"],
"optional": ["00000023-0000-1000-8000-0026BB765291"],
},
"0000008A-0000-1000-8000-0026BB765291": {
"name": "TEMPERATURE_SENSOR",
"description": "Temperature Sensor",
"required": ["00000011-0000-1000-8000-0026BB765291"],
"optional": [
"00000075-0000-1000-8000-0026BB765291",
"00000077-0000-1000-8000-0026BB765291",
"00000079-0000-1000-8000-0026BB765291",
"0000007A-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"0000004A-0000-1000-8000-0026BB765291": {
"name": "THERMOSTAT",
"description": "Thermostat",
"required": [
"0000000F-0000-1000-8000-0026BB765291",
"00000033-0000-1000-8000-0026BB765291",
"00000011-0000-1000-8000-0026BB765291",
"00000035-0000-1000-8000-0026BB765291",
"00000036-0000-1000-8000-0026BB765291",
],
"optional": [
"00000010-0000-1000-8000-0026BB765291",
"00000034-0000-1000-8000-0026BB765291",
"0000000D-0000-1000-8000-0026BB765291",
"00000012-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"000000D0-0000-1000-8000-0026BB765291": {
"name": "VALVE",
"description": "Valve",
"required": [
"000000B0-0000-1000-8000-0026BB765291",
"000000D2-0000-1000-8000-0026BB765291",
"000000D5-0000-1000-8000-0026BB765291",
],
"optional": [
"000000D3-0000-1000-8000-0026BB765291",
"000000D4-0000-1000-8000-0026BB765291",
"000000D6-0000-1000-8000-0026BB765291",
"000000CB-0000-1000-8000-0026BB765291",
"00000077-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"0000008B-0000-1000-8000-0026BB765291": {
"name": "WINDOW",
"description": "Window",
"required": [
"0000006D-0000-1000-8000-0026BB765291",
"0000007C-0000-1000-8000-0026BB765291",
"00000072-0000-1000-8000-0026BB765291",
],
"optional": [
"0000006F-0000-1000-8000-0026BB765291",
"00000024-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
"0000008C-0000-1000-8000-0026BB765291": {
"name": "WINDOW_COVERING",
"description": "Window Covering",
"required": [
"0000006D-0000-1000-8000-0026BB765291",
"0000007C-0000-1000-8000-0026BB765291",
"00000072-0000-1000-8000-0026BB765291",
],
"optional": [
"0000006F-0000-1000-8000-0026BB765291",
"0000007B-0000-1000-8000-0026BB765291",
"0000007D-0000-1000-8000-0026BB765291",
"0000006C-0000-1000-8000-0026BB765291",
"0000006E-0000-1000-8000-0026BB765291",
"00000024-0000-1000-8000-0026BB765291",
"00000023-0000-1000-8000-0026BB765291",
],
},
}
| 38.67118 | 61 | 0.575601 | 1,753 | 19,993 | 6.545351 | 0.123788 | 0.200802 | 0.301203 | 0.602405 | 0.698274 | 0.597176 | 0.54593 | 0.530591 | 0.530591 | 0.382778 | 0 | 0.583773 | 0.270695 | 19,993 | 516 | 62 | 38.746124 | 0.203141 | 0.0013 | 0 | 0.523346 | 1 | 0 | 0.63431 | 0.526521 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
2abc0894befa9b82abf14deb034d713c895c3cec | 228 | py | Python | trochilidae/tests/test_interoperable_get_arg_spec.py | MATTHEWFRAZER/trochilidae | 35e907ba9dcb1f283f79f4f32d61db6b53a1ca97 | [
"MIT"
] | null | null | null | trochilidae/tests/test_interoperable_get_arg_spec.py | MATTHEWFRAZER/trochilidae | 35e907ba9dcb1f283f79f4f32d61db6b53a1ca97 | [
"MIT"
] | null | null | null | trochilidae/tests/test_interoperable_get_arg_spec.py | MATTHEWFRAZER/trochilidae | 35e907ba9dcb1f283f79f4f32d61db6b53a1ca97 | [
"MIT"
] | 1 | 2021-11-12T18:49:15.000Z | 2021-11-12T18:49:15.000Z | import pytest
def test_interoperable_get_arg_spec():
try:
from trochilidae.interoperable_get_arg_spec import interoperable_get_arg_spec
except Exception as ex:
pytest.fail("import failed:{0}".format(ex)) | 32.571429 | 85 | 0.754386 | 31 | 228 | 5.225806 | 0.612903 | 0.296296 | 0.351852 | 0.425926 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005291 | 0.171053 | 228 | 7 | 86 | 32.571429 | 0.851852 | 0 | 0 | 0 | 0 | 0 | 0.074236 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | true | 0 | 0.5 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
2ae28dd4cd496677f2171cafd5eb2db8ff2a4339 | 16,145 | py | Python | model.py | kimmo1019/Roundtrip | 685e3cce7b6c679e527ffc891b8f7d9eb6f3047c | [
"MIT"
] | 28 | 2020-04-09T23:35:09.000Z | 2022-03-16T08:53:26.000Z | model.py | kimmo1019/Roundtrip | 685e3cce7b6c679e527ffc891b8f7d9eb6f3047c | [
"MIT"
] | 8 | 2020-09-25T22:38:56.000Z | 2022-01-04T22:31:02.000Z | model.py | kimmo1019/Roundtrip | 685e3cce7b6c679e527ffc891b8f7d9eb6f3047c | [
"MIT"
] | 8 | 2020-05-08T01:11:43.000Z | 2022-03-18T15:14:30.000Z | import tensorflow as tf
import tensorflow.contrib as tc
import tensorflow.contrib.layers as tcl
def leaky_relu(x, alpha=0.2):
return tf.maximum(tf.minimum(0.0, alpha * x), x)
#return tf.maximum(0.0, x)
#return tf.nn.tanh(x)
#return tf.nn.elu(x)
class Discriminator(object):
def __init__(self, input_dim, name, nb_layers=2,nb_units=256):
self.input_dim = input_dim
self.name = name
self.nb_layers = nb_layers
self.nb_units = nb_units
def __call__(self, x, reuse=True):
with tf.variable_scope(self.name) as vs:
if reuse:
vs.reuse_variables()
fc = tcl.fully_connected(
x, self.nb_units,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
activation_fn=tf.identity
)
fc = leaky_relu(fc)
for _ in range(self.nb_layers-1):
fc = tcl.fully_connected(
fc, self.nb_units,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
activation_fn=tf.identity
)
fc = tcl.batch_norm(fc)
#fc = leaky_relu(fc)
fc = tf.nn.tanh(fc)
output = tcl.fully_connected(
fc, 1,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
activation_fn=tf.identity
)
return output
@property
def vars(self):
return [var for var in tf.global_variables() if self.name in var.name]
#fcn
class Generator(object):
def __init__(self, input_dim, output_dim, name, nb_layers=2,nb_units=256):
self.input_dim = input_dim
self.output_dim = output_dim
self.name = name
self.nb_layers = nb_layers
self.nb_units = nb_units
def __call__(self, z, reuse=True):
#with tf.variable_scope(self.name,reuse=tf.AUTO_REUSE) as vs:
with tf.variable_scope(self.name) as vs:
if reuse:
vs.reuse_variables()
fc = tcl.fully_connected(
z, self.nb_units,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
#weights_regularizer=tcl.l2_regularizer(2.5e-5),
activation_fn=tf.identity
)
fc = leaky_relu(fc)
for _ in range(self.nb_layers-1):
fc = tcl.fully_connected(
fc, self.nb_units,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
#weights_regularizer=tcl.l2_regularizer(2.5e-5),
activation_fn=tf.identity
)
fc = leaky_relu(fc)
output = tcl.fully_connected(
fc, self.output_dim,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
#weights_regularizer=tcl.l2_regularizer(2.5e-5),
activation_fn=tf.identity
)
return output
@property
def vars(self):
return [var for var in tf.global_variables() if self.name in var.name]
class Generator_resnet(object):
def __init__(self, input_dim, output_dim, name, nb_layers=2,nb_units=256):
self.input_dim = input_dim
self.output_dim = output_dim
self.name = name
self.nb_layers = nb_layers
self.nb_units = nb_units
def residual_block(self, x, dim):
e = tcl.fully_connected(x, self.nb_units, activation_fn=tf.identity)
e = leaky_relu(e)
e = tcl.fully_connected(x, dim, activation_fn=tf.identity)
e = leaky_relu(e)
return x+e
def __call__(self, z, reuse=True):
#with tf.variable_scope(self.name,reuse=tf.AUTO_REUSE) as vs:
with tf.variable_scope(self.name) as vs:
if reuse:
vs.reuse_variables()
fc = tcl.fully_connected(
z, self.nb_units/2,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
#weights_regularizer=tcl.l2_regularizer(2.5e-5),
activation_fn=tf.identity
)
fc = leaky_relu(fc)
fc = tcl.fully_connected(
z, self.nb_units,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
#weights_regularizer=tcl.l2_regularizer(2.5e-5),
activation_fn=tf.identity
)
fc = leaky_relu(fc)
for _ in range(self.nb_layers-1):
fc = self.residual_block(fc,self.nb_units)
fc = tcl.fully_connected(
z, self.nb_units,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
#weights_regularizer=tcl.l2_regularizer(2.5e-5),
activation_fn=tf.identity
)
fc = leaky_relu(fc)
fc = tcl.fully_connected(
z, self.nb_units/2,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
#weights_regularizer=tcl.l2_regularizer(2.5e-5),
activation_fn=tf.identity
)
fc = leaky_relu(fc)
output = tcl.fully_connected(
fc, self.output_dim,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
#weights_regularizer=tcl.l2_regularizer(2.5e-5),
activation_fn=tf.identity
)
return output
@property
def vars(self):
return [var for var in tf.global_variables() if self.name in var.name]
class Generator_res(object):#skip connection
def __init__(self, input_dim, label_dim, output_dim, name, nb_layers=2,nb_units=256):
self.input_dim = input_dim
self.label_dim = label_dim
self.output_dim = output_dim
self.name = name
self.nb_layers = nb_layers
self.nb_units = nb_units
def __call__(self, z, reuse=True):
#with tf.variable_scope(self.name,reuse=tf.AUTO_REUSE) as vs:
z_latent = z[:,:self.input_dim]
z_label = z[:,self.input_dim:]
with tf.variable_scope(self.name) as vs:
if reuse:
vs.reuse_variables()
fc = tcl.fully_connected(
z, self.nb_units,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
#weights_regularizer=tcl.l2_regularizer(2.5e-5),
activation_fn=tf.identity
)
fc = leaky_relu(fc)
for _ in range(self.nb_layers-1):
fc = tf.concat([fc,z_label],axis=1)
fc = tcl.fully_connected(
fc, self.nb_units,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
#weights_regularizer=tcl.l2_regularizer(2.5e-5),
activation_fn=tf.identity
)
fc = leaky_relu(fc)
#fc = tf.concat([fc,z_label],axis=1)
output = tcl.fully_connected(
fc, self.output_dim,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
#weights_regularizer=tcl.l2_regularizer(2.5e-5),
activation_fn=tf.identity
)
return output
@property
def vars(self):
return [var for var in tf.global_variables() if self.name in var.name]
class Generator_Bayes(object):#y1,y2 = f(x1,x2) where p(y1|x1,x2) = p(y1|x1)
def __init__(self, input_dim1, input_dim2, output_dim1, output_dim2, name, nb_layers=2,nb_units=256,constrain=False):
self.input_dim1 = input_dim1
self.input_dim2 = input_dim2
self.output_dim1 = output_dim1
self.output_dim2 = output_dim2
self.name = name
self.nb_layers = nb_layers
self.nb_units = nb_units
self.constrain = constrain
def __call__(self, z, reuse=True):
#with tf.variable_scope(self.name,reuse=tf.AUTO_REUSE) as vs:
with tf.variable_scope(self.name) as vs:
if reuse:
vs.reuse_variables()
z1 = z[:,:self.input_dim1]
z2 = z[:,self.input_dim1:]
fc1 = tcl.fully_connected(
z1, self.nb_units,
activation_fn=tf.identity,
scope='z1_0'
)
fc1 = leaky_relu(fc1)
fc2 = tcl.fully_connected(
z, self.nb_units,
activation_fn=tf.identity,
scope='z2_0'
)
fc2 = leaky_relu(fc2)
for i in range(self.nb_layers-1):
z = fc1
fc1 = tcl.fully_connected(
fc1, self.nb_units,
activation_fn=tf.identity,
scope='z1_%d'%(i+1)
)
fc1 = leaky_relu(fc1)
fc2 = tf.concat([z,fc2],axis=1)
fc2 = tcl.fully_connected(
fc2, self.nb_units,
activation_fn=tf.identity,
scope='z2_%d'%(i+1)
)
fc2 = leaky_relu(fc2)
output1 = tcl.fully_connected(
fc1, self.output_dim1,
activation_fn=tf.identity,
scope='z1_last'
)
fc2 = tf.concat([fc1,fc2],axis=1)
output2 = tcl.fully_connected(
fc2, self.output_dim2,
activation_fn=tf.identity,
scope='z2_last'
)
if self.constrain:
output2_phi = output2[:,1:2]
output2_sigma2 = output2[:,2:3]
output2_nu = output2[:,3:4]
output2_phi = tf.tanh(output2_phi)
output2_sigma2 = tf.abs(output2_sigma2)
#output2_nu = tf.abs(output2_nu)
output2 = tf.concat([output2[:,0:1],output2_phi,output2_sigma2,output2_nu,output2[:,-2:]],axis=1)
return [output1,output2]
@property
def vars(self):
vars_z1 = [var for var in tf.global_variables() if self.name+'/z1' in var.name]
vars_z2 = [var for var in tf.global_variables() if self.name+'/z2' in var.name]
all_vars = [var for var in tf.global_variables() if self.name in var.name]
return [vars_z1,vars_z2,all_vars]
class Generator_PCN(object):#partially connected network, z1<--f1(z1), z2<--f2(z1,z2)
def __init__(self, input_dim1, input_dim2, output_dim1, output_dim2, name, nb_layers=2,nb_units=256):
self.input_dim1 = input_dim1
self.input_dim2 = input_dim2
self.output_dim1 = output_dim1
self.output_dim2 = output_dim2
self.name = name
self.nb_layers = nb_layers
self.nb_units = nb_units
def __call__(self, z, reuse=True):
#with tf.variable_scope(self.name,reuse=tf.AUTO_REUSE) as vs:
with tf.variable_scope(self.name) as vs:
if reuse:
vs.reuse_variables()
z1 = z[:,:self.input_dim1]
z2 = z[:,self.input_dim1:]
fc1 = tcl.fully_connected(
z1, self.nb_units,
activation_fn=tf.identity,
scope='z1_0'
)
fc1 = leaky_relu(fc1)
#cross connections
fc_cross = tcl.fully_connected(
z1, self.nb_units,
weights_initializer=tf.zeros_initializer(),
biases_initializer=None,
activation_fn=tf.identity,
scope='zc_0'
)
fc_cross = leaky_relu(fc_cross)
fc2 = tcl.fully_connected(
z2, self.nb_units,
activation_fn=tf.identity,
scope='z2_0'
)
fc2 = leaky_relu(fc2)
fc2 = tf.add(fc2,fc_cross)
#fc2 = tf.concat([fc2,fc_cross],axis=1)
for i in range(self.nb_layers-1):
z = fc1
fc1 = tcl.fully_connected(
fc1, self.nb_units,
activation_fn=tf.identity,
scope='z1_%d'%(i+1)
)
fc1 = leaky_relu(fc1)
#cross connection
fc_cross = tcl.fully_connected(
z, self.nb_units,
activation_fn=tf.identity,
weights_initializer=tf.zeros_initializer(),
biases_initializer=None,
scope='zc_%d'%(i+1)
)
fc_cross = leaky_relu(fc_cross)
fc2 = tcl.fully_connected(
fc2, self.nb_units,
activation_fn=tf.identity,
scope='z2_%d'%(i+1)
)
fc2 = leaky_relu(fc2)
fc2 = tf.add(fc2,fc_cross)
#fc2 = tf.concat([fc2,fc_cross],axis=1)
output1 = tcl.fully_connected(
fc1, self.output_dim1,
activation_fn=tf.identity,
scope='z1_last'
)
#cross connection
output_cross = tcl.fully_connected(
fc1, self.output_dim2,
activation_fn=tf.identity,
weights_initializer=tf.zeros_initializer(),
scope='zc_last'
)
output2 = tcl.fully_connected(
fc2, self.output_dim2,
activation_fn=tf.identity,
scope='z2_last'
)
output2 = tf.add(output2,output_cross)
return [output1,output2]
@property
def vars(self):
vars_z1 = [var for var in tf.global_variables() if self.name+'/z1' in var.name]
vars_z2 = [var for var in tf.global_variables() if self.name+'/z2' in var.name]
vars_zc = [var for var in tf.global_variables() if self.name+'/zc' in var.name]
return [vars_z1,vars_z2,vars_zc]
class Encoder(object):
def __init__(self, input_dim, output_dim, feat_dim, name, nb_layers=2, nb_units=256):
self.input_dim = input_dim
self.output_dim = output_dim
self.feat_dim = feat_dim
self.name = name
self.nb_layers = nb_layers
self.nb_units = nb_units
def __call__(self, x, reuse=True):
with tf.variable_scope(self.name,reuse=tf.AUTO_REUSE) as vs:
# with tf.variable_scope(self.name) as vs:
# if reuse:
# vs.reuse_variables()
fc = tcl.fully_connected(
x, self.nb_units,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
activation_fn=tf.identity
)
fc = leaky_relu(fc)
for _ in range(self.nb_layers-1):
fc = tcl.fully_connected(
fc, self.nb_units,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
activation_fn=tf.identity
)
fc = leaky_relu(fc)
output = tcl.fully_connected(
fc, self.output_dim,
#weights_initializer=tf.random_normal_initializer(stddev=0.02),
activation_fn=tf.identity
)
logits = output[:, self.feat_dim:]
y = tf.nn.softmax(logits)
return output[:, 0:self.feat_dim], y, logits
@property
def vars(self):
return [var for var in tf.global_variables() if self.name in var.name]
if __name__=='__main__':
import numpy as np
import time
from tensorflow.python.ops.parallel_for.gradients import jacobian,batch_jacobian
| 37.459397 | 127 | 0.535708 | 1,924 | 16,145 | 4.240125 | 0.071726 | 0.038612 | 0.070851 | 0.091689 | 0.863079 | 0.851312 | 0.84077 | 0.831822 | 0.799461 | 0.787938 | 0 | 0.03216 | 0.370207 | 16,145 | 430 | 128 | 37.546512 | 0.770161 | 0.147414 | 0 | 0.725904 | 0 | 0 | 0.007511 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069277 | false | 0 | 0.018072 | 0.018072 | 0.156627 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
2ae4384af68e0d6600b39ee5fa63747f94c17d9f | 72,215 | py | Python | pyidf/demand_limiting_controls.py | marcelosalles/pyidf | c2f744211572b5e14e29522aac1421ba88addb0e | [
"Apache-2.0"
] | 19 | 2015-12-08T23:33:51.000Z | 2022-01-31T04:41:10.000Z | pyidf/demand_limiting_controls.py | marcelosalles/pyidf | c2f744211572b5e14e29522aac1421ba88addb0e | [
"Apache-2.0"
] | 2 | 2019-10-04T10:57:00.000Z | 2021-10-01T06:46:17.000Z | pyidf/demand_limiting_controls.py | marcelosalles/pyidf | c2f744211572b5e14e29522aac1421ba88addb0e | [
"Apache-2.0"
] | 7 | 2015-11-04T02:25:01.000Z | 2021-12-08T03:14:28.000Z | """ Data objects in group "Demand Limiting Controls"
"""
from collections import OrderedDict
import logging
from pyidf.helper import DataObject
logger = logging.getLogger("pyidf")
logger.addHandler(logging.NullHandler())
class DemandManagerAssignmentList(DataObject):

    """ Corresponds to IDD object `DemandManagerAssignmentList`
        a list of meters that can be reported are available after a run on
        the meter dictionary file (.mdd) if the Output:VariableDictionary has been requested.
    """

    # Machine-readable schema mirroring the EnergyPlus IDD definition of this
    # object.  The DataObject base class (pyidf.helper) presumably drives field
    # ordering, validation and serialization from this dict — keys and literal
    # strings here are part of the runtime contract; do not edit by hand.
    _schema = {'extensible-fields': OrderedDict([(u'demandmanager 1 object type',
                                                  {'name': u'DemandManager 1 Object Type',
                                                   'pyname': u'demandmanager_1_object_type',
                                                   'required-field': False,
                                                   'autosizable': False,
                                                   'accepted-values': [u'DemandManager:ExteriorLights',
                                                                       u'DemandManager:Lights',
                                                                       u'DemandManager:ElectricEquipment',
                                                                       u'DemandManager:Thermostats',
                                                                       u'DemandManager:Ventilation'],
                                                   'autocalculatable': False,
                                                   'type': 'alpha'}),
                                                 (u'demandmanager 1 name',
                                                  {'name': u'DemandManager 1 Name',
                                                   'pyname': u'demandmanager_1_name',
                                                   'required-field': False,
                                                   'autosizable': False,
                                                   'autocalculatable': False,
                                                   'type': u'object-list'})]),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'meter name',
                                       {'name': u'Meter Name',
                                        'pyname': u'meter_name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'external-list'}),
                                      (u'demand limit schedule name',
                                       {'name': u'Demand Limit Schedule Name',
                                        'pyname': u'demand_limit_schedule_name',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'object-list'}),
                                      (u'demand limit safety fraction',
                                       {'name': u'Demand Limit Safety Fraction',
                                        'pyname': u'demand_limit_safety_fraction',
                                        'required-field': True,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real'}),
                                      (u'billing period schedule name',
                                       {'name': u'Billing Period Schedule Name',
                                        'pyname': u'billing_period_schedule_name',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'object-list'}),
                                      (u'peak period schedule name',
                                       {'name': u'Peak Period Schedule Name',
                                        'pyname': u'peak_period_schedule_name',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'object-list'}),
                                      (u'demand window length',
                                       {'name': u'Demand Window Length',
                                        'pyname': u'demand_window_length',
                                        'minimum>': 0,
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'integer',
                                        'unit': u'minutes'}),
                                      (u'demand manager priority',
                                       {'name': u'Demand Manager Priority',
                                        'pyname': u'demand_manager_priority',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'Sequential',
                                                            u'All'],
                                        'autocalculatable': False,
                                        'type': 'alpha'})]),
               'format': None,
               'group': u'Demand Limiting Controls',
               'min-fields': 0,
               'name': u'DemandManagerAssignmentList',
               'pyname': u'DemandManagerAssignmentList',
               'required-object': False,
               'unique-object': False}

    # Each property below is a thin accessor over DataObject item access; the
    # subscript string must match the 'name' entry in _schema exactly.

    @property
    def name(self):
        """field `Name`

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def meter_name(self):
        """field `Meter Name`

        Args:
            value (str): value for IDD Field `Meter Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `meter_name` or None if not set
        """
        return self["Meter Name"]

    @meter_name.setter
    def meter_name(self, value=None):
        """Corresponds to IDD field `Meter Name`"""
        self["Meter Name"] = value

    @property
    def demand_limit_schedule_name(self):
        """field `Demand Limit Schedule Name`

        Args:
            value (str): value for IDD Field `Demand Limit Schedule Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `demand_limit_schedule_name` or None if not set
        """
        return self["Demand Limit Schedule Name"]

    @demand_limit_schedule_name.setter
    def demand_limit_schedule_name(self, value=None):
        """Corresponds to IDD field `Demand Limit Schedule Name`"""
        self["Demand Limit Schedule Name"] = value

    @property
    def demand_limit_safety_fraction(self):
        """field `Demand Limit Safety Fraction`

        Args:
            value (float): value for IDD Field `Demand Limit Safety Fraction`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `demand_limit_safety_fraction` or None if not set
        """
        return self["Demand Limit Safety Fraction"]

    @demand_limit_safety_fraction.setter
    def demand_limit_safety_fraction(self, value=None):
        """Corresponds to IDD field `Demand Limit Safety Fraction`"""
        self["Demand Limit Safety Fraction"] = value

    @property
    def billing_period_schedule_name(self):
        """field `Billing Period Schedule Name`

        | This field should reference the same schedule as the month schedule name field of the
        | UtilityCost:Tariff object, if used.
        | If blank, defaults to regular divisions between months.

        Args:
            value (str): value for IDD Field `Billing Period Schedule Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `billing_period_schedule_name` or None if not set
        """
        return self["Billing Period Schedule Name"]

    @billing_period_schedule_name.setter
    def billing_period_schedule_name(self, value=None):
        """Corresponds to IDD field `Billing Period Schedule Name`"""
        self["Billing Period Schedule Name"] = value

    @property
    def peak_period_schedule_name(self):
        """field `Peak Period Schedule Name`

        | This field should reference the same schedule as the period schedule name field of the
        | UtilityCost:Tariff object, if used.
        | If blank, defaults to always on peak.

        Args:
            value (str): value for IDD Field `Peak Period Schedule Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `peak_period_schedule_name` or None if not set
        """
        return self["Peak Period Schedule Name"]

    @peak_period_schedule_name.setter
    def peak_period_schedule_name(self, value=None):
        """Corresponds to IDD field `Peak Period Schedule Name`"""
        self["Peak Period Schedule Name"] = value

    @property
    def demand_window_length(self):
        """field `Demand Window Length`

        | Units: minutes

        Args:
            value (int): value for IDD Field `Demand Window Length`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            int: the value of `demand_window_length` or None if not set
        """
        return self["Demand Window Length"]

    @demand_window_length.setter
    def demand_window_length(self, value=None):
        """Corresponds to IDD field `Demand Window Length`"""
        self["Demand Window Length"] = value

    @property
    def demand_manager_priority(self):
        """field `Demand Manager Priority`

        Args:
            value (str): value for IDD Field `Demand Manager Priority`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `demand_manager_priority` or None if not set
        """
        return self["Demand Manager Priority"]

    @demand_manager_priority.setter
    def demand_manager_priority(self, value=None):
        """Corresponds to IDD field `Demand Manager Priority`"""
        self["Demand Manager Priority"] = value

    def add_extensible(self,
                       demandmanager_1_object_type=None,
                       demandmanager_1_name=None,
                       ):
        """Add values for extensible fields.

        Args:
            demandmanager_1_object_type (str): value for IDD Field `DemandManager 1 Object Type`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value
            demandmanager_1_name (str): value for IDD Field `DemandManager 1 Name`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value
        """
        vals = []
        # check_value (inherited from DataObject) presumably validates the
        # value against the _schema entry of the same name — verify in
        # pyidf.helper.
        demandmanager_1_object_type = self.check_value(
            "DemandManager 1 Object Type",
            demandmanager_1_object_type)
        vals.append(demandmanager_1_object_type)
        demandmanager_1_name = self.check_value(
            "DemandManager 1 Name",
            demandmanager_1_name)
        vals.append(demandmanager_1_name)
        self._extdata.append(vals)

    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata

    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`

        Args:
            extensibles (list): nested list of extensible values
        """
        # Rebuild from scratch so each group is validated via add_extensible.
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
class DemandManagerExteriorLights(DataObject):

    """ Corresponds to IDD object `DemandManager:ExteriorLights`
        used for demand limiting Exterior:Lights objects.
    """

    # IDD-derived schema consumed by the DataObject base class; literal key
    # strings are part of the runtime contract — do not edit by hand.
    _schema = {'extensible-fields': OrderedDict([(u'exterior lights 1 name',
                                                  {'name': u'Exterior Lights 1 Name',
                                                   'pyname': u'exterior_lights_1_name',
                                                   'required-field': True,
                                                   'autosizable': False,
                                                   'autocalculatable': False,
                                                   'type': u'object-list'})]),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'availability schedule name',
                                       {'name': u'Availability Schedule Name',
                                        'pyname': u'availability_schedule_name',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'object-list'}),
                                      (u'limit control',
                                       {'name': u'Limit Control',
                                        'pyname': u'limit_control',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'Off',
                                                            u'Fixed'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'minimum limit duration',
                                       {'name': u'Minimum Limit Duration',
                                        'pyname': u'minimum_limit_duration',
                                        'minimum>': 0,
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'integer',
                                        'unit': u'minutes'}),
                                      (u'maximum limit fraction',
                                       {'name': u'Maximum Limit Fraction',
                                        'pyname': u'maximum_limit_fraction',
                                        'maximum': 1.0,
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real'}),
                                      (u'limit step change',
                                       {'name': u'Limit Step Change',
                                        'pyname': u'limit_step_change',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'real'}),
                                      (u'selection control',
                                       {'name': u'Selection Control',
                                        'pyname': u'selection_control',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'All',
                                                            u'RotateMany',
                                                            u'RotateOne'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'rotation duration',
                                       {'name': u'Rotation Duration',
                                        'pyname': u'rotation_duration',
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0,
                                        'autocalculatable': False,
                                        'type': u'integer',
                                        'unit': u'minutes'})]),
               'format': None,
               'group': u'Demand Limiting Controls',
               'min-fields': 0,
               'name': u'DemandManager:ExteriorLights',
               'pyname': u'DemandManagerExteriorLights',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def availability_schedule_name(self):
        """field `Availability Schedule Name`

        | Availability schedule name for this system. Schedule value > 0 means the system is available.
        | If this field is blank, the system is always available.

        Args:
            value (str): value for IDD Field `Availability Schedule Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `availability_schedule_name` or None if not set
        """
        return self["Availability Schedule Name"]

    @availability_schedule_name.setter
    def availability_schedule_name(self, value=None):
        """Corresponds to IDD field `Availability Schedule Name`"""
        self["Availability Schedule Name"] = value

    @property
    def limit_control(self):
        """field `Limit Control`

        Args:
            value (str): value for IDD Field `Limit Control`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `limit_control` or None if not set
        """
        return self["Limit Control"]

    @limit_control.setter
    def limit_control(self, value=None):
        """Corresponds to IDD field `Limit Control`"""
        self["Limit Control"] = value

    @property
    def minimum_limit_duration(self):
        """field `Minimum Limit Duration`

        | If blank, duration defaults to the timestep
        | Units: minutes

        Args:
            value (int): value for IDD Field `Minimum Limit Duration`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            int: the value of `minimum_limit_duration` or None if not set
        """
        return self["Minimum Limit Duration"]

    @minimum_limit_duration.setter
    def minimum_limit_duration(self, value=None):
        """Corresponds to IDD field `Minimum Limit Duration`"""
        self["Minimum Limit Duration"] = value

    @property
    def maximum_limit_fraction(self):
        """field `Maximum Limit Fraction`

        | value <= 1.0

        Args:
            value (float): value for IDD Field `Maximum Limit Fraction`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `maximum_limit_fraction` or None if not set
        """
        return self["Maximum Limit Fraction"]

    @maximum_limit_fraction.setter
    def maximum_limit_fraction(self, value=None):
        """Corresponds to IDD field `Maximum Limit Fraction`"""
        self["Maximum Limit Fraction"] = value

    @property
    def limit_step_change(self):
        """field `Limit Step Change`

        | Not yet implemented

        Args:
            value (float): value for IDD Field `Limit Step Change`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `limit_step_change` or None if not set
        """
        return self["Limit Step Change"]

    @limit_step_change.setter
    def limit_step_change(self, value=None):
        """Corresponds to IDD field `Limit Step Change`"""
        self["Limit Step Change"] = value

    @property
    def selection_control(self):
        """field `Selection Control`

        Args:
            value (str): value for IDD Field `Selection Control`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `selection_control` or None if not set
        """
        return self["Selection Control"]

    @selection_control.setter
    def selection_control(self, value=None):
        """Corresponds to IDD field `Selection Control`"""
        self["Selection Control"] = value

    @property
    def rotation_duration(self):
        """field `Rotation Duration`

        | If blank, duration defaults to the timestep
        | Units: minutes

        Args:
            value (int): value for IDD Field `Rotation Duration`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            int: the value of `rotation_duration` or None if not set
        """
        return self["Rotation Duration"]

    @rotation_duration.setter
    def rotation_duration(self, value=None):
        """Corresponds to IDD field `Rotation Duration`"""
        self["Rotation Duration"] = value

    def add_extensible(self,
                       exterior_lights_1_name=None,
                       ):
        """Add values for extensible fields.

        Args:
            exterior_lights_1_name (str): value for IDD Field `Exterior Lights 1 Name`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value
        """
        vals = []
        # check_value is inherited from DataObject (pyidf.helper).
        exterior_lights_1_name = self.check_value(
            "Exterior Lights 1 Name",
            exterior_lights_1_name)
        vals.append(exterior_lights_1_name)
        self._extdata.append(vals)

    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata

    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`

        Args:
            extensibles (list): nested list of extensible values
        """
        # Rebuild from scratch so each group is validated via add_extensible.
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
class DemandManagerLights(DataObject):

    """ Corresponds to IDD object `DemandManager:Lights`
        used for demand limiting Lights objects.
    """

    # IDD-derived schema consumed by the DataObject base class; literal key
    # strings are part of the runtime contract — do not edit by hand.
    _schema = {'extensible-fields': OrderedDict([(u'lights 1 name',
                                                  {'name': u'Lights 1 Name',
                                                   'pyname': u'lights_1_name',
                                                   'required-field': True,
                                                   'autosizable': False,
                                                   'autocalculatable': False,
                                                   'type': u'object-list'})]),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'availability schedule name',
                                       {'name': u'Availability Schedule Name',
                                        'pyname': u'availability_schedule_name',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'object-list'}),
                                      (u'limit control',
                                       {'name': u'Limit Control',
                                        'pyname': u'limit_control',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'Off',
                                                            u'Fixed'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'minimum limit duration',
                                       {'name': u'Minimum Limit Duration',
                                        'pyname': u'minimum_limit_duration',
                                        'minimum>': 0,
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'integer',
                                        'unit': u'minutes'}),
                                      (u'maximum limit fraction',
                                       {'name': u'Maximum Limit Fraction',
                                        'pyname': u'maximum_limit_fraction',
                                        'maximum': 1.0,
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real'}),
                                      (u'limit step change',
                                       {'name': u'Limit Step Change',
                                        'pyname': u'limit_step_change',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'real'}),
                                      (u'selection control',
                                       {'name': u'Selection Control',
                                        'pyname': u'selection_control',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'All',
                                                            u'RotateMany',
                                                            u'RotateOne'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'rotation duration',
                                       {'name': u'Rotation Duration',
                                        'pyname': u'rotation_duration',
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0,
                                        'autocalculatable': False,
                                        'type': u'integer',
                                        'unit': u'minutes'})]),
               'format': None,
               'group': u'Demand Limiting Controls',
               'min-fields': 0,
               'name': u'DemandManager:Lights',
               'pyname': u'DemandManagerLights',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def availability_schedule_name(self):
        """field `Availability Schedule Name`

        | Availability schedule name for this system. Schedule value > 0 means the system is available.
        | If this field is blank, the system is always available.

        Args:
            value (str): value for IDD Field `Availability Schedule Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `availability_schedule_name` or None if not set
        """
        return self["Availability Schedule Name"]

    @availability_schedule_name.setter
    def availability_schedule_name(self, value=None):
        """Corresponds to IDD field `Availability Schedule Name`"""
        self["Availability Schedule Name"] = value

    @property
    def limit_control(self):
        """field `Limit Control`

        Args:
            value (str): value for IDD Field `Limit Control`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `limit_control` or None if not set
        """
        return self["Limit Control"]

    @limit_control.setter
    def limit_control(self, value=None):
        """Corresponds to IDD field `Limit Control`"""
        self["Limit Control"] = value

    @property
    def minimum_limit_duration(self):
        """field `Minimum Limit Duration`

        | If blank, duration defaults to the timestep
        | Units: minutes

        Args:
            value (int): value for IDD Field `Minimum Limit Duration`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            int: the value of `minimum_limit_duration` or None if not set
        """
        return self["Minimum Limit Duration"]

    @minimum_limit_duration.setter
    def minimum_limit_duration(self, value=None):
        """Corresponds to IDD field `Minimum Limit Duration`"""
        self["Minimum Limit Duration"] = value

    @property
    def maximum_limit_fraction(self):
        """field `Maximum Limit Fraction`

        | value <= 1.0

        Args:
            value (float): value for IDD Field `Maximum Limit Fraction`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `maximum_limit_fraction` or None if not set
        """
        return self["Maximum Limit Fraction"]

    @maximum_limit_fraction.setter
    def maximum_limit_fraction(self, value=None):
        """Corresponds to IDD field `Maximum Limit Fraction`"""
        self["Maximum Limit Fraction"] = value

    @property
    def limit_step_change(self):
        """field `Limit Step Change`

        | Not yet implemented

        Args:
            value (float): value for IDD Field `Limit Step Change`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `limit_step_change` or None if not set
        """
        return self["Limit Step Change"]

    @limit_step_change.setter
    def limit_step_change(self, value=None):
        """Corresponds to IDD field `Limit Step Change`"""
        self["Limit Step Change"] = value

    @property
    def selection_control(self):
        """field `Selection Control`

        Args:
            value (str): value for IDD Field `Selection Control`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `selection_control` or None if not set
        """
        return self["Selection Control"]

    @selection_control.setter
    def selection_control(self, value=None):
        """Corresponds to IDD field `Selection Control`"""
        self["Selection Control"] = value

    @property
    def rotation_duration(self):
        """field `Rotation Duration`

        | If blank, duration defaults to the timestep
        | Units: minutes

        Args:
            value (int): value for IDD Field `Rotation Duration`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            int: the value of `rotation_duration` or None if not set
        """
        return self["Rotation Duration"]

    @rotation_duration.setter
    def rotation_duration(self, value=None):
        """Corresponds to IDD field `Rotation Duration`"""
        self["Rotation Duration"] = value

    def add_extensible(self,
                       lights_1_name=None,
                       ):
        """Add values for extensible fields.

        Args:
            lights_1_name (str): value for IDD Field `Lights 1 Name`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value
        """
        vals = []
        # check_value is inherited from DataObject (pyidf.helper).
        lights_1_name = self.check_value("Lights 1 Name", lights_1_name)
        vals.append(lights_1_name)
        self._extdata.append(vals)

    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata

    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`

        Args:
            extensibles (list): nested list of extensible values
        """
        # Rebuild from scratch so each group is validated via add_extensible.
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
class DemandManagerElectricEquipment(DataObject):

    """ Corresponds to IDD object `DemandManager:ElectricEquipment`
        used for demand limiting ElectricEquipment objects.
    """

    # IDD-derived schema consumed by the DataObject base class; literal key
    # strings are part of the runtime contract — do not edit by hand.
    _schema = {'extensible-fields': OrderedDict([(u'electric equipment 1 name',
                                                  {'name': u'Electric Equipment 1 Name',
                                                   'pyname': u'electric_equipment_1_name',
                                                   'required-field': True,
                                                   'autosizable': False,
                                                   'autocalculatable': False,
                                                   'type': u'object-list'})]),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'availability schedule name',
                                       {'name': u'Availability Schedule Name',
                                        'pyname': u'availability_schedule_name',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'object-list'}),
                                      (u'limit control',
                                       {'name': u'Limit Control',
                                        'pyname': u'limit_control',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'Off',
                                                            u'Fixed'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'minimum limit duration',
                                       {'name': u'Minimum Limit Duration',
                                        'pyname': u'minimum_limit_duration',
                                        'minimum>': 0,
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'integer',
                                        'unit': u'minutes'}),
                                      (u'maximum limit fraction',
                                       {'name': u'Maximum Limit Fraction',
                                        'pyname': u'maximum_limit_fraction',
                                        'maximum': 1.0,
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real'}),
                                      (u'limit step change',
                                       {'name': u'Limit Step Change',
                                        'pyname': u'limit_step_change',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'real'}),
                                      (u'selection control',
                                       {'name': u'Selection Control',
                                        'pyname': u'selection_control',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'All',
                                                            u'RotateMany',
                                                            u'RotateOne'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'rotation duration',
                                       {'name': u'Rotation Duration',
                                        'pyname': u'rotation_duration',
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0,
                                        'autocalculatable': False,
                                        'type': u'integer',
                                        'unit': u'minutes'})]),
               'format': None,
               'group': u'Demand Limiting Controls',
               'min-fields': 0,
               'name': u'DemandManager:ElectricEquipment',
               'pyname': u'DemandManagerElectricEquipment',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set
        """
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def availability_schedule_name(self):
        """field `Availability Schedule Name`

        | Availability schedule name for this system. Schedule value > 0 means the system is available.
        | If this field is blank, the system is always available.

        Args:
            value (str): value for IDD Field `Availability Schedule Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `availability_schedule_name` or None if not set
        """
        return self["Availability Schedule Name"]

    @availability_schedule_name.setter
    def availability_schedule_name(self, value=None):
        """Corresponds to IDD field `Availability Schedule Name`"""
        self["Availability Schedule Name"] = value

    @property
    def limit_control(self):
        """field `Limit Control`

        Args:
            value (str): value for IDD Field `Limit Control`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `limit_control` or None if not set
        """
        return self["Limit Control"]

    @limit_control.setter
    def limit_control(self, value=None):
        """Corresponds to IDD field `Limit Control`"""
        self["Limit Control"] = value

    @property
    def minimum_limit_duration(self):
        """field `Minimum Limit Duration`

        | If blank, duration defaults to the timestep
        | Units: minutes

        Args:
            value (int): value for IDD Field `Minimum Limit Duration`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            int: the value of `minimum_limit_duration` or None if not set
        """
        return self["Minimum Limit Duration"]

    @minimum_limit_duration.setter
    def minimum_limit_duration(self, value=None):
        """Corresponds to IDD field `Minimum Limit Duration`"""
        self["Minimum Limit Duration"] = value

    @property
    def maximum_limit_fraction(self):
        """field `Maximum Limit Fraction`

        | value <= 1.0

        Args:
            value (float): value for IDD Field `Maximum Limit Fraction`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `maximum_limit_fraction` or None if not set
        """
        return self["Maximum Limit Fraction"]

    @maximum_limit_fraction.setter
    def maximum_limit_fraction(self, value=None):
        """Corresponds to IDD field `Maximum Limit Fraction`"""
        self["Maximum Limit Fraction"] = value

    @property
    def limit_step_change(self):
        """field `Limit Step Change`

        | Not yet implemented

        Args:
            value (float): value for IDD Field `Limit Step Change`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `limit_step_change` or None if not set
        """
        return self["Limit Step Change"]

    @limit_step_change.setter
    def limit_step_change(self, value=None):
        """Corresponds to IDD field `Limit Step Change`"""
        self["Limit Step Change"] = value

    @property
    def selection_control(self):
        """field `Selection Control`

        Args:
            value (str): value for IDD Field `Selection Control`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `selection_control` or None if not set
        """
        return self["Selection Control"]

    @selection_control.setter
    def selection_control(self, value=None):
        """Corresponds to IDD field `Selection Control`"""
        self["Selection Control"] = value

    @property
    def rotation_duration(self):
        """field `Rotation Duration`

        | If blank, duration defaults to the timestep
        | Units: minutes

        Args:
            value (int): value for IDD Field `Rotation Duration`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            int: the value of `rotation_duration` or None if not set
        """
        return self["Rotation Duration"]

    @rotation_duration.setter
    def rotation_duration(self, value=None):
        """Corresponds to IDD field `Rotation Duration`"""
        self["Rotation Duration"] = value

    def add_extensible(self,
                       electric_equipment_1_name=None,
                       ):
        """Add values for extensible fields.

        Args:
            electric_equipment_1_name (str): value for IDD Field `Electric Equipment 1 Name`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value
        """
        vals = []
        # check_value is inherited from DataObject (pyidf.helper).
        electric_equipment_1_name = self.check_value(
            "Electric Equipment 1 Name",
            electric_equipment_1_name)
        vals.append(electric_equipment_1_name)
        self._extdata.append(vals)

    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata

    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`

        Args:
            extensibles (list): nested list of extensible values
        """
        # Rebuild from scratch so each group is validated via add_extensible.
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
class DemandManagerThermostats(DataObject):
""" Corresponds to IDD object `DemandManager:Thermostats`
used for demand limiting ZoneControl:Thermostat objects.
"""
_schema = {'extensible-fields': OrderedDict([(u'thermostat 1 name',
{'name': u'Thermostat 1 Name',
'pyname': u'thermostat_1_name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'})]),
'fields': OrderedDict([(u'name',
{'name': u'Name',
'pyname': u'name',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'alpha'}),
(u'availability schedule name',
{'name': u'Availability Schedule Name',
'pyname': u'availability_schedule_name',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'object-list'}),
(u'reset control',
{'name': u'Reset Control',
'pyname': u'reset_control',
'required-field': True,
'autosizable': False,
'accepted-values': [u'Off',
u'Fixed'],
'autocalculatable': False,
'type': 'alpha'}),
(u'minimum reset duration',
{'name': u'Minimum Reset Duration',
'pyname': u'minimum_reset_duration',
'minimum>': 0,
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'integer',
'unit': u'minutes'}),
(u'maximum heating setpoint reset',
{'name': u'Maximum Heating Setpoint Reset',
'pyname': u'maximum_heating_setpoint_reset',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'C'}),
(u'maximum cooling setpoint reset',
{'name': u'Maximum Cooling Setpoint Reset',
'pyname': u'maximum_cooling_setpoint_reset',
'required-field': True,
'autosizable': False,
'autocalculatable': False,
'type': u'real',
'unit': u'C'}),
(u'reset step change',
{'name': u'Reset Step Change',
'pyname': u'reset_step_change',
'required-field': False,
'autosizable': False,
'autocalculatable': False,
'type': u'real'}),
(u'selection control',
{'name': u'Selection Control',
'pyname': u'selection_control',
'required-field': True,
'autosizable': False,
'accepted-values': [u'All',
u'RotateMany',
u'RotateOne'],
'autocalculatable': False,
'type': 'alpha'}),
(u'rotation duration',
{'name': u'Rotation Duration',
'pyname': u'rotation_duration',
'required-field': False,
'autosizable': False,
'minimum': 0,
'autocalculatable': False,
'type': u'integer',
'unit': u'minutes'})]),
'format': None,
'group': u'Demand Limiting Controls',
'min-fields': 0,
'name': u'DemandManager:Thermostats',
'pyname': u'DemandManagerThermostats',
'required-object': False,
'unique-object': False}
@property
def name(self):
"""field `Name`
Args:
value (str): value for IDD Field `Name`
Raises:
ValueError: if `value` is not a valid value
Returns:
str: the value of `name` or None if not set
"""
return self["Name"]
@name.setter
def name(self, value=None):
"""Corresponds to IDD field `Name`"""
self["Name"] = value
    @property
    def availability_schedule_name(self):
        """field `Availability Schedule Name`

        | Availability schedule name for this system. Schedule value > 0 means the system is available.
        | If this field is blank, the system is always available.

        Args:
            value (str): value for IDD Field `Availability Schedule Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `availability_schedule_name` or None if not set
        """
        return self["Availability Schedule Name"]

    @availability_schedule_name.setter
    def availability_schedule_name(self, value=None):
        """Corresponds to IDD field `Availability Schedule Name`

        Args:
            value (str): value for IDD Field `Availability Schedule Name`
        """
        self["Availability Schedule Name"] = value
    @property
    def reset_control(self):
        """field `Reset Control`

        Args:
            value (str): value for IDD Field `Reset Control`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `reset_control` or None if not set
        """
        return self["Reset Control"]

    @reset_control.setter
    def reset_control(self, value=None):
        """Corresponds to IDD field `Reset Control`

        Args:
            value (str): value for IDD Field `Reset Control`
        """
        self["Reset Control"] = value
    @property
    def minimum_reset_duration(self):
        """field `Minimum Reset Duration`

        | If blank, duration defaults to the timestep
        | Units: minutes

        Args:
            value (int): value for IDD Field `Minimum Reset Duration`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            int: the value of `minimum_reset_duration` or None if not set
        """
        return self["Minimum Reset Duration"]

    @minimum_reset_duration.setter
    def minimum_reset_duration(self, value=None):
        """Corresponds to IDD field `Minimum Reset Duration`

        Args:
            value (int): value for IDD Field `Minimum Reset Duration`, in minutes
        """
        self["Minimum Reset Duration"] = value
    @property
    def maximum_heating_setpoint_reset(self):
        """field `Maximum Heating Setpoint Reset`

        | Units: C

        Args:
            value (float): value for IDD Field `Maximum Heating Setpoint Reset`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `maximum_heating_setpoint_reset` or None if not set
        """
        return self["Maximum Heating Setpoint Reset"]

    @maximum_heating_setpoint_reset.setter
    def maximum_heating_setpoint_reset(self, value=None):
        """Corresponds to IDD field `Maximum Heating Setpoint Reset`

        Args:
            value (float): value for IDD Field `Maximum Heating Setpoint Reset`, in C
        """
        self["Maximum Heating Setpoint Reset"] = value
    @property
    def maximum_cooling_setpoint_reset(self):
        """field `Maximum Cooling Setpoint Reset`

        | Units: C

        Args:
            value (float): value for IDD Field `Maximum Cooling Setpoint Reset`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `maximum_cooling_setpoint_reset` or None if not set
        """
        return self["Maximum Cooling Setpoint Reset"]

    @maximum_cooling_setpoint_reset.setter
    def maximum_cooling_setpoint_reset(self, value=None):
        """Corresponds to IDD field `Maximum Cooling Setpoint Reset`

        Args:
            value (float): value for IDD Field `Maximum Cooling Setpoint Reset`, in C
        """
        self["Maximum Cooling Setpoint Reset"] = value
    @property
    def reset_step_change(self):
        """field `Reset Step Change`

        | Not yet implemented

        Args:
            value (float): value for IDD Field `Reset Step Change`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `reset_step_change` or None if not set
        """
        return self["Reset Step Change"]

    @reset_step_change.setter
    def reset_step_change(self, value=None):
        """Corresponds to IDD field `Reset Step Change`

        Args:
            value (float): value for IDD Field `Reset Step Change`
        """
        self["Reset Step Change"] = value
    @property
    def selection_control(self):
        """field `Selection Control`

        Args:
            value (str): value for IDD Field `Selection Control`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `selection_control` or None if not set
        """
        return self["Selection Control"]

    @selection_control.setter
    def selection_control(self, value=None):
        """Corresponds to IDD field `Selection Control`

        Args:
            value (str): value for IDD Field `Selection Control`
                (accepted values per schema: All, RotateMany, RotateOne)
        """
        self["Selection Control"] = value
    @property
    def rotation_duration(self):
        """field `Rotation Duration`

        | If blank, duration defaults to the timestep
        | Units: minutes

        Args:
            value (int): value for IDD Field `Rotation Duration`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            int: the value of `rotation_duration` or None if not set
        """
        return self["Rotation Duration"]

    @rotation_duration.setter
    def rotation_duration(self, value=None):
        """Corresponds to IDD field `Rotation Duration`

        Args:
            value (int): value for IDD Field `Rotation Duration`, in minutes
        """
        self["Rotation Duration"] = value
def add_extensible(self,
thermostat_1_name=None,
):
"""Add values for extensible fields.
Args:
thermostat_1_name (str): value for IDD Field `Thermostat 1 Name`
if `value` is None it will not be checked against the
specification and is assumed to be a missing value
"""
vals = []
thermostat_1_name = self.check_value(
"Thermostat 1 Name",
thermostat_1_name)
vals.append(thermostat_1_name)
self._extdata.append(vals)
@property
def extensibles(self):
"""Get list of all extensibles."""
return self._extdata
@extensibles.setter
def extensibles(self, extensibles):
"""Replaces extensible fields with `extensibles`
Args:
extensibles (list): nested list of extensible values
"""
self._extdata = []
for ext in extensibles:
self.add_extensible(*ext)
class DemandManagerVentilation(DataObject):

    """ Corresponds to IDD object `DemandManager:Ventilation`
        used for demand limiting Controller:OutdoorAir objects.
    """
    # IDD schema: per-field metadata (type, limits, defaults, accepted values)
    # keyed by lower-case field name; consumed by the DataObject base class.
    _schema = {'extensible-fields': OrderedDict([(u'controller outdoor air 1 name',
                                                  {'name': u'Controller Outdoor Air 1 Name',
                                                   'pyname': u'controller_outdoor_air_1_name',
                                                   'required-field': True,
                                                   'autosizable': False,
                                                   'autocalculatable': False,
                                                   'type': u'object-list'})]),
               'fields': OrderedDict([(u'name',
                                       {'name': u'Name',
                                        'pyname': u'name',
                                        'required-field': True,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'alpha'}),
                                      (u'availability schedule name',
                                       {'name': u'Availability Schedule Name',
                                        'pyname': u'availability_schedule_name',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'object-list'}),
                                      (u'limit control',
                                       {'name': u'Limit Control',
                                        'pyname': u'limit_control',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'Off',
                                                            u'FixedRate',
                                                            u'ReductionRatio'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'minimum limit duration',
                                       {'name': u'Minimum Limit Duration',
                                        'pyname': u'minimum_limit_duration',
                                        'minimum>': 0,
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'integer',
                                        'unit': u'minutes'}),
                                      (u'fixed rate',
                                       {'name': u'Fixed Rate',
                                        'pyname': u'fixed_rate',
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real',
                                        'unit': u'm3/s'}),
                                      (u'reduction ratio',
                                       {'name': u'Reduction Ratio',
                                        'pyname': u'reduction_ratio',
                                        'maximum': 1.0,
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0.0,
                                        'autocalculatable': False,
                                        'type': u'real'}),
                                      (u'limit step change',
                                       {'name': u'Limit Step Change',
                                        'pyname': u'limit_step_change',
                                        'required-field': False,
                                        'autosizable': False,
                                        'autocalculatable': False,
                                        'type': u'real'}),
                                      (u'selection control',
                                       {'name': u'Selection Control',
                                        'pyname': u'selection_control',
                                        'default': u'All',
                                        'required-field': True,
                                        'autosizable': False,
                                        'accepted-values': [u'All',
                                                            u'RotateMany',
                                                            u'RotateOne'],
                                        'autocalculatable': False,
                                        'type': 'alpha'}),
                                      (u'rotation duration',
                                       {'name': u'Rotation Duration',
                                        'pyname': u'rotation_duration',
                                        'required-field': False,
                                        'autosizable': False,
                                        'minimum': 0,
                                        'autocalculatable': False,
                                        'type': u'integer',
                                        'unit': u'minutes'})]),
               'format': None,
               'group': u'Demand Limiting Controls',
               'min-fields': 10,
               'name': u'DemandManager:Ventilation',
               'pyname': u'DemandManagerVentilation',
               'required-object': False,
               'unique-object': False}

    @property
    def name(self):
        """field `Name`

        Args:
            value (str): value for IDD Field `Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `name` or None if not set
        """
        # Field values are stored/retrieved by item access keyed on the IDD field name.
        return self["Name"]

    @name.setter
    def name(self, value=None):
        """Corresponds to IDD field `Name`"""
        self["Name"] = value

    @property
    def availability_schedule_name(self):
        """field `Availability Schedule Name`

        | Availability schedule name for this demand manager. Schedule value > 0 means the demand manager is available.
        | If this field is blank, the DR is always available.

        Args:
            value (str): value for IDD Field `Availability Schedule Name`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `availability_schedule_name` or None if not set
        """
        return self["Availability Schedule Name"]

    @availability_schedule_name.setter
    def availability_schedule_name(self, value=None):
        """Corresponds to IDD field `Availability Schedule Name`"""
        self["Availability Schedule Name"] = value

    @property
    def limit_control(self):
        """field `Limit Control`

        Args:
            value (str): value for IDD Field `Limit Control`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `limit_control` or None if not set
        """
        return self["Limit Control"]

    @limit_control.setter
    def limit_control(self, value=None):
        """Corresponds to IDD field `Limit Control`"""
        self["Limit Control"] = value

    @property
    def minimum_limit_duration(self):
        """field `Minimum Limit Duration`

        | If blank, duration defaults to the timestep
        | Units: minutes

        Args:
            value (int): value for IDD Field `Minimum Limit Duration`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            int: the value of `minimum_limit_duration` or None if not set
        """
        return self["Minimum Limit Duration"]

    @minimum_limit_duration.setter
    def minimum_limit_duration(self, value=None):
        """Corresponds to IDD field `Minimum Limit Duration`"""
        self["Minimum Limit Duration"] = value

    @property
    def fixed_rate(self):
        """field `Fixed Rate`

        | Units: m3/s

        Args:
            value (float): value for IDD Field `Fixed Rate`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `fixed_rate` or None if not set
        """
        return self["Fixed Rate"]

    @fixed_rate.setter
    def fixed_rate(self, value=None):
        """Corresponds to IDD field `Fixed Rate`"""
        self["Fixed Rate"] = value

    @property
    def reduction_ratio(self):
        """field `Reduction Ratio`

        | value <= 1.0

        Args:
            value (float): value for IDD Field `Reduction Ratio`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `reduction_ratio` or None if not set
        """
        return self["Reduction Ratio"]

    @reduction_ratio.setter
    def reduction_ratio(self, value=None):
        """Corresponds to IDD field `Reduction Ratio`"""
        self["Reduction Ratio"] = value

    @property
    def limit_step_change(self):
        """field `Limit Step Change`

        | Not yet implemented

        Args:
            value (float): value for IDD Field `Limit Step Change`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            float: the value of `limit_step_change` or None if not set
        """
        return self["Limit Step Change"]

    @limit_step_change.setter
    def limit_step_change(self, value=None):
        """Corresponds to IDD field `Limit Step Change`"""
        self["Limit Step Change"] = value

    @property
    def selection_control(self):
        """field `Selection Control`

        | Default value: All

        Args:
            value (str): value for IDD Field `Selection Control`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            str: the value of `selection_control` or None if not set
        """
        return self["Selection Control"]

    @selection_control.setter
    def selection_control(self, value="All"):
        """Corresponds to IDD field `Selection Control`"""
        self["Selection Control"] = value

    @property
    def rotation_duration(self):
        """field `Rotation Duration`

        | If blank, duration defaults to the timestep
        | Units: minutes

        Args:
            value (int): value for IDD Field `Rotation Duration`

        Raises:
            ValueError: if `value` is not a valid value

        Returns:
            int: the value of `rotation_duration` or None if not set
        """
        return self["Rotation Duration"]

    @rotation_duration.setter
    def rotation_duration(self, value=None):
        """Corresponds to IDD field `Rotation Duration`"""
        self["Rotation Duration"] = value

    def add_extensible(self,
                       controller_outdoor_air_1_name=None,
                       ):
        """Add values for extensible fields.

        Args:
            controller_outdoor_air_1_name (str): value for IDD Field `Controller Outdoor Air 1 Name`
                if `value` is None it will not be checked against the
                specification and is assumed to be a missing value
        """
        vals = []
        controller_outdoor_air_1_name = self.check_value(
            "Controller Outdoor Air 1 Name",
            controller_outdoor_air_1_name)
        vals.append(controller_outdoor_air_1_name)
        self._extdata.append(vals)

    @property
    def extensibles(self):
        """Get list of all extensibles."""
        return self._extdata

    @extensibles.setter
    def extensibles(self, extensibles):
        """Replaces extensible fields with `extensibles`

        Args:
            extensibles (list): nested list of extensible values
        """
        self._extdata = []
        for ext in extensibles:
            self.add_extensible(*ext)
| 37.417098 | 120 | 0.454933 | 6,143 | 72,215 | 5.260622 | 0.032069 | 0.026488 | 0.048273 | 0.028221 | 0.898193 | 0.838563 | 0.803163 | 0.788526 | 0.770176 | 0.752507 | 0 | 0.003159 | 0.460763 | 72,215 | 1,929 | 121 | 37.436496 | 0.826712 | 0.259531 | 0 | 0.779698 | 0 | 0 | 0.211163 | 0.019911 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12743 | false | 0 | 0.00324 | 0 | 0.204104 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
63017a1806d18666b48f8b0cccdc525def8583d1 | 12,218 | py | Python | beanie/api/stock_item_api.py | altoyield/python-beanieclient | 448b8dd328054eaf32dd7d0bdff700e603b5c27d | [
"Apache-2.0"
] | null | null | null | beanie/api/stock_item_api.py | altoyield/python-beanieclient | 448b8dd328054eaf32dd7d0bdff700e603b5c27d | [
"Apache-2.0"
] | null | null | null | beanie/api/stock_item_api.py | altoyield/python-beanieclient | 448b8dd328054eaf32dd7d0bdff700e603b5c27d | [
"Apache-2.0"
] | null | null | null | # coding: utf-8
"""
Beanie ERP API
An API specification for interacting with the Beanie ERP system # noqa: E501
OpenAPI spec version: 0.8
Contact: dev@bean.ie
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from beanie.api_client import ApiClient
class StockItemApi(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    Ref: https://github.com/swagger-api/swagger-codegen
    """

    # NOTE(review): the generated keyword-argument name 'async' became a
    # reserved word in Python 3.7, so this module only parses on Python <= 3.6.
    # Regenerating the client with a newer swagger-codegen (which uses
    # 'async_req') would restore compatibility -- confirm before upgrading.

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is supplied.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    def add_stock_item(self, stock_items, **kwargs):  # noqa: E501
        """add_stock_item  # noqa: E501

        Creates a new stock item in the system  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.add_stock_item(stock_items, async=True)
        >>> result = thread.get()

        :param async bool
        :param StockItemInput stock_items: Stock item to add to the system (required)
        :return: StockItem
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return self.add_stock_item_with_http_info(stock_items, **kwargs)  # noqa: E501
        else:
            (data) = self.add_stock_item_with_http_info(stock_items, **kwargs)  # noqa: E501
            return data

    def add_stock_item_with_http_info(self, stock_items, **kwargs):  # noqa: E501
        """add_stock_item  # noqa: E501

        Creates a new stock item in the system  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.add_stock_item_with_http_info(stock_items, async=True)
        >>> result = thread.get()

        :param async bool
        :param StockItemInput stock_items: Stock item to add to the system (required)
        :return: StockItem
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Recognized keyword arguments for this call; anything else is an error.
        all_params = ['stock_items']  # noqa: E501
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method add_stock_item" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'stock_items' is set
        if ('stock_items' not in params or
                params['stock_items'] is None):
            raise ValueError("Missing the required parameter `stock_items` when calling `add_stock_item`")  # noqa: E501

        collection_formats = {}

        path_params = {}

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        # The stock item is sent as the JSON request body.
        body_params = None
        if 'stock_items' in params:
            body_params = params['stock_items']
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/stock_items', 'POST',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='StockItem',  # noqa: E501
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def find_stock_item_by_id(self, id, **kwargs):  # noqa: E501
        """Find Stock item by ID  # noqa: E501

        Returns a single stock item if the user has access  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.find_stock_item_by_id(id, async=True)
        >>> result = thread.get()

        :param async bool
        :param int id: ID of stock item to fetch (required)
        :return: StockItem
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return self.find_stock_item_by_id_with_http_info(id, **kwargs)  # noqa: E501
        else:
            (data) = self.find_stock_item_by_id_with_http_info(id, **kwargs)  # noqa: E501
            return data

    def find_stock_item_by_id_with_http_info(self, id, **kwargs):  # noqa: E501
        """Find Stock item by ID  # noqa: E501

        Returns a single stock item if the user has access  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.find_stock_item_by_id_with_http_info(id, async=True)
        >>> result = thread.get()

        :param async bool
        :param int id: ID of stock item to fetch (required)
        :return: StockItem
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Recognized keyword arguments for this call; anything else is an error.
        all_params = ['id']  # noqa: E501
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method find_stock_item_by_id" % key
                )
            params[key] = val
        del params['kwargs']
        # verify the required parameter 'id' is set
        if ('id' not in params or
                params['id'] is None):
            raise ValueError("Missing the required parameter `id` when calling `find_stock_item_by_id`")  # noqa: E501

        collection_formats = {}

        # The id is interpolated into the URL path.
        path_params = {}
        if 'id' in params:
            path_params['id'] = params['id']  # noqa: E501

        query_params = []

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/stock_items/{id}', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='StockItem',  # noqa: E501
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)

    def find_stock_items(self, **kwargs):  # noqa: E501
        """All stock item  # noqa: E501

        Returns all stock item from the system that the user has access to  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.find_stock_items(async=True)
        >>> result = thread.get()

        :param async bool
        :param list[str] tags: tags to filter by
        :param int limit: Maximum number of results to return
        :return: list[StockItem]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        kwargs['_return_http_data_only'] = True
        if kwargs.get('async'):
            return self.find_stock_items_with_http_info(**kwargs)  # noqa: E501
        else:
            (data) = self.find_stock_items_with_http_info(**kwargs)  # noqa: E501
            return data

    def find_stock_items_with_http_info(self, **kwargs):  # noqa: E501
        """All stock item  # noqa: E501

        Returns all stock item from the system that the user has access to  # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async=True
        >>> thread = api.find_stock_items_with_http_info(async=True)
        >>> result = thread.get()

        :param async bool
        :param list[str] tags: tags to filter by
        :param int limit: Maximum number of results to return
        :return: list[StockItem]
                 If the method is called asynchronously,
                 returns the request thread.
        """
        # Recognized keyword arguments for this call; anything else is an error.
        all_params = ['tags', 'limit']  # noqa: E501
        all_params.append('async')
        all_params.append('_return_http_data_only')
        all_params.append('_preload_content')
        all_params.append('_request_timeout')

        params = locals()
        for key, val in six.iteritems(params['kwargs']):
            if key not in all_params:
                raise TypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method find_stock_items" % key
                )
            params[key] = val
        del params['kwargs']

        collection_formats = {}

        path_params = {}

        # Optional filters are sent as query-string parameters; 'tags' is
        # serialized as a comma-separated list.
        query_params = []
        if 'tags' in params:
            query_params.append(('tags', params['tags']))  # noqa: E501
            collection_formats['tags'] = 'csv'  # noqa: E501
        if 'limit' in params:
            query_params.append(('limit', params['limit']))  # noqa: E501

        header_params = {}

        form_params = []
        local_var_files = {}

        body_params = None
        # HTTP header `Accept`
        header_params['Accept'] = self.api_client.select_header_accept(
            ['application/json'])  # noqa: E501

        # HTTP header `Content-Type`
        header_params['Content-Type'] = self.api_client.select_header_content_type(  # noqa: E501
            ['application/json'])  # noqa: E501

        # Authentication setting
        auth_settings = ['api_key']  # noqa: E501

        return self.api_client.call_api(
            '/stock_items', 'GET',
            path_params,
            query_params,
            header_params,
            body=body_params,
            post_params=form_params,
            files=local_var_files,
            response_type='list[StockItem]',  # noqa: E501
            auth_settings=auth_settings,
            async=params.get('async'),
            _return_http_data_only=params.get('_return_http_data_only'),
            _preload_content=params.get('_preload_content', True),
            _request_timeout=params.get('_request_timeout'),
            collection_formats=collection_formats)
| 36.690691 | 120 | 0.60239 | 1,454 | 12,218 | 4.825309 | 0.117607 | 0.055872 | 0.023945 | 0.030787 | 0.891534 | 0.87329 | 0.847349 | 0.828677 | 0.801881 | 0.800171 | 0 | 0.018304 | 0.306924 | 12,218 | 332 | 121 | 36.801205 | 0.810227 | 0.063022 | 0 | 0.71345 | 1 | 0 | 0.160233 | 0.032778 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.023392 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
2d5e0e467a97367a6344c0e98434dbc31273fa59 | 155 | py | Python | Python Basic/Week 2/10. Speacial Variables/second.py | nomancseku/NSL-HW | 9a130a97ce0909cff93a77378f95e44c69a8bc7c | [
"MIT"
] | 1 | 2020-11-29T20:02:08.000Z | 2020-11-29T20:02:08.000Z | Python Basic/Week 2/10. Speacial Variables/second.py | nomancseku/NSL-RA-Training | 9a130a97ce0909cff93a77378f95e44c69a8bc7c | [
"MIT"
] | 1 | 2020-10-25T18:59:03.000Z | 2020-10-25T18:59:21.000Z | Python Basic/Week 2/10. Speacial Variables/second.py | nomancseku/NSL-RA-Training | 9a130a97ce0909cff93a77378f95e44c69a8bc7c | [
"MIT"
] | null | null | null | import first
if __name__ == '__main__':
print('second script is executing as main script!')
else:
print('second script is imported from another script') | 25.833333 | 55 | 0.754839 | 22 | 155 | 4.954545 | 0.681818 | 0.201835 | 0.311927 | 0.348624 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.148387 | 155 | 6 | 55 | 25.833333 | 0.825758 | 0 | 0 | 0 | 0 | 0 | 0.608974 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.4 | 0 | 0.4 | 0.4 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 8 |
2d70356472c21670cf0cf23b600c3293177a225e | 204 | py | Python | Intermediate/Day6/4SetComprehensionDemos/setcomprehensiondemo1.py | vishipayyallore/LearningPython_2019 | f72d5af61ad96721442b7ebfc33518c2a879eb64 | [
"MIT"
] | null | null | null | Intermediate/Day6/4SetComprehensionDemos/setcomprehensiondemo1.py | vishipayyallore/LearningPython_2019 | f72d5af61ad96721442b7ebfc33518c2a879eb64 | [
"MIT"
] | null | null | null | Intermediate/Day6/4SetComprehensionDemos/setcomprehensiondemo1.py | vishipayyallore/LearningPython_2019 | f72d5af61ad96721442b7ebfc33518c2a879eb64 | [
"MIT"
] | null | null | null | from math import factorial
# Build the list of 0!..9!; duplicates are kept (0! and 1! are both 1).
factorials_list = list(map(factorial, range(10)))
print(factorials_list)

# A set keeps only unique values, so the duplicate factorial collapses.
factorials_list = set(map(factorial, range(10)))
print(factorials_list)
| 20.4 | 51 | 0.77451 | 31 | 204 | 4.967742 | 0.483871 | 0.363636 | 0.298701 | 0.311688 | 0.727273 | 0.727273 | 0.727273 | 0.727273 | 0.727273 | 0.727273 | 0 | 0.022599 | 0.132353 | 204 | 9 | 52 | 22.666667 | 0.847458 | 0.102941 | 0 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0.4 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
2dd87f7ac0869fb14b22c965e3e2fb4c50015e62 | 39,211 | py | Python | imager_controls.py | dakota0064/Fluorescent_Robotic_Imager | 423e6df956269fb2d6c438dd5fce1a6cbc947b3d | [
"Apache-2.0"
] | null | null | null | imager_controls.py | dakota0064/Fluorescent_Robotic_Imager | 423e6df956269fb2d6c438dd5fce1a6cbc947b3d | [
"Apache-2.0"
] | null | null | null | imager_controls.py | dakota0064/Fluorescent_Robotic_Imager | 423e6df956269fb2d6c438dd5fce1a6cbc947b3d | [
"Apache-2.0"
] | 2 | 2019-11-15T11:56:36.000Z | 2020-04-08T01:01:50.000Z | import gcodesender as gc
# Standard library
import datetime
import re
import threading
import time
import tkinter as tk

# Third-party
import cv2

# Project modules
import image_utils
import pyramid
import Automated_MARCO
from utils import *
class imager_controls():
def __init__(self, serial_port, app):
self.app = app
self.s = serial_port
def ping_location(self, z=False):
"""Pings the imager for its current location
Sends a signal to the imager asking for current position along the 3-axes.
Parses the received string into floating point values representing mm from home.
(Caution: Coordinates are directly updated to destination position after transit has begun,
but often before is has completed. This can serve as a suitable holding
mechanism for small distances (well to well), but doesn't work for longer transits
(well to home, home to first well, etc.))
Args:
s: The serial port connecting the imager
z: Optional variable which determines whether z-axis data is also parsed and returned
Returns:
2 or 3 floating point values representing the 'current' position of the imager
along the X, Y, and optionally Z axes.
"""
gc.sendGCode(self.s, "gcode/ping_location.gcode")
stuff = self.s.readline() # Format [VALUE] X:xx.xxx Y:yy.yyy Z:zz.zzz E:e.eeeee
grbl_out = stuff.decode() # Wait for response with carriage return
feedback = grbl_out.strip()
while (feedback[1] != 'V'):
stuff = self.s.readline() # Format [VALUE] X:xx.xxx Y:yy.yyy Z:zz.zzz E:e.eeeee
grbl_out = stuff.decode() # Wait for response with carriage return
feedback = grbl_out.strip()
if (feedback[11] == "."):
currentX = float(feedback[10:15])
feedback = feedback[16:]
elif(feedback[12] == "."):
currentX = float(feedback[10:16])
feedback = feedback[17:]
elif(feedback[13] == "."):
currentX = float(feedback[10:17])
feedback = feedback[18:]
if (feedback[3] == "."):
currentY = float(feedback[2:7])
feedback = feedback[8:]
elif (feedback[4] == "."):
currentY = float(feedback[2:8])
feedback = feedback[9:]
elif (feedback[5] == "."):
currentY = float(feedback[2:9])
feedback = feedback[10:]
if z == False:
return currentX, currentY
else:
if (feedback[3] == "."):
currentZ = float(feedback[2:7])
elif (feedback[4] == "."):
currentZ = float(feedback[2:8])
elif (feedback[5] == "."):
currentZ = float(feedback[2:9])
return currentX, currentY, currentZ
#-----------------------------------------------------------------------------------------------------------------------
def load_tray(self):
"""Moves the plate into position for easy tray loading.
Raises Z axis by 50mm and moves Y axis forward by 120mm
Args:
s: The serial port connecting to the imager
"""
self.go_home()
gc.sendGCode(self.s, "gcode/load_tray.gcode")
print("> Press 'OK' when tray is loaded")
print(" ")
#-----------------------------------------------------------------------------------------------------------------------
def set_height(self):
"""Sets the imager to an in-focus height during manual-mode initialization
Raises Z axis by 20.3mm
Args:
s: The serial port connecting to the imager"""
gc.sendGCode(self.s, "gcode/set_height.gcode")
#-----------------------------------------------------------------------------------------------------------------------
def go_home(self):
"""Moves the imager to the default home position.
Home is determined by the screws and contacts along each axis,
and can be adjusted with a small phillips screwdriver.
(Caution: DO NOT ADJUST WITHOUT CHANGING THE MAX X,Y,Z VALUES.
SEVERE MOTOR DAMAGE MAY OCCUR.)
Args:
s: The serial port connecting the imager
"""
gc.sendGCode(self.s, "gcode/home.gcode")
#-----------------------------------------------------------------------------------------------------------------------
def autocorrect(self, currentX, currentY):
"""Centers the camera on the current well.
Uses the method of Hough Circles to detemine the circle closest to the center
of the image. Calculates the distance between the center of this circle and the
center of the image, and adjusts the imager by this distance.
REQUIREMENTS:
Camera must be kept at a fixed orientation, with the cord facing
away from the front of the machine.
Lighting must also be kept constant across all images, otherwise false
positives may arise during circle detection.
Args:
currentX: The current X location, in mm
currentY: The current Y location, in mm
Returns:
Updated values for x and y
"""
mm_per_pixel = 0.0020 # Value should be between 0.0020 and 0.0025
image = self.app.cam.get_pil_image()
X, Y, R = image_utils.find_largest_circle(image)
#print("X: " + str(X))
#print("Y: " + str(Y))
#print("Radius: " + str(R))
centerX = 614
centerY = 461
#print("Center point: " + str(centerX) + " " + str(centerY))
if R == 0:
#print("unable to autocorrect")
return 0,0
delX = (centerX-(X*2))
delY = (centerY-(Y*2))
mmX = delX * mm_per_pixel
mmY = delY * mm_per_pixel
x = currentX + mmX
y = currentY + mmY
return x, y
####################################################################################################################
def run_auto_imager(self, extension, locations):
    """Runs the auto imager using one of the pre-defined well patterns.

    For every requested well: moves the imager over the well, polls until the
    printer reports it is in position, optionally fine-tunes the position via
    circle-detection autocorrect, captures one image per z-slice, and
    focus-stacks the slices on a background thread.  When all wells are done
    the images are classified with MARCO and, if requested, a laser
    fluorescence pass is started.

    Args:
        extension: The plate extension used to send the correct G-code
        locations: a list of strings representing wells to be imaged.
    """
    if len(locations) == 0:
        print("> Please select which wells to image")
        print("")
        return
    project = self.app.frames["AutoFrame"].project.get()
    target = self.app.frames["AutoFrame"].target.get()
    plate = self.app.frames["AutoFrame"].plate.get()
    prep_date = self.app.frames["AutoFrame"].date.get()
    if project == "" or target == "" or plate == "":
        print("> Must fill out project, target, and ")
        print("> plate name before imaging can begin")
        print("> ")
        return
    # Get the date and time at which imaging began
    date = datetime.date.today()
    project_data = [project, target, plate, date]
    # Ensure the directories exist to store the images:
    # ../../Images/<project>/<target>/<plate>/<date>/
    path = os.path.join(os.pardir, os.pardir, "Images/" + project + "/")
    ensure_directory(path)
    path = path + target + "/"
    ensure_directory(path)
    path = path + plate + "/"
    ensure_directory(path)
    if prep_date != "":
        self.app.write_prep_date(prep_date, path)
    else:
        # No prep date entered: fall back to the one stored on disk.
        self.app.frames["AutoFrame"].date.set(self.app.read_prep_date(path))
        self.app.write_prep_date(prep_date, path)
    path = path + str(date) + "/"
    ensure_directory(path)
    self.app.disable_manual()
    self.app.frames["AutoFrame"].cancelButton['state'] = tk.NORMAL
    self.app.frames["AutoFrame"].goButton['state'] = tk.DISABLED
    print("> Running standard configuration")
    size = len(locations)
    coordinates = self.app.determine_coordinates(locations)
    # NOTE(review): alias, not a copy -- autocorrect updates to
    # new_coordinates below also mutate `coordinates`.  Confirm intended.
    new_coordinates = coordinates
    step = 100/size  # progress-bar increment per well
    print("> Homing coordinates")
    self.app.arduino.lights_on()
    self.app.arduino.fan_on()
    self.go_home()
    x = self.app.FIRST_X
    y = self.app.FIRST_Y
    z = self.app.FIRST_Z
    if self.app.cancelled:
        self.app.abort()
        self.go_home()
        return
    if self.app.type.get() == "Intelli-plate 96-3":
        gc.writeGCode(coordinates, "gcode/96-3")
        gc.writeTemporary(self.app.FIRST_X, self.app.FIRST_Y, self.app.FIRST_Z, first=True)
        gc.sendGCode(self.s, "gcode/temp.gcode")
        os.remove("gcode/temp.gcode")
        time.sleep(20)  # allow the long initial move to finish
        # Accumulated autocorrect offsets, applied to every later well.
        delX = 0
        delY = 0
        j = 0
        for loc in locations:
            # Per-slice z step; total z travel is z_dist/10 mm.
            z_dist = (float(self.app.z_dist.get()) / 10.0) / float(self.app.slices.get())
            j += 1
            start = time.time()
            if self.app.cancelled:
                self.app.abort()
                self.go_home()
                return
            x = float(coordinates[loc][0]) - delX
            y = float(coordinates[loc][1]) - delY
            z = self.app.FIRST_Z
            gc.writeTemporary(x, y, z=z)
            gc.sendGCode(self.s, "gcode/temp.gcode")
            os.remove("gcode/temp.gcode")
            time.sleep(.5)
            # Poll until the printer reports it is within 0.1 mm of target.
            while(True):
                currentX, currentY = self.ping_location()
                if (abs(currentX - x) < .1) and (abs(currentY - y) < .1):
                    break
                else:
                    time.sleep(.5)
            # Printer is approximately above the well. Autocorrect if desired
            time.sleep(1.5)
            if self.app.do_autocorrect.get():
                newX, newY = self.autocorrect(x, y)
                # BUGFIX: compare by value; "is not 0" tested object identity,
                # which only works via CPython small-int caching and is a
                # SyntaxWarning since Python 3.8.
                if newX != 0 and newY != 0:
                    delX += x - newX
                    delY += y - newY
                    x = newX
                    y = newY
                    gc.writeTemporary(x, y)
                    new_coordinates[loc] = x,y
                    gc.sendGCode(self.s, "gcode/temp.gcode")
                    os.remove("gcode/temp.gcode")
            # When we reach this step the printer is where it needs to be
            time.sleep(1.5)
            well_path = path + loc + "/"
            ensure_directory(well_path)
            # Capture one image per z-slice, stepping z between shots.
            for i in range(self.app.slices.get()):
                save_path = well_path + "-slice" + str(i+1) + ".jpg"
                self.app.cam.save(save_path)
                z = z + z_dist
                gc.writeTemporary(x, y, z=z)
                gc.sendGCode(self.s, "gcode/temp.gcode")
                os.remove("gcode/temp.gcode")
                time.sleep(1.5)
            # Focus-stack this well's slices on a background thread.
            order = []
            for file in os.listdir(well_path):
                order.append(file)
            order.sort()
            images = []
            for file in order:
                image = cv2.imread(well_path + "/" + file)
                images.append(image)
            threading.Thread(target=pyramid.stack_focus, args=(images, path, loc)).start()
            self.app.frames["AutoFrame"].progress.step(step)
            self.app.frames["AutoFrame"].output.update_idletasks()
            self.app.set_time(start, size - j)
        print("> Classifying images with MARCO...")
        Automated_MARCO.predict(path, project_data)
        print("> Classification complete")
        # Strip the trailing date component from the path.
        parts = path.split("/")
        path = ""
        for i in range(len(parts) - 2):
            path += parts[i] + "/"
        self.app.check_previous_notes(path, str(date))
        if self.app.frames["AutoFrame"].laser_var.get() == 1:
            print("> Running laser fluorescence imaging")
            self.run_auto_laser_imager(extension, locations, new_coordinates)
            return
        else:
            self.app.clean_up()
            self.go_home()
            print("> Resetting imager")
            print("")
            return
    elif self.app.type.get() == "Greiner 1536":
        gc.writeGCode(coordinates, "gcode/1536")
        # NOTE(review): unlike the 96-3 branch, the temporary G-code written
        # here is never sent (no sendGCode/os.remove) -- confirm intended.
        gc.writeTemporary(self.app.FIRST_X, self.app.FIRST_Y, self.app.FIRST_Z)
        time.sleep(20)
        delX = 0
        delY = 0
        j = 0
        for loc in locations:
            z_dist = (float(self.app.z_dist.get()) / 10.0) / float(self.app.slices.get())
            j += 1
            start = time.time()
            if self.app.cancelled:
                self.app.abort()
                self.go_home()
                return
            x = float(coordinates[loc][0]) - delX
            y = float(coordinates[loc][1]) - delY
            z = self.app.FIRST_Z
            gc.writeTemporary(x, y, z=z)
            gc.sendGCode(self.s, "gcode/temp.gcode")
            os.remove("gcode/temp.gcode")
            time.sleep(.5)
            while (True):
                currentX, currentY = self.ping_location()
                if (abs(currentX - x) < .1) and (abs(currentY - y) < .1):
                    break
                else:
                    time.sleep(.5)
            # Printer is approximately above the well. Autocorrect if desired
            time.sleep(1.5)
            if self.app.do_autocorrect.get():
                newX, newY = self.autocorrect(x, y)
                # BUGFIX: value comparison instead of identity (see above).
                if newX != 0 and newY != 0:
                    delX += x - newX
                    delY += y - newY
                    x = newX
                    y = newY
                    gc.writeTemporary(x, y)
                    gc.sendGCode(self.s, "gcode/temp.gcode")
                    os.remove("gcode/temp.gcode")
            # When we reach this step the printer is where it needs to be
            time.sleep(1.5)
            well_path = path + loc + "/"
            ensure_directory(well_path)
            for i in range(self.app.slices.get()):
                save_path = well_path + "-slice" + str(i + 1) + ".jpg"
                self.app.cam.save(save_path)
                z = z + z_dist
                gc.writeTemporary(x, y, z=z)
                gc.sendGCode(self.s, "gcode/temp.gcode")
                os.remove("gcode/temp.gcode")
                time.sleep(1.5)
            order = []
            for file in os.listdir(well_path):
                order.append(file)
            order.sort()
            # NOTE(review): images are read but never focus-stacked in this
            # branch (no stack_focus thread) -- confirm intended.
            images = []
            for file in order:
                image = cv2.imread(well_path + "/" + file)
                images.append(image)
            self.app.frames["AutoFrame"].progress.step(step)
            self.app.frames["AutoFrame"].output.update_idletasks()
            self.app.set_time(start, size - j)
        print("> Classifying images with MARCO...")
        Automated_MARCO.predict(path, project_data)
        print("> Classification complete")
        parts = path.split("/")
        path = ""
        for i in range(len(parts) - 2):
            path += parts[i] + "/"
        self.app.check_previous_notes(path, str(date))
        self.app.clean_up()
        self.go_home()
####################################################################################################################
def run_custom_imager(self, extension, locations):
    """Runs the auto imager using a custom-defined well pattern.

    Wells are grouped by plate column and odd-indexed columns are traversed
    in reverse so the imager snakes across the plate.  Each well is imaged
    as in run_auto_imager (move, optional autocorrect, z-slice capture,
    background focus stacking), then results are classified with MARCO.

    Args:
        extension: The plate extension used to send the correct G-code
        locations: a list of strings representing wells to be imaged.
    """
    if len(locations) == 0:
        print("> Please select which wells to image")
        print("")
        return
    project = self.app.frames["AutoFrame"].project.get()
    target = self.app.frames["AutoFrame"].target.get()
    plate = self.app.frames["AutoFrame"].plate.get()
    prep_date = self.app.frames["AutoFrame"].date.get()
    if project == "" or target == "" or plate == "":
        print("> Must fill out project, target, and ")
        print("> plate name before imaging can begin")
        print("")
        return
    self.app.disable_manual()
    # Get the date and time at which imaging began
    date = datetime.date.today()
    project_data = [project, target, plate, date]
    # Ensure the directories exist to store the images
    path = os.path.join(os.pardir, os.pardir, "Images/" + project + "/")
    ensure_directory(path)
    path = path + target + "/"
    ensure_directory(path)
    path = path + plate + "/"
    ensure_directory(path)
    if prep_date != "":
        self.app.write_prep_date(prep_date, path)
    else:
        self.app.frames["AutoFrame"].date.set(self.app.read_prep_date(path))
    path = path + str(date) + "/"
    ensure_directory(path)
    self.app.frames["AutoFrame"].cancelButton['state'] = tk.NORMAL
    self.app.frames["AutoFrame"].goButton['state'] = tk.DISABLED
    z_dist = (float(self.app.z_dist.get()) / 10.0) / float(self.app.slices.get())
    self.app.arduino.lights_on()
    self.app.arduino.fan_on()
    x = self.app.FIRST_X
    y = self.app.FIRST_Y
    z = self.app.FIRST_Z
    # Work on a copy so popping visited wells doesn't mutate the caller's
    # list.
    newLocations = locations.copy()
    print("> Running custom configuration")
    print("> ")
    # Bucket the wells by plate column (loc[1:3] is the two-digit column).
    cols = [[]]
    for i in range(12):
        cols.append([])
    for loc in newLocations:
        column = int(loc[1] + loc[2]) - 1
        cols[column].append(loc)
    # Reverse every other column so the imager snakes across the plate.
    for col in cols[1::2]:
        col.sort(key=operator.itemgetter(0, 4), reverse=True)
    self.go_home()
    if self.app.cancelled:
        self.app.abort()
        self.go_home()
        return
    coordinates = self.app.determine_coordinates(newLocations)
    # NOTE(review): alias, not a copy -- see run_auto_imager.
    new_coordinates = coordinates
    gc.writeGCode(coordinates, "gcode/96-3")
    gc.writeTemporary(self.app.FIRST_X, self.app.FIRST_Y, self.app.FIRST_Z, first=True)
    gc.sendGCode(self.s, "gcode/temp.gcode")
    os.remove("gcode/temp.gcode")
    time.sleep(20)
    # Accumulated autocorrect offsets, applied to every later well.
    delX = 0
    delY = 0
    j = 0
    size = len(newLocations)
    step = (100/size)
    for i in range(12):
        for loc in cols[i]:
            if self.app.cancelled:
                self.app.abort()
                self.go_home()
                return
            z_dist = (float(self.app.z_dist.get()) / 10.0) / float(self.app.slices.get())
            j += 1
            start = time.time()
            gc.writeGCode(coordinates, "gcode/96-3")
            x = float(coordinates[loc][0]) - delX
            y = float(coordinates[loc][1]) - delY
            new_coordinates[loc] = x,y
            gc.writeTemporary(x, y, self.app.FIRST_Z)
            gc.sendGCode(self.s, "gcode/temp.gcode")
            os.remove("gcode/temp.gcode")
            # Track remaining wells so we can stop early once all are done.
            newLocations.pop(0)
            time.sleep(1.5)
            # Poll until the printer reports it is within 0.1 mm of target.
            while (True):
                currentX, currentY = self.ping_location()
                if (abs(currentX - x) < .1) and (abs(currentY - y) < .1):
                    break
                else:
                    time.sleep(1.5)
            time.sleep(1.5)
            if self.app.do_autocorrect.get():
                newX, newY = self.autocorrect(x, y)
                # BUGFIX: compare by value; "is not 0" tested object identity
                # (CPython small-int caching; SyntaxWarning since 3.8).
                if newX != 0 and newY != 0:
                    delX += x - newX
                    delY += y - newY
                    x = newX
                    y = newY
                    gc.writeTemporary(x, y)
                    gc.sendGCode(self.s, "gcode/temp.gcode")
                    os.remove("gcode/temp.gcode")
            # When we reach this step the printer is where it needs to be
            time.sleep(1.5)
            z = self.app.FIRST_Z
            well_path = path + loc + "/"
            ensure_directory(well_path)
            # slice index renamed from `i` to avoid shadowing the outer
            # column index
            for slice_idx in range(self.app.slices.get()):
                save_path = well_path + "-slice" + str(slice_idx + 1) + ".jpg"
                self.app.cam.save(save_path)
                z = z + z_dist
                gc.writeTemporary(x, y, z=z)
                gc.sendGCode(self.s, "gcode/temp.gcode")
                os.remove("gcode/temp.gcode")
                time.sleep(1.5)
            z = self.app.FIRST_Z
            # Focus-stack this well's slices on a background thread.
            order = []
            for file in os.listdir(well_path):
                order.append(file)
            order.sort()
            images = []
            for file in order:
                image = cv2.imread(well_path + "/" + file)
                images.append(image)
            threading.Thread(target=pyramid.stack_focus, args=(images, path, loc)).start()
            self.app.frames["AutoFrame"].progress.step(step)
            self.app.frames["AutoFrame"].output.update_idletasks()
            self.app.set_time(start, size - j)
        if not newLocations:
            break
    print("> Classifying images with MARCO...")
    Automated_MARCO.predict(path, project_data)
    print("> Classification complete")
    # Strip the trailing date component from the path.
    parts = path.split("/")
    path = ""
    for i in range(len(parts)-2):
        path += parts[i] + "/"
    self.app.check_previous_notes(path, str(date))
    if self.app.frames["AutoFrame"].laser_var.get() == 1:
        self.run_custom_laser_imager(extension, locations, new_coordinates)
        return
    else:
        self.app.clean_up()
        self.go_home()
        print("> Resetting imager")
        # NOTE(review): setup() is not defined in this class -- confirm it
        # resolves at module level.
        setup()
####################################################################################################################
def run_auto_laser_imager(self, extension, locations, coordinates):
    """Runs laser-fluorescence imaging over the given wells.

    Enables the laser and fluorescence camera settings, then visits each
    well, captures a stack of z-slices, focus-stacks them on a background
    thread, and classifies the results with MARCO.

    Args:
        extension: The plate extension used to send the correct G-code
        locations: a list of strings representing wells to be imaged.
        coordinates: mapping of well name -> (x, y) position in mm
            (possibly autocorrected by the caller).
    """
    if len(locations) == 0:
        print("> Please select which wells to image")
        print("")
        return
    project = self.app.frames["AutoFrame"].project.get()
    target = self.app.frames["AutoFrame"].target.get()
    plate = self.app.frames["AutoFrame"].plate.get()
    prep_date = self.app.frames["AutoFrame"].date.get()
    if project == "" or target == "" or plate == "":
        print("> Must fill out project, target, and ")
        print("> plate name before imaging can begin")
        print("> ")
        return
    # Get the date and time at which imaging began
    date = datetime.date.today()
    # start_time = datetime.time
    project_data = [project, target, plate, date]
    # Ensure the directories exist to store the images:
    # ../../Images/<project>/<target>/<plate>/<date>/laser_fluorescence/
    path = os.path.join(os.pardir, os.pardir, "Images/" + project + "/")
    ensure_directory(path)
    path = path + target + "/"
    ensure_directory(path)
    path = path + plate + "/"
    ensure_directory(path)
    if prep_date != "":
        self.app.write_prep_date(prep_date, path)
    else:
        # No prep date entered: fall back to the one stored on disk.
        self.app.frames["AutoFrame"].date.set(self.app.read_prep_date(path))
        self.app.write_prep_date(prep_date, path)
    path = path + str(date) + "/"
    ensure_directory(path)
    path = path + "laser_fluorescence/"
    ensure_directory(path)
    self.app.disable_manual()
    self.app.frames["AutoFrame"].cancelButton['state'] = tk.NORMAL
    self.app.frames["AutoFrame"].goButton['state'] = tk.DISABLED
    print("> Running standard fluorescence configuration")
    size = len(locations)
    step = 100 / size  # progress-bar increment per well
    print("> Homing coordinates")
    self.go_home()
    # Switch the hardware into fluorescence mode: laser on, filter servo
    # positioned, camera settings changed.
    self.app.arduino.lights_on()
    self.app.arduino.laser_on()
    self.app.arduino.fan_on()
    self.app.arduino.servo_0()
    self.app.set_camera_fluorscent()
    x = self.app.FIRST_X
    y = self.app.FIRST_Y
    z = self.app.FIRST_Z
    if self.app.cancelled:
        self.app.abort()
        self.go_home()
        return
    if self.app.type.get() == "Intelli-plate 96-3":
        gc.writeTemporary(self.app.FIRST_X, self.app.FIRST_Y, self.app.FIRST_Z, first=True)
        gc.sendGCode(self.s, "gcode/temp.gcode")
        os.remove("gcode/temp.gcode")
        time.sleep(20)  # allow the long initial move to finish
        j = 0
        for loc in locations:
            # Per-slice z step; total z travel is z_dist/10 mm.
            z_dist = (float(self.app.z_dist.get()) / 10.0) / float(self.app.slices.get()) # 0.5 determines the total distance
            j += 1
            start = time.time()
            if self.app.cancelled:
                self.app.abort()
                self.go_home()
                return
            # No autocorrect pass here: the supplied coordinates are used
            # as-is.
            x = float(coordinates[loc][0])
            y = float(coordinates[loc][1])
            z = self.app.FIRST_Z
            #print(z)
            #print(x)
            #print(y)
            gc.writeTemporary(x, y, z=z)
            gc.sendGCode(self.s, "gcode/temp.gcode")
            os.remove("gcode/temp.gcode")
            #print("> Moving to well " + loc)
            #gc.sendGCode(self.s, extension + loc + '.gcode')
            time.sleep(.5)
            # Poll until the printer reports it is within 0.1 mm of target.
            while(True):
                currentX, currentY = self.ping_location()
                if (abs(currentX - x) < .1) and (abs(currentY - y) < .1):
                    break
                else:
                    time.sleep(.5)
            #When we reach this step the printer is where it needs to be
            time.sleep(1.5)
            well_path = path + loc + "/"
            ensure_directory(well_path)
            # Capture one image per z-slice, stepping z between shots.
            for i in range(self.app.slices.get()):
                save_path = well_path + "-slice" + str(i+1) + ".jpg"
                self.app.cam.save(save_path)
                z = z + z_dist
                gc.writeTemporary(x, y, z=z)
                gc.sendGCode(self.s, "gcode/temp.gcode")
                os.remove("gcode/temp.gcode")
                time.sleep(1.5)
            # Focus-stack this well's slices on a background thread.
            order = []
            for file in os.listdir(well_path):
                order.append(file)
            order.sort()
            images = []
            for file in order:
                image = cv2.imread(well_path + "/" + file)
                # image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
                images.append(image)
            threading.Thread(target=pyramid.stack_focus, args=(images, path, loc)).start()
            self.app.frames["AutoFrame"].progress.step(step)
            self.app.frames["AutoFrame"].output.update_idletasks()
            self.app.set_time(start, size - j)
        print("> Classifying images with MARCO...")
        Automated_MARCO.predict(path, project_data)
        print("> Classification complete")
        # Strip the trailing directory components from the path.
        parts = path.split("/")
        path = ""
        for i in range(len(parts) - 2):
            path += parts[i] + "/"
        self.app.check_previous_notes(path, str(date))
        self.app.clean_up()
        self.go_home()
        # Restore normal (non-fluorescence) camera settings.
        self.app.set_camera_default()
        print("> Resetting imager")
########################################################################################################################
def run_custom_laser_imager(self, extension, locations, coordinates):
    """Runs laser-fluorescence imaging over a custom well pattern.

    Mirrors run_custom_imager's snake traversal, but with the laser and
    fluorescence camera settings enabled and without position
    autocorrection (the supplied coordinates are used as-is).

    Args:
        extension: The plate extension used to send the correct G-code
        locations: a list of strings representing wells to be imaged.
        coordinates: mapping of well name -> (x, y) position in mm.
    """
    if len(locations) == 0:
        print("> Please select which wells to image")
        print("")
        return
    project = self.app.frames["AutoFrame"].project.get()
    target = self.app.frames["AutoFrame"].target.get()
    plate = self.app.frames["AutoFrame"].plate.get()
    prep_date = self.app.frames["AutoFrame"].date.get()
    if project == "" or target == "" or plate == "":
        print("> Must fill out project, target, and ")
        print("> plate name before imaging can begin")
        print("")
        return
    self.app.disable_manual()
    # Get the date and time at which imaging began
    date = datetime.date.today()
    project_data = [project, target, plate, date]
    # Ensure the directories exist to store the images
    path = os.path.join(os.pardir, os.pardir, "Images/" + project + "/")
    ensure_directory(path)
    path = path + target + "/"
    ensure_directory(path)
    path = path + plate + "/"
    ensure_directory(path)
    if prep_date != "":
        self.app.write_prep_date(prep_date, path)
    else:
        self.app.frames["AutoFrame"].date.set(self.app.read_prep_date(path))
    path = path + str(date) + "/"
    ensure_directory(path)
    # NOTE(review): run_auto_laser_imager uses "laser_fluorescence/" here --
    # confirm the differing directory name is intentional.
    path = path + "fluorescence/"
    # BUGFIX: this directory level was never created, unlike every other
    # level in this method.
    ensure_directory(path)
    self.app.frames["AutoFrame"].cancelButton['state'] = tk.NORMAL
    self.app.frames["AutoFrame"].goButton['state'] = tk.DISABLED
    z_dist = (float(self.app.z_dist.get()) / 10.0) / float(self.app.slices.get())
    # Switch the hardware into fluorescence mode.
    self.app.arduino.lights_on()
    self.app.arduino.laser_on()
    self.app.arduino.servo_0()
    self.app.set_camera_fluorscent()
    x = self.app.FIRST_X
    y = self.app.FIRST_Y
    z = self.app.FIRST_Z
    # BUGFIX: copy the list (as run_custom_imager does); popping from a mere
    # alias would empty the caller's `locations` list.
    newLocations = locations.copy()
    print("> Running custom configuration")
    print("> ")
    # Bucket the wells by plate column (loc[1:3] is the two-digit column).
    cols = [[]]
    for i in range(12):
        cols.append([])
    for loc in newLocations:
        column = int(loc[1] + loc[2]) - 1
        cols[column].append(loc)
    # Reverse every other column so the imager snakes across the plate.
    for col in cols[1::2]:
        col.sort(key=operator.itemgetter(0, 4), reverse=True)
    self.go_home()
    if self.app.cancelled:
        self.app.abort()
        self.go_home()
        return
    gc.writeTemporary(self.app.FIRST_X, self.app.FIRST_Y, self.app.FIRST_Z, first=True)
    gc.sendGCode(self.s, "gcode/temp.gcode")
    os.remove("gcode/temp.gcode")
    time.sleep(20)
    j = 0
    size = len(newLocations)
    step = (100/size)
    for i in range(12):
        for loc in cols[i]:
            if self.app.cancelled:
                self.app.abort()
                self.go_home()
                return
            z_dist = (float(self.app.z_dist.get()) / 10.0) / float(self.app.slices.get())
            j += 1
            start = time.time()
            x = float(coordinates[loc][0])
            y = float(coordinates[loc][1])
            gc.writeTemporary(x, y, self.app.FIRST_Z)
            gc.sendGCode(self.s, "gcode/temp.gcode")
            os.remove("gcode/temp.gcode")
            # Track remaining wells so we can stop early once all are done.
            newLocations.pop(0)
            time.sleep(1.5)
            # Poll until the printer reports it is within 0.1 mm of target.
            while (True):
                currentX, currentY = self.ping_location()
                if (abs(currentX - x) < .1) and (abs(currentY - y) < .1):
                    break
                else:
                    time.sleep(1.5)
            # When we reach this step the printer is where it needs to be
            time.sleep(1.5)
            z = self.app.FIRST_Z
            well_path = path + loc + "/"
            ensure_directory(well_path)
            # slice index renamed from `i` to avoid shadowing the outer
            # column index
            for slice_idx in range(self.app.slices.get()):
                save_path = well_path + "-slice" + str(slice_idx + 1) + ".jpg"
                self.app.cam.save(save_path)
                z = z + z_dist
                gc.writeTemporary(x, y, z=z)
                gc.sendGCode(self.s, "gcode/temp.gcode")
                os.remove("gcode/temp.gcode")
                time.sleep(1.5)
            z = self.app.FIRST_Z
            # Focus-stack this well's slices on a background thread.
            order = []
            for file in os.listdir(well_path):
                order.append(file)
            order.sort()
            images = []
            for file in order:
                image = cv2.imread(well_path + "/" + file)
                images.append(image)
            threading.Thread(target=pyramid.stack_focus, args=(images, path, loc)).start()
            self.app.frames["AutoFrame"].progress.step(step)
            self.app.frames["AutoFrame"].output.update_idletasks()
            self.app.set_time(start, size - j)
        if not newLocations:
            break
    print("> Classifying images with MARCO...")
    Automated_MARCO.predict(path, project_data)
    print("> Classification complete")
    # Strip the trailing directory components from the path.
    parts = path.split("/")
    path = ""
    for i in range(len(parts)-2):
        path += parts[i] + "/"
    self.app.check_previous_notes(path, str(date))
    self.app.clean_up()
    self.go_home()
    # Restore normal (non-fluorescence) camera settings.
    self.app.set_camera_default()
    print("> Resetting imager")
####################################################################################################################
def find_single_well(self):
    """Manual mode command for moving the imager to the well currently
    selected in the ManualFrame.

    Looks up the well's (x, y) coordinates, stores them on the frame, sends
    the move as G-code, and reports the resulting position.
    """
    # Dead code removed: an `extension` variable was computed from the plate
    # type here but never used.
    loc = self.app.frames["ManualFrame"].well.get()
    coordinates = self.app.determine_coordinates([loc])
    self.app.frames["ManualFrame"].x = coordinates[loc][0]
    self.app.frames["ManualFrame"].y = coordinates[loc][1]
    gc.writeTemporary(self.app.frames["ManualFrame"].x, self.app.frames["ManualFrame"].y, self.app.FIRST_Z)
    gc.sendGCode(self.s, "gcode/temp.gcode")
    os.remove("gcode/temp.gcode")
    self.print_current_location()
####################################################################################################################
def right_one_well(self):
    """Move the manual-mode selection one sub-well to the right.

    The sub-column digit (last character of the well name) is decremented;
    from sub-column 1 it wraps to sub-column 3 of the previous row letter,
    stopping at 'A'.
    """
    well = self.app.frames["ManualFrame"].well.get()
    row_code = ord(well[0])
    sub_col = int(well[4] if len(well) == 5 else well[3])
    if sub_col == 1:
        # Wrap to the previous row letter, sub-column 3.
        row_code -= 1
        if row_code < ord('A'):
            return  # already at the first row; nothing to do
        next_well = chr(row_code) + well[1:4] + "3"
    else:
        next_well = well[:4] + str(sub_col - 1)
    self.app.frames["ManualFrame"].well.set(next_well)
    self.find_single_well()
####################################################################################################################
def left_one_well(self):
    """Move the manual-mode selection one sub-well to the left.

    The sub-column digit (last character of the well name) is incremented;
    from sub-column 3 it wraps to sub-column 1 of the next row letter,
    stopping at 'H'.
    """
    well = self.app.frames["ManualFrame"].well.get()
    row_code = ord(well[0])
    sub_col = int(well[4] if len(well) == 5 else well[3])
    if sub_col == 3:
        # Wrap to the next row letter, sub-column 1.
        row_code += 1
        if row_code > ord('H'):
            return  # already at the last row; nothing to do
        next_well = chr(row_code) + well[1:4] + "1"
    else:
        next_well = well[:4] + str(sub_col + 1)
    self.app.frames["ManualFrame"].well.set(next_well)
    self.find_single_well()
####################################################################################################################
def up_one_well(self):
    """Move the manual-mode selection one row up (toward row 1).

    Decrements the zero-padded numeric row in the middle of the well name,
    doing nothing if already at row 1.
    """
    well = self.app.frames["ManualFrame"].well.get()
    row = int(well[1:-2]) - 1
    if row < 1:
        return  # already at the top row
    # Rebuild the name with the row zero-padded to two digits.
    self.app.frames["ManualFrame"].well.set(well[0] + str(row).zfill(2) + well[-2:])
    self.find_single_well()
####################################################################################################################
def down_one_well(self):
    """Move the manual-mode selection one row down (toward row 12).

    Increments the zero-padded numeric row in the middle of the well name,
    doing nothing if already at row 12.
    """
    well = self.app.frames["ManualFrame"].well.get()
    row = int(well[1:-2]) + 1
    if row > 12:
        return  # already at the bottom row
    # Rebuild the name with the row zero-padded to two digits.
    self.app.frames["ManualFrame"].well.set(well[0] + str(row).zfill(2) + well[-2:])
    self.find_single_well()
####################################################################################################################
def print_current_location(self):
    """Prints the current location of the imager, in mm."""
    pos_x, pos_y, pos_z = self.ping_location(z=True)
    print("> Current location:")
    for label, value in zip("XYZ", (pos_x, pos_y, pos_z)):
        print("> " + label + ": " + str(value))
    print("")
#################################################################################################################### | 36.072677 | 130 | 0.48948 | 4,322 | 39,211 | 4.354003 | 0.099722 | 0.070677 | 0.037996 | 0.046764 | 0.812361 | 0.798863 | 0.78866 | 0.778191 | 0.774259 | 0.768466 | 0 | 0.014296 | 0.354237 | 39,211 | 1,087 | 131 | 36.072677 | 0.728881 | 0.137283 | 0 | 0.847709 | 0 | 0 | 0.080945 | 0.002123 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021563 | false | 0 | 0.013477 | 0 | 0.074124 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
93353d347b80a5b538f4bba1432709aade5fa86d | 31,980 | py | Python | yandex/cloud/mdb/greenplum/v1/config_pb2.py | ovandriyanov/python-sdk | eec7dc65ef23789388fa46d13087d4a03cdc6e57 | [
"MIT"
] | null | null | null | yandex/cloud/mdb/greenplum/v1/config_pb2.py | ovandriyanov/python-sdk | eec7dc65ef23789388fa46d13087d4a03cdc6e57 | [
"MIT"
] | null | null | null | yandex/cloud/mdb/greenplum/v1/config_pb2.py | ovandriyanov/python-sdk | eec7dc65ef23789388fa46d13087d4a03cdc6e57 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: yandex/cloud/mdb/greenplum/v1/config.proto
"""Generated protocol buffer code."""
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from google.protobuf import wrappers_pb2 as google_dot_protobuf_dot_wrappers__pb2
from yandex.cloud import validation_pb2 as yandex_dot_cloud_dot_validation__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='yandex/cloud/mdb/greenplum/v1/config.proto',
package='yandex.cloud.mdb.greenplum.v1',
syntax='proto3',
serialized_options=b'\n!yandex.cloud.api.mdb.greenplum.v1ZKgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/greenplum/v1;greenplum',
create_key=_descriptor._internal_create_key,
serialized_pb=b'\n*yandex/cloud/mdb/greenplum/v1/config.proto\x12\x1dyandex.cloud.mdb.greenplum.v1\x1a\x1egoogle/protobuf/wrappers.proto\x1a\x1dyandex/cloud/validation.proto\"P\n\tResources\x12\x1a\n\x12resource_preset_id\x18\x01 \x01(\t\x12\x11\n\tdisk_size\x18\x02 \x01(\x03\x12\x14\n\x0c\x64isk_type_id\x18\x03 \x01(\t\"\x90\x02\n\x16\x43onnectionPoolerConfig\x12L\n\x04mode\x18\x01 \x01(\x0e\x32>.yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfig.PoolMode\x12)\n\x04size\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x38\n\x13\x63lient_idle_timeout\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\"C\n\x08PoolMode\x12\x19\n\x15POOL_MODE_UNSPECIFIED\x10\x00\x12\x0b\n\x07SESSION\x10\x01\x12\x0f\n\x0bTRANSACTION\x10\x02\"U\n\x16MasterSubclusterConfig\x12;\n\tresources\x18\x01 \x01(\x0b\x32(.yandex.cloud.mdb.greenplum.v1.Resources\"V\n\x17SegmentSubclusterConfig\x12;\n\tresources\x18\x01 \x01(\x0b\x32(.yandex.cloud.mdb.greenplum.v1.Resources\"\xd3\x03\n\x13GreenplumConfig6_17\x12\x34\n\x0fmax_connections\x18\x01 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12;\n\x16max_slot_wal_keep_size\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x42\n\x1dgp_workfile_limit_per_segment\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12@\n\x1bgp_workfile_limit_per_query\x18\x04 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x46\n!gp_workfile_limit_files_per_query\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12>\n\x19max_prepared_transactions\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12;\n\x17gp_workfile_compression\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\"\xd3\x03\n\x13GreenplumConfig6_19\x12\x34\n\x0fmax_connections\x18\x01 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12;\n\x16max_slot_wal_keep_size\x18\x02 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x42\n\x1dgp_workfile_limit_per_segment\x18\x03 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12@\n\x1bgp_workfile_limit_per_query\x18\x04 
\x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12\x46\n!gp_workfile_limit_files_per_query\x18\x05 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12>\n\x19max_prepared_transactions\x18\x06 \x01(\x0b\x32\x1b.google.protobuf.Int64Value\x12;\n\x17gp_workfile_compression\x18\x07 \x01(\x0b\x32\x1a.google.protobuf.BoolValue\"\x81\x02\n\x16GreenplumConfigSet6_17\x12R\n\x10\x65\x66\x66\x65\x63tive_config\x18\x01 \x01(\x0b\x32\x32.yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17B\x04\xe8\xc7\x31\x01\x12G\n\x0buser_config\x18\x02 \x01(\x0b\x32\x32.yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17\x12J\n\x0e\x64\x65\x66\x61ult_config\x18\x03 \x01(\x0b\x32\x32.yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17\"\x81\x02\n\x16GreenplumConfigSet6_19\x12R\n\x10\x65\x66\x66\x65\x63tive_config\x18\x01 \x01(\x0b\x32\x32.yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19B\x04\xe8\xc7\x31\x01\x12G\n\x0buser_config\x18\x02 \x01(\x0b\x32\x32.yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19\x12J\n\x0e\x64\x65\x66\x61ult_config\x18\x03 \x01(\x0b\x32\x32.yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19\"\x8d\x02\n\x19\x43onnectionPoolerConfigSet\x12U\n\x10\x65\x66\x66\x65\x63tive_config\x18\x01 \x01(\x0b\x32\x35.yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigB\x04\xe8\xc7\x31\x01\x12J\n\x0buser_config\x18\x02 \x01(\x0b\x32\x35.yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfig\x12M\n\x0e\x64\x65\x66\x61ult_config\x18\x03 \x01(\x0b\x32\x35.yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigBp\n!yandex.cloud.api.mdb.greenplum.v1ZKgithub.com/yandex-cloud/go-genproto/yandex/cloud/mdb/greenplum/v1;greenplumb\x06proto3'
,
dependencies=[google_dot_protobuf_dot_wrappers__pb2.DESCRIPTOR,yandex_dot_cloud_dot_validation__pb2.DESCRIPTOR,])
_CONNECTIONPOOLERCONFIG_POOLMODE = _descriptor.EnumDescriptor(
name='PoolMode',
full_name='yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfig.PoolMode',
filename=None,
file=DESCRIPTOR,
create_key=_descriptor._internal_create_key,
values=[
_descriptor.EnumValueDescriptor(
name='POOL_MODE_UNSPECIFIED', index=0, number=0,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='SESSION', index=1, number=1,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
_descriptor.EnumValueDescriptor(
name='TRANSACTION', index=2, number=2,
serialized_options=None,
type=None,
create_key=_descriptor._internal_create_key),
],
containing_type=None,
serialized_options=None,
serialized_start=428,
serialized_end=495,
)
_sym_db.RegisterEnumDescriptor(_CONNECTIONPOOLERCONFIG_POOLMODE)
_RESOURCES = _descriptor.Descriptor(
name='Resources',
full_name='yandex.cloud.mdb.greenplum.v1.Resources',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resource_preset_id', full_name='yandex.cloud.mdb.greenplum.v1.Resources.resource_preset_id', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='disk_size', full_name='yandex.cloud.mdb.greenplum.v1.Resources.disk_size', index=1,
number=2, type=3, cpp_type=2, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='disk_type_id', full_name='yandex.cloud.mdb.greenplum.v1.Resources.disk_type_id', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=b"".decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=140,
serialized_end=220,
)
_CONNECTIONPOOLERCONFIG = _descriptor.Descriptor(
name='ConnectionPoolerConfig',
full_name='yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='mode', full_name='yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfig.mode', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='size', full_name='yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfig.size', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='client_idle_timeout', full_name='yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfig.client_idle_timeout', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
_CONNECTIONPOOLERCONFIG_POOLMODE,
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=223,
serialized_end=495,
)
_MASTERSUBCLUSTERCONFIG = _descriptor.Descriptor(
name='MasterSubclusterConfig',
full_name='yandex.cloud.mdb.greenplum.v1.MasterSubclusterConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resources', full_name='yandex.cloud.mdb.greenplum.v1.MasterSubclusterConfig.resources', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=497,
serialized_end=582,
)
_SEGMENTSUBCLUSTERCONFIG = _descriptor.Descriptor(
name='SegmentSubclusterConfig',
full_name='yandex.cloud.mdb.greenplum.v1.SegmentSubclusterConfig',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='resources', full_name='yandex.cloud.mdb.greenplum.v1.SegmentSubclusterConfig.resources', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=584,
serialized_end=670,
)
_GREENPLUMCONFIG6_17 = _descriptor.Descriptor(
name='GreenplumConfig6_17',
full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='max_connections', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17.max_connections', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_slot_wal_keep_size', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17.max_slot_wal_keep_size', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='gp_workfile_limit_per_segment', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17.gp_workfile_limit_per_segment', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='gp_workfile_limit_per_query', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17.gp_workfile_limit_per_query', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='gp_workfile_limit_files_per_query', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17.gp_workfile_limit_files_per_query', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_prepared_transactions', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17.max_prepared_transactions', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='gp_workfile_compression', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17.gp_workfile_compression', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=673,
serialized_end=1140,
)
_GREENPLUMCONFIG6_19 = _descriptor.Descriptor(
name='GreenplumConfig6_19',
full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='max_connections', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19.max_connections', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_slot_wal_keep_size', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19.max_slot_wal_keep_size', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='gp_workfile_limit_per_segment', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19.gp_workfile_limit_per_segment', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='gp_workfile_limit_per_query', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19.gp_workfile_limit_per_query', index=3,
number=4, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='gp_workfile_limit_files_per_query', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19.gp_workfile_limit_files_per_query', index=4,
number=5, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='max_prepared_transactions', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19.max_prepared_transactions', index=5,
number=6, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='gp_workfile_compression', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19.gp_workfile_compression', index=6,
number=7, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1143,
serialized_end=1610,
)
_GREENPLUMCONFIGSET6_17 = _descriptor.Descriptor(
name='GreenplumConfigSet6_17',
full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='effective_config', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17.effective_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_config', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17.user_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='default_config', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17.default_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1613,
serialized_end=1870,
)
_GREENPLUMCONFIGSET6_19 = _descriptor.Descriptor(
name='GreenplumConfigSet6_19',
full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='effective_config', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19.effective_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_config', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19.user_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='default_config', full_name='yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19.default_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=1873,
serialized_end=2130,
)
_CONNECTIONPOOLERCONFIGSET = _descriptor.Descriptor(
name='ConnectionPoolerConfigSet',
full_name='yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigSet',
filename=None,
file=DESCRIPTOR,
containing_type=None,
create_key=_descriptor._internal_create_key,
fields=[
_descriptor.FieldDescriptor(
name='effective_config', full_name='yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigSet.effective_config', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=b'\350\3071\001', file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='user_config', full_name='yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigSet.user_config', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
_descriptor.FieldDescriptor(
name='default_config', full_name='yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigSet.default_config', index=2,
number=3, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
serialized_options=None, file=DESCRIPTOR, create_key=_descriptor._internal_create_key),
],
extensions=[
],
nested_types=[],
enum_types=[
],
serialized_options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=2133,
serialized_end=2402,
)
_CONNECTIONPOOLERCONFIG.fields_by_name['mode'].enum_type = _CONNECTIONPOOLERCONFIG_POOLMODE
_CONNECTIONPOOLERCONFIG.fields_by_name['size'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_CONNECTIONPOOLERCONFIG.fields_by_name['client_idle_timeout'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_CONNECTIONPOOLERCONFIG_POOLMODE.containing_type = _CONNECTIONPOOLERCONFIG
_MASTERSUBCLUSTERCONFIG.fields_by_name['resources'].message_type = _RESOURCES
_SEGMENTSUBCLUSTERCONFIG.fields_by_name['resources'].message_type = _RESOURCES
_GREENPLUMCONFIG6_17.fields_by_name['max_connections'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_GREENPLUMCONFIG6_17.fields_by_name['max_slot_wal_keep_size'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_GREENPLUMCONFIG6_17.fields_by_name['gp_workfile_limit_per_segment'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_GREENPLUMCONFIG6_17.fields_by_name['gp_workfile_limit_per_query'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_GREENPLUMCONFIG6_17.fields_by_name['gp_workfile_limit_files_per_query'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_GREENPLUMCONFIG6_17.fields_by_name['max_prepared_transactions'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_GREENPLUMCONFIG6_17.fields_by_name['gp_workfile_compression'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_GREENPLUMCONFIG6_19.fields_by_name['max_connections'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_GREENPLUMCONFIG6_19.fields_by_name['max_slot_wal_keep_size'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_GREENPLUMCONFIG6_19.fields_by_name['gp_workfile_limit_per_segment'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_GREENPLUMCONFIG6_19.fields_by_name['gp_workfile_limit_per_query'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_GREENPLUMCONFIG6_19.fields_by_name['gp_workfile_limit_files_per_query'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_GREENPLUMCONFIG6_19.fields_by_name['max_prepared_transactions'].message_type = google_dot_protobuf_dot_wrappers__pb2._INT64VALUE
_GREENPLUMCONFIG6_19.fields_by_name['gp_workfile_compression'].message_type = google_dot_protobuf_dot_wrappers__pb2._BOOLVALUE
_GREENPLUMCONFIGSET6_17.fields_by_name['effective_config'].message_type = _GREENPLUMCONFIG6_17
_GREENPLUMCONFIGSET6_17.fields_by_name['user_config'].message_type = _GREENPLUMCONFIG6_17
_GREENPLUMCONFIGSET6_17.fields_by_name['default_config'].message_type = _GREENPLUMCONFIG6_17
_GREENPLUMCONFIGSET6_19.fields_by_name['effective_config'].message_type = _GREENPLUMCONFIG6_19
_GREENPLUMCONFIGSET6_19.fields_by_name['user_config'].message_type = _GREENPLUMCONFIG6_19
_GREENPLUMCONFIGSET6_19.fields_by_name['default_config'].message_type = _GREENPLUMCONFIG6_19
_CONNECTIONPOOLERCONFIGSET.fields_by_name['effective_config'].message_type = _CONNECTIONPOOLERCONFIG
_CONNECTIONPOOLERCONFIGSET.fields_by_name['user_config'].message_type = _CONNECTIONPOOLERCONFIG
_CONNECTIONPOOLERCONFIGSET.fields_by_name['default_config'].message_type = _CONNECTIONPOOLERCONFIG
DESCRIPTOR.message_types_by_name['Resources'] = _RESOURCES
DESCRIPTOR.message_types_by_name['ConnectionPoolerConfig'] = _CONNECTIONPOOLERCONFIG
DESCRIPTOR.message_types_by_name['MasterSubclusterConfig'] = _MASTERSUBCLUSTERCONFIG
DESCRIPTOR.message_types_by_name['SegmentSubclusterConfig'] = _SEGMENTSUBCLUSTERCONFIG
DESCRIPTOR.message_types_by_name['GreenplumConfig6_17'] = _GREENPLUMCONFIG6_17
DESCRIPTOR.message_types_by_name['GreenplumConfig6_19'] = _GREENPLUMCONFIG6_19
DESCRIPTOR.message_types_by_name['GreenplumConfigSet6_17'] = _GREENPLUMCONFIGSET6_17
DESCRIPTOR.message_types_by_name['GreenplumConfigSet6_19'] = _GREENPLUMCONFIGSET6_19
DESCRIPTOR.message_types_by_name['ConnectionPoolerConfigSet'] = _CONNECTIONPOOLERCONFIGSET
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
Resources = _reflection.GeneratedProtocolMessageType('Resources', (_message.Message,), {
'DESCRIPTOR' : _RESOURCES,
'__module__' : 'yandex.cloud.mdb.greenplum.v1.config_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.greenplum.v1.Resources)
})
_sym_db.RegisterMessage(Resources)
ConnectionPoolerConfig = _reflection.GeneratedProtocolMessageType('ConnectionPoolerConfig', (_message.Message,), {
'DESCRIPTOR' : _CONNECTIONPOOLERCONFIG,
'__module__' : 'yandex.cloud.mdb.greenplum.v1.config_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfig)
})
_sym_db.RegisterMessage(ConnectionPoolerConfig)
MasterSubclusterConfig = _reflection.GeneratedProtocolMessageType('MasterSubclusterConfig', (_message.Message,), {
'DESCRIPTOR' : _MASTERSUBCLUSTERCONFIG,
'__module__' : 'yandex.cloud.mdb.greenplum.v1.config_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.greenplum.v1.MasterSubclusterConfig)
})
_sym_db.RegisterMessage(MasterSubclusterConfig)
SegmentSubclusterConfig = _reflection.GeneratedProtocolMessageType('SegmentSubclusterConfig', (_message.Message,), {
'DESCRIPTOR' : _SEGMENTSUBCLUSTERCONFIG,
'__module__' : 'yandex.cloud.mdb.greenplum.v1.config_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.greenplum.v1.SegmentSubclusterConfig)
})
_sym_db.RegisterMessage(SegmentSubclusterConfig)
GreenplumConfig6_17 = _reflection.GeneratedProtocolMessageType('GreenplumConfig6_17', (_message.Message,), {
'DESCRIPTOR' : _GREENPLUMCONFIG6_17,
'__module__' : 'yandex.cloud.mdb.greenplum.v1.config_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_17)
})
_sym_db.RegisterMessage(GreenplumConfig6_17)
GreenplumConfig6_19 = _reflection.GeneratedProtocolMessageType('GreenplumConfig6_19', (_message.Message,), {
'DESCRIPTOR' : _GREENPLUMCONFIG6_19,
'__module__' : 'yandex.cloud.mdb.greenplum.v1.config_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.greenplum.v1.GreenplumConfig6_19)
})
_sym_db.RegisterMessage(GreenplumConfig6_19)
GreenplumConfigSet6_17 = _reflection.GeneratedProtocolMessageType('GreenplumConfigSet6_17', (_message.Message,), {
'DESCRIPTOR' : _GREENPLUMCONFIGSET6_17,
'__module__' : 'yandex.cloud.mdb.greenplum.v1.config_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_17)
})
_sym_db.RegisterMessage(GreenplumConfigSet6_17)
GreenplumConfigSet6_19 = _reflection.GeneratedProtocolMessageType('GreenplumConfigSet6_19', (_message.Message,), {
'DESCRIPTOR' : _GREENPLUMCONFIGSET6_19,
'__module__' : 'yandex.cloud.mdb.greenplum.v1.config_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.greenplum.v1.GreenplumConfigSet6_19)
})
_sym_db.RegisterMessage(GreenplumConfigSet6_19)
ConnectionPoolerConfigSet = _reflection.GeneratedProtocolMessageType('ConnectionPoolerConfigSet', (_message.Message,), {
'DESCRIPTOR' : _CONNECTIONPOOLERCONFIGSET,
'__module__' : 'yandex.cloud.mdb.greenplum.v1.config_pb2'
# @@protoc_insertion_point(class_scope:yandex.cloud.mdb.greenplum.v1.ConnectionPoolerConfigSet)
})
_sym_db.RegisterMessage(ConnectionPoolerConfigSet)
DESCRIPTOR._options = None
_GREENPLUMCONFIGSET6_17.fields_by_name['effective_config']._options = None
_GREENPLUMCONFIGSET6_19.fields_by_name['effective_config']._options = None
_CONNECTIONPOOLERCONFIGSET.fields_by_name['effective_config']._options = None
# @@protoc_insertion_point(module_scope)
| 52.254902 | 3,598 | 0.792558 | 4,063 | 31,980 | 5.866847 | 0.063254 | 0.035575 | 0.055628 | 0.062172 | 0.815371 | 0.809456 | 0.781978 | 0.75731 | 0.723078 | 0.695809 | 0 | 0.04771 | 0.096185 | 31,980 | 611 | 3,599 | 52.340426 | 0.776986 | 0.032489 | 0 | 0.672043 | 1 | 0.003584 | 0.283552 | 0.252183 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.010753 | 0 | 0.010753 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
fa87c2ef7b45ba06e2706803d1312651a6a6d023 | 201 | py | Python | kha/episode_patchers/episode_replacer.py | claui/kommtheuteaktenzeichen | 2afbdfd1731a8dd6e222d094b0ee26c1a1945e61 | [
"Apache-2.0"
] | 2 | 2021-06-06T15:29:08.000Z | 2021-06-07T20:37:38.000Z | kha/episode_patchers/episode_replacer.py | claui/kommtheuteaktenzeichen | 2afbdfd1731a8dd6e222d094b0ee26c1a1945e61 | [
"Apache-2.0"
] | null | null | null | kha/episode_patchers/episode_replacer.py | claui/kommtheuteaktenzeichen | 2afbdfd1731a8dd6e222d094b0ee26c1a1945e61 | [
"Apache-2.0"
] | 1 | 2021-05-31T16:48:08.000Z | 2021-05-31T16:48:08.000Z | # pylint: disable=too-few-public-methods
"""Replaces an existing episode."""
from kha.episode_patchers.patcher import Patcher
class EpisodeReplacer(Patcher):
    """Replaces an existing episode.

    Concrete ``Patcher`` subclass with no members of its own; the actual
    patching behaviour presumably comes from the ``Patcher`` base class
    (not visible in this file) -- TODO confirm against
    ``kha.episode_patchers.patcher``.
    """
| 22.333333 | 48 | 0.756219 | 24 | 201 | 6.291667 | 0.708333 | 0.13245 | 0.238411 | 0.331126 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.119403 | 201 | 8 | 49 | 25.125 | 0.853107 | 0.492537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 1 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
4f0d684750909900b10230a8a0e5e1ec8f0fd095 | 8,638 | py | Python | tests/unit/db/redshift/test_redshift_db_driver_unload.py | ellyteitsworth/records-mover | 21cd56efc2d23cfff04ec1fdf582e5229546c418 | [
"Apache-2.0"
] | null | null | null | tests/unit/db/redshift/test_redshift_db_driver_unload.py | ellyteitsworth/records-mover | 21cd56efc2d23cfff04ec1fdf582e5229546c418 | [
"Apache-2.0"
] | null | null | null | tests/unit/db/redshift/test_redshift_db_driver_unload.py | ellyteitsworth/records-mover | 21cd56efc2d23cfff04ec1fdf582e5229546c418 | [
"Apache-2.0"
] | null | null | null | from .base_test_redshift_db_driver import BaseTestRedshiftDBDriver
from ...records.format_hints import (bluelabs_format_hints,
christmas_tree_format_1_hints,
christmas_tree_format_2_hints)
from records_mover.records.delimited.utils import logger as driver_logger
from records_mover.records import DelimitedRecordsFormat
from mock import call, patch
def fake_text(s):
    """Stand-in for the patched ``text()`` call: wrap *s* in a 1-tuple
    so the tests can assert on the raw SQL string passed through."""
    return s,
class TestRedshiftDBDriverUnload(BaseTestRedshiftDBDriver):
    """Tests for the Redshift unloader: verifies the keyword arguments
    handed to the patched ``UnloadFromSelect`` construct and the
    warnings logged for delimited-format hints it cannot honor.

    NOTE(review): the fixtures used below (mock_records_unload_plan,
    mock_directory, mock_db_engine, mock_s3_temp_base_loc,
    redshift_db_driver) are presumably built in
    BaseTestRedshiftDBDriver's setUp -- not visible here; confirm there.
    """
    # Show full diffs when the large expected_args dicts mismatch.
    maxDiff = None
    @patch('records_mover.db.redshift.unloader.UnloadFromSelect')
    @patch('records_mover.db.redshift.unloader.text')
    def test_unload_to_non_s3(self,
                              mock_text,
                              mock_UnloadFromSelect):
        # Target scheme is not 's3' ('mumble'); the expected credentials and
        # unload_location below both come from
        # mock_s3_temp_base_loc.temporary_directory(), i.e. the unload is
        # expected to be staged through the temporary S3 base location.
        mock_text.side_effect = fake_text
        self.mock_records_unload_plan.processing_instructions.fail_if_dont_understand = True
        self.mock_records_unload_plan.processing_instructions.fail_if_cant_handle_hint = True
        self.mock_records_unload_plan.records_format =\
            DelimitedRecordsFormat(variant='bluelabs',
                                   hints=bluelabs_format_hints)
        self.mock_directory.scheme = 'mumble'
        # The driver reads the row count via execute(...).scalar().
        self.mock_db_engine.execute.return_value.scalar.return_value = 456
        rows = self.redshift_db_driver.unloader().\
            unload(schema='myschema',
                   table='mytable',
                   unload_plan=self.mock_records_unload_plan,
                   directory=self.mock_directory)
        # Pull the same mock objects the code under test would see, so the
        # equality checks below compare identical mock children.
        mock_aws_creds = self.mock_s3_temp_base_loc.temporary_directory().__enter__().aws_creds()
        mock_access_key_id = mock_aws_creds.access_key
        mock_secret_key = mock_aws_creds.secret_key
        mock_token = mock_aws_creds.token
        expected_args = {
            'access_key_id': mock_access_key_id,
            'add_quotes': False,
            'delimiter': ',',
            'escape': True,
            'gzip': True,
            'manifest': True,
            'secret_access_key': mock_secret_key,
            'select': ('SELECT * FROM myschema.mytable',),
            'session_token': mock_token,
            'unload_location': self.mock_s3_temp_base_loc.temporary_directory().__enter__().url
        }
        mock_UnloadFromSelect.assert_called_with(**expected_args)
        self.assertEqual(456, rows)
    @patch('records_mover.db.redshift.unloader.UnloadFromSelect')
    @patch('records_mover.db.redshift.unloader.text')
    def test_unload(self,
                    mock_text,
                    mock_UnloadFromSelect):
        # Direct-to-S3 case: scheme is 's3', so the literal fake credentials
        # and the directory's own bucket URL are expected.
        mock_text.side_effect = fake_text
        self.mock_records_unload_plan.processing_instructions.fail_if_dont_understand = True
        self.mock_records_unload_plan.processing_instructions.fail_if_cant_handle_hint = True
        self.mock_records_unload_plan.records_format =\
            DelimitedRecordsFormat(variant='bluelabs',
                                   hints=bluelabs_format_hints)
        self.mock_directory.scheme = 's3'
        self.mock_db_engine.execute.return_value.scalar.return_value = 456
        rows = self.redshift_db_driver.unloader().\
            unload(schema='myschema',
                   table='mytable',
                   unload_plan=self.mock_records_unload_plan,
                   directory=self.mock_directory)
        expected_args = {
            'access_key_id': 'fake_aws_id',
            'add_quotes': False,
            'delimiter': ',',
            'escape': True,
            'gzip': True,
            'manifest': True,
            'secret_access_key': 'fake_aws_secret',
            'select': ('SELECT * FROM myschema.mytable',),
            'session_token': 'fake_aws_token',
            'unload_location': 's3://mybucket/myparent/mychild/'
        }
        mock_UnloadFromSelect.assert_called_with(**expected_args)
        self.assertEqual(456, rows)
    @patch('records_mover.db.redshift.unloader.UnloadFromSelect')
    @patch('records_mover.db.redshift.unloader.text')
    def test_unload_christmas_tree_unsupported_options_with_fast_warns_1(self,
                                                                         mock_text,
                                                                         mock_UnloadFromSelect):
        # With both fail_if_* flags False, unsupported hints must be ignored
        # with a logged warning rather than raising; the patched
        # driver_logger.warning captures exactly which hints were dropped.
        mock_text.side_effect = fake_text
        self.mock_directory.scheme = 's3'
        with patch.object(driver_logger, 'warning') as mock_warning:
            self.mock_records_unload_plan.processing_instructions.fail_if_dont_understand = False
            self.mock_records_unload_plan.processing_instructions.fail_if_cant_handle_hint = False
            self.mock_records_unload_plan.records_format =\
                DelimitedRecordsFormat(variant='bluelabs',
                                       hints=christmas_tree_format_1_hints)
            self.mock_db_engine.execute.return_value.scalar.return_value = 456
            rows = self.redshift_db_driver.unloader().\
                unload(schema='myschema',
                       table='mytable',
                       unload_plan=self.mock_records_unload_plan,
                       directory=self.mock_directory)
        # Order-insensitive comparison of the warning calls.
        self.assertCountEqual(mock_warning.mock_calls,
                              [call("Ignoring hint record-terminator = '\\x02'"),
                               call("Ignoring hint quoting = 'nonnumeric'"),
                               call("Ignoring hint header-row = True"),
                               call("Ignoring hint compression = 'LZO'"),
                               call("Did not understand these hints: header-row=True")])
        # Note: no 'add_quotes'/'gzip' keys here -- this hint set is expected
        # to leave those UNLOAD options out entirely.
        expected_args = {
            'access_key_id': 'fake_aws_id',
            'delimiter': '\x01',
            'escape': True,
            'manifest': True,
            'secret_access_key': 'fake_aws_secret',
            'select': ('SELECT * FROM myschema.mytable',),
            'session_token': 'fake_aws_token',
            'unload_location': 's3://mybucket/myparent/mychild/'
        }
        mock_UnloadFromSelect.assert_called_with(**expected_args)
        self.assertEqual(456, rows)
    @patch('records_mover.db.redshift.unloader.UnloadFromSelect')
    @patch('records_mover.db.redshift.unloader.text')
    def test_unload_christmas_tree_unsupported_options_with_fast_warns_2(self,
                                                                         mock_text,
                                                                         mock_UnloadFromSelect):
        # Same as warns_1 but with the second hint set; NOTE(review): this
        # test uses order-sensitive assertListEqual where warns_1 uses
        # assertCountEqual -- possibly intentional, confirm.
        mock_text.side_effect = fake_text
        self.mock_directory.scheme = 's3'
        with patch.object(driver_logger, 'warning') as mock_warning:
            self.mock_records_unload_plan.processing_instructions.fail_if_dont_understand = False
            self.mock_records_unload_plan.processing_instructions.fail_if_cant_handle_hint = False
            self.mock_records_unload_plan.records_format =\
                DelimitedRecordsFormat(variant='bluelabs',
                                       hints=christmas_tree_format_2_hints)
            self.mock_db_engine.execute.return_value.scalar.return_value = 456
            rows = self.redshift_db_driver.unloader().\
                unload(schema='myschema',
                       table='mytable',
                       unload_plan=self.mock_records_unload_plan,
                       directory=self.mock_directory)
        self.assertListEqual(mock_warning.mock_calls,
                             [call("Ignoring hint escape = '@'"),
                              call("Ignoring hint datetimeformattz = 'HH:MI:SSOF YYYY-MM-DD'"),
                              call("Ignoring hint record-terminator = '\\x02'"),
                              call("Ignoring hint doublequote = True"),
                              call("Ignoring hint compression = 'BZIP'"),
                              call("Ignoring hint datetimeformattz = "
                                   "'YYYY-MM-DD HH24:MI:SSOF'"),
                              call("Ignoring hint dateformat = 'MM-DD-YYYY'")])
        expected_args = {
            'access_key_id': 'fake_aws_id',
            'escape': True,
            'add_quotes': True,
            'delimiter': '\x01',
            'manifest': True,
            'secret_access_key': 'fake_aws_secret',
            'select': ('SELECT * FROM myschema.mytable',),
            'session_token': 'fake_aws_token',
            'unload_location': 's3://mybucket/myparent/mychild/'
        }
        mock_UnloadFromSelect.assert_called_with(**expected_args)
        self.assertEqual(456, rows)
| 50.51462 | 99 | 0.590414 | 852 | 8,638 | 5.610329 | 0.156103 | 0.056904 | 0.050209 | 0.070293 | 0.833682 | 0.808159 | 0.796234 | 0.775523 | 0.755858 | 0.716109 | 0 | 0.008349 | 0.32056 | 8,638 | 170 | 100 | 50.811765 | 0.8061 | 0 | 0 | 0.707006 | 0 | 0 | 0.194605 | 0.052443 | 0 | 0 | 0 | 0 | 0.063694 | 1 | 0.031847 | false | 0 | 0.031847 | 0.006369 | 0.082803 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
87942938b18decae76e8c0e00e9816c383f0c711 | 30,667 | py | Python | python/ks_api_client/api/quote_api.py | ashwinkp/ksapi | c348765cefb4d51fd90febcbfa9ff890b67bdc7d | [
"Apache-2.0"
] | 7 | 2022-02-05T16:20:37.000Z | 2022-02-27T16:48:28.000Z | python/ks_api_client/api/quote_api.py | ashwinkp/ksapi | c348765cefb4d51fd90febcbfa9ff890b67bdc7d | [
"Apache-2.0"
] | 19 | 2022-02-03T12:40:08.000Z | 2022-03-30T09:12:46.000Z | python/ks_api_client/api/quote_api.py | ashwinkp/ksapi | c348765cefb4d51fd90febcbfa9ff890b67bdc7d | [
"Apache-2.0"
] | 12 | 2021-12-23T06:14:21.000Z | 2022-03-28T07:47:19.000Z | # coding: utf-8
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from ks_api_client.api_client import ApiClient
from ks_api_client.exceptions import ( # noqa: F401
ApiTypeError,
ApiValueError
)
class QuoteApi(object):
    """Client for the quotes endpoints of the REST API.

    All four endpoints share one request shape: ``consumerKey`` and
    ``sessionToken`` are sent as HTTP headers, ``instrumentTokens`` is
    substituted into the URL path, the request is a GET authenticated
    via ``bearerAuth``, and the decoded response is returned as a plain
    ``object``.  Since the endpoints differ only in their URL, the
    shared validation and request construction is factored into
    :meth:`_call_quotes_endpoint`; the public methods are thin wrappers
    supplying the endpoint-specific path.

    Each ``get_*`` method returns only the response data; its
    ``get_*_with_http_info`` variant returns
    ``(data, status_code, headers)``.  Both accept the same optional
    keyword arguments, documented on :meth:`_call_quotes_endpoint`.
    """

    # Optional keyword arguments accepted by every endpoint method.
    _OPTIONAL_PARAMS = frozenset([
        'async_req',
        '_return_http_data_only',
        '_preload_content',
        '_request_timeout',
        '_request_auth',
    ])

    def __init__(self, api_client=None):
        # Fall back to a default-configured client when none is injected.
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client

    # ------------------------------------------------------------------
    # Public endpoint wrappers
    # ------------------------------------------------------------------

    def get_instruments_details(self, consumerKey, sessionToken, instrumentTokens, **kwargs):  # noqa: E501
        """Get full details for the given instrument tokens.

        :param consumerKey: (required)
        :type consumerKey: str
        :param sessionToken: (required)
        :type sessionToken: str
        :param instrumentTokens: (required)
        :type instrumentTokens: str
        :return: the result object (or the request thread when called
            with ``async_req=True``; see :meth:`_call_quotes_endpoint`
            for all optional keyword arguments).
        :rtype: object
        """
        kwargs['_return_http_data_only'] = True
        return self.get_instruments_details_with_http_info(
            consumerKey, sessionToken, instrumentTokens, **kwargs)

    def get_instruments_details_with_http_info(self, consumerKey, sessionToken, instrumentTokens, **kwargs):  # noqa: E501
        """Get full details, returning ``(data, status_code, headers)``.

        Same parameters as :meth:`get_instruments_details`; optional
        keyword arguments are documented on
        :meth:`_call_quotes_endpoint`.
        """
        return self._call_quotes_endpoint(
            'get_instruments_details',
            '/quotes/v1.0/instruments/{instrumentTokens}',
            consumerKey, sessionToken, instrumentTokens, **kwargs)

    def get_ltp_quote(self, consumerKey, sessionToken, instrumentTokens, **kwargs):  # noqa: E501
        """Return the LTP for an array of scrips.

        :param consumerKey: (required)
        :type consumerKey: str
        :param sessionToken: (required)
        :type sessionToken: str
        :param instrumentTokens: Instrument token of the scrip for which
            quote (required)
        :type instrumentTokens: str
        :return: the result object (or the request thread when called
            with ``async_req=True``).
        :rtype: object
        """
        kwargs['_return_http_data_only'] = True
        return self.get_ltp_quote_with_http_info(
            consumerKey, sessionToken, instrumentTokens, **kwargs)

    def get_ltp_quote_with_http_info(self, consumerKey, sessionToken, instrumentTokens, **kwargs):  # noqa: E501
        """LTP quote, returning ``(data, status_code, headers)``.

        Same parameters as :meth:`get_ltp_quote`; optional keyword
        arguments are documented on :meth:`_call_quotes_endpoint`.
        """
        return self._call_quotes_endpoint(
            'get_ltp_quote',
            '/quotes/v1.0/ltp/instruments/{instrumentTokens}',
            consumerKey, sessionToken, instrumentTokens, **kwargs)

    def get_market_details_quote(self, consumerKey, sessionToken, instrumentTokens, **kwargs):  # noqa: E501
        """Return market depth details for an array of scrips.

        :param consumerKey: (required)
        :type consumerKey: str
        :param sessionToken: (required)
        :type sessionToken: str
        :param instrumentTokens: Instrument token of the scrip for which
            quote (required)
        :type instrumentTokens: str
        :return: the result object (or the request thread when called
            with ``async_req=True``).
        :rtype: object
        """
        kwargs['_return_http_data_only'] = True
        return self.get_market_details_quote_with_http_info(
            consumerKey, sessionToken, instrumentTokens, **kwargs)

    def get_market_details_quote_with_http_info(self, consumerKey, sessionToken, instrumentTokens, **kwargs):  # noqa: E501
        """Market depth quote, returning ``(data, status_code, headers)``.

        Same parameters as :meth:`get_market_details_quote`; optional
        keyword arguments are documented on
        :meth:`_call_quotes_endpoint`.
        """
        return self._call_quotes_endpoint(
            'get_market_details_quote',
            '/quotes/v1.0/depth/instruments/{instrumentTokens}',
            consumerKey, sessionToken, instrumentTokens, **kwargs)

    def get_ohlc_quote(self, consumerKey, sessionToken, instrumentTokens, **kwargs):  # noqa: E501
        """Return the OHLC quote details for an array of scrips.

        :param consumerKey: (required)
        :type consumerKey: str
        :param sessionToken: (required)
        :type sessionToken: str
        :param instrumentTokens: Instrument token of the scrip for which
            quote (required)
        :type instrumentTokens: str
        :return: the result object (or the request thread when called
            with ``async_req=True``).
        :rtype: object
        """
        kwargs['_return_http_data_only'] = True
        return self.get_ohlc_quote_with_http_info(
            consumerKey, sessionToken, instrumentTokens, **kwargs)

    def get_ohlc_quote_with_http_info(self, consumerKey, sessionToken, instrumentTokens, **kwargs):  # noqa: E501
        """OHLC quote, returning ``(data, status_code, headers)``.

        Same parameters as :meth:`get_ohlc_quote`; optional keyword
        arguments are documented on :meth:`_call_quotes_endpoint`.
        """
        return self._call_quotes_endpoint(
            'get_ohlc_quote',
            '/quotes/v1.0/ohlc/instruments/{instrumentTokens}',
            consumerKey, sessionToken, instrumentTokens, **kwargs)

    # ------------------------------------------------------------------
    # Shared implementation
    # ------------------------------------------------------------------

    def _call_quotes_endpoint(self, method_name, resource_path,
                              consumerKey, sessionToken, instrumentTokens,
                              **kwargs):
        """Validate arguments and issue a GET to *resource_path*.

        :param method_name: public method name, used in error messages.
        :type method_name: str
        :param resource_path: URL template containing
            ``{instrumentTokens}``.
        :type resource_path: str
        :param consumerKey: sent as the ``consumerKey`` header. (required)
        :type consumerKey: str
        :param sessionToken: sent as the ``sessionToken`` header. (required)
        :type sessionToken: str
        :param instrumentTokens: substituted into the URL path. (required)
        :type instrumentTokens: str
        :param async_req: Whether to execute the request asynchronously.
        :type async_req: bool, optional
        :param _return_http_data_only: response data without head status
            code and headers
        :type _return_http_data_only: bool, optional
        :param _preload_content: if False, the urllib3.HTTPResponse object
            will be returned without reading/decoding response data.
            Default is True.
        :type _preload_content: bool, optional
        :param _request_timeout: timeout setting for this request. If one
            number provided, it will be total request timeout. It can also
            be a pair (tuple) of (connection, read) timeouts.
        :param _request_auth: set to override the auth_settings for a
            single request; this effectively ignores the authentication
            in the spec for a single request.
        :type _request_auth: dict, optional
        :raises ApiTypeError: on an unrecognized keyword argument.
        :raises ApiValueError: when client-side validation is enabled and
            a required parameter is None.
        :return: whatever ``api_client.call_api`` returns for the given
            flags (result object, full tuple, or the request thread).
        """
        for key in kwargs:
            if key not in self._OPTIONAL_PARAMS:
                raise ApiTypeError(
                    "Got an unexpected keyword argument '%s'"
                    " to method %s" % (key, method_name)
                )
        # All three positional parameters are required by the API; the
        # original generated code only ever rejected explicit None here.
        if self.api_client.client_side_validation:
            for name, value in (('consumerKey', consumerKey),
                                ('sessionToken', sessionToken),
                                ('instrumentTokens', instrumentTokens)):
                if value is None:
                    raise ApiValueError(
                        "Missing the required parameter `%s` when calling "
                        "`%s`" % (name, method_name))
        header_params = {
            'consumerKey': consumerKey,
            'sessionToken': sessionToken,
            # HTTP header `Accept`
            'Accept': self.api_client.select_header_accept(
                ['application/json']),
        }
        return self.api_client.call_api(
            resource_path, 'GET',
            {'instrumentTokens': instrumentTokens},  # path params
            [],                                      # query params
            header_params,
            body=None,
            post_params=[],
            files={},
            response_type='object',
            auth_settings=['bearerAuth'],
            async_req=kwargs.get('async_req'),
            _return_http_data_only=kwargs.get('_return_http_data_only'),
            _preload_content=kwargs.get('_preload_content', True),
            _request_timeout=kwargs.get('_request_timeout'),
            collection_formats={},
            _request_auth=kwargs.get('_request_auth'))
| 48.218553 | 138 | 0.60945 | 3,255 | 30,667 | 5.508449 | 0.054992 | 0.041049 | 0.065588 | 0.024094 | 0.978472 | 0.977747 | 0.976352 | 0.974233 | 0.973508 | 0.971277 | 0 | 0.014449 | 0.320703 | 30,667 | 635 | 139 | 48.294488 | 0.846246 | 0.445626 | 0 | 0.77193 | 0 | 0 | 0.218577 | 0.043502 | 0 | 0 | 0 | 0 | 0 | 1 | 0.031579 | false | 0 | 0.017544 | 0 | 0.080702 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
87956e497eb88c358f6648ea959695d78a0f8c9c | 335 | py | Python | PyTests/Violet/int_divmod_sign.py | LiarPrincess/Violet | 0a4268649b0eec3ab631d19015d7043394c6571e | [
"MIT"
] | null | null | null | PyTests/Violet/int_divmod_sign.py | LiarPrincess/Violet | 0a4268649b0eec3ab631d19015d7043394c6571e | [
"MIT"
] | 6 | 2021-10-14T15:55:16.000Z | 2022-03-31T14:04:02.000Z | PyTests/Violet/int_divmod_sign.py | LiarPrincess/Violet | 0a4268649b0eec3ab631d19015d7043394c6571e | [
"MIT"
] | null | null | null | a = 7
b = 3
result = a.__divmod__(b)
assert result[0] == 2
assert result[1] == 1
a = -7
b = 3
result = a.__divmod__(b)
assert result[0] == -3
assert result[1] == 2
a = 7
b = -3
result = a.__divmod__(b)
assert result[0] == -3
assert result[1] == -2
a = -7
b = -3
result = a.__divmod__(b)
assert result[0] == 2
assert result[1] == -1
| 13.958333 | 24 | 0.597015 | 64 | 335 | 2.875 | 0.15625 | 0.521739 | 0.065217 | 0.086957 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0.090909 | 0.21194 | 335 | 23 | 25 | 14.565217 | 0.606061 | 0 | 0 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.4 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
87a6076367067bc54f9e44cf7abf12702c05975d | 172 | py | Python | hTools2.roboFontExt/lib/Scripts/selected glyphs/transform/skew.py | frankrolf/hTools2_extension | 9d73b8640c85209853a72f8d4b167768de5e0d60 | [
"BSD-3-Clause"
] | 2 | 2019-12-18T16:12:07.000Z | 2019-12-21T01:19:23.000Z | hTools2.roboFontExt/lib/Scripts/selected glyphs/transform/skew.py | frankrolf/hTools2_extension | 9d73b8640c85209853a72f8d4b167768de5e0d60 | [
"BSD-3-Clause"
] | null | null | null | hTools2.roboFontExt/lib/Scripts/selected glyphs/transform/skew.py | frankrolf/hTools2_extension | 9d73b8640c85209853a72f8d4b167768de5e0d60 | [
"BSD-3-Clause"
] | null | null | null | # [h] skew glyphs dialog
import hTools2.dialogs.glyphs.skew
import importlib
importlib.reload(hTools2.dialogs.glyphs.skew)
hTools2.dialogs.glyphs.skew.skewGlyphsDialog()
| 21.5 | 46 | 0.819767 | 22 | 172 | 6.409091 | 0.454545 | 0.297872 | 0.425532 | 0.510638 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018868 | 0.075581 | 172 | 7 | 47 | 24.571429 | 0.867925 | 0.127907 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.75 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 8 |
87e8b11e744ff191d4a88ad28625055784713979 | 7,208 | py | Python | src/Atlas/test/test_aero.py | swryan/Atlas | c5e8800d326d6442ceb4d0ab7910bbbda2ce4838 | [
"Apache-2.0"
] | null | null | null | src/Atlas/test/test_aero.py | swryan/Atlas | c5e8800d326d6442ceb4d0ab7910bbbda2ce4838 | [
"Apache-2.0"
] | null | null | null | src/Atlas/test/test_aero.py | swryan/Atlas | c5e8800d326d6442ceb4d0ab7910bbbda2ce4838 | [
"Apache-2.0"
] | null | null | null | import os
import unittest
from scipy.io import loadmat
from Atlas.aero import Aero, Aero2
class AeroTestCase(unittest.TestCase):
def setup(self):
pass
def test_aero(self):
""" test Aero
(with simple actuator disk method for induced velocity calculation)
"""
comp = Aero()
# populate inputs
path = os.path.join(os.path.dirname(__file__), 'aero.mat')
data = loadmat(path, struct_as_record=True, mat_dtype=True)
comp.b = int(data['b'][0][0])
comp.yN = data['yN'].flatten()
comp.Ns = max(comp.yN.shape) - 1
comp.R = 10.0
comp.dr = [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]
comp.r = [ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5]
comp.h = data['h'][0][0]
comp.ycmax = data['ycmax'][0][0]
comp.rho = data['rho'][0][0]
comp.visc = data['visc'][0][0]
comp.vw = data['vw'][0][0]
comp.vc = data['vc'][0][0]
comp.Omega = data['Omega'][0][0]
comp.c = data['cE']
comp.Cl = data['Cl']
comp.d = data['d']
comp.yWire = data['yWire'][0]
comp.zWire = data['zWire'][0][0]
comp.tWire = data['tWire'][0][0]
comp.Cm = data['Cm']
comp.xtL = data['xtL']
comp.xtU = data['xtU']
# run
comp.run()
# check outputs
for i, val in enumerate(data['vi']):
self.assertAlmostEquals(comp.vi[i], val, 4,
msg='vi[%d] is %f, should be %f' % (i, comp.vi[i], val))
for i, val in enumerate(data['phi']):
self.assertAlmostEquals(comp.phi[i], val, 4,
msg='phi[%d] is %f, should be %f' % (i, comp.phi[i], val))
for i, val in enumerate(data['Re']):
self.assertAlmostEquals(comp.Re[i], val, 4,
msg='Re[%d] is %f, should be %f' % (i, comp.Re[i], val))
for i, val in enumerate(data['Cd']):
self.assertAlmostEquals(comp.Cd[i], val, 4,
msg='Cd[%d] is %f, should be %f' % (i, comp.Cd[i], val))
for i, val in enumerate(data['Fblade']['Fx'][0][0]):
self.assertAlmostEquals(comp.Fblade.Fx[i], val, 4,
msg='Fx[%d] is %f, should be %f' % (i, comp.Fblade.Fx[i], val))
for i, val in enumerate(data['Fblade']['Fz'][0][0]):
self.assertAlmostEquals(comp.Fblade.Fz[i], val, 4,
msg='Fz[%d] is %f, should be %f' % (i, comp.Fblade.Fz[i], val))
for i, val in enumerate(data['Fblade']['My'][0][0]):
self.assertAlmostEquals(comp.Fblade.My[i], val, 4,
msg='My[%d] is %f, should be %f' % (i, comp.Fblade.My[i], val))
for i, val in enumerate(data['Fblade']['Q'][0][0]):
self.assertAlmostEquals(comp.Fblade.Q[i], val, 4,
msg='Q[%d] is %f, should be %f' % (i, comp.Fblade.Q[i], val))
for i, val in enumerate(data['Fblade']['P'][0][0]):
self.assertAlmostEquals(comp.Fblade.P[i], val, 4,
msg='P[%d] is %f, should be %f' % (i, comp.Fblade.P[i], val))
for i, val in enumerate(data['Fblade']['Pi'][0][0]):
self.assertAlmostEquals(comp.Fblade.Pi[i], val, 4,
msg='Pi[%d] is %f, should be %f' % (i, comp.Fblade.Pi[i], val))
for i, val in enumerate(data['Fblade']['Pp'][0][0]):
self.assertAlmostEquals(comp.Fblade.Pp[i], val, 4,
msg='Pp[%d] is %f, should be %f' % (i, comp.Fblade.Pp[i], val))
def test_aero2(self):
""" test Aero2
(with vortex method for induced velocity calculation)
"""
comp = Aero2()
# populate inputs
path = os.path.join(os.path.dirname(__file__), 'aero2.mat')
data = loadmat(path, struct_as_record=True, mat_dtype=True)
comp.b = int(data['b'][0][0])
comp.yN = data['yN'].flatten()
comp.Ns = max(comp.yN.shape) - 1
comp.R = 10.0
comp.dr = [1., 1., 1., 1., 1., 1., 1., 1., 1., 1.]
comp.r = [ 0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5, 7.5, 8.5, 9.5]
comp.h = data['h'][0][0]
comp.ycmax = data['ycmax'][0][0]
comp.rho = data['rho'][0][0]
comp.visc = data['visc'][0][0]
comp.vw = data['vw'][0][0]
comp.vc = data['vc'][0][0]
comp.Omega = data['Omega'][0][0]
comp.c = data['cE']
comp.Cl = data['Cl']
comp.d = data['d']
comp.yWire = data['yWire'][0]
comp.zWire = data['zWire'][0][0]
comp.tWire = data['tWire'][0][0]
comp.Cm = data['Cm']
comp.xtL = data['xtL']
comp.xtU = data['xtU']
comp.q = data['q']
comp.anhedral = data['anhedral'][0][0]
# run
comp.run()
# check outputs
for i, val in enumerate(data['vi']):
self.assertAlmostEquals(comp.vi[i], val, 4,
msg='vi[%d] is %f, should be %f' % (i, comp.vi[i], val))
for i, val in enumerate(data['phi']):
self.assertAlmostEquals(comp.phi[i], val, 4,
msg='phi[%d] is %f, should be %f' % (i, comp.phi[i], val))
for i, val in enumerate(data['Re']):
self.assertAlmostEquals(comp.Re[i], val, 4,
msg='Re[%d] is %f, should be %f' % (i, comp.Re[i], val))
for i, val in enumerate(data['Cd']):
self.assertAlmostEquals(comp.Cd[i], val, 4,
msg='Cd[%d] is %f, should be %f' % (i, comp.Cd[i], val))
for i, val in enumerate(data['Fblade']['Fx'][0][0]):
self.assertAlmostEquals(comp.Fblade.Fx[i], val, 4,
msg='Fx[%d] is %f, should be %f' % (i, comp.Fblade.Fx[i], val))
for i, val in enumerate(data['Fblade']['Fz'][0][0]):
self.assertAlmostEquals(comp.Fblade.Fz[i], val, 4,
msg='Fz[%d] is %f, should be %f' % (i, comp.Fblade.Fz[i], val))
for i, val in enumerate(data['Fblade']['My'][0][0]):
self.assertAlmostEquals(comp.Fblade.My[i], val, 4,
msg='My[%d] is %f, should be %f' % (i, comp.Fblade.My[i], val))
for i, val in enumerate(data['Fblade']['Q'][0][0]):
self.assertAlmostEquals(comp.Fblade.Q[i], val, 4,
msg='Q[%d] is %f, should be %f' % (i, comp.Fblade.Q[i], val))
for i, val in enumerate(data['Fblade']['P'][0][0]):
self.assertAlmostEquals(comp.Fblade.P[i], val, 4,
msg='P[%d] is %f, should be %f' % (i, comp.Fblade.P[i], val))
for i, val in enumerate(data['Fblade']['Pi'][0][0]):
self.assertAlmostEquals(comp.Fblade.Pi[i], val, 4,
msg='Pi[%d] is %f, should be %f' % (i, comp.Fblade.Pi[i], val))
for i, val in enumerate(data['Fblade']['Pp'][0][0]):
self.assertAlmostEquals(comp.Fblade.Pp[i], val, 4,
msg='Pp[%d] is %f, should be %f' % (i, comp.Fblade.Pp[i], val))
if __name__ == "__main__":
unittest.main()
| 37.936842 | 79 | 0.482103 | 1,068 | 7,208 | 3.231273 | 0.101124 | 0.0765 | 0.044625 | 0.057375 | 0.919154 | 0.919154 | 0.896552 | 0.896552 | 0.896552 | 0.896552 | 0 | 0.034089 | 0.320339 | 7,208 | 189 | 80 | 38.137566 | 0.670341 | 0.029273 | 0 | 0.870229 | 0 | 0 | 0.120513 | 0 | 0 | 0 | 0 | 0 | 0.167939 | 1 | 0.022901 | false | 0.007634 | 0.030534 | 0 | 0.061069 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
ea5c3b2a7bb87230deb922d3ec473efb96a39a04 | 49,210 | py | Python | frame_level_models.py | adalucky1234/Automatic-Youtube-labelling | 6bd44acc3936c1888c9d7e978790f964fa516c5d | [
"Apache-2.0"
] | null | null | null | frame_level_models.py | adalucky1234/Automatic-Youtube-labelling | 6bd44acc3936c1888c9d7e978790f964fa516c5d | [
"Apache-2.0"
] | null | null | null | frame_level_models.py | adalucky1234/Automatic-Youtube-labelling | 6bd44acc3936c1888c9d7e978790f964fa516c5d | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains a collection of models which operate on variable-length sequences.
"""
import math
import models
import video_level_models
import tensorflow as tf
import model_utils as utils
import numpy as np
import tensorflow.contrib.slim as slim
from tensorflow import flags
from tensorflow import logging
FLAGS = flags.FLAGS
flags.DEFINE_integer("iterations", 30,
"Number of frames per batch for DBoF.")
flags.DEFINE_bool("dbof_add_batch_norm", True,
"Adds batch normalization to the DBoF model.")
flags.DEFINE_bool(
"sample_random_frames", True,
"If true samples random frames (for frame level models). If false, a random"
"sequence of frames is sampled instead.")
flags.DEFINE_integer("dbof_cluster_size", 8192,
"Number of units in the DBoF cluster layer.")
flags.DEFINE_integer("dbof_hidden_size", 1024,
"Number of units in the DBoF hidden layer.")
flags.DEFINE_string("dbof_pooling_method", "max",
"The pooling method used in the DBoF cluster layer. "
"Choices are 'average' and 'max'.")
flags.DEFINE_string("video_level_classifier_model", "MoeModel",
"Some Frame-Level models can be decomposed into a "
"generalized pooling operation followed by a "
"classifier layer")
flags.DEFINE_string("video_level_classifier_model_new", "NewMoeModel",
"Some Frame-Level models can be decomposed into a "
"generalized pooling operation followed by a "
"classifier layer")
flags.DEFINE_integer("lstm_cells", 128, "Number of LSTM cells.")
flags.DEFINE_integer("lstm_layers", 2, "Number of LSTM layers.")
flags.DEFINE_integer("conv_layers", 2, "Number of Conv 1D layers.")
flags.DEFINE_integer("resnet_blocks", 3, "Number of resnet blocks.")
class FrameLevelLogisticModel(models.BaseModel):
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
"""Creates a model which uses a logistic classifier over the average of the
frame-level features.
This class is intended to be an example for implementors of frame level
models. If you want to train a model over averaged features it is more
efficient to average them beforehand rather than on the fly.
Args:
model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
input features.
vocab_size: The number of classes in the dataset.
num_frames: A vector of length 'batch' which indicates the number of
frames for each video (before padding).
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
'batch_size' x 'num_classes'.
"""
num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
feature_size = model_input.get_shape().as_list()[2]
denominators = tf.reshape(
tf.tile(num_frames, [1, feature_size]), [-1, feature_size])
avg_pooled = tf.reduce_sum(model_input,
axis=[1]) / denominators
output = slim.fully_connected(
avg_pooled, vocab_size, activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(1e-8))
return {"predictions": output}
class DbofModel(models.BaseModel):
"""Creates a Deep Bag of Frames model.
The model projects the features for each frame into a higher dimensional
'clustering' space, pools across frames in that space, and then
uses a configurable video-level model to classify the now aggregated features.
The model will randomly sample either frames or sequences of frames during
training to speed up convergence.
Args:
model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
input features.
vocab_size: The number of classes in the dataset.
num_frames: A vector of length 'batch' which indicates the number of
frames for each video (before padding).
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
'batch_size' x 'num_classes'.
"""
def create_model(self,
model_input,
vocab_size,
num_frames,
iterations=None,
add_batch_norm=None,
sample_random_frames=None,
cluster_size=None,
hidden_size=None,
is_training=True,
**unused_params):
iterations = iterations or FLAGS.iterations
add_batch_norm = add_batch_norm or FLAGS.dbof_add_batch_norm
random_frames = sample_random_frames or FLAGS.sample_random_frames
cluster_size = cluster_size or FLAGS.dbof_cluster_size
hidden1_size = hidden_size or FLAGS.dbof_hidden_size
num_frames = tf.cast(tf.expand_dims(num_frames, 1), tf.float32)
if random_frames:
model_input = utils.SampleRandomFrames(model_input, num_frames,
iterations)
else:
model_input = utils.SampleRandomSequence(model_input, num_frames,
iterations)
max_frames = model_input.get_shape().as_list()[1]
feature_size = model_input.get_shape().as_list()[2]
reshaped_input = tf.reshape(model_input, [-1, feature_size])
tf.summary.histogram("input_hist", reshaped_input)
if add_batch_norm:
reshaped_input = slim.batch_norm(
reshaped_input,
center=True,
scale=True,
is_training=is_training,
scope="input_bn")
cluster_weights = tf.get_variable("cluster_weights",
[feature_size, cluster_size],
initializer = tf.random_normal_initializer(stddev=1 / math.sqrt(feature_size)))
tf.summary.histogram("cluster_weights", cluster_weights)
activation = tf.matmul(reshaped_input, cluster_weights)
if add_batch_norm:
activation = slim.batch_norm(
activation,
center=True,
scale=True,
is_training=is_training,
scope="cluster_bn")
else:
cluster_biases = tf.get_variable("cluster_biases",
[cluster_size],
initializer = tf.random_normal(stddev=1 / math.sqrt(feature_size)))
tf.summary.histogram("cluster_biases", cluster_biases)
activation += cluster_biases
activation = tf.nn.relu6(activation)
tf.summary.histogram("cluster_output", activation)
activation = tf.reshape(activation, [-1, max_frames, cluster_size])
activation = utils.FramePooling(activation, FLAGS.dbof_pooling_method)
hidden1_weights = tf.get_variable("hidden1_weights",
[cluster_size, hidden1_size],
initializer=tf.random_normal_initializer(stddev=1 / math.sqrt(cluster_size)))
tf.summary.histogram("hidden1_weights", hidden1_weights)
activation = tf.matmul(activation, hidden1_weights)
if add_batch_norm:
activation = slim.batch_norm(
activation,
center=True,
scale=True,
is_training=is_training,
scope="hidden1_bn")
else:
hidden1_biases = tf.get_variable("hidden1_biases",
[hidden1_size],
initializer = tf.random_normal_initializer(stddev=0.01))
tf.summary.histogram("hidden1_biases", hidden1_biases)
activation += hidden1_biases
activation = tf.nn.relu6(activation)
tf.summary.histogram("hidden1_output", activation)
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=activation,
vocab_size=vocab_size,
**unused_params)
class NewSqueezeNetModel(models.BaseModel):
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
"""Creates a model which uses a stack of LSTMs to represent the video.
Args:
model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
input features.
vocab_size: The number of classes in the dataset.
num_frames: A vector of length 'batch' which indicates the number of
frames for each video (before padding).
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
'batch_size' x 'num_classes'.
"""
def fire_module(self, x,sp,e11p,e33p):
s = tf.layers.conv1d(x,sp,1,1,"same")
s = tf.nn.relu(s)
e11 = tf.layers.conv1d(s,e11p,1,1,"same")
e11 = tf.nn.relu(e11)
e33 = tf.layers.conv1d(s,e33p,3,1,"same")
e33 = tf.nn.relu(e33)
return tf.concat([e11,e33],-1)
conv1 = tf.layers.conv1d(
model_input,
filters=64,
kernel_size=1,
strides=1,
padding='same',
activation=tf.nn.relu,
name="conv1d_%d" % 1)
pool1 = tf.layers.max_pooling1d(
conv1,
pool_size=3,
strides=2,
padding='same',
name="pool1d_%d" % 1)
fire2 = fire_module(self, pool1,16,64,64)
fire3 = fire_module(self, fire2,16,64,64)
pool4 = tf.layers.max_pooling1d(
fire3,
pool_size=3,
strides=2,
padding='same',
name="pool1d_%d" % 1)
fire5 = fire_module(self, pool4,32,128,128)
fire6 = fire_module(self, fire5,32,128,128)
pool7 = tf.layers.max_pooling1d(
fire6,
pool_size=3,
strides=2,
padding='same',
name="pool1d_%d" % 1)
fire8 = fire_module(self, pool7,48,192,192)
fire9 = fire_module(self, fire8,48,192,192)
fire10 = fire_module(self, fire9,64,256,256)
fire11 = fire_module(self, fire10,64,256,256)
conv12 = tf.layers.conv1d(
fire11,
filters=100,
kernel_size=1,
strides=1,
padding='same',
activation=tf.nn.relu)
avgpool13 = tf.layers.average_pooling1d(
conv12,
pool_size=4,
strides=4,
padding='same')
lstm_size = FLAGS.lstm_cells
number_of_layers = FLAGS.lstm_layers
stacked_lstm = tf.contrib.rnn.MultiRNNCell(
[
tf.contrib.rnn.BasicLSTMCell(
lstm_size, forget_bias=1.0)
for _ in range(number_of_layers)
])
loss = 0.0
logging.info("num_frames shape: " + str(tf.shape(num_frames)))
outputs, state = tf.nn.dynamic_rnn(stacked_lstm, avgpool13,
sequence_length=tf.div(num_frames, tf.constant(30)),
dtype=tf.float32)
# outputs, state = tf.nn.dynamic_rnn(stacked_lstm, fire11,
# sequence_length=tf.add(tf.div(num_frames, tf.constant(8)), tf.constant(1)),
# dtype=tf.float32)
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=state[-1].h,
vocab_size=vocab_size,
**unused_params)
class LstmCustomModel(models.BaseModel):
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
"""Creates a model which uses a stack of LSTMs to represent the video.
Args:
model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
input features.
vocab_size: The number of classes in the dataset.
num_frames: A vector of length 'batch' which indicates the number of
frames for each video (before padding).
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
'batch_size' x 'num_classes'.
"""
conv1 = tf.layers.conv1d(
model_input,
filters=32,
kernel_size=5,
strides=1,
activation=tf.nn.relu,
padding="same")
pool1 = tf.layers.max_pooling1d(
conv1,
pool_size=5,
strides=2,
padding="same")
conv2 = tf.layers.conv1d(
pool1,
filters=64,
kernel_size=3,
strides=1,
activation=tf.nn.relu,
padding="same")
pool2 = tf.layers.max_pooling1d(
conv2,
pool_size=5,
strides=2,
padding="same")
lstm_size = FLAGS.lstm_cells
number_of_layers = FLAGS.lstm_layers
logging.info("**** LSTM HIDDEN UNITS **** " + str(lstm_size))
logging.info("**** LSTM LAYERS **** " + str(number_of_layers))
stacked_lstm = tf.contrib.rnn.MultiRNNCell(
[
tf.contrib.rnn.BasicLSTMCell(
lstm_size, forget_bias=1.0)
for _ in range(number_of_layers)
])
loss = 0.0
outputs, state = tf.nn.dynamic_rnn(stacked_lstm, pool2,
sequence_length=num_frames,
dtype=tf.float32)
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=state[-1].h,
vocab_size=vocab_size,
**unused_params)
class ParallelModel(models.BaseModel):
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
"""Creates a model which uses a stack of LSTMs to represent the video.
Args:
model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
input features.
vocab_size: The number of classes in the dataset.
num_frames: A vector of length 'batch' which indicates the number of
frames for each video (before padding).
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
'batch_size' x 'num_classes'.
"""
# branch 1
conv1_1 = tf.layers.conv1d(
model_input,
filters=32,
kernel_size=5,
strides=2,
activation=tf.nn.relu,
padding="same")
pool1_1 = tf.layers.max_pooling1d(
conv1_1,
pool_size=5,
strides=2,
padding="same")
conv1_2 = tf.layers.conv1d(
pool1_1,
filters=64,
kernel_size=5,
strides=2,
activation=tf.nn.relu,
padding="same")
pool1_2 = tf.layers.max_pooling1d(
conv1_2,
pool_size=5,
strides=2,
padding="same")
# branch 2
conv2_1 = tf.layers.conv1d(
model_input,
filters=32,
kernel_size=3,
strides=2,
activation=tf.nn.relu,
padding="same")
pool2_1 = tf.layers.max_pooling1d(
conv2_1,
pool_size=5,
strides=2,
padding="same")
conv2_2 = tf.layers.conv1d(
pool2_1,
filters=64,
kernel_size=3,
strides=2,
activation=tf.nn.relu,
padding="same")
pool2_2 = tf.layers.max_pooling1d(
conv2_2,
pool_size=5,
strides=2,
padding="same")
#branch 3
conv3_1 = tf.layers.conv1d(
model_input,
filters=32,
kernel_size=7,
strides=2,
activation=tf.nn.relu,
padding="same")
pool3_1 = tf.layers.max_pooling1d(
conv2_1,
pool_size=5,
strides=2,
padding="same")
conv3_2 = tf.layers.conv1d(
pool2_1,
filters=64,
kernel_size=7,
strides=2,
activation=tf.nn.relu,
padding="same")
pool3_2 = tf.layers.max_pooling1d(
conv2_2,
pool_size=5,
strides=2,
padding="same")
#branch 4
conv4_1 = tf.layers.conv1d(
model_input,
filters=32,
kernel_size=7,
strides=2,
activation=tf.nn.relu,
padding="same")
pool4_1 = tf.layers.max_pooling1d(
conv2_1,
pool_size=5,
strides=2,
padding="same")
conv3_2 = tf.layers.conv1d(
pool2_1,
filters=64,
kernel_size=7,
strides=2,
activation=tf.nn.relu,
padding="same")
pool3_2 = tf.layers.max_pooling1d(
conv2_2,
pool_size=5,
strides=2,
padding="same")
concat3 = tf.keras.layers.concatenate([pool1_2, pool2_2, pool3_2])
lstm_size = FLAGS.lstm_cells
number_of_layers = FLAGS.lstm_layers
stacked_lstm = tf.contrib.rnn.MultiRNNCell(
[
tf.contrib.rnn.BasicLSTMCell(
lstm_size, forget_bias=1.0)
for _ in range(number_of_layers)
])
loss = 0.0
outputs, state = tf.nn.dynamic_rnn(stacked_lstm, concat3,
sequence_length=num_frames,
dtype=tf.float32)
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=state[-1].h,
vocab_size=vocab_size,
**unused_params)
class ImageModel(models.BaseModel):
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
"""Creates a model which uses a stack of LSTMs to represent the video.
Args:
model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
input features.
vocab_size: The number of classes in the dataset.
num_frames: A vector of length 'batch' which indicates the number of
frames for each video (before padding).
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
'batch_size' x 'num_classes'.
"""
reshape0 = tf.reshape(model_input, [-1, 300, 384, 3])
conv1 = tf.layers.conv2d(
reshape0,
filters=32,
kernel_size=5,
strides=1,
activation=tf.nn.relu,
padding="same")
pool1 = tf.layers.max_pooling2d(
conv1,
pool_size=5,
strides=2,
padding="same")
conv2 = tf.layers.conv2d(
pool1,
filters=64,
kernel_size=3,
strides=1,
activation=tf.nn.relu,
padding="same")
pool2 = tf.layers.max_pooling2d(
conv2,
pool_size=5,
strides=2,
padding="same")
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=pool2,
vocab_size=vocab_size,
**unused_params)
class ParallelModel(models.BaseModel):
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
"""Creates a model which uses a stack of LSTMs to represent the video.
Args:
model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
input features.
vocab_size: The number of classes in the dataset.
num_frames: A vector of length 'batch' which indicates the number of
frames for each video (before padding).
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
'batch_size' x 'num_classes'.
"""
# branch 1
conv1_1 = tf.layers.conv1d(
model_input,
filters=32,
kernel_size=5,
strides=2,
activation=tf.nn.relu,
padding="same")
pool1_1 = tf.layers.max_pooling1d(
conv1_1,
pool_size=5,
strides=2,
padding="same")
conv1_2 = tf.layers.conv1d(
pool1_1,
filters=64,
kernel_size=5,
strides=2,
activation=tf.nn.relu,
padding="same")
pool1_2 = tf.layers.max_pooling1d(
conv1_2,
pool_size=5,
strides=2,
padding="same")
# branch 2
conv2_1 = tf.layers.conv1d(
model_input,
filters=32,
kernel_size=3,
strides=2,
activation=tf.nn.relu,
padding="same")
pool2_1 = tf.layers.max_pooling1d(
conv2_1,
pool_size=5,
strides=2,
padding="same")
conv2_2 = tf.layers.conv1d(
pool2_1,
filters=64,
kernel_size=3,
strides=2,
activation=tf.nn.relu,
padding="same")
pool2_2 = tf.layers.max_pooling1d(
conv2_2,
pool_size=5,
strides=2,
padding="same")
#branch 3
conv3_1 = tf.layers.conv1d(
model_input,
filters=32,
kernel_size=7,
strides=2,
activation=tf.nn.relu,
padding="same")
pool3_1 = tf.layers.max_pooling1d(
conv2_1,
pool_size=5,
strides=2,
padding="same")
conv3_2 = tf.layers.conv1d(
pool2_1,
filters=64,
kernel_size=7,
strides=2,
activation=tf.nn.relu,
padding="same")
pool3_2 = tf.layers.max_pooling1d(
conv2_2,
pool_size=5,
strides=2,
padding="same")
#branch 4
conv4_1 = tf.layers.conv1d(
model_input,
filters=32,
kernel_size=7,
strides=2,
activation=tf.nn.relu,
padding="same")
pool4_1 = tf.layers.max_pooling1d(
conv2_1,
pool_size=5,
strides=2,
padding="same")
conv3_2 = tf.layers.conv1d(
pool2_1,
filters=64,
kernel_size=7,
strides=2,
activation=tf.nn.relu,
padding="same")
pool3_2 = tf.layers.max_pooling1d(
conv2_2,
pool_size=5,
strides=2,
padding="same")
concat3 = tf.keras.layers.concatenate([pool1_2, pool2_2, pool3_2])
lstm_size = FLAGS.lstm_cells
number_of_layers = FLAGS.lstm_layers
stacked_lstm = tf.contrib.rnn.MultiRNNCell(
[
tf.contrib.rnn.BasicLSTMCell(
lstm_size, forget_bias=1.0)
for _ in range(number_of_layers)
])
loss = 0.0
outputs, state = tf.nn.dynamic_rnn(stacked_lstm, concat3,
sequence_length=num_frames,
dtype=tf.float32)
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=state[-1].h,
vocab_size=vocab_size,
**unused_params)
class InceptionModel(models.BaseModel):
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
"""Creates a model which uses a stack of LSTMs to represent the video.
Args:
model_input: A 'batch_size' x 'max_frames' x 'num_features' matrix of
input features.
vocab_size: The number of classes in the dataset.
num_frames: A vector of length 'batch' which indicates the number of
frames for each video (before padding).
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
'batch_size' x 'num_classes'.
"""
# branch 1
conv1_1 = tf.layers.conv1d(
model_input,
filters=32,
kernel_size=1,
strides=1,
activation=tf.nn.relu,
padding="same")
pool1_1 = tf.layers.max_pooling1d(
conv1_1,
pool_size=5,
strides=2,
padding="same")
conv1_2 = tf.layers.conv1d(
pool1_1,
filters=64,
kernel_size=1,
strides=1,
activation=tf.nn.relu,
padding="same")
pool1_2 = tf.layers.max_pooling1d(
conv1_2,
pool_size=5,
strides=2,
padding="same")
# branch 2
conv2_1 = tf.layers.conv1d(
model_input,
filters=16,
kernel_size=1,
strides=1,
activation=tf.nn.relu,
padding="same")
# branch 2, group 1
conv2_1_1 = tf.layers.conv1d(
conv2_1,
filters=32,
kernel_size=3,
strides=1,
activation=tf.nn.relu,
padding="same")
pool2_1_1 = tf.layers.max_pooling1d(
conv2_1_1,
pool_size=5,
strides=2,
padding="same")
conv2_1_2 = tf.layers.conv1d(
pool2_1_1,
filters=64,
kernel_size=3,
strides=1,
activation=tf.nn.relu,
padding="same")
pool2_1_2 = tf.layers.max_pooling1d(
conv2_1_2,
pool_size=5,
strides=2,
padding="same")
# branch 2, group 2
conv2_2_1 = tf.layers.conv1d(
conv2_1,
filters=32,
kernel_size=5,
strides=1,
activation=tf.nn.relu,
padding="same")
pool2_2_1 = tf.layers.max_pooling1d(
conv2_2_1,
pool_size=5,
strides=2,
padding="same")
conv2_2_2 = tf.layers.conv1d(
pool2_2_1,
filters=64,
kernel_size=5,
strides=1,
activation=tf.nn.relu,
padding="same")
pool2_2_2 = tf.layers.max_pooling1d(
conv2_2_2,
pool_size=5,
strides=2,
padding="same")
# branch 2, group 3
conv2_3_1 = tf.layers.conv1d(
conv2_1,
filters=32,
kernel_size=7,
strides=1,
activation=tf.nn.relu,
padding="same")
pool2_3_1 = tf.layers.max_pooling1d(
conv2_3_1,
pool_size=5,
strides=2,
padding="same")
conv2_3_2 = tf.layers.conv1d(
pool2_3_1,
filters=64,
kernel_size=7,
strides=1,
activation=tf.nn.relu,
padding="same")
pool2_3_2 = tf.layers.max_pooling1d(
conv2_3_2,
pool_size=5,
strides=2,
padding="same")
# branch 2, group 1
conv2_4_1 = tf.layers.conv1d(
conv2_1,
filters=32,
kernel_size=9,
strides=1,
activation=tf.nn.relu,
padding="same")
pool2_4_1 = tf.layers.max_pooling1d(
conv2_4_1,
pool_size=5,
strides=2,
padding="same")
conv2_4_2 = tf.layers.conv1d(
pool2_4_1,
filters=64,
kernel_size=9,
strides=1,
activation=tf.nn.relu,
padding="same")
pool2_4_2 = tf.layers.max_pooling1d(
conv2_4_2,
pool_size=5,
strides=2,
padding="same")
concat3 = tf.keras.layers.concatenate([pool1_2, pool2_1_2, pool2_2_2, pool2_3_2, pool2_4_2])
lstm_size = FLAGS.lstm_cells
number_of_layers = FLAGS.lstm_layers
stacked_lstm = tf.contrib.rnn.MultiRNNCell(
[
tf.contrib.rnn.BasicLSTMCell(
lstm_size, forget_bias=1.0)
for _ in range(number_of_layers)
])
loss = 0.0
outputs, state = tf.nn.dynamic_rnn(stacked_lstm, concat3,
sequence_length=num_frames,
dtype=tf.float32)
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model)
return aggregated_model().create_model(
model_input=state[-1].h,
vocab_size=vocab_size,
**unused_params)
class LstmMixtureModel(models.BaseModel):
def create_model(self, model_input, vocab_size, num_frames, **unused_params):
kernel_sizes = [5, 3, 5, 3, 5, 3]
filters = [32, 64, 64, 128, 256, 512]
conv_layers = FLAGS.conv_layers
logging.info("**** CONV LAYERS **** " + str(conv_layers))
conv1 = tf.layers.conv1d(
model_input,
filters=32,
kernel_size=5,
strides=1,
activation=tf.nn.relu,
padding="same")
pool1 = tf.layers.max_pooling1d(
conv1,
pool_size=5,
strides=2,
padding="same")
conv2 = tf.layers.conv1d(
pool1,
filters=64,
kernel_size=3,
strides=1,
activation=tf.nn.relu,
padding="same")
pool2 = tf.layers.max_pooling1d(
conv2,
pool_size=5,
strides=2,
padding="same")
lstm_size = FLAGS.lstm_cells
number_of_layers = FLAGS.lstm_layers
logging.info("**** LSTM HIDDEN UNITS **** " + str(lstm_size))
logging.info("**** LSTM LAYERS **** " + str(number_of_layers))
with tf.variable_scope('lstm1'):
stacked_lstm = tf.contrib.rnn.MultiRNNCell(
[
tf.contrib.rnn.BasicLSTMCell(
lstm_size, forget_bias=1.0)
for _ in range(number_of_layers)
])
loss = 0.0
outputs, state = tf.nn.dynamic_rnn(stacked_lstm, pool2,
sequence_length=num_frames,
dtype=tf.float32)
# second LSTM
conv2_1 = tf.layers.conv1d(
model_input,
filters=32,
kernel_size=5,
strides=1,
activation=tf.nn.relu,
padding="same")
pool2_1 = tf.layers.max_pooling1d(
conv2_1,
pool_size=5,
strides=2,
padding="same")
conv2_2 = tf.layers.conv1d(
pool2_1,
filters=64,
kernel_size=5,
strides=1,
activation=tf.nn.relu,
padding="same")
pool2_2 = tf.layers.max_pooling1d(
conv2_2,
pool_size=5,
strides=2,
padding="same")
lstm_size_2 = 256
number_of_layers_2 = 1
logging.info("**** LSTM HIDDEN UNITS 2 **** " + str(lstm_size_2))
logging.info("**** LSTM LAYERS 2 **** " + str(number_of_layers_2))
with tf.variable_scope('lstm2'):
stacked_lstm_2 = tf.contrib.rnn.MultiRNNCell(
[
tf.contrib.rnn.BasicLSTMCell(
lstm_size_2, forget_bias=1.0)
for _ in range(number_of_layers_2)
])
loss = 0.0
outputs_2, state_2 = tf.nn.dynamic_rnn(stacked_lstm_2, pool2_2,
sequence_length=num_frames,
dtype=tf.float32)
aggregated_model = getattr(video_level_models,
FLAGS.video_level_classifier_model_new)
return aggregated_model().create_model(
model_input_1=state[-1].h,
model_input_2=state_2[-1].h,
vocab_size=vocab_size,
**unused_params)
class LstmCustomNormModel(models.BaseModel):
    """Conv1d/batch-norm/max-pool front-end feeding a stacked LSTM classifier.

    NOTE(review): this class is re-defined verbatim later in the file; the
    later definition shadows this one at import time.
    """

    def create_model(self, model_input, vocab_size, num_frames, **unused_params):
        """Build the graph and delegate classification to a video-level model.

        Args:
            model_input: batch of frame-level features; assumed shape
                (batch, frames, features) from the conv1d/dynamic_rnn usage
                -- TODO confirm.
            vocab_size: number of output classes, forwarded to the classifier.
            num_frames: per-example frame counts, used as dynamic_rnn
                sequence_length.
            **unused_params: forwarded to the video-level classifier.

        Returns:
            Whatever the configured video-level classifier's create_model returns.
        """
        # Per-layer hyper-parameters; FLAGS.conv_layers decides how many of the
        # optional extra blocks (index >= 2) are built.
        kernel_sizes = [5, 3, 5, 3, 5, 3]
        filters = [32, 64, 64, 128, 256, 512]
        conv_layers = FLAGS.conv_layers
        logging.info("**** CONV LAYERS **** " + str(conv_layers))
        # First block: linear conv -> batch-norm -> ReLU -> stride-2 max-pool.
        # The activation is applied after batch_normalization, so it is
        # deliberately disabled on the conv1d call.
        conv1 = tf.layers.conv1d(
            model_input,
            filters=filters[0],
            kernel_size=kernel_sizes[0],
            strides=1,
            #activation=tf.nn.relu,
            padding="same")
        # training=True is hard-coded, so batch statistics are used even at
        # inference time -- NOTE(review): confirm this is intended.
        conv1 = tf.layers.batch_normalization(conv1, training=True)
        conv1 = tf.nn.relu(conv1)
        pool1 = tf.layers.max_pooling1d(
            conv1,
            pool_size=5,
            strides=2,
            padding="same")
        # Second block, same pattern.
        conv2 = tf.layers.conv1d(
            pool1,
            filters=filters[1],
            kernel_size=kernel_sizes[1],
            strides=1,
            #activation=tf.nn.relu,
            padding="same")
        conv2 = tf.layers.batch_normalization(conv2, training=True)
        conv2 = tf.nn.relu(conv2)
        pool2 = tf.layers.max_pooling1d(
            conv2,
            pool_size=5,
            strides=2,
            padding="same")
        inputLayer = pool2
        # Optional extra conv/pool blocks (indices 2 .. conv_layers-1).
        for i in range(2, conv_layers):
            conv = tf.layers.conv1d(
                inputLayer,
                filters=filters[i],
                kernel_size=kernel_sizes[i],
                strides=1,
                #activation=tf.nn.relu,
                padding="same")
            conv = tf.layers.batch_normalization(conv, training=True)
            conv = tf.nn.relu(conv)
            pool = tf.layers.max_pooling1d(
                conv,
                pool_size=5,
                strides=2,
                padding="same")
            inputLayer = pool
        lstm_size = FLAGS.lstm_cells
        number_of_layers = FLAGS.lstm_layers
        logging.info("**** LSTM HIDDEN UNITS **** " + str(lstm_size))
        logging.info("**** LSTM LAYERS **** " + str(number_of_layers))
        stacked_lstm = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.BasicLSTMCell(
                    lstm_size, forget_bias=1.0)
                for _ in range(number_of_layers)
            ])
        loss = 0.0  # unused; kept for parity with sibling models
        # NOTE(review): every stride-2 pool halves the time axis, but
        # sequence_length is still the raw num_frames -- confirm scaling.
        outputs, state = tf.nn.dynamic_rnn(stacked_lstm, inputLayer,
                                           sequence_length=num_frames,
                                           dtype=tf.float32)
        # Classify from the final LSTM layer's hidden state.
        aggregated_model = getattr(video_level_models,
                                   FLAGS.video_level_classifier_model)
        return aggregated_model().create_model(
            model_input=state[-1].h,
            vocab_size=vocab_size,
            **unused_params)
class LstmResnetNormModel(models.BaseModel):
    """1-D ResNet front-end (inline residual blocks) feeding a stacked LSTM."""

    def create_model(self, model_input, vocab_size, num_frames, **unused_params):
        """Build the graph and delegate classification to the configured
        video-level model.

        model_input is assumed to be (batch, frames, features) -- TODO confirm;
        num_frames is used as the dynamic_rnn sequence_length.
        """
        kernel_sizes = [3,3,3,3,3,3]
        filters = [256,256,256,256,256,256]
        # resnet_blocks is only logged here; the loop below always builds
        # len(filters)//2 == 3 residual blocks regardless of the flag.
        resnet_blocks = FLAGS.resnet_blocks
        logging.info("**** resnet_blocks **** " + str(resnet_blocks))
        # Stem: 1x1 conv with stride 2 projects the input to 256 channels.
        conv1 = tf.layers.conv1d(
            model_input,
            filters=256,
            kernel_size=1,
            strides=2,
            #activation=tf.nn.relu,
            padding="same")
        # NOTE(review): training=True hard-coded -- batch stats used at inference.
        conv1 = tf.layers.batch_normalization(conv1, training=True)
        conv1 = tf.nn.relu(conv1)
        pool1 = tf.layers.max_pooling1d(
            conv1,
            pool_size=5,
            strides=2,
            padding="same")
        inputLayer = pool1
        # One residual block per pair of (filters, kernel_sizes) entries:
        # conv-BN-ReLU-conv, add the identity skip, then BN-ReLU.
        for i in range(0, len(filters), 2):
            conv = tf.layers.conv1d(
                inputLayer,
                filters=filters[i],
                kernel_size=kernel_sizes[i],
                strides=1,
                #activation=tf.nn.relu,
                padding="same")
            conv = tf.layers.batch_normalization(conv, training=True)
            conv = tf.nn.relu(conv)
            conv = tf.layers.conv1d(
                conv,
                filters=filters[i],
                kernel_size=kernel_sizes[i],
                strides=1,
                #activation=tf.nn.relu,
                padding="same")
            conv = tf.keras.layers.Add()([conv, inputLayer])  # identity skip
            conv = tf.layers.batch_normalization(conv, training=True)
            conv = tf.nn.relu(conv)
            inputLayer = conv
        lstm_size = FLAGS.lstm_cells
        number_of_layers = FLAGS.lstm_layers
        logging.info("**** LSTM HIDDEN UNITS **** " + str(lstm_size))
        logging.info("**** LSTM LAYERS **** " + str(number_of_layers))
        stacked_lstm = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.BasicLSTMCell(
                    lstm_size, forget_bias=1.0)
                for _ in range(number_of_layers)
            ])
        loss = 0.0  # unused; kept for parity with sibling models
        # NOTE(review): the stem and pool shrink the time axis 4x, but
        # sequence_length is still the raw num_frames -- confirm.
        outputs, state = tf.nn.dynamic_rnn(stacked_lstm, inputLayer,
                                           sequence_length=num_frames,
                                           dtype=tf.float32)
        aggregated_model = getattr(video_level_models,
                                   FLAGS.video_level_classifier_model)
        return aggregated_model().create_model(
            model_input=state[-1].h,
            vocab_size=vocab_size,
            **unused_params)
class LstmCustomNormModel(models.BaseModel):
    """Conv/BN/ReLU/pool stack feeding a stacked LSTM classifier.

    NOTE(review): this class is defined twice in this file with identical
    code; this second definition shadows the earlier one at import time.
    Consider removing one copy.
    """

    def create_model(self, model_input, vocab_size, num_frames, **unused_params):
        """Build the graph and delegate classification to the configured
        video-level model (see the earlier copy for the full contract)."""
        kernel_sizes = [5, 3, 5, 3, 5, 3]
        filters = [32, 64, 64, 128, 256, 512]
        conv_layers = FLAGS.conv_layers
        logging.info("**** CONV LAYERS **** " + str(conv_layers))
        # Block 1: linear conv -> BN (training hard-coded) -> ReLU -> pool.
        conv1 = tf.layers.conv1d(
            model_input,
            filters=filters[0],
            kernel_size=kernel_sizes[0],
            strides=1,
            #activation=tf.nn.relu,
            padding="same")
        conv1 = tf.layers.batch_normalization(conv1, training=True)
        conv1 = tf.nn.relu(conv1)
        pool1 = tf.layers.max_pooling1d(
            conv1,
            pool_size=5,
            strides=2,
            padding="same")
        # Block 2, same pattern.
        conv2 = tf.layers.conv1d(
            pool1,
            filters=filters[1],
            kernel_size=kernel_sizes[1],
            strides=1,
            #activation=tf.nn.relu,
            padding="same")
        conv2 = tf.layers.batch_normalization(conv2, training=True)
        conv2 = tf.nn.relu(conv2)
        pool2 = tf.layers.max_pooling1d(
            conv2,
            pool_size=5,
            strides=2,
            padding="same")
        inputLayer = pool2
        # Optional extra blocks selected by FLAGS.conv_layers.
        for i in range(2, conv_layers):
            conv = tf.layers.conv1d(
                inputLayer,
                filters=filters[i],
                kernel_size=kernel_sizes[i],
                strides=1,
                #activation=tf.nn.relu,
                padding="same")
            conv = tf.layers.batch_normalization(conv, training=True)
            conv = tf.nn.relu(conv)
            pool = tf.layers.max_pooling1d(
                conv,
                pool_size=5,
                strides=2,
                padding="same")
            inputLayer = pool
        lstm_size = FLAGS.lstm_cells
        number_of_layers = FLAGS.lstm_layers
        logging.info("**** LSTM HIDDEN UNITS **** " + str(lstm_size))
        logging.info("**** LSTM LAYERS **** " + str(number_of_layers))
        stacked_lstm = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.BasicLSTMCell(
                    lstm_size, forget_bias=1.0)
                for _ in range(number_of_layers)
            ])
        loss = 0.0  # unused
        outputs, state = tf.nn.dynamic_rnn(stacked_lstm, inputLayer,
                                           sequence_length=num_frames,
                                           dtype=tf.float32)
        aggregated_model = getattr(video_level_models,
                                   FLAGS.video_level_classifier_model)
        return aggregated_model().create_model(
            model_input=state[-1].h,
            vocab_size=vocab_size,
            **unused_params)
class LstmResnetNormModel2(models.BaseModel):
    """Two groups of residual blocks (256 then 512 channels) with a strided
    projection between them, followed by a stacked LSTM classifier."""

    def resblock(self, inputLayer, filters, kernel_sizes):
        """Apply len(filters)//2 residual blocks to `inputLayer` and return
        the result.

        Each block is conv-BN-ReLU-conv, an identity skip addition, then
        BN-ReLU. Only every other entry of `filters`/`kernel_sizes` is used
        (step of 2).
        """
        for i in range(0, len(filters), 2):
            conv = tf.layers.conv1d(
                inputLayer,
                filters=filters[i],
                kernel_size=kernel_sizes[i],
                strides=1,
                #activation=tf.nn.relu,
                padding="same")
            conv = tf.layers.batch_normalization(conv, training=True)
            conv = tf.nn.relu(conv)
            conv = tf.layers.conv1d(
                conv,
                filters=filters[i],
                kernel_size=kernel_sizes[i],
                strides=1,
                #activation=tf.nn.relu,
                padding="same")
            conv = tf.keras.layers.Add()([conv, inputLayer])  # identity skip
            conv = tf.layers.batch_normalization(conv, training=True)
            conv = tf.nn.relu(conv)
            inputLayer = conv
        return inputLayer

    def create_model(self, model_input, vocab_size, num_frames, **unused_params):
        """Build the graph and delegate to the configured video-level model."""
        kernel_sizes = [3,3,3,3,3,3]
        filters = [256,256,256,256,256,256]
        resnet_blocks = FLAGS.resnet_blocks  # logged only; block count is fixed
        logging.info("**** resnet_blocks **** " + str(resnet_blocks))
        # Stem: stride-2 conv to 256 channels, BN (training hard-coded), ReLU.
        conv1 = tf.layers.conv1d(
            model_input,
            filters=256,
            kernel_size=5,
            strides=2,
            #activation=tf.nn.relu,
            padding="same")
        conv1 = tf.layers.batch_normalization(conv1, training=True)
        conv1 = tf.nn.relu(conv1)
        pool1 = tf.layers.max_pooling1d(
            conv1,
            pool_size=5,
            strides=2,
            padding="same")
        inputLayer = self.resblock(pool1, filters, kernel_sizes)
        # Downsampling transition to 512 channels: 1x1 stride-2 projection on
        # the skip path plus a conv path, added together.
        residual = tf.layers.conv1d(
            inputLayer,
            filters=512,
            kernel_size=1,
            strides=2,
            #activation=tf.nn.relu,
            padding="same")
        conv = tf.layers.conv1d(
            inputLayer,
            filters=512,
            kernel_size=1,
            strides=2,
            #activation=tf.nn.relu,
            padding="same")
        conv = tf.layers.batch_normalization(conv, training=True)
        conv = tf.nn.relu(conv)
        # NOTE(review): this conv reads inputLayer again, discarding the
        # BN/ReLU result computed just above -- it looks like it should take
        # `conv` instead; confirm intent.
        conv = tf.layers.conv1d(
            inputLayer,
            filters=512,
            kernel_size=1,
            strides=2,
            #activation=tf.nn.relu,
            padding="same")
        conv = tf.keras.layers.Add()([conv, residual])
        conv = tf.layers.batch_normalization(conv, training=True)
        conv = tf.nn.relu(conv)
        # Second group of residual blocks at 512 channels.
        filters = [512,512,512,512,512,512]
        inputLayer = self.resblock(conv, filters, kernel_sizes)
        lstm_size = FLAGS.lstm_cells
        number_of_layers = FLAGS.lstm_layers
        logging.info("**** LSTM HIDDEN UNITS **** " + str(lstm_size))
        logging.info("**** LSTM LAYERS **** " + str(number_of_layers))
        stacked_lstm = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.BasicLSTMCell(
                    lstm_size, forget_bias=1.0)
                for _ in range(number_of_layers)
            ])
        loss = 0.0  # unused
        # NOTE(review): the time axis is shrunk 8x by the strided layers, but
        # sequence_length is still the raw num_frames -- confirm.
        outputs, state = tf.nn.dynamic_rnn(stacked_lstm, inputLayer,
                                           sequence_length=num_frames,
                                           dtype=tf.float32)
        aggregated_model = getattr(video_level_models,
                                   FLAGS.video_level_classifier_model)
        return aggregated_model().create_model(
            model_input=state[-1].h,
            vocab_size=vocab_size,
            **unused_params)
class LstmResnetNormModel3(models.BaseModel):
    """Single wide (1152-channel) residual block over the input features,
    followed by a stacked LSTM classifier."""

    def create_model(self, model_input, vocab_size, num_frames, **unused_params):
        """Build the graph and delegate to the configured video-level model."""
        # Stem: stride-2 conv to 1152 channels, BN (training hard-coded), ReLU.
        conv1 = tf.layers.conv1d(
            model_input,
            filters=1152,
            kernel_size=3,
            strides=2,
            #activation=tf.nn.relu,
            padding="same")
        conv1 = tf.layers.batch_normalization(conv1, training=True)
        conv1 = tf.nn.relu(conv1)
        pool1 = tf.layers.max_pooling1d(
            conv1,
            pool_size=5,
            strides=2,
            padding="same")
        inputLayer = pool1
        # One residual block: conv-BN-ReLU-conv + identity skip, then BN-ReLU.
        conv = tf.layers.conv1d(
            inputLayer,
            filters=1152,
            kernel_size=3,
            strides=1,
            #activation=tf.nn.relu,
            padding="same")
        conv = tf.layers.batch_normalization(conv, training=True)
        conv = tf.nn.relu(conv)
        conv = tf.layers.conv1d(
            conv,
            filters=1152,
            kernel_size=3,
            strides=1,
            #activation=tf.nn.relu,
            padding="same")
        conv = tf.keras.layers.Add()([conv, inputLayer])
        conv = tf.layers.batch_normalization(conv, training=True)
        conv = tf.nn.relu(conv)
        inputLayer = conv
        lstm_size = FLAGS.lstm_cells
        number_of_layers = FLAGS.lstm_layers
        logging.info("**** LSTM HIDDEN UNITS **** " + str(lstm_size))
        logging.info("**** LSTM LAYERS **** " + str(number_of_layers))
        stacked_lstm = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.BasicLSTMCell(
                    lstm_size, forget_bias=1.0)
                for _ in range(number_of_layers)
            ])
        loss = 0.0  # unused
        # NOTE(review): stem + pool shrink the time axis 4x but
        # sequence_length is still the raw num_frames -- confirm.
        outputs, state = tf.nn.dynamic_rnn(stacked_lstm, inputLayer,
                                           sequence_length=num_frames,
                                           dtype=tf.float32)
        aggregated_model = getattr(video_level_models,
                                   FLAGS.video_level_classifier_model)
        return aggregated_model().create_model(
            model_input=state[-1].h,
            vocab_size=vocab_size,
            **unused_params)
class LstmResnetNormModel4(models.BaseModel):
    """Like LstmResnetNormModel3 but with 2304 channels and shape logging."""

    def create_model(self, model_input, vocab_size, num_frames, **unused_params):
        """Build the graph and delegate to the configured video-level model."""
        # Stem: stride-2 conv to 2304 channels, BN (training hard-coded), ReLU.
        conv1 = tf.layers.conv1d(
            model_input,
            filters=2304,
            kernel_size=3,
            strides=2,
            #activation=tf.nn.relu,
            padding="same")
        conv1 = tf.layers.batch_normalization(conv1, training=True)
        conv1 = tf.nn.relu(conv1)
        logging.info("conv1 shape " + str(conv1.get_shape()))
        pool1 = tf.layers.max_pooling1d(
            conv1,
            pool_size=5,
            strides=2,
            padding="same")
        inputLayer = pool1
        logging.info("pool1 shape " + str(pool1.get_shape()))
        # One residual block: conv-BN-ReLU-conv + identity skip, then BN-ReLU.
        conv = tf.layers.conv1d(
            inputLayer,
            filters=2304,
            kernel_size=3,
            strides=1,
            #activation=tf.nn.relu,
            padding="same")
        conv = tf.layers.batch_normalization(conv, training=True)
        conv = tf.nn.relu(conv)
        logging.info("conv shape " + str(conv.get_shape()))
        conv = tf.layers.conv1d(
            conv,
            filters=2304,
            kernel_size=3,
            strides=1,
            #activation=tf.nn.relu,
            padding="same")
        conv = tf.keras.layers.Add()([conv, inputLayer])
        conv = tf.layers.batch_normalization(conv, training=True)
        conv = tf.nn.relu(conv)
        inputLayer = conv
        logging.info("conv shape " + str(conv.get_shape()))
        lstm_size = FLAGS.lstm_cells
        number_of_layers = FLAGS.lstm_layers
        logging.info("**** LSTM HIDDEN UNITS **** " + str(lstm_size))
        logging.info("**** LSTM LAYERS **** " + str(number_of_layers))
        stacked_lstm = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.BasicLSTMCell(
                    lstm_size, forget_bias=1.0)
                for _ in range(number_of_layers)
            ])
        loss = 0.0  # unused
        outputs, state = tf.nn.dynamic_rnn(stacked_lstm, inputLayer,
                                           sequence_length=num_frames,
                                           dtype=tf.float32)
        aggregated_model = getattr(video_level_models,
                                   FLAGS.video_level_classifier_model)
        return aggregated_model().create_model(
            model_input=state[-1].h,
            vocab_size=vocab_size,
            **unused_params)
class Resnet6BlowupLSTMModel(models.BaseModel):
    """Very wide (4096-channel) 1-D ResNet with dropout, followed by a stacked
    LSTM classifier.

    NOTE(review): tf.layers.dropout defaults to training=False, so without an
    explicit training flag these dropout layers act as identity -- confirm.
    """

    def resblock(self, inputLayer, filters, kernel_sizes):
        """Apply len(filters)//2 residual blocks (conv-BN-ReLU-dropout-conv +
        identity skip, then BN-ReLU-dropout) and return the result."""
        for i in range(0, len(filters), 2):
            conv = tf.layers.conv1d(
                inputLayer,
                filters=filters[i],
                kernel_size=kernel_sizes[i],
                strides=1,
                #activation=tf.nn.relu,
                padding="same")
            conv = tf.layers.batch_normalization(conv, training=True)
            conv = tf.nn.relu(conv)
            conv = tf.layers.dropout(conv, rate=0.2)
            conv = tf.layers.conv1d(
                conv,
                filters=filters[i],
                kernel_size=kernel_sizes[i],
                strides=1,
                #activation=tf.nn.relu,
                padding="same")
            conv = tf.keras.layers.Add()([conv, inputLayer])  # identity skip
            conv = tf.layers.batch_normalization(conv, training=True)
            conv = tf.nn.relu(conv)
            conv = tf.layers.dropout(conv, rate=0.2)
            inputLayer = conv
        return inputLayer

    def create_model(self, model_input, vocab_size, num_frames, **unused_params):
        """Build the graph and delegate to the configured video-level model,
        passing a small l2 penalty and dropout on the final LSTM state."""
        kernel_sizes = [3,3,3,3,3,3]
        filters = [4096,4096,4096,4096,4096,4096]
        resnet_blocks = FLAGS.resnet_blocks  # logged only; block count is fixed
        logging.info("**** resnet_blocks **** " + str(resnet_blocks))
        # Stem: stride-2 conv to 4096 channels, BN (training hard-coded), ReLU.
        conv1 = tf.layers.conv1d(
            model_input,
            filters=4096,
            kernel_size=5,
            strides=2,
            #activation=tf.nn.relu,
            padding="same")
        conv1 = tf.layers.batch_normalization(conv1, training=True)
        conv1 = tf.nn.relu(conv1)
        logging.info("conv1 shape" + str(conv1.get_shape()))
        pool1 = tf.layers.max_pooling1d(
            conv1,
            pool_size=2,
            strides=2,
            padding="same")
        pool1 = tf.layers.dropout(pool1, rate=0.2)
        logging.info("pool1 shape" + str(pool1.get_shape()))
        inputLayer = self.resblock(pool1, filters, kernel_sizes)
        logging.info("RESNET OUPUT shape" + str(inputLayer.get_shape()))
        lstm_size = FLAGS.lstm_cells
        number_of_layers = FLAGS.lstm_layers
        logging.info("**** LSTM HIDDEN UNITS **** " + str(lstm_size))
        logging.info("**** LSTM LAYERS **** " + str(number_of_layers))
        stacked_lstm = tf.contrib.rnn.MultiRNNCell(
            [
                tf.contrib.rnn.BasicLSTMCell(
                    lstm_size, forget_bias=1.0)
                for _ in range(number_of_layers)
            ])
        loss = 0.0  # unused
        # NOTE(review): stem + pool shrink the time axis 4x but
        # sequence_length is still the raw num_frames -- confirm.
        outputs, state = tf.nn.dynamic_rnn(stacked_lstm, inputLayer,
                                           sequence_length=num_frames,
                                           dtype=tf.float32)
        aggregated_model = getattr(video_level_models,
                                   FLAGS.video_level_classifier_model)
        return aggregated_model().create_model(
            model_input=tf.layers.dropout(state[-1].h, rate=0.2),
            vocab_size=vocab_size,
            l2_penalty=1e-6,
            **unused_params)
| 26.89071 | 116 | 0.597033 | 6,045 | 49,210 | 4.66402 | 0.063689 | 0.039725 | 0.024686 | 0.038944 | 0.832695 | 0.821061 | 0.804994 | 0.792332 | 0.78272 | 0.768745 | 0 | 0.040783 | 0.299431 | 49,210 | 1,829 | 117 | 26.905413 | 0.777027 | 0.136151 | 0 | 0.827869 | 0 | 0 | 0.055478 | 0.001427 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015574 | false | 0 | 0.007377 | 0 | 0.051639 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
ea8f392ee59646b85d5fbf79f83367ce08d517fe | 762 | py | Python | monday/api/aula04/projeto/imdb_books/src/resources/comment.py | Synapse-CIAg/synapse-2019H2 | 3a3635c59b9d30ee26344972ce6730dc1fd2ec33 | [
"Apache-2.0"
] | null | null | null | monday/api/aula04/projeto/imdb_books/src/resources/comment.py | Synapse-CIAg/synapse-2019H2 | 3a3635c59b9d30ee26344972ce6730dc1fd2ec33 | [
"Apache-2.0"
] | null | null | null | monday/api/aula04/projeto/imdb_books/src/resources/comment.py | Synapse-CIAg/synapse-2019H2 | 3a3635c59b9d30ee26344972ce6730dc1fd2ec33 | [
"Apache-2.0"
] | 3 | 2019-09-19T10:49:42.000Z | 2019-11-19T11:48:54.000Z | import falcon
import json
class CommentResource():
def on_get(self, request, resp):
resp.status = falcon.HTTP_200
resp.body = json.dumps({ "method" : "comment - get" })
def on_get_comment(self, request, resp):
resp.status = falcon.HTTP_200
resp.body = json.dumps({ "method" : "comment - get" })
def on_post(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = json.dumps({ "method" : "comment - post" })
def on_put_comment(self, req, resp, id):
resp.status = falcon.HTTP_200
resp.body = json.dumps({ "method" : "comment - put" })
def on_delete(self, req, resp):
resp.status = falcon.HTTP_200
resp.body = json.dumps({ "method" : "comment - delete" }) | 31.75 | 65 | 0.604987 | 99 | 762 | 4.535354 | 0.232323 | 0.055679 | 0.178174 | 0.222717 | 0.741648 | 0.741648 | 0.741648 | 0.741648 | 0.741648 | 0.741648 | 0 | 0.026316 | 0.251969 | 762 | 24 | 65 | 31.75 | 0.761404 | 0 | 0 | 0.388889 | 0 | 0 | 0.129751 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.277778 | false | 0 | 0.111111 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
ea9106be72e7494683411480f4cbf09e925e6270 | 5,938 | py | Python | tests/actor/test_command_light.py | sdss/lvmecp | c598f9e65d94f379449db2e6541d61e9a6844bdb | [
"BSD-3-Clause"
] | null | null | null | tests/actor/test_command_light.py | sdss/lvmecp | c598f9e65d94f379449db2e6541d61e9a6844bdb | [
"BSD-3-Clause"
] | 3 | 2021-11-12T07:20:49.000Z | 2022-03-26T13:04:28.000Z | tests/actor/test_command_light.py | sdss/lvmecp | c598f9e65d94f379449db2e6541d61e9a6844bdb | [
"BSD-3-Clause"
] | null | null | null | # encoding: utf-8
#
# test_light.py
from __future__ import annotations
import pytest
from lvmecp.actor.actor import LvmecpActor as EcpActor
from lvmecp.exceptions import LvmecpError
pytestmark = [pytest.mark.asyncio]
async def test_light_status(actor: EcpActor):
    """`light status` (global and per-room) reports every light off.

    Refactors six copy-pasted stanzas into one table-driven loop; the commands
    issued and the assertions made are identical to the original.
    """
    # room code (CLI argument) -> key in the status reply
    lights = {
        "cr": "control room",
        "ur": "utilities room",
        "sr": "spectrograph room",
        "uma": "uma lights",
        "tb": "telescope room - bright",
        "tr": "telescope room - red",
    }

    # Global status: every known light is reported off.
    command = await actor.invoke_mock_command("light status")
    await command
    assert command.status.did_succeed
    assert len(command.replies) == 3
    for label in lights.values():
        assert command.replies[-1].message["light"][label] == 0

    # Per-room status for each light.
    for code, label in lights.items():
        command = await actor.invoke_mock_command("light status {}".format(code))
        await command
        assert command.status.did_succeed
        assert len(command.replies) == 3
        assert command.replies[-1].message["light"][label] == 0
async def test_light_enable(actor: EcpActor):
    """`light enable` toggles each light on, then off again.

    Refactors twelve copy-pasted stanzas into one table-driven loop; the
    commands issued and the assertions made are identical to the original.
    """
    # room code (CLI argument) -> key in the status reply
    lights = {
        "cr": "control room",
        "ur": "utilities room",
        "sr": "spectrograph room",
        "uma": "uma lights",
        "tb": "telescope room - bright",
        "tr": "telescope room - red",
    }

    for code, label in lights.items():
        # First call switches the light on (1), second call back off (0).
        for expected in (1, 0):
            command = await actor.invoke_mock_command("light enable {}".format(code))
            await command
            assert command.status.did_succeed
            assert len(command.replies) == 3
            assert command.replies[-1].message["light"][label] == expected
async def test_light_fail_bad_name(actor: EcpActor):
    """An unknown light identifier makes the command fail."""
    cmd = await actor.invoke_mock_command("light status ar")
    await cmd
    assert cmd.status.did_fail
async def test_light_fail_no_argument(actor: EcpActor):
    """`light enable` without a light name fails."""
    cmd = await actor.invoke_mock_command("light enable")
    await cmd
    assert cmd.status.did_fail
| 32.80663 | 79 | 0.700404 | 790 | 5,938 | 5.163291 | 0.078481 | 0.143418 | 0.117676 | 0.12356 | 0.939446 | 0.933317 | 0.933317 | 0.914685 | 0.911743 | 0.851924 | 0 | 0.014052 | 0.185079 | 5,938 | 180 | 80 | 32.988889 | 0.82889 | 0.068205 | 0 | 0.791304 | 0 | 0 | 0.148094 | 0 | 0 | 0 | 0 | 0 | 0.556522 | 1 | 0 | false | 0 | 0.034783 | 0 | 0.034783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
57edab5a77136af27da445e6bffad8a73173293b | 7,071 | py | Python | pm_lookup/models.py | tommasosansone91/aqi_luftdaten | d78ffa562672095a9f9e8c763c2b021c41ed546b | [
"MIT"
] | null | null | null | pm_lookup/models.py | tommasosansone91/aqi_luftdaten | d78ffa562672095a9f9e8c763c2b021c41ed546b | [
"MIT"
] | 11 | 2020-06-06T01:39:10.000Z | 2021-06-09T17:47:07.000Z | pm_lookup/models.py | tommasosansone91/aqi_luftdaten | d78ffa562672095a9f9e8c763c2b021c41ed546b | [
"MIT"
] | null | null | null | from django.db import models
from django.utils import timezone
from datetime import datetime
# Create your models here.
# Note: the model class names are all lowercase.
# Every Django model has by default:
# id = models.AutoField(primary_key=True)
class target_area_input_data(models.Model):
    """User-supplied definition of a circular target area: a named centre
    point plus a radius in km.

    Note: every Django model implicitly also gets
    ``id = models.AutoField(primary_key=True)``.
    """

    # Name is unique on purpose: even a change of only the centre coordinates
    # or the radius forces a new, distinct name.
    Name = models.CharField(max_length=256, blank=False, null=False, unique=True)
    Latitude = models.FloatField(null=False, blank=False)
    Longitude = models.FloatField(null=False, blank=False)
    Radius = models.FloatField(null=False, blank=False)
    # NOTE(review): original comment says Radius should be an integer -- confirm.

    def __str__(self):
        return "%s --- [%s, %s - Radius: %s km]" % (self.Name, self.Latitude, self.Longitude, self.Radius)

    class Meta:
        ordering = ['-Radius', 'Name']
class target_area_realtime_data(models.Model):
    """Most recent PM10/PM2.5 aggregate for a target area (one row per area)."""

    # One-to-one link to the area definition; name, radius, latitude and
    # longitude are read through this relation (.Name, .Radius, ...).
    Target_area_input_data = models.OneToOneField(
        'target_area_input_data',
        on_delete=models.CASCADE,
    )

    Last_update_time = models.DateTimeField(blank=False, null=False, default=timezone.now )
    PM10_mean = models.FloatField(null=False, blank=False)
    PM25_mean = models.FloatField(null=False, blank=False)
    PM10_quality = models.CharField(max_length=256, blank=False, null=False)
    PM25_quality = models.CharField(max_length=256, blank=False, null=False)
    PM10_cathegory = models.CharField(max_length=256, blank=False, null=False)
    PM25_cathegory = models.CharField(max_length=256, blank=False, null=False)
    # Number of sensors that contributed to the means (nullable).
    n_selected_sensors = models.IntegerField(null=True)

    def __str__(self):
        return "%s --- [ %s ]" % (self.Target_area_input_data.Name, datetime.strftime(self.Last_update_time, "%H:%M:%S %d-%m-%Y") )

    class Meta:
        # Order across the one-to-one relation (double-underscore lookup).
        ordering = ['-Target_area_input_data__Radius', 'Target_area_input_data__Name']
class target_area_history_data(models.Model):
    """Historical PM10/PM2.5 aggregates for a target area (many rows per area)."""

    # Many-to-one link to the area definition.
    Target_area_input_data = models.ForeignKey(
        'target_area_input_data',
        on_delete=models.CASCADE,
    )

    Last_update_time = models.DateTimeField(blank=False, null=False, default=timezone.now )
    PM10_mean = models.FloatField(null=False, blank=False)
    PM25_mean = models.FloatField(null=False, blank=False)
    PM10_quality = models.CharField(max_length=256, blank=False, null=False)
    PM25_quality = models.CharField(max_length=256, blank=False, null=False)
    PM10_cathegory = models.CharField(max_length=256, blank=False, null=False)
    PM25_cathegory = models.CharField(max_length=256, blank=False, null=False)
    # Number of sensors that contributed to the means (nullable).
    n_selected_sensors = models.IntegerField(null=True)

    def __str__(self):
        return "%s --- [ %s ]" % (self.Target_area_input_data.Name, datetime.strftime(self.Last_update_time, "%H:%M:%S %d-%m-%Y") )

    class Meta:
        ordering = ['-Last_update_time', '-Target_area_input_data__Radius', 'Target_area_input_data__Name']
        # Reject exact duplicate records; per the original comments the caller
        # wraps the save in a try block to handle the integrity error.
        unique_together = ('Target_area_input_data', 'Last_update_time', 'PM10_mean', 'PM25_mean')
# --------------------------------
class target_area_time_serie(models.Model):
    """Whole time series for a target area, each column serialised to text
    (per the original comments, Postgres does not accept arrays of datetimes
    here)."""

    # Many-to-one link to the area definition.
    Target_area_input_data = models.ForeignKey(
        'target_area_input_data',
        on_delete=models.CASCADE,
    )

    Record_time_values = models.TextField( blank=False, null=False)
    PM10_mean_values = models.TextField( null=False, blank=False)
    PM25_mean_values = models.TextField( null=False, blank=False)
    PM10_quality_values = models.TextField( blank=False, null=False)
    PM25_quality_values = models.TextField( blank=False, null=False)
    PM10_cathegory_values = models.TextField( blank=False, null=False)
    PM25_cathegory_values = models.TextField( blank=False, null=False)
    n_selected_sensors_values = models.TextField(null=True)
    # Presumably pre-rendered plot markup for each pollutant -- confirm.
    PM10_graph_div = models.TextField()
    PM25_graph_div = models.TextField()

    def __str__(self):
        return "%s" % (self.Target_area_input_data.Name )

    class Meta:
        ordering = ['-Target_area_input_data__Radius', 'Target_area_input_data__Name']
# daily time series
class target_area_daily_time_serie(models.Model):
# nota che è maiuscolo
Target_area_input_data = models.ForeignKey(
'target_area_input_data',
on_delete=models.CASCADE,
)
# il primo attributo è il modello cui è associato
# postgres non prende array + datetime
Record_time_values = models.TextField( blank=False, null=False)
PM10_mean_values = models.TextField( null=False, blank=False)
PM25_mean_values = models.TextField( null=False, blank=False)
PM10_quality_values = models.TextField( blank=False, null=False)
PM25_quality_values = models.TextField( blank=False, null=False)
PM10_cathegory_values = models.TextField( blank=False, null=False)
PM25_cathegory_values = models.TextField( blank=False, null=False)
n_selected_sensors_values = models.TextField(null=True)
PM10_graph_div = models.TextField()
PM25_graph_div = models.TextField()
def __str__(self):
return "%s" % (self.Target_area_input_data.Name )
class Meta:
ordering = ['-Target_area_input_data__Radius', 'Target_area_input_data__Name'] | 33.511848 | 136 | 0.700184 | 928 | 7,071 | 5.054957 | 0.163793 | 0.076743 | 0.095928 | 0.121509 | 0.814325 | 0.808996 | 0.768493 | 0.768493 | 0.758261 | 0.727563 | 0 | 0.020225 | 0.19587 | 7,071 | 211 | 137 | 33.511848 | 0.804784 | 0.273794 | 0 | 0.752941 | 0 | 0 | 0.098821 | 0.067976 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.035294 | 0.058824 | 0.788235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 8 |
17ac4d6105cfe967423d9aeb8e686a39f6cfa16a | 590 | py | Python | tests/admin/test_pages.py | xmsec/LanCTFD | 77659cca2aae4ab68cf25ddb5a7cbe63f597a9af | [
"Apache-2.0"
] | 3 | 2020-05-13T14:02:11.000Z | 2022-03-12T08:09:34.000Z | tests/admin/test_pages.py | xmsec/LanCTFD | 77659cca2aae4ab68cf25ddb5a7cbe63f597a9af | [
"Apache-2.0"
] | 6 | 2019-01-26T15:06:07.000Z | 2019-02-11T01:48:20.000Z | tests/admin/test_pages.py | xmsec/LanCTFD | 77659cca2aae4ab68cf25ddb5a7cbe63f597a9af | [
"Apache-2.0"
] | 4 | 2019-08-01T02:16:44.000Z | 2022-03-12T08:09:35.000Z | from tests.helpers import *
def test_get_admin_pages():
app = create_ctfd()
with app.app_context():
register_user(app)
client = login_as_user(app, name="admin", password="password")
r = client.get('/admin/pages')
assert r.status_code == 200
destroy_ctfd(app)
def test_get_admin_pages_new():
app = create_ctfd()
with app.app_context():
register_user(app)
client = login_as_user(app, name="admin", password="password")
r = client.get('/admin/pages/new')
assert r.status_code == 200
destroy_ctfd(app)
| 26.818182 | 70 | 0.638983 | 80 | 590 | 4.45 | 0.35 | 0.089888 | 0.146067 | 0.08427 | 0.921348 | 0.808989 | 0.808989 | 0.808989 | 0.617978 | 0.617978 | 0 | 0.013274 | 0.233898 | 590 | 21 | 71 | 28.095238 | 0.774336 | 0 | 0 | 0.705882 | 0 | 0 | 0.091525 | 0 | 0 | 0 | 0 | 0 | 0.117647 | 1 | 0.117647 | false | 0.117647 | 0.058824 | 0 | 0.176471 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 7 |
17bba0d204ffecc7fbcb994ba4b5b7ebfffc5a2e | 22,813 | py | Python | data_test_case.py | whoiszyc/benchmarking-gnns | 4f22a0444b4829c1cf890ad3d9fb1d944ed56ac8 | [
"MIT"
] | null | null | null | data_test_case.py | whoiszyc/benchmarking-gnns | 4f22a0444b4829c1cf890ad3d9fb1d944ed56ac8 | [
"MIT"
] | null | null | null | data_test_case.py | whoiszyc/benchmarking-gnns | 4f22a0444b4829c1cf890ad3d9fb1d944ed56ac8 | [
"MIT"
] | null | null | null | import numpy as np
def case33_basecase():
    """Return the IEEE 33-node feeder base case as a ``ppc`` data dictionary.

    Defines the raw ``bus``/``line``/``gen`` matrices, then runs the
    pre-processing helpers (``get_iterator``, ``get_bus_line_gen``,
    ``get_total_load``) to attach iterators, bus-line/bus-gen relations and
    the total system load before returning the dict.
    """
    # define the network data as a dictionary named ppc
    ppc={}
    ppc["basemva"]=1
    # % 0---Load bus, 1---Slack bus, 2---Generator bus
    # % IEEE 33-node feeder
    # % Bus Bus Voltage Angle ---Load---- ------Generator----- Injected
    # % No code Mag. Degree MW Mvar MW Mvar Qmin Qmax Mvar
    ppc["bus"] = np.array([
        [1, 1, 1.060, 0.0, 0.010, 0.000, 0.000, 0.000, 0, 0, 0],
        [2, 0, 1.043, 0.0, 0.100, 0.006, 0.000, 0.000, 0, 0, 0],
        [3, 0, 1.000, 0.0, 0.090, 0.040, 0.000, 0.000, 0, 0, 0],
        [4, 0, 1.060, 0.0, 0.120, 0.080, 0.000, 0.000, 0, 0, 0],
        [5, 0, 1.010, 0.0, 0.060, 0.030, 0.000, 0.000, 0, 0, 0],
        [6, 0, 1.0, 0.0, 0.060, 0.020, 0.000, 0.000, 0, 0, 0],
        [7, 0, 1.0, 0.0, 0.200, 0.100, 0.000, 0.000, 0, 0, 0],
        [8, 0, 1.03, 0.0, 0.200, 0.100, 0.000, 0.000, 0, 0, 0],
        [9, 0, 1.0, 0.0, 0.060, 0.020, 0.000, 0.000, 0, 0, 0],
        [10, 0, 1.0, 0.0, 0.060, 0.020, 0.000, 0.000, 0, 0, 0],
        [11, 0, 1.0, 0.0, 0.045, 0.030, 0.000, 0.000, 0, 0, 0],
        [12, 0, 1.0, 0.0, 0.060, 0.035, 0.000, 0.000, 0, 0, 0],
        [13, 0, 1.0, 0.0, 0.060, 0.035, 0.000, 0.000, 0, 0, 0],
        [14, 0, 1, 0, 0.120, 0.080, 0.000, 0.000, 0, 0, 0],
        [15, 0, 1, 0, 0.060, 0.010, 0.000, 0.000, 0, 0, 0],
        [16, 0, 1, 0, 0.060, 0.020, 0.000, 0.000, 0, 0, 0],
        [17, 0, 1, 0, 0.060, 0.020, 0.000, 0.000, 0, 0, 0],
        [18, 0, 1, 0, 0.090, 0.040, 0.000, 0.000, 0, 0, 0],
        [19, 0, 1, 0, 0.090, 0.040, 0.000, 0.000, 0, 0, 0],
        [20, 0, 1, 0, 0.090, 0.040, 0.000, 0.000, 0, 0, 0],
        [21, 0, 1, 0, 0.090, 0.040, 0.000, 0.000, 0, 0, 0],
        [22, 0, 1, 0, 0.090, 0.040, 0.000, 0.000, 0, 0, 0],
        [23, 0, 1, 0, 0.090, 0.050, 0.000, 0.000, 0, 0, 0],
        [24, 0, 1, 0, 0.420, 0.200, 0.000, 0.000, 0, 0, 0], # active power large
        [25, 0, 1, 0, 0.420, 0.200, 0.000, 0.000, 0, 0, 0], # active power large
        [26, 0, 1, 0, 0.060, 0.025, 0.000, 0.000, 0, 0, 0],
        [27, 0, 1, 0, 0.060, 0.025, 0.000, 0.000, 0, 0, 0],
        [28, 0, 1, 0, 0.060, 0.020, 0.000, 0.000, 0, 0, 0],
        [29, 0, 1, 0, 0.120, 0.070, 0.000, 0.000, 0, 0, 0],
        [30, 0, 1, 0, 0.200, 0.600, 0.000, 0.000, 0, 0, 0], # reactive power large
        [31, 0, 1, 0, 0.150, 0.070, 0.000, 0.000, 0, 0, 0],
        [32, 0, 1, 0, 0.210, 0.100, 0.000, 0.000, 0, 0, 0],
        [33, 0, 1, 0, 0.060, 0.040, 1.000, 0.000, 0, 0, 0]
    ])
    # No from-bus to-bus R X Pmax Qmax
    ppc["line"] = np.array([
        [1, 1, 2, 0.000574, 0.000293, 5, 5],
        [2, 2, 3, 0.003070, 0.001564, 5, 5],
        [3, 3, 4, 0.002279, 0.001209, 5, 5],
        [4, 4, 5, 0.002373, 0.001209, 5, 5],
        [5, 5, 6, 0.005100, 0.004402, 5, 5],
        [6, 6, 7, 0.001166, 0.003853, 5, 5],
        [7, 7, 8, 0.004430, 0.001464, 5, 5],
        [8, 8, 9, 0.006413, 0.004608, 5, 5],
        [9, 9, 10, 0.006501, 0.004608, 5, 5],
        [10, 10, 11, 0.001224, 0.000405, 5, 5],
        [11, 11, 12, 0.002331, 0.000771, 5, 5],
        [12, 12, 13, 0.009141, 0.007192, 5, 5],
        [13, 13, 14, 0.003372, 0.004439, 5, 5],
        [14, 14, 15, 0.003680, 0.003275, 5, 5],
        [15, 15, 16, 0.004647, 0.003275, 5, 5],
        [16, 16, 17, 0.008026, 0.010716, 5, 5],
        [17, 17, 18, 0.004558, 0.003574, 5, 5],
        [18, 2, 19, 0.001021, 0.000974, 5, 5],
        [19, 19, 20, 0.009366, 0.008440, 5, 5],
        [20, 20, 21, 0.002550, 0.002979, 5, 5],
        [21, 21, 22, 0.004414, 0.005836, 5, 5],
        [22, 3, 23, 0.002809, 0.001920, 5, 5],
        [23, 23, 24, 0.005592, 0.004415, 5, 5],
        [24, 24, 25, 0.005579, 0.004366, 5, 5],
        [25, 6, 26, 0.001264, 0.000644, 5, 5],
        [26, 26, 27, 0.001770, 0.000901, 5, 5],
        [27, 27, 28, 0.006594, 0.005814, 5, 5],
        [28, 28, 29, 0.005007, 0.004362, 5, 5],
        [29, 29, 30, 0.003160, 0.001610, 5, 5],
        [30, 30, 31, 0.006067, 0.005996, 5, 5],
        [31, 31, 32, 0.001933, 0.002253, 5, 5],
        [32, 32, 33, 0.002123, 0.003301, 5, 5]
    ])
    # gen index, gen bus, Pmin, Pmax, Qmin, Qmax
    ppc["gen"] = np.array([
        [1, 1, 0, 100, -100, 100]
    ])
    # the base case has no normally-open tie-lines
    ppc['tieline'] = []
    # call data pre-processing functions
    ppc = get_iterator(ppc)
    ppc = get_bus_line_gen(ppc)
    ppc = get_total_load(ppc)
    return ppc
def case33_tieline():
    """Return the IEEE 33-node feeder with five normally-open tie-lines.

    Same bus data as :func:`case33_basecase`; the line matrix adds five
    tie-lines (rows 33-37) whose names are listed in ``ppc['tieline']``.
    The dict is passed through the same pre-processing helpers before
    being returned.
    """
    # define the network data as a dictionary named ppc
    ppc={}
    ppc["basemva"]=1
    # % 0---Load bus, 1---Slack bus, 2---Generator bus
    # % IEEE 33-node feeder
    # % Bus Bus Voltage Angle ---Load---- ------Generator----- Injected
    # % No code Mag. Degree MW Mvar MW Mvar Qmin Qmax Mvar
    ppc["bus"] = np.array([
        [1, 1, 1.060, 0.0, 0.010, 0.000, 0.000, 0.000, 0, 0, 0],
        [2, 0, 1.043, 0.0, 0.100, 0.006, 0.000, 0.000, 0, 0, 0],
        [3, 0, 1.000, 0.0, 0.090, 0.040, 0.000, 0.000, 0, 0, 0],
        [4, 0, 1.060, 0.0, 0.120, 0.080, 0.000, 0.000, 0, 0, 0],
        [5, 0, 1.010, 0.0, 0.060, 0.030, 0.000, 0.000, 0, 0, 0],
        [6, 0, 1.0, 0.0, 0.060, 0.020, 0.000, 0.000, 0, 0, 0],
        [7, 0, 1.0, 0.0, 0.200, 0.100, 0.000, 0.000, 0, 0, 0],
        [8, 0, 1.03, 0.0, 0.200, 0.100, 0.000, 0.000, 0, 0, 0],
        [9, 0, 1.0, 0.0, 0.060, 0.020, 0.000, 0.000, 0, 0, 0],
        [10, 0, 1.0, 0.0, 0.060, 0.020, 0.000, 0.000, 0, 0, 0],
        [11, 0, 1.0, 0.0, 0.045, 0.030, 0.000, 0.000, 0, 0, 0],
        [12, 0, 1.0, 0.0, 0.060, 0.035, 0.000, 0.000, 0, 0, 0],
        [13, 0, 1.0, 0.0, 0.060, 0.035, 0.000, 0.000, 0, 0, 0],
        [14, 0, 1, 0, 0.120, 0.080, 0.000, 0.000, 0, 0, 0],
        [15, 0, 1, 0, 0.060, 0.010, 0.000, 0.000, 0, 0, 0],
        [16, 0, 1, 0, 0.060, 0.020, 0.000, 0.000, 0, 0, 0],
        [17, 0, 1, 0, 0.060, 0.020, 0.000, 0.000, 0, 0, 0],
        [18, 0, 1, 0, 0.090, 0.040, 0.000, 0.000, 0, 0, 0],
        [19, 0, 1, 0, 0.090, 0.040, 0.000, 0.000, 0, 0, 0],
        [20, 0, 1, 0, 0.090, 0.040, 0.000, 0.000, 0, 0, 0],
        [21, 0, 1, 0, 0.090, 0.040, 0.000, 0.000, 0, 0, 0],
        [22, 0, 1, 0, 0.090, 0.040, 0.000, 0.000, 0, 0, 0],
        [23, 0, 1, 0, 0.090, 0.050, 0.000, 0.000, 0, 0, 0],
        [24, 0, 1, 0, 0.420, 0.200, 0.000, 0.000, 0, 0, 0], # active power large
        [25, 0, 1, 0, 0.420, 0.200, 0.000, 0.000, 0, 0, 0], # active power large
        [26, 0, 1, 0, 0.060, 0.025, 0.000, 0.000, 0, 0, 0],
        [27, 0, 1, 0, 0.060, 0.025, 0.000, 0.000, 0, 0, 0],
        [28, 0, 1, 0, 0.060, 0.020, 0.000, 0.000, 0, 0, 0],
        [29, 0, 1, 0, 0.120, 0.070, 0.000, 0.000, 0, 0, 0],
        [30, 0, 1, 0, 0.200, 0.600, 0.000, 0.000, 0, 0, 0], # reactive power large
        [31, 0, 1, 0, 0.150, 0.070, 0.000, 0.000, 0, 0, 0],
        [32, 0, 1, 0, 0.210, 0.100, 0.000, 0.000, 0, 0, 0],
        [33, 0, 1, 0, 0.060, 0.040, 1.000, 0.000, 0, 0, 0]
    ])
    # No from-bus to-bus R X Pmax Qmax if-damage if-operation
    ppc["line"] = np.array([
        [1, 1, 2, 0.000574, 0.000293, 5, 5],
        [2, 2, 3, 0.003070, 0.001564, 5, 5],
        [3, 3, 4, 0.002279, 0.001209, 5, 5],
        [4, 4, 5, 0.002373, 0.001209, 5, 5],
        [5, 5, 6, 0.005100, 0.004402, 5, 5],
        [6, 6, 7, 0.001166, 0.003853, 5, 5],
        [7, 7, 8, 0.004430, 0.001464, 5, 5],
        [8, 8, 9, 0.006413, 0.004608, 5, 5],
        [9, 9, 10, 0.006501, 0.004608, 5, 5],
        [10, 10, 11, 0.001224, 0.000405, 5, 5],
        [11, 11, 12, 0.002331, 0.000771, 5, 5],
        [12, 12, 13, 0.009141, 0.007192, 5, 5],
        [13, 13, 14, 0.003372, 0.004439, 5, 5],
        [14, 14, 15, 0.003680, 0.003275, 5, 5],
        [15, 15, 16, 0.004647, 0.003275, 5, 5],
        [16, 16, 17, 0.008026, 0.010716, 5, 5],
        [17, 17, 18, 0.004558, 0.003574, 5, 5],
        [18, 2, 19, 0.001021, 0.000974, 5, 5],
        [19, 19, 20, 0.009366, 0.008440, 5, 5],
        [20, 20, 21, 0.002550, 0.002979, 5, 5],
        [21, 21, 22, 0.004414, 0.005836, 5, 5],
        [22, 3, 23, 0.002809, 0.001920, 5, 5],
        [23, 23, 24, 0.005592, 0.004415, 5, 5],
        [24, 24, 25, 0.005579, 0.004366, 5, 5],
        [25, 6, 26, 0.001264, 0.000644, 5, 5],
        [26, 26, 27, 0.001770, 0.000901, 5, 5],
        [27, 27, 28, 0.006594, 0.005814, 5, 5],
        [28, 28, 29, 0.005007, 0.004362, 5, 5],
        [29, 29, 30, 0.003160, 0.001610, 5, 5],
        [30, 30, 31, 0.006067, 0.005996, 5, 5],
        [31, 31, 32, 0.001933, 0.002253, 5, 5],
        [32, 32, 33, 0.002123, 0.003301, 5, 5],
        [33, 8, 21, 0.012453, 0.012453, 5, 5], # tie-line
        [34, 9, 15, 0.012453, 0.012453, 5, 5], # tie-line
        [35, 12, 22, 0.012453, 0.012453, 5, 5], # tie-line
        [36, 18, 33, 0.003113, 0.003113, 5, 5], # tie-line
        [37, 25, 29, 0.003113, 0.003113, 5, 5] # tie-line
    ])
    # names of the normally-open tie-lines defined above
    ppc['tieline'] = ['line_33', 'line_34', 'line_35', 'line_36', 'line_37']
    # gen index, gen bus, Pmin, Pmax, Qmin, Qmax
    ppc["gen"] = np.array([
        [1, 1, 0, 100, -100, 100]
    ])
    # call data pre-processing functions
    ppc = get_iterator(ppc)
    ppc = get_bus_line_gen(ppc)
    ppc = get_total_load(ppc)
    return ppc
def case33_tieline_DG():
    """Return the IEEE 33-node feeder with tie-lines and distributed generators.

    Variant of :func:`case33_tieline` with slightly perturbed loads, six
    additional DG units (gens 2-7, listed in ``ppc['varcon']`` as var
    controllers) and the corresponding bus codes set to 1 at the DG buses.
    """
    # define the network data as a dictionary named ppc
    ppc={}
    ppc["basemva"]=1
    # % 0---Load bus, 1---Slack bus, 2---Generator bus
    # % IEEE 33-node feeder
    # % Bus Bus Voltage Angle ---Load---- ------Generator----- Injected
    # % No code Mag. Degree MW Mvar MW Mvar Qmin Qmax Mvar
    ppc["bus"] = np.array([
        [1, 1, 1.060, 0.0, 0.016, 0.000, 0.000, 0.000, 0, 0, 0],
        [2, 0, 1.043, 0.0, 0.105, 0.006, 0.000, 0.000, 0, 0, 0],
        [3, 0, 1.000, 0.0, 0.090, 0.040, 0.000, 0.000, 0, 0, 0],
        [4, 0, 1.060, 0.0, 0.120, 0.080, 0.000, 0.000, 0, 0, 0],
        [5, 1, 1.010, 0.0, 0.060, 0.030, 0.000, 0.000, 0, 0, 0],
        [6, 0, 1.0, 0.0, 0.075, 0.020, 0.000, 0.000, 0, 0, 0],
        [7, 0, 1.0, 0.0, 0.240, 0.100, 0.000, 0.000, 0, 0, 0],
        [8, 0, 1.03, 0.0, 0.212, 0.100, 0.000, 0.000, 0, 0, 0],
        [9, 0, 1.0, 0.0, 0.061, 0.020, 0.000, 0.000, 0, 0, 0],
        [10, 1, 1.0, 0.0, 0.072, 0.020, 0.000, 0.000, 0, 0, 0],
        [11, 0, 1.0, 0.0, 0.145, 0.030, 0.000, 0.000, 0, 0, 0],
        [12, 0, 1.0, 0.0, 0.062, 0.035, 0.000, 0.000, 0, 0, 0],
        [13, 0, 1.0, 0.0, 0.063, 0.035, 0.000, 0.000, 0, 0, 0],
        [14, 0, 1, 0, 0.121, 0.080, 0.000, 0.000, 0, 0, 0],
        [15, 1, 1, 0, 0.064, 0.010, 0.000, 0.000, 0, 0, 0],
        [16, 0, 1, 0, 0.065, 0.020, 0.000, 0.000, 0, 0, 0],
        [17, 0, 1, 0, 0.066, 0.020, 0.000, 0.000, 0, 0, 0],
        [18, 0, 1, 0, 0.089, 0.040, 0.000, 0.000, 0, 0, 0],
        [19, 0, 1, 0, 0.092, 0.040, 0.000, 0.000, 0, 0, 0],
        [20, 1, 1, 0, 0.093, 0.040, 0.000, 0.000, 0, 0, 0],
        [21, 0, 1, 0, 0.095, 0.040, 0.000, 0.000, 0, 0, 0],
        [22, 0, 1, 0, 0.096, 0.040, 0.000, 0.000, 0, 0, 0],
        [23, 0, 1, 0, 0.097, 0.050, 0.000, 0.000, 0, 0, 0],
        [24, 0, 1, 0, 0.421, 0.200, 0.000, 0.000, 0, 0, 0], # active power large
        [25, 1, 1, 0, 0.422, 0.200, 0.000, 0.000, 0, 0, 0], # active power large
        [26, 0, 1, 0, 0.071, 0.025, 0.000, 0.000, 0, 0, 0],
        [27, 0, 1, 0, 0.067, 0.025, 0.000, 0.000, 0, 0, 0],
        [28, 0, 1, 0, 0.068, 0.020, 0.000, 0.000, 0, 0, 0],
        [29, 0, 1, 0, 0.122, 0.070, 0.000, 0.000, 0, 0, 0],
        [30, 1, 1, 0, 0.201, 0.600, 0.000, 0.000, 0, 0, 0], # reactive power large
        [31, 0, 1, 0, 0.152, 0.070, 0.000, 0.000, 0, 0, 0],
        [32, 0, 1, 0, 0.213, 0.100, 0.000, 0.000, 0, 0, 0],
        [33, 0, 1, 0, 0.069, 0.040, 1.000, 0.000, 0, 0, 0]
    ])
    # No from-bus to-bus R X Pmax Qmax if-damage if-operation
    ppc["line"] = np.array([
        [1, 1, 2, 0.000574, 0.000293, 5, 5],
        [2, 2, 3, 0.003070, 0.001564, 5, 5],
        [3, 3, 4, 0.002279, 0.001209, 5, 5],
        [4, 4, 5, 0.002373, 0.001209, 5, 5],
        [5, 5, 6, 0.005100, 0.004402, 5, 5],
        [6, 6, 7, 0.001166, 0.003853, 5, 5],
        [7, 7, 8, 0.004430, 0.001464, 5, 5],
        [8, 8, 9, 0.006413, 0.004608, 5, 5],
        [9, 9, 10, 0.006501, 0.004608, 5, 5],
        [10, 10, 11, 0.001224, 0.000405, 5, 5],
        [11, 11, 12, 0.002331, 0.000771, 5, 5],
        [12, 12, 13, 0.009141, 0.007192, 5, 5],
        [13, 13, 14, 0.003372, 0.004439, 5, 5],
        [14, 14, 15, 0.003680, 0.003275, 5, 5],
        [15, 15, 16, 0.004647, 0.003275, 5, 5],
        [16, 16, 17, 0.008026, 0.010716, 5, 5],
        [17, 17, 18, 0.004558, 0.003574, 5, 5],
        [18, 2, 19, 0.001021, 0.000974, 5, 5],
        [19, 19, 20, 0.009366, 0.008440, 5, 5],
        [20, 20, 21, 0.002550, 0.002979, 5, 5],
        [21, 21, 22, 0.004414, 0.005836, 5, 5],
        [22, 3, 23, 0.002809, 0.001920, 5, 5],
        [23, 23, 24, 0.005592, 0.004415, 5, 5],
        [24, 24, 25, 0.005579, 0.004366, 5, 5],
        [25, 6, 26, 0.001264, 0.000644, 5, 5],
        [26, 26, 27, 0.001770, 0.000901, 5, 5],
        [27, 27, 28, 0.006594, 0.005814, 5, 5],
        [28, 28, 29, 0.005007, 0.004362, 5, 5],
        [29, 29, 30, 0.003160, 0.001610, 5, 5],
        [30, 30, 31, 0.006067, 0.005996, 5, 5],
        [31, 31, 32, 0.001933, 0.002253, 5, 5],
        [32, 32, 33, 0.002123, 0.003301, 5, 5],
        [33, 8, 21, 0.012453, 0.012453, 5, 5], # tie-line
        [34, 9, 15, 0.012453, 0.012453, 5, 5], # tie-line
        [35, 12, 22, 0.012453, 0.012453, 5, 5], # tie-line
        [36, 18, 33, 0.003113, 0.003113, 5, 5], # tie-line
        [37, 25, 29, 0.003113, 0.003113, 5, 5] # tie-line
    ])
    # names of the normally-open tie-lines defined above
    ppc['tieline'] = ['line_33', 'line_34', 'line_35', 'line_36', 'line_37']
    # gen index, gen bus, Pmin, Pmax, Qmin, Qmax
    ppc["gen"] = np.array([
        [1, 1, 0, 100, -100, 100],
        [2, 5, 0, 0, -0.2, 0.2],
        [3, 10, 0, 0, -0.2, 0.2],
        [4, 15, 0, 0, -0.2, 0.2],
        [5, 20, 0, 0, -0.2, 0.2],
        [6, 25, 0, 0, -0.2, 0.2],
        [7, 30, 0, 0, -0.2, 0.2]
    ])
    # DG units acting as var controllers (all gens except the slack unit)
    ppc['varcon'] = ['gen_2', 'gen_3', 'gen_4', 'gen_5', 'gen_6', 'gen_7']
    # call data pre-processing functions
    ppc = get_iterator(ppc)
    ppc = get_bus_line_gen(ppc)
    ppc = get_total_load(ppc)
    return ppc
# =================== Data pre-processing functions ==================
def get_bus_line_gen(ppc):
    """Attach bus-to-line and bus-to-generator relation tables to ``ppc``.

    For every bus name in ``ppc['iter_bus']`` (e.g. ``'bus_13'``) this
    records, as 1-based indices into the ``line``/``gen`` matrices:
      * the lines whose from-bus is that bus,
      * the lines whose to-bus is that bus,
      * the generators located at that bus.
    """
    bus_line = {}
    bus_gen = {}
    for name in ppc['iter_bus']:
        # recover the 0-based matrix row from a name like 'bus_13' -> 12
        row = int(name.split('_')[1]) - 1
        bus_number = ppc['bus'][row, 0]
        bus_line[name] = {
            # 1-based indices; from-bus is line column 1, to-bus is column 2
            'line_from_this_bus': np.where(bus_number == ppc['line'][:, 1])[0] + 1,
            'line_to_this_bus': np.where(bus_number == ppc['line'][:, 2])[0] + 1,
        }
        # 1-based indices of generators attached to this bus (gen column 1)
        bus_gen[name] = np.where(bus_number == ppc['gen'][:, 1])[0] + 1
    ppc['bus_line'] = bus_line
    ppc['bus_gen'] = bus_gen
    return ppc
def get_iterator(ppc):
    """Add component counts, 0-based index ranges and name iterators to ``ppc``.

    Names are built from the first column of each matrix, e.g. ``'bus_1'``,
    ``'line_7'``, ``'gen_2'``.
    """
    # component counts (ndarray.shape returns (rows, columns))
    ppc['number_bus'] = ppc['bus'].shape[0]
    ppc['number_line'] = ppc['line'].shape[0]
    ppc['number_gen'] = ppc['gen'].shape[0]
    # 0-based positional indices into each matrix
    ppc['index_bus'] = np.arange(0, ppc['number_bus'])
    ppc['index_line'] = np.arange(0, ppc['number_line'])
    ppc['index_gen'] = np.arange(0, ppc['number_gen'])
    # string names used as iterators throughout the model
    for kind in ('bus', 'line', 'gen'):
        ppc['iter_' + kind] = ['{}_{}'.format(kind, int(ppc[kind][i, 0]))
                               for i in ppc['index_' + kind]]
    return ppc
def get_total_load(ppc):
    """Record the total system active (``total_P``) and reactive (``total_Q``) demand.

    Sums bus-matrix columns 4 (P load) and 5 (Q load) over ``ppc['index_bus']``.
    """
    demand_p = sum(ppc['bus'][i, 4] for i in ppc['index_bus'])
    demand_q = sum(ppc['bus'][i, 5] for i in ppc['index_bus'])
    ppc['total_P'] = demand_p
    ppc['total_Q'] = demand_q
    return ppc
aa02e396713a622a729f610813f73a2f61d26b50 | 97,322 | py | Python | QualtricsAPI/tests/test.py | arfenarf/QualtricsAPI | e8ca08364fa3e5e909cc0a3290f0ee5714dbf185 | [
"MIT"
] | 15 | 2019-05-04T00:58:01.000Z | 2022-03-24T18:27:35.000Z | QualtricsAPI/tests/test.py | arfenarf/QualtricsAPI | e8ca08364fa3e5e909cc0a3290f0ee5714dbf185 | [
"MIT"
] | 6 | 2020-07-06T17:35:47.000Z | 2021-09-13T17:06:47.000Z | QualtricsAPI/tests/test.py | arfenarf/QualtricsAPI | e8ca08364fa3e5e909cc0a3290f0ee5714dbf185 | [
"MIT"
] | 6 | 2020-08-20T15:36:46.000Z | 2022-03-01T15:55:09.000Z | # Run in the Root Directory to run tests
## python3 -m unittest QualtricsAPI/tests/test.py
import unittest
import pandas as pd
from QualtricsAPI.Setup import Credentials
from QualtricsAPI.Survey import Responses
from QualtricsAPI.JSON import Parser
from QualtricsAPI.XM import MailingList
from QualtricsAPI.XM import XMDirectory
from QualtricsAPI.Library import Messages
from QualtricsAPI.Survey import Distributions
from datetime import date, datetime, timedelta
from time import gmtime
# Setup Tests Class
class setup_tests(object):
    """Factory for deliberately malformed Qualtrics ids used by the test cases.

    Each ``setup_test_*`` method returns fake ids that are either too short,
    too long, missing the required prefix, or None, so the test cases can
    exercise the package's assertion-based input validation.
    """

    def __init__(self):
        pass

    def setup_test_token(self, short=False):
        """Return a fake api token of the wrong length (correct length is 40)."""
        if short:
            return 'ThisIsaFakeAPITokenAndIsTooShortToWork!'
        return 'ThisIsaFakeAPITokenAndIsTooShortToWork!!!'

    def setup_test_directory_id(self, short=False, false_id=False):
        """Return (directory_id, bad_id); valid ids are 20 chars with a 'POOL_' prefix."""
        directory_id = 'POOL_ThisIsaFakeID!' if short else 'POOL_ThisIsaFakeDirectoryID!'
        bad_id = 'ThisIsaFakeIDwo/POOL' if false_id else 'POOL_ThisIsaFakeID!'
        return directory_id, bad_id

    def setup_test_mailing_list_id(self, short=False, false_id=False):
        """Return (mailing_list_id, bad_id); valid ids are 18 chars with a 'CG_' prefix."""
        mailing_list_id = 'CG_ThisIsaFakeID!' if short else 'CG_ThisIsaFakeMailingID!'
        bad_id = 'ThisIsaFakeIDwo/CG' if false_id else None
        return mailing_list_id, bad_id

    def setup_test_contact_id(self, short=False, false_id=False):
        """Return (contact_id, bad_id); valid ids are 18 chars with a 'CID_' prefix."""
        contact_id = 'CID_ThisIsaFakeID!' if short else 'CID_ThisIsaFakeMailingID!'
        bad_id = 'ThisIsaFakeIDwo/CID' if false_id else None
        return contact_id, bad_id

    def setup_test_survey_id(self, short=False, false_id=False):
        """Return (survey_id, bad_id); valid ids are 18 chars with an 'SV_' prefix."""
        survey_id = 'SV_ThisIsaFakeID!' if short else 'SV_ThisIsaFakeMailingID!'
        bad_id = 'ThisIsaFakeIDwo/SV' if false_id else None
        return survey_id, bad_id

    def setup_test_library_id_ur(self, short=False, false_id=False):
        """Return (library_id, bad_id); valid ids are 18 chars with a 'UR_' prefix."""
        lib_id = 'UR_ThisIsaFakeID!' if short else 'UR_ThisIsaFakeMailingID!'
        bad_id = 'ThisIsaFakeIDwo/UR' if false_id else None
        return lib_id, bad_id

    def setup_test_library_id_gr(self, short=False, false_id=False):
        """Return (library_id, bad_id); valid ids are 18 chars with a 'GR_' prefix."""
        lib_id = 'GR_ThisIsaFakeID!' if short else 'GR_ThisIsaFakeMailingID!'
        bad_id = 'ThisIsaFakeIDwo/GR' if false_id else None
        return lib_id, bad_id

    def setup_test_dist_id(self, short=False, false_id=False):
        """Return (distribution_id, bad_id); valid ids are 19 chars with an 'EMD_' prefix."""
        dist_id = 'EMD_ThisIsaFakeID!' if short else 'EMD_ThisIsaFakeMailingID!'
        bad_id = 'ThisIsaFakeIDwo/EMD' if false_id else None
        return dist_id, bad_id

    def setup_test_message_id(self, short=False, false_id=False):
        """Return (message_id, bad_id); valid ids are 18 chars with an 'MS_' prefix."""
        msg_id = 'MS_ThisIsaFakeID!' if short else 'MS_ThisIsaFakeMailingID!'
        bad_id = 'ThisIsaFakeIDwo/MS' if false_id else None
        return msg_id, bad_id
#UnitTest Class
class TestQualtricsAPI(unittest.TestCase):
correct_token = 'ThisIsaFakeAPITokenAndIsTooShortToWork!!'
def test_credentials_long_token(self):
    """An api token longer than 40 characters must raise an AssertionError."""
    token = setup_tests().setup_test_token(short=False)
    directory_id, _ = setup_tests().setup_test_directory_id(short=False, false_id=False)
    self.assertRaises(AssertionError, Credentials().qualtrics_api_credentials,
                      token=token, data_center='FAKE', directory_id=directory_id)

def test_credentials_short_token(self):
    """An api token shorter than 40 characters must raise an AssertionError."""
    token = setup_tests().setup_test_token(short=True)
    directory_id, _ = setup_tests().setup_test_directory_id(short=False, false_id=False)
    self.assertRaises(AssertionError, Credentials().qualtrics_api_credentials,
                      token=token, data_center='FAKE', directory_id=directory_id)
## API Credentials: Directory ID
#Test Assertion Error is handled: Short Directory id
def test_credentials_short_directory_id(self):
    """A directory id shorter than 20 characters must raise an AssertionError."""
    # NOTE: the unused `token = setup_tests().setup_test_token(...)` dead store
    # was removed from these three tests; the calls use the class-level
    # correct_token, so the fake token was never exercised.
    directory_id, bad_id = setup_tests().setup_test_directory_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Credentials().qualtrics_api_credentials(token=TestQualtricsAPI.correct_token, data_center='FAKE', directory_id=directory_id)

def test_credentials_long_directory_id(self):
    """A directory id longer than 20 characters must raise an AssertionError."""
    directory_id, bad_id = setup_tests().setup_test_directory_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Credentials().qualtrics_api_credentials(token=TestQualtricsAPI.correct_token, data_center='FAKE', directory_id=directory_id)

def test_credentials_bad_directory_id(self):
    """A directory id missing the 'POOL_' prefix must raise an AssertionError."""
    directory_id, bad_id = setup_tests().setup_test_directory_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Credentials().qualtrics_api_credentials(token=TestQualtricsAPI.correct_token, data_center='FAKE', directory_id=bad_id)
## MailingList: Mailing List IDs (rename_list)
#Test Assertion Error is handled: Long Mailing List ID
def test_ml_short_ml_id_rename(self):
    """rename_list must raise for a mailing list id that is too long."""
    too_long, _ = setup_tests().setup_test_mailing_list_id(short=False, false_id=False)
    self.assertRaises(AssertionError, MailingList().rename_list,
                      mailing_list=too_long, name='Fake')

def test_ml_long_ml_id_rename(self):
    """rename_list must raise for a mailing list id that is too short."""
    too_short, _ = setup_tests().setup_test_mailing_list_id(short=True, false_id=False)
    self.assertRaises(AssertionError, MailingList().rename_list,
                      mailing_list=too_short, name='Fake')

def test_ml_bad_ml_id_rename(self):
    """rename_list must raise for a mailing list id missing the 'CG_' prefix."""
    _, malformed = setup_tests().setup_test_mailing_list_id(short=False, false_id=True)
    self.assertRaises(AssertionError, MailingList().rename_list,
                      mailing_list=malformed, name='Fake')

def test_ml_none_ml_id_rename(self):
    """rename_list must raise for a mailing list id of None."""
    _, none_id = setup_tests().setup_test_mailing_list_id(short=False, false_id=False)
    self.assertRaises(AssertionError, MailingList().rename_list,
                      mailing_list=none_id, name='Fake')

def test_ml_bool_ml_id_rename(self):
    """rename_list must raise for a non-string (bool) mailing list id."""
    self.assertRaises(AssertionError, MailingList().rename_list,
                      mailing_list=bool(), name='Fake')

def test_ml_list_ml_id_rename(self):
    """rename_list must raise for a non-string (list) mailing list id."""
    self.assertRaises(AssertionError, MailingList().rename_list,
                      mailing_list=list(), name='Fake')

def test_ml_float_ml_id_rename(self):
    """rename_list must raise for a non-string (float) mailing list id."""
    self.assertRaises(AssertionError, MailingList().rename_list,
                      mailing_list=float(), name='Fake')

def test_ml_int_ml_id_rename(self):
    """rename_list must raise for a non-string (int) mailing list id."""
    self.assertRaises(AssertionError, MailingList().rename_list,
                      mailing_list=int(), name='Fake')

def test_ml_dict_ml_id_rename(self):
    """rename_list must raise for a non-string (dict) mailing list id."""
    self.assertRaises(AssertionError, MailingList().rename_list,
                      mailing_list=dict(), name='Fake')

def test_ml_tuple_ml_id_rename(self):
    """rename_list must raise for a non-string (tuple) mailing list id."""
    self.assertRaises(AssertionError, MailingList().rename_list,
                      mailing_list=tuple(), name='Fake')
## MailingList: Mailing List IDs (delete_list)
#Test Assertion Error is handled: Long Mailing List ID
def test_ml_short_ml_id_delete(self):
    """delete_list must raise for a mailing list id that is too long."""
    mailing_list_id, bad_id = setup_tests().setup_test_mailing_list_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        MailingList().delete_list(mailing_list=mailing_list_id)

def test_ml_long_ml_id_delete(self):
    """delete_list must raise for a mailing list id that is too short."""
    mailing_list_id, bad_id = setup_tests().setup_test_mailing_list_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        MailingList().delete_list(mailing_list=mailing_list_id)

def test_ml_none_ml_id_delete(self):
    """delete_list must raise for a mailing list id of None."""
    # BUG FIX: this test previously passed the *short* fake id (duplicating
    # test_ml_long_ml_id_delete) and never exercised None. Mirror the
    # rename-suite None test: setup(false_id=False) returns bad_id == None.
    mailing_list_id, bad_id = setup_tests().setup_test_mailing_list_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        MailingList().delete_list(mailing_list=bad_id)

def test_ml_bool_ml_id_delete(self):
    """delete_list must raise for a non-string (bool) mailing list id."""
    NonString = bool()
    with self.assertRaises(AssertionError):
        MailingList().delete_list(mailing_list=NonString)

def test_ml_bad_ml_id_delete(self):
    """delete_list must raise for a mailing list id missing the 'CG_' prefix."""
    mailing_list_id, bad_id = setup_tests().setup_test_mailing_list_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        MailingList().delete_list(mailing_list=bad_id)
## MailingList: Mailing List IDs (list_contacts)
#Test Assertion Error is handled: Long Mailing List ID
def test_ml_short_ml_id_list_contacts(self):
    """list_contacts must raise for a mailing list id that is too long."""
    too_long, _ = setup_tests().setup_test_mailing_list_id(short=False, false_id=False)
    self.assertRaises(AssertionError, MailingList().list_contacts, mailing_list=too_long)

def test_ml_long_ml_id_list_contacts(self):
    """list_contacts must raise for a mailing list id that is too short."""
    too_short, _ = setup_tests().setup_test_mailing_list_id(short=True, false_id=False)
    self.assertRaises(AssertionError, MailingList().list_contacts, mailing_list=too_short)

def test_ml_bad_ml_id_list_contacts(self):
    """list_contacts must raise for a mailing list id missing the 'CG_' prefix."""
    _, malformed = setup_tests().setup_test_mailing_list_id(short=False, false_id=True)
    self.assertRaises(AssertionError, MailingList().list_contacts, mailing_list=malformed)
## MailingList: Mailing List IDs (create_contact_in_list)
#Test Assertion Error is handled: Long Mailing List ID
def test_ml_short_ml_id_create(self):
    """create_contact_in_list must raise for a mailing list id that is too long."""
    too_long, _ = setup_tests().setup_test_mailing_list_id(short=False, false_id=False)
    self.assertRaises(AssertionError, MailingList().create_contact_in_list,
                      mailing_list=too_long)

def test_ml_long_ml_id_create(self):
    """create_contact_in_list must raise for a mailing list id that is too short."""
    too_short, _ = setup_tests().setup_test_mailing_list_id(short=True, false_id=False)
    self.assertRaises(AssertionError, MailingList().create_contact_in_list,
                      mailing_list=too_short)

def test_ml_bad_ml_id_create(self):
    """create_contact_in_list must raise for a mailing list id missing the 'CG_' prefix."""
    _, malformed = setup_tests().setup_test_mailing_list_id(short=False, false_id=True)
    self.assertRaises(AssertionError, MailingList().create_contact_in_list,
                      mailing_list=malformed)
## XMDirectory: Contact IDs (delete_contact)
#Test Assertion Error is handled: Long Contact id
def test_xm_short_contact_id_delete(self):
'''This method tests that an assertion is raised in the XMDirectory Module when the user enters a contact_id that is too long. '''
contact_id, bad_id = setup_tests().setup_test_contact_id(short=False, false_id=False)
with self.assertRaises(AssertionError):
XMDirectory().delete_contact(contact_id=contact_id)
#Test Assertion Error is handled: Short Contact id
def test_xm_long_contact_id_delete(self):
'''This method tests that an assertion is raised in the XMDirectory Module when the user enters a contact_id that is too short. '''
contact_id, bad_id = setup_tests().setup_test_contact_id(short=True, false_id=False)
with self.assertRaises(AssertionError):
XMDirectory().delete_contact(contact_id=contact_id)
#Test Assertion Error is handled: Incorrect Contact id
def test_xm_bad_contact_id_delete(self):
'''This method tests that an assertion is raised in the XMDirectory Module when the user enters a contact_id that is incorrect. '''
contact_id, bad_id = setup_tests().setup_test_contact_id(short=False, false_id=True)
with self.assertRaises(AssertionError):
XMDirectory().delete_contact(contact_id=bad_id)
#Test Assertion Error is handled: (None) Contact id
def test_xm_none_contact_id_delete(self):
'''This method tests that an assertion is raised in the XMDirectory Module when the user enters a contact_id that is incorrect. '''
contact_id, bad_id = setup_tests().setup_test_contact_id(short=False, false_id=False)
with self.assertRaises(AssertionError):
XMDirectory().delete_contact(contact_id=bad_id)
#Test Assertion Error is handled: bool Contact id
def test_xm_bool_contact_id_delete(self):
    '''Verify XMDirectory.delete_contact raises AssertionError for a bool contact_id.'''
    non_string_id = False
    with self.assertRaises(AssertionError):
        XMDirectory().delete_contact(contact_id=non_string_id)
#Test Assertion Error is handled: list Contact id
def test_xm_list_contact_id_delete(self):
    '''Verify XMDirectory.delete_contact raises AssertionError for a list contact_id.'''
    non_string_id = []
    with self.assertRaises(AssertionError):
        XMDirectory().delete_contact(contact_id=non_string_id)
#Test Assertion Error is handled: int Contact id
def test_xm_int_contact_id_delete(self):
    '''Verify XMDirectory.delete_contact raises AssertionError for an int contact_id.'''
    non_string_id = 0
    with self.assertRaises(AssertionError):
        XMDirectory().delete_contact(contact_id=non_string_id)
#Test Assertion Error is handled: float Contact id
def test_xm_float_contact_id_delete(self):
    '''Verify XMDirectory.delete_contact raises AssertionError for a float contact_id.'''
    non_string_id = 0.0
    with self.assertRaises(AssertionError):
        XMDirectory().delete_contact(contact_id=non_string_id)
#Test Assertion Error is handled: dict Contact id
def test_xm_dict_contact_id_delete(self):
    '''Verify XMDirectory.delete_contact raises AssertionError for a dict contact_id.'''
    non_string_id = {}
    with self.assertRaises(AssertionError):
        XMDirectory().delete_contact(contact_id=non_string_id)
#Test Assertion Error is handled: tuple Contact id
def test_xm_tuple_contact_id_delete(self):
    '''Verify XMDirectory.delete_contact raises AssertionError for a tuple contact_id.'''
    non_string_id = ()
    with self.assertRaises(AssertionError):
        XMDirectory().delete_contact(contact_id=non_string_id)
## XMDirectory: Contact IDs (update_contact)
#Test Assertion Error is handled: Long Contact id
def test_xm_short_contact_id_update(self):
    '''Verify XMDirectory.update_contact raises AssertionError for an over-long contact_id.'''
    oversized_id, _ = setup_tests().setup_test_contact_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        XMDirectory().update_contact(contact_id=oversized_id)
#Test Assertion Error is handled: Short Contact id
def test_xm_long_contact_id_update(self):
    '''Verify XMDirectory.update_contact raises AssertionError for a too-short contact_id.'''
    undersized_id, _ = setup_tests().setup_test_contact_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        XMDirectory().update_contact(contact_id=undersized_id)
#Test Assertion Error is handled: Incorrect Contact id
def test_xm_bad_contact_id_update(self):
    '''Verify XMDirectory.update_contact raises AssertionError for a malformed contact_id.'''
    _valid_id, wrong_id = setup_tests().setup_test_contact_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        XMDirectory().update_contact(contact_id=wrong_id)
#Test Assertion Error is handled: (None) Contact id
def test_xm_none_contact_id_update_2(self):
    '''Verify XMDirectory.update_contact raises AssertionError for a None contact_id.

    Renamed: this method previously reused the name test_xm_bad_contact_id_update,
    so Python's class-body binding let a later duplicate shadow it and it never ran.
    '''
    # false_id=False means the second value returned is the None id, not a malformed one.
    _valid_id, none_id = setup_tests().setup_test_contact_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        XMDirectory().update_contact(contact_id=none_id)
#Test Assertion Error is handled: (None) Contact id
def test_xm_none_contact_id_update_3(self):
    '''Verify XMDirectory.update_contact raises AssertionError for a None contact_id.

    Renamed: this method previously reused the name test_xm_bad_contact_id_update,
    so Python's class-body binding let a later duplicate shadow it and it never ran.
    '''
    # false_id=False means the second value returned is the None id, not a malformed one.
    _valid_id, none_id = setup_tests().setup_test_contact_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        XMDirectory().update_contact(contact_id=none_id)
#Test Assertion Error is handled: (None) Contact id
def test_xm_none_contact_id_update(self):
    '''Verify XMDirectory.update_contact raises AssertionError when contact_id is None.'''
    _valid_id, none_id = setup_tests().setup_test_contact_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        XMDirectory().update_contact(contact_id=none_id)
#Test Assertion Error is handled: bool Contact id
def test_xm_bool_contact_id_update(self):
    '''Verify XMDirectory.update_contact raises AssertionError for a bool contact_id.'''
    non_string_id = False
    with self.assertRaises(AssertionError):
        XMDirectory().update_contact(contact_id=non_string_id)
#Test Assertion Error is handled: list Contact id
def test_xm_list_contact_id_update(self):
    '''Verify XMDirectory.update_contact raises AssertionError for a list contact_id.'''
    non_string_id = []
    with self.assertRaises(AssertionError):
        XMDirectory().update_contact(contact_id=non_string_id)
#Test Assertion Error is handled: int Contact id
def test_xm_int_contact_id_update(self):
    '''Verify XMDirectory.update_contact raises AssertionError for an int contact_id.'''
    non_string_id = 0
    with self.assertRaises(AssertionError):
        XMDirectory().update_contact(contact_id=non_string_id)
#Test Assertion Error is handled: float Contact id
def test_xm_float_contact_id_update(self):
    '''Verify XMDirectory.update_contact raises AssertionError for a float contact_id.'''
    non_string_id = 0.0
    with self.assertRaises(AssertionError):
        XMDirectory().update_contact(contact_id=non_string_id)
#Test Assertion Error is handled: dict Contact id
def test_xm_dict_contact_id_update(self):
    '''Verify XMDirectory.update_contact raises AssertionError for a dict contact_id.'''
    non_string_id = {}
    with self.assertRaises(AssertionError):
        XMDirectory().update_contact(contact_id=non_string_id)
#Test Assertion Error is handled: tuple Contact id
def test_xm_tuple_contact_id_update(self):
    '''Verify XMDirectory.update_contact raises AssertionError for a tuple contact_id.'''
    non_string_id = ()
    with self.assertRaises(AssertionError):
        XMDirectory().update_contact(contact_id=non_string_id)
#Test Assertion Error is handled: (None) Contact id
def test_xm_none_contact_id_update_4(self):
    '''Verify XMDirectory.update_contact raises AssertionError for a None contact_id.

    Renamed: this method previously reused the name test_xm_bad_contact_id_update
    (defined multiple times in this class), so only the last definition ever ran.
    '''
    # false_id=False means the second value returned is the None id, not a malformed one.
    _valid_id, none_id = setup_tests().setup_test_contact_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        XMDirectory().update_contact(contact_id=none_id)
## XMDirectory: Contact IDs (get_contact)
#Test Assertion Error is handled: Long Contact id
def test_xm_short_contact_id_get(self):
    '''Verify XMDirectory.get_contact raises AssertionError for an over-long contact_id.'''
    oversized_id, _ = setup_tests().setup_test_contact_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact(contact_id=oversized_id)
#Test Assertion Error is handled: Short Contact id
def test_xm_long_contact_id_get(self):
    '''Verify XMDirectory.get_contact raises AssertionError for a too-short contact_id.'''
    undersized_id, _ = setup_tests().setup_test_contact_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact(contact_id=undersized_id)
#Test Assertion Error is handled: Incorrect Contact id
def test_xm_bad_contact_id_get(self):
    '''Verify XMDirectory.get_contact raises AssertionError for a malformed contact_id.'''
    _valid_id, wrong_id = setup_tests().setup_test_contact_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact(contact_id=wrong_id)
#Test Assertion Error is handled: (None) Contact id
def test_xm_none_contact_id_get(self):
    '''Verify XMDirectory.get_contact raises AssertionError when contact_id is None.'''
    _valid_id, none_id = setup_tests().setup_test_contact_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact(contact_id=none_id)
#Test Assertion Error is handled: bool Contact id
def test_xm_bool_contact_id_get(self):
    '''Verify XMDirectory.get_contact raises AssertionError for a bool contact_id.'''
    non_string_id = False
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact(contact_id=non_string_id)
#Test Assertion Error is handled: list Contact id
def test_xm_list_contact_id_get(self):
    '''Verify XMDirectory.get_contact raises AssertionError for a list contact_id.'''
    non_string_id = []
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact(contact_id=non_string_id)
#Test Assertion Error is handled: float Contact id
def test_xm_float_contact_id_get(self):
    '''Verify XMDirectory.get_contact raises AssertionError for a float contact_id.'''
    non_string_id = 0.0
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact(contact_id=non_string_id)
#Test Assertion Error is handled: dict Contact id
def test_xm_dict_contact_id_get(self):
    '''Verify XMDirectory.get_contact raises AssertionError for a dict contact_id.'''
    non_string_id = {}
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact(contact_id=non_string_id)
#Test Assertion Error is handled: tuple Contact id
def test_xm_tuple_contact_id_get(self):
    '''Verify XMDirectory.get_contact raises AssertionError for a tuple contact_id.'''
    non_string_id = ()
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact(contact_id=non_string_id)
#Test Assertion Error is handled: int Contact id
def test_xm_int_contact_id_get(self):
    '''Verify XMDirectory.get_contact raises AssertionError for an int contact_id.'''
    non_string_id = 0
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact(contact_id=non_string_id)
## XMDirectory: Contact IDs (get_contact_additional_info)
#Test Assertion Error is handled: Long Contact id
def test_xm_short_contact_id_get_add(self):
    '''Verify XMDirectory.get_contact_additional_info raises AssertionError for an over-long contact_id.'''
    oversized_id, _ = setup_tests().setup_test_contact_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact_additional_info(contact_id=oversized_id)
#Test Assertion Error is handled: Short Contact id
def test_xm_long_contact_id_get_add(self):
    '''Verify XMDirectory.get_contact_additional_info raises AssertionError for a too-short contact_id.'''
    undersized_id, _ = setup_tests().setup_test_contact_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact_additional_info(contact_id=undersized_id)
#Test Assertion Error is handled: Incorrect Contact id
def test_xm_bad_contact_id_get_add(self):
    '''Verify XMDirectory.get_contact_additional_info raises AssertionError for a malformed contact_id.'''
    _valid_id, wrong_id = setup_tests().setup_test_contact_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact_additional_info(contact_id=wrong_id)
#Test Assertion Error is handled: (None) Contact id
def test_xm_none_contact_id_get_add(self):
    '''Verify XMDirectory.get_contact_additional_info raises AssertionError when contact_id is None.'''
    _valid_id, none_id = setup_tests().setup_test_contact_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact_additional_info(contact_id=none_id)
#Test Assertion Error is handled: (None) Content
def test_xm_none_content_get_add(self):
    '''Verify XMDirectory.get_contact_additional_info raises AssertionError when content is None.'''
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact_additional_info(contact_id='ThisisaFakeContact', content=None)
#Test Assertion Error is handled: bool Contact id
def test_xm_bool_contact_id_get_add(self):
    '''Verify XMDirectory.get_contact_additional_info raises AssertionError for a bool contact_id.'''
    non_string_id = False
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact_additional_info(contact_id=non_string_id)
#Test Assertion Error is handled: int Contact id
def test_xm_int_contact_id_get_add(self):
    '''Verify XMDirectory.get_contact_additional_info raises AssertionError for an int contact_id.'''
    non_string_id = 0
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact_additional_info(contact_id=non_string_id)
#Test Assertion Error is handled: list Contact id
def test_xm_list_contact_id_get_add(self):
    '''Verify XMDirectory.get_contact_additional_info raises AssertionError for a list contact_id.'''
    non_string_id = []
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact_additional_info(contact_id=non_string_id)
#Test Assertion Error is handled: float Contact id
def test_xm_float_contact_id_get_add(self):
    '''Verify XMDirectory.get_contact_additional_info raises AssertionError for a float contact_id.'''
    non_string_id = 0.0
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact_additional_info(contact_id=non_string_id)
#Test Assertion Error is handled: dict Contact id
def test_xm_dict_contact_id_get_add(self):
    '''Verify XMDirectory.get_contact_additional_info raises AssertionError for a dict contact_id.'''
    non_string_id = {}
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact_additional_info(contact_id=non_string_id)
#Test Assertion Error is handled: tuple Contact id
def test_xm_tuple_contact_id_get_add(self):
    '''Verify XMDirectory.get_contact_additional_info raises AssertionError for a tuple contact_id.'''
    non_string_id = ()
    with self.assertRaises(AssertionError):
        XMDirectory().get_contact_additional_info(contact_id=non_string_id)
## Responses: Survey IDs ##
#Test Assertion Error is handled: Long Survey id
def test_responses_short_survey_id(self):
    '''Verify Responses.setup_request raises AssertionError for an over-long survey id.'''
    oversized_id, _ = setup_tests().setup_test_survey_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Responses().setup_request(file_format='csv', survey=oversized_id)
#Test Assertion Error is handled: Short Survey id
def test_responses_long_survey_id(self):
    '''Verify Responses.setup_request raises AssertionError for a too-short survey id.'''
    undersized_id, _ = setup_tests().setup_test_survey_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Responses().setup_request(file_format='csv', survey=undersized_id)
#Test Assertion Error is handled: Incorrect Survey id
def test_responses_bad_survey_id(self):
    '''Verify Responses.setup_request raises AssertionError for a malformed survey id.'''
    _valid_id, wrong_id = setup_tests().setup_test_survey_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Responses().setup_request(file_format='csv', survey=wrong_id)
#Test Assertion Error is handled: (None) Survey id
def test_responses_none_survey_id(self):
    '''Verify Responses.setup_request raises AssertionError when the survey id is None.'''
    _valid_id, none_id = setup_tests().setup_test_survey_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Responses().setup_request(file_format='csv', survey=none_id)
#Test Assertion Error is handled: bool Survey id
def test_responses_bool_survey_id(self):
    '''Verify Responses.setup_request raises AssertionError for a bool survey id.'''
    non_string_id = False
    with self.assertRaises(AssertionError):
        Responses().setup_request(file_format='csv', survey=non_string_id)
#Test Assertion Error is handled: list Survey id
def test_responses_list_survey_id(self):
    '''Verify Responses.setup_request raises AssertionError for a list survey id.'''
    non_string_id = []
    with self.assertRaises(AssertionError):
        Responses().setup_request(file_format='csv', survey=non_string_id)
#Test Assertion Error is handled: int Survey id
def test_responses_int_survey_id(self):
    '''Verify Responses.setup_request raises AssertionError for an int survey id.'''
    non_string_id = 0
    with self.assertRaises(AssertionError):
        Responses().setup_request(file_format='csv', survey=non_string_id)
#Test Assertion Error is handled: float Survey id
def test_responses_float_survey_id(self):
    '''Verify Responses.setup_request raises AssertionError for a float survey id.'''
    non_string_id = 0.0
    with self.assertRaises(AssertionError):
        Responses().setup_request(file_format='csv', survey=non_string_id)
#Test Assertion Error is handled: dict Survey id
def test_responses_dict_survey_id(self):
    '''Verify Responses.setup_request raises AssertionError for a dict survey id.'''
    non_string_id = {}
    with self.assertRaises(AssertionError):
        Responses().setup_request(file_format='csv', survey=non_string_id)
#Test Assertion Error is handled: tuple Survey id
def test_responses_tuple_survey_id(self):
    '''Verify Responses.setup_request raises AssertionError for a tuple survey id.'''
    non_string_id = ()
    with self.assertRaises(AssertionError):
        Responses().setup_request(file_format='csv', survey=non_string_id)
## Messages: Library IDs ##
#Test Assertion Error is handled: Long Library id (list_messages: UR)
def test_responses_short_library_id_list_msgs_ur(self):
    '''Verify Messages.list_messages raises AssertionError for an over-long
    user-defined (UR) library id.'''
    oversized_lib, _ = setup_tests().setup_test_library_id_ur(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Messages().list_messages(library=oversized_lib)
#Test Assertion Error is handled: Short Library id (list_messages: UR)
def test_responses_long_library_id_lst_msgs_ur(self):
    '''Verify Messages.list_messages raises AssertionError for a too-short
    user-defined (UR) library id.'''
    undersized_lib, _ = setup_tests().setup_test_library_id_ur(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Messages().list_messages(library=undersized_lib)
#Test Assertion Error is handled: Incorrect Library id (list_messages: UR)
def test_responses_bad_libary_id_lst_msgs_ur(self):
    '''Verify Messages.list_messages raises AssertionError for a malformed
    user-defined (UR) library id.'''
    _valid_lib, wrong_lib = setup_tests().setup_test_library_id_ur(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Messages().list_messages(library=wrong_lib)
#Test Assertion Error is handled: Long Library id (list_messages: GR)
def test_responses_short_library_id_list_msgs_gr(self):
    '''Verify Messages.list_messages raises AssertionError for an over-long
    global-resource (GR) library id.'''
    oversized_lib, _ = setup_tests().setup_test_library_id_gr(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Messages().list_messages(library=oversized_lib)
#Test Assertion Error is handled: Short Library id (list_messages: GR)
def test_responses_long_library_id_lst_msgs_gr(self):
    '''Verify Messages.list_messages raises AssertionError for a too-short
    global-resource (GR) library id.'''
    undersized_lib, _ = setup_tests().setup_test_library_id_gr(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Messages().list_messages(library=undersized_lib)
#Test Assertion Error is handled: Incorrect Library id (list_messages: GR)
def test_responses_bad_libary_id_lst_msgs_gr(self):
    '''Verify Messages.list_messages raises AssertionError for a malformed
    global-resource (GR) library id.'''
    _valid_lib, wrong_lib = setup_tests().setup_test_library_id_gr(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Messages().list_messages(library=wrong_lib)
## Messages: Library IDs ##
#Test Assertion Error is handled: Long Library id (get_message: UR)
def test_responses_short_library_id_get_msg_ur(self):
    '''Verify Messages.get_message raises AssertionError for an over-long
    user-defined (UR) library id.'''
    oversized_lib, _ = setup_tests().setup_test_library_id_ur(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Messages().get_message(library=oversized_lib, message="MS_ThisIsaFakeMsg!")
#Test Assertion Error is handled: Short Library id (get_message: UR)
def test_responses_long_library_id_get_msg_ur(self):
    '''Verify Messages.get_message raises AssertionError for a too-short
    user-defined (UR) library id.'''
    undersized_lib, _ = setup_tests().setup_test_library_id_ur(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Messages().get_message(library=undersized_lib, message="MS_ThisIsaFakeMsg!")
#Test Assertion Error is handled: Incorrect Library id (get_message: UR)
def test_responses_bad_libary_id_get_msg_ur(self):
    '''Verify Messages.get_message raises AssertionError for a malformed
    user-defined (UR) library id.'''
    _valid_lib, wrong_lib = setup_tests().setup_test_library_id_ur(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Messages().get_message(library=wrong_lib, message="MS_ThisIsaFakeMsg!")
#Test Assertion Error is handled: Long Library id (get_message: GR)
def test_responses_short_library_id_get_msg_gr(self):
    '''Verify Messages.get_message raises AssertionError for an over-long
    global-resource (GR) library id.'''
    oversized_lib, _ = setup_tests().setup_test_library_id_gr(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Messages().get_message(library=oversized_lib, message="MS_ThisIsaFakeMsg!")
#Test Assertion Error is handled: Short Library id (get_message: GR)
def test_responses_long_library_id_get_msg_gr(self):
    '''Verify Messages.get_message raises AssertionError for a too-short
    global-resource (GR) library id.'''
    undersized_lib, _ = setup_tests().setup_test_library_id_gr(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Messages().get_message(library=undersized_lib, message="MS_ThisIsaFakeMsg!")
#Test Assertion Error is handled: Incorrect Library id (get_message: GR)
def test_responses_bad_libary_id_get_msg_gr(self):
    '''Verify Messages.get_message raises AssertionError for a malformed
    global-resource (GR) library id.'''
    _valid_lib, wrong_lib = setup_tests().setup_test_library_id_gr(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Messages().get_message(library=wrong_lib, message="MS_ThisIsaFakeMsg!")
## Messages: Message IDs ##
#Test Assertion Error is handled: Long Message ID (get_message)
def test_responses_short_msg_id_get_msgs(self):
    '''Verify Messages.get_message raises AssertionError for an over-long message_id.'''
    oversized_msg, _ = setup_tests().setup_test_message_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Messages().get_message(library="GR_ThisIsaFakeLib!", message=oversized_msg)
#Test Assertion Error is handled: Short Message ID (get_message)
def test_responses_long_msg_id_get_msgs(self):
    '''Verify Messages.get_message raises AssertionError for a too-short message_id.'''
    undersized_msg, _ = setup_tests().setup_test_message_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Messages().get_message(library="GR_ThisIsaFakeLib!", message=undersized_msg)
#Test Assertion Error is handled: Incorrect Message ID (get_message)
def test_responses_bad_msg_id_get_msgs(self):
    '''Verify Messages.get_message raises AssertionError for a malformed message_id.

    Fix: the original passed the well-formed msg_id instead of the malformed bad_id
    that setup_test_message_id(false_id=True) generates, so the condition this test
    is named for was never exercised. Every sibling "bad id" test passes bad_id.
    '''
    msg_id, bad_id = setup_tests().setup_test_message_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Messages().get_message(library="GR_ThisIsaFakeLib!", message=bad_id)
## Distribution: MailingList IDs ##
#Test Assertion Error is handled: Long MailingList ID (create_distribution)
def test_responses_short_ml_id_create_dist(self):
    '''Verify Distributions.create_distribution raises AssertionError for an
    over-long mailing_list_id.'''
    oversized_ml, _ = setup_tests().setup_test_mailing_list_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_distribution(
            mailing_list=oversized_ml, library="GR_ThisIsaFakeLib!",
            message="MS_ThisIsaFakeMsg!", subject="Fake", send_date=gmtime(),
            reply_email="Fake@Fake.com", from_email="Fake@Fake.com",
            from_name="Fake", survey="SV_ThisIsaFakeID!!")
#Test Assertion Error is handled: Short MailingList ID (create_distribution)
def test_responses_long_ml_id_create_dist(self):
    '''Verify Distributions.create_distribution raises AssertionError for a
    too-short mailing_list_id.'''
    undersized_ml, _ = setup_tests().setup_test_mailing_list_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_distribution(
            mailing_list=undersized_ml, library="GR_ThisIsaFakeLib!",
            message="MS_ThisIsaFakeMsg!", subject="Fake", send_date=gmtime(),
            reply_email="Fake@Fake.com", from_email="Fake@Fake.com",
            from_name="Fake", survey="SV_ThisIsaFakeID!!")
#Test Assertion Error is handled: Incorrect MailingList ID (create_distribution)
def test_responses_bad_ml_id_create_dist(self):
    '''Verify Distributions.create_distribution raises AssertionError for a
    malformed mailing_list_id.'''
    _valid_ml, wrong_ml = setup_tests().setup_test_mailing_list_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().create_distribution(
            mailing_list=wrong_ml, library="GR_ThisIsaFakeLib!",
            message="MS_ThisIsaFakeMsg!", subject="Fake", send_date=gmtime(),
            reply_email="Fake@Fake.com", from_email="Fake@Fake.com",
            from_name="Fake", survey="SV_ThisIsaFakeID!!")
## Distribution: Survey IDs ##
#Test Assertion Error is handled: Long Survey ID (create_distribution)
def test_responses_short_sv_id_create_dist(self):
    '''Verify Distributions.create_distribution raises AssertionError for an
    over-long survey_id.'''
    oversized_sv, _ = setup_tests().setup_test_survey_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_distribution(
            survey=oversized_sv, mailing_list="CG_ThisIsaFakeMail",
            library="GR_ThisIsaFakeLib!", message="MS_ThisIsaFakeMsg!",
            send_date=gmtime(), subject="Fake", reply_email="Fake@Fake.com",
            from_email="Fake@Fake.com", from_name="Fake")
#Test Assertion Error is handled: Short Survey ID (create_distribution)
def test_responses_long_sv_id_create_dist(self):
    '''Verify Distributions.create_distribution raises AssertionError for a
    too-short survey_id.'''
    undersized_sv, _ = setup_tests().setup_test_survey_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_distribution(
            survey=undersized_sv, mailing_list="CG_ThisIsaFakeMail",
            library="GR_ThisIsaFakeLib!", message="MS_ThisIsaFakeMsg!",
            send_date=gmtime(), subject="Fake", reply_email="Fake@Fake.com",
            from_email="Fake@Fake.com", from_name="Fake")
#Test Assertion Error is handled: Incorrect Survey ID (create_distribution)
def test_responses_bad_sv_id_create_dist(self):
    '''Verify Distributions.create_distribution raises AssertionError for a
    malformed survey_id.'''
    _valid_sv, wrong_sv = setup_tests().setup_test_survey_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().create_distribution(
            survey=wrong_sv, mailing_list="CG_ThisIsaFakeMail",
            library="GR_ThisIsaFakeLib!", message="MS_ThisIsaFakeMsg!",
            send_date=gmtime(), subject="Fake", reply_email="Fake@Fake.com",
            from_email="Fake@Fake.com", from_name="Fake")
## Distribution: Survey IDs ##
#Test Assertion Error is handled: Long Survey ID (list_distributions)
def test_responses_short_sv_id_list_dist(self):
    '''Verify Distributions.list_distributions raises AssertionError for an
    over-long survey_id.'''
    oversized_sv, _ = setup_tests().setup_test_survey_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().list_distributions(survey=oversized_sv)
#Test Assertion Error is handled: Short Survey ID (list_distributions)
def test_responses_long_sv_id_list_dist(self):
    '''Verify Distributions.list_distributions raises AssertionError for a
    too-short survey_id.'''
    undersized_sv, _ = setup_tests().setup_test_survey_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().list_distributions(survey=undersized_sv)
#Test Assertion Error is handled: Incorrect Survey ID (list_distributions)
def test_responses_bad_sv_id_list_dist(self):
    '''Verify Distributions.list_distributions raises AssertionError for a
    malformed survey_id.'''
    _valid_sv, wrong_sv = setup_tests().setup_test_survey_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().list_distributions(survey=wrong_sv)
## Distribution: Survey IDs ##
#Test Assertion Error is handled: Long Survey ID (get_distribution)
def test_responses_short_sv_id_get_dist(self):
    '''Verify Distributions.get_distribution raises AssertionError for an
    over-long survey_id.'''
    oversized_sv, _ = setup_tests().setup_test_survey_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().get_distribution(survey=oversized_sv, distribution="EMD_ThisIsaFakeID!")
#Test Assertion Error is handled: Short Survey ID (get_distribution)
def test_responses_long_sv_id_get_dist(self):
    '''Verify Distributions.get_distribution raises AssertionError for a
    too-short survey_id.'''
    undersized_sv, _ = setup_tests().setup_test_survey_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().get_distribution(survey=undersized_sv, distribution="EMD_ThisIsaFakeID!")
# AssertionError expected: malformed survey id (get_distribution)
def test_responses_bad_sv_id_get_dist(self):
    """Distributions.get_distribution raises AssertionError when the
    survey_id supplied is incorrectly formatted."""
    _, wrong_id = setup_tests().setup_test_survey_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().get_distribution(survey=wrong_id, distribution="EMD_ThisIsaFakeID!")
## Distribution: Library IDs (UR) ##
# AssertionError expected: over-length UR library id (create_distribution)
def test_responses_short_lib_id_create_dist_ur(self):
    """Distributions.create_distribution raises AssertionError when the
    library_id (UR: User-Defined Resource) supplied is too long."""
    oversized_id, _ = setup_tests().setup_test_library_id_ur(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_distribution(library=oversized_id, survey="SV_ThisIsaFakeID!!", mailing_list="CG_ThisIsaFakeMail", message="MS_ThisIsaFakeMsg!", send_date=gmtime(), subject="Fake", reply_email="Fake@Fake.com", from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: under-length UR library id (create_distribution)
def test_responses_long_lib_id_create_dist_ur(self):
    """Distributions.create_distribution raises AssertionError when the
    library_id (UR: User-Defined Resource) supplied is too short."""
    undersized_id, _ = setup_tests().setup_test_library_id_ur(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_distribution(library=undersized_id, survey="SV_ThisIsaFakeID!!", mailing_list="CG_ThisIsaFakeMail", message="MS_ThisIsaFakeMsg!", send_date=gmtime(), subject="Fake", reply_email="Fake@Fake.com", from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: malformed UR library id (create_distribution)
def test_responses_bad_lib_id_create_dist_ur(self):
    """Distributions.create_distribution raises AssertionError when the
    library_id (UR: User-Defined Resource) supplied is incorrectly formatted."""
    _, wrong_id = setup_tests().setup_test_library_id_ur(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().create_distribution(library=wrong_id, survey="SV_ThisIsaFakeID!!", mailing_list="CG_ThisIsaFakeMail", message="MS_ThisIsaFakeMsg!", send_date=gmtime(), subject="Fake", reply_email="Fake@Fake.com", from_email="Fake@Fake.com", from_name="Fake")
## Distribution: Library IDs (GR) ##
# AssertionError expected: over-length GR library id (create_distribution)
def test_responses_short_lib_id_create_dist_gr(self):
    """Distributions.create_distribution raises AssertionError when the
    library_id (GR: Global Resource) supplied is too long."""
    oversized_id, _ = setup_tests().setup_test_library_id_gr(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_distribution(library=oversized_id, survey="SV_ThisIsaFakeID!!", mailing_list="CG_ThisIsaFakeMail", message="MS_ThisIsaFakeMsg!", send_date=gmtime(), subject="Fake", reply_email="Fake@Fake.com", from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: under-length GR library id (create_distribution)
def test_responses_long_lib_id_create_dist_gr(self):
    """Distributions.create_distribution raises AssertionError when the
    library_id (GR: Global Resource) supplied is too short."""
    undersized_id, _ = setup_tests().setup_test_library_id_gr(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_distribution(library=undersized_id, survey="SV_ThisIsaFakeID!!", mailing_list="CG_ThisIsaFakeMail", message="MS_ThisIsaFakeMsg!", send_date=gmtime(), subject="Fake", reply_email="Fake@Fake.com", from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: malformed GR library id (create_distribution)
def test_responses_bad_lib_id_create_dist_gr(self):
    """Distributions.create_distribution raises AssertionError when the
    library_id (GR: Global Resource) supplied is incorrectly formatted."""
    _, wrong_id = setup_tests().setup_test_library_id_gr(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().create_distribution(library=wrong_id, survey="SV_ThisIsaFakeID!!", mailing_list="CG_ThisIsaFakeMail", message="MS_ThisIsaFakeMsg!", send_date=gmtime(), subject="Fake", reply_email="Fake@Fake.com", from_email="Fake@Fake.com", from_name="Fake")
## Distribution: Library IDs (UR) ##
# AssertionError expected: over-length UR library id (create_reminder)
def test_responses_short_lib_id_create_remind_ur(self):
    """Distributions.create_reminder raises AssertionError when the
    library_id (UR: User-Defined Resource) supplied is too long."""
    oversized_id, _ = setup_tests().setup_test_library_id_ur(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_reminder(library=oversized_id, distribution="EMD_ThisIsaFakeID!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: under-length UR library id (create_reminder)
def test_responses_long_lib_id_create_remind_ur(self):
    """Distributions.create_reminder raises AssertionError when the
    library_id (UR: User-Defined Resource) supplied is too short."""
    undersized_id, _ = setup_tests().setup_test_library_id_ur(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_reminder(library=undersized_id, distribution="EMD_ThisIsaFakeID!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: malformed UR library id (create_reminder)
def test_responses_bad_lib_id_create_remind_ur(self):
    """Distributions.create_reminder raises AssertionError when the
    library_id (UR: User-Defined Resource) supplied is incorrectly formatted."""
    _, wrong_id = setup_tests().setup_test_library_id_ur(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().create_reminder(library=wrong_id, distribution="EMD_ThisIsaFakeID!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
## Distribution: Library IDs (GR) ##
# AssertionError expected: over-length GR library id (create_reminder)
def test_responses_short_lib_id_create_remind_gr(self):
    """Distributions.create_reminder raises AssertionError when the
    library_id (GR: Global Resource) supplied is too long."""
    oversized_id, _ = setup_tests().setup_test_library_id_gr(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_reminder(library=oversized_id, distribution="EMD_ThisIsaFakeID!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: under-length GR library id (create_reminder)
def test_responses_long_lib_id_create_remind_gr(self):
    """Distributions.create_reminder raises AssertionError when the
    library_id (GR: Global Resource) supplied is too short."""
    undersized_id, _ = setup_tests().setup_test_library_id_gr(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_reminder(library=undersized_id, distribution="EMD_ThisIsaFakeID!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: malformed GR library id (create_reminder)
def test_responses_bad_lib_id_create_remind_gr(self):
    """Distributions.create_reminder raises AssertionError when the
    library_id (GR: Global Resource) supplied is incorrectly formatted."""
    _, wrong_id = setup_tests().setup_test_library_id_gr(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().create_reminder(library=wrong_id, distribution="EMD_ThisIsaFakeID!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
## Distribution: Library IDs (UR) ##
# AssertionError expected: over-length UR library id (create_thank_you)
def test_responses_short_lib_id_create_ty_ur(self):
    """Distributions.create_thank_you raises AssertionError when the
    library_id (UR: User-Defined Resource) supplied is too long."""
    oversized_id, _ = setup_tests().setup_test_library_id_ur(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_thank_you(library=oversized_id, distribution="EMD_ThisIsaFakeID!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: under-length UR library id (create_thank_you)
def test_responses_long_lib_id_create_ty_ur(self):
    """Distributions.create_thank_you raises AssertionError when the
    library_id (UR: User-Defined Resource) supplied is too short."""
    undersized_id, _ = setup_tests().setup_test_library_id_ur(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_thank_you(library=undersized_id, distribution="EMD_ThisIsaFakeID!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: malformed UR library id (create_thank_you)
def test_responses_bad_lib_id_create_ty_ur(self):
    """Distributions.create_thank_you raises AssertionError when the
    library_id (UR: User-Defined Resource) supplied is incorrectly formatted."""
    _, wrong_id = setup_tests().setup_test_library_id_ur(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().create_thank_you(library=wrong_id, distribution="EMD_ThisIsaFakeID!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
## Distribution: Library IDs (GR) ##
# AssertionError expected: over-length GR library id (create_thank_you)
def test_responses_short_lib_id_create_ty_gr(self):
    """Distributions.create_thank_you raises AssertionError when the
    library_id (GR: Global Resource) supplied is too long."""
    oversized_id, _ = setup_tests().setup_test_library_id_gr(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_thank_you(library=oversized_id, distribution="EMD_ThisIsaFakeID!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: under-length GR library id (create_thank_you)
def test_responses_long_lib_id_create_ty_gr(self):
    """Distributions.create_thank_you raises AssertionError when the
    library_id (GR: Global Resource) supplied is too short."""
    undersized_id, _ = setup_tests().setup_test_library_id_gr(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_thank_you(library=undersized_id, distribution="EMD_ThisIsaFakeID!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: malformed GR library id (create_thank_you)
def test_responses_bad_lib_id_create_ty_gr(self):
    """Distributions.create_thank_you raises AssertionError when the
    library_id (GR: Global Resource) supplied is incorrectly formatted."""
    _, wrong_id = setup_tests().setup_test_library_id_gr(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().create_thank_you(library=wrong_id, distribution="EMD_ThisIsaFakeID!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
## Distribution: Distribution IDs ##
# AssertionError expected: over-length distribution id (get_distribution)
def test_responses_short_dist_id_get_dist(self):
    """Distributions.get_distribution raises AssertionError when the
    distribution_id supplied is too long."""
    oversized_id, _ = setup_tests().setup_test_dist_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().get_distribution(survey="SV_ThisIsaFakeID!!", distribution=oversized_id)
# AssertionError expected: under-length distribution id (get_distribution)
def test_responses_long_dist_id_get_dist(self):
    """Distributions.get_distribution raises AssertionError when the
    distribution_id supplied is too short."""
    undersized_id, _ = setup_tests().setup_test_dist_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().get_distribution(survey="SV_ThisIsaFakeID!!", distribution=undersized_id)
# AssertionError expected: malformed distribution id (get_distribution)
def test_responses_bad_dist_id_get_dist(self):
    """Distributions.get_distribution raises AssertionError when the
    distribution_id supplied is incorrectly formatted."""
    _, wrong_id = setup_tests().setup_test_dist_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().get_distribution(survey="SV_ThisIsaFakeID!!", distribution=wrong_id)
## Distribution: Distribution IDs ##
# AssertionError expected: over-length distribution id (create_reminder)
def test_responses_short_dist_id_create_remind(self):
    """Distributions.create_reminder raises AssertionError when the
    distribution_id supplied is too long."""
    oversized_id, _ = setup_tests().setup_test_dist_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_reminder(distribution=oversized_id, library="GR_ThisIsaFakeLib!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: under-length distribution id (create_reminder)
def test_responses_long_dist_id_create_remind(self):
    """Distributions.create_reminder raises AssertionError when the
    distribution_id supplied is too short."""
    undersized_id, _ = setup_tests().setup_test_dist_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_reminder(distribution=undersized_id, library="GR_ThisIsaFakeLib!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: malformed distribution id (create_reminder)
def test_responses_bad_dist_id_create_remind(self):
    """Distributions.create_reminder raises AssertionError when the
    distribution_id supplied is incorrectly formatted."""
    _, wrong_id = setup_tests().setup_test_dist_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().create_reminder(distribution=wrong_id, library="GR_ThisIsaFakeLib!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
## Distribution: Distribution IDs ##
# AssertionError expected: over-length distribution id (create_thank_you)
def test_responses_short_dist_id_create_ty(self):
    """Distributions.create_thank_you raises AssertionError when the
    distribution_id supplied is too long."""
    oversized_id, _ = setup_tests().setup_test_dist_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_thank_you(distribution=oversized_id, library="GR_ThisIsaFakeLib!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: under-length distribution id (create_thank_you)
def test_responses_long_dist_id_create_ty(self):
    """Distributions.create_thank_you raises AssertionError when the
    distribution_id supplied is too short."""
    undersized_id, _ = setup_tests().setup_test_dist_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_thank_you(distribution=undersized_id, library="GR_ThisIsaFakeLib!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: malformed distribution id (create_thank_you)
def test_responses_bad_dist_id_create_ty(self):
    """Distributions.create_thank_you raises AssertionError when the
    distribution_id supplied is incorrectly formatted."""
    _, wrong_id = setup_tests().setup_test_dist_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().create_thank_you(distribution=wrong_id, library="GR_ThisIsaFakeLib!", message="MS_ThisIsaFakeMsg!", subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
## Distribution: Message IDs ##
# AssertionError expected: over-length message id (create_distribution)
def test_responses_short_msg_id_create_dist(self):
    """Distributions.create_distribution raises AssertionError when the
    message_id supplied is too long."""
    oversized_id, _ = setup_tests().setup_test_message_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_distribution(message=oversized_id, library="GR_ThisIsaFakeLib!", survey="SV_ThisIsaFakeID!!", mailing_list="CG_ThisIsaFakeMail", subject="Fake", send_date=gmtime(), reply_email="Fake@Fake.com", from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: under-length message id (create_distribution)
def test_responses_long_msg_id_create_dist(self):
    """Distributions.create_distribution raises AssertionError when the
    message_id supplied is too short."""
    undersized_id, _ = setup_tests().setup_test_message_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_distribution(message=undersized_id, library="GR_ThisIsaFakeLib!", survey="SV_ThisIsaFakeID!!", mailing_list="CG_ThisIsaFakeMail", subject="Fake", send_date=gmtime(), reply_email="Fake@Fake.com", from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: malformed message id (create_distribution)
def test_responses_bad_msg_id_create_dist(self):
    """Distributions.create_distribution raises AssertionError when the
    message_id supplied is incorrectly formatted."""
    _, wrong_id = setup_tests().setup_test_message_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().create_distribution(message=wrong_id, library="GR_ThisIsaFakeLib!", survey="SV_ThisIsaFakeID!!", mailing_list="CG_ThisIsaFakeMail", subject="Fake", send_date=gmtime(), reply_email="Fake@Fake.com", from_email="Fake@Fake.com", from_name="Fake")
## Distribution: Message IDs ##
# AssertionError expected: over-length message id (create_reminder)
def test_responses_short_msg_id_create_remind(self):
    """Distributions.create_reminder raises AssertionError when the
    message_id supplied is too long."""
    oversized_id, _ = setup_tests().setup_test_message_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_reminder(distribution="EMD_ThisIsaFakeID!", library="GR_ThisIsaFakeLib!", message=oversized_id, subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: under-length message id (create_reminder)
def test_responses_long_msg_id_create_remind(self):
    """Distributions.create_reminder raises AssertionError when the
    message_id supplied is too short."""
    undersized_id, _ = setup_tests().setup_test_message_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_reminder(distribution="EMD_ThisIsaFakeID!", library="GR_ThisIsaFakeLib!", message=undersized_id, subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: malformed message id (create_reminder)
def test_responses_bad_msg_id_create_remind(self):
    """Distributions.create_reminder raises AssertionError when the
    message_id supplied is incorrectly formatted."""
    _, wrong_id = setup_tests().setup_test_message_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().create_reminder(distribution="EMD_ThisIsaFakeID!", library="GR_ThisIsaFakeLib!", message=wrong_id, subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
## Distribution: Message IDs ##
# AssertionError expected: over-length message id (create_thank_you)
def test_responses_short_msg_id_create_ty(self):
    """Distributions.create_thank_you raises AssertionError when the
    message_id supplied is too long."""
    oversized_id, _ = setup_tests().setup_test_message_id(short=False, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_thank_you(distribution="EMD_ThisIsaFakeID!", library="GR_ThisIsaFakeLib!", message=oversized_id, subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: under-length message id (create_thank_you)
def test_responses_long_msg_id_create_ty(self):
    """Distributions.create_thank_you raises AssertionError when the
    message_id supplied is too short."""
    undersized_id, _ = setup_tests().setup_test_message_id(short=True, false_id=False)
    with self.assertRaises(AssertionError):
        Distributions().create_thank_you(distribution="EMD_ThisIsaFakeID!", library="GR_ThisIsaFakeLib!", message=undersized_id, subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# AssertionError expected: malformed message id (create_thank_you)
def test_responses_bad_msg_id_create_ty(self):
    """Distributions.create_thank_you raises AssertionError when the
    message_id supplied is incorrectly formatted."""
    _, wrong_id = setup_tests().setup_test_message_id(short=False, false_id=True)
    with self.assertRaises(AssertionError):
        Distributions().create_thank_you(distribution="EMD_ThisIsaFakeID!", library="GR_ThisIsaFakeLib!", message=wrong_id, subject="Fake", reply_email="Fake@Fake.com", send_date=gmtime(), from_email="Fake@Fake.com", from_name="Fake")
# #Test Assertion Error is handled: bool first name (Create Contact in XM)
# def test_bad_first_name_bool_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a first_name that is a type bool.'''
# boolFirstName = bool()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(first_name=boolFirstName)
# #Test Assertion Error is handled: list first name (Create Contact in XM)
# def test_bad_first_name_list_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a first_name that is a type list.'''
# listFirstName = list()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(first_name=listFirstName)
# #Test Assertion Error is handled: float first name (Create Contact in XM)
# def test_bad_first_name_float_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a first_name that is a type float.'''
# floatFirstName = float()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(first_name=floatFirstName)
# #Test Assertion Error is handled: int first name (Create Contact in XM)
# def test_bad_first_name_int_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a first_name that is a type int.'''
# intFirstName = int()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(first_name=intFirstName)
# #Test Assertion Error is handled: dict first name (Create Contact in XM)
# def test_bad_first_name_dict_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a first_name that is a type dict.'''
# dictFirstName = dict()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(first_name=dictFirstName)
# #Test Assertion Error is handled: tuple first name (Create Contact in XM)
# def test_bad_first_name_tuple_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a first_name that is a type tuple.'''
# tupleFirstName = tuple()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(first_name=tupleFirstName)
# #Test Assertion Error is handled: bool last name (Create Contact in XM)
# def test_bad_last_name_bool_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a last_name that is a type bool.'''
# boolLastName = bool()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(last_name=boolLastName)
# #Test Assertion Error is handled: list last name (Create Contact in XM)
# def test_bad_last_name_list_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a last_name that is a type list.'''
# listLastName = list()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(last_name=listLastName)
# #Test Assertion Error is handled: float last name (Create Contact in XM)
# def test_bad_last_name_float_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a last_name that is a type float.'''
# floatLastName = float()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(last_name=floatLastName)
# #Test Assertion Error is handled: int last name (Create Contact in XM)
# def test_bad_last_name_int_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a last_name that is a type int.'''
# intLastName = int()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(last_name=intLastName)
# #Test Assertion Error is handled: dict last name (Create Contact in XM)
# def test_bad_last_name_dict_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a last_name that is a type dict.'''
# dictLastName = dict()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(last_name=dictLastName)
# #Test Assertion Error is handled: tuple last name (Create Contact in XM)
# def test_bad_last_name_tuple_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a last_name that is a type tuple.'''
# tupleLastName = tuple()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(last_name=tupleLastName)
# #Test Assertion Error is handled: bool email (Create Contact in XM)
# def test_bad_email_bool_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a email that is a type bool.'''
# boolEmail = bool()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(email=boolEmail)
# #Test Assertion Error is handled: list email (Create Contact in XM)
# def test_bad_email_list_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a email that is a type list.'''
# listEmail = list()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(email=listEmail)
# #Test Assertion Error is handled: float email (Create Contact in XM)
# def test_bad_email_float_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a email that is a type float.'''
# floatEmail = float()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(email=floatEmail)
# #Test Assertion Error is handled: int email (Create Contact in XM)
# def test_bad_email_int_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a email that is a type int.'''
# intEmail = int()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(email=intEmail)
# #Test Assertion Error is handled: dict email (Create Contact in XM)
# def test_bad_email_dict_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a email that is a type dict.'''
# dictEmail = dict()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(email=dictEmail)
# #Test Assertion Error is handled: tuple email (Create Contact in XM)
# def test_bad_email_tuple_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a email that is a type tuple.'''
# tupleEmail = tuple()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(email=tupleEmail)
# #Test Assertion Error is handled: bool phone (Create Contact in XM)
# def test_bad_phone_bool_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a phone that is a type bool.'''
# boolPhone = bool()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(phone=boolPhone)
# #Test Assertion Error is handled: list phone (Create Contact in XM)
# def test_bad_phone_list_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a phone that is a type list.'''
# listPhone = list()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(phone=listPhone)
# #Test Assertion Error is handled: float phone (Create Contact in XM)
# def test_bad_phone_float_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a phone that is a type float.'''
# floatPhone = float()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(phone=floatPhone)
# #Test Assertion Error is handled: int phone (Create Contact in XM)
# def test_bad_phone_int_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a phone that is a type int.'''
# intPhone = int()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(phone=intPhone)
# #Test Assertion Error is handled: dict phone (Create Contact in XM)
# def test_bad_phone_dict_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a phone that is a type dict.'''
# dictPhone = dict()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(phone=dictPhone)
# #Test Assertion Error is handled: tuple phone (Create Contact in XM)
# def test_bad_phone_tuple_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a phone that is a type tuple.'''
# tuplePhone = tuple()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(phone=tuplePhone)
# #Test Assertion Error is handled: bool language (Create Contact in XM)
# def test_bad_language_bool_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a language that is a type bool.'''
# boolLanguage = bool()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(language=boolLanguage)
# #Test Assertion Error is handled: list language (Create Contact in XM)
# def test_bad_language_list_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a language that is a type list.'''
# listLanguage = list()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(language=listLanguage)
# #Test Assertion Error is handled: float language (Create Contact in XM)
# def test_bad_language_float_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a language that is a type float.'''
# floatLanguage = float()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(language=floatLanguage)
# #Test Assertion Error is handled: int language (Create Contact in XM)
# def test_bad_language_int_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a language that is a type int.'''
# intLanguage = int()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(language=intLanguage)
# #Test Assertion Error is handled: dict language (Create Contact in XM)
# def test_bad_language_dict_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a language that is a type dict.'''
# dictLanguage = dict()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(language=dictLanguage)
# #Test Assertion Error is handled: tuple language (Create Contact in XM)
# def test_bad_language_tuple_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a language that is a type tuple.'''
# tupleLanguage = tuple()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(language=tupleLanguage)
# #Test Assertion Error is handled: bool metadata (Create Contact in XM)
# def test_bad_metadata_bool_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a metadata that is a type bool.'''
# boolMetadata = bool()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(metadata=boolMetadata)
# #Test Assertion Error is handled: list Metadata (Create Contact in XM)
# def test_bad_metadata_list_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a metadata that is a type list.'''
# listMetadata = list()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(metadata=listMetadata)
# #Test Assertion Error is handled: float Metadata (Create Contact in XM)
# def test_bad_metadata_float_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a metadata that is a type float.'''
# floatMetadata = float()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(metadata=floatMetadata)
# #Test Assertion Error is handled: int Metadata (Create Contact in XM)
# def test_bad_metadata_int_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a metadata that is a type int.'''
# intMetadata = int()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(metadata=intMetadata)
# #Test Assertion Error is handled: str Metadata (Create Contact in XM)
# def test_bad_metadata_str_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a metadata that is a type str.'''
# strMetadata = str()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(metadata=strMetadata)
# #Test Assertion Error is handled: tuple Metadata (Create Contact in XM)
# def test_bad_metadata_tuple_create_contact_xm(self):
# '''This method tests that an assertion is raised in the XMDirectory Module's create_contact_in_XM method when the user enters
# a metadata that is a type tuple.'''
# tupleMetadata = tuple()
# with self.assertRaises(AssertionError):
# XMDirectory().create_contact_in_XM(metadata=tupleMetadata)
if __name__ == "__main__":
    # Run this module's unittest test cases when executed as a script.
    unittest.main()
| 65.625084 | 277 | 0.72965 | 13,545 | 97,322 | 5.00502 | 0.015947 | 0.029738 | 0.036965 | 0.050167 | 0.956205 | 0.948549 | 0.926305 | 0.9121 | 0.893898 | 0.874943 | 0 | 0.000354 | 0.187768 | 97,322 | 1,482 | 278 | 65.669366 | 0.857264 | 0.477312 | 0 | 0.651828 | 0 | 0 | 0.081904 | 0.006494 | 0 | 0 | 0 | 0 | 0.227345 | 1 | 0.243243 | false | 0 | 0.017488 | 0.00159 | 0.281399 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
aa07056441c120fc26584746c0854bbdc53f332a | 1,015 | py | Python | humanist_app/decorators.py | DH-Cologne/humanist-django | 3372e32d2ef43d64f1e8b6ebfcd94f73df5e2af1 | [
"MIT"
] | null | null | null | humanist_app/decorators.py | DH-Cologne/humanist-django | 3372e32d2ef43d64f1e8b6ebfcd94f73df5e2af1 | [
"MIT"
] | null | null | null | humanist_app/decorators.py | DH-Cologne/humanist-django | 3372e32d2ef43d64f1e8b6ebfcd94f73df5e2af1 | [
"MIT"
] | null | null | null | from django.shortcuts import redirect
def require_user(function):
    """View decorator that restricts access to authenticated, active users.

    Anonymous visitors are redirected to the login page with a ``next``
    query parameter pointing back to the requested page; authenticated but
    inactive accounts are redirected to the "denied" page.
    """
    import functools

    # functools.wraps copies __name__, __doc__, __module__, etc.  The
    # original code only copied __doc__ by hand and left the __name__ copy
    # commented out, which broke introspection of decorated views.
    @functools.wraps(function)
    def wrap(request, *args, **kwargs):
        if not request.user.is_authenticated:
            return redirect('/Restricted/?next={}'.format(
                request.get_full_path()))
        if not request.user.is_active:
            return redirect('/Restricted/denied')
        return function(request, *args, **kwargs)
    return wrap
def require_editor(function):
    """View decorator that restricts access to authenticated staff users.

    Anonymous visitors are redirected to the login page with a ``next``
    query parameter pointing back to the requested page; authenticated
    non-staff users are redirected to the "denied" page.
    """
    import functools

    # functools.wraps copies __name__, __doc__, __module__, etc.  The
    # original code only copied __doc__ by hand and left the __name__ copy
    # commented out, which broke introspection of decorated views.
    @functools.wraps(function)
    def wrap(request, *args, **kwargs):
        if not request.user.is_authenticated:
            return redirect('/Restricted/?next={}'.format(
                request.get_full_path()))
        if not request.user.is_staff:
            return redirect('/Restricted/denied')
        return function(request, *args, **kwargs)
    return wrap
| 29.852941 | 58 | 0.595074 | 103 | 1,015 | 5.456311 | 0.291262 | 0.078292 | 0.120996 | 0.106762 | 0.868327 | 0.868327 | 0.868327 | 0.868327 | 0.868327 | 0.868327 | 0 | 0 | 0.295567 | 1,015 | 33 | 59 | 30.757576 | 0.786014 | 0.068966 | 0 | 0.8 | 0 | 0 | 0.080937 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.16 | false | 0 | 0.04 | 0 | 0.52 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 8 |
aa1b07e722e31b1261d0c3a324b6b43d08238a46 | 2,579 | py | Python | Image/glance_setup.py | laxmena/OpenStack-Automation | f5629da6d1e9ac70e1b4a5819191d1d97e3a0664 | [
"MIT"
] | 1 | 2017-09-24T17:38:58.000Z | 2017-09-24T17:38:58.000Z | Image/glance_setup.py | laxmena/OpenStack-Automation | f5629da6d1e9ac70e1b4a5819191d1d97e3a0664 | [
"MIT"
] | null | null | null | Image/glance_setup.py | laxmena/OpenStack-Automation | f5629da6d1e9ac70e1b4a5819191d1d97e3a0664 | [
"MIT"
] | null | null | null | import sys
import ConfigParser
ip = sys.argv[1]  # controller IP passed as the script's first argument


def _ensure_sections(config, names):
    """Add any of the named sections that are missing from ``config``."""
    for name in names:
        if name not in config.sections():
            config.add_section(name)


def _set_common_options(config):
    """Set the options shared by glance-api.conf and glance-registry.conf.

    These are the [database] connection string, the [keystone_authtoken]
    credentials and the [paste_deploy] flavor.  GLANCE_DBPASS / GLANCE_PASS
    are placeholder passwords, as in the OpenStack install guide.
    """
    _ensure_sections(config, ('database', 'keystone_authtoken', 'paste_deploy'))
    config.set('database', 'connection',
               'mysql+pymysql://glance:GLANCE_DBPASS@' + ip + '/glance')
    config.set('keystone_authtoken', 'auth_uri', 'http://' + ip + ':5000')
    config.set('keystone_authtoken', 'auth_url', 'http://' + ip + ':35357')
    config.set('keystone_authtoken', 'memcached_servers', '' + ip + ':11211')
    config.set('keystone_authtoken', 'auth_type', 'password')
    config.set('keystone_authtoken', 'project_domain_name', 'Default')
    config.set('keystone_authtoken', 'user_domain_name', 'Default')
    config.set('keystone_authtoken', 'project_name', 'service')
    config.set('keystone_authtoken', 'username', 'glance')
    config.set('keystone_authtoken', 'password', 'GLANCE_PASS')
    config.set('paste_deploy', 'flavor', 'keystone')


def _update_config_file(file_name, include_glance_store):
    """Read ``file_name``, apply the glance settings and write it back.

    A fresh parser is created for each file.  Bug fix: previously the parser
    that had been populated for glance-api.conf was reused for
    glance-registry.conf without re-reading the registry file, so the
    registry conf was overwritten with the api file's content (including
    the api-only [glance_store] section) and its own existing options were
    discarded.
    """
    config = ConfigParser.ConfigParser(allow_no_value=True)
    config.read(file_name)
    _set_common_options(config)
    if include_glance_store:
        # Image storage backend settings -- only glance-api.conf needs these.
        _ensure_sections(config, ('glance_store',))
        config.set('glance_store', 'stores', 'file,http')
        config.set('glance_store', 'default_store', 'file')
        config.set('glance_store', 'filesystem_store_datadir',
                   '/var/lib/glance/images/')
    with open(file_name, 'wb') as configfile:
        config.write(configfile)


# For local testing substitute "UnitTest/glance-api.conf" and
# "UnitTest/glance-registry.conf" for the /etc paths.
_update_config_file("/etc/glance/glance-api.conf", include_glance_store=True)
_update_config_file("/etc/glance/glance-registry.conf", include_glance_store=False)
config.write(configfile) | 38.492537 | 88 | 0.761535 | 342 | 2,579 | 5.526316 | 0.19883 | 0.119048 | 0.161905 | 0.247619 | 0.849206 | 0.797355 | 0.797355 | 0.778836 | 0.778836 | 0.778836 | 0 | 0.011856 | 0.05157 | 2,579 | 67 | 89 | 38.492537 | 0.760834 | 0.031408 | 0 | 0.76 | 0 | 0 | 0.487785 | 0.072087 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.12 | 0.04 | 0 | 0.04 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 9 |
5e096dc92d3c0c2469cd33b47e1527003fcb1f66 | 14,202 | py | Python | test/arith/arith.py | thautwarm/rbnf-rts | f491c02bad453652d089dc0634183ee3213d7f21 | [
"MIT"
] | 4 | 2019-09-10T04:11:38.000Z | 2020-08-02T13:27:25.000Z | test/arith/arith.py | thautwarm/rbnf-rts | f491c02bad453652d089dc0634183ee3213d7f21 | [
"MIT"
] | 2 | 2020-01-10T16:23:44.000Z | 2020-08-02T21:39:16.000Z | test/arith/arith.py | thautwarm/rbnf-rts | f491c02bad453652d089dc0634183ee3213d7f21 | [
"MIT"
] | null | null | null | # this file is auto-generated by RBNF.hs and the Python package rbnf-rts
from rbnf_rts.rbnf_linker import link
from rbnf_rts.utils import ImmutableMap
from rbnf_rts.lexical import *
# Names exported by this generated module.
__all__ = ['lexicals', 'run_lexer', 'mk_parser']
# Lexer definition: token regexes plus the literal table.  The ``numbering``
# dict assigns each token kind the integer id (``idint``) that the generated
# parser functions in mk_parser compare against; 'space' tokens are skipped.
(lexicals, run_lexer) = lexer(r(number='[+-]?\\d+'), r(space='\\s+'), l['/'], l['-'], l['+'], l['*'], l[')'], l['('], ignores=['space'], reserved_map=ImmutableMap.from_dict({'*': 'quote *', '/': 'quote /', '+': 'quote +', '-': 'quote -', '(': 'quote (', ')': 'quote )'}), numbering={'BOF': 0, 'EOF': 1, 'quote *': 2, 'quote /': 3, 'quote +': 4, 'quote -': 5, 'quote (': 6, 'quote )': 7, 'number': 8, 'space': 9})
def mk_parser(arith, unwrap):
    """Build the parser for the arithmetic grammar and return its START rule.

    ``arith(op_token, lhs, rhs)`` combines two parsed operands with an
    operator token; ``unwrap(number_token)`` converts a ``number`` token into
    a value.  Every parse function below returns a 2-tuple: ``(True, result)``
    on success, or ``(False, errors)`` where ``errors`` is a cons list of
    ``(offset, message)`` pairs.  The integer literals compared against
    ``token.idint`` are the token ids from the lexer's ``numbering`` table
    (2 = '*', 3 = '/', 4 = '+', 5 = '-', 6 = '(', 7 = ')', 8 = number,
    0 = BOF, 1 = EOF).

    NOTE: this body is machine generated (RBNF.hs) -- do not hand edit.
    """
    from rbnf_rts.rts import AST as prim__mk__ast, Cons as prim__cons, _nil as prim__nil
    # One left-recursion step for Add: match ('-' | '+'), parse a Mul as the
    # right operand, and fold it into the accumulated lhs via ``arith``.
    def lr_step_Add(_slot_0, prim__state, prim__tokens):
        Add_lhs_0 = _slot_0
        lcl_0 = prim__tokens.offset
        _off_0 = lcl_0
        lcl_0 = (len(prim__tokens.array) > (prim__tokens.offset + 0))
        if lcl_0:
            lcl_2 = prim__tokens.array[(prim__tokens.offset + 0)]
            lcl_2 = lcl_2.idint
            if (lcl_2 == 5):
                _py_local_i = prim__tokens.offset
                _py_local_t = prim__tokens.array[_py_local_i]
                prim__tokens.offset = (_py_local_i + 1)
                lcl_3 = _py_local_t
                _slot_1 = lcl_3
                Add_op_1 = _slot_1
                lcl_3 = parse_Mul(prim__state, prim__tokens)
                _slot_2_check = lcl_3
                lcl_3 = _slot_2_check[0]
                lcl_3 = (lcl_3 is False)
                if lcl_3:
                    lcl_3 = _slot_2_check
                else:
                    lcl_4 = _slot_2_check[1]
                    lcl_4 = lcl_4
                    _slot_2 = lcl_4
                    Add_rhs_1 = _slot_2
                    lcl_4 = arith(Add_op_1, Add_lhs_0, Add_rhs_1)
                    _slot_local__1 = lcl_4
                    lcl_4 = (True, _slot_local__1)
                    lcl_3 = lcl_4
                lcl_1 = lcl_3
            elif (lcl_2 == 4):
                _py_local_i = prim__tokens.offset
                _py_local_t = prim__tokens.array[_py_local_i]
                prim__tokens.offset = (_py_local_i + 1)
                lcl_3 = _py_local_t
                _slot_1 = lcl_3
                Add_op_1 = _slot_1
                lcl_3 = parse_Mul(prim__state, prim__tokens)
                _slot_2_check = lcl_3
                lcl_3 = _slot_2_check[0]
                lcl_3 = (lcl_3 is False)
                if lcl_3:
                    lcl_3 = _slot_2_check
                else:
                    lcl_4 = _slot_2_check[1]
                    lcl_4 = lcl_4
                    _slot_2 = lcl_4
                    Add_rhs_1 = _slot_2
                    lcl_4 = arith(Add_op_1, Add_lhs_0, Add_rhs_1)
                    _slot_local__1 = lcl_4
                    lcl_4 = (True, _slot_local__1)
                    lcl_3 = lcl_4
                lcl_1 = lcl_3
            else:
                lcl_3 = (_off_0, 'Add lookahead failed')
                lcl_3 = prim__cons(lcl_3, prim__nil)
                lcl_3 = lcl_3
                lcl_3 = (False, lcl_3)
                lcl_1 = lcl_3
            lcl_0 = lcl_1
        else:
            lcl_1 = (_off_0, 'Add got EOF')
            lcl_1 = prim__cons(lcl_1, prim__nil)
            lcl_1 = lcl_1
            lcl_1 = (False, lcl_1)
            lcl_0 = lcl_1
        return lcl_0
    # Left-recursion driver for Add: repeat lr_step_Add until it fails; keep
    # the last successful reduction.  If the failing step consumed no input,
    # the accumulated value is returned as success.
    def lr_loop_Add(_slot_0, prim__state, prim__tokens):
        lr_Add_reduce = _slot_0
        lcl_0 = prim__tokens.offset
        _off_0 = lcl_0
        lcl_0 = lr_step_Add(lr_Add_reduce, prim__state, prim__tokens)
        lr_Add_try = lcl_0
        lcl_0 = lr_Add_try[0]
        lcl_0 = (lcl_0 is not False)
        while lcl_0:
            lcl_1 = prim__tokens.offset
            _off_0 = lcl_1
            lcl_1 = lr_Add_try[1]
            lcl_1 = lcl_1
            lr_Add_reduce = lcl_1
            lcl_1 = lr_step_Add(lr_Add_reduce, prim__state, prim__tokens)
            lr_Add_try = lcl_1
            lcl_1 = lr_Add_try[0]
            lcl_1 = (lcl_1 is not False)
            lcl_0 = lcl_1
        lcl_0 = prim__tokens.offset
        lcl_0 = (lcl_0 is _off_0)
        if lcl_0:
            lcl_1 = (True, lr_Add_reduce)
            lcl_0 = lcl_1
        else:
            lcl_0 = lr_Add_try
        return lcl_0
    # One left-recursion step for Mul: match ('/' | '*'), parse an Atom as
    # the right operand, and fold it into the accumulated lhs via ``arith``.
    def lr_step_Mul(_slot_0, prim__state, prim__tokens):
        Mul_lhs_0 = _slot_0
        lcl_0 = prim__tokens.offset
        _off_0 = lcl_0
        lcl_0 = (len(prim__tokens.array) > (prim__tokens.offset + 0))
        if lcl_0:
            lcl_2 = prim__tokens.array[(prim__tokens.offset + 0)]
            lcl_2 = lcl_2.idint
            if (lcl_2 == 3):
                _py_local_i = prim__tokens.offset
                _py_local_t = prim__tokens.array[_py_local_i]
                prim__tokens.offset = (_py_local_i + 1)
                lcl_3 = _py_local_t
                _slot_1 = lcl_3
                Mul_op_1 = _slot_1
                lcl_3 = parse_Atom(prim__state, prim__tokens)
                _slot_2_check = lcl_3
                lcl_3 = _slot_2_check[0]
                lcl_3 = (lcl_3 is False)
                if lcl_3:
                    lcl_3 = _slot_2_check
                else:
                    lcl_4 = _slot_2_check[1]
                    lcl_4 = lcl_4
                    _slot_2 = lcl_4
                    Mul_rhs_1 = _slot_2
                    lcl_4 = arith(Mul_op_1, Mul_lhs_0, Mul_rhs_1)
                    _slot_local__1 = lcl_4
                    lcl_4 = (True, _slot_local__1)
                    lcl_3 = lcl_4
                lcl_1 = lcl_3
            elif (lcl_2 == 2):
                _py_local_i = prim__tokens.offset
                _py_local_t = prim__tokens.array[_py_local_i]
                prim__tokens.offset = (_py_local_i + 1)
                lcl_3 = _py_local_t
                _slot_1 = lcl_3
                Mul_op_1 = _slot_1
                lcl_3 = parse_Atom(prim__state, prim__tokens)
                _slot_2_check = lcl_3
                lcl_3 = _slot_2_check[0]
                lcl_3 = (lcl_3 is False)
                if lcl_3:
                    lcl_3 = _slot_2_check
                else:
                    lcl_4 = _slot_2_check[1]
                    lcl_4 = lcl_4
                    _slot_2 = lcl_4
                    Mul_rhs_1 = _slot_2
                    lcl_4 = arith(Mul_op_1, Mul_lhs_0, Mul_rhs_1)
                    _slot_local__1 = lcl_4
                    lcl_4 = (True, _slot_local__1)
                    lcl_3 = lcl_4
                lcl_1 = lcl_3
            else:
                lcl_3 = (_off_0, 'Mul lookahead failed')
                lcl_3 = prim__cons(lcl_3, prim__nil)
                lcl_3 = lcl_3
                lcl_3 = (False, lcl_3)
                lcl_1 = lcl_3
            lcl_0 = lcl_1
        else:
            lcl_1 = (_off_0, 'Mul got EOF')
            lcl_1 = prim__cons(lcl_1, prim__nil)
            lcl_1 = lcl_1
            lcl_1 = (False, lcl_1)
            lcl_0 = lcl_1
        return lcl_0
    # Left-recursion driver for Mul; same structure as lr_loop_Add.
    def lr_loop_Mul(_slot_0, prim__state, prim__tokens):
        lr_Mul_reduce = _slot_0
        lcl_0 = prim__tokens.offset
        _off_0 = lcl_0
        lcl_0 = lr_step_Mul(lr_Mul_reduce, prim__state, prim__tokens)
        lr_Mul_try = lcl_0
        lcl_0 = lr_Mul_try[0]
        lcl_0 = (lcl_0 is not False)
        while lcl_0:
            lcl_1 = prim__tokens.offset
            _off_0 = lcl_1
            lcl_1 = lr_Mul_try[1]
            lcl_1 = lcl_1
            lr_Mul_reduce = lcl_1
            lcl_1 = lr_step_Mul(lr_Mul_reduce, prim__state, prim__tokens)
            lr_Mul_try = lcl_1
            lcl_1 = lr_Mul_try[0]
            lcl_1 = (lcl_1 is not False)
            lcl_0 = lcl_1
        lcl_0 = prim__tokens.offset
        lcl_0 = (lcl_0 is _off_0)
        if lcl_0:
            lcl_1 = (True, lr_Mul_reduce)
            lcl_0 = lcl_1
        else:
            lcl_0 = lr_Mul_try
        return lcl_0
    # Add ::= Mul (('-' | '+') Mul)* -- parse the first Mul, then fold the
    # left-recursive tail with lr_loop_Add.
    def parse_Add(prim__state, prim__tokens):
        lcl_0 = parse_Mul(prim__state, prim__tokens)
        _slot_0_check = lcl_0
        lcl_0 = _slot_0_check[0]
        lcl_0 = (lcl_0 is False)
        if lcl_0:
            lcl_0 = _slot_0_check
        else:
            lcl_1 = _slot_0_check[1]
            lcl_1 = lcl_1
            _slot_0 = lcl_1
            Add_a_0 = _slot_0
            _slot_local__1 = Add_a_0
            lcl_1 = lr_loop_Add(_slot_local__1, prim__state, prim__tokens)
            lcl_0 = lcl_1
        return lcl_0
    # Atom ::= '(' Add ')' | number -- a parenthesized expression or a
    # single number token converted through ``unwrap``.
    def parse_Atom(prim__state, prim__tokens):
        lcl_0 = prim__tokens.offset
        _off_0 = lcl_0
        lcl_0 = (len(prim__tokens.array) > (prim__tokens.offset + 0))
        if lcl_0:
            lcl_2 = prim__tokens.array[(prim__tokens.offset + 0)]
            lcl_2 = lcl_2.idint
            if (lcl_2 == 6):
                _py_local_i = prim__tokens.offset
                _py_local_t = prim__tokens.array[_py_local_i]
                prim__tokens.offset = (_py_local_i + 1)
                lcl_3 = _py_local_t
                _slot_0 = lcl_3
                lcl_3 = parse_Add(prim__state, prim__tokens)
                _slot_1_check = lcl_3
                lcl_3 = _slot_1_check[0]
                lcl_3 = (lcl_3 is False)
                if lcl_3:
                    lcl_3 = _slot_1_check
                else:
                    lcl_4 = _slot_1_check[1]
                    lcl_4 = lcl_4
                    _slot_1 = lcl_4
                    Atom_a_1 = _slot_1
                    lcl_4 = 7
                    try:
                        _py_local_tk = prim__tokens.array[prim__tokens.offset]
                        if (_py_local_tk.idint is lcl_4):
                            prim__tokens.offset += 1
                        else:
                            _py_local_tk = None
                    except IndexError:
                        _py_local_tk = None
                    lcl_4 = _py_local_tk
                    _slot_2 = lcl_4
                    lcl_4 = (_slot_2 is None)
                    if lcl_4:
                        lcl_5 = prim__tokens.offset
                        lcl_5 = (lcl_5, 'quote ) not match')
                        lcl_5 = prim__cons(lcl_5, prim__nil)
                        lcl_5 = lcl_5
                        lcl_5 = (False, lcl_5)
                        lcl_4 = lcl_5
                    else:
                        _slot_local__1 = Atom_a_1
                        lcl_5 = (True, _slot_local__1)
                        lcl_4 = lcl_5
                    lcl_3 = lcl_4
                lcl_1 = lcl_3
            elif (lcl_2 == 8):
                _py_local_i = prim__tokens.offset
                _py_local_t = prim__tokens.array[_py_local_i]
                prim__tokens.offset = (_py_local_i + 1)
                lcl_3 = _py_local_t
                _slot_0 = lcl_3
                Atom_a_1 = _slot_0
                lcl_3 = unwrap(Atom_a_1)
                _slot_local__1 = lcl_3
                lcl_3 = (True, _slot_local__1)
                lcl_1 = lcl_3
            else:
                lcl_3 = (_off_0, 'Atom lookahead failed')
                lcl_3 = prim__cons(lcl_3, prim__nil)
                lcl_3 = lcl_3
                lcl_3 = (False, lcl_3)
                lcl_1 = lcl_3
            lcl_0 = lcl_1
        else:
            lcl_1 = (_off_0, 'Atom got EOF')
            lcl_1 = prim__cons(lcl_1, prim__nil)
            lcl_1 = lcl_1
            lcl_1 = (False, lcl_1)
            lcl_0 = lcl_1
        return lcl_0
    # Mul ::= Atom (('/' | '*') Atom)* -- parse the first Atom, then fold
    # the left-recursive tail with lr_loop_Mul.
    def parse_Mul(prim__state, prim__tokens):
        lcl_0 = parse_Atom(prim__state, prim__tokens)
        _slot_0_check = lcl_0
        lcl_0 = _slot_0_check[0]
        lcl_0 = (lcl_0 is False)
        if lcl_0:
            lcl_0 = _slot_0_check
        else:
            lcl_1 = _slot_0_check[1]
            lcl_1 = lcl_1
            _slot_0 = lcl_1
            Mul_a_0 = _slot_0
            _slot_local__1 = Mul_a_0
            lcl_1 = lr_loop_Mul(_slot_local__1, prim__state, prim__tokens)
            lcl_0 = lcl_1
        return lcl_0
    # START ::= BOF Add EOF -- the whole token stream must be one Add
    # expression bracketed by the BOF/EOF sentinel tokens.
    def parse_START(prim__state, prim__tokens):
        lcl_0 = 0
        try:
            _py_local_tk = prim__tokens.array[prim__tokens.offset]
            if (_py_local_tk.idint is lcl_0):
                prim__tokens.offset += 1
            else:
                _py_local_tk = None
        except IndexError:
            _py_local_tk = None
        lcl_0 = _py_local_tk
        _slot_0 = lcl_0
        lcl_0 = (_slot_0 is None)
        if lcl_0:
            lcl_1 = prim__tokens.offset
            lcl_1 = (lcl_1, 'BOF not match')
            lcl_1 = prim__cons(lcl_1, prim__nil)
            lcl_1 = lcl_1
            lcl_1 = (False, lcl_1)
            lcl_0 = lcl_1
        else:
            lcl_1 = parse_Add(prim__state, prim__tokens)
            _slot_1_check = lcl_1
            lcl_1 = _slot_1_check[0]
            lcl_1 = (lcl_1 is False)
            if lcl_1:
                lcl_1 = _slot_1_check
            else:
                lcl_2 = _slot_1_check[1]
                lcl_2 = lcl_2
                _slot_1 = lcl_2
                START_a_1 = _slot_1
                lcl_2 = 1
                try:
                    _py_local_tk = prim__tokens.array[prim__tokens.offset]
                    if (_py_local_tk.idint is lcl_2):
                        prim__tokens.offset += 1
                    else:
                        _py_local_tk = None
                except IndexError:
                    _py_local_tk = None
                lcl_2 = _py_local_tk
                _slot_2 = lcl_2
                lcl_2 = (_slot_2 is None)
                if lcl_2:
                    lcl_3 = prim__tokens.offset
                    lcl_3 = (lcl_3, 'EOF not match')
                    lcl_3 = prim__cons(lcl_3, prim__nil)
                    lcl_3 = lcl_3
                    lcl_3 = (False, lcl_3)
                    lcl_2 = lcl_3
                else:
                    _slot_local__1 = START_a_1
                    lcl_3 = (True, _slot_local__1)
                    lcl_2 = lcl_3
                lcl_1 = lcl_2
            lcl_0 = lcl_1
        return lcl_0
    return parse_START
| 37.771277 | 412 | 0.484439 | 1,942 | 14,202 | 2.943872 | 0.052523 | 0.067868 | 0.048977 | 0.036383 | 0.863915 | 0.833479 | 0.792723 | 0.714536 | 0.714536 | 0.701767 | 0 | 0.069844 | 0.444515 | 14,202 | 375 | 413 | 37.872 | 0.654836 | 0.004929 | 0 | 0.701657 | 1 | 0 | 0.020878 | 0 | 0.002762 | 0 | 0 | 0 | 0 | 1 | 0.024862 | false | 0 | 0.01105 | 0 | 0.060773 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
5e357b4b88c762d997d49f984f6e89eb4323641f | 331 | py | Python | python/thermostat2441TH.py | zdima/insteon-terminal | 22f86384f5f2cbe7be5d324c949b9f17e8b66e62 | [
"Unlicense"
] | 65 | 2015-09-28T21:12:56.000Z | 2022-02-01T12:11:32.000Z | python/thermostat2441TH.py | vastsuperking/insteon-terminal | 687194db232ea3760e1b67a2c0dfacb5c743a205 | [
"Unlicense"
] | 25 | 2016-02-12T02:47:53.000Z | 2022-02-07T02:54:25.000Z | python/thermostat2441TH.py | vastsuperking/insteon-terminal | 687194db232ea3760e1b67a2c0dfacb5c743a205 | [
"Unlicense"
] | 36 | 2015-12-09T03:47:47.000Z | 2022-02-10T03:36:17.000Z | #-------------------------------------------------------------------------------
#
# Insteon Thermostat 2441TH
#
from thermostat import Thermostat
class Thermostat2441TH(Thermostat):
	"""============== Insteon Thermostat 2441TH ==============="""
	def __init__(self, name, addr):
		# Delegate to the generic Thermostat base with the fixed constant
		# 0x1fff -- presumably the 2441TH's device-specific flag/feature
		# value; TODO confirm its meaning against the Thermostat base class.
		Thermostat.__init__(self, name, addr, 0x1fff)
| 27.583333 | 80 | 0.501511 | 24 | 331 | 6.583333 | 0.541667 | 0.21519 | 0.291139 | 0.202532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.04698 | 0.099698 | 331 | 11 | 81 | 30.090909 | 0.483221 | 0.492447 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037736 | 0 | 0 | 1 | 0.25 | false | 0 | 0.25 | 0 | 0.75 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 7 |
d8116fea6eac382a5acf0e9685b568bd6c0be145 | 129 | py | Python | webpay_plus_mall_deferred/__init__.py | TransbankDevelopers/transbank-sdk-python-webpay-rest-example | 81bdd421e3efe1285122b82ad0634cec8f499dff | [
"BSD-3-Clause"
] | 4 | 2021-03-23T19:51:02.000Z | 2022-02-07T20:05:27.000Z | webpay_plus_mall_deferred/__init__.py | TransbankDevelopers/transbank-sdk-python-webpay-rest-example | 81bdd421e3efe1285122b82ad0634cec8f499dff | [
"BSD-3-Clause"
] | 2 | 2019-07-30T18:35:23.000Z | 2019-07-30T18:35:43.000Z | webpay_plus_mall_deferred/__init__.py | TransbankDevelopers/transbank-sdk-python-webpay-rest-example | 81bdd421e3efe1285122b82ad0634cec8f499dff | [
"BSD-3-Clause"
] | 1 | 2021-01-25T17:58:34.000Z | 2021-01-25T17:58:34.000Z | from flask import Blueprint
# Blueprint under which this package's views are registered.
bp = Blueprint('webpay_plus_mall_deferred', __name__)
# Imported after ``bp`` is defined (conventional Flask blueprint layout);
# the routes module presumably registers its view functions on ``bp``,
# which would fail with a circular import if this line moved to the top.
from webpay_plus_mall_deferred import routes
| 21.5 | 53 | 0.844961 | 18 | 129 | 5.5 | 0.611111 | 0.20202 | 0.282828 | 0.444444 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108527 | 129 | 5 | 54 | 25.8 | 0.86087 | 0 | 0 | 0 | 0 | 0 | 0.193798 | 0.193798 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0.666667 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 8 |
d8289e539a70deafa86275b42d119d9f4cf39c31 | 7,765 | py | Python | cosmpy/protos/cosmos/tx/v1beta1/service_pb2_grpc.py | evsmithx/cosmpy | 7dfc81528b287f90190d6d4387942340f8ab88cf | [
"Apache-2.0"
] | 15 | 2021-09-08T05:27:14.000Z | 2022-03-29T06:48:08.000Z | cosmpy/protos/cosmos/tx/v1beta1/service_pb2_grpc.py | evsmithx/cosmpy | 7dfc81528b287f90190d6d4387942340f8ab88cf | [
"Apache-2.0"
] | 39 | 2021-08-19T20:09:35.000Z | 2022-03-22T19:51:59.000Z | cosmpy/protos/cosmos/tx/v1beta1/service_pb2_grpc.py | evsmithx/cosmpy | 7dfc81528b287f90190d6d4387942340f8ab88cf | [
"Apache-2.0"
] | 5 | 2020-12-25T03:06:55.000Z | 2021-01-21T10:37:12.000Z | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from cosmos.tx.v1beta1 import service_pb2 as cosmos_dot_tx_dot_v1beta1_dot_service__pb2
class ServiceStub(object):
    """Service defines a gRPC service for interacting with transactions.
    """
    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # One unary-unary multi-callable per RPC of cosmos.tx.v1beta1.Service,
        # wired to the request/response (de)serializers from the generated
        # service_pb2 module.
        self.Simulate = channel.unary_unary(
                '/cosmos.tx.v1beta1.Service/Simulate',
                request_serializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.SimulateRequest.SerializeToString,
                response_deserializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.SimulateResponse.FromString,
                )
        self.GetTx = channel.unary_unary(
                '/cosmos.tx.v1beta1.Service/GetTx',
                request_serializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.GetTxRequest.SerializeToString,
                response_deserializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.GetTxResponse.FromString,
                )
        self.BroadcastTx = channel.unary_unary(
                '/cosmos.tx.v1beta1.Service/BroadcastTx',
                request_serializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.BroadcastTxRequest.SerializeToString,
                response_deserializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.BroadcastTxResponse.FromString,
                )
        self.GetTxsEvent = channel.unary_unary(
                '/cosmos.tx.v1beta1.Service/GetTxsEvent',
                request_serializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.GetTxsEventRequest.SerializeToString,
                response_deserializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.GetTxsEventResponse.FromString,
                )
class ServiceServicer(object):
    """Service defines a gRPC service for interacting with transactions.

    Base servicer: subclass it, override the methods below with real
    implementations, and register the instance via
    add_ServiceServicer_to_server.  Every method here reports UNIMPLEMENTED.
    """
    def Simulate(self, request, context):
        """Simulate simulates executing a transaction for estimating gas usage.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def GetTx(self, request, context):
        """GetTx fetches a tx by hash.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def BroadcastTx(self, request, context):
        """BroadcastTx broadcast transaction.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
    def GetTxsEvent(self, request, context):
        """GetTxsEvent fetches txs by event.
        """
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_ServiceServicer_to_server(servicer, server):
    """Register ``servicer``'s four RPC handlers on ``server`` under the
    'cosmos.tx.v1beta1.Service' service name."""
    rpc_method_handlers = {
            'Simulate': grpc.unary_unary_rpc_method_handler(
                    servicer.Simulate,
                    request_deserializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.SimulateRequest.FromString,
                    response_serializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.SimulateResponse.SerializeToString,
            ),
            'GetTx': grpc.unary_unary_rpc_method_handler(
                    servicer.GetTx,
                    request_deserializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.GetTxRequest.FromString,
                    response_serializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.GetTxResponse.SerializeToString,
            ),
            'BroadcastTx': grpc.unary_unary_rpc_method_handler(
                    servicer.BroadcastTx,
                    request_deserializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.BroadcastTxRequest.FromString,
                    response_serializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.BroadcastTxResponse.SerializeToString,
            ),
            'GetTxsEvent': grpc.unary_unary_rpc_method_handler(
                    servicer.GetTxsEvent,
                    request_deserializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.GetTxsEventRequest.FromString,
                    response_serializer=cosmos_dot_tx_dot_v1beta1_dot_service__pb2.GetTxsEventResponse.SerializeToString,
            ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
            'cosmos.tx.v1beta1.Service', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Service(object):
    """Service defines a gRPC service for interacting with transactions.

    Convenience wrappers: each static method performs a one-off unary-unary
    call through ``grpc.experimental`` without requiring the caller to build
    a stub first.
    """
    @staticmethod
    def Simulate(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # Simulate simulates executing a transaction for estimating gas usage.
        return grpc.experimental.unary_unary(request, target, '/cosmos.tx.v1beta1.Service/Simulate',
            cosmos_dot_tx_dot_v1beta1_dot_service__pb2.SimulateRequest.SerializeToString,
            cosmos_dot_tx_dot_v1beta1_dot_service__pb2.SimulateResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetTx(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # GetTx fetches a tx by hash.
        return grpc.experimental.unary_unary(request, target, '/cosmos.tx.v1beta1.Service/GetTx',
            cosmos_dot_tx_dot_v1beta1_dot_service__pb2.GetTxRequest.SerializeToString,
            cosmos_dot_tx_dot_v1beta1_dot_service__pb2.GetTxResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def BroadcastTx(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # BroadcastTx broadcasts a signed transaction.
        return grpc.experimental.unary_unary(request, target, '/cosmos.tx.v1beta1.Service/BroadcastTx',
            cosmos_dot_tx_dot_v1beta1_dot_service__pb2.BroadcastTxRequest.SerializeToString,
            cosmos_dot_tx_dot_v1beta1_dot_service__pb2.BroadcastTxResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
    @staticmethod
    def GetTxsEvent(request,
            target,
            options=(),
            channel_credentials=None,
            call_credentials=None,
            insecure=False,
            compression=None,
            wait_for_ready=None,
            timeout=None,
            metadata=None):
        # GetTxsEvent fetches txs by event.
        return grpc.experimental.unary_unary(request, target, '/cosmos.tx.v1beta1.Service/GetTxsEvent',
            cosmos_dot_tx_dot_v1beta1_dot_service__pb2.GetTxsEventRequest.SerializeToString,
            cosmos_dot_tx_dot_v1beta1_dot_service__pb2.GetTxsEventResponse.FromString,
            options, channel_credentials,
            insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| 44.884393 | 121 | 0.68152 | 774 | 7,765 | 6.449612 | 0.144703 | 0.052083 | 0.055088 | 0.070112 | 0.807492 | 0.79347 | 0.79347 | 0.731771 | 0.72496 | 0.698918 | 0 | 0.016441 | 0.248036 | 7,765 | 172 | 122 | 45.145349 | 0.8385 | 0.083065 | 0 | 0.484848 | 1 | 0 | 0.075199 | 0.044126 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075758 | false | 0 | 0.015152 | 0.030303 | 0.143939 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
dc44239e29aa5c74bea8ee34e1fcecf7ce90bbac | 117 | py | Python | GodwillOnyewuchi/Phase 1/Python Basic 2/day 9 task/task 5.py | CodedLadiesInnovateTech/-python-challenge-solutions | 430cd3eb84a2905a286819eef384ee484d8eb9e7 | [
"MIT"
] | 6 | 2020-05-23T19:53:25.000Z | 2021-05-08T20:21:30.000Z | GodwillOnyewuchi/Phase 1/Python Basic 2/day 9 task/task 5.py | GREENFONTS/python-challenge-solutions | a9aad85a250892fe41961a7d5e77f67b8d14fc1b | [
"MIT"
] | 8 | 2020-05-14T18:53:12.000Z | 2020-07-03T00:06:20.000Z | GodwillOnyewuchi/Phase 1/Python Basic 2/day 9 task/task 5.py | GREENFONTS/python-challenge-solutions | a9aad85a250892fe41961a7d5e77f67b8d14fc1b | [
"MIT"
] | 39 | 2020-05-10T20:55:02.000Z | 2020-09-12T17:40:59.000Z | #Python program to get the copyright information
# Show the Python interpreter's copyright notice (sys.copyright).
import sys

copyright_message = f'Python Copyright Information: {sys.copyright}'
print(copyright_message)
| 23.4 | 55 | 0.803419 | 16 | 117 | 5.875 | 0.6875 | 0.425532 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.119658 | 117 | 4 | 56 | 29.25 | 0.912621 | 0.401709 | 0 | 0 | 0 | 0 | 0.652174 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0.5 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 7 |
dc487b86cd69877814e7e18ae6037ce9d274e57d | 39,661 | py | Python | main.py | Jwdr29link/BigTxt | 4e2a5f4f650c4fff2572cd58c7ce319da5f3d19f | [
"MIT",
"Unlicense"
] | null | null | null | main.py | Jwdr29link/BigTxt | 4e2a5f4f650c4fff2572cd58c7ce319da5f3d19f | [
"MIT",
"Unlicense"
] | null | null | null | main.py | Jwdr29link/BigTxt | 4e2a5f4f650c4fff2572cd58c7ce319da5f3d19f | [
"MIT",
"Unlicense"
] | null | null | null | # Imports -
import time
# Start message -
input("Press enter to start the text file generation process ->")
print("")
time.sleep(1)
# Generates these words 18 quintiliion times -
f= open("GeneratedTextFile.txt","w+")
for i in range(18000000000000000000):# Writes paragraph inside clause 18 quintillion times
f.write("This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text 
This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is 
text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This 
is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text 
This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is 
text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This 
is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text 
This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is 
text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This 
is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text 
This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is 
text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This 
is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text 
This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is 
text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This 
is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text 
This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is 
text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This 
is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text 
This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text This is text \r\n" % ())
f.close()
| 2,333 | 39,337 | 0.76869 | 9,120 | 39,661 | 3.342873 | 0.004825 | 0.595139 | 0.991898 | 1.388198 | 0.991898 | 0.991898 | 0.991898 | 0.991898 | 0.991898 | 0.991898 | 0 | 0.000819 | 0.230151 | 39,661 | 16 | 39,338 | 2,478.8125 | 0.997675 | 0.003076 | 0 | 0 | 1 | 0.125 | 0.996484 | 0.000531 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.125 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 13 |
dca510f8a2d0fd26043d5f3a3bb2d55c14cd09a2 | 5,390 | py | Python | tests/test_metrics/test_threshold.py | wikimedia/analytics-wikimetrics | 1d2036657b06ccd16ecfc76edd3f9a6119ff75f4 | [
"MIT"
] | 6 | 2015-01-28T05:59:08.000Z | 2018-01-09T07:48:57.000Z | tests/test_metrics/test_threshold.py | wikimedia/analytics-wikimetrics | 1d2036657b06ccd16ecfc76edd3f9a6119ff75f4 | [
"MIT"
] | 2 | 2020-05-09T16:36:43.000Z | 2020-05-09T16:52:35.000Z | tests/test_metrics/test_threshold.py | wikimedia/analytics-wikimetrics | 1d2036657b06ccd16ecfc76edd3f9a6119ff75f4 | [
"MIT"
] | 1 | 2016-01-13T07:19:44.000Z | 2016-01-13T07:19:44.000Z | from datetime import datetime, timedelta
from time import time
from nose.tools import assert_equal
from tests.fixtures import DatabaseTest, i, tz_note
from wikimetrics.utils import format_date, CENSORED
from wikimetrics.metrics import Threshold
class ThresholdTest(DatabaseTest):
    """Tests for the Threshold metric (did an editor make N edits within T hours
    of registering, and if so, how long did it take).

    setUp builds two identical pairs of editors: every editor registers 48 hours
    in the past; editor 1 of each pair edits at +1h and +25h, editor 2 at +30h
    and +40h.  `tz_note` is passed as the assertion message so timezone-related
    failures are called out.
    """
    def setUp(self):
        one_hour = timedelta(hours=1)
        # Registration time: 48 hours before "now".
        reg = datetime.now() - one_hour * 48
        DatabaseTest.setUp(self)
        self.create_test_cohort(
            editor_count=2,
            revisions_per_editor=2,
            user_registrations=i(reg),
            revision_timestamps=[
                [i(reg + one_hour) , i(reg + one_hour * 25)],
                [i(reg + one_hour * 30), i(reg + one_hour * 40)],
            ],
            revision_lengths=10
        )
        # A second, identical pair of editors inserted outside the cohort so
        # the whole-wiki test below sees 4 editors in total.
        self.helper_insert_editors(
            editor_count=2,
            revisions_per_editor=2,
            user_registrations=i(reg),
            revision_timestamps=[
                [i(reg + one_hour) , i(reg + one_hour * 25)],
                [i(reg + one_hour * 30), i(reg + one_hour * 40)],
            ],
            revision_lengths=10
        )
        self.e1 = self.editors[0].user_id
        self.e2 = self.editors[1].user_id

    def test_filters_out_other_editors(self):
        # Running with the cohort's editor ids returns results only for those 2
        # editors, not the extra pair inserted in setUp.
        metric = Threshold(
            namespaces=[0],
        )
        results = metric(self.editor_ids, self.mwSession)
        assert_equal(len(results), 2)

    def test_runs_for_an_entire_wiki(self):
        # Passing None instead of editor ids runs the metric over all 4 editors.
        metric = Threshold(
            namespaces=[0],
        )
        results = metric(None, self.mwSession)
        assert_equal(len(results), 4)
        assert_equal(results[self.e1][Threshold.id], True, tz_note)
        assert_equal(results[self.e2][Threshold.id], False, tz_note)
        assert_equal(results[self.e1][Threshold.time_to_threshold_id], 1, tz_note)
        assert_equal(results[self.e2][Threshold.time_to_threshold_id], None, tz_note)
        assert_equal(results[self.e1][CENSORED], False, tz_note)
        assert_equal(results[self.e2][CENSORED], False, tz_note)
        # NOTE: this is a bit precarious as it assumes the order of test data inserts
        assert_equal(results[self.e1 + 2][Threshold.id], True, tz_note)
        assert_equal(results[self.e1 + 2][Threshold.time_to_threshold_id], 1, tz_note)
        assert_equal(results[self.e1 + 2][CENSORED], False, tz_note)

    def test_case1_24h_count1(self):
        # Default parameters (per the test name: 24h window, 1 edit —
        # TODO confirm these are indeed the Threshold defaults).
        metric = Threshold(
            namespaces=[0],
        )
        results = metric(self.editor_ids, self.mwSession)
        assert_equal(results[self.e1][Threshold.id], True, tz_note)
        assert_equal(results[self.e2][Threshold.id], False, tz_note)
        assert_equal(results[self.e1][Threshold.time_to_threshold_id], 1, tz_note)
        assert_equal(results[self.e2][Threshold.time_to_threshold_id], None, tz_note)
        assert_equal(results[self.e1][CENSORED], False, tz_note)
        assert_equal(results[self.e2][CENSORED], False, tz_note)

    def test_case1_72h_count1(self):
        # A 72h window also captures editor 2's first edit at +30h.
        metric = Threshold(
            namespaces=[0],
            threshold_hours=72
        )
        results = metric(self.editor_ids, self.mwSession)
        assert_equal(results[self.e1][Threshold.id], True, tz_note)
        assert_equal(results[self.e2][Threshold.id], True, tz_note)
        assert_equal(results[self.e1][Threshold.time_to_threshold_id], 1, tz_note)
        assert_equal(results[self.e2][Threshold.time_to_threshold_id], 30, tz_note)
        assert_equal(results[self.e1][CENSORED], False, tz_note)
        assert_equal(results[self.e2][CENSORED], False, tz_note)

    def test_case1_72h_count3(self):
        # Neither editor has 3 edits within 72h; the 72h window extends beyond
        # "now" (registration was only 48h ago), so both results are censored.
        metric = Threshold(
            namespaces=[0],
            threshold_hours=72,
            number_of_edits=3,
        )
        results = metric(self.editor_ids, self.mwSession)
        assert_equal(results[self.e1][Threshold.id], False, tz_note)
        assert_equal(results[self.e2][Threshold.id], False, tz_note)
        assert_equal(results[self.e1][Threshold.time_to_threshold_id], None, tz_note)
        assert_equal(results[self.e2][Threshold.time_to_threshold_id], None, tz_note)
        assert_equal(results[self.e1][CENSORED], True, tz_note)
        assert_equal(results[self.e2][CENSORED], True, tz_note)

    def test_case1_24h_count3(self):
        # Neither editor has 3 edits within 24h; the window has fully elapsed,
        # so results are definitive (not censored).
        metric = Threshold(
            namespaces=[0],
            number_of_edits=3,
        )
        results = metric(self.editor_ids, self.mwSession)
        assert_equal(results[self.e1][Threshold.id], False, tz_note)
        assert_equal(results[self.e2][Threshold.id], False, tz_note)
        assert_equal(results[self.e1][Threshold.time_to_threshold_id], None, tz_note)
        assert_equal(results[self.e2][Threshold.time_to_threshold_id], None, tz_note)
        assert_equal(results[self.e1][CENSORED], False, tz_note)
        assert_equal(results[self.e2][CENSORED], False, tz_note)

    # NOTE(review): method name misspells "threshold"; renaming would change
    # the discovered test id, so it is documented rather than fixed here.
    def test_time_to_thershold(self):
        # Editor 1's 2nd edit lands exactly at +25h, the edge of the window.
        metric = Threshold(
            namespaces=[0],
            threshold_hours=1 * 25,
            number_of_edits=2,
        )
        results = metric(self.editor_ids, self.mwSession)
        assert_equal(results[self.e1][Threshold.time_to_threshold_id], 25, tz_note)
        assert_equal(results[self.e2][Threshold.time_to_threshold_id], None, tz_note)
| 40.526316 | 86 | 0.636178 | 695 | 5,390 | 4.673381 | 0.143885 | 0.128695 | 0.193966 | 0.237069 | 0.828202 | 0.828202 | 0.79064 | 0.746613 | 0.704126 | 0.704126 | 0 | 0.026846 | 0.253618 | 5,390 | 132 | 87 | 40.833333 | 0.780512 | 0.013915 | 0 | 0.585586 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.342342 | 1 | 0.072072 | false | 0 | 0.054054 | 0 | 0.135135 | 0 | 0 | 0 | 0 | null | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
f4e17dc205fb45d7dc20fadc38783627fe579782 | 56 | py | Python | orders/services/__init__.py | brunocamposal/ekommerce | 7f05f589bce064729ac3cb013284fa239d705232 | [
"MIT"
] | null | null | null | orders/services/__init__.py | brunocamposal/ekommerce | 7f05f589bce064729ac3cb013284fa239d705232 | [
"MIT"
] | 1 | 2021-03-07T21:19:52.000Z | 2021-03-07T21:19:52.000Z | orders/services/__init__.py | brunocamposal/ekommerce | 7f05f589bce064729ac3cb013284fa239d705232 | [
"MIT"
] | null | null | null | from .calculate_total_price import calculate_total_price | 56 | 56 | 0.928571 | 8 | 56 | 6 | 0.625 | 0.583333 | 0.791667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.053571 | 56 | 1 | 56 | 56 | 0.90566 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
522796190ad468ccb6cbea143005ec4efa42ee05 | 8,644 | py | Python | util/generate_plots.py | sch0ngut/Physics-Informed-Machine-Learning | 0459c9b5a2d5868d193c9d3c5828d886d91c559c | [
"MIT"
] | 4 | 2021-04-07T18:35:34.000Z | 2021-11-01T13:43:06.000Z | util/generate_plots.py | sch0ngut/Physics-Informed-Machine-Learning | 0459c9b5a2d5868d193c9d3c5828d886d91c559c | [
"MIT"
] | null | null | null | util/generate_plots.py | sch0ngut/Physics-Informed-Machine-Learning | 0459c9b5a2d5868d193c9d3c5828d886d91c559c | [
"MIT"
] | 3 | 2021-05-23T23:43:24.000Z | 2022-02-09T19:14:55.000Z | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import gridspec
def generate_contour_plot(u: np.ndarray, savefig_path="", legend_loc='upper right', **kwargs) -> None:
    """
    Draw a filled space-time contour plot of a solution array.

    :param u: Solution to plot, shaped (n_spatial x n_temporal).
    :param savefig_path: File path for saving the figure; empty string skips saving.
    :param legend_loc: Legend placement, used only when training data is overlaid.
    :param kwargs: Optional 'train_feat' array of (x, t) training points to overlay.
    """
    # Rebuild the (x, t) grid from the solution's shape; domain is [-1,1] x [0,1].
    n_space, n_time = u.shape[0], u.shape[1]
    space_axis = np.linspace(-1, 1, n_space)
    time_axis = np.linspace(0, 1, n_time)
    space_grid, time_grid = np.meshgrid(space_axis, time_axis)
    # Filled contour with a fixed colour range of [-1, 1].
    plt.figure(figsize=(9.33, 4))
    filled = plt.contourf(time_grid, space_grid, u.T, np.arange(-1.0, 1.01, .01), vmin=-1, vmax=1, cmap=plt.cm.jet)
    colour_bar = plt.colorbar(filled)
    colour_bar.set_ticks(np.linspace(-1, 1, 5, endpoint=True))
    plt.ylabel(r'$x$')
    plt.xlabel(r'$t$')
    plt.title(r'$u(x,t)$')
    # Overlay training samples only when the caller supplied them.
    if 'train_feat' in kwargs:
        train_feat = kwargs.get('train_feat')
        plt.plot(train_feat[:, 1], train_feat[:, 0], 'kx', label='Training data', markersize=4, clip_on=False)
        plt.legend(frameon=False, loc=legend_loc)
    if savefig_path:
        plt.savefig(savefig_path, dpi=500)
    plt.show()
def generate_snapshots_plot(u: np.ndarray,
                            t_vec: np.array = np.array([0, 0.25, 0.5, 0.75, 1]),
                            savefig_path: str = "") -> None:
    """
    Plot the solution at several fixed times as curves over the spatial axis.

    :param u: Solution to plot, shaped (n_spatial x n_temporal).
    :param t_vec: Normalized times in [0, 1] at which to take snapshots.
    :param savefig_path: File path for saving the figure; empty string skips saving.
    """
    n_space, n_time = u.shape[0], u.shape[1]
    space_axis = np.linspace(-1, 1, n_space)
    # One curve per requested time; map the fractional time to a column index.
    for snapshot_time in t_vec:
        column = int(snapshot_time * (n_time - 1))
        plt.plot(space_axis, u[:, column], label=r'$t={{{}}}$'.format(snapshot_time))
    plt.legend()
    plt.ylabel(r'$u(x,t)$')
    plt.xlabel(r'$x$')
    if savefig_path:
        plt.savefig(savefig_path, dpi=500)
    plt.show()
def generate_contour_and_snapshots_plot(u: np.ndarray,
                                        t_vec: np.array = np.array([0, 0.25, 0.5, 0.75, 1]),
                                        savefig_path: str = "",
                                        legend_loc: str = 'upper right',
                                        **kwargs) -> None:
    """
    Draw a contour plot and a time-snapshots plot side by side in one figure.

    :param u: Solution to plot, shaped (n_spatial x n_temporal).
    :param t_vec: Normalized times in [0, 1] at which to take snapshots.
    :param savefig_path: File path for saving the figure; empty string skips saving.
    :param legend_loc: Legend placement for the contour panel's training-data legend.
    :param kwargs: Optional 'train_feat' array of (x, t) training points to overlay.
    """
    n_space, n_time = u.shape[0], u.shape[1]
    space_axis = np.linspace(-1, 1, n_space)
    time_axis = np.linspace(0, 1, n_time)
    space_grid, time_grid = np.meshgrid(space_axis, time_axis)
    # Two panels: wide contour on the left, snapshots on the right.
    plt.figure(figsize=(14, 4))
    layout = gridspec.GridSpec(1, 2, width_ratios=[2, 1])
    # Left panel: filled contour with a fixed colour range of [-1, 1].
    contour_ax = plt.subplot(layout[0])
    filled = contour_ax.contourf(time_grid, space_grid, u.T, np.arange(-1.01, 1.01, .01), vmin=-1, vmax=1, cmap=plt.cm.jet)
    colour_bar = plt.colorbar(filled)
    colour_bar.set_ticks(np.linspace(-1, 1, 5, endpoint=True))
    contour_ax.set_ylabel(r'$x$')
    contour_ax.set_xlabel(r'$t$')
    contour_ax.set_title(r'$u(x,t)$')
    # Overlay training samples only when the caller supplied them.
    if 'train_feat' in kwargs:
        train_feat = kwargs.get('train_feat')
        plt.plot(train_feat[:, 1], train_feat[:, 0], 'kx', label='Training data', markersize=4, clip_on=False)
        contour_ax.legend(frameon=False, loc=legend_loc)
    # Right panel: one curve per requested snapshot time.
    snapshot_ax = plt.subplot(layout[1])
    for snapshot_time in t_vec:
        column = int(snapshot_time * (n_time - 1))
        snapshot_ax.plot(space_axis, u[:, column], label=r'$t={{{}}}$'.format(snapshot_time))
    snapshot_ax.legend()
    snapshot_ax.set_ylabel(r'$u(x,t)$')
    snapshot_ax.set_xlabel(r'$x$')
    if savefig_path:
        plt.savefig(savefig_path, dpi=500)
    plt.show()
def generate_two_contour_and_snapshots_plots(u1: np.ndarray,
                                             u2: np.ndarray,
                                             t_vec: np.array = np.array([0, 0.25, 0.5, 0.75, 1]),
                                             savefig_path: str = "",
                                             legend_loc: str = 'upper right',
                                             **kwargs) -> None:
    """
    Draw two stacked rows of (contour, snapshots) panels for two solutions.

    :param u1: First solution to plot (top row), shaped (n_spatial x n_temporal).
    :param u2: Second solution to plot (bottom row), shaped (n_spatial x n_temporal).
    :param t_vec: Normalized times in [0, 1] at which to take snapshots.
    :param savefig_path: File path for saving the figure; empty string skips saving.
    :param legend_loc: Legend placement for the top contour panel's training-data legend.
    :param kwargs: Optional 'train_feat' array of (x, t) points overlaid on the top contour only.
    """
    plt.figure(figsize=(14, 10))
    layout = gridspec.GridSpec(2, 2, width_ratios=[2, 1])

    def _contour_panel(u, cell):
        # Filled contour of one solution in the given grid cell; returns its axes.
        n_space, n_time = u.shape[0], u.shape[1]
        space_axis = np.linspace(-1, 1, n_space)
        time_axis = np.linspace(0, 1, n_time)
        space_grid, time_grid = np.meshgrid(space_axis, time_axis)
        ax = plt.subplot(cell)
        filled = ax.contourf(time_grid, space_grid, u.T, np.arange(-1.0, 1.01, .01), vmin=-1, vmax=1, cmap=plt.cm.jet)
        colour_bar = plt.colorbar(filled)
        colour_bar.set_ticks(np.linspace(-1, 1, 5, endpoint=True))
        ax.set_ylabel(r'$x$')
        ax.set_xlabel(r'$t$')
        ax.set_title(r'$u(x,t)$')
        return ax

    def _snapshot_panel(u, cell):
        # Time-snapshot curves of one solution in the given grid cell.
        n_space, n_time = u.shape[0], u.shape[1]
        space_axis = np.linspace(-1, 1, n_space)
        ax = plt.subplot(cell)
        for snapshot_time in t_vec:
            column = int(snapshot_time * (n_time - 1))
            ax.plot(space_axis, u[:, column], label=r'$t={{{}}}$'.format(snapshot_time))
        ax.legend()
        ax.set_ylabel(r'$u(x,t)$')
        ax.set_xlabel(r'$x$')

    # Top row: first solution, optionally with the training data overlaid.
    top_contour_ax = _contour_panel(u1, layout[0])
    if 'train_feat' in kwargs:
        train_feat = kwargs.get('train_feat')
        plt.plot(train_feat[:, 1], train_feat[:, 0], 'kx', label='Training data', markersize=4, clip_on=False)
        top_contour_ax.legend(frameon=False, loc=legend_loc)
    _snapshot_panel(u1, layout[1])
    # Bottom row: second solution, no training-data overlay.
    _contour_panel(u2, layout[2])
    _snapshot_panel(u2, layout[3])
    if savefig_path:
        plt.savefig(savefig_path, dpi=500)
    plt.show()
def generate_loss_plot(loss_df: pd.DataFrame, savefig_path: str = None, **kwargs) -> None:
    """
    Plot each loss column of a data frame against its epoch index.

    :param loss_df: Data frame with one column per loss and epochs as the index.
    :param savefig_path: File path for saving the figure; None/empty skips saving.
    :param kwargs:
        - color_dict: maps column names to plot colours (missing names get '#333333')
        - label_dict: maps column names to legend labels
    """
    # Use the caller's colour mapping when given; otherwise pandas defaults.
    if 'color_dict' in kwargs:
        palette = kwargs.get('color_dict')
        loss_df.plot(color=[palette.get(col, '#333333') for col in loss_df.columns])
    else:
        loss_df.plot()
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    # Same pattern for legend labels.
    if 'label_dict' in kwargs:
        labels = kwargs.get('label_dict')
        plt.legend([labels.get(col) for col in loss_df.columns])
    else:
        plt.legend()
    if savefig_path:
        plt.savefig(savefig_path, dpi=500)
    plt.show()
| 36.167364 | 115 | 0.602615 | 1,372 | 8,644 | 3.668367 | 0.115889 | 0.043712 | 0.01967 | 0.021458 | 0.8031 | 0.795549 | 0.777667 | 0.756209 | 0.756209 | 0.740115 | 0 | 0.036256 | 0.259718 | 8,644 | 238 | 116 | 36.319328 | 0.750273 | 0.280194 | 0 | 0.680851 | 1 | 0 | 0.055271 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035461 | false | 0 | 0.028369 | 0 | 0.06383 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
526fd3b548dbcaddd459d11a9e654b963509caf9 | 144,645 | py | Python | PsychoPy_Sample1.py | daviddevito/PsychoPy | 5b0f3915e787d12bcbb8da92aa178d994f41f28a | [
"MIT"
] | null | null | null | PsychoPy_Sample1.py | daviddevito/PsychoPy | 5b0f3915e787d12bcbb8da92aa178d994f41f28a | [
"MIT"
] | null | null | null | PsychoPy_Sample1.py | daviddevito/PsychoPy | 5b0f3915e787d12bcbb8da92aa178d994f41f28a | [
"MIT"
] | null | null | null | from psychopy import visual, core, data, event, logging, gui, misc, monitors
import numpy as np # whole numpy lib is available, prepend 'np.'
import os # handy system and path functions
from psychopy.constants import * # things like STARTED, FINISHED
import random
import math
#import win32api
#import Image
import ctypes
import time
import pylab
random.seed() #initialize random number generator
#global variables
NUM_SEARCH_POS = 8
NUM_TARGET_POS = 8
NUM_STIMULUS_POS = 4
ISI = 1.0 #duration of time separating trials
ISA = 0.2 #time between search arrays
TEXT_HEIGHT = 0.75
TEXT_WRAPPING = 30
tCol = [.0,.0,.0] #text and cue colour
STIM_TIME = 0.5
FIXATION_TIME = 0.15
FIRST_CUE_TIME = 0.2
SECOND_CUE_TIME = 0.2
FIRST_RETENTION_INTERVAL = 0.8
SECOND_RETENTION_INTERVAL = 0.8
THIRD_RETENTION_INTERVAL = 0.8
FOURTH_RETENTION_INTERVAL = 0.8
FIFTH_RETENTION_INTERVAL = 0.8
FIRST_PROBE_ON_SCREEN = 1.0
FIRST_PROBE_OFF_SCREEN = 1.0
SECOND_PROBE_ON_SCREEN = 1.0
SECOND_PROBE_OFF_SCREEN = 1.0
EVAL_FIXATION_TIME = 0.15
MEMORY_TEXT_TIME = 0.4
REMEMBER_TEXT_TIME = 0.4
EVAL_TEXT_TIME = 0.4
EVAL_STIM_ON_SCREEN = 1.0
EVAL_STIM_OFF_SCREEN = 1.0
INTER_TRIAL_INTERVAL = 0.5
FEEDBACK_TIME = 0.15
SEARCH_ARRAY_TIME = 6.0
EVAL_BEFORE_RETRO_BLANK = FIRST_CUE_TIME + SECOND_RETENTION_INTERVAL
EVAL_AFTER_RETRO_BLANK = SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME
bCol = [1,1,1] #background colour
sCol=[0.35,0.35,0.35] #search stim colour
tCol = [.0,.0,.0] #text and cue colour
TOTAL_DEGREES = 360
SEARCH_ECCEN = 3.66 #width of search array
idxSearchPos = range(0, NUM_SEARCH_POS)
#DEBUGGING/TESTING TIMING
#STIM_TIME = 0.05
#RETENTION_INTERVAL = 0.05
#FIXATION_TIME = 0.05
#FIRST_CUE_TIME = 0.05
#SECOND_CUE_TIME = 0.05
#FIRST_RETENTION_INTERVAL = 0.05
#SECOND_RETENTION_INTERVAL = 0.05
#THIRD_RETENTION_INTERVAL = 0.05
#FOURTH_RETENTION_INTERVAL = 0.05
#FIFTH_RETENTION_INTERVAL = 0.05
#FIRST_PROBE_ON_SCREEN = 0.05
#FIRST_PROBE_OFF_SCREEN = 0.05
#SECOND_PROBE_ON_SCREEN = 0.05
#SECOND_PROBE_OFF_SCREEN = 0.05
#EVAL_FIXATION_TIME = 0.05
#MEMORY_TEXT_TIME = 0.05
#EVAL_TEXT_TIME = 0.05
#EVAL_STIM_ON_SCREEN = 0.05
#EVAL_STIM_OFF_SCREEN = 0.05
#INTER_TRIAL_INTERVAL = 0.05
#Array of Positions of stimuli in each array, randomized before each array is presented
POSARRAY = [[-3,-3],[-3,3],[3,3],[3,-3]]
SIZE = 2
NUM_REPS = 2
NUM_SEARCH_REPS = 6
NUM_STIMULI = 480
#Get subject number
# Loop until a usable subject number is entered; builds the data file name.
expName = 'ActAccess_Halves_SearchTask_Mar2017'
fileName = ''
while True:
    expInfo = {'subjNum':''}
    dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
    if dlg.OK == False: core.quit() # user pressed cancel so exit experiment
    if expInfo['subjNum'].isdigit(): #make sure subject number is a number
        fileName = expName + '_' + expInfo['subjNum'] + '.txt'
        if int(expInfo['subjNum']) == 999: break #always accept in testing
        if not os.path.isfile(fileName): break #check if data file already exists
        else: #if data file already exists, ask experimenter if should overwrite
            # NOTE(review): the overwrite-confirmation dialogs below are commented
            # out, so an existing file is silently overwritten by the bare break.
            # if ctypes.windll.user32.MessageBoxA(0,'File already exists. Press OK to overwrite.','File already exists',1) == 1:
            # if ctypes.windll.user32.MessageBoxA(0,'Not that I don\'t trust you, but are you sure you want to overwrite? \n\nPress OK to overwrite.','File already exists',1) == 1:
            # break
            break
    else:
        # NOTE(review): the invalid-number warning is commented out, so a
        # non-numeric entry falls through with fileName left as ''.
        #ctypes.windll.user32.MessageBoxA(0,'Please specify a number','Invalid participant number',0)
        break
def _prompt_single_field(field_name):
    """Show a one-field dialog and return the filled-in dict.

    Quits the experiment if the user presses cancel.  Replaces four
    copy-pasted ``while True`` loops whose loop wrapper was dead code
    (every path either quit or broke after a single iteration).
    """
    info = {field_name: ''}
    dlg = gui.DlgFromDict(dictionary=info, title=expName)
    if dlg.OK == False: core.quit() # user pressed cancel so exit experiment
    return info

# Demographic prompts; kept as separate dicts because the output line below
# reads expInfo2..expInfo5 individually.
expInfo2 = _prompt_single_field('age')
expInfo3 = _prompt_single_field('gender')
expInfo4 = _prompt_single_field('handedness')
expInfo5 = _prompt_single_field('glasses/contacts?')
# Open the per-subject data file and derive the response-button mapping.
outputFile = open(fileName, 'w')
subjectNumber = expInfo['subjNum']
# Counterbalance the shape-to-button assignment by subject-number parity.
buttonCounterbalance = int(subjectNumber)%(2)
if buttonCounterbalance == 0:
    butVar1 = 'Circle'
    butVar2 = 'Square'
if buttonCounterbalance == 1:
    butVar1 = 'Square'
    butVar2 = 'Circle'
#buttonCounterbalance = 0: up same, down different, buttonCounterbalance = 1: up different, down same
#up circle button
#down square button
#Column headers for output file
outStr = "Time\tTrialNum\tTrialType\tBlueStim\tBluePos\tGreenStim\tGreenPos\tPurpleStim\tPurplePos\tPinkStim\tPinkPos\tOrangeStim\tOrangePos\tFirstCuedSide(0=Left,1=Right)\tFirstCuedPos\tSecondCuedPos\tFirstProbeStim\tFirstProbeSameorDifferent(0=Same,1=Different)\tFirstProbeResp\tFirstProbeACC\tFirstProbeRT\tFirstProbeErrorCode\tFirstProbeErrorName\tSecondProbeStim\tSecondProbeSameorDifferent(0=Same,1=Different)\tSecondProbeResp\tSecondProbeACC\tSecondProbeRT\tSecondProbeErrorCode\tSecondProbeErrorName\tEvaluatedStim\tEvalStimType(0=NeverCuedSecondCuedSide,1=FirstCuedStim)\tEvalResp\tEvalRT\tEvalErrorCode\tEvalErrorName\tArrayPresTime\tWhenEvalHappens(0=BeforeRetroCue,1=AfterRetroCue,2=SearchTask)\tSingletonType(0=FirstCuedStimColour,1=NeverCuedSecondCuedSideColour,2=NovelColour)\tSingletonColour\tSingletonPosition\tSingletonOrientation\tTargetOrientation\tTargetPosition\tSearchResp\tSearchRT\tSearchErrorCode\tSearchErrorName\tPart(0=Practice,1=NoSearch,2=Search)\tAge\tGender\tHandedness\tGlassesOrContacts\t"
outputFile.write(outStr + "eol\n")
# Setup the Psycho variables (screen, stimuli, sounds, ect)
win = visual.Window(fullscr=True, screen=0, allowGUI=False, allowStencil=False, monitor='FenskeLabTestingComps', color='white', colorSpace='rgb', units='deg')
mon = monitors.Monitor('FenskeLabTestingComps')
# Independent clocks for trial timing, event timing, evaluation RTs, and ISIs.
trialClock = core.Clock()
eventClock = core.Clock()
evalClock = core.Clock()
isiClock = core.Clock()
keyResp = event.BuilderKeyResponse() # create an object of type KeyResponse
#creating the fixation cross (two short black lines crossing at the origin)
fixationVertical = visual.Line(win,start=(0,-0.3), end=(0,0.3), lineColor = u'black',lineWidth=3.0)
fixationHorizontal = visual.Line(win,start=(-0.3,0), end=(0.3,0), lineColor = u'black',lineWidth=3.0)
#create search stim (landolt c's) #literally drawing a landolt c
def GetVertices(size=0.44):
    """Return the vertex list for a Landolt-C search stimulus outline.

    The shape is a square outline of half-width ``size`` with a gap on the
    left side between -size/2 and +size/2 vertically.

    :param size: half-width of the C in degrees; defaults to 0.44, the value
        previously hard-coded as SEARCH_TARG_SIZE.
    :return: list of [x, y] vertex pairs tracing the outline.
    """
    half_gap = size / 2.0
    return [
        [-size, half_gap],
        [-size, size],
        [size, size],
        [size, -size],
        [-size, -size],
        [-size, -half_gap],
    ]
# Build the trial lists.  Each trial record has 7 elements:
#   [trialNum, firstCuedSide, firstCuedStim, secondCuedStim,
#    whichEvaluated, whenEvaluate, singletonType]
# where 5 is a "not applicable" placeholder and whenEvaluate == 2 marks a
# search-task trial.
def _build_retro_cue_trials(num_reps):
    """Return the fully crossed retro-cue trial list for num_reps repetitions.

    Crossed factors (each coded 0/1):
      firstCuedSide   0=Left, 1=Right
      firstCuedStim   0=Top, 1=Bottom
      secondCuedStim  0=Diagonal from FirstCued, 1=Horizontal from FirstCued
      whichEvaluated  0=NeverCuedStimSecondCuedSide, 1=FirstCuedStim
      whenEvaluate    0=BeforeRetroCue, 1=AfterRetroCue
    """
    trials = []
    trialNum = 0
    for rep in range(0, num_reps):
        for firstCuedSide in range(0, 2):
            for firstCuedStim in range(0, 2):
                for secondCuedStim in range(0, 2):
                    for whichEvaluated in range(0, 2):
                        for whenEvaluate in range(0, 2):
                            trials.append([trialNum, firstCuedSide, firstCuedStim,
                                           secondCuedStim, whichEvaluated, whenEvaluate, 5])
                            trialNum = trialNum + 1
    return trials

trialsArray = _build_retro_cue_trials(NUM_REPS)    # part 1 (no-search block)
trialsArray2 = _build_retro_cue_trials(NUM_REPS)   # part 2 starts with the same crossing
# Append the visual-search trials to part 2.  Record slots 3-5 are fixed at
# (5, 5, 2): the second-cue/evaluation factors do not apply and
# whenEvaluate == 2 flags the search task.  The final slot is singletonType
# (0=activematching, 1=accessorymatching, 2=novelcolour).
trial = len(trialsArray2)
for rep in range(0, NUM_SEARCH_REPS):
    for firstCuedSide in range(0, 2):      # 0=Left, 1=Right
        for firstCuedStim in range(0, 2):  # 0=Top, 1=Bottom
            for singletonType in range(0, 3):
                trialsArray2.append([trial, firstCuedSide, firstCuedStim, 5, 5, 2, singletonType])
                trial = trial + 1
#create arbitrary index lists that are shuffled to randomize trial / stimulus order.
# list(range(...)) rather than a bare range keeps the in-place shuffle working
# under Python 3, where range objects are immutable and cannot be shuffled
# (on Python 2 range() already returned a list, so behavior is unchanged).
idxTrialsArray = list(range(0, len(trialsArray))); random.shuffle(idxTrialsArray)
idxTrialsArray2 = list(range(0, len(trialsArray2))); random.shuffle(idxTrialsArray2)
idxBlueStim = list(range(0, NUM_STIMULI)); random.shuffle(idxBlueStim)
idxGreenStim = list(range(0, NUM_STIMULI)); random.shuffle(idxGreenStim)
idxPurpleStim = list(range(0, NUM_STIMULI)); random.shuffle(idxPurpleStim)
idxPinkStim = list(range(0, NUM_STIMULI)); random.shuffle(idxPinkStim)
idxOrangeStim = list(range(0, NUM_STIMULI)); random.shuffle(idxOrangeStim)
# Per-colour stimulus counters: start at -1 so the first increment yields index 0.
BlueStimCount = -1
GreenStimCount = -1
PurpleStimCount = -1
PinkStimCount = -1
OrangeStimCount = -1
blocktrial = 0      # trial counter within the current block
totalTrials = 0     # trial counter across the whole session
pracTrialCount = 0  # practice-trial counter
TrialType = "Practice"
# five memory-item colours; shuffled each trial to assign colours to positions
colourArray = ['Blue','Green','Purple','Pink','Orange']
# retro-cue arrow images, in the order BottomLeft, TopLeft, TopRight, BottomRight
arrowArray = ['Arrows/BottomLeft.png','Arrows/TopLeft.png','Arrows/TopRight.png','Arrows/BottomRight.png']
#INSTRUCTIONS
# Helper pair replacing ~10 copies of the same 5-line show-slide sequence:
# build one full-screen instruction slide, and present a slide until keypress.
def _make_slide(imageName):
    """Return a full-screen ImageStim for one instruction slide."""
    return visual.ImageStim(win=win, image=imageName, pos=[0, 0], size=(36, 23))

def _present(slide):
    """Draw *slide* until the participant presses any key, then hide it."""
    slide.setAutoDraw(True)
    win.flip()
    event.waitKeys()
    slide.setAutoDraw(False)

# General task instructions (identical for both counterbalance groups).
for slideName in ['Slide01.jpg', 'Slide02.jpg', 'Slide03.jpg', 'Slide04.jpg']:
    Instruct1 = _make_slide(slideName)
    _present(Instruct1)
# Response-mapping slides differ by button counterbalance
# (0: up=same / down=different; 1: up=different / down=same).
if buttonCounterbalance == 0:
    Instruct1 = _make_slide('Slide05.jpg')
if buttonCounterbalance == 1:
    Instruct1 = _make_slide('Slide06.jpg')
_present(Instruct1)
if buttonCounterbalance == 0:
    Instruct1 = _make_slide('Slide07.jpg')
if buttonCounterbalance == 1:
    Instruct1 = _make_slide('Slide08.jpg')
_present(Instruct1)
Instruct1 = _make_slide('Slide09.jpg')
_present(Instruct1)
Instruct1 = _make_slide('Slide10.jpg')
_present(Instruct1)
if buttonCounterbalance == 0:
    Instruct1 = _make_slide('Slide11.jpg')
if buttonCounterbalance == 1:
    Instruct1 = _make_slide('Slide12.jpg')
_present(Instruct1)
win.flip()  # clear the screen between instruction sets
if buttonCounterbalance == 0:
    Instruct1 = _make_slide('Slide20.jpg')
if buttonCounterbalance == 1:
    Instruct1 = _make_slide('Slide21.jpg')
_present(Instruct1)
win.flip()      # blank screen
core.wait(2.0)  # short pause before the practice trials begin
# Rolling record of recent trials — presumably used to constrain repeats in
# the (not visible here) main trial loop; TODO confirm against that loop.
lastFourTrialsArray = [0,0,0,0]
previousTrial = 0
twoTrialsAgo = 0
threeTrialsAgo = 0
fourTrialsAgo = 0
# 1-4 rating scale shown beneath the evaluated stimulus during the evaluation task
ratingScaleText = visual.TextStim(win=win, ori=0, name='ratingScaleText', text='1 - 2 - 3 - 4', font=u'Arial', pos=[0, -5], height=0.75, wrapWidth=None, color='black', colorSpace=u'rgb', opacity=1, depth=-1.0)
#Practice Trials
for ptrial in range(0, 20):
pracTrialCount = pracTrialCount + 1
random.shuffle(colourArray)
runTime = time.strftime("%c")
#put memory task text on screen
memTextOnScreen = visual.TextStim(win=win, ori=0, name='memTextOnScreen', text='Memorize', font=u'Arial', pos=[0, 0], height=0.75, wrapWidth=None, color='black', colorSpace=u'rgb', opacity=1, depth=-1.0)
memTextOnScreen.setAutoDraw(True)
win.flip()
trialClock.reset()
while trialClock.getTime() < MEMORY_TEXT_TIME:pass
#Fixation Cross
memTextOnScreen.setAutoDraw(False)
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
win.flip()
while trialClock.getTime() < FIXATION_TIME + MEMORY_TEXT_TIME: pass
#Decide Positions of Each Color
FirstColour = colourArray[0]
SecondColour = colourArray[1]
ThirdColour = colourArray[2]
FourthColour = colourArray[3]
FirstPos = POSARRAY[0]
SecondPos = POSARRAY[1]
ThirdPos = POSARRAY[2]
FourthPos = POSARRAY[3]
#Create Stimuli
FirstItem = visual.ImageStim(win=win,image= 'Images/' + colourArray[0] + '/' + colourArray[0] + '498.bmp', pos= FirstPos, size=(SIZE,SIZE))
SecondItem = visual.ImageStim(win=win,image= 'Images/' + colourArray[1] + '/' + colourArray[1] + '498.bmp', pos= SecondPos, size=(SIZE,SIZE))
ThirdItem = visual.ImageStim(win=win,image= 'Images/' + colourArray[2] + '/' + colourArray[2] + '498.bmp', pos= ThirdPos, size=(SIZE,SIZE))
FourthItem = visual.ImageStim(win=win,image= 'Images/' + colourArray[3] + '/' + colourArray[3] + '498.bmp', pos= FourthPos, size=(SIZE,SIZE))
#Put Stimulus Array on Screen
FirstItem.setAutoDraw(True);SecondItem.setAutoDraw(True);ThirdItem.setAutoDraw(True);FourthItem.setAutoDraw(True)
win.flip()
while trialClock.getTime() < STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
#Take Stimulus Array off Screen
FirstItem.setAutoDraw(False);SecondItem.setAutoDraw(False);ThirdItem.setAutoDraw(False);FourthItem.setAutoDraw(False)
win.flip()
while trialClock.getTime() < FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
#decide where first cue points and create first cue
firstPracCuePos = random.randint(0,3)
firstCueArrow = visual.ImageStim(win=win,image= arrowArray[firstPracCuePos], pos= [0,0], size=(1,1))
#decide where second cue points and create second cue
secondPracCuePos = random.randint(0,1)
if firstPracCuePos == 0 or firstPracCuePos == 1:#First cue pointed to left side of screen
secondPracCuePos = secondPracCuePos + 2
secondCueArrow = visual.ImageStim(win=win,image= arrowArray[secondPracCuePos], pos= [0,0], size=(1,1))
if firstPracCuePos == 2 or firstPracCuePos == 3:#First cue point to right side of screen
secondCueArrow = visual.ImageStim(win=win,image= arrowArray[secondPracCuePos], pos= [0,0], size=(1,1))
#Find out position of evaluated stimulus
pracWhichEval = random.randint(0,1)#0=NeverCuedStimSecondCuedSide, 1=FirstCuedStim
if pracWhichEval == 0:#NeverCuedStimSeconCuedSide
if secondPracCuePos == 0:pracEvalPos = POSARRAY[1]
if secondPracCuePos == 1:pracEvalPos = POSARRAY[0]
if secondPracCuePos == 2:pracEvalPos = POSARRAY[3]
if secondPracCuePos == 3:pracEvalPos = POSARRAY[2]
if pracWhichEval == 1: #FirstCuedStim
pracEvalPos = POSARRAY[firstPracCuePos]
#Determine which actual stimulus will be evaluated
if pracEvalPos == FirstPos:pracEvalImgName = 'Images/' + colourArray[0] + '/' + colourArray[0] + '498.bmp'
if pracEvalPos == SecondPos:pracEvalImgName = 'Images/' + colourArray[1] + '/' + colourArray[1] + '498.bmp'
if pracEvalPos == ThirdPos:pracEvalImgName = 'Images/' + colourArray[2] + '/' + colourArray[2] + '498.bmp'
if pracEvalPos == FourthPos:pracEvalImgName = 'Images/' + colourArray[3] + '/' + colourArray[3] + '498.bmp'
whenEvalHappens = random.randint(0,1)#0=BeforeRetroCue,1=AfterRetroCue, - No search task practice 2=SearchTask
if whenEvalHappens == 0:#Evaluate before RetroCue
while trialClock.getTime() < EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if whenEvalHappens == 1 or whenEvalHappens == 2:#Evaluate After RetroCue or SearchTask
#First cue arrow on screen
firstCueArrow.setAutoDraw(True)
#Take fixation cross off screen
fixationVertical.setAutoDraw(False);fixationHorizontal.setAutoDraw(False)
win.flip()
while trialClock.getTime() < FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
#Fixation on Screen
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
#Take off first cue arrow
firstCueArrow.setAutoDraw(False)
win.flip()
while trialClock.getTime() < SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if whenEvalHappens == 2:#Search Task
#determine singleton type
singType = random.randint(0,2)#0=activematching,1=accessorymatching,2=novelcolour
#setup target locations in a circle around fixation
targetPositions = []
for pos in range(0, NUM_TARGET_POS):#8 target positions
angle = math.radians(360 / NUM_TARGET_POS * pos)
targetPositions.append([math.cos(angle)*SEARCH_ECCEN, math.sin(angle)*SEARCH_ECCEN])#setup target locations in a circle around fixation
#set stimulus properties
distractorOris = [0, 180] #orientations for the landolt c stimuli #gap pointing left or right for all distractors.
idxDistractorOris = range(len(distractorOris))
targetOris = [90,270]
idxTargetOris = range(len(targetOris))
#setup search stim
searchStim = [] #create a list of 8 distractor stim to be used on each trial -- these stim are all leftOpen so later on we will randomly assign them an orientation (90 or 180) so they become top/bottom open
for distractor in range (0, NUM_SEARCH_POS):
distractor = visual.ShapeStim(win, lineColorSpace='rgb', fillColorSpace = 'rgb', fillColor=bCol, lineColor=sCol, vertices=GetVertices(), closeShape=False, lineWidth = 9)
searchStim.append(distractor)
#set colour of singleton based on condition
if singType == 0:#activematching
singCol = colourArray[firstPracCuePos]
elif singType == 1:#accessorymatching
if secondPracCuePos == 0:
singCol = colourArray[1]
if secondPracCuePos == 1:
singCol = colourArray[0]
if secondPracCuePos == 2:
singCol = colourArray[3]
if secondPracCuePos == 3:
singCol = colourArray[2]
else:#novelmatching
singCol = colourArray[4]
if singCol == 'Blue':
singColName = 'Blue'
singCol = [-.875,-.827,.639]
if singCol == 'Green':
singColName = 'Green'
singCol = [-.671,.129,-.537]
if singCol == 'Purple':
singColName = 'Purple'
singCol = [.545,-.576,.78]
#if singCol == 'Yellow':
# singCol = [.843,.835,-.522]
if singCol == 'Orange':
singColName = 'Orange'
singCol = [.435,-.255,-.741]
if singCol == 'Pink':
singColName = 'Pink'
singCol = [.89,.231,.757]
#if singCol == 'Red':
# singCol = [.075,-.757,-.725]
random.shuffle(idxSearchPos) #randomly shuffle the list of positions in the search array
#Search display #create 8 distractors
for item in range (0, NUM_TARGET_POS):
random.shuffle(idxDistractorOris)
searchStim[item].ori=distractorOris[idxDistractorOris[0]]
searchStim[item].pos = targetPositions[idxSearchPos[item]]
searchStim[item].setAutoDraw(True)
tarPos = searchStim[0].pos
singPos = searchStim[1].pos
#here you're changing one of the previously created distractors to be the target. So first item in SearchStim list is being changed to have targetOrientation.
random.shuffle(idxTargetOris)
targetPos = 0
targetOri = targetOris[idxTargetOris[0]]
searchStim[targetPos].ori=targetOri
searchStim[targetPos].setAutoDraw(True)
#here you're changing one of the previously created distractors to be the singleton. So second item in SearchStim list is being changed to singletoncolour.
singletonPos = 1
# if searchCond == 4:
searchStim[singletonPos].setLineColor(singCol)
# else:
# searchStim[singletonPos].setLineColor('blue')
searchStim[singletonPos].setAutoDraw(True)
#put the whole search task on the screen including target, singleton, and 6 neutral distractors
win.flip()
#SEARCH TASK: wait for response
#io.clearEvents('all')
eventClock.reset()
keyResp.status = NOT_STARTED
searchErrorCode = -1
searchErrorName = ''
noResponseYet = True
keyResp.keys = [] #just the last key pressed
keyResp.rt = -1
while noResponseYet:
t = eventClock.getTime()
#initialize key checker
if keyResp.status == NOT_STARTED:
keyResp.tStart = t
keyResp.status = STARTED
keyResp.clock.reset()
#event.getKeys()
event.clearEvents()
#check for a keyboard response
theseKeys = event.getKeys(keyList=['up','down'])
if len(theseKeys) > 0: #test if atleast one key pressed
if keyResp.keys == []:
keyResp.keys = theseKeys[-1] #just the last key pressed
keyResp.rt = keyResp.clock.getTime()
searchResponse = keyResp.keys
searchRT = keyResp.rt
noResponseYet = False
if (targetOri != 90 and keyResp.keys=='up') or (targetOri != 270 and keyResp.keys=='down'):
searchErrorCode = 2
searchErrorName = "Incorrect response"
# check for quit (the [Esc] key)
if event.getKeys(["escape"]):
core.quit()
#Search delay
for item in range (0, NUM_TARGET_POS):
searchStim[item].setAutoDraw(False)
win.flip()
else:
#Evaluation Task
fixationVertical.setAutoDraw(False); fixationHorizontal.setAutoDraw(False)
evalTextOnScreen = visual.TextStim(win=win, ori=0, name='evalTextOnScreen', text='Evaluate', font=u'Arial', pos=[0, 0], height=0.75, wrapWidth=None, color='black', colorSpace=u'rgb', opacity=1, depth=-1.0)
evalTextOnScreen.setAutoDraw(True)
win.flip()
if whenEvalHappens == 0:
while trialClock.getTime() < EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if whenEvalHappens == 1:
while trialClock.getTime() < EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
evalTextOnScreen.setAutoDraw(False)
ratingScaleText.setAutoDraw(True)
win.flip()
if whenEvalHappens == 0:
while trialClock.getTime() < EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if whenEvalHappens == 1:
while trialClock.getTime() < EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
#Create evaluated stimulus
evaluatedstimulus = visual.ImageStim(win=win,image = pracEvalImgName, pos=[0,0], size=(SIZE,SIZE))
evaluatedstimulus.setAutoDraw(True)
win.flip()
eventClock.reset()
stimOnScreen = True
keyResp.status = NOT_STARTED
evalErrorCode = -1
evalErrorName = ''
noResponseYet = True
probeOnScreen = True
keyResp.keys = [] #just the last key pressed
keyResp.rt = -1
while noResponseYet:
t = eventClock.getTime()
#initialize key checker
if keyResp.status == NOT_STARTED:
keyResp.tStart = t
keyResp.status = STARTED
keyResp.clock.reset()
#event.getKeys()
event.clearEvents()
#check for a keyboard response
theseKeys = event.getKeys(keyList=['1','2','3','4'])
if len(theseKeys) > 0: #test if atleast one key pressed
if keyResp.keys == []:
keyResp.keys = theseKeys[-1] #just the last key pressed
keyResp.rt = keyResp.clock.getTime()
evalResponse = keyResp.keys
evalRT = keyResp.rt
noResponseYet = False
if stimOnScreen and t > EVAL_STIM_ON_SCREEN:
evaluatedstimulus.setAutoDraw(False)
win.flip()
stimOnScreen = False
# check for quit (the [Esc] key)
if event.getKeys(["escape"]):
core.quit()
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
evaluatedstimulus.setAutoDraw(False)
ratingScaleText.setAutoDraw(False)
win.flip()
if whenEvalHappens == 0:
while trialClock.getTime() < FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if whenEvalHappens == 1:
while trialClock.getTime() < FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
if whenEvalHappens == 0:
#Fixation on Screen
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
win.flip()
while trialClock.getTime() < FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
#First cue arrow on screen
firstCueArrow.setAutoDraw(True)
#Take fixation cross off screen
fixationVertical.setAutoDraw(False);fixationHorizontal.setAutoDraw(False)
win.flip()
while trialClock.getTime() < FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
#Fixation on Screen
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
#Take off first cue arrow
firstCueArrow.setAutoDraw(False)
win.flip()
while trialClock.getTime() < SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
fixationVertical.setAutoDraw(False);fixationHorizontal.setAutoDraw(False)
if whenEvalHappens == 1:
while trialClock.getTime() < EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
#Back to Memory Task
fixationVertical.setAutoDraw(False);fixationHorizontal.setAutoDraw(False)
rememberTextOnScreen = visual.TextStim(win=win, ori=0, name='rememberTextOnScreen', text='Same?', font=u'Arial', pos=[0, 0], height=0.75, wrapWidth=None, color='black', colorSpace=u'rgb', opacity=1, depth=-1.0)
rememberTextOnScreen.setAutoDraw(True)
win.flip()
if whenEvalHappens == 0:
while trialClock.getTime() < REMEMBER_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if whenEvalHappens == 1:
while trialClock.getTime() < REMEMBER_TEXT_TIME + EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
rememberTextOnScreen.setAutoDraw(False)
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
win.flip()
if whenEvalHappens == 0:
while trialClock.getTime() < FIXATION_TIME + REMEMBER_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if whenEvalHappens == 1:
while trialClock.getTime() < FIXATION_TIME + REMEMBER_TEXT_TIME + EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
fixationVertical.setAutoDraw(False);fixationHorizontal.setAutoDraw(False)
#First Probe
firstPracProbeCuedUncued = random.randint(0,1)#0=Cued,1=Uncued
firstPracProbeRand = random.randint(0,2)#Random number for if it's an uncued stimulus being probed
firstPosition = POSARRAY[firstPracCuePos]#position of first cued stimulus
if firstPracProbeCuedUncued == 0:#Cued Stim is Probed
if firstPosition == FirstPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[0] + '/' + colourArray[0] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = colourArray[0] + '498'
if firstPosition == SecondPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[1] + '/' + colourArray[1] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = colourArray[1] + '498'
if firstPosition == ThirdPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[2] + '/' + colourArray[2] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = colourArray[2] + '498'
if firstPosition == FourthPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[3] + '/' + colourArray[3] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = colourArray[3] + '498'
if firstPracProbeCuedUncued == 1:#Uncued Stim is Probed
UncuedProbedStim = POSARRAY.index(firstPosition)
if UncuedProbedStim == 0:
if firstPracProbeRand == 0:FirstProbePos = POSARRAY[1]
if firstPracProbeRand == 1:FirstProbePos = POSARRAY[2]
if firstPracProbeRand == 2:FirstProbePos = POSARRAY[3]
if UncuedProbedStim == 1:
if firstPracProbeRand == 0:FirstProbePos = POSARRAY[0]
if firstPracProbeRand == 1:FirstProbePos = POSARRAY[2]
if firstPracProbeRand == 2:FirstProbePos = POSARRAY[3]
if UncuedProbedStim == 2:
if firstPracProbeRand == 0:FirstProbePos = POSARRAY[0]
if firstPracProbeRand == 1:FirstProbePos = POSARRAY[1]
if firstPracProbeRand == 2:FirstProbePos = POSARRAY[3]
if UncuedProbedStim == 3:
if firstPracProbeRand == 0:FirstProbePos = POSARRAY[0]
if firstPracProbeRand == 1:FirstProbePos = POSARRAY[1]
if firstPracProbeRand == 2:FirstProbePos = POSARRAY[2]
if FirstProbePos == FirstPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[0] + '/' + colourArray[0] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = colourArray[0] + '498'
if FirstProbePos == SecondPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[1] + '/' + colourArray[1] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = colourArray[1] + '498'
if FirstProbePos == ThirdPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[2] + '/' + colourArray[2] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = colourArray[2] + '498'
if FirstProbePos == FourthPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[3] + '/' + colourArray[3] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = colourArray[3] + '498'
FirstProbe.setAutoDraw(True)
win.flip()
eventClock.reset()
stimOnScreen = True
keyResp.status = NOT_STARTED
firstProbeErrorCode = -1
firstProbeErrorName = ''
noResponseYet = True
probeOnScreen = True
keyResp.keys = [] #just the last key pressed
keyResp.rt = -1
while noResponseYet:
t = eventClock.getTime()
#initialize key checker
if keyResp.status == NOT_STARTED:
keyResp.tStart = t
keyResp.status = STARTED
keyResp.clock.reset()
#event.getKeys()
event.clearEvents()
#check for a keyboard response
theseKeys = event.getKeys(keyList=['up','down'])
if noResponseYet == True:
if len(theseKeys) > 0: #test if atleast one key pressed
if keyResp.keys == []:
FirstProbe.setAutoDraw(False)
keyResp.keys = theseKeys[-1] #just the last key pressed
keyResp.rt = keyResp.clock.getTime()
FirstProbeResponse = keyResp.keys
FirstProbeRT = keyResp.rt
if buttonCounterbalance == 0:
if keyResp.keys == 'up' and firstPracProbeCuedUncued == 1:
firstProbeErrorCode = 2
firstProbeErrorName = 'incorrect response'
if keyResp.keys == 'down' and firstPracProbeCuedUncued == 0:
firstProbeErrorCode = 2
firstProbeErrorName = 'incorrect response'
if buttonCounterbalance == 1:
if keyResp.keys == 'down' and firstPracProbeCuedUncued == 1:
firstProbeErrorCode = 2
firstProbeErrorName = 'incorrect response'
if keyResp.keys == 'up' and firstPracProbeCuedUncued == 0:
firstProbeErrorCode = 2
firstProbeErrorName = 'incorrect response'
noResponseYet = False
if stimOnScreen and t > FIRST_PROBE_ON_SCREEN:
FirstProbe.setAutoDraw(False)
win.flip()
stimOnScreen = False
# check for quit (the [Esc] key)
if event.getKeys(["escape"]):
core.quit()
FirstProbe.setAutoDraw(False)
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
if firstProbeErrorCode == -1:
feedbackTextOnScreen = visual.TextStim(win=win, ori=0, name='feedbackTextOnScreen', text='Correct', font=u'Arial', pos=[0, 2], height=0.75, wrapWidth=None, color='green', colorSpace=u'rgb', opacity=1, depth=-1.0)
if firstProbeErrorCode == 2:
feedbackTextOnScreen = visual.TextStim(win=win, ori=0, name='feedbackTextOnScreen', text='Incorrect', font=u'Arial', pos=[0, 2], height=0.75, wrapWidth=None, color='red', colorSpace=u'rgb', opacity=1, depth=-1.0)
feedbackTextOnScreen.setAutoDraw(True)
win.flip()
if whenEvalHappens == 0:
while trialClock.getTime() < FEEDBACK_TIME + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if whenEvalHappens == 1:
while trialClock.getTime() < FEEDBACK_TIME + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
feedbackTextOnScreen.setAutoDraw(False)
win.flip()
if whenEvalHappens == 0:
while trialClock.getTime() < THIRD_RETENTION_INTERVAL + FEEDBACK_TIME + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if whenEvalHappens == 1:
while trialClock.getTime() < THIRD_RETENTION_INTERVAL + FEEDBACK_TIME + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
fixationVertical.setAutoDraw(False)
fixationHorizontal.setAutoDraw(False)
secondCueArrow.setAutoDraw(True)
win.flip()
if whenEvalHappens == 0:
while trialClock.getTime() < SECOND_CUE_TIME + THIRD_RETENTION_INTERVAL + FEEDBACK_TIME + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if whenEvalHappens == 1:
while trialClock.getTime() < SECOND_CUE_TIME + THIRD_RETENTION_INTERVAL + FEEDBACK_TIME + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
fixationVertical.setAutoDraw(True)
fixationHorizontal.setAutoDraw(True)
secondCueArrow.setAutoDraw(False)
win.flip()
if whenEvalHappens == 0:
while trialClock.getTime() < FOURTH_RETENTION_INTERVAL + SECOND_CUE_TIME + THIRD_RETENTION_INTERVAL + FEEDBACK_TIME + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if whenEvalHappens == 1:
while trialClock.getTime() < FOURTH_RETENTION_INTERVAL + SECOND_CUE_TIME + THIRD_RETENTION_INTERVAL + FEEDBACK_TIME + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
#Second Probe
secondPracProbeCuedUncued = random.randint(0,1)#0=Cued,1=Uncued
secondPracProbeRand = random.randint(0,2)#Random number for if it's an uncued stimulus being probed
secondPosition = POSARRAY[secondPracCuePos]#position of second cued stimulus
if secondPracProbeCuedUncued == 0:#Cued Stim is Probed
if secondPosition == FirstPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[0] + '/' + colourArray[0] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = colourArray[0] + '498'
if secondPosition == SecondPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[1] + '/' + colourArray[1] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = colourArray[1] + '498'
if secondPosition == ThirdPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[2] + '/' + colourArray[2] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = colourArray[2] + '498'
if secondPosition == FourthPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[3] + '/' + colourArray[3] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = colourArray[3] + '498'
if secondPracProbeCuedUncued == 1:#Uncued Stim is Probed
UncuedProbedStim = POSARRAY.index(secondPosition)
if UncuedProbedStim == 0:
if secondPracProbeRand == 0:SecondProbePos= POSARRAY[1]
if secondPracProbeRand == 1:SecondProbePos = POSARRAY[2]
if secondPracProbeRand == 2:SecondProbePos = POSARRAY[3]
if UncuedProbedStim == 1:
if secondPracProbeRand == 0:SecondProbePos = POSARRAY[0]
if secondPracProbeRand == 1:SecondProbePos = POSARRAY[2]
if secondPracProbeRand == 2:SecondProbePos = POSARRAY[3]
if UncuedProbedStim == 2:
if secondPracProbeRand == 0:SecondProbePos = POSARRAY[0]
if secondPracProbeRand == 1:SecondProbePos = POSARRAY[1]
if secondPracProbeRand == 2:SecondProbePos = POSARRAY[3]
if UncuedProbedStim == 3:
if secondPracProbeRand == 0:SecondProbePos = POSARRAY[0]
if secondPracProbeRand == 1:SecondProbePos = POSARRAY[1]
if secondPracProbeRand == 2:SecondProbePos = POSARRAY[2]
if SecondProbePos == FirstPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[0] + '/' + colourArray[0] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = colourArray[0] + '498'
if SecondProbePos == SecondPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[1] + '/' + colourArray[1] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = colourArray[1] + '498'
if SecondProbePos == ThirdPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[2] + '/' + colourArray[2] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = colourArray[2] + '498'
if SecondProbePos == FourthPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/' + colourArray[3] + '/' + colourArray[3] + '498.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = colourArray[3] + '498'
# --- Present the second practice probe and collect an up/down response ---
# The probe stays on screen at most FIRST_PROBE_ON_SCREEN seconds but the loop
# keeps waiting (blank screen) until a response arrives.  Error code 2 marks a
# response inconsistent with the cued/uncued status under the current
# buttonCounterbalance mapping; -1 (the initial value) therefore means correct.
SecondProbe.setAutoDraw(True)
win.flip()
eventClock.reset()
stimOnScreen = True
keyResp.status = NOT_STARTED
secondProbeErrorCode = -1
secondProbeErrorName = ''
noResponseYet = True
probeOnScreen = True
keyResp.keys = [] #just the last key pressed
keyResp.rt = -1
while noResponseYet:
t = eventClock.getTime()
#initialize key checker
if keyResp.status == NOT_STARTED:
keyResp.tStart = t
keyResp.status = STARTED
keyResp.clock.reset()
#event.getKeys()
# Flush stale keypresses so RT is measured from probe onset, not earlier events.
event.clearEvents()
#check for a keyboard response
theseKeys = event.getKeys(keyList=['up','down'])
if noResponseYet == True:
if len(theseKeys) > 0: #test if atleast one key pressed
if keyResp.keys == []:
SecondProbe.setAutoDraw(False)
keyResp.keys = theseKeys[-1] #just the last key pressed
keyResp.rt = keyResp.clock.getTime()
SecondProbeResponse = keyResp.keys
SecondProbeRT = keyResp.rt
# Response mapping: counterbalance 0 -> 'up' means "cued", 'down' means "uncued";
# counterbalance 1 is the reverse.  A mismatch with secondPracProbeCuedUncued
# is scored as incorrect.
if buttonCounterbalance == 0:
if keyResp.keys == 'up' and secondPracProbeCuedUncued == 1:
secondProbeErrorCode = 2
secondProbeErrorName = 'incorrect response'
if keyResp.keys == 'down' and secondPracProbeCuedUncued == 0:
secondProbeErrorCode = 2
secondProbeErrorName = 'incorrect response'
if buttonCounterbalance == 1:
if keyResp.keys == 'down' and secondPracProbeCuedUncued == 1:
secondProbeErrorCode = 2
secondProbeErrorName = 'incorrect response'
if keyResp.keys == 'up' and secondPracProbeCuedUncued == 0:
secondProbeErrorCode = 2
secondProbeErrorName = 'incorrect response'
noResponseYet = False
# Remove the probe after its display deadline even if no response yet.
if stimOnScreen and t > FIRST_PROBE_ON_SCREEN:
SecondProbe.setAutoDraw(False)
win.flip()
stimOnScreen = False
# check for quit (the [Esc] key)
if event.getKeys(["escape"]):
core.quit()
# --- Feedback and inter-trial interval for the practice trial ---
# Show green 'Correct' (error code -1) or red 'Incorrect' (code 2), then busy-wait
# until the cumulative trial deadline.  The deadline is the sum of every fixed
# event duration on this trial plus the measured RTs (evalRT, FirstProbeRT,
# SecondProbeRT); the two branches differ only in which evaluation-timing
# constants apply (whenEvalHappens 0 = eval before retro-cue, 1 = after).
SecondProbe.setAutoDraw(False)
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
if secondProbeErrorCode == -1:
feedbackTextOnScreen = visual.TextStim(win=win, ori=0, name='feedbackTextOnScreen', text='Correct', font=u'Arial', pos=[0, 2], height=0.75, wrapWidth=None, color='green', colorSpace=u'rgb', opacity=1, depth=-1.0)
if secondProbeErrorCode == 2:
feedbackTextOnScreen = visual.TextStim(win=win, ori=0, name='feedbackTextOnScreen', text='Incorrect', font=u'Arial', pos=[0, 2], height=0.75, wrapWidth=None, color='red', colorSpace=u'rgb', opacity=1, depth=-1.0)
feedbackTextOnScreen.setAutoDraw(True)
win.flip()
if whenEvalHappens == 0:
while trialClock.getTime() < FEEDBACK_TIME + SecondProbeRT + FOURTH_RETENTION_INTERVAL + SECOND_CUE_TIME + THIRD_RETENTION_INTERVAL + FEEDBACK_TIME + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if whenEvalHappens == 1:
while trialClock.getTime() < FEEDBACK_TIME + SecondProbeRT + FOURTH_RETENTION_INTERVAL + SECOND_CUE_TIME + THIRD_RETENTION_INTERVAL + FEEDBACK_TIME + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
feedbackTextOnScreen.setAutoDraw(False)
win.flip()
#Inter-trial interval
eventClock.reset()
fixationHorizontal.setAutoDraw(False);fixationVertical.setAutoDraw(False)
win.flip()
while eventClock.getTime() < INTER_TRIAL_INTERVAL: pass
# --- Assemble and write one tab-separated data row for this practice trial ---
# Column order: timestamp, trial counter, trial type, then for each of the five
# colours the stimulus name ('<Colour>498' in practice) and either its screen
# position or 'novelcolour' if that colour was the withheld 5th colour
# (colourArray index 4).  Then cue side (0=left, 1=right), cue position, and the
# probe/evaluation measures.  Probe accuracy is recoded to 1 (errorCode -1) or
# 0 (any error code).  Search-task columns are filled with 'searchtask'/'nosearch'
# placeholders depending on the condition.  Row is terminated with 'eol'.
outStr = str(runTime) + '\t'
outStr = outStr + str(pracTrialCount) + '\t'
outStr = outStr + str(TrialType) + '\t'
outStr = outStr + 'Blue498' + '\t'
if colourArray.index('Blue') < 4:
outStr = outStr + str(POSARRAY[colourArray.index('Blue')]) + '\t'
if colourArray.index('Blue') == 4:
outStr = outStr + 'novelcolour' + '\t'
outStr = outStr + 'Green498' + '\t'
if colourArray.index('Green') < 4:
outStr = outStr + str(POSARRAY[colourArray.index('Green')]) + '\t'
if colourArray.index('Green') == 4:
outStr = outStr + 'novelcolour' + '\t'
outStr = outStr + 'Purple498' + '\t'
if colourArray.index('Purple') < 4:
outStr = outStr + str(POSARRAY[colourArray.index('Purple')]) + '\t'
if colourArray.index('Purple') == 4:
outStr = outStr + 'novelcolour' + '\t'
outStr = outStr + 'Pink498' + '\t'
if colourArray.index('Pink') < 4:
outStr = outStr + str(POSARRAY[colourArray.index('Pink')]) + '\t'
if colourArray.index('Pink') == 4:
outStr = outStr + 'novelcolour' + '\t'
outStr = outStr + 'Orange498' + '\t'
if colourArray.index('Orange') < 4:
outStr = outStr + str(POSARRAY[colourArray.index('Orange')]) + '\t'
if colourArray.index('Orange') == 4:
outStr = outStr + 'novelcolour' + '\t'
# Cue side: positions 0/1 are one side (coded 0), 2/3 the other (coded 1).
if firstPracCuePos == 0 or firstPracCuePos == 1:
outStr = outStr + '0' + '\t'
if firstPracCuePos == 2 or firstPracCuePos == 3:
outStr = outStr + '1' + '\t'
outStr = outStr + str(POSARRAY[firstPracCuePos]) + '\t'
# Probe and evaluation columns only exist when an evaluation occurred
# (whenEvalHappens < 2); the search-task condition writes placeholders below.
if whenEvalHappens < 2:
outStr = outStr + str(secondPosition) + '\t'
outStr = outStr + str(firstProbeStim) + '\t'
outStr = outStr + str(firstPracProbeCuedUncued) + '\t'
outStr = outStr + str(FirstProbeResponse) + '\t'
# Accuracy recode: -1 -> correct (1); 2/5/6 -> incorrect (0).
if firstProbeErrorCode == -1:
outStr = outStr + '1' + '\t'
if firstProbeErrorCode == 2:
outStr = outStr + '0' + '\t'
if firstProbeErrorCode == 5:
outStr = outStr + '0' + '\t'
if firstProbeErrorCode == 6:
outStr = outStr + '0' + '\t'
outStr = outStr + str(FirstProbeRT) + '\t'
outStr = outStr + str(firstProbeErrorCode) + '\t'
outStr = outStr + str(firstProbeErrorName) + '\t'
outStr = outStr + str(secondProbeStim) + '\t'
outStr = outStr + str(secondPracProbeCuedUncued) + '\t'
outStr = outStr + str(SecondProbeResponse) + '\t'
if secondProbeErrorCode == -1:
outStr = outStr + '1' + '\t'
if secondProbeErrorCode == 2:
outStr = outStr + '0' + '\t'
if secondProbeErrorCode == 5:
outStr = outStr + '0' + '\t'
if secondProbeErrorCode == 6:
outStr = outStr + '0' + '\t'
outStr = outStr + str(SecondProbeRT) + '\t'
outStr = outStr + str(secondProbeErrorCode) + '\t'
outStr = outStr + str(secondProbeErrorName) + '\t'
# Strip the directory part of the evaluated-image path for the log.
pracEvalImgName2 = pracEvalImgName.rsplit("/")[-1]
outStr = outStr + str(pracEvalImgName2) + '\t'
outStr = outStr + str(pracWhichEval) + '\t'
outStr = outStr + str(evalResponse) + '\t'
outStr = outStr + str(evalRT) + '\t'
outStr = outStr + str(evalErrorCode) + '\t'
outStr = outStr + str(evalErrorName) + '\t'
# Search-task condition (whenEvalHappens == 2): the 21 memory/eval columns above
# are replaced by 'searchtask' placeholders to keep the column count constant.
if whenEvalHappens == 2:
outStr = outStr + 'searchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\t'
outStr = outStr + str(STIM_TIME) + '\t'
outStr = outStr + str(whenEvalHappens) + '\t'
if whenEvalHappens == 2:
outStr = outStr + str(singType) + '\t'
outStr = outStr + str(singColName) + '\t'
outStr = outStr + str(singPos) + '\t'
# Singleton distractor orientation (searchStim[1]): 0 = gap left, 180 = gap right.
if searchStim[1].ori == 0:
outStr = outStr + 'left' + '\t'
if searchStim[1].ori == 180:
outStr = outStr + 'right' + '\t'
if targetOri == 90:
outStr = outStr + 'up' + '\t'
if targetOri == 270:
outStr = outStr + 'down' + '\t'
outStr = outStr + str(tarPos) + '\t'
outStr = outStr + str(searchResponse) + '\t'
outStr = outStr + str(searchRT) + '\t'
outStr = outStr + str(searchErrorCode) + '\t'
outStr = outStr + str(searchErrorName) + '\t'
if whenEvalHappens < 2:
outStr = outStr + 'nosearch\tnosearch\tnosearch\tnosearch\tnosearch\tnosearch\tnosearch\tnosearch\tnosearch\tnosearch\t'
outStr = outStr + '0' + '\t'
# Demographics collected at session start.
outStr = outStr + str(expInfo2['age']) + '\t'
outStr = outStr + str(expInfo3['gender']) + '\t'
outStr = outStr + str(expInfo4['handedness']) + '\t'
outStr = outStr + str(expInfo5['glasses/contacts?']) + '\t'
outputFile.write(outStr + 'eol\n')
# --- Post-practice instruction slides (advance on any keypress) ---
# Slide13 is shown to everyone; the following two slides are counterbalanced by
# response mapping (Slide11/Slide20 for mapping 0, Slide12/Slide21 for mapping 1).
Instruct1 = visual.ImageStim(win=win,image= 'Slide13.jpg', pos= [0,0], size=(36,23))
Instruct1.setAutoDraw(True)
win.flip()
event.waitKeys()
Instruct1.setAutoDraw(False)
if buttonCounterbalance == 0:
Instruct1 = visual.ImageStim(win=win,image= 'Slide11.jpg', pos= [0,0], size=(36,23))
if buttonCounterbalance == 1:
Instruct1 = visual.ImageStim(win=win,image= 'Slide12.jpg', pos= [0,0], size=(36,23))
Instruct1.setAutoDraw(True)
win.flip()
event.waitKeys()
Instruct1.setAutoDraw(False)
win.flip()
if buttonCounterbalance == 0:
Instruct1 = visual.ImageStim(win=win,image= 'Slide20.jpg', pos= [0,0], size=(36,23))
if buttonCounterbalance == 1:
Instruct1 = visual.ImageStim(win=win,image= 'Slide21.jpg', pos= [0,0], size=(36,23))
Instruct1.setAutoDraw(True)
win.flip()
event.waitKeys()
Instruct1.setAutoDraw(False)
win.flip()
# Brief pause before the real trials start.
core.wait(2.0)
#Actual Memory Trials
# --- Start of main trial loop (first half, 64 trials) ---
# Each trial: shuffle the colour-to-position mapping, show 'Memorize', fixation,
# then the four-item memory array.  All phase durations are enforced by
# busy-waiting on trialClock against cumulative deadlines.
TrialType = "FirstHalf"
for trial in range(0, 64):
random.shuffle(colourArray)
runTime = time.strftime("%c")
# Row of the (pre-shuffled) design matrix for this trial.
curRow = idxTrialsArray[trial]
#put memory task text on screen
memTextOnScreen = visual.TextStim(win=win, ori=0, name='memTextOnScreen', text='Memorize', font=u'Arial', pos=[0, 0], height=0.75, wrapWidth=None, color='black', colorSpace=u'rgb', opacity=1, depth=-1.0)
memTextOnScreen.setAutoDraw(True)
win.flip()
trialClock.reset()
while trialClock.getTime() < MEMORY_TEXT_TIME:pass
#Fixation Cross
memTextOnScreen.setAutoDraw(False)
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
win.flip()
while trialClock.getTime() < FIXATION_TIME + MEMORY_TEXT_TIME: pass
#Decide Positions of Each Color
# colourArray[0..3] are the four displayed colours; colourArray[4] is the
# withheld (novel) colour for this trial.  POSARRAY[0..3] are the fixed positions.
FirstColour = colourArray[0]
SecondColour = colourArray[1]
ThirdColour = colourArray[2]
FourthColour = colourArray[3]
FirstPos = POSARRAY[0]
SecondPos = POSARRAY[1]
ThirdPos = POSARRAY[2]
FourthPos = POSARRAY[3]
#Create Stimuli
# Advance every per-colour stimulus counter, then look up this trial's stimulus
# number for each colour from the pre-shuffled index lists.
BlueStimCount = BlueStimCount + 1
GreenStimCount = GreenStimCount + 1
PurpleStimCount = PurpleStimCount + 1
PinkStimCount = PinkStimCount + 1
OrangeStimCount = OrangeStimCount + 1
BlueCount = idxBlueStim[BlueStimCount]
GreenCount = idxGreenStim[GreenStimCount]
PurpleCount = idxPurpleStim[PurpleStimCount]
PinkCount = idxPinkStim[PinkStimCount]
OrangeCount = idxOrangeStim[OrangeStimCount]
# Build one ImageStim per displayed colour at its assigned position.
if colourArray.index('Blue') < 4:
BluePos = POSARRAY[colourArray.index('Blue')]
FirstItem = visual.ImageStim(win=win,image= 'Images/Blue/Blue' + str(BlueCount) + '.bmp', pos= BluePos, size=(SIZE,SIZE))
if colourArray.index('Green') < 4:
GreenPos = POSARRAY[colourArray.index('Green')]
SecondItem = visual.ImageStim(win=win,image= 'Images/Green/Green' + str(GreenCount) + '.bmp', pos= GreenPos, size=(SIZE,SIZE))
if colourArray.index('Purple') < 4:
PurplePos = POSARRAY[colourArray.index('Purple')]
ThirdItem = visual.ImageStim(win=win,image= 'Images/Purple/Purple' + str(PurpleCount) + '.bmp', pos= PurplePos, size=(SIZE,SIZE))
if colourArray.index('Pink') < 4:
PinkPos = POSARRAY[colourArray.index('Pink')]
FourthItem = visual.ImageStim(win=win,image= 'Images/Pink/Pink' + str(PinkCount) + '.bmp', pos= PinkPos, size=(SIZE,SIZE))
if colourArray.index('Orange') < 4:
OrangePos = POSARRAY[colourArray.index('Orange')]
# Backfill: whichever of Blue/Green/Purple/Pink is the novel colour (index 4)
# left its item variable unset, so that slot is filled with the Orange item
# (Orange is necessarily displayed in that case).
if colourArray.index('Blue') == 4:
FirstItem = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= OrangePos, size=(SIZE,SIZE))
if colourArray.index('Green') == 4:
SecondItem = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= OrangePos, size=(SIZE,SIZE))
if colourArray.index('Purple') == 4:
ThirdItem = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= OrangePos, size=(SIZE,SIZE))
if colourArray.index('Pink') == 4:
FourthItem = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= OrangePos, size=(SIZE,SIZE))
#Put Stimulus Array on Screen
FirstItem.setAutoDraw(True);SecondItem.setAutoDraw(True);ThirdItem.setAutoDraw(True);FourthItem.setAutoDraw(True)
win.flip()
while trialClock.getTime() < STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
#Take Stimulus Array off Screen
FirstItem.setAutoDraw(False);SecondItem.setAutoDraw(False);ThirdItem.setAutoDraw(False);FourthItem.setAutoDraw(False)
win.flip()
while trialClock.getTime() < FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
#decide where first cue points
# Design-matrix columns: [1] = side (0 left / 1 right), [2] = vertical (0 top /
# 1 bottom), giving first cue position 0-3 (0=left bottom, 1=left top,
# 2=right top, 3=right bottom).  Column [3] selects the second cue's relation
# to the first (0 = diagonal, 1 = horizontal), column [4] which item is
# evaluated, column [5] the evaluation/search condition.
if trialsArray[curRow][1] == 0 and trialsArray[curRow][2] == 0:firstCuePos=1#left top
if trialsArray[curRow][1] == 0 and trialsArray[curRow][2] == 1:firstCuePos=0#left bottom
if trialsArray[curRow][1] == 1 and trialsArray[curRow][2] == 0:firstCuePos=2#right top
if trialsArray[curRow][1] == 1 and trialsArray[curRow][2] == 1:firstCuePos=3#right bottom
firstCueArrow = visual.ImageStim(win=win,image= arrowArray[firstCuePos], pos= [0,0], size=(1,1))
if trialsArray[curRow][5] == 0 or trialsArray[curRow][5] == 1:
#where second cue points
if firstCuePos == 0:#left bottom
if trialsArray[curRow][3] == 0:secondCuePos=2#diagonal from left bottom is right top
if trialsArray[curRow][3] == 1:secondCuePos=3#horizontal from left bottom is right bottom
if firstCuePos == 1:#left top
if trialsArray[curRow][3] == 0:secondCuePos=3#diagonal from left top is right bottom
if trialsArray[curRow][3] == 1:secondCuePos=2#horizontal from left top is right top
if firstCuePos == 2:#right top
if trialsArray[curRow][3] == 0:secondCuePos=0#diagonal from right top is left bottom
if trialsArray[curRow][3] == 1:secondCuePos=1#horizontal from right top is left top
if firstCuePos == 3:#right bottom
if trialsArray[curRow][3] == 0:secondCuePos=1#diagonal from right bottom is left top
if trialsArray[curRow][3] == 1:secondCuePos=0#horizontal from right bottom is left bottom
secondCueArrow = visual.ImageStim(win=win,image= arrowArray[secondCuePos], pos= [0,0], size=(1,1))
#Find out position of evaluated stimulus
if trialsArray[curRow][4] == 0:#NeverCuedStimSecondCuedSide
# The never-cued item on the second-cued side: the vertical neighbour of the
# second-cued position (0<->1, 2<->3 are same-side pairs in POSARRAY).
if secondCuePos==0:EvalPos=POSARRAY[1]
if secondCuePos==1:EvalPos=POSARRAY[0]
if secondCuePos==2:EvalPos=POSARRAY[3]
if secondCuePos==3:EvalPos=POSARRAY[2]
if trialsArray[curRow][4] == 1:#FirstCuedStim
EvalPos=POSARRAY[firstCuePos]
#Determine which actual stimulus will be evaluated
# Map the evaluation position back to the colour occupying it to get the filename.
if colourArray.index('Blue') < 4:
if EvalPos == BluePos:evalImgName = 'Images/Blue/Blue' + str(BlueCount) + '.bmp'
if colourArray.index('Green') < 4:
if EvalPos == GreenPos:evalImgName = 'Images/Green/Green' + str(GreenCount) + '.bmp'
if colourArray.index('Purple') < 4:
if EvalPos == PurplePos:evalImgName = 'Images/Purple/Purple' + str(PurpleCount) + '.bmp'
if colourArray.index('Pink') < 4:
if EvalPos == PinkPos:evalImgName = 'Images/Pink/Pink' + str(PinkCount) + '.bmp'
if colourArray.index('Orange') < 4:
if EvalPos == OrangePos:evalImgName = 'Images/Orange/Orange' + str(OrangeCount) + '.bmp'
# --- Evaluation / search phase.  Condition column [5]: 0 = evaluate BEFORE the
# retro-cue, 1 = evaluate AFTER the retro-cue, 2 = visual search task instead
# of an evaluation.  All waits compare trialClock against the running sum of
# every preceding phase duration (plus measured RTs), so the exact constant
# sums differ per condition.
if trialsArray[curRow][5] == 0:#Evaluate before RetroCue
while trialClock.getTime() < EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray[curRow][5] == 1 or trialsArray[curRow][5] == 2:#Evaluate After RetroCue or SearchTask
#First cue arrow on screen
firstCueArrow.setAutoDraw(True)
#Take fixation cross off screen
fixationVertical.setAutoDraw(False);fixationHorizontal.setAutoDraw(False)
win.flip()
while trialClock.getTime() < FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
#Fixation on Screen
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
#Take off first cue arrow
firstCueArrow.setAutoDraw(False)
win.flip()
while trialClock.getTime() < SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray[curRow][5] == 2:#Search Task
#determine singleton type
singType = trialsArray[curRow][6]#0=activematching,1=accessorymatching,2=novelcolour
#setup target locations in a circle around fixation
targetPositions = []
for pos in range(0, NUM_TARGET_POS):#8 target positions
angle = math.radians(360 / NUM_TARGET_POS * pos)
targetPositions.append([math.cos(angle)*SEARCH_ECCEN, math.sin(angle)*SEARCH_ECCEN])#setup target locations in a circle around fixation
#set stimulus properties
distractorOris = [0, 180] #orientations for the landolt c stimuli #gap pointing left or right for all distractors.
idxDistractorOris = range(len(distractorOris))
targetOris = [90,270]
idxTargetOris = range(len(targetOris))
#setup search stim
searchStim = [] #create a list of 8 distractor stim to be used on each trial -- these stim are all leftOpen so later on we will randomly assign them an orientation (90 or 180) so they become top/bottom open
# NOTE: the loop variable is immediately shadowed by the ShapeStim assignment;
# the range only controls how many distractors are created.
for distractor in range (0, NUM_SEARCH_POS):
distractor = visual.ShapeStim(win, lineColorSpace='rgb', fillColorSpace = 'rgb', fillColor=bCol, lineColor=sCol, vertices=GetVertices(), closeShape=False, lineWidth = 9)
searchStim.append(distractor)
#set colour of singleton based on condition
if singType == 0:#activematching
# presumably the colour of the first-cued (active) memory item — TODO confirm
singCol = colourArray[firstCuePos]
elif singType == 1:#accessorymatching
secondCuedRand = random.randint(0,1)#0=top,1=bottom
# Pick a colour from the opposite side to the first cue.
if firstCuePos == 0 or firstCuePos == 1:
if secondCuedRand == 0:
singCol = colourArray[2]
if secondCuedRand == 1:
singCol = colourArray[3]
if firstCuePos == 2 or firstCuePos == 3:
if secondCuedRand == 0:
singCol = colourArray[1]
if secondCuedRand == 1:
singCol = colourArray[0]
else:#novelmatching
# The withheld 5th colour not shown in the memory array.
singCol = colourArray[4]
# Convert the colour name to its RGB triple (PsychoPy signed rgb space),
# keeping the name for the data file.
if singCol == 'Blue':
singColName = 'Blue'
singCol = [-.875,-.827,.639]
if singCol == 'Green':
singColName = 'Green'
singCol = [-.671,.129,-.537]
if singCol == 'Purple':
singColName = 'Purple'
singCol = [.545,-.576,.78]
#if singCol == 'Yellow':
# singCol = [.843,.835,-.522]
if singCol == 'Orange':
singColName = 'Orange'
singCol = [.435,-.255,-.741]
if singCol == 'Pink':
singColName = 'Pink'
singCol = [.89,.231,.757]
#if singCol == 'Red':
# singCol = [.075,-.757,-.725]
random.shuffle(idxSearchPos) #randomly shuffle the list of positions in the search array
#Search display #create 8 distractors
for item in range (0, NUM_TARGET_POS):
random.shuffle(idxDistractorOris)
searchStim[item].ori=distractorOris[idxDistractorOris[0]]
searchStim[item].pos = targetPositions[idxSearchPos[item]]
searchStim[item].setAutoDraw(True)
# By convention slot 0 becomes the target and slot 1 the colour singleton.
tarPos = searchStim[0].pos
singPos = searchStim[1].pos
#here you're changing one of the previously created distractors to be the target. So first item in SearchStim list is being changed to have targetOrientation.
random.shuffle(idxTargetOris)
targetPos = 0
targetOri = targetOris[idxTargetOris[0]]
searchStim[targetPos].ori=targetOri
searchStim[targetPos].setAutoDraw(True)
#here you're changing one of the previously created distractors to be the singleton. So second item in SearchStim list is being changed to singletoncolour.
singletonPos = 1
# if searchCond == 4:
searchStim[singletonPos].setLineColor(singCol)
# else:
# searchStim[singletonPos].setLineColor('blue')
searchStim[singletonPos].setAutoDraw(True)
#put the whole search task on the screen including target, singleton, and 6 neutral distractors
win.flip()
#SEARCH TASK: wait for response
#io.clearEvents('all')
eventClock.reset()
keyResp.status = NOT_STARTED
searchErrorCode = -1
searchErrorName = ''
noResponseYet = True
keyResp.keys = [] #just the last key pressed
keyResp.rt = -1
while noResponseYet:
t = eventClock.getTime()
#initialize key checker
if keyResp.status == NOT_STARTED:
keyResp.tStart = t
keyResp.status = STARTED
keyResp.clock.reset()
#event.getKeys()
event.clearEvents()
#check for a keyboard response
theseKeys = event.getKeys(keyList=['up','down'])
if len(theseKeys) > 0: #test if atleast one key pressed
if keyResp.keys == []:
keyResp.keys = theseKeys[-1] #just the last key pressed
keyResp.rt = keyResp.clock.getTime()
searchResponse = keyResp.keys
searchRT = keyResp.rt
noResponseYet = False
# Response must match the target gap direction (90 = up, 270 = down).
# NOTE(review): unlike the probe tasks this check ignores
# buttonCounterbalance — confirm that is intentional.
if (targetOri != 90 and keyResp.keys=='up') or (targetOri != 270 and keyResp.keys=='down'):
searchErrorCode = 2
searchErrorName = "Incorrect response"
# check for quit (the [Esc] key)
if event.getKeys(["escape"]):
core.quit()
#Search delay
for item in range (0, NUM_TARGET_POS):
searchStim[item].setAutoDraw(False)
win.flip()
else:
#Evaluation Task
# Conditions 0/1: show 'Evaluate', a rating-scale prompt, then the evaluated
# stimulus; collect a 1-4 rating.  The stimulus is removed after
# EVAL_STIM_ON_SCREEN seconds even without a response.
fixationVertical.setAutoDraw(False); fixationHorizontal.setAutoDraw(False)
evalTextOnScreen = visual.TextStim(win=win, ori=0, name='evalTextOnScreen', text='Evaluate', font=u'Arial', pos=[0, 0], height=0.75, wrapWidth=None, color='black', colorSpace=u'rgb', opacity=1, depth=-1.0)
evalTextOnScreen.setAutoDraw(True)
win.flip()
if trialsArray[curRow][5] == 0:
while trialClock.getTime() < EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray[curRow][5] == 1:
while trialClock.getTime() < EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
evalTextOnScreen.setAutoDraw(False)
ratingScaleText.setAutoDraw(True)
win.flip()
if trialsArray[curRow][5] == 0:
while trialClock.getTime() < EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray[curRow][5] == 1:
while trialClock.getTime() < EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
#Create evaluated stimulus
evaluatedstimulus = visual.ImageStim(win=win,image = evalImgName, pos=[0,0], size=(SIZE,SIZE))
evaluatedstimulus.setAutoDraw(True)
win.flip()
eventClock.reset()
stimOnScreen = True
keyResp.status = NOT_STARTED
evalErrorCode = -1
evalErrorName = ''
noResponseYet = True
probeOnScreen = True
keyResp.keys = [] #just the last key pressed
keyResp.rt = -1
while noResponseYet:
t = eventClock.getTime()
#initialize key checker
if keyResp.status == NOT_STARTED:
keyResp.tStart = t
keyResp.status = STARTED
keyResp.clock.reset()
#event.getKeys()
event.clearEvents()
#check for a keyboard response
theseKeys = event.getKeys(keyList=['1','2','3','4'])
if len(theseKeys) > 0: #test if atleast one key pressed
if keyResp.keys == []:
keyResp.keys = theseKeys[-1] #just the last key pressed
keyResp.rt = keyResp.clock.getTime()
evalResponse = keyResp.keys
evalRT = keyResp.rt
noResponseYet = False
if stimOnScreen and t > EVAL_STIM_ON_SCREEN:
evaluatedstimulus.setAutoDraw(False)
win.flip()
stimOnScreen = False
# check for quit (the [Esc] key)
if event.getKeys(["escape"]):
core.quit()
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
evaluatedstimulus.setAutoDraw(False)
ratingScaleText.setAutoDraw(False)
win.flip()
if trialsArray[curRow][5] == 0:
while trialClock.getTime() < FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray[curRow][5] == 1:
while trialClock.getTime() < FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
# Condition 0 only: the retro-cue sequence happens now, AFTER the evaluation.
if trialsArray[curRow][5] == 0:
#Fixation on Screen
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
win.flip()
while trialClock.getTime() < FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
#First cue arrow on screen
firstCueArrow.setAutoDraw(True)
#Take fixation cross off screen
fixationVertical.setAutoDraw(False);fixationHorizontal.setAutoDraw(False)
win.flip()
while trialClock.getTime() < FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
#Fixation on Screen
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
#Take off first cue arrow
firstCueArrow.setAutoDraw(False)
win.flip()
while trialClock.getTime() < SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
fixationVertical.setAutoDraw(False);fixationHorizontal.setAutoDraw(False)
if trialsArray[curRow][5] == 1:
while trialClock.getTime() < EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
#Back to Memory Task
# 'Same?' prompt then fixation before the first probe.
fixationVertical.setAutoDraw(False);fixationHorizontal.setAutoDraw(False)
rememberTextOnScreen = visual.TextStim(win=win, ori=0, name='rememberTextOnScreen', text='Same?', font=u'Arial', pos=[0, 0], height=0.75, wrapWidth=None, color='black', colorSpace=u'rgb', opacity=1, depth=-1.0)
rememberTextOnScreen.setAutoDraw(True)
win.flip()
if trialsArray[curRow][5] == 0:
while trialClock.getTime() < REMEMBER_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray[curRow][5] == 1:
while trialClock.getTime() < REMEMBER_TEXT_TIME + EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
rememberTextOnScreen.setAutoDraw(False)
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
win.flip()
if trialsArray[curRow][5] == 0:
while trialClock.getTime() < FIXATION_TIME + REMEMBER_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray[curRow][5] == 1:
while trialClock.getTime() < FIXATION_TIME + REMEMBER_TEXT_TIME + EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
fixationVertical.setAutoDraw(False);fixationHorizontal.setAutoDraw(False)
#First Probe
# Randomly probe either the first-cued item (firstCuedUncuedRand == 0) or one
# of the three uncued items (== 1, with firstProbeRand picking which).  The
# probe is the actual memory-array image shown centrally; firstProbeStim
# records its name for the data file.
firstProbeRand = random.randint(0,2)#Random number for if it's an uncued stimulus being probed
firstPosition = POSARRAY[firstCuePos]#position of first cued stimulus
firstCuedUncuedRand = random.randint(0,1)#Random number for whether cued or uncued is probed
if firstCuedUncuedRand == 0:#Cued Stim is Probed
# Match the cued position against each displayed colour's position.
if colourArray.index('Blue') < 4:
if firstPosition == BluePos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Blue/Blue' + str(BlueCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Blue' + str(BlueCount)
if colourArray.index('Green') < 4:
if firstPosition == GreenPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Green/Green' + str(GreenCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Green' + str(GreenCount)
if colourArray.index('Purple') < 4:
if firstPosition == PurplePos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Purple/Purple' + str(PurpleCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Purple' + str(PurpleCount)
if colourArray.index('Pink') < 4:
if firstPosition == PinkPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Pink/Pink' + str(PinkCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Pink' + str(PinkCount)
if colourArray.index('Orange') < 4:
if firstPosition == OrangePos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Orange' + str(OrangeCount)
if firstCuedUncuedRand == 1:#Uncued Stim is Probed
# Choose one of the three positions other than the cued one (same ladder
# pattern as the practice trials).
UncuedProbedStim = POSARRAY.index(firstPosition)
if UncuedProbedStim == 0:
if firstProbeRand == 0:FirstProbePos = POSARRAY[1]
if firstProbeRand == 1:FirstProbePos = POSARRAY[2]
if firstProbeRand == 2:FirstProbePos = POSARRAY[3]
if UncuedProbedStim == 1:
if firstProbeRand == 0:FirstProbePos = POSARRAY[0]
if firstProbeRand == 1:FirstProbePos = POSARRAY[2]
if firstProbeRand == 2:FirstProbePos = POSARRAY[3]
if UncuedProbedStim == 2:
if firstProbeRand == 0:FirstProbePos = POSARRAY[0]
if firstProbeRand == 1:FirstProbePos = POSARRAY[1]
if firstProbeRand == 2:FirstProbePos = POSARRAY[3]
if UncuedProbedStim == 3:
if firstProbeRand == 0:FirstProbePos = POSARRAY[0]
if firstProbeRand == 1:FirstProbePos = POSARRAY[1]
if firstProbeRand == 2:FirstProbePos = POSARRAY[2]
if colourArray.index('Blue') < 4:
if FirstProbePos == BluePos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Blue/Blue' + str(BlueCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Blue' + str(BlueCount)
if colourArray.index('Green') < 4:
if FirstProbePos == GreenPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Green/Green' + str(GreenCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Green' + str(GreenCount)
if colourArray.index('Purple') < 4:
if FirstProbePos == PurplePos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Purple/Purple' + str(PurpleCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Purple' + str(PurpleCount)
if colourArray.index('Pink') < 4:
if FirstProbePos == PinkPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Pink/Pink' + str(PinkCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Pink' + str(PinkCount)
if colourArray.index('Orange') < 4:
if FirstProbePos == OrangePos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Orange' + str(OrangeCount)
# --- Present the first probe, collect the up/down response, then show the
# second retro-cue.  Response mapping and error coding mirror the practice
# probe: buttonCounterbalance 0 -> 'up' = cued / 'down' = uncued, reversed for
# counterbalance 1; mismatches get error code 2.  All subsequent waits add
# FirstProbeRT into the cumulative deadline.
FirstProbe.setAutoDraw(True)
win.flip()
eventClock.reset()
stimOnScreen = True
keyResp.status = NOT_STARTED
firstProbeErrorCode = -1
firstProbeErrorName = ''
noResponseYet = True
probeOnScreen = True
keyResp.keys = [] #just the last key pressed
keyResp.rt = -1
while noResponseYet:
t = eventClock.getTime()
#initialize key checker
if keyResp.status == NOT_STARTED:
keyResp.tStart = t
keyResp.status = STARTED
keyResp.clock.reset()
#event.getKeys()
event.clearEvents()
#check for a keyboard response
theseKeys = event.getKeys(keyList=['up','down'])
if noResponseYet == True:
if len(theseKeys) > 0: #test if atleast one key pressed
if keyResp.keys == []:
FirstProbe.setAutoDraw(False)
keyResp.keys = theseKeys[-1] #just the last key pressed
keyResp.rt = keyResp.clock.getTime()
FirstProbeResponse = keyResp.keys
FirstProbeRT = keyResp.rt
if buttonCounterbalance == 0:
if keyResp.keys == 'up' and firstCuedUncuedRand == 1:
firstProbeErrorCode = 2
firstProbeErrorName = 'incorrect response'
if keyResp.keys == 'down' and firstCuedUncuedRand == 0:
firstProbeErrorCode = 2
firstProbeErrorName = 'incorrect response'
if buttonCounterbalance == 1:
if keyResp.keys == 'down' and firstCuedUncuedRand == 1:
firstProbeErrorCode = 2
firstProbeErrorName = 'incorrect response'
if keyResp.keys == 'up' and firstCuedUncuedRand == 0:
firstProbeErrorCode = 2
firstProbeErrorName = 'incorrect response'
noResponseYet = False
# Remove the probe after its display deadline even without a response.
if stimOnScreen and t > FIRST_PROBE_ON_SCREEN:
FirstProbe.setAutoDraw(False)
win.flip()
stimOnScreen = False
# check for quit (the [Esc] key)
if event.getKeys(["escape"]):
core.quit()
FirstProbe.setAutoDraw(False)
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
win.flip()
# Third retention interval (condition 0 vs 1 deadlines differ by which
# evaluation-timing constants were accrued earlier in the trial).
if trialsArray[curRow][5] == 0:
while trialClock.getTime() < THIRD_RETENTION_INTERVAL + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray[curRow][5] == 1:
while trialClock.getTime() < THIRD_RETENTION_INTERVAL + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
# Second retro-cue arrow replaces the fixation cross for SECOND_CUE_TIME.
fixationVertical.setAutoDraw(False)
fixationHorizontal.setAutoDraw(False)
secondCueArrow.setAutoDraw(True)
win.flip()
if trialsArray[curRow][5] == 0:
while trialClock.getTime() < SECOND_CUE_TIME + THIRD_RETENTION_INTERVAL + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray[curRow][5] == 1:
while trialClock.getTime() < SECOND_CUE_TIME + THIRD_RETENTION_INTERVAL + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
fixationVertical.setAutoDraw(True)
fixationHorizontal.setAutoDraw(True)
secondCueArrow.setAutoDraw(False)
win.flip()
# Fourth retention interval after the second cue.
if trialsArray[curRow][5] == 0:
while trialClock.getTime() < FOURTH_RETENTION_INTERVAL + SECOND_CUE_TIME + THIRD_RETENTION_INTERVAL + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray[curRow][5] == 1:
while trialClock.getTime() < FOURTH_RETENTION_INTERVAL + SECOND_CUE_TIME + THIRD_RETENTION_INTERVAL + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
#Second Probe
# Decide which stimulus to probe: with p=.5 probe the stimulus at the second
# cued location, otherwise probe one of the three other (uncued) locations,
# picked uniformly via secondProbeRand.
secondProbeRand = random.randint(0,2)#Random number for if it's an uncued stimulus being probed
secondPosition = POSARRAY[secondCuePos]#position of second cued stimulus
secondCuedUncuedRand = random.randint(0,1)#Random number for whether cued or uncued is probed
if secondCuedUncuedRand == 0:#Cued Stim is Probed
# colourArray.index(C) < 4 means colour C was one of the four displayed
# colours this trial (index 4 is the left-out "novel" colour).  The probe
# image is shown centrally (pos [0,0]); secondProbeStim records its name.
if colourArray.index('Blue') < 4:
if secondPosition == BluePos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Blue/Blue' + str(BlueCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Blue' + str(BlueCount)
if colourArray.index('Green') < 4:
if secondPosition == GreenPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Green/Green' + str(GreenCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Green' + str(GreenCount)
if colourArray.index('Purple') < 4:
if secondPosition == PurplePos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Purple/Purple' + str(PurpleCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Purple' + str(PurpleCount)
if colourArray.index('Pink') < 4:
if secondPosition == PinkPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Pink/Pink' + str(PinkCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Pink' + str(PinkCount)
if colourArray.index('Orange') < 4:
if secondPosition == OrangePos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Orange' + str(OrangeCount)
if secondCuedUncuedRand == 1:#Uncued Stim is Probed
# Map the cued position index to one of the other three POSARRAY slots.
UncuedProbedStim = POSARRAY.index(secondPosition)
if UncuedProbedStim == 0:
if secondProbeRand == 0:SecondProbePos= POSARRAY[1]
if secondProbeRand == 1:SecondProbePos = POSARRAY[2]
if secondProbeRand == 2:SecondProbePos = POSARRAY[3]
if UncuedProbedStim == 1:
if secondProbeRand == 0:SecondProbePos = POSARRAY[0]
if secondProbeRand == 1:SecondProbePos = POSARRAY[2]
if secondProbeRand == 2:SecondProbePos = POSARRAY[3]
if UncuedProbedStim == 2:
if secondProbeRand == 0:SecondProbePos = POSARRAY[0]
if secondProbeRand == 1:SecondProbePos = POSARRAY[1]
if secondProbeRand == 2:SecondProbePos = POSARRAY[3]
if UncuedProbedStim == 3:
if secondProbeRand == 0:SecondProbePos = POSARRAY[0]
if secondProbeRand == 1:SecondProbePos = POSARRAY[1]
if secondProbeRand == 2:SecondProbePos = POSARRAY[2]
# Build the probe image for whichever colour occupies SecondProbePos.
if colourArray.index('Blue') < 4:
if SecondProbePos == BluePos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Blue/Blue' + str(BlueCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Blue' + str(BlueCount)
if colourArray.index('Green') < 4:
if SecondProbePos == GreenPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Green/Green' + str(GreenCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Green' + str(GreenCount)
if colourArray.index('Purple') < 4:
if SecondProbePos == PurplePos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Purple/Purple' + str(PurpleCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Purple' + str(PurpleCount)
if colourArray.index('Pink') < 4:
if SecondProbePos == PinkPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Pink/Pink' + str(PinkCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Pink' + str(PinkCount)
if colourArray.index('Orange') < 4:
if SecondProbePos == OrangePos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Orange' + str(OrangeCount)
# --- Show the probe and collect an up/down response ---
SecondProbe.setAutoDraw(True)
win.flip()
eventClock.reset()
stimOnScreen = True
keyResp.status = NOT_STARTED
secondProbeErrorCode = -1
secondProbeErrorName = ''
noResponseYet = True
probeOnScreen = True
keyResp.keys = [] #just the last key pressed
keyResp.rt = -1
while noResponseYet:
t = eventClock.getTime()
#initialize key checker
if keyResp.status == NOT_STARTED:
keyResp.tStart = t
keyResp.status = STARTED
keyResp.clock.reset()
#event.getKeys()
event.clearEvents()
#check for a keyboard response
theseKeys = event.getKeys(keyList=['up','down'])
if noResponseYet == True:
if len(theseKeys) > 0: #test if atleast one key pressed
if keyResp.keys == []:
SecondProbe.setAutoDraw(False)
keyResp.keys = theseKeys[-1] #just the last key pressed
keyResp.rt = keyResp.clock.getTime()
SecondProbeResponse = keyResp.keys
SecondProbeRT = keyResp.rt
# Score the response: buttonCounterbalance flips which key means
# "cued" vs "uncued".  errorCode 2 = wrong key for the probed category.
if buttonCounterbalance == 0:
if keyResp.keys == 'up' and secondCuedUncuedRand == 1:
secondProbeErrorCode = 2
secondProbeErrorName = 'incorrect response'
if keyResp.keys == 'down' and secondCuedUncuedRand == 0:
secondProbeErrorCode = 2
secondProbeErrorName = 'incorrect response'
if buttonCounterbalance == 1:
if keyResp.keys == 'down' and secondCuedUncuedRand == 1:
secondProbeErrorCode = 2
secondProbeErrorName = 'incorrect response'
if keyResp.keys == 'up' and secondCuedUncuedRand == 0:
secondProbeErrorCode = 2
secondProbeErrorName = 'incorrect response'
noResponseYet = False
# remove the probe after its display deadline even if no response yet
# NOTE(review): reuses FIRST_PROBE_ON_SCREEN for the *second* probe —
# presumably both probes share one display duration; confirm intended.
if stimOnScreen and t > FIRST_PROBE_ON_SCREEN:
SecondProbe.setAutoDraw(False)
win.flip()
stimOnScreen = False
# check for quit (the [Esc] key)
if event.getKeys(["escape"]):
core.quit()
SecondProbe.setAutoDraw(False)
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
win.flip()
#Inter-trial interval
eventClock.reset()
fixationHorizontal.setAutoDraw(False);fixationVertical.setAutoDraw(False)
win.flip()
while eventClock.getTime() < INTER_TRIAL_INTERVAL: pass
# --- Adaptive staircase on encoding time ---
# Track accuracy over the last four memory trials (conditions 0/1 only;
# search-task trials, condition 2, do not enter the history).
# A trial counts as correct (1) only if BOTH probes were answered correctly.
if trialsArray[curRow][5] == 0 or trialsArray[curRow][5] == 1:
fourTrialsAgo = threeTrialsAgo
threeTrialsAgo = twoTrialsAgo
twoTrialsAgo = previousTrial
previousTrial = 0
if firstProbeErrorCode == -1 and secondProbeErrorCode == -1:
previousTrial = 1
lastFourTrialsArray = [previousTrial,twoTrialsAgo,threeTrialsAgo,fourTrialsAgo]
totalACC = previousTrial + twoTrialsAgo + threeTrialsAgo + fourTrialsAgo
# NOTE(review): totalACC and 4 are both ints; under Python 2 semantics
# totalACC/4 is integer division, so accValue is 0 unless all four trials
# were correct.  That makes ">.8" fire only at 4/4 and "<.7" at <=3/4 —
# confirm this matches the intended staircase thresholds.
accValue = totalACC/4
if totalTrials > 3:
# shorten encoding time when performing well, lengthen when poorly,
# in 50 ms steps, clamped to [0.15 s, 1.0 s]
if accValue > .8:
STIM_TIME = STIM_TIME - 0.05
if accValue < .7:
STIM_TIME = STIM_TIME + 0.05
if STIM_TIME < 0.15:
STIM_TIME = 0.15
if STIM_TIME > 1.0:
STIM_TIME = 1.0
blocktrial = blocktrial + 1
totalTrials = totalTrials + 1
# Rest break after every 16 trials, with a button-mapping reminder.
if blocktrial == 16:
blocktrial = 0
breakTextOnScreen = visual.TextStim(win=win, ori=0, name='breakTextOnScreen', text='Take a Quick Break\nPress the space bar to start the next block...', font=u'Arial', pos=[0, 0], height=0.75, wrapWidth=None, color='black', colorSpace=u'rgb', opacity=1, depth=-1.0)
buttonReminderTextOnScreen = visual.TextStim(win=win, ori=0, name='buttonReminderTextOnScreen', text='Remember, press the ' + butVar1 + ' key to answer YES. Press the ' + butVar2 + ' key to answer NO.', font=u'Arial', pos=[0, -3], height=0.75, wrapWidth=40, color='black', colorSpace=u'rgb', opacity=1, depth=-1.0)
breakTextOnScreen.setAutoDraw(True)
buttonReminderTextOnScreen.setAutoDraw(True)
win.flip()
event.waitKeys()
breakTextOnScreen.setAutoDraw(False)
buttonReminderTextOnScreen.setAutoDraw(False)
win.flip()
# --- Write one tab-separated data row for this trial ---
# Column order: timestamp, trial number, trial type, then per-colour stimulus
# name + position ('novelcolour' for the left-out colour at index 4),
# cue info, probe 1 results, probe 2 results, evaluation results,
# search-task results (or placeholder columns), demographics, 'eol'.
outStr = str(runTime) + '\t'
outStr = outStr + str(totalTrials) + '\t'
outStr = outStr + str(TrialType) + '\t'
outStr = outStr + 'Blue' + str(BlueCount) + '\t'
if colourArray.index('Blue') < 4:
outStr = outStr + str(POSARRAY[colourArray.index('Blue')]) + '\t'
if colourArray.index('Blue') == 4:
outStr = outStr + 'novelcolour' + '\t'
outStr = outStr + 'Green' + str(GreenCount) + '\t'
if colourArray.index('Green') < 4:
outStr = outStr + str(POSARRAY[colourArray.index('Green')]) + '\t'
if colourArray.index('Green') == 4:
outStr = outStr + 'novelcolour' + '\t'
outStr = outStr + 'Purple' + str(PurpleCount) + '\t'
if colourArray.index('Purple') < 4:
outStr = outStr + str(POSARRAY[colourArray.index('Purple')]) + '\t'
if colourArray.index('Purple') == 4:
outStr = outStr + 'novelcolour' + '\t'
outStr = outStr + 'Pink' + str(PinkCount) + '\t'
if colourArray.index('Pink') < 4:
outStr = outStr + str(POSARRAY[colourArray.index('Pink')]) + '\t'
if colourArray.index('Pink') == 4:
outStr = outStr + 'novelcolour' + '\t'
outStr = outStr + 'Orange' + str(OrangeCount) + '\t'
if colourArray.index('Orange') < 4:
outStr = outStr + str(POSARRAY[colourArray.index('Orange')]) + '\t'
if colourArray.index('Orange') == 4:
outStr = outStr + 'novelcolour' + '\t'
# cue side: 0 = left (positions 0/1), 1 = right (positions 2/3)
if firstCuePos == 0 or firstCuePos == 1:
outStr = outStr + '0' + '\t'
if firstCuePos == 2 or firstCuePos == 3:
outStr = outStr + '1' + '\t'
outStr = outStr + str(POSARRAY[firstCuePos]) + '\t'
# memory-trial columns (conditions 0/1): probe details + accuracy flags
if trialsArray[curRow][5] < 2:
outStr = outStr + str(secondPosition) + '\t'
outStr = outStr + str(firstProbeStim) + '\t'
outStr = outStr + str(firstCuedUncuedRand) + '\t'
outStr = outStr + str(FirstProbeResponse) + '\t'
# accuracy column: 1 for errorCode -1 (no error), 0 for codes 2/5/6
if firstProbeErrorCode == -1:
outStr = outStr + '1' + '\t'
if firstProbeErrorCode == 2:
outStr = outStr + '0' + '\t'
if firstProbeErrorCode == 5:
outStr = outStr + '0' + '\t'
if firstProbeErrorCode == 6:
outStr = outStr + '0' + '\t'
outStr = outStr + str(FirstProbeRT) + '\t'
outStr = outStr + str(firstProbeErrorCode) + '\t'
outStr = outStr + str(firstProbeErrorName) + '\t'
outStr = outStr + str(secondProbeStim) + '\t'
outStr = outStr + str(secondCuedUncuedRand) + '\t'
outStr = outStr + str(SecondProbeResponse) + '\t'
if secondProbeErrorCode == -1:
outStr = outStr + '1' + '\t'
if secondProbeErrorCode == 2:
outStr = outStr + '0' + '\t'
if secondProbeErrorCode == 5:
outStr = outStr + '0' + '\t'
if secondProbeErrorCode == 6:
outStr = outStr + '0' + '\t'
outStr = outStr + str(SecondProbeRT) + '\t'
outStr = outStr + str(secondProbeErrorCode) + '\t'
outStr = outStr + str(secondProbeErrorName) + '\t'
# evaluation columns: bare image filename (strip directory), condition,
# rating response, RT, and error info
evalImgName2 = evalImgName.rsplit("/")[-1]
outStr = outStr + str(evalImgName2) + '\t'
outStr = outStr + str(trialsArray[curRow][4]) + '\t'
outStr = outStr + str(evalResponse) + '\t'
outStr = outStr + str(evalRT) + '\t'
outStr = outStr + str(evalErrorCode) + '\t'
outStr = outStr + str(evalErrorName) + '\t'
# search-task trials fill the memory/eval columns with 21 placeholders
if trialsArray[curRow][5] == 2:
outStr = outStr + 'searchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\t'
outStr = outStr + str(STIM_TIME) + '\t'
outStr = outStr + str(trialsArray[curRow][5]) + '\t'
# search-task detail columns: singleton type/colour/position, distractor
# and target gap orientations, target position, response, RT, errors
if trialsArray[curRow][5] == 2:
outStr = outStr + str(singType) + '\t'
outStr = outStr + str(singColName) + '\t'
outStr = outStr + str(singPos) + '\t'
if searchStim[1].ori == 0:
outStr = outStr + 'left' + '\t'
if searchStim[1].ori == 180:
outStr = outStr + 'right' + '\t'
if targetOri == 90:
outStr = outStr + 'up' + '\t'
if targetOri == 270:
outStr = outStr + 'down' + '\t'
outStr = outStr + str(tarPos) + '\t'
outStr = outStr + str(searchResponse) + '\t'
outStr = outStr + str(searchRT) + '\t'
outStr = outStr + str(searchErrorCode) + '\t'
outStr = outStr + str(searchErrorName) + '\t'
# memory trials fill the search columns with 10 placeholders
if trialsArray[curRow][5] < 2:
outStr = outStr + 'nosearch\tnosearch\tnosearch\tnosearch\tnosearch\tnosearch\tnosearch\tnosearch\tnosearch\tnosearch\t'
outStr = outStr + '1' + '\t'
outStr = outStr + str(expInfo2['age']) + '\t'
outStr = outStr + str(expInfo3['gender']) + '\t'
outStr = outStr + str(expInfo4['handedness']) + '\t'
outStr = outStr + str(expInfo5['glasses/contacts?']) + '\t'
outputFile.write(outStr + 'eol\n')
# --- Halfway point: experimenter screen, then second-half instructions ---
breakTextOnScreen = visual.TextStim(win=win, ori=0, name='breakTextOnScreen', text='The first half of the experiment is done.\nPlease go get the experimenter.', font=u'Arial', pos=[0, 0], height=0.75, wrapWidth=None, color='black', colorSpace=u'rgb', opacity=1, depth=-1.0)
breakTextOnScreen.setAutoDraw(True)
win.flip()
# NOTE(review): seven consecutive waitKeys — presumably deliberate so the
# screen cannot be dismissed by a single stray keypress (the experimenter
# must press several keys); confirm this is intended rather than a paste error.
event.waitKeys()
event.waitKeys()
event.waitKeys()
event.waitKeys()
event.waitKeys()
event.waitKeys()
event.waitKeys()
breakTextOnScreen.setAutoDraw(False)
# second-half instruction slide; two keypresses required to continue
Instruct1 = visual.ImageStim(win=win,image= 'Slide30.jpg', pos= [0,0], size=(36,23))
Instruct1.setAutoDraw(True)
win.flip()
event.waitKeys()
event.waitKeys()
Instruct1.setAutoDraw(False)
TrialType = "SecondHalf"
# --- Second-half trial loop (mirrors the first half, driven by trialsArray2
# via the shuffled index list idxTrialsArray2) ---
for trial in range(0, 136):
random.shuffle(colourArray)
runTime = time.strftime("%c")
curRow = idxTrialsArray2[trial]
#put memory task text on screen
memTextOnScreen = visual.TextStim(win=win, ori=0, name='memTextOnScreen', text='Memorize', font=u'Arial', pos=[0, 0], height=0.75, wrapWidth=None, color='black', colorSpace=u'rgb', opacity=1, depth=-1.0)
memTextOnScreen.setAutoDraw(True)
win.flip()
trialClock.reset()
while trialClock.getTime() < MEMORY_TEXT_TIME:pass
#Fixation Cross
memTextOnScreen.setAutoDraw(False)
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
win.flip()
while trialClock.getTime() < FIXATION_TIME + MEMORY_TEXT_TIME: pass
#Decide Positions of Each Color
# colourArray holds 5 colours; after shuffling, the first four are shown
# at POSARRAY[0..3] and the colour at index 4 is this trial's novel colour.
FirstColour = colourArray[0]
SecondColour = colourArray[1]
ThirdColour = colourArray[2]
FourthColour = colourArray[3]
FirstPos = POSARRAY[0]
SecondPos = POSARRAY[1]
ThirdPos = POSARRAY[2]
FourthPos = POSARRAY[3]
#Create Stimuli
# advance each colour's exemplar counter and look up the exemplar id
BlueStimCount = BlueStimCount + 1
GreenStimCount = GreenStimCount + 1
PurpleStimCount = PurpleStimCount + 1
PinkStimCount = PinkStimCount + 1
OrangeStimCount = OrangeStimCount + 1
BlueCount = idxBlueStim[BlueStimCount]
GreenCount = idxGreenStim[GreenStimCount]
PurpleCount = idxPurpleStim[PurpleStimCount]
PinkCount = idxPinkStim[PinkStimCount]
OrangeCount = idxOrangeStim[OrangeStimCount]
# build the four memory items; a colour at index 4 is excluded this trial
if colourArray.index('Blue') < 4:
BluePos = POSARRAY[colourArray.index('Blue')]
FirstItem = visual.ImageStim(win=win,image= 'Images/Blue/Blue' + str(BlueCount) + '.bmp', pos= BluePos, size=(SIZE,SIZE))
if colourArray.index('Green') < 4:
GreenPos = POSARRAY[colourArray.index('Green')]
SecondItem = visual.ImageStim(win=win,image= 'Images/Green/Green' + str(GreenCount) + '.bmp', pos= GreenPos, size=(SIZE,SIZE))
if colourArray.index('Purple') < 4:
PurplePos = POSARRAY[colourArray.index('Purple')]
ThirdItem = visual.ImageStim(win=win,image= 'Images/Purple/Purple' + str(PurpleCount) + '.bmp', pos= PurplePos, size=(SIZE,SIZE))
if colourArray.index('Pink') < 4:
PinkPos = POSARRAY[colourArray.index('Pink')]
FourthItem = visual.ImageStim(win=win,image= 'Images/Pink/Pink' + str(PinkCount) + '.bmp', pos= PinkPos, size=(SIZE,SIZE))
if colourArray.index('Orange') < 4:
OrangePos = POSARRAY[colourArray.index('Orange')]
# when one of the other colours is the excluded one, Orange takes over
# that colour's item slot so four items are always drawn
if colourArray.index('Blue') == 4:
FirstItem = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= OrangePos, size=(SIZE,SIZE))
if colourArray.index('Green') == 4:
SecondItem = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= OrangePos, size=(SIZE,SIZE))
if colourArray.index('Purple') == 4:
ThirdItem = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= OrangePos, size=(SIZE,SIZE))
if colourArray.index('Pink') == 4:
FourthItem = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= OrangePos, size=(SIZE,SIZE))
#Put Stimulus Array on Screen
FirstItem.setAutoDraw(True);SecondItem.setAutoDraw(True);ThirdItem.setAutoDraw(True);FourthItem.setAutoDraw(True)
win.flip()
while trialClock.getTime() < STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
#Take Stimulus Array off Screen
FirstItem.setAutoDraw(False);SecondItem.setAutoDraw(False);ThirdItem.setAutoDraw(False);FourthItem.setAutoDraw(False)
win.flip()
while trialClock.getTime() < FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
#decide where first cue points
# columns 1/2 of trialsArray2 encode side (0=left,1=right) and height
# (0=top,1=bottom); map to POSARRAY index 0..3
if trialsArray2[curRow][1] == 0 and trialsArray2[curRow][2] == 0:firstCuePos=1#left top
if trialsArray2[curRow][1] == 0 and trialsArray2[curRow][2] == 1:firstCuePos=0#left bottom
if trialsArray2[curRow][1] == 1 and trialsArray2[curRow][2] == 0:firstCuePos=2#right top
if trialsArray2[curRow][1] == 1 and trialsArray2[curRow][2] == 1:firstCuePos=3#right bottom
firstCueArrow = visual.ImageStim(win=win,image= arrowArray[firstCuePos], pos= [0,0], size=(1,1))
# memory conditions (0/1) get a second cue on the opposite side, either
# diagonal (column 3 == 0) or horizontal (column 3 == 1) from the first
if trialsArray2[curRow][5] == 0 or trialsArray2[curRow][5] == 1:
#where second cue points
if firstCuePos == 0:#left bottom
if trialsArray2[curRow][3] == 0:secondCuePos=2#diagonal from left bottom is right top
if trialsArray2[curRow][3] == 1:secondCuePos=3#horizontal from left bottom is right bottom
if firstCuePos == 1:#left top
if trialsArray2[curRow][3] == 0:secondCuePos=3#diagonal from left top is right bottom
if trialsArray2[curRow][3] == 1:secondCuePos=2#horizontal from left top is right top
if firstCuePos == 2:#right top
if trialsArray2[curRow][3] == 0:secondCuePos=0#diagonal from right top is left bottom
if trialsArray2[curRow][3] == 1:secondCuePos=1#horizontal from right top is left top
if firstCuePos == 3:#right bottom
if trialsArray2[curRow][3] == 0:secondCuePos=1#diagonal from right bottom is left top
if trialsArray2[curRow][3] == 1:secondCuePos=0#horizontal from right bottom is left bottom
secondCueArrow = visual.ImageStim(win=win,image= arrowArray[secondCuePos], pos= [0,0], size=(1,1))
#Find out position of evaluated stimulus
# column 4: 0 = never-cued stimulus on the second-cued side (the other
# location on that side), 1 = the first-cued stimulus itself
if trialsArray2[curRow][4] == 0:#NeverCuedStimSecondCuedSide
if secondCuePos==0:EvalPos=POSARRAY[1]
if secondCuePos==1:EvalPos=POSARRAY[0]
if secondCuePos==2:EvalPos=POSARRAY[3]
if secondCuePos==3:EvalPos=POSARRAY[2]
if trialsArray2[curRow][4] == 1:#FirstCuedStim
EvalPos=POSARRAY[firstCuePos]
#Determine which actual stimulus will be evaluated
if colourArray.index('Blue') < 4:
if EvalPos == BluePos:evalImgName = 'Images/Blue/Blue' + str(BlueCount) + '.bmp'
if colourArray.index('Green') < 4:
if EvalPos == GreenPos:evalImgName = 'Images/Green/Green' + str(GreenCount) + '.bmp'
if colourArray.index('Purple') < 4:
if EvalPos == PurplePos:evalImgName = 'Images/Purple/Purple' + str(PurpleCount) + '.bmp'
if colourArray.index('Pink') < 4:
if EvalPos == PinkPos:evalImgName = 'Images/Pink/Pink' + str(PinkCount) + '.bmp'
if colourArray.index('Orange') < 4:
if EvalPos == OrangePos:evalImgName = 'Images/Orange/Orange' + str(OrangeCount) + '.bmp'
# Condition 0 (evaluate before retro-cue): just wait out a blank interval.
if trialsArray2[curRow][5] == 0:#Evaluate before RetroCue
while trialClock.getTime() < EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
# Conditions 1 and 2: show the first retro-cue now, then the second
# retention interval.
if trialsArray2[curRow][5] == 1 or trialsArray2[curRow][5] == 2:#Evaluate After RetroCue or SearchTask
#First cue arrow on screen
firstCueArrow.setAutoDraw(True)
#Take fixation cross off screen
fixationVertical.setAutoDraw(False);fixationHorizontal.setAutoDraw(False)
win.flip()
while trialClock.getTime() < FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
#Fixation on Screen
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
#Take off first cue arrow
firstCueArrow.setAutoDraw(False)
win.flip()
while trialClock.getTime() < SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
# --- Condition 2: visual search task during the retention interval ---
if trialsArray2[curRow][5] == 2:#Search Task
#determine singleton type
singType = trialsArray2[curRow][6]#0=activematching,1=accessorymatching,2=novelcolour
#setup target locations in a circle around fixation
targetPositions = []
for pos in range(0, NUM_TARGET_POS):#8 target positions
angle = math.radians(360 / NUM_TARGET_POS * pos)
targetPositions.append([math.cos(angle)*SEARCH_ECCEN, math.sin(angle)*SEARCH_ECCEN])#setup target locations in a circle around fixation
#set stimulus properties
distractorOris = [0, 180] #orientations for the landolt c stimuli #gap pointing left or right for all distractors.
idxDistractorOris = range(len(distractorOris))
targetOris = [90,270]
idxTargetOris = range(len(targetOris))
#setup search stim
searchStim = [] #create a list of 8 distractor stim to be used on each trial -- these stim are all leftOpen so later on we will randomly assign them an orientation (90 or 180) so they become top/bottom open
for distractor in range (0, NUM_SEARCH_POS):
distractor = visual.ShapeStim(win, lineColorSpace='rgb', fillColorSpace = 'rgb', fillColor=bCol, lineColor=sCol, vertices=GetVertices(), closeShape=False, lineWidth = 9)
searchStim.append(distractor)
#set colour of singleton based on condition
# colourArray[i] is the colour displayed at POSARRAY[i], so indexing by
# firstCuePos yields the colour of the cued memory item.
if singType == 0:#activematching
singCol = colourArray[firstCuePos]
elif singType == 1:#accessorymatching
# pick a colour from the opposite (uncued) side of the display
secondCuedRand = random.randint(0,1)#0=top,1=bottom
if firstCuePos == 0 or firstCuePos == 1:
if secondCuedRand == 0:
singCol = colourArray[2]
if secondCuedRand == 1:
singCol = colourArray[3]
if firstCuePos == 2 or firstCuePos == 3:
if secondCuedRand == 0:
singCol = colourArray[1]
if secondCuedRand == 1:
singCol = colourArray[0]
else:#novelmatching
singCol = colourArray[4]
# translate the colour name into its RGB triple (PsychoPy -1..1 space)
if singCol == 'Blue':
singColName = 'Blue'
singCol = [-.875,-.827,.639]
if singCol == 'Green':
singColName = 'Green'
singCol = [-.671,.129,-.537]
if singCol == 'Purple':
singColName = 'Purple'
singCol = [.545,-.576,.78]
#if singCol == 'Yellow':
#	singCol = [.843,.835,-.522]
if singCol == 'Orange':
singColName = 'Orange'
singCol = [.435,-.255,-.741]
if singCol == 'Pink':
singColName = 'Pink'
singCol = [.89,.231,.757]
#if singCol == 'Red':
#	singCol = [.075,-.757,-.725]
random.shuffle(idxSearchPos) #randomly shuffle the list of positions in the search array
#Search display #create 8 distractors
for item in range (0, NUM_TARGET_POS):
random.shuffle(idxDistractorOris)
searchStim[item].ori=distractorOris[idxDistractorOris[0]]
searchStim[item].pos = targetPositions[idxSearchPos[item]]
searchStim[item].setAutoDraw(True)
# by convention item 0 becomes the target and item 1 the colour singleton
tarPos = searchStim[0].pos
singPos = searchStim[1].pos
#here you're changing one of the previously created distractors to be the target. So first item in SearchStim list is being changed to have targetOrientation.
random.shuffle(idxTargetOris)
targetPos = 0
targetOri = targetOris[idxTargetOris[0]]
searchStim[targetPos].ori=targetOri
searchStim[targetPos].setAutoDraw(True)
#here you're changing one of the previously created distractors to be the singleton. So second item in SearchStim list is being changed to singletoncolour.
singletonPos = 1
# if searchCond == 4:
searchStim[singletonPos].setLineColor(singCol)
# else:
# searchStim[singletonPos].setLineColor('blue')
searchStim[singletonPos].setAutoDraw(True)
#put the whole search task on the screen including target, singleton, and 6 neutral distractors
win.flip()
#SEARCH TASK: wait for response
# response-terminated: the display stays up until an up/down key arrives
#io.clearEvents('all')
eventClock.reset()
keyResp.status = NOT_STARTED
searchErrorCode = -1
searchErrorName = ''
noResponseYet = True
keyResp.keys = [] #just the last key pressed
keyResp.rt = -1
while noResponseYet:
t = eventClock.getTime()
#initialize key checker
if keyResp.status == NOT_STARTED:
keyResp.tStart = t
keyResp.status = STARTED
keyResp.clock.reset()
#event.getKeys()
event.clearEvents()
#check for a keyboard response
theseKeys = event.getKeys(keyList=['up','down'])
if len(theseKeys) > 0: #test if atleast one key pressed
if keyResp.keys == []:
keyResp.keys = theseKeys[-1] #just the last key pressed
keyResp.rt = keyResp.clock.getTime()
searchResponse = keyResp.keys
searchRT = keyResp.rt
noResponseYet = False
# error if the key doesn't match the target's gap direction
# (90 = gap up -> 'up', 270 = gap down -> 'down')
if (targetOri != 90 and keyResp.keys=='up') or (targetOri != 270 and keyResp.keys=='down'):
searchErrorCode = 2
searchErrorName = "Incorrect response"
# check for quit (the [Esc] key)
if event.getKeys(["escape"]):
core.quit()
#Search delay
for item in range (0, NUM_TARGET_POS):
searchStim[item].setAutoDraw(False)
win.flip()
# --- Conditions 0/1: evaluation (rating) task instead of search ---
else:
#Evaluation Task
fixationVertical.setAutoDraw(False); fixationHorizontal.setAutoDraw(False)
evalTextOnScreen = visual.TextStim(win=win, ori=0, name='evalTextOnScreen', text='Evaluate', font=u'Arial', pos=[0, 0], height=0.75, wrapWidth=None, color='black', colorSpace=u'rgb', opacity=1, depth=-1.0)
evalTextOnScreen.setAutoDraw(True)
win.flip()
# hold the 'Evaluate' prompt for EVAL_TEXT_TIME (cumulative waits differ
# by condition because the phases before this point differ)
if trialsArray2[curRow][5] == 0:
while trialClock.getTime() < EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray2[curRow][5] == 1:
while trialClock.getTime() < EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
evalTextOnScreen.setAutoDraw(False)
ratingScaleText.setAutoDraw(True)
win.flip()
if trialsArray2[curRow][5] == 0:
while trialClock.getTime() < EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray2[curRow][5] == 1:
while trialClock.getTime() < EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
#Create evaluated stimulus
evaluatedstimulus = visual.ImageStim(win=win,image = evalImgName, pos=[0,0], size=(SIZE,SIZE))
evaluatedstimulus.setAutoDraw(True)
win.flip()
# collect a 1-4 rating; stimulus is removed after EVAL_STIM_ON_SCREEN
# even if the participant has not yet responded
eventClock.reset()
stimOnScreen = True
keyResp.status = NOT_STARTED
evalErrorCode = -1
evalErrorName = ''
noResponseYet = True
probeOnScreen = True
keyResp.keys = [] #just the last key pressed
keyResp.rt = -1
while noResponseYet:
t = eventClock.getTime()
#initialize key checker
if keyResp.status == NOT_STARTED:
keyResp.tStart = t
keyResp.status = STARTED
keyResp.clock.reset()
#event.getKeys()
event.clearEvents()
#check for a keyboard response
theseKeys = event.getKeys(keyList=['1','2','3','4'])
if len(theseKeys) > 0: #test if atleast one key pressed
if keyResp.keys == []:
keyResp.keys = theseKeys[-1] #just the last key pressed
keyResp.rt = keyResp.clock.getTime()
evalResponse = keyResp.keys
evalRT = keyResp.rt
noResponseYet = False
if stimOnScreen and t > EVAL_STIM_ON_SCREEN:
evaluatedstimulus.setAutoDraw(False)
win.flip()
stimOnScreen = False
# check for quit (the [Esc] key)
if event.getKeys(["escape"]):
core.quit()
# clean up the rating display and return to fixation
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
evaluatedstimulus.setAutoDraw(False)
ratingScaleText.setAutoDraw(False)
win.flip()
if trialsArray2[curRow][5] == 0:
while trialClock.getTime() < FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray2[curRow][5] == 1:
while trialClock.getTime() < FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
# Condition 0 evaluated BEFORE the retro-cue, so the first cue arrow is
# presented now, followed by the second retention interval.
if trialsArray2[curRow][5] == 0:
#Fixation on Screen
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
win.flip()
while trialClock.getTime() < FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
#First cue arrow on screen
firstCueArrow.setAutoDraw(True)
#Take fixation cross off screen
fixationVertical.setAutoDraw(False);fixationHorizontal.setAutoDraw(False)
win.flip()
while trialClock.getTime() < FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
#Fixation on Screen
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
#Take off first cue arrow
firstCueArrow.setAutoDraw(False)
win.flip()
while trialClock.getTime() < SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
fixationVertical.setAutoDraw(False);fixationHorizontal.setAutoDraw(False)
# Condition 1 already showed the cue; just wait out the post-eval blank.
if trialsArray2[curRow][5] == 1:
while trialClock.getTime() < EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
#First Probe
firstProbeRand = random.randint(0,2)#Random number for if it's an uncued stimulus being probed
firstPosition = POSARRAY[firstCuePos]#position of first cued stimulus
firstCuedUncuedRand = random.randint(0,1)#Random number for whether cued or uncued is probed
if firstCuedUncuedRand == 0:#Cued Stim is Probed
if colourArray.index('Blue') < 4:
if firstPosition == BluePos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Blue/Blue' + str(BlueCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Blue' + str(BlueCount)
if colourArray.index('Green') < 4:
if firstPosition == GreenPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Green/Green' + str(GreenCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Green' + str(GreenCount)
if colourArray.index('Purple') < 4:
if firstPosition == PurplePos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Purple/Purple' + str(PurpleCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Purple' + str(PurpleCount)
if colourArray.index('Pink') < 4:
if firstPosition == PinkPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Pink/Pink' + str(PinkCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Pink' + str(PinkCount)
if colourArray.index('Orange') < 4:
if firstPosition == OrangePos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Orange' + str(OrangeCount)
if firstCuedUncuedRand == 1:#Uncued Stim is Probed
UncuedProbedStim = POSARRAY.index(firstPosition)
if UncuedProbedStim == 0:
if firstProbeRand == 0:FirstProbePos = POSARRAY[1]
if firstProbeRand == 1:FirstProbePos = POSARRAY[2]
if firstProbeRand == 2:FirstProbePos = POSARRAY[3]
if UncuedProbedStim == 1:
if firstProbeRand == 0:FirstProbePos = POSARRAY[0]
if firstProbeRand == 1:FirstProbePos = POSARRAY[2]
if firstProbeRand == 2:FirstProbePos = POSARRAY[3]
if UncuedProbedStim == 2:
if firstProbeRand == 0:FirstProbePos = POSARRAY[0]
if firstProbeRand == 1:FirstProbePos = POSARRAY[1]
if firstProbeRand == 2:FirstProbePos = POSARRAY[3]
if UncuedProbedStim == 3:
if firstProbeRand == 0:FirstProbePos = POSARRAY[0]
if firstProbeRand == 1:FirstProbePos = POSARRAY[1]
if firstProbeRand == 2:FirstProbePos = POSARRAY[2]
if colourArray.index('Blue') < 4:
if FirstProbePos == BluePos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Blue/Blue' + str(BlueCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Blue' + str(BlueCount)
if colourArray.index('Green') < 4:
if FirstProbePos == GreenPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Green/Green' + str(GreenCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Green' + str(GreenCount)
if colourArray.index('Purple') < 4:
if FirstProbePos == PurplePos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Purple/Purple' + str(PurpleCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Purple' + str(PurpleCount)
if colourArray.index('Pink') < 4:
if FirstProbePos == PinkPos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Pink/Pink' + str(PinkCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Pink' + str(PinkCount)
if colourArray.index('Orange') < 4:
if FirstProbePos == OrangePos:FirstProbe = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));firstProbeStim = 'Orange' + str(OrangeCount)
FirstProbe.setAutoDraw(True)
win.flip()
eventClock.reset()
stimOnScreen = True
keyResp.status = NOT_STARTED
firstProbeErrorCode = -1
firstProbeErrorName = ''
noResponseYet = True
probeOnScreen = True
keyResp.keys = [] #just the last key pressed
keyResp.rt = -1
while noResponseYet:
t = eventClock.getTime()
#initialize key checker
if keyResp.status == NOT_STARTED:
keyResp.tStart = t
keyResp.status = STARTED
keyResp.clock.reset()
#event.getKeys()
event.clearEvents()
#check for a keyboard response
theseKeys = event.getKeys(keyList=['up','down'])
if noResponseYet == True:
if len(theseKeys) > 0: #test if atleast one key pressed
if keyResp.keys == []:
FirstProbe.setAutoDraw(False)
keyResp.keys = theseKeys[-1] #just the last key pressed
keyResp.rt = keyResp.clock.getTime()
FirstProbeResponse = keyResp.keys
FirstProbeRT = keyResp.rt
if buttonCounterbalance == 0:
if keyResp.keys == 'up' and firstCuedUncuedRand == 1:
firstProbeErrorCode = 2
firstProbeErrorName = 'incorrect response'
if keyResp.keys == 'down' and firstCuedUncuedRand == 0:
firstProbeErrorCode = 2
firstProbeErrorName = 'incorrect response'
if buttonCounterbalance == 1:
if keyResp.keys == 'down' and firstCuedUncuedRand == 1:
firstProbeErrorCode = 2
firstProbeErrorName = 'incorrect response'
if keyResp.keys == 'up' and firstCuedUncuedRand == 0:
firstProbeErrorCode = 2
firstProbeErrorName = 'incorrect response'
noResponseYet = False
if stimOnScreen and t > FIRST_PROBE_ON_SCREEN:
FirstProbe.setAutoDraw(False)
win.flip()
stimOnScreen = False
# check for quit (the [Esc] key)
if event.getKeys(["escape"]):
core.quit()
FirstProbe.setAutoDraw(False)
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
win.flip()
if trialsArray2[curRow][5] == 0:
while trialClock.getTime() < THIRD_RETENTION_INTERVAL + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray2[curRow][5] == 1:
while trialClock.getTime() < THIRD_RETENTION_INTERVAL + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
fixationVertical.setAutoDraw(False)
fixationHorizontal.setAutoDraw(False)
secondCueArrow.setAutoDraw(True)
win.flip()
if trialsArray2[curRow][5] == 0:
while trialClock.getTime() < SECOND_CUE_TIME + THIRD_RETENTION_INTERVAL + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray2[curRow][5] == 1:
while trialClock.getTime() < SECOND_CUE_TIME + THIRD_RETENTION_INTERVAL + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
fixationVertical.setAutoDraw(True)
fixationHorizontal.setAutoDraw(True)
secondCueArrow.setAutoDraw(False)
win.flip()
if trialsArray2[curRow][5] == 0:
while trialClock.getTime() < FOURTH_RETENTION_INTERVAL + SECOND_CUE_TIME + THIRD_RETENTION_INTERVAL + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIXATION_TIME + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + EVAL_BEFORE_RETRO_BLANK + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME: pass
if trialsArray2[curRow][5] == 1:
while trialClock.getTime() < FOURTH_RETENTION_INTERVAL + SECOND_CUE_TIME + THIRD_RETENTION_INTERVAL + FirstProbeRT + FIXATION_TIME + REMEMBER_TEXT_TIME + EVAL_AFTER_RETRO_BLANK + FIXATION_TIME + evalRT + EVAL_FIXATION_TIME + EVAL_TEXT_TIME + SECOND_RETENTION_INTERVAL + FIRST_CUE_TIME + FIRST_RETENTION_INTERVAL + STIM_TIME + FIXATION_TIME + MEMORY_TEXT_TIME:pass
#Second Probe
secondProbeRand = random.randint(0,2)#Random number for if it's an uncued stimulus being probed
secondPosition = POSARRAY[secondCuePos]#position of second cued stimulus
secondCuedUncuedRand = random.randint(0,1)#Random number for whether cued or uncued is probed
if secondCuedUncuedRand == 0:#Cued Stim is Probed
if colourArray.index('Blue') < 4:
if secondPosition == BluePos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Blue/Blue' + str(BlueCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Blue' + str(BlueCount)
if colourArray.index('Green') < 4:
if secondPosition == GreenPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Green/Green' + str(GreenCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Green' + str(GreenCount)
if colourArray.index('Purple') < 4:
if secondPosition == PurplePos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Purple/Purple' + str(PurpleCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Purple' + str(PurpleCount)
if colourArray.index('Pink') < 4:
if secondPosition == PinkPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Pink/Pink' + str(PinkCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Pink' + str(PinkCount)
if colourArray.index('Orange') < 4:
if secondPosition == OrangePos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Orange' + str(OrangeCount)
if secondCuedUncuedRand == 1:#Uncued Stim is Probed
UncuedProbedStim = POSARRAY.index(secondPosition)
if UncuedProbedStim == 0:
if secondProbeRand == 0:SecondProbePos= POSARRAY[1]
if secondProbeRand == 1:SecondProbePos = POSARRAY[2]
if secondProbeRand == 2:SecondProbePos = POSARRAY[3]
if UncuedProbedStim == 1:
if secondProbeRand == 0:SecondProbePos = POSARRAY[0]
if secondProbeRand == 1:SecondProbePos = POSARRAY[2]
if secondProbeRand == 2:SecondProbePos = POSARRAY[3]
if UncuedProbedStim == 2:
if secondProbeRand == 0:SecondProbePos = POSARRAY[0]
if secondProbeRand == 1:SecondProbePos = POSARRAY[1]
if secondProbeRand == 2:SecondProbePos = POSARRAY[3]
if UncuedProbedStim == 3:
if secondProbeRand == 0:SecondProbePos = POSARRAY[0]
if secondProbeRand == 1:SecondProbePos = POSARRAY[1]
if secondProbeRand == 2:SecondProbePos = POSARRAY[2]
if colourArray.index('Blue') < 4:
if SecondProbePos == BluePos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Blue/Blue' + str(BlueCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Blue' + str(BlueCount)
if colourArray.index('Green') < 4:
if SecondProbePos == GreenPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Green/Green' + str(GreenCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Green' + str(GreenCount)
if colourArray.index('Purple') < 4:
if SecondProbePos == PurplePos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Purple/Purple' + str(PurpleCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Purple' + str(PurpleCount)
if colourArray.index('Pink') < 4:
if SecondProbePos == PinkPos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Pink/Pink' + str(PinkCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Pink' + str(PinkCount)
if colourArray.index('Orange') < 4:
if SecondProbePos == OrangePos:SecondProbe = visual.ImageStim(win=win,image= 'Images/Orange/Orange' + str(OrangeCount) + '.bmp', pos= [0,0], size=(SIZE,SIZE));secondProbeStim = 'Orange' + str(OrangeCount)
SecondProbe.setAutoDraw(True)
win.flip()
eventClock.reset()
stimOnScreen = True
keyResp.status = NOT_STARTED
secondProbeErrorCode = -1
secondProbeErrorName = ''
noResponseYet = True
probeOnScreen = True
keyResp.keys = [] #just the last key pressed
keyResp.rt = -1
while noResponseYet:
t = eventClock.getTime()
#initialize key checker
if keyResp.status == NOT_STARTED:
keyResp.tStart = t
keyResp.status = STARTED
keyResp.clock.reset()
#event.getKeys()
event.clearEvents()
#check for a keyboard response
theseKeys = event.getKeys(keyList=['up','down'])
if noResponseYet == True:
if len(theseKeys) > 0: #test if atleast one key pressed
if keyResp.keys == []:
SecondProbe.setAutoDraw(False)
keyResp.keys = theseKeys[-1] #just the last key pressed
keyResp.rt = keyResp.clock.getTime()
SecondProbeResponse = keyResp.keys
SecondProbeRT = keyResp.rt
if buttonCounterbalance == 0:
if keyResp.keys == 'up' and secondCuedUncuedRand == 1:
secondProbeErrorCode = 2
secondProbeErrorName = 'incorrect response'
if keyResp.keys == 'down' and secondCuedUncuedRand == 0:
secondProbeErrorCode = 2
secondProbeErrorName = 'incorrect response'
if buttonCounterbalance == 1:
if keyResp.keys == 'down' and secondCuedUncuedRand == 1:
secondProbeErrorCode = 2
secondProbeErrorName = 'incorrect response'
if keyResp.keys == 'up' and secondCuedUncuedRand == 0:
secondProbeErrorCode = 2
secondProbeErrorName = 'incorrect response'
noResponseYet = False
if stimOnScreen and t > FIRST_PROBE_ON_SCREEN:
SecondProbe.setAutoDraw(False)
win.flip()
stimOnScreen = False
# check for quit (the [Esc] key)
if event.getKeys(["escape"]):
core.quit()
SecondProbe.setAutoDraw(False)
fixationVertical.setAutoDraw(True);fixationHorizontal.setAutoDraw(True)
win.flip()
#Inter-trial interval
eventClock.reset()
fixationHorizontal.setAutoDraw(False);fixationVertical.setAutoDraw(False)
win.flip()
while eventClock.getTime() < INTER_TRIAL_INTERVAL: pass
if trialsArray2[curRow][5] == 0 or trialsArray2[curRow][5] == 1:
fourTrialsAgo = threeTrialsAgo
threeTrialsAgo = twoTrialsAgo
twoTrialsAgo = previousTrial
previousTrial = 0
if firstProbeErrorCode == -1 and secondProbeErrorCode == -1:
previousTrial = 1
lastFourTrialsArray = [previousTrial,twoTrialsAgo,threeTrialsAgo,fourTrialsAgo]
totalACC = previousTrial + twoTrialsAgo + threeTrialsAgo + fourTrialsAgo
accValue = totalACC/4
if totalTrials > 3:
if accValue > .8:
STIM_TIME = STIM_TIME - 0.05
if accValue < .7:
STIM_TIME = STIM_TIME + 0.05
if STIM_TIME < 0.15:
STIM_TIME = 0.15
if STIM_TIME > 1.0:
STIM_TIME = 1.0
blocktrial = blocktrial + 1
totalTrials = totalTrials + 1
if blocktrial == 16:
blocktrial = 0
breakTextOnScreen = visual.TextStim(win=win, ori=0, name='breakTextOnScreen', text='Take a Quick Break\nPress the space bar to start the next block...', font=u'Arial', pos=[0, 0], height=0.75, wrapWidth=None, color='black', colorSpace=u'rgb', opacity=1, depth=-1.0)
buttonReminderTextOnScreen = visual.TextStim(win=win, ori=0, name='buttonReminderTextOnScreen', text='Remember, press the ' + butVar1 + ' key to answer YES. Press the ' + butVar2 + ' key to answer NO.', font=u'Arial', pos=[0, -3], height=0.75, wrapWidth=40, color='black', colorSpace=u'rgb', opacity=1, depth=-1.0)
breakTextOnScreen.setAutoDraw(True)
buttonReminderTextOnScreen.setAutoDraw(True)
win.flip()
event.waitKeys()
breakTextOnScreen.setAutoDraw(False)
buttonReminderTextOnScreen.setAutoDraw(False)
win.flip()
outStr = str(runTime) + '\t'
outStr = outStr + str(totalTrials) + '\t'
outStr = outStr + str(TrialType) + '\t'
outStr = outStr + 'Blue' + str(BlueCount) + '\t'
if colourArray.index('Blue') < 4:
outStr = outStr + str(POSARRAY[colourArray.index('Blue')]) + '\t'
if colourArray.index('Blue') == 4:
outStr = outStr + 'novelcolour' + '\t'
outStr = outStr + 'Green' + str(GreenCount) + '\t'
if colourArray.index('Green') < 4:
outStr = outStr + str(POSARRAY[colourArray.index('Green')]) + '\t'
if colourArray.index('Green') == 4:
outStr = outStr + 'novelcolour' + '\t'
outStr = outStr + 'Purple' + str(PurpleCount) + '\t'
if colourArray.index('Purple') < 4:
outStr = outStr + str(POSARRAY[colourArray.index('Purple')]) + '\t'
if colourArray.index('Purple') == 4:
outStr = outStr + 'novelcolour' + '\t'
outStr = outStr + 'Pink' + str(PinkCount) + '\t'
if colourArray.index('Pink') < 4:
outStr = outStr + str(POSARRAY[colourArray.index('Pink')]) + '\t'
if colourArray.index('Pink') == 4:
outStr = outStr + 'novelcolour' + '\t'
outStr = outStr + 'Orange' + str(OrangeCount) + '\t'
if colourArray.index('Orange') < 4:
outStr = outStr + str(POSARRAY[colourArray.index('Orange')]) + '\t'
if colourArray.index('Orange') == 4:
outStr = outStr + 'novelcolour' + '\t'
if firstCuePos == 0 or firstCuePos == 1:
outStr = outStr + '0' + '\t'
if firstCuePos == 2 or firstCuePos == 3:
outStr = outStr + '1' + '\t'
outStr = outStr + str(POSARRAY[firstCuePos]) + '\t'
if trialsArray2[curRow][5] < 2:
outStr = outStr + str(secondPosition) + '\t'
outStr = outStr + str(firstProbeStim) + '\t'
outStr = outStr + str(firstCuedUncuedRand) + '\t'
outStr = outStr + str(FirstProbeResponse) + '\t'
if firstProbeErrorCode == -1:
outStr = outStr + '1' + '\t'
if firstProbeErrorCode == 2:
outStr = outStr + '0' + '\t'
if firstProbeErrorCode == 5:
outStr = outStr + '0' + '\t'
if firstProbeErrorCode == 6:
outStr = outStr + '0' + '\t'
outStr = outStr + str(FirstProbeRT) + '\t'
outStr = outStr + str(firstProbeErrorCode) + '\t'
outStr = outStr + str(firstProbeErrorName) + '\t'
outStr = outStr + str(secondProbeStim) + '\t'
outStr = outStr + str(secondCuedUncuedRand) + '\t'
outStr = outStr + str(SecondProbeResponse) + '\t'
if secondProbeErrorCode == -1:
outStr = outStr + '1' + '\t'
if secondProbeErrorCode == 2:
outStr = outStr + '0' + '\t'
if secondProbeErrorCode == 5:
outStr = outStr + '0' + '\t'
if secondProbeErrorCode == 6:
outStr = outStr + '0' + '\t'
outStr = outStr + str(SecondProbeRT) + '\t'
outStr = outStr + str(secondProbeErrorCode) + '\t'
outStr = outStr + str(secondProbeErrorName) + '\t'
evalImgName2 = evalImgName.rsplit("/")[-1]
outStr = outStr + str(evalImgName2) + '\t'
outStr = outStr + str(trialsArray2[curRow][4]) + '\t'
outStr = outStr + str(evalResponse) + '\t'
outStr = outStr + str(evalRT) + '\t'
outStr = outStr + str(evalErrorCode) + '\t'
outStr = outStr + str(evalErrorName) + '\t'
if trialsArray2[curRow][5] == 2:
outStr = outStr + 'searchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\tsearchtask\t'
outStr = outStr + str(STIM_TIME) + '\t'
outStr = outStr + str(trialsArray2[curRow][5]) + '\t'
if trialsArray2[curRow][5] == 2:
outStr = outStr + str(singType) + '\t'
outStr = outStr + str(singColName) + '\t'
outStr = outStr + str(singPos) + '\t'
if searchStim[1].ori == 0:
outStr = outStr + 'left' + '\t'
if searchStim[1].ori == 180:
outStr = outStr + 'right' + '\t'
if targetOri == 90:
outStr = outStr + 'up' + '\t'
if targetOri == 270:
outStr = outStr + 'down' + '\t'
outStr = outStr + str(tarPos) + '\t'
outStr = outStr + str(searchResponse) + '\t'
outStr = outStr + str(searchRT) + '\t'
outStr = outStr + str(searchErrorCode) + '\t'
outStr = outStr + str(searchErrorName) + '\t'
if trialsArray2[curRow][5] < 2:
outStr = outStr + 'nosearch\tnosearch\tnosearch\tnosearch\tnosearch\tnosearch\tnosearch\tnosearch\tnosearch\tnosearch\t'
outStr = outStr + '2' + '\t'
outStr = outStr + str(expInfo2['age']) + '\t'
outStr = outStr + str(expInfo3['gender']) + '\t'
outStr = outStr + str(expInfo4['handedness']) + '\t'
outStr = outStr + str(expInfo5['glasses/contacts?']) + '\t'
outputFile.write(outStr + 'eol\n')
| 53.911666 | 1,024 | 0.632445 | 15,483 | 144,645 | 5.801008 | 0.045017 | 0.033535 | 0.02298 | 0.024784 | 0.924257 | 0.915718 | 0.907824 | 0.900731 | 0.89168 | 0.885835 | 0 | 0.023072 | 0.258059 | 144,645 | 2,682 | 1,025 | 53.931767 | 0.813852 | 0.090829 | 0 | 0.871745 | 0 | 0.003375 | 0.061855 | 0.017094 | 0 | 0 | 0 | 0 | 0 | 1 | 0.000482 | false | 0.04243 | 0.004339 | 0 | 0.005304 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8 |
8705f9f635df4211335d15e09295b59042758779 | 9,976 | py | Python | src/mlsquare/models/irt.py | mlsquare/mlsquare | 187c3b36a12b564b9dc8bd1320055272844793eb | [
"MIT"
] | 1 | 2020-04-02T04:25:49.000Z | 2020-04-02T04:25:49.000Z | src/mlsquare/models/irt.py | mlsquare/mlsquare | 187c3b36a12b564b9dc8bd1320055272844793eb | [
"MIT"
] | 2 | 2019-09-24T01:55:02.000Z | 2019-12-02T17:52:17.000Z | src/mlsquare/models/irt.py | mlsquare/mlsquare | 187c3b36a12b564b9dc8bd1320055272844793eb | [
"MIT"
] | 2 | 2019-08-01T10:54:00.000Z | 2019-11-12T12:09:39.000Z | from sklearn.base import BaseEstimator
import numpy as np
import pandas as pd
class fourPL(BaseEstimator):
    '''
    Model signature of the IRT 4PL (four-parameter logistic) model, used as
    a primal model in dope.

    Input Args:
        data: a pandas DataFrame (or numpy array) of item responses.
        type_: a character string indicating the type of model to fit.
            Available options are 'rasch' (assumes an equal discrimination
            parameter among items) and 'latent_trait' (default; assumes a
            different discrimination parameter per item).
        constraint: a numpy array with 3 elements: the first a tuple of
            item indices; the second the type of parameter to constrain
            (1 = guessing, 2 = easiness, 3 = discrimination); the third the
            value at which the corresponding parameter should be fixed.
            Example: constraint = np.array([(2, 5, 6), 2, 0.8], dtype=object)
            The string 'NULL' (default) means "no constraint".
        max_guessing: a value between 0 and 1 denoting the upper bound for
            the guessing parameters.
        IRT_param: boolean flag (passed through to the backend).
        start_val, na_action, control: passed through unchanged.

    Methods: .fit(), .predict()
    '''

    def __init__(self, data=None, type_="latent_trait", constraint='NULL',
                 max_guessing=1, IRT_param=True, start_val='NULL',
                 na_action='NULL', control=None):
        self.data = data
        self.type_ = type_
        self.constraint = constraint
        self.max_guessing = max_guessing
        # Store under the declared parameter name so that sklearn's
        # BaseEstimator.get_params() (which does getattr() using the
        # __init__ argument names) keeps working; the original attribute
        # name `Irt_param` is kept as a backward-compatible alias.
        self.IRT_param = IRT_param
        self.Irt_param = IRT_param
        self.start_val = start_val
        self.na_action = na_action
        # Avoid the shared mutable default argument (`control=list()`);
        # each instance gets its own fresh list.
        self.control = list() if control is None else control

    def fit(self, data):
        """Validate the constructor arguments and capture the training data.

        NOTE(review): validation inspects the constructor's `self.data`
        while the user/item/response columns are read from the `data`
        argument — preserved from the original; confirm this is intended.
        """
        if not isinstance(self.data, (pd.DataFrame, np.ndarray)):
            raise ValueError('Data must be either pandas Dataframe or numpy array; got (data={})'.format(type(self.data)))
        # Bug fix: the original referenced the bare name `type_` (NameError);
        # the instance attribute is the intended target.
        if self.type_ not in ('latent_trait', 'rasch'):
            raise ValueError('type_ must be either one of `latent_trait` for 3PL model or `rasch` for 1PL model; got (type_={})'.format(self.type_))
        # Bug fix: the original condition was inverted and crashed with
        # AttributeError on the default 'NULL' string when evaluating
        # `.shape` (an ndarray's `.shape` is a tuple, so `shape == 3` could
        # also never hold). Any string is treated as "no constraint".
        if not isinstance(self.constraint, str):
            valid = (isinstance(self.constraint, np.ndarray)
                     and len(self.constraint) == 3
                     and isinstance(self.constraint[0], tuple))
            if not valid:
                raise ValueError('constraint must be a numpy array of shape 3, of which first element must be of type tuple; got (constraint of type={})'.format(type(self.constraint)))
        # Bug fix: original raised only in the contradictory case
        # "not a number AND between 0 and 1"; now reject anything that is
        # not a number inside [0, 1].
        if not (isinstance(self.max_guessing, (int, float)) and 0 <= self.max_guessing <= 1):
            raise ValueError('max_guessing must be of type int or float and between 0 and 1; got (max_guessing type ={}, with value: {})'.format(type(self.max_guessing), self.max_guessing))
        if not isinstance(self.IRT_param, bool):
            raise ValueError('Irt_param must be either True or numpy False; got (Irt_param type={})'.format(type(self.IRT_param)))
        self.xuser = data[0]    # user/ability column
        self.xitems = data[1]   # item column
        self._y = data[-1]      # response column

    def predict(self, data):
        """Return the response column captured by fit()."""
        return self._y
class tpm(BaseEstimator):
    '''
    Model signature of the IRT 3PL (three-parameter logistic) model, used
    as a primal model in dope.

    Input Args:
        data: a pandas DataFrame (or numpy array) of item responses.
        type_: a character string indicating the type of model to fit.
            Available options are 'rasch' (assumes an equal discrimination
            parameter among items) and 'latent_trait' (default; assumes a
            different discrimination parameter per item).
        constraint: a numpy array with 3 elements: the first a tuple of
            item indices; the second the type of parameter to constrain
            (1 = guessing, 2 = easiness, 3 = discrimination); the third the
            value at which the corresponding parameter should be fixed.
            Example: constraint = np.array([(2, 5, 6), 2, 0.8], dtype=object)
            The string 'NULL' (default) means "no constraint".
        max_guessing: a value between 0 and 1 denoting the upper bound for
            the guessing parameters.
        IRT_param: boolean flag (passed through to the backend).
        start_val, na_action, control: passed through unchanged.

    Methods: .fit(), .predict()
    '''

    def __init__(self, data=None, type_="latent_trait", constraint='NULL',
                 max_guessing=1, IRT_param=True, start_val='NULL',
                 na_action='NULL', control=None):
        self.data = data
        self.type_ = type_
        self.constraint = constraint
        self.max_guessing = max_guessing
        # Store under the declared parameter name so that sklearn's
        # BaseEstimator.get_params() (which does getattr() using the
        # __init__ argument names) keeps working; the original attribute
        # name `Irt_param` is kept as a backward-compatible alias.
        self.IRT_param = IRT_param
        self.Irt_param = IRT_param
        self.start_val = start_val
        self.na_action = na_action
        # Avoid the shared mutable default argument (`control=list()`);
        # each instance gets its own fresh list.
        self.control = list() if control is None else control

    def fit(self, data):
        """Validate the constructor arguments and capture the training data.

        NOTE(review): validation inspects the constructor's `self.data`
        while the user/item/response columns are read from the `data`
        argument — preserved from the original; confirm this is intended.
        """
        if not isinstance(self.data, (pd.DataFrame, np.ndarray)):
            raise ValueError('Data must be either pandas Dataframe or numpy array; got (data={})'.format(type(self.data)))
        # Bug fix: the original referenced the bare name `type_` (NameError);
        # the instance attribute is the intended target.
        if self.type_ not in ('latent_trait', 'rasch'):
            raise ValueError('type_ must be either one of `latent_trait` for 3PL model or `rasch` for 1PL model; got (type_={})'.format(self.type_))
        # Bug fix: the original condition was inverted and crashed with
        # AttributeError on the default 'NULL' string when evaluating
        # `.shape` (an ndarray's `.shape` is a tuple, so `shape == 3` could
        # also never hold). Any string is treated as "no constraint".
        if not isinstance(self.constraint, str):
            valid = (isinstance(self.constraint, np.ndarray)
                     and len(self.constraint) == 3
                     and isinstance(self.constraint[0], tuple))
            if not valid:
                raise ValueError('constraint must be a numpy array of shape 3, of which first element must be of type tuple; got (constraint of type={})'.format(type(self.constraint)))
        # Bug fix: original raised only in the contradictory case
        # "not a number AND between 0 and 1"; now reject anything that is
        # not a number inside [0, 1].
        if not (isinstance(self.max_guessing, (int, float)) and 0 <= self.max_guessing <= 1):
            raise ValueError('max_guessing must be of type int or float and between 0 and 1; got (max_guessing type ={}, with value: {})'.format(type(self.max_guessing), self.max_guessing))
        if not isinstance(self.IRT_param, bool):
            raise ValueError('Irt_param must be either True or numpy False; got (Irt_param type={})'.format(type(self.IRT_param)))
        self.xuser = data[0]    # user/ability column
        self.xitems = data[1]   # item column
        self._y = data[-1]      # response column

    def predict(self, data):
        """Return the response column captured by fit()."""
        return self._y
class twoPl(BaseEstimator):
    '''
    Model signature of the IRT 2PL (two-parameter logistic) model, used as
    a primal model in dope.

    Input Args:
        data: a pandas DataFrame (or numpy array) of item responses.
        constraint: a numpy array with 3 elements: the first a tuple of
            item indices; the second the type of parameter to constrain
            (1 = guessing, 2 = easiness, 3 = discrimination); the third the
            value at which the corresponding parameter should be fixed.
            Example: constraint = np.array([(2, 5, 6), 2, 0.8], dtype=object)
            The string 'NULL' (default) means "no constraint".
        IRT_param: boolean flag (passed through to the backend).
        start_val, na_action, control: passed through unchanged.
        Hessian: boolean flag (passed through to the backend).

    Methods: .fit(), .predict()
    '''

    def __init__(self, data=None, constraint='NULL', IRT_param=True,
                 start_val='NULL', na_action='NULL', control=None,
                 Hessian=True):
        self.data = data
        self.constraint = constraint
        self.IRT_param = IRT_param
        self.start_val = start_val
        self.na_action = na_action
        # Avoid the shared mutable default argument (`control=list()`);
        # each instance gets its own fresh list.
        self.control = list() if control is None else control
        self.Hessian = Hessian

    def fit(self, data):
        """Validate the constructor arguments and capture the training data.

        NOTE(review): validation inspects the constructor's `self.data`
        while the user/item/response values are read from the `data`
        argument — preserved from the original; confirm this is intended.
        """
        if not isinstance(self.data, (pd.DataFrame, np.ndarray)):
            raise ValueError('Data must be either pandas Dataframe or numpy array; got (data={})'.format(type(self.data)))
        # Bug fix: the original condition was inverted and crashed with
        # AttributeError on the default 'NULL' string when evaluating
        # `.shape` (an ndarray's `.shape` is a tuple, so `shape == 3` could
        # also never hold). Any string is treated as "no constraint".
        if not isinstance(self.constraint, str):
            valid = (isinstance(self.constraint, np.ndarray)
                     and len(self.constraint) == 3
                     and isinstance(self.constraint[0], tuple))
            if not valid:
                raise ValueError('constraint must be a numpy array of shape 3, of which first element must be of type tuple; got (constraint of type={})'.format(type(self.constraint)))
        # Bug fix: __init__ stores `self.IRT_param` but the original fit()
        # checked `self.Irt_param`, which always raised AttributeError.
        if not isinstance(self.IRT_param, bool):
            raise ValueError('Irt_param must be either True or numpy False; got (Irt_param type={})'.format(type(self.IRT_param)))
        # NOTE(review): unlike the sibling classes, this class extracts
        # scalar cells via .iloc — preserved from the original; confirm the
        # expected layout of `data`.
        self.xuser = data.iloc[0][0]
        self.xitems = data.iloc[1][0]
        self._y = data.iloc[-1][0]

    def predict(self, data):
        """Return the response value captured by fit()."""
        return self._y
class rasch(BaseEstimator):
    '''
    Model signature of the IRT 1PL (Rasch) model, used as a primal model
    in dope.

    Input Args:
        data: a pandas DataFrame (or numpy array) of item responses.
        constraint: a numpy array with 3 elements: the first a tuple of
            item indices; the second the type of parameter to constrain
            (1 = guessing, 2 = easiness, 3 = discrimination); the third the
            value at which the corresponding parameter should be fixed.
            Example: constraint = np.array([(2, 5, 6), 2, 0.8], dtype=object)
            The string 'NULL' (default) means "no constraint".
        IRT_param: boolean flag (passed through to the backend).
        start_val, na_action, control: passed through unchanged.
        Hessian: boolean flag (passed through to the backend).

    Methods: .fit(), .predict()
    '''

    def __init__(self, data=None, constraint='NULL', IRT_param=True,
                 start_val='NULL', na_action='NULL', control=None,
                 Hessian=True):
        self.data = data
        self.constraint = constraint
        self.IRT_param = IRT_param
        self.start_val = start_val
        self.na_action = na_action
        # Avoid the shared mutable default argument (`control=list()`);
        # each instance gets its own fresh list.
        self.control = list() if control is None else control
        self.Hessian = Hessian

    def fit(self, data):
        """Validate the constructor arguments and capture the training data.

        NOTE(review): validation inspects the constructor's `self.data`
        while the user/item/response columns are read from the `data`
        argument — preserved from the original; confirm this is intended.
        """
        if not isinstance(self.data, (pd.DataFrame, np.ndarray)):
            raise ValueError('Data must be either pandas Dataframe or numpy array; got (data={})'.format(type(self.data)))
        # Bug fix: the original condition was inverted and crashed with
        # AttributeError on the default 'NULL' string when evaluating
        # `.shape` (an ndarray's `.shape` is a tuple, so `shape == 3` could
        # also never hold). Any string is treated as "no constraint".
        if not isinstance(self.constraint, str):
            valid = (isinstance(self.constraint, np.ndarray)
                     and len(self.constraint) == 3
                     and isinstance(self.constraint[0], tuple))
            if not valid:
                raise ValueError('constraint must be a numpy array of shape 3, of which first element must be of type tuple; got (constraint of type={})'.format(type(self.constraint)))
        # Bug fix: __init__ stores `self.IRT_param` but the original fit()
        # checked `self.Irt_param`, which always raised AttributeError.
        if not isinstance(self.IRT_param, bool):
            raise ValueError('Irt_param must be either True or numpy False; got (Irt_param type={})'.format(type(self.IRT_param)))
        self.xuser = data[0]    # user/ability column
        self.xitems = data[1]   # item column
        self._y = data[-1]      # response column

    def predict(self, data):
        """Return the response column captured by fit()."""
        return self._y
872b5c9622c1c08e8ee3540c46349b87eb5a88c1 | 711 | py | Python | ImageConverter/cli_dir/banner.py | xXSD11Xx/imageconversion | f4314ad9b8ca40dbff89fab881928fe51c42e12f | [
"MIT"
] | null | null | null | ImageConverter/cli_dir/banner.py | xXSD11Xx/imageconversion | f4314ad9b8ca40dbff89fab881928fe51c42e12f | [
"MIT"
] | null | null | null | ImageConverter/cli_dir/banner.py | xXSD11Xx/imageconversion | f4314ad9b8ca40dbff89fab881928fe51c42e12f | [
"MIT"
] | null | null | null | class Banner():
# """
# Display the country details banner
# """
def intro(self):
print("Hello")
print(" ___ ____ _ ")
print(" |_ _|_ __ ___ __ _ __ _ ___ / ___|___ _ ____ _____ _ __| |_ ___ _ __ ")
print(" | || '_ ` _ \ / _` |/ _` |/ _ \ | | / _ \| '_ \ \ / / _ \ '__| __/ _ \ '__| ")
print(" | || | | | | | (_| | (_| | __/ | |__| (_) | | | \ V / __/ | | || __/ | ")
print(" |___|_| |_| |_|\__,_|\__, |\___| \____\___/|_| |_|\_/ \___|_| \__\___|_| ")
print(" |___/ ")
| 50.785714 | 97 | 0.292546 | 19 | 711 | 4.947368 | 0.631579 | 0.425532 | 0.319149 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.503516 | 711 | 13 | 98 | 54.692308 | 0.266289 | 0.070323 | 0 | 0 | 0 | 0.444444 | 0.738132 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.222222 | 0.777778 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 8 |
5e7aeb2fee71d4d6fd455214fefa91b4a1717359 | 702,194 | py | Python | sdk/python/pulumi_azure_nextgen/recoveryservices/latest/outputs.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/recoveryservices/latest/outputs.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | sdk/python/pulumi_azure_nextgen/recoveryservices/latest/outputs.py | test-wiz-sec/pulumi-azure-nextgen | 20a695af0d020b34b0f1c336e1b69702755174cc | [
"Apache-2.0"
] | null | null | null | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
from . import outputs
__all__ = [
'A2APolicyDetailsResponse',
'A2AProtectedDiskDetailsResponse',
'A2AProtectedManagedDiskDetailsResponse',
'A2AProtectionContainerMappingDetailsResponse',
'A2AReplicationDetailsResponse',
'A2AUnprotectedDiskDetailsResponse',
'AgentDetailsResponse',
'AgentDiskDetailsResponse',
'AzureFabricSpecificDetailsResponse',
'AzureFileshareProtectedItemExtendedInfoResponse',
'AzureFileshareProtectedItemResponse',
'AzureIaaSVMHealthDetailsResponse',
'AzureIaaSVMProtectedItemExtendedInfoResponse',
'AzureIaaSVMProtectedItemResponse',
'AzureIaaSVMProtectionPolicyResponse',
'AzureRecoveryServiceVaultProtectionIntentResponse',
'AzureResourceProtectionIntentResponse',
'AzureSqlContainerResponse',
'AzureSqlProtectedItemExtendedInfoResponse',
'AzureSqlProtectedItemResponse',
'AzureSqlProtectionPolicyResponse',
'AzureStorageContainerResponse',
'AzureToAzureNetworkMappingSettingsResponse',
'AzureToAzureVmSyncedConfigDetailsResponse',
'AzureVmDiskDetailsResponse',
'AzureVmWorkloadProtectedItemExtendedInfoResponse',
'AzureVmWorkloadProtectedItemResponse',
'AzureWorkloadContainerExtendedInfoResponse',
'AzureWorkloadContainerResponse',
'ContainerIdentityInfoResponse',
'CurrentJobDetailsResponse',
'CurrentScenarioDetailsResponse',
'DPMContainerExtendedInfoResponse',
'DPMProtectedItemExtendedInfoResponse',
'DPMProtectedItemResponse',
'DailyRetentionFormatResponse',
'DailyRetentionScheduleResponse',
'DataStoreResponse',
'DayResponse',
'DiskDetailsResponse',
'DiskExclusionPropertiesResponse',
'DistributedNodesInfoResponse',
'DpmContainerResponse',
'DraDetailsResponse',
'EncryptionDetailsResponse',
'ErrorDetailResponse',
'ExtendedPropertiesResponse',
'FabricPropertiesResponse',
'GenericContainerExtendedInfoResponse',
'GenericContainerResponse',
'GenericProtectedItemResponse',
'HealthErrorResponse',
'HyperVReplicaAzurePolicyDetailsResponse',
'HyperVReplicaAzureReplicationDetailsResponse',
'HyperVReplicaBasePolicyDetailsResponse',
'HyperVReplicaBaseReplicationDetailsResponse',
'HyperVReplicaBluePolicyDetailsResponse',
'HyperVReplicaBlueReplicationDetailsResponse',
'HyperVReplicaPolicyDetailsResponse',
'HyperVReplicaReplicationDetailsResponse',
'HyperVSiteDetailsResponse',
'IPConfigResponse',
'IaaSVMContainerResponse',
'IdentityDataResponse',
'IdentityProviderDetailsResponse',
'InMageAgentDetailsResponse',
'InMageAzureV2ManagedDiskDetailsResponse',
'InMageAzureV2PolicyDetailsResponse',
'InMageAzureV2ProtectedDiskDetailsResponse',
'InMageAzureV2ReplicationDetailsResponse',
'InMageBasePolicyDetailsResponse',
'InMagePolicyDetailsResponse',
'InMageProtectedDiskDetailsResponse',
'InMageRcmAgentUpgradeBlockingErrorDetailsResponse',
'InMageRcmFabricSpecificDetailsResponse',
'InMageRcmLastAgentUpgradeErrorDetailsResponse',
'InMageRcmMobilityAgentDetailsResponse',
'InMageRcmNicDetailsResponse',
'InMageRcmPolicyDetailsResponse',
'InMageRcmProtectedDiskDetailsResponse',
'InMageRcmReplicationDetailsResponse',
'InMageReplicationDetailsResponse',
'InitialReplicationDetailsResponse',
'InnerHealthErrorResponse',
'InputEndpointResponse',
'InquiryInfoResponse',
'InquiryValidationResponse',
'KPIResourceHealthDetailsResponse',
'LongTermRetentionPolicyResponse',
'LongTermSchedulePolicyResponse',
'MABContainerHealthDetailsResponse',
'MabContainerExtendedInfoResponse',
'MabContainerResponse',
'MabFileFolderProtectedItemExtendedInfoResponse',
'MabFileFolderProtectedItemResponse',
'MabProtectionPolicyResponse',
'MasterTargetServerResponse',
'MigrationItemPropertiesResponse',
'MobilityServiceUpdateResponse',
'MonthlyRetentionScheduleResponse',
'NetworkMappingPropertiesResponse',
'OSDetailsResponse',
'OSDiskDetailsResponse',
'PolicyPropertiesResponse',
'PrivateEndpointConnectionResponse',
'PrivateEndpointConnectionVaultPropertiesResponse',
'PrivateEndpointResponse',
'PrivateLinkServiceConnectionStateResponse',
'ProcessServerDetailsResponse',
'ProcessServerResponse',
'ProtectionContainerMappingPropertiesResponse',
'PushInstallerDetailsResponse',
'RcmAzureMigrationPolicyDetailsResponse',
'RcmProxyDetailsResponse',
'RecoveryPlanA2ADetailsResponse',
'RecoveryPlanActionResponse',
'RecoveryPlanAutomationRunbookActionDetailsResponse',
'RecoveryPlanGroupResponse',
'RecoveryPlanManualActionDetailsResponse',
'RecoveryPlanPropertiesResponse',
'RecoveryPlanProtectedItemResponse',
'RecoveryPlanScriptActionDetailsResponse',
'RecoveryServicesProviderPropertiesResponse',
'ReplicationAgentDetailsResponse',
'ReplicationProtectedItemPropertiesResponse',
'ReprotectAgentDetailsResponse',
'ResourceHealthDetailsResponse',
'RetentionDurationResponse',
'RetentionVolumeResponse',
'RunAsAccountResponse',
'SimpleRetentionPolicyResponse',
'SimpleSchedulePolicyResponse',
'SkuResponse',
'StorageClassificationMappingPropertiesResponse',
'UpgradeDetailsResponse',
'VCenterPropertiesResponse',
'VMNicDetailsResponse',
'VMwareCbtMigrationDetailsResponse',
'VMwareCbtNicDetailsResponse',
'VMwareCbtProtectedDiskDetailsResponse',
'VMwareCbtProtectionContainerMappingDetailsResponse',
'VMwareDetailsResponse',
'VMwareV2FabricSpecificDetailsResponse',
'VaultPropertiesResponse',
'VersionDetailsResponse',
'VmmDetailsResponse',
'VmmToAzureNetworkMappingSettingsResponse',
'VmmToVmmNetworkMappingSettingsResponse',
'VmwareCbtPolicyDetailsResponse',
'WeeklyRetentionFormatResponse',
'WeeklyRetentionScheduleResponse',
'WorkloadInquiryDetailsResponse',
'YearlyRetentionScheduleResponse',
]
@pulumi.output_type
class A2APolicyDetailsResponse(dict):
    """
    Policy details specific to the A2A (Azure-to-Azure) replication provider.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 app_consistent_frequency_in_minutes: Optional[int] = None,
                 crash_consistent_frequency_in_minutes: Optional[int] = None,
                 multi_vm_sync_status: Optional[str] = None,
                 recovery_point_history: Optional[int] = None,
                 recovery_point_threshold_in_minutes: Optional[int] = None):
        """
        Policy details specific to the A2A (Azure-to-Azure) replication provider.

        :param str instance_type: Class-type discriminator; stored as 'A2A' for this type.
        :param int app_consistent_frequency_in_minutes: The app consistent snapshot frequency in minutes.
        :param int crash_consistent_frequency_in_minutes: The crash consistent snapshot frequency in minutes.
        :param str multi_vm_sync_status: A value indicating whether multi-VM sync has to be enabled.
        :param int recovery_point_history: The duration in minutes until which the recovery points need to be stored.
        :param int recovery_point_threshold_in_minutes: The recovery point threshold in minutes.
        """
        # The discriminator for this provider type is fixed, regardless of the argument value.
        pulumi.set(__self__, "instance_type", 'A2A')
        provided = {
            "app_consistent_frequency_in_minutes": app_consistent_frequency_in_minutes,
            "crash_consistent_frequency_in_minutes": crash_consistent_frequency_in_minutes,
            "multi_vm_sync_status": multi_vm_sync_status,
            "recovery_point_history": recovery_point_history,
            "recovery_point_threshold_in_minutes": recovery_point_threshold_in_minutes,
        }
        # Persist only the values the caller actually supplied; omitted fields stay unset.
        for attr, value in provided.items():
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """
        Class-type discriminator; always 'A2A' for this type.
        """
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="appConsistentFrequencyInMinutes")
    def app_consistent_frequency_in_minutes(self) -> Optional[int]:
        """
        App-consistent snapshot frequency, in minutes.
        """
        return pulumi.get(self, "app_consistent_frequency_in_minutes")

    @property
    @pulumi.getter(name="crashConsistentFrequencyInMinutes")
    def crash_consistent_frequency_in_minutes(self) -> Optional[int]:
        """
        Crash-consistent snapshot frequency, in minutes.
        """
        return pulumi.get(self, "crash_consistent_frequency_in_minutes")

    @property
    @pulumi.getter(name="multiVmSyncStatus")
    def multi_vm_sync_status(self) -> Optional[str]:
        """
        Whether multi-VM sync has to be enabled.
        """
        return pulumi.get(self, "multi_vm_sync_status")

    @property
    @pulumi.getter(name="recoveryPointHistory")
    def recovery_point_history(self) -> Optional[int]:
        """
        How long, in minutes, recovery points need to be retained.
        """
        return pulumi.get(self, "recovery_point_history")

    @property
    @pulumi.getter(name="recoveryPointThresholdInMinutes")
    def recovery_point_threshold_in_minutes(self) -> Optional[int]:
        """
        Recovery point threshold, in minutes.
        """
        return pulumi.get(self, "recovery_point_threshold_in_minutes")

    def _translate_property(self, prop):
        # Map a camelCase wire name back to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
# NOTE(review): this pattern matches Pulumi-generated SDK output types; prefer
# regenerating from the schema over hand-editing this class.
@pulumi.output_type
class A2AProtectedDiskDetailsResponse(dict):
    """
    A2A protected disk details.
    """
    def __init__(__self__, *,
                 allowed_disk_level_operation: Optional[Sequence[str]] = None,
                 data_pending_at_source_agent_in_mb: Optional[float] = None,
                 data_pending_in_staging_storage_account_in_mb: Optional[float] = None,
                 dek_key_vault_arm_id: Optional[str] = None,
                 disk_capacity_in_bytes: Optional[int] = None,
                 disk_name: Optional[str] = None,
                 disk_state: Optional[str] = None,
                 disk_type: Optional[str] = None,
                 disk_uri: Optional[str] = None,
                 is_disk_encrypted: Optional[bool] = None,
                 is_disk_key_encrypted: Optional[bool] = None,
                 kek_key_vault_arm_id: Optional[str] = None,
                 key_identifier: Optional[str] = None,
                 monitoring_job_type: Optional[str] = None,
                 monitoring_percentage_completion: Optional[int] = None,
                 primary_disk_azure_storage_account_id: Optional[str] = None,
                 primary_staging_azure_storage_account_id: Optional[str] = None,
                 recovery_azure_storage_account_id: Optional[str] = None,
                 recovery_disk_uri: Optional[str] = None,
                 resync_required: Optional[bool] = None,
                 secret_identifier: Optional[str] = None):
        """
        A2A protected disk details.
        :param Sequence[str] allowed_disk_level_operation: The disk level operations list.
        :param float data_pending_at_source_agent_in_mb: The data pending at source virtual machine in MB.
        :param float data_pending_in_staging_storage_account_in_mb: The data pending for replication in MB at staging account.
        :param str dek_key_vault_arm_id: The KeyVault resource id for secret (BEK).
        :param int disk_capacity_in_bytes: The disk capacity in bytes.
        :param str disk_name: The disk name.
        :param str disk_state: The disk state.
        :param str disk_type: The type of disk.
        :param str disk_uri: The disk uri.
        :param bool is_disk_encrypted: A value indicating whether vm has encrypted os disk or not.
        :param bool is_disk_key_encrypted: A value indicating whether disk key got encrypted or not.
        :param str kek_key_vault_arm_id: The KeyVault resource id for key (KEK).
        :param str key_identifier: The key URL / identifier (KEK).
        :param str monitoring_job_type: The type of the monitoring job. The progress is contained in MonitoringPercentageCompletion property.
        :param int monitoring_percentage_completion: The percentage of the monitoring job. The type of the monitoring job is defined by MonitoringJobType property.
        :param str primary_disk_azure_storage_account_id: The primary disk storage account.
        :param str primary_staging_azure_storage_account_id: The primary staging storage account.
        :param str recovery_azure_storage_account_id: The recovery disk storage account.
        :param str recovery_disk_uri: Recovery disk uri.
        :param bool resync_required: A value indicating whether resync is required for this disk.
        :param str secret_identifier: The secret URL / identifier (BEK).
        """
        # Record only explicitly supplied values; omitted fields are left unset
        # on the underlying dict rather than stored as None.
        if allowed_disk_level_operation is not None:
            pulumi.set(__self__, "allowed_disk_level_operation", allowed_disk_level_operation)
        if data_pending_at_source_agent_in_mb is not None:
            pulumi.set(__self__, "data_pending_at_source_agent_in_mb", data_pending_at_source_agent_in_mb)
        if data_pending_in_staging_storage_account_in_mb is not None:
            pulumi.set(__self__, "data_pending_in_staging_storage_account_in_mb", data_pending_in_staging_storage_account_in_mb)
        if dek_key_vault_arm_id is not None:
            pulumi.set(__self__, "dek_key_vault_arm_id", dek_key_vault_arm_id)
        if disk_capacity_in_bytes is not None:
            pulumi.set(__self__, "disk_capacity_in_bytes", disk_capacity_in_bytes)
        if disk_name is not None:
            pulumi.set(__self__, "disk_name", disk_name)
        if disk_state is not None:
            pulumi.set(__self__, "disk_state", disk_state)
        if disk_type is not None:
            pulumi.set(__self__, "disk_type", disk_type)
        if disk_uri is not None:
            pulumi.set(__self__, "disk_uri", disk_uri)
        if is_disk_encrypted is not None:
            pulumi.set(__self__, "is_disk_encrypted", is_disk_encrypted)
        if is_disk_key_encrypted is not None:
            pulumi.set(__self__, "is_disk_key_encrypted", is_disk_key_encrypted)
        if kek_key_vault_arm_id is not None:
            pulumi.set(__self__, "kek_key_vault_arm_id", kek_key_vault_arm_id)
        if key_identifier is not None:
            pulumi.set(__self__, "key_identifier", key_identifier)
        if monitoring_job_type is not None:
            pulumi.set(__self__, "monitoring_job_type", monitoring_job_type)
        if monitoring_percentage_completion is not None:
            pulumi.set(__self__, "monitoring_percentage_completion", monitoring_percentage_completion)
        if primary_disk_azure_storage_account_id is not None:
            pulumi.set(__self__, "primary_disk_azure_storage_account_id", primary_disk_azure_storage_account_id)
        if primary_staging_azure_storage_account_id is not None:
            pulumi.set(__self__, "primary_staging_azure_storage_account_id", primary_staging_azure_storage_account_id)
        if recovery_azure_storage_account_id is not None:
            pulumi.set(__self__, "recovery_azure_storage_account_id", recovery_azure_storage_account_id)
        if recovery_disk_uri is not None:
            pulumi.set(__self__, "recovery_disk_uri", recovery_disk_uri)
        if resync_required is not None:
            pulumi.set(__self__, "resync_required", resync_required)
        if secret_identifier is not None:
            pulumi.set(__self__, "secret_identifier", secret_identifier)

    @property
    @pulumi.getter(name="allowedDiskLevelOperation")
    def allowed_disk_level_operation(self) -> Optional[Sequence[str]]:
        """
        The disk level operations list.
        """
        return pulumi.get(self, "allowed_disk_level_operation")

    @property
    @pulumi.getter(name="dataPendingAtSourceAgentInMB")
    def data_pending_at_source_agent_in_mb(self) -> Optional[float]:
        """
        The data pending at source virtual machine in MB.
        """
        return pulumi.get(self, "data_pending_at_source_agent_in_mb")

    @property
    @pulumi.getter(name="dataPendingInStagingStorageAccountInMB")
    def data_pending_in_staging_storage_account_in_mb(self) -> Optional[float]:
        """
        The data pending for replication in MB at staging account.
        """
        return pulumi.get(self, "data_pending_in_staging_storage_account_in_mb")

    @property
    @pulumi.getter(name="dekKeyVaultArmId")
    def dek_key_vault_arm_id(self) -> Optional[str]:
        """
        The KeyVault resource id for secret (BEK).
        """
        return pulumi.get(self, "dek_key_vault_arm_id")

    @property
    @pulumi.getter(name="diskCapacityInBytes")
    def disk_capacity_in_bytes(self) -> Optional[int]:
        """
        The disk capacity in bytes.
        """
        return pulumi.get(self, "disk_capacity_in_bytes")

    @property
    @pulumi.getter(name="diskName")
    def disk_name(self) -> Optional[str]:
        """
        The disk name.
        """
        return pulumi.get(self, "disk_name")

    @property
    @pulumi.getter(name="diskState")
    def disk_state(self) -> Optional[str]:
        """
        The disk state.
        """
        return pulumi.get(self, "disk_state")

    @property
    @pulumi.getter(name="diskType")
    def disk_type(self) -> Optional[str]:
        """
        The type of disk.
        """
        return pulumi.get(self, "disk_type")

    @property
    @pulumi.getter(name="diskUri")
    def disk_uri(self) -> Optional[str]:
        """
        The disk uri.
        """
        return pulumi.get(self, "disk_uri")

    @property
    @pulumi.getter(name="isDiskEncrypted")
    def is_disk_encrypted(self) -> Optional[bool]:
        """
        A value indicating whether vm has encrypted os disk or not.
        """
        return pulumi.get(self, "is_disk_encrypted")

    @property
    @pulumi.getter(name="isDiskKeyEncrypted")
    def is_disk_key_encrypted(self) -> Optional[bool]:
        """
        A value indicating whether disk key got encrypted or not.
        """
        return pulumi.get(self, "is_disk_key_encrypted")

    @property
    @pulumi.getter(name="kekKeyVaultArmId")
    def kek_key_vault_arm_id(self) -> Optional[str]:
        """
        The KeyVault resource id for key (KEK).
        """
        return pulumi.get(self, "kek_key_vault_arm_id")

    @property
    @pulumi.getter(name="keyIdentifier")
    def key_identifier(self) -> Optional[str]:
        """
        The key URL / identifier (KEK).
        """
        return pulumi.get(self, "key_identifier")

    @property
    @pulumi.getter(name="monitoringJobType")
    def monitoring_job_type(self) -> Optional[str]:
        """
        The type of the monitoring job. The progress is contained in MonitoringPercentageCompletion property.
        """
        return pulumi.get(self, "monitoring_job_type")

    @property
    @pulumi.getter(name="monitoringPercentageCompletion")
    def monitoring_percentage_completion(self) -> Optional[int]:
        """
        The percentage of the monitoring job. The type of the monitoring job is defined by MonitoringJobType property.
        """
        return pulumi.get(self, "monitoring_percentage_completion")

    @property
    @pulumi.getter(name="primaryDiskAzureStorageAccountId")
    def primary_disk_azure_storage_account_id(self) -> Optional[str]:
        """
        The primary disk storage account.
        """
        return pulumi.get(self, "primary_disk_azure_storage_account_id")

    @property
    @pulumi.getter(name="primaryStagingAzureStorageAccountId")
    def primary_staging_azure_storage_account_id(self) -> Optional[str]:
        """
        The primary staging storage account.
        """
        return pulumi.get(self, "primary_staging_azure_storage_account_id")

    @property
    @pulumi.getter(name="recoveryAzureStorageAccountId")
    def recovery_azure_storage_account_id(self) -> Optional[str]:
        """
        The recovery disk storage account.
        """
        return pulumi.get(self, "recovery_azure_storage_account_id")

    @property
    @pulumi.getter(name="recoveryDiskUri")
    def recovery_disk_uri(self) -> Optional[str]:
        """
        Recovery disk uri.
        """
        return pulumi.get(self, "recovery_disk_uri")

    @property
    @pulumi.getter(name="resyncRequired")
    def resync_required(self) -> Optional[bool]:
        """
        A value indicating whether resync is required for this disk.
        """
        return pulumi.get(self, "resync_required")

    @property
    @pulumi.getter(name="secretIdentifier")
    def secret_identifier(self) -> Optional[str]:
        """
        The secret URL / identifier (BEK).
        """
        return pulumi.get(self, "secret_identifier")

    def _translate_property(self, prop):
        # Translate a camelCase wire name to the snake_case attribute name used here.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
# NOTE(review): this pattern matches Pulumi-generated SDK output types; prefer
# regenerating from the schema over hand-editing this class.
@pulumi.output_type
class A2AProtectedManagedDiskDetailsResponse(dict):
    """
    A2A protected managed disk details.
    """
    def __init__(__self__, *,
                 allowed_disk_level_operation: Optional[Sequence[str]] = None,
                 data_pending_at_source_agent_in_mb: Optional[float] = None,
                 data_pending_in_staging_storage_account_in_mb: Optional[float] = None,
                 dek_key_vault_arm_id: Optional[str] = None,
                 disk_capacity_in_bytes: Optional[int] = None,
                 disk_id: Optional[str] = None,
                 disk_name: Optional[str] = None,
                 disk_state: Optional[str] = None,
                 disk_type: Optional[str] = None,
                 failover_disk_name: Optional[str] = None,
                 is_disk_encrypted: Optional[bool] = None,
                 is_disk_key_encrypted: Optional[bool] = None,
                 kek_key_vault_arm_id: Optional[str] = None,
                 key_identifier: Optional[str] = None,
                 monitoring_job_type: Optional[str] = None,
                 monitoring_percentage_completion: Optional[int] = None,
                 primary_staging_azure_storage_account_id: Optional[str] = None,
                 recovery_disk_encryption_set_id: Optional[str] = None,
                 recovery_replica_disk_account_type: Optional[str] = None,
                 recovery_replica_disk_id: Optional[str] = None,
                 recovery_resource_group_id: Optional[str] = None,
                 recovery_target_disk_account_type: Optional[str] = None,
                 recovery_target_disk_id: Optional[str] = None,
                 resync_required: Optional[bool] = None,
                 secret_identifier: Optional[str] = None,
                 tfo_disk_name: Optional[str] = None):
        """
        A2A protected managed disk details.
        :param Sequence[str] allowed_disk_level_operation: The disk level operations list.
        :param float data_pending_at_source_agent_in_mb: The data pending at source virtual machine in MB.
        :param float data_pending_in_staging_storage_account_in_mb: The data pending for replication in MB at staging account.
        :param str dek_key_vault_arm_id: The KeyVault resource id for secret (BEK).
        :param int disk_capacity_in_bytes: The disk capacity in bytes.
        :param str disk_id: The managed disk Arm id.
        :param str disk_name: The disk name.
        :param str disk_state: The disk state.
        :param str disk_type: The type of disk.
        :param str failover_disk_name: The failover name for the managed disk.
        :param bool is_disk_encrypted: A value indicating whether vm has encrypted os disk or not.
        :param bool is_disk_key_encrypted: A value indicating whether disk key got encrypted or not.
        :param str kek_key_vault_arm_id: The KeyVault resource id for key (KEK).
        :param str key_identifier: The key URL / identifier (KEK).
        :param str monitoring_job_type: The type of the monitoring job. The progress is contained in MonitoringPercentageCompletion property.
        :param int monitoring_percentage_completion: The percentage of the monitoring job. The type of the monitoring job is defined by MonitoringJobType property.
        :param str primary_staging_azure_storage_account_id: The primary staging storage account.
        :param str recovery_disk_encryption_set_id: The recovery disk encryption set Id.
        :param str recovery_replica_disk_account_type: The replica disk type. Its an optional value and will be same as source disk type if not user provided.
        :param str recovery_replica_disk_id: Recovery replica disk Arm Id.
        :param str recovery_resource_group_id: The recovery disk resource group Arm Id.
        :param str recovery_target_disk_account_type: The target disk type after failover. Its an optional value and will be same as source disk type if not user provided.
        :param str recovery_target_disk_id: Recovery target disk Arm Id.
        :param bool resync_required: A value indicating whether resync is required for this disk.
        :param str secret_identifier: The secret URL / identifier (BEK).
        :param str tfo_disk_name: The test failover name for the managed disk.
        """
        # Record only explicitly supplied values; omitted fields are left unset
        # on the underlying dict rather than stored as None.
        if allowed_disk_level_operation is not None:
            pulumi.set(__self__, "allowed_disk_level_operation", allowed_disk_level_operation)
        if data_pending_at_source_agent_in_mb is not None:
            pulumi.set(__self__, "data_pending_at_source_agent_in_mb", data_pending_at_source_agent_in_mb)
        if data_pending_in_staging_storage_account_in_mb is not None:
            pulumi.set(__self__, "data_pending_in_staging_storage_account_in_mb", data_pending_in_staging_storage_account_in_mb)
        if dek_key_vault_arm_id is not None:
            pulumi.set(__self__, "dek_key_vault_arm_id", dek_key_vault_arm_id)
        if disk_capacity_in_bytes is not None:
            pulumi.set(__self__, "disk_capacity_in_bytes", disk_capacity_in_bytes)
        if disk_id is not None:
            pulumi.set(__self__, "disk_id", disk_id)
        if disk_name is not None:
            pulumi.set(__self__, "disk_name", disk_name)
        if disk_state is not None:
            pulumi.set(__self__, "disk_state", disk_state)
        if disk_type is not None:
            pulumi.set(__self__, "disk_type", disk_type)
        if failover_disk_name is not None:
            pulumi.set(__self__, "failover_disk_name", failover_disk_name)
        if is_disk_encrypted is not None:
            pulumi.set(__self__, "is_disk_encrypted", is_disk_encrypted)
        if is_disk_key_encrypted is not None:
            pulumi.set(__self__, "is_disk_key_encrypted", is_disk_key_encrypted)
        if kek_key_vault_arm_id is not None:
            pulumi.set(__self__, "kek_key_vault_arm_id", kek_key_vault_arm_id)
        if key_identifier is not None:
            pulumi.set(__self__, "key_identifier", key_identifier)
        if monitoring_job_type is not None:
            pulumi.set(__self__, "monitoring_job_type", monitoring_job_type)
        if monitoring_percentage_completion is not None:
            pulumi.set(__self__, "monitoring_percentage_completion", monitoring_percentage_completion)
        if primary_staging_azure_storage_account_id is not None:
            pulumi.set(__self__, "primary_staging_azure_storage_account_id", primary_staging_azure_storage_account_id)
        if recovery_disk_encryption_set_id is not None:
            pulumi.set(__self__, "recovery_disk_encryption_set_id", recovery_disk_encryption_set_id)
        if recovery_replica_disk_account_type is not None:
            pulumi.set(__self__, "recovery_replica_disk_account_type", recovery_replica_disk_account_type)
        if recovery_replica_disk_id is not None:
            pulumi.set(__self__, "recovery_replica_disk_id", recovery_replica_disk_id)
        if recovery_resource_group_id is not None:
            pulumi.set(__self__, "recovery_resource_group_id", recovery_resource_group_id)
        if recovery_target_disk_account_type is not None:
            pulumi.set(__self__, "recovery_target_disk_account_type", recovery_target_disk_account_type)
        if recovery_target_disk_id is not None:
            pulumi.set(__self__, "recovery_target_disk_id", recovery_target_disk_id)
        if resync_required is not None:
            pulumi.set(__self__, "resync_required", resync_required)
        if secret_identifier is not None:
            pulumi.set(__self__, "secret_identifier", secret_identifier)
        if tfo_disk_name is not None:
            pulumi.set(__self__, "tfo_disk_name", tfo_disk_name)

    @property
    @pulumi.getter(name="allowedDiskLevelOperation")
    def allowed_disk_level_operation(self) -> Optional[Sequence[str]]:
        """
        The disk level operations list.
        """
        return pulumi.get(self, "allowed_disk_level_operation")

    @property
    @pulumi.getter(name="dataPendingAtSourceAgentInMB")
    def data_pending_at_source_agent_in_mb(self) -> Optional[float]:
        """
        The data pending at source virtual machine in MB.
        """
        return pulumi.get(self, "data_pending_at_source_agent_in_mb")

    @property
    @pulumi.getter(name="dataPendingInStagingStorageAccountInMB")
    def data_pending_in_staging_storage_account_in_mb(self) -> Optional[float]:
        """
        The data pending for replication in MB at staging account.
        """
        return pulumi.get(self, "data_pending_in_staging_storage_account_in_mb")

    @property
    @pulumi.getter(name="dekKeyVaultArmId")
    def dek_key_vault_arm_id(self) -> Optional[str]:
        """
        The KeyVault resource id for secret (BEK).
        """
        return pulumi.get(self, "dek_key_vault_arm_id")

    @property
    @pulumi.getter(name="diskCapacityInBytes")
    def disk_capacity_in_bytes(self) -> Optional[int]:
        """
        The disk capacity in bytes.
        """
        return pulumi.get(self, "disk_capacity_in_bytes")

    @property
    @pulumi.getter(name="diskId")
    def disk_id(self) -> Optional[str]:
        """
        The managed disk Arm id.
        """
        return pulumi.get(self, "disk_id")

    @property
    @pulumi.getter(name="diskName")
    def disk_name(self) -> Optional[str]:
        """
        The disk name.
        """
        return pulumi.get(self, "disk_name")

    @property
    @pulumi.getter(name="diskState")
    def disk_state(self) -> Optional[str]:
        """
        The disk state.
        """
        return pulumi.get(self, "disk_state")

    @property
    @pulumi.getter(name="diskType")
    def disk_type(self) -> Optional[str]:
        """
        The type of disk.
        """
        return pulumi.get(self, "disk_type")

    @property
    @pulumi.getter(name="failoverDiskName")
    def failover_disk_name(self) -> Optional[str]:
        """
        The failover name for the managed disk.
        """
        return pulumi.get(self, "failover_disk_name")

    @property
    @pulumi.getter(name="isDiskEncrypted")
    def is_disk_encrypted(self) -> Optional[bool]:
        """
        A value indicating whether vm has encrypted os disk or not.
        """
        return pulumi.get(self, "is_disk_encrypted")

    @property
    @pulumi.getter(name="isDiskKeyEncrypted")
    def is_disk_key_encrypted(self) -> Optional[bool]:
        """
        A value indicating whether disk key got encrypted or not.
        """
        return pulumi.get(self, "is_disk_key_encrypted")

    @property
    @pulumi.getter(name="kekKeyVaultArmId")
    def kek_key_vault_arm_id(self) -> Optional[str]:
        """
        The KeyVault resource id for key (KEK).
        """
        return pulumi.get(self, "kek_key_vault_arm_id")

    @property
    @pulumi.getter(name="keyIdentifier")
    def key_identifier(self) -> Optional[str]:
        """
        The key URL / identifier (KEK).
        """
        return pulumi.get(self, "key_identifier")

    @property
    @pulumi.getter(name="monitoringJobType")
    def monitoring_job_type(self) -> Optional[str]:
        """
        The type of the monitoring job. The progress is contained in MonitoringPercentageCompletion property.
        """
        return pulumi.get(self, "monitoring_job_type")

    @property
    @pulumi.getter(name="monitoringPercentageCompletion")
    def monitoring_percentage_completion(self) -> Optional[int]:
        """
        The percentage of the monitoring job. The type of the monitoring job is defined by MonitoringJobType property.
        """
        return pulumi.get(self, "monitoring_percentage_completion")

    @property
    @pulumi.getter(name="primaryStagingAzureStorageAccountId")
    def primary_staging_azure_storage_account_id(self) -> Optional[str]:
        """
        The primary staging storage account.
        """
        return pulumi.get(self, "primary_staging_azure_storage_account_id")

    @property
    @pulumi.getter(name="recoveryDiskEncryptionSetId")
    def recovery_disk_encryption_set_id(self) -> Optional[str]:
        """
        The recovery disk encryption set Id.
        """
        return pulumi.get(self, "recovery_disk_encryption_set_id")

    @property
    @pulumi.getter(name="recoveryReplicaDiskAccountType")
    def recovery_replica_disk_account_type(self) -> Optional[str]:
        """
        The replica disk type. Its an optional value and will be same as source disk type if not user provided.
        """
        return pulumi.get(self, "recovery_replica_disk_account_type")

    @property
    @pulumi.getter(name="recoveryReplicaDiskId")
    def recovery_replica_disk_id(self) -> Optional[str]:
        """
        Recovery replica disk Arm Id.
        """
        return pulumi.get(self, "recovery_replica_disk_id")

    @property
    @pulumi.getter(name="recoveryResourceGroupId")
    def recovery_resource_group_id(self) -> Optional[str]:
        """
        The recovery disk resource group Arm Id.
        """
        return pulumi.get(self, "recovery_resource_group_id")

    @property
    @pulumi.getter(name="recoveryTargetDiskAccountType")
    def recovery_target_disk_account_type(self) -> Optional[str]:
        """
        The target disk type after failover. Its an optional value and will be same as source disk type if not user provided.
        """
        return pulumi.get(self, "recovery_target_disk_account_type")

    @property
    @pulumi.getter(name="recoveryTargetDiskId")
    def recovery_target_disk_id(self) -> Optional[str]:
        """
        Recovery target disk Arm Id.
        """
        return pulumi.get(self, "recovery_target_disk_id")

    @property
    @pulumi.getter(name="resyncRequired")
    def resync_required(self) -> Optional[bool]:
        """
        A value indicating whether resync is required for this disk.
        """
        return pulumi.get(self, "resync_required")

    @property
    @pulumi.getter(name="secretIdentifier")
    def secret_identifier(self) -> Optional[str]:
        """
        The secret URL / identifier (BEK).
        """
        return pulumi.get(self, "secret_identifier")

    @property
    @pulumi.getter(name="tfoDiskName")
    def tfo_disk_name(self) -> Optional[str]:
        """
        The test failover name for the managed disk.
        """
        return pulumi.get(self, "tfo_disk_name")

    def _translate_property(self, prop):
        # Translate a camelCase wire name to the snake_case attribute name used here.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class A2AProtectionContainerMappingDetailsResponse(dict):
    """
    Protection-container mapping settings specific to the A2A provider.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 agent_auto_update_status: Optional[str] = None,
                 automation_account_arm_id: Optional[str] = None,
                 job_schedule_name: Optional[str] = None,
                 schedule_name: Optional[str] = None):
        """
        Protection-container mapping settings specific to the A2A provider.

        :param str instance_type: Class-type discriminator; stored as 'A2A' for this type.
        :param str agent_auto_update_status: Whether automatic agent update is enabled.
        :param str automation_account_arm_id: The automation account arm id.
        :param str job_schedule_name: The job schedule arm name.
        :param str schedule_name: The schedule arm name.
        """
        # The discriminator for this provider type is fixed, regardless of the argument value.
        pulumi.set(__self__, "instance_type", 'A2A')
        provided = {
            "agent_auto_update_status": agent_auto_update_status,
            "automation_account_arm_id": automation_account_arm_id,
            "job_schedule_name": job_schedule_name,
            "schedule_name": schedule_name,
        }
        # Persist only the values the caller actually supplied; omitted fields stay unset.
        for attr, value in provided.items():
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """
        Class-type discriminator; always 'A2A' for this type.
        """
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="agentAutoUpdateStatus")
    def agent_auto_update_status(self) -> Optional[str]:
        """
        Whether automatic agent update is enabled.
        """
        return pulumi.get(self, "agent_auto_update_status")

    @property
    @pulumi.getter(name="automationAccountArmId")
    def automation_account_arm_id(self) -> Optional[str]:
        """
        The automation account arm id.
        """
        return pulumi.get(self, "automation_account_arm_id")

    @property
    @pulumi.getter(name="jobScheduleName")
    def job_schedule_name(self) -> Optional[str]:
        """
        The job schedule arm name.
        """
        return pulumi.get(self, "job_schedule_name")

    @property
    @pulumi.getter(name="scheduleName")
    def schedule_name(self) -> Optional[str]:
        """
        The schedule arm name.
        """
        return pulumi.get(self, "schedule_name")

    def _translate_property(self, prop):
        # Map a camelCase wire name back to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class A2AReplicationDetailsResponse(dict):
    """A2A provider specific settings."""

    def __init__(__self__, *,
                 initial_primary_fabric_location: str,
                 initial_primary_zone: str,
                 initial_recovery_fabric_location: str,
                 initial_recovery_zone: str,
                 instance_type: str,
                 vm_encryption_type: str,
                 agent_version: Optional[str] = None,
                 fabric_object_id: Optional[str] = None,
                 is_replication_agent_update_required: Optional[bool] = None,
                 last_heartbeat: Optional[str] = None,
                 last_rpo_calculated_time: Optional[str] = None,
                 lifecycle_id: Optional[str] = None,
                 management_id: Optional[str] = None,
                 monitoring_job_type: Optional[str] = None,
                 monitoring_percentage_completion: Optional[int] = None,
                 multi_vm_group_create_option: Optional[str] = None,
                 multi_vm_group_id: Optional[str] = None,
                 multi_vm_group_name: Optional[str] = None,
                 os_type: Optional[str] = None,
                 primary_fabric_location: Optional[str] = None,
                 protected_disks: Optional[Sequence['outputs.A2AProtectedDiskDetailsResponse']] = None,
                 protected_managed_disks: Optional[Sequence['outputs.A2AProtectedManagedDiskDetailsResponse']] = None,
                 recovery_availability_set: Optional[str] = None,
                 recovery_availability_zone: Optional[str] = None,
                 recovery_azure_resource_group_id: Optional[str] = None,
                 recovery_azure_vm_name: Optional[str] = None,
                 recovery_azure_vm_size: Optional[str] = None,
                 recovery_boot_diag_storage_account_id: Optional[str] = None,
                 recovery_cloud_service: Optional[str] = None,
                 recovery_fabric_location: Optional[str] = None,
                 recovery_fabric_object_id: Optional[str] = None,
                 recovery_proximity_placement_group_id: Optional[str] = None,
                 rpo_in_seconds: Optional[int] = None,
                 selected_recovery_azure_network_id: Optional[str] = None,
                 selected_tfo_azure_network_id: Optional[str] = None,
                 test_failover_recovery_fabric_object_id: Optional[str] = None,
                 tfo_azure_vm_name: Optional[str] = None,
                 unprotected_disks: Optional[Sequence['outputs.A2AUnprotectedDiskDetailsResponse']] = None,
                 vm_nics: Optional[Sequence['outputs.VMNicDetailsResponse']] = None,
                 vm_protection_state: Optional[str] = None,
                 vm_protection_state_description: Optional[str] = None,
                 vm_synced_config_details: Optional['outputs.AzureToAzureVmSyncedConfigDetailsResponse'] = None):
        """Store the A2A provider specific replication details.

        The six required values are always recorded; the ``instance_type``
        discriminator is pinned to ``'A2A'`` regardless of the argument, and
        each optional value is recorded only when it is not ``None``.
        """
        # Required fields first (instance_type is forced to the 'A2A' discriminator).
        pulumi.set(__self__, "initial_primary_fabric_location", initial_primary_fabric_location)
        pulumi.set(__self__, "initial_primary_zone", initial_primary_zone)
        pulumi.set(__self__, "initial_recovery_fabric_location", initial_recovery_fabric_location)
        pulumi.set(__self__, "initial_recovery_zone", initial_recovery_zone)
        pulumi.set(__self__, "instance_type", 'A2A')
        pulumi.set(__self__, "vm_encryption_type", vm_encryption_type)
        # Optional fields: record only the ones the caller actually supplied.
        for _key, _val in (
            ("agent_version", agent_version),
            ("fabric_object_id", fabric_object_id),
            ("is_replication_agent_update_required", is_replication_agent_update_required),
            ("last_heartbeat", last_heartbeat),
            ("last_rpo_calculated_time", last_rpo_calculated_time),
            ("lifecycle_id", lifecycle_id),
            ("management_id", management_id),
            ("monitoring_job_type", monitoring_job_type),
            ("monitoring_percentage_completion", monitoring_percentage_completion),
            ("multi_vm_group_create_option", multi_vm_group_create_option),
            ("multi_vm_group_id", multi_vm_group_id),
            ("multi_vm_group_name", multi_vm_group_name),
            ("os_type", os_type),
            ("primary_fabric_location", primary_fabric_location),
            ("protected_disks", protected_disks),
            ("protected_managed_disks", protected_managed_disks),
            ("recovery_availability_set", recovery_availability_set),
            ("recovery_availability_zone", recovery_availability_zone),
            ("recovery_azure_resource_group_id", recovery_azure_resource_group_id),
            ("recovery_azure_vm_name", recovery_azure_vm_name),
            ("recovery_azure_vm_size", recovery_azure_vm_size),
            ("recovery_boot_diag_storage_account_id", recovery_boot_diag_storage_account_id),
            ("recovery_cloud_service", recovery_cloud_service),
            ("recovery_fabric_location", recovery_fabric_location),
            ("recovery_fabric_object_id", recovery_fabric_object_id),
            ("recovery_proximity_placement_group_id", recovery_proximity_placement_group_id),
            ("rpo_in_seconds", rpo_in_seconds),
            ("selected_recovery_azure_network_id", selected_recovery_azure_network_id),
            ("selected_tfo_azure_network_id", selected_tfo_azure_network_id),
            ("test_failover_recovery_fabric_object_id", test_failover_recovery_fabric_object_id),
            ("tfo_azure_vm_name", tfo_azure_vm_name),
            ("unprotected_disks", unprotected_disks),
            ("vm_nics", vm_nics),
            ("vm_protection_state", vm_protection_state),
            ("vm_protection_state_description", vm_protection_state_description),
            ("vm_synced_config_details", vm_synced_config_details),
        ):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="initialPrimaryFabricLocation")
    def initial_primary_fabric_location(self) -> str:
        """The initial primary fabric location."""
        return pulumi.get(self, "initial_primary_fabric_location")

    @property
    @pulumi.getter(name="initialPrimaryZone")
    def initial_primary_zone(self) -> str:
        """The initial primary availability zone."""
        return pulumi.get(self, "initial_primary_zone")

    @property
    @pulumi.getter(name="initialRecoveryFabricLocation")
    def initial_recovery_fabric_location(self) -> str:
        """The initial recovery fabric location."""
        return pulumi.get(self, "initial_recovery_fabric_location")

    @property
    @pulumi.getter(name="initialRecoveryZone")
    def initial_recovery_zone(self) -> str:
        """The initial recovery availability zone."""
        return pulumi.get(self, "initial_recovery_zone")

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Gets the Instance type."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="vmEncryptionType")
    def vm_encryption_type(self) -> str:
        """The encryption type of the VM."""
        return pulumi.get(self, "vm_encryption_type")

    @property
    @pulumi.getter(name="agentVersion")
    def agent_version(self) -> Optional[str]:
        """The agent version."""
        return pulumi.get(self, "agent_version")

    @property
    @pulumi.getter(name="fabricObjectId")
    def fabric_object_id(self) -> Optional[str]:
        """The fabric specific object Id of the virtual machine."""
        return pulumi.get(self, "fabric_object_id")

    @property
    @pulumi.getter(name="isReplicationAgentUpdateRequired")
    def is_replication_agent_update_required(self) -> Optional[bool]:
        """A value indicating whether replication agent update is required."""
        return pulumi.get(self, "is_replication_agent_update_required")

    @property
    @pulumi.getter(name="lastHeartbeat")
    def last_heartbeat(self) -> Optional[str]:
        """The last heartbeat received from the source server."""
        return pulumi.get(self, "last_heartbeat")

    @property
    @pulumi.getter(name="lastRpoCalculatedTime")
    def last_rpo_calculated_time(self) -> Optional[str]:
        """The time (in UTC) when the last RPO value was calculated by Protection Service."""
        return pulumi.get(self, "last_rpo_calculated_time")

    @property
    @pulumi.getter(name="lifecycleId")
    def lifecycle_id(self) -> Optional[str]:
        """An id for the PE that survives actions (e.g. switch protection) which
        change the backing PE/CPE objects, providing continuity even when other
        internal Ids/ARM Id change."""
        return pulumi.get(self, "lifecycle_id")

    @property
    @pulumi.getter(name="managementId")
    def management_id(self) -> Optional[str]:
        """The management Id."""
        return pulumi.get(self, "management_id")

    @property
    @pulumi.getter(name="monitoringJobType")
    def monitoring_job_type(self) -> Optional[str]:
        """The type of the monitoring job; progress is in MonitoringPercentageCompletion."""
        return pulumi.get(self, "monitoring_job_type")

    @property
    @pulumi.getter(name="monitoringPercentageCompletion")
    def monitoring_percentage_completion(self) -> Optional[int]:
        """The percentage of the monitoring job whose type is MonitoringJobType."""
        return pulumi.get(self, "monitoring_percentage_completion")

    @property
    @pulumi.getter(name="multiVmGroupCreateOption")
    def multi_vm_group_create_option(self) -> Optional[str]:
        """Whether Multi VM group is auto created or specified by user."""
        return pulumi.get(self, "multi_vm_group_create_option")

    @property
    @pulumi.getter(name="multiVmGroupId")
    def multi_vm_group_id(self) -> Optional[str]:
        """The multi vm group Id."""
        return pulumi.get(self, "multi_vm_group_id")

    @property
    @pulumi.getter(name="multiVmGroupName")
    def multi_vm_group_name(self) -> Optional[str]:
        """The multi vm group name."""
        return pulumi.get(self, "multi_vm_group_name")

    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[str]:
        """The type of operating system."""
        return pulumi.get(self, "os_type")

    @property
    @pulumi.getter(name="primaryFabricLocation")
    def primary_fabric_location(self) -> Optional[str]:
        """Primary fabric location."""
        return pulumi.get(self, "primary_fabric_location")

    @property
    @pulumi.getter(name="protectedDisks")
    def protected_disks(self) -> Optional[Sequence['outputs.A2AProtectedDiskDetailsResponse']]:
        """The list of protected disks."""
        return pulumi.get(self, "protected_disks")

    @property
    @pulumi.getter(name="protectedManagedDisks")
    def protected_managed_disks(self) -> Optional[Sequence['outputs.A2AProtectedManagedDiskDetailsResponse']]:
        """The list of protected managed disks."""
        return pulumi.get(self, "protected_managed_disks")

    @property
    @pulumi.getter(name="recoveryAvailabilitySet")
    def recovery_availability_set(self) -> Optional[str]:
        """The recovery availability set."""
        return pulumi.get(self, "recovery_availability_set")

    @property
    @pulumi.getter(name="recoveryAvailabilityZone")
    def recovery_availability_zone(self) -> Optional[str]:
        """The recovery availability zone."""
        return pulumi.get(self, "recovery_availability_zone")

    @property
    @pulumi.getter(name="recoveryAzureResourceGroupId")
    def recovery_azure_resource_group_id(self) -> Optional[str]:
        """The recovery resource group."""
        return pulumi.get(self, "recovery_azure_resource_group_id")

    @property
    @pulumi.getter(name="recoveryAzureVMName")
    def recovery_azure_vm_name(self) -> Optional[str]:
        """The name of recovery virtual machine."""
        return pulumi.get(self, "recovery_azure_vm_name")

    @property
    @pulumi.getter(name="recoveryAzureVMSize")
    def recovery_azure_vm_size(self) -> Optional[str]:
        """The size of recovery virtual machine."""
        return pulumi.get(self, "recovery_azure_vm_size")

    @property
    @pulumi.getter(name="recoveryBootDiagStorageAccountId")
    def recovery_boot_diag_storage_account_id(self) -> Optional[str]:
        """The recovery boot diagnostic storage account Arm Id."""
        return pulumi.get(self, "recovery_boot_diag_storage_account_id")

    @property
    @pulumi.getter(name="recoveryCloudService")
    def recovery_cloud_service(self) -> Optional[str]:
        """The recovery cloud service."""
        return pulumi.get(self, "recovery_cloud_service")

    @property
    @pulumi.getter(name="recoveryFabricLocation")
    def recovery_fabric_location(self) -> Optional[str]:
        """The recovery fabric location."""
        return pulumi.get(self, "recovery_fabric_location")

    @property
    @pulumi.getter(name="recoveryFabricObjectId")
    def recovery_fabric_object_id(self) -> Optional[str]:
        """The recovery fabric object Id."""
        return pulumi.get(self, "recovery_fabric_object_id")

    @property
    @pulumi.getter(name="recoveryProximityPlacementGroupId")
    def recovery_proximity_placement_group_id(self) -> Optional[str]:
        """The recovery proximity placement group Id."""
        return pulumi.get(self, "recovery_proximity_placement_group_id")

    @property
    @pulumi.getter(name="rpoInSeconds")
    def rpo_in_seconds(self) -> Optional[int]:
        """The last RPO value in seconds."""
        return pulumi.get(self, "rpo_in_seconds")

    @property
    @pulumi.getter(name="selectedRecoveryAzureNetworkId")
    def selected_recovery_azure_network_id(self) -> Optional[str]:
        """The recovery virtual network."""
        return pulumi.get(self, "selected_recovery_azure_network_id")

    @property
    @pulumi.getter(name="selectedTfoAzureNetworkId")
    def selected_tfo_azure_network_id(self) -> Optional[str]:
        """The test failover virtual network."""
        return pulumi.get(self, "selected_tfo_azure_network_id")

    @property
    @pulumi.getter(name="testFailoverRecoveryFabricObjectId")
    def test_failover_recovery_fabric_object_id(self) -> Optional[str]:
        """The test failover fabric object Id."""
        return pulumi.get(self, "test_failover_recovery_fabric_object_id")

    @property
    @pulumi.getter(name="tfoAzureVMName")
    def tfo_azure_vm_name(self) -> Optional[str]:
        """The test failover VM name."""
        return pulumi.get(self, "tfo_azure_vm_name")

    @property
    @pulumi.getter(name="unprotectedDisks")
    def unprotected_disks(self) -> Optional[Sequence['outputs.A2AUnprotectedDiskDetailsResponse']]:
        """The list of unprotected disks."""
        return pulumi.get(self, "unprotected_disks")

    @property
    @pulumi.getter(name="vmNics")
    def vm_nics(self) -> Optional[Sequence['outputs.VMNicDetailsResponse']]:
        """The virtual machine nic details."""
        return pulumi.get(self, "vm_nics")

    @property
    @pulumi.getter(name="vmProtectionState")
    def vm_protection_state(self) -> Optional[str]:
        """The protection state for the vm."""
        return pulumi.get(self, "vm_protection_state")

    @property
    @pulumi.getter(name="vmProtectionStateDescription")
    def vm_protection_state_description(self) -> Optional[str]:
        """The protection state description for the vm."""
        return pulumi.get(self, "vm_protection_state_description")

    @property
    @pulumi.getter(name="vmSyncedConfigDetails")
    def vm_synced_config_details(self) -> Optional['outputs.AzureToAzureVmSyncedConfigDetailsResponse']:
        """The synced configuration details."""
        return pulumi.get(self, "vm_synced_config_details")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case form, falling back to the input."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class A2AUnprotectedDiskDetailsResponse(dict):
    """A2A unprotected disk details."""

    def __init__(__self__, *,
                 disk_lun_id: Optional[int] = None):
        """Record the source lun Id of the unprotected data disk, when supplied."""
        if disk_lun_id is not None:
            pulumi.set(__self__, "disk_lun_id", disk_lun_id)

    @property
    @pulumi.getter(name="diskLunId")
    def disk_lun_id(self) -> Optional[int]:
        """The source lun Id for the data disk."""
        return pulumi.get(self, "disk_lun_id")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case form, falling back to the input."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class AgentDetailsResponse(dict):
    """Agent details."""

    def __init__(__self__, *,
                 agent_id: str,
                 bios_id: str,
                 disks: Sequence['outputs.AgentDiskDetailsResponse'],
                 fqdn: str,
                 machine_id: str):
        """Record the agent's identity, BIOS Id, disk details, FQDN and machine Id."""
        # All fields are required, so they are stored unconditionally.
        for _key, _val in (
            ("agent_id", agent_id),
            ("bios_id", bios_id),
            ("disks", disks),
            ("fqdn", fqdn),
            ("machine_id", machine_id),
        ):
            pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="agentId")
    def agent_id(self) -> str:
        """The Id of the agent running on the server."""
        return pulumi.get(self, "agent_id")

    @property
    @pulumi.getter(name="biosId")
    def bios_id(self) -> str:
        """The machine BIOS Id."""
        return pulumi.get(self, "bios_id")

    @property
    @pulumi.getter
    def disks(self) -> Sequence['outputs.AgentDiskDetailsResponse']:
        """The details of agent disks."""
        return pulumi.get(self, "disks")

    @property
    @pulumi.getter
    def fqdn(self) -> str:
        """The machine FQDN."""
        return pulumi.get(self, "fqdn")

    @property
    @pulumi.getter(name="machineId")
    def machine_id(self) -> str:
        """The Id of the machine to which the agent is registered."""
        return pulumi.get(self, "machine_id")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case form, falling back to the input."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class AgentDiskDetailsResponse(dict):
    """Agent disk details."""

    def __init__(__self__, *,
                 capacity_in_bytes: int,
                 disk_id: str,
                 disk_name: str,
                 is_os_disk: str,
                 lun_id: int):
        """Record a single agent disk's capacity, identity, name, OS-disk flag and lun."""
        # All fields are required, so they are stored unconditionally.
        for _key, _val in (
            ("capacity_in_bytes", capacity_in_bytes),
            ("disk_id", disk_id),
            ("disk_name", disk_name),
            ("is_os_disk", is_os_disk),
            ("lun_id", lun_id),
        ):
            pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="capacityInBytes")
    def capacity_in_bytes(self) -> int:
        """The disk capacity in bytes."""
        return pulumi.get(self, "capacity_in_bytes")

    @property
    @pulumi.getter(name="diskId")
    def disk_id(self) -> str:
        """The disk Id."""
        return pulumi.get(self, "disk_id")

    @property
    @pulumi.getter(name="diskName")
    def disk_name(self) -> str:
        """The disk name."""
        return pulumi.get(self, "disk_name")

    @property
    @pulumi.getter(name="isOSDisk")
    def is_os_disk(self) -> str:
        """A value indicating whether the disk is the OS disk."""
        return pulumi.get(self, "is_os_disk")

    @property
    @pulumi.getter(name="lunId")
    def lun_id(self) -> int:
        """The lun of disk."""
        return pulumi.get(self, "lun_id")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case form, falling back to the input."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class AzureFabricSpecificDetailsResponse(dict):
    """Azure Fabric Specific Details."""

    def __init__(__self__, *,
                 instance_type: str,
                 container_ids: Optional[Sequence[str]] = None,
                 location: Optional[str] = None):
        """Store Azure fabric details.

        The ``instance_type`` discriminator is pinned to ``'Azure'`` regardless
        of the argument; optional values are recorded only when not ``None``.
        """
        pulumi.set(__self__, "instance_type", 'Azure')
        for _key, _val in (("container_ids", container_ids), ("location", location)):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Gets the class type. Overridden in derived classes."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="containerIds")
    def container_ids(self) -> Optional[Sequence[str]]:
        """The container Ids for the Azure fabric."""
        return pulumi.get(self, "container_ids")

    @property
    @pulumi.getter
    def location(self) -> Optional[str]:
        """The Location for the Azure fabric."""
        return pulumi.get(self, "location")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case form, falling back to the input."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class AzureFileshareProtectedItemExtendedInfoResponse(dict):
    """Additional information about Azure File Share backup item."""

    def __init__(__self__, *,
                 resource_state: str,
                 resource_state_sync_time: str,
                 oldest_recovery_point: Optional[str] = None,
                 policy_state: Optional[str] = None,
                 recovery_point_count: Optional[int] = None):
        """Store extended info for an Azure File Share backup item.

        Required values are always recorded; optional values are recorded only
        when not ``None``.
        """
        pulumi.set(__self__, "resource_state", resource_state)
        pulumi.set(__self__, "resource_state_sync_time", resource_state_sync_time)
        for _key, _val in (
            ("oldest_recovery_point", oldest_recovery_point),
            ("policy_state", policy_state),
            ("recovery_point_count", recovery_point_count),
        ):
            if _val is not None:
                pulumi.set(__self__, _key, _val)

    @property
    @pulumi.getter(name="resourceState")
    def resource_state(self) -> str:
        """State of this resource; from enum ResourceState {Invalid, Active, SoftDeleted, Deleted}."""
        return pulumi.get(self, "resource_state")

    @property
    @pulumi.getter(name="resourceStateSyncTime")
    def resource_state_sync_time(self) -> str:
        """The resource state sync time for this backup item."""
        return pulumi.get(self, "resource_state_sync_time")

    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[str]:
        """The oldest backup copy available for this item in the service."""
        return pulumi.get(self, "oldest_recovery_point")

    @property
    @pulumi.getter(name="policyState")
    def policy_state(self) -> Optional[str]:
        """Indicates consistency of policy object and policy applied to this backup item."""
        return pulumi.get(self, "policy_state")

    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[int]:
        """Number of available backup copies associated with this backup item."""
        return pulumi.get(self, "recovery_point_count")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case form, falling back to the input."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class AzureFileshareProtectedItemResponse(dict):
"""
Azure File Share workload-specific backup item.
"""
def __init__(__self__, *,
protected_item_type: str,
backup_management_type: Optional[str] = None,
backup_set_name: Optional[str] = None,
container_name: Optional[str] = None,
create_mode: Optional[str] = None,
deferred_delete_time_in_utc: Optional[str] = None,
deferred_delete_time_remaining: Optional[str] = None,
extended_info: Optional['outputs.AzureFileshareProtectedItemExtendedInfoResponse'] = None,
friendly_name: Optional[str] = None,
is_deferred_delete_schedule_upcoming: Optional[bool] = None,
is_rehydrate: Optional[bool] = None,
is_scheduled_for_deferred_delete: Optional[bool] = None,
kpis_healths: Optional[Mapping[str, 'outputs.KPIResourceHealthDetailsResponse']] = None,
last_backup_status: Optional[str] = None,
last_backup_time: Optional[str] = None,
last_recovery_point: Optional[str] = None,
policy_id: Optional[str] = None,
protection_state: Optional[str] = None,
protection_status: Optional[str] = None,
source_resource_id: Optional[str] = None,
workload_type: Optional[str] = None):
"""
Azure File Share workload-specific backup item.
:param str protected_item_type: backup item type.
:param str backup_management_type: Type of backup management for the backed up item.
:param str backup_set_name: Name of the backup set the backup item belongs to
:param str container_name: Unique name of container
:param str create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
:param str deferred_delete_time_in_utc: Time for deferred deletion in UTC
:param str deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
:param 'AzureFileshareProtectedItemExtendedInfoResponseArgs' extended_info: Additional information with this backup item.
:param str friendly_name: Friendly name of the fileshare represented by this backup item.
:param bool is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
:param bool is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
:param bool is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
:param Mapping[str, 'KPIResourceHealthDetailsResponseArgs'] kpis_healths: Health details of different KPIs
:param str last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
:param str last_backup_time: Timestamp of the last backup operation on this backup item.
:param str last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
:param str policy_id: ID of the backup policy with which this item is backed up.
:param str protection_state: Backup state of this backup item.
:param str protection_status: Backup status of this backup item.
:param str source_resource_id: ARM ID of the resource to be backed up.
:param str workload_type: Type of workload this item represents.
"""
pulumi.set(__self__, "protected_item_type", 'AzureFileShareProtectedItem')
if backup_management_type is not None:
pulumi.set(__self__, "backup_management_type", backup_management_type)
if backup_set_name is not None:
pulumi.set(__self__, "backup_set_name", backup_set_name)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if create_mode is not None:
pulumi.set(__self__, "create_mode", create_mode)
if deferred_delete_time_in_utc is not None:
pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
if deferred_delete_time_remaining is not None:
pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
if extended_info is not None:
pulumi.set(__self__, "extended_info", extended_info)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if is_deferred_delete_schedule_upcoming is not None:
pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
if is_rehydrate is not None:
pulumi.set(__self__, "is_rehydrate", is_rehydrate)
if is_scheduled_for_deferred_delete is not None:
pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
if kpis_healths is not None:
pulumi.set(__self__, "kpis_healths", kpis_healths)
if last_backup_status is not None:
pulumi.set(__self__, "last_backup_status", last_backup_status)
if last_backup_time is not None:
pulumi.set(__self__, "last_backup_time", last_backup_time)
if last_recovery_point is not None:
pulumi.set(__self__, "last_recovery_point", last_recovery_point)
if policy_id is not None:
pulumi.set(__self__, "policy_id", policy_id)
if protection_state is not None:
pulumi.set(__self__, "protection_state", protection_state)
if protection_status is not None:
pulumi.set(__self__, "protection_status", protection_status)
if source_resource_id is not None:
pulumi.set(__self__, "source_resource_id", source_resource_id)
if workload_type is not None:
pulumi.set(__self__, "workload_type", workload_type)
@property
@pulumi.getter(name="protectedItemType")
def protected_item_type(self) -> str:
"""
backup item type.
"""
return pulumi.get(self, "protected_item_type")
@property
@pulumi.getter(name="backupManagementType")
def backup_management_type(self) -> Optional[str]:
"""
Type of backup management for the backed up item.
"""
return pulumi.get(self, "backup_management_type")
@property
@pulumi.getter(name="backupSetName")
def backup_set_name(self) -> Optional[str]:
"""
Name of the backup set the backup item belongs to
"""
return pulumi.get(self, "backup_set_name")
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[str]:
"""
Unique name of container
"""
return pulumi.get(self, "container_name")
@property
@pulumi.getter(name="createMode")
def create_mode(self) -> Optional[str]:
"""
Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
"""
return pulumi.get(self, "create_mode")
@property
@pulumi.getter(name="deferredDeleteTimeInUTC")
def deferred_delete_time_in_utc(self) -> Optional[str]:
"""
Time for deferred deletion in UTC
"""
return pulumi.get(self, "deferred_delete_time_in_utc")
@property
@pulumi.getter(name="deferredDeleteTimeRemaining")
def deferred_delete_time_remaining(self) -> Optional[str]:
"""
Time remaining before the DS marked for deferred delete is permanently deleted
"""
return pulumi.get(self, "deferred_delete_time_remaining")
@property
@pulumi.getter(name="extendedInfo")
def extended_info(self) -> Optional['outputs.AzureFileshareProtectedItemExtendedInfoResponse']:
"""
Additional information with this backup item.
"""
return pulumi.get(self, "extended_info")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[str]:
"""
Friendly name of the fileshare represented by this backup item.
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
def is_deferred_delete_schedule_upcoming(self) -> Optional[bool]:
"""
Flag to identify whether the deferred deleted DS is to be purged soon
"""
return pulumi.get(self, "is_deferred_delete_schedule_upcoming")
@property
@pulumi.getter(name="isRehydrate")
def is_rehydrate(self) -> Optional[bool]:
"""
Flag to identify that deferred deleted DS is to be moved into Pause state
"""
return pulumi.get(self, "is_rehydrate")
@property
@pulumi.getter(name="isScheduledForDeferredDelete")
def is_scheduled_for_deferred_delete(self) -> Optional[bool]:
"""
Flag to identify whether the DS is scheduled for deferred delete
"""
return pulumi.get(self, "is_scheduled_for_deferred_delete")
@property
@pulumi.getter(name="kpisHealths")
def kpis_healths(self) -> Optional[Mapping[str, 'outputs.KPIResourceHealthDetailsResponse']]:
"""
Health details of different KPIs
"""
return pulumi.get(self, "kpis_healths")
@property
@pulumi.getter(name="lastBackupStatus")
def last_backup_status(self) -> Optional[str]:
"""
Last backup operation status. Possible values: Healthy, Unhealthy.
"""
return pulumi.get(self, "last_backup_status")
@property
@pulumi.getter(name="lastBackupTime")
def last_backup_time(self) -> Optional[str]:
"""
Timestamp of the last backup operation on this backup item.
"""
return pulumi.get(self, "last_backup_time")
@property
@pulumi.getter(name="lastRecoveryPoint")
def last_recovery_point(self) -> Optional[str]:
"""
Timestamp when the last (latest) backup copy was created for this backup item.
"""
return pulumi.get(self, "last_recovery_point")
@property
@pulumi.getter(name="policyId")
def policy_id(self) -> Optional[str]:
"""
ID of the backup policy with which this item is backed up.
"""
return pulumi.get(self, "policy_id")
@property
@pulumi.getter(name="protectionState")
def protection_state(self) -> Optional[str]:
"""
Backup state of this backup item.
"""
return pulumi.get(self, "protection_state")
@property
@pulumi.getter(name="protectionStatus")
def protection_status(self) -> Optional[str]:
"""
Backup status of this backup item.
"""
return pulumi.get(self, "protection_status")
@property
@pulumi.getter(name="sourceResourceId")
def source_resource_id(self) -> Optional[str]:
"""
ARM ID of the resource to be backed up.
"""
return pulumi.get(self, "source_resource_id")
@property
@pulumi.getter(name="workloadType")
def workload_type(self) -> Optional[str]:
"""
Type of workload this item represents.
"""
return pulumi.get(self, "workload_type")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureIaaSVMHealthDetailsResponse(dict):
    """
    Azure IaaS VM workload-specific Health Details: a numeric health code
    together with its human-readable title, message, and recommended actions.
    """

    def __init__(__self__, *,
                 code: int,
                 message: str,
                 recommendations: Sequence[str],
                 title: str):
        """
        Azure IaaS VM workload-specific Health Details.
        :param int code: Health Code
        :param str message: Health Message
        :param Sequence[str] recommendations: Health Recommended Actions
        :param str title: Health Title
        """
        # All four fields are required, so each is stored unconditionally on
        # the underlying dict via pulumi.set.
        for field, value in (("code", code),
                             ("message", message),
                             ("recommendations", recommendations),
                             ("title", title)):
            pulumi.set(__self__, field, value)

    @property
    @pulumi.getter
    def code(self) -> int:
        """Health Code"""
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def message(self) -> str:
        """Health Message"""
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def recommendations(self) -> Sequence[str]:
        """Health Recommended Actions"""
        return pulumi.get(self, "recommendations")

    @property
    @pulumi.getter
    def title(self) -> str:
        """Health Title"""
        return pulumi.get(self, "title")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; unknown names pass through.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureIaaSVMProtectedItemExtendedInfoResponse(dict):
    """
    Additional information on Azure IaaS VM specific backup item.
    """

    def __init__(__self__, *,
                 oldest_recovery_point: Optional[str] = None,
                 policy_inconsistent: Optional[bool] = None,
                 recovery_point_count: Optional[int] = None):
        """
        Additional information on Azure IaaS VM specific backup item.
        :param str oldest_recovery_point: The oldest backup copy available for this backup item.
        :param bool policy_inconsistent: Specifies if backup policy associated with the backup item is inconsistent.
        :param int recovery_point_count: Number of backup copies available for this backup item.
        """
        # Only fields that were actually supplied are materialized on the dict.
        supplied = (("oldest_recovery_point", oldest_recovery_point),
                    ("policy_inconsistent", policy_inconsistent),
                    ("recovery_point_count", recovery_point_count))
        for field, value in supplied:
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[str]:
        """The oldest backup copy available for this backup item."""
        return pulumi.get(self, "oldest_recovery_point")

    @property
    @pulumi.getter(name="policyInconsistent")
    def policy_inconsistent(self) -> Optional[bool]:
        """Specifies if backup policy associated with the backup item is inconsistent."""
        return pulumi.get(self, "policy_inconsistent")

    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[int]:
        """Number of backup copies available for this backup item."""
        return pulumi.get(self, "recovery_point_count")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; unknown names pass through.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureIaaSVMProtectedItemResponse(dict):
    """
    IaaS VM workload-specific backup item.

    Response-side output type: values are stored on the underlying dict via
    ``pulumi.set`` in ``__init__`` and exposed through the camelCase-named
    getters below.
    """
    def __init__(__self__, *,
                 protected_item_type: str,
                 backup_management_type: Optional[str] = None,
                 backup_set_name: Optional[str] = None,
                 container_name: Optional[str] = None,
                 create_mode: Optional[str] = None,
                 deferred_delete_time_in_utc: Optional[str] = None,
                 deferred_delete_time_remaining: Optional[str] = None,
                 extended_info: Optional['outputs.AzureIaaSVMProtectedItemExtendedInfoResponse'] = None,
                 extended_properties: Optional['outputs.ExtendedPropertiesResponse'] = None,
                 friendly_name: Optional[str] = None,
                 health_details: Optional[Sequence['outputs.AzureIaaSVMHealthDetailsResponse']] = None,
                 health_status: Optional[str] = None,
                 is_deferred_delete_schedule_upcoming: Optional[bool] = None,
                 is_rehydrate: Optional[bool] = None,
                 is_scheduled_for_deferred_delete: Optional[bool] = None,
                 kpis_healths: Optional[Mapping[str, 'outputs.KPIResourceHealthDetailsResponse']] = None,
                 last_backup_status: Optional[str] = None,
                 last_backup_time: Optional[str] = None,
                 last_recovery_point: Optional[str] = None,
                 policy_id: Optional[str] = None,
                 protected_item_data_id: Optional[str] = None,
                 protection_state: Optional[str] = None,
                 protection_status: Optional[str] = None,
                 source_resource_id: Optional[str] = None,
                 virtual_machine_id: Optional[str] = None,
                 workload_type: Optional[str] = None):
        """
        IaaS VM workload-specific backup item.
        :param str protected_item_type: backup item type.
        :param str backup_management_type: Type of backup management for the backed up item.
        :param str backup_set_name: Name of the backup set the backup item belongs to
        :param str container_name: Unique name of container
        :param str create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param str deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param str deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param 'AzureIaaSVMProtectedItemExtendedInfoResponseArgs' extended_info: Additional information for this backup item.
        :param 'ExtendedPropertiesResponseArgs' extended_properties: Extended Properties for Azure IaasVM Backup.
        :param str friendly_name: Friendly name of the VM represented by this backup item.
        :param Sequence['AzureIaaSVMHealthDetailsResponseArgs'] health_details: Health details on this backup item.
        :param str health_status: Health status of protected item.
        :param bool is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param bool is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param bool is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param Mapping[str, 'KPIResourceHealthDetailsResponseArgs'] kpis_healths: Health details of different KPIs
        :param str last_backup_status: Last backup operation status.
        :param str last_backup_time: Timestamp of the last backup operation on this backup item.
        :param str last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param str policy_id: ID of the backup policy with which this item is backed up.
        :param str protected_item_data_id: Data ID of the protected item.
        :param str protection_state: Backup state of this backup item.
        :param str protection_status: Backup status of this backup item.
        :param str source_resource_id: ARM ID of the resource to be backed up.
        :param str virtual_machine_id: Fully qualified ARM ID of the virtual machine represented by this item.
        :param str workload_type: Type of workload this item represents.
        """
        # Polymorphic discriminator: always pinned to the constant
        # 'AzureIaaSVMProtectedItem'; the protected_item_type argument's own
        # value is not stored.
        pulumi.set(__self__, "protected_item_type", 'AzureIaaSVMProtectedItem')
        # Each optional field is only materialized on the dict when supplied.
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if backup_set_name is not None:
            pulumi.set(__self__, "backup_set_name", backup_set_name)
        if container_name is not None:
            pulumi.set(__self__, "container_name", container_name)
        if create_mode is not None:
            pulumi.set(__self__, "create_mode", create_mode)
        if deferred_delete_time_in_utc is not None:
            pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
        if deferred_delete_time_remaining is not None:
            pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
        if extended_info is not None:
            pulumi.set(__self__, "extended_info", extended_info)
        if extended_properties is not None:
            pulumi.set(__self__, "extended_properties", extended_properties)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if health_details is not None:
            pulumi.set(__self__, "health_details", health_details)
        if health_status is not None:
            pulumi.set(__self__, "health_status", health_status)
        if is_deferred_delete_schedule_upcoming is not None:
            pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
        if is_rehydrate is not None:
            pulumi.set(__self__, "is_rehydrate", is_rehydrate)
        if is_scheduled_for_deferred_delete is not None:
            pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
        if kpis_healths is not None:
            pulumi.set(__self__, "kpis_healths", kpis_healths)
        if last_backup_status is not None:
            pulumi.set(__self__, "last_backup_status", last_backup_status)
        if last_backup_time is not None:
            pulumi.set(__self__, "last_backup_time", last_backup_time)
        if last_recovery_point is not None:
            pulumi.set(__self__, "last_recovery_point", last_recovery_point)
        if policy_id is not None:
            pulumi.set(__self__, "policy_id", policy_id)
        if protected_item_data_id is not None:
            pulumi.set(__self__, "protected_item_data_id", protected_item_data_id)
        if protection_state is not None:
            pulumi.set(__self__, "protection_state", protection_state)
        if protection_status is not None:
            pulumi.set(__self__, "protection_status", protection_status)
        if source_resource_id is not None:
            pulumi.set(__self__, "source_resource_id", source_resource_id)
        if virtual_machine_id is not None:
            pulumi.set(__self__, "virtual_machine_id", virtual_machine_id)
        if workload_type is not None:
            pulumi.set(__self__, "workload_type", workload_type)

    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> str:
        """
        backup item type.
        """
        return pulumi.get(self, "protected_item_type")

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """
        Type of backup management for the backed up item.
        """
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[str]:
        """
        Name of the backup set the backup item belongs to
        """
        return pulumi.get(self, "backup_set_name")

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[str]:
        """
        Unique name of container
        """
        return pulumi.get(self, "container_name")

    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[str]:
        """
        Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        """
        return pulumi.get(self, "create_mode")

    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[str]:
        """
        Time for deferred deletion in UTC
        """
        return pulumi.get(self, "deferred_delete_time_in_utc")

    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[str]:
        """
        Time remaining before the DS marked for deferred delete is permanently deleted
        """
        return pulumi.get(self, "deferred_delete_time_remaining")

    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional['outputs.AzureIaaSVMProtectedItemExtendedInfoResponse']:
        """
        Additional information for this backup item.
        """
        return pulumi.get(self, "extended_info")

    @property
    @pulumi.getter(name="extendedProperties")
    def extended_properties(self) -> Optional['outputs.ExtendedPropertiesResponse']:
        """
        Extended Properties for Azure IaasVM Backup.
        """
        return pulumi.get(self, "extended_properties")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """
        Friendly name of the VM represented by this backup item.
        """
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="healthDetails")
    def health_details(self) -> Optional[Sequence['outputs.AzureIaaSVMHealthDetailsResponse']]:
        """
        Health details on this backup item.
        """
        return pulumi.get(self, "health_details")

    @property
    @pulumi.getter(name="healthStatus")
    def health_status(self) -> Optional[str]:
        """
        Health status of protected item.
        """
        return pulumi.get(self, "health_status")

    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[bool]:
        """
        Flag to identify whether the deferred deleted DS is to be purged soon
        """
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")

    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[bool]:
        """
        Flag to identify that deferred deleted DS is to be moved into Pause state
        """
        return pulumi.get(self, "is_rehydrate")

    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[bool]:
        """
        Flag to identify whether the DS is scheduled for deferred delete
        """
        return pulumi.get(self, "is_scheduled_for_deferred_delete")

    @property
    @pulumi.getter(name="kpisHealths")
    def kpis_healths(self) -> Optional[Mapping[str, 'outputs.KPIResourceHealthDetailsResponse']]:
        """
        Health details of different KPIs
        """
        return pulumi.get(self, "kpis_healths")

    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[str]:
        """
        Last backup operation status.
        """
        return pulumi.get(self, "last_backup_status")

    @property
    @pulumi.getter(name="lastBackupTime")
    def last_backup_time(self) -> Optional[str]:
        """
        Timestamp of the last backup operation on this backup item.
        """
        return pulumi.get(self, "last_backup_time")

    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[str]:
        """
        Timestamp when the last (latest) backup copy was created for this backup item.
        """
        return pulumi.get(self, "last_recovery_point")

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[str]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")

    @property
    @pulumi.getter(name="protectedItemDataId")
    def protected_item_data_id(self) -> Optional[str]:
        """
        Data ID of the protected item.
        """
        return pulumi.get(self, "protected_item_data_id")

    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[str]:
        """
        Backup state of this backup item.
        """
        return pulumi.get(self, "protection_state")

    @property
    @pulumi.getter(name="protectionStatus")
    def protection_status(self) -> Optional[str]:
        """
        Backup status of this backup item.
        """
        return pulumi.get(self, "protection_status")

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[str]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")

    @property
    @pulumi.getter(name="virtualMachineId")
    def virtual_machine_id(self) -> Optional[str]:
        """
        Fully qualified ARM ID of the virtual machine represented by this item.
        """
        return pulumi.get(self, "virtual_machine_id")

    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[str]:
        """
        Type of workload this item represents.
        """
        return pulumi.get(self, "workload_type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; names missing from the
        # table pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureIaaSVMProtectionPolicyResponse(dict):
    """
    Azure VM (also known as IaaS VM) workload-specific backup policy.
    """
    def __init__(__self__, *,
                 backup_management_type: Optional[str] = None,
                 protected_items_count: Optional[int] = None,
                 retention_policy: Optional[Any] = None,
                 schedule_policy: Optional[Any] = None):
        """
        Azure VM (also known as IaaS VM) workload-specific backup policy.
        :param str backup_management_type: This property is used as the discriminator for deciding the specific types in the polymorphic chain of types.
        :param int protected_items_count: The number of items associated with this policy.
        :param Union['LongTermRetentionPolicyResponseArgs', 'SimpleRetentionPolicyResponseArgs'] retention_policy: The retention policy with the details on backup copy retention ranges.
        :param Union['LongTermSchedulePolicyResponseArgs', 'SimpleSchedulePolicyResponseArgs'] schedule_policy: The backup schedule specified as part of backup policy.
        """
        if backup_management_type is not None:
            # Polymorphic discriminator: whenever the argument is present it is
            # pinned to the constant 'AzureIaasVM'; the caller-supplied value
            # itself is not stored.
            pulumi.set(__self__, "backup_management_type", 'AzureIaasVM')
        if protected_items_count is not None:
            pulumi.set(__self__, "protected_items_count", protected_items_count)
        if retention_policy is not None:
            pulumi.set(__self__, "retention_policy", retention_policy)
        if schedule_policy is not None:
            pulumi.set(__self__, "schedule_policy", schedule_policy)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """
        This property is used as the discriminator for deciding the specific types in the polymorphic chain of types.
        """
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="protectedItemsCount")
    def protected_items_count(self) -> Optional[int]:
        """
        The number of items associated with this policy.
        """
        return pulumi.get(self, "protected_items_count")

    @property
    @pulumi.getter(name="retentionPolicy")
    def retention_policy(self) -> Optional[Any]:
        """
        The retention policy with the details on backup copy retention ranges.
        """
        return pulumi.get(self, "retention_policy")

    @property
    @pulumi.getter(name="schedulePolicy")
    def schedule_policy(self) -> Optional[Any]:
        """
        The backup schedule specified as part of backup policy.
        """
        return pulumi.get(self, "schedule_policy")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; names missing from the
        # table pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureRecoveryServiceVaultProtectionIntentResponse(dict):
    """
    Azure Recovery Services Vault specific protection intent item.
    """
    def __init__(__self__, *,
                 backup_management_type: Optional[str] = None,
                 item_id: Optional[str] = None,
                 policy_id: Optional[str] = None,
                 protection_intent_item_type: Optional[str] = None,
                 protection_state: Optional[str] = None,
                 source_resource_id: Optional[str] = None):
        """
        Azure Recovery Services Vault specific protection intent item.
        :param str backup_management_type: Type of backup management for the backed up item.
        :param str item_id: ID of the item which is getting protected, In case of Azure Vm , it is ProtectedItemId
        :param str policy_id: ID of the backup policy with which this item is backed up.
        :param str protection_intent_item_type: backup protectionIntent type.
        :param str protection_state: Backup state of this backup item.
        :param str source_resource_id: ARM ID of the resource to be backed up.
        """
        # Optional fields are only materialized on the dict when supplied.
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if item_id is not None:
            pulumi.set(__self__, "item_id", item_id)
        if policy_id is not None:
            pulumi.set(__self__, "policy_id", policy_id)
        if protection_intent_item_type is not None:
            # Polymorphic discriminator: pinned to 'RecoveryServiceVaultItem'
            # whenever the argument is present; the argument's own value is
            # not stored.
            pulumi.set(__self__, "protection_intent_item_type", 'RecoveryServiceVaultItem')
        if protection_state is not None:
            pulumi.set(__self__, "protection_state", protection_state)
        if source_resource_id is not None:
            pulumi.set(__self__, "source_resource_id", source_resource_id)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """
        Type of backup management for the backed up item.
        """
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="itemId")
    def item_id(self) -> Optional[str]:
        """
        ID of the item which is getting protected, In case of Azure Vm , it is ProtectedItemId
        """
        return pulumi.get(self, "item_id")

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[str]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")

    @property
    @pulumi.getter(name="protectionIntentItemType")
    def protection_intent_item_type(self) -> Optional[str]:
        """
        backup protectionIntent type.
        """
        return pulumi.get(self, "protection_intent_item_type")

    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[str]:
        """
        Backup state of this backup item.
        """
        return pulumi.get(self, "protection_state")

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[str]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; names missing from the
        # table pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureResourceProtectionIntentResponse(dict):
    """
    IaaS VM specific backup protection intent item.
    """
    def __init__(__self__, *,
                 backup_management_type: Optional[str] = None,
                 friendly_name: Optional[str] = None,
                 item_id: Optional[str] = None,
                 policy_id: Optional[str] = None,
                 protection_intent_item_type: Optional[str] = None,
                 protection_state: Optional[str] = None,
                 source_resource_id: Optional[str] = None):
        """
        IaaS VM specific backup protection intent item.
        :param str backup_management_type: Type of backup management for the backed up item.
        :param str friendly_name: Friendly name of the VM represented by this backup item.
        :param str item_id: ID of the item which is getting protected, In case of Azure Vm , it is ProtectedItemId
        :param str policy_id: ID of the backup policy with which this item is backed up.
        :param str protection_intent_item_type: backup protectionIntent type.
        :param str protection_state: Backup state of this backup item.
        :param str source_resource_id: ARM ID of the resource to be backed up.
        """
        # Optional fields are only materialized on the dict when supplied.
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if item_id is not None:
            pulumi.set(__self__, "item_id", item_id)
        if policy_id is not None:
            pulumi.set(__self__, "policy_id", policy_id)
        if protection_intent_item_type is not None:
            # Polymorphic discriminator: pinned to 'AzureResourceItem' whenever
            # the argument is present; the argument's own value is not stored.
            pulumi.set(__self__, "protection_intent_item_type", 'AzureResourceItem')
        if protection_state is not None:
            pulumi.set(__self__, "protection_state", protection_state)
        if source_resource_id is not None:
            pulumi.set(__self__, "source_resource_id", source_resource_id)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """
        Type of backup management for the backed up item.
        """
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """
        Friendly name of the VM represented by this backup item.
        """
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="itemId")
    def item_id(self) -> Optional[str]:
        """
        ID of the item which is getting protected, In case of Azure Vm , it is ProtectedItemId
        """
        return pulumi.get(self, "item_id")

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[str]:
        """
        ID of the backup policy with which this item is backed up.
        """
        return pulumi.get(self, "policy_id")

    @property
    @pulumi.getter(name="protectionIntentItemType")
    def protection_intent_item_type(self) -> Optional[str]:
        """
        backup protectionIntent type.
        """
        return pulumi.get(self, "protection_intent_item_type")

    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[str]:
        """
        Backup state of this backup item.
        """
        return pulumi.get(self, "protection_state")

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[str]:
        """
        ARM ID of the resource to be backed up.
        """
        return pulumi.get(self, "source_resource_id")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; names missing from the
        # table pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureSqlContainerResponse(dict):
    """
    Azure Sql workload-specific container.
    """
    def __init__(__self__, *,
                 backup_management_type: Optional[str] = None,
                 container_type: Optional[str] = None,
                 friendly_name: Optional[str] = None,
                 health_status: Optional[str] = None,
                 registration_status: Optional[str] = None):
        """
        Azure Sql workload-specific container.
        :param str backup_management_type: Type of backup management for the container.
        :param str container_type: Type of the container. The value of this property for: 1. Compute Azure VM is Microsoft.Compute/virtualMachines 2.
               Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines 3. Windows machines (like MAB, DPM etc) is
               Windows 4. Azure SQL instance is AzureSqlContainer. 5. Storage containers is StorageContainer. 6. Azure workload
               Backup is VMAppContainer
        :param str friendly_name: Friendly name of the container.
        :param str health_status: Status of health of the container.
        :param str registration_status: Status of registration of the container with the Recovery Services Vault.
        """
        # Optional fields are only materialized on the dict when supplied.
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if container_type is not None:
            # Polymorphic discriminator: pinned to 'AzureSqlContainer' whenever
            # the argument is present; the argument's own value is not stored.
            pulumi.set(__self__, "container_type", 'AzureSqlContainer')
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if health_status is not None:
            pulumi.set(__self__, "health_status", health_status)
        if registration_status is not None:
            pulumi.set(__self__, "registration_status", registration_status)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """
        Type of backup management for the container.
        """
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="containerType")
    def container_type(self) -> Optional[str]:
        """
        Type of the container. The value of this property for: 1. Compute Azure VM is Microsoft.Compute/virtualMachines 2.
        Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines 3. Windows machines (like MAB, DPM etc) is
        Windows 4. Azure SQL instance is AzureSqlContainer. 5. Storage containers is StorageContainer. 6. Azure workload
        Backup is VMAppContainer
        """
        return pulumi.get(self, "container_type")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """
        Friendly name of the container.
        """
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="healthStatus")
    def health_status(self) -> Optional[str]:
        """
        Status of health of the container.
        """
        return pulumi.get(self, "health_status")

    @property
    @pulumi.getter(name="registrationStatus")
    def registration_status(self) -> Optional[str]:
        """
        Status of registration of the container with the Recovery Services Vault.
        """
        return pulumi.get(self, "registration_status")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; names missing from the
        # table pass through unchanged.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureSqlProtectedItemExtendedInfoResponse(dict):
    """
    Additional information on Azure Sql specific protected item.
    """

    def __init__(__self__, *,
                 oldest_recovery_point: Optional[str] = None,
                 policy_state: Optional[str] = None,
                 recovery_point_count: Optional[int] = None):
        """
        Additional information on Azure Sql specific protected item.
        :param str oldest_recovery_point: The oldest backup copy available for this item in the service.
        :param str policy_state: State of the backup policy associated with this backup item.
        :param int recovery_point_count: Number of available backup copies associated with this backup item.
        """
        # Store only the fields that were actually supplied.
        supplied = (("oldest_recovery_point", oldest_recovery_point),
                    ("policy_state", policy_state),
                    ("recovery_point_count", recovery_point_count))
        for field, value in supplied:
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[str]:
        """The oldest backup copy available for this item in the service."""
        return pulumi.get(self, "oldest_recovery_point")

    @property
    @pulumi.getter(name="policyState")
    def policy_state(self) -> Optional[str]:
        """State of the backup policy associated with this backup item."""
        return pulumi.get(self, "policy_state")

    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[int]:
        """Number of available backup copies associated with this backup item."""
        return pulumi.get(self, "recovery_point_count")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; unknown names pass through.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureSqlProtectedItemResponse(dict):
    """
    Azure SQL workload-specific backup item.
    """
    def __init__(__self__, *,
                 protected_item_type: str,
                 backup_management_type: Optional[str] = None,
                 backup_set_name: Optional[str] = None,
                 container_name: Optional[str] = None,
                 create_mode: Optional[str] = None,
                 deferred_delete_time_in_utc: Optional[str] = None,
                 deferred_delete_time_remaining: Optional[str] = None,
                 extended_info: Optional['outputs.AzureSqlProtectedItemExtendedInfoResponse'] = None,
                 is_deferred_delete_schedule_upcoming: Optional[bool] = None,
                 is_rehydrate: Optional[bool] = None,
                 is_scheduled_for_deferred_delete: Optional[bool] = None,
                 last_recovery_point: Optional[str] = None,
                 policy_id: Optional[str] = None,
                 protected_item_data_id: Optional[str] = None,
                 protection_state: Optional[str] = None,
                 source_resource_id: Optional[str] = None,
                 workload_type: Optional[str] = None):
        """
        Azure SQL workload-specific backup item.
        :param str protected_item_type: backup item type.
        :param str backup_management_type: Type of backup management for the backed up item.
        :param str backup_set_name: Name of the backup set the backup item belongs to
        :param str container_name: Unique name of container
        :param str create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param str deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param str deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param 'AzureSqlProtectedItemExtendedInfoResponseArgs' extended_info: Additional information for this backup item.
        :param bool is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param bool is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param bool is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param str last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param str policy_id: ID of the backup policy with which this item is backed up.
        :param str protected_item_data_id: Internal ID of a backup item. Used by Azure SQL Backup engine to contact Recovery Services.
        :param str protection_state: Backup state of the backed up item.
        :param str source_resource_id: ARM ID of the resource to be backed up.
        :param str workload_type: Type of workload this item represents.
        """
        # The discriminator is always the fixed literal; the supplied
        # protected_item_type argument is not consulted here.
        pulumi.set(__self__, "protected_item_type", 'Microsoft.Sql/servers/databases')
        # Record only the optional fields the caller actually supplied.
        for key, value in (
                ("backup_management_type", backup_management_type),
                ("backup_set_name", backup_set_name),
                ("container_name", container_name),
                ("create_mode", create_mode),
                ("deferred_delete_time_in_utc", deferred_delete_time_in_utc),
                ("deferred_delete_time_remaining", deferred_delete_time_remaining),
                ("extended_info", extended_info),
                ("is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming),
                ("is_rehydrate", is_rehydrate),
                ("is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete),
                ("last_recovery_point", last_recovery_point),
                ("policy_id", policy_id),
                ("protected_item_data_id", protected_item_data_id),
                ("protection_state", protection_state),
                ("source_resource_id", source_resource_id),
                ("workload_type", workload_type)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> str:
        """backup item type."""
        return pulumi.get(self, "protected_item_type")

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """Type of backup management for the backed up item."""
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[str]:
        """Name of the backup set the backup item belongs to"""
        return pulumi.get(self, "backup_set_name")

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[str]:
        """Unique name of container"""
        return pulumi.get(self, "container_name")

    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[str]:
        """Create mode to indicate recovery of existing soft deleted data source or creation of new data source."""
        return pulumi.get(self, "create_mode")

    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[str]:
        """Time for deferred deletion in UTC"""
        return pulumi.get(self, "deferred_delete_time_in_utc")

    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[str]:
        """Time remaining before the DS marked for deferred delete is permanently deleted"""
        return pulumi.get(self, "deferred_delete_time_remaining")

    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional['outputs.AzureSqlProtectedItemExtendedInfoResponse']:
        """Additional information for this backup item."""
        return pulumi.get(self, "extended_info")

    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[bool]:
        """Flag to identify whether the deferred deleted DS is to be purged soon"""
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")

    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[bool]:
        """Flag to identify that deferred deleted DS is to be moved into Pause state"""
        return pulumi.get(self, "is_rehydrate")

    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[bool]:
        """Flag to identify whether the DS is scheduled for deferred delete"""
        return pulumi.get(self, "is_scheduled_for_deferred_delete")

    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[str]:
        """Timestamp when the last (latest) backup copy was created for this backup item."""
        return pulumi.get(self, "last_recovery_point")

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[str]:
        """ID of the backup policy with which this item is backed up."""
        return pulumi.get(self, "policy_id")

    @property
    @pulumi.getter(name="protectedItemDataId")
    def protected_item_data_id(self) -> Optional[str]:
        """Internal ID of a backup item. Used by Azure SQL Backup engine to contact Recovery Services."""
        return pulumi.get(self, "protected_item_data_id")

    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[str]:
        """Backup state of the backed up item."""
        return pulumi.get(self, "protection_state")

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[str]:
        """ARM ID of the resource to be backed up."""
        return pulumi.get(self, "source_resource_id")

    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[str]:
        """Type of workload this item represents."""
        return pulumi.get(self, "workload_type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class AzureSqlProtectionPolicyResponse(dict):
    """
    The Azure SQL workload-specific backup policy.
    """
    def __init__(__self__, *,
                 backup_management_type: Optional[str] = None,
                 protected_items_count: Optional[int] = None,
                 retention_policy: Optional[Any] = None):
        """
        The Azure SQL workload-specific backup policy.
        :param str backup_management_type: This property is used as the discriminator for deciding the specific types in the polymorphic chain of types.
        :param int protected_items_count: The number of items associated with this policy.
        :param Union['LongTermRetentionPolicyResponseArgs', 'SimpleRetentionPolicyResponseArgs'] retention_policy: The retention policy details.
        """
        if backup_management_type is not None:
            # NOTE: the discriminator is forced to the literal 'AzureSql'
            # whenever a value is supplied; the passed value itself is ignored.
            pulumi.set(__self__, "backup_management_type", 'AzureSql')
        if protected_items_count is not None:
            pulumi.set(__self__, "protected_items_count", protected_items_count)
        if retention_policy is not None:
            pulumi.set(__self__, "retention_policy", retention_policy)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """This property is used as the discriminator for deciding the specific types in the polymorphic chain of types."""
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="protectedItemsCount")
    def protected_items_count(self) -> Optional[int]:
        """The number of items associated with this policy."""
        return pulumi.get(self, "protected_items_count")

    @property
    @pulumi.getter(name="retentionPolicy")
    def retention_policy(self) -> Optional[Any]:
        """The retention policy details."""
        return pulumi.get(self, "retention_policy")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class AzureStorageContainerResponse(dict):
    """
    Azure Storage Account workload-specific container.
    """
    def __init__(__self__, *,
                 backup_management_type: Optional[str] = None,
                 container_type: Optional[str] = None,
                 friendly_name: Optional[str] = None,
                 health_status: Optional[str] = None,
                 protected_item_count: Optional[int] = None,
                 registration_status: Optional[str] = None,
                 resource_group: Optional[str] = None,
                 source_resource_id: Optional[str] = None,
                 storage_account_version: Optional[str] = None):
        """
        Azure Storage Account workload-specific container.
        :param str backup_management_type: Type of backup management for the container.
        :param str container_type: Type of the container. The value of this property for: 1. Compute Azure VM is Microsoft.Compute/virtualMachines 2.
               Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines 3. Windows machines (like MAB, DPM etc) is
               Windows 4. Azure SQL instance is AzureSqlContainer. 5. Storage containers is StorageContainer. 6. Azure workload
               Backup is VMAppContainer
        :param str friendly_name: Friendly name of the container.
        :param str health_status: Status of health of the container.
        :param int protected_item_count: Number of items backed up in this container.
        :param str registration_status: Status of registration of the container with the Recovery Services Vault.
        :param str resource_group: Resource group name of Recovery Services Vault.
        :param str source_resource_id: Fully qualified ARM url.
        :param str storage_account_version: Storage account version.
        """
        # Record only the fields the caller actually supplied.  The
        # container_type discriminator is forced to the literal
        # 'StorageContainer' whenever any value is supplied for it.
        for key, value in (
                ("backup_management_type", backup_management_type),
                ("container_type", 'StorageContainer' if container_type is not None else None),
                ("friendly_name", friendly_name),
                ("health_status", health_status),
                ("protected_item_count", protected_item_count),
                ("registration_status", registration_status),
                ("resource_group", resource_group),
                ("source_resource_id", source_resource_id),
                ("storage_account_version", storage_account_version)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """Type of backup management for the container."""
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="containerType")
    def container_type(self) -> Optional[str]:
        """
        Type of the container. The value of this property for: 1. Compute Azure VM is Microsoft.Compute/virtualMachines 2.
        Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines 3. Windows machines (like MAB, DPM etc) is
        Windows 4. Azure SQL instance is AzureSqlContainer. 5. Storage containers is StorageContainer. 6. Azure workload
        Backup is VMAppContainer
        """
        return pulumi.get(self, "container_type")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """Friendly name of the container."""
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="healthStatus")
    def health_status(self) -> Optional[str]:
        """Status of health of the container."""
        return pulumi.get(self, "health_status")

    @property
    @pulumi.getter(name="protectedItemCount")
    def protected_item_count(self) -> Optional[int]:
        """Number of items backed up in this container."""
        return pulumi.get(self, "protected_item_count")

    @property
    @pulumi.getter(name="registrationStatus")
    def registration_status(self) -> Optional[str]:
        """Status of registration of the container with the Recovery Services Vault."""
        return pulumi.get(self, "registration_status")

    @property
    @pulumi.getter(name="resourceGroup")
    def resource_group(self) -> Optional[str]:
        """Resource group name of Recovery Services Vault."""
        return pulumi.get(self, "resource_group")

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[str]:
        """Fully qualified ARM url."""
        return pulumi.get(self, "source_resource_id")

    @property
    @pulumi.getter(name="storageAccountVersion")
    def storage_account_version(self) -> Optional[str]:
        """Storage account version."""
        return pulumi.get(self, "storage_account_version")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class AzureToAzureNetworkMappingSettingsResponse(dict):
    """
    A2A Network Mapping fabric specific settings.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 primary_fabric_location: Optional[str] = None,
                 recovery_fabric_location: Optional[str] = None):
        """
        A2A Network Mapping fabric specific settings.
        :param str instance_type: Gets the Instance type.
        :param str primary_fabric_location: The primary fabric location.
        :param str recovery_fabric_location: The recovery fabric location.
        """
        # The discriminator is always the fixed literal; the supplied
        # instance_type argument is not consulted here.
        pulumi.set(__self__, "instance_type", 'AzureToAzure')
        for key, value in (
                ("primary_fabric_location", primary_fabric_location),
                ("recovery_fabric_location", recovery_fabric_location)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Gets the Instance type."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="primaryFabricLocation")
    def primary_fabric_location(self) -> Optional[str]:
        """The primary fabric location."""
        return pulumi.get(self, "primary_fabric_location")

    @property
    @pulumi.getter(name="recoveryFabricLocation")
    def recovery_fabric_location(self) -> Optional[str]:
        """The recovery fabric location."""
        return pulumi.get(self, "recovery_fabric_location")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class AzureToAzureVmSyncedConfigDetailsResponse(dict):
    """
    Azure to Azure VM synced configuration details.
    """
    def __init__(__self__, *,
                 input_endpoints: Optional[Sequence['outputs.InputEndpointResponse']] = None,
                 tags: Optional[Mapping[str, str]] = None):
        """
        Azure to Azure VM synced configuration details.
        :param Sequence['InputEndpointResponseArgs'] input_endpoints: The Azure VM input endpoints.
        :param Mapping[str, str] tags: The Azure VM tags.
        """
        # Record only the fields the caller actually supplied.
        for key, value in (("input_endpoints", input_endpoints), ("tags", tags)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="inputEndpoints")
    def input_endpoints(self) -> Optional[Sequence['outputs.InputEndpointResponse']]:
        """The Azure VM input endpoints."""
        return pulumi.get(self, "input_endpoints")

    @property
    @pulumi.getter
    def tags(self) -> Optional[Mapping[str, str]]:
        """The Azure VM tags."""
        return pulumi.get(self, "tags")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class AzureVmDiskDetailsResponse(dict):
    """
    Disk details for E2A provider.
    """
    def __init__(__self__, *,
                 disk_encryption_set_id: Optional[str] = None,
                 disk_id: Optional[str] = None,
                 lun_id: Optional[str] = None,
                 max_size_mb: Optional[str] = None,
                 target_disk_location: Optional[str] = None,
                 target_disk_name: Optional[str] = None,
                 vhd_id: Optional[str] = None,
                 vhd_name: Optional[str] = None,
                 vhd_type: Optional[str] = None):
        """
        Disk details for E2A provider.
        :param str disk_encryption_set_id: The DiskEncryptionSet ARM ID.
        :param str disk_id: The disk resource id.
        :param str lun_id: Ordinal/LunId of the disk for the Azure VM.
        :param str max_size_mb: Max side in MB.
        :param str target_disk_location: Blob uri of the Azure disk.
        :param str target_disk_name: The target Azure disk name.
        :param str vhd_id: The VHD id.
        :param str vhd_name: VHD name.
        :param str vhd_type: VHD type.
        """
        # Record only the fields the caller actually supplied.
        for key, value in (
                ("disk_encryption_set_id", disk_encryption_set_id),
                ("disk_id", disk_id),
                ("lun_id", lun_id),
                ("max_size_mb", max_size_mb),
                ("target_disk_location", target_disk_location),
                ("target_disk_name", target_disk_name),
                ("vhd_id", vhd_id),
                ("vhd_name", vhd_name),
                ("vhd_type", vhd_type)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="diskEncryptionSetId")
    def disk_encryption_set_id(self) -> Optional[str]:
        """The DiskEncryptionSet ARM ID."""
        return pulumi.get(self, "disk_encryption_set_id")

    @property
    @pulumi.getter(name="diskId")
    def disk_id(self) -> Optional[str]:
        """The disk resource id."""
        return pulumi.get(self, "disk_id")

    @property
    @pulumi.getter(name="lunId")
    def lun_id(self) -> Optional[str]:
        """Ordinal/LunId of the disk for the Azure VM."""
        return pulumi.get(self, "lun_id")

    @property
    @pulumi.getter(name="maxSizeMB")
    def max_size_mb(self) -> Optional[str]:
        """Max side in MB."""
        return pulumi.get(self, "max_size_mb")

    @property
    @pulumi.getter(name="targetDiskLocation")
    def target_disk_location(self) -> Optional[str]:
        """Blob uri of the Azure disk."""
        return pulumi.get(self, "target_disk_location")

    @property
    @pulumi.getter(name="targetDiskName")
    def target_disk_name(self) -> Optional[str]:
        """The target Azure disk name."""
        return pulumi.get(self, "target_disk_name")

    @property
    @pulumi.getter(name="vhdId")
    def vhd_id(self) -> Optional[str]:
        """The VHD id."""
        return pulumi.get(self, "vhd_id")

    @property
    @pulumi.getter(name="vhdName")
    def vhd_name(self) -> Optional[str]:
        """VHD name."""
        return pulumi.get(self, "vhd_name")

    @property
    @pulumi.getter(name="vhdType")
    def vhd_type(self) -> Optional[str]:
        """VHD type."""
        return pulumi.get(self, "vhd_type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class AzureVmWorkloadProtectedItemExtendedInfoResponse(dict):
    """
    Additional information on Azure Workload for SQL specific backup item.
    """
    def __init__(__self__, *,
                 oldest_recovery_point: Optional[str] = None,
                 policy_state: Optional[str] = None,
                 recovery_point_count: Optional[int] = None):
        """
        Additional information on Azure Workload for SQL specific backup item.
        :param str oldest_recovery_point: The oldest backup copy available for this backup item.
        :param str policy_state: Indicates consistency of policy object and policy applied to this backup item.
        :param int recovery_point_count: Number of backup copies available for this backup item.
        """
        # Record only the fields the caller actually supplied.
        for key, value in (
                ("oldest_recovery_point", oldest_recovery_point),
                ("policy_state", policy_state),
                ("recovery_point_count", recovery_point_count)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[str]:
        """The oldest backup copy available for this backup item."""
        return pulumi.get(self, "oldest_recovery_point")

    @property
    @pulumi.getter(name="policyState")
    def policy_state(self) -> Optional[str]:
        """Indicates consistency of policy object and policy applied to this backup item."""
        return pulumi.get(self, "policy_state")

    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[int]:
        """Number of backup copies available for this backup item."""
        return pulumi.get(self, "recovery_point_count")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class AzureVmWorkloadProtectedItemResponse(dict):
    """
    Azure VM workload-specific protected item.
    """
    def __init__(__self__, *,
                 protected_item_type: str,
                 backup_management_type: Optional[str] = None,
                 backup_set_name: Optional[str] = None,
                 container_name: Optional[str] = None,
                 create_mode: Optional[str] = None,
                 deferred_delete_time_in_utc: Optional[str] = None,
                 deferred_delete_time_remaining: Optional[str] = None,
                 extended_info: Optional['outputs.AzureVmWorkloadProtectedItemExtendedInfoResponse'] = None,
                 friendly_name: Optional[str] = None,
                 is_deferred_delete_schedule_upcoming: Optional[bool] = None,
                 is_rehydrate: Optional[bool] = None,
                 is_scheduled_for_deferred_delete: Optional[bool] = None,
                 kpis_healths: Optional[Mapping[str, 'outputs.KPIResourceHealthDetailsResponse']] = None,
                 last_backup_error_detail: Optional['outputs.ErrorDetailResponse'] = None,
                 last_backup_status: Optional[str] = None,
                 last_backup_time: Optional[str] = None,
                 last_recovery_point: Optional[str] = None,
                 parent_name: Optional[str] = None,
                 parent_type: Optional[str] = None,
                 policy_id: Optional[str] = None,
                 protected_item_data_source_id: Optional[str] = None,
                 protected_item_health_status: Optional[str] = None,
                 protection_state: Optional[str] = None,
                 protection_status: Optional[str] = None,
                 server_name: Optional[str] = None,
                 source_resource_id: Optional[str] = None,
                 workload_type: Optional[str] = None):
        """
        Azure VM workload-specific protected item.
        :param str protected_item_type: backup item type.
        :param str backup_management_type: Type of backup management for the backed up item.
        :param str backup_set_name: Name of the backup set the backup item belongs to
        :param str container_name: Unique name of container
        :param str create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param str deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param str deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param 'AzureVmWorkloadProtectedItemExtendedInfoResponseArgs' extended_info: Additional information for this backup item.
        :param str friendly_name: Friendly name of the DB represented by this backup item.
        :param bool is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param bool is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param bool is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param Mapping[str, 'KPIResourceHealthDetailsResponseArgs'] kpis_healths: Health details of different KPIs
        :param 'ErrorDetailResponseArgs' last_backup_error_detail: Error details in last backup
        :param str last_backup_status: Last backup operation status. Possible values: Healthy, Unhealthy.
        :param str last_backup_time: Timestamp of the last backup operation on this backup item.
        :param str last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param str parent_name: Parent name of the DB such as Instance or Availability Group.
        :param str parent_type: Parent type of protected item, example: for a DB, standalone server or distributed
        :param str policy_id: ID of the backup policy with which this item is backed up.
        :param str protected_item_data_source_id: Data ID of the protected item.
        :param str protected_item_health_status: Health status of the backup item, evaluated based on last heartbeat received
        :param str protection_state: Backup state of this backup item.
        :param str protection_status: Backup status of this backup item.
        :param str server_name: Host/Cluster Name for instance or AG
        :param str source_resource_id: ARM ID of the resource to be backed up.
        :param str workload_type: Type of workload this item represents.
        """
        # The discriminator is always the fixed literal; the supplied
        # protected_item_type argument is not consulted here.
        pulumi.set(__self__, "protected_item_type", 'AzureVmWorkloadProtectedItem')
        # Record only the optional fields the caller actually supplied.
        for key, value in (
                ("backup_management_type", backup_management_type),
                ("backup_set_name", backup_set_name),
                ("container_name", container_name),
                ("create_mode", create_mode),
                ("deferred_delete_time_in_utc", deferred_delete_time_in_utc),
                ("deferred_delete_time_remaining", deferred_delete_time_remaining),
                ("extended_info", extended_info),
                ("friendly_name", friendly_name),
                ("is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming),
                ("is_rehydrate", is_rehydrate),
                ("is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete),
                ("kpis_healths", kpis_healths),
                ("last_backup_error_detail", last_backup_error_detail),
                ("last_backup_status", last_backup_status),
                ("last_backup_time", last_backup_time),
                ("last_recovery_point", last_recovery_point),
                ("parent_name", parent_name),
                ("parent_type", parent_type),
                ("policy_id", policy_id),
                ("protected_item_data_source_id", protected_item_data_source_id),
                ("protected_item_health_status", protected_item_health_status),
                ("protection_state", protection_state),
                ("protection_status", protection_status),
                ("server_name", server_name),
                ("source_resource_id", source_resource_id),
                ("workload_type", workload_type)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> str:
        """backup item type."""
        return pulumi.get(self, "protected_item_type")

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """Type of backup management for the backed up item."""
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[str]:
        """Name of the backup set the backup item belongs to"""
        return pulumi.get(self, "backup_set_name")

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[str]:
        """Unique name of container"""
        return pulumi.get(self, "container_name")

    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[str]:
        """Create mode to indicate recovery of existing soft deleted data source or creation of new data source."""
        return pulumi.get(self, "create_mode")

    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[str]:
        """Time for deferred deletion in UTC"""
        return pulumi.get(self, "deferred_delete_time_in_utc")

    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[str]:
        """Time remaining before the DS marked for deferred delete is permanently deleted"""
        return pulumi.get(self, "deferred_delete_time_remaining")

    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional['outputs.AzureVmWorkloadProtectedItemExtendedInfoResponse']:
        """Additional information for this backup item."""
        return pulumi.get(self, "extended_info")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """Friendly name of the DB represented by this backup item."""
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[bool]:
        """Flag to identify whether the deferred deleted DS is to be purged soon"""
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")

    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[bool]:
        """Flag to identify that deferred deleted DS is to be moved into Pause state"""
        return pulumi.get(self, "is_rehydrate")

    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[bool]:
        """Flag to identify whether the DS is scheduled for deferred delete"""
        return pulumi.get(self, "is_scheduled_for_deferred_delete")

    @property
    @pulumi.getter(name="kpisHealths")
    def kpis_healths(self) -> Optional[Mapping[str, 'outputs.KPIResourceHealthDetailsResponse']]:
        """Health details of different KPIs"""
        return pulumi.get(self, "kpis_healths")

    @property
    @pulumi.getter(name="lastBackupErrorDetail")
    def last_backup_error_detail(self) -> Optional['outputs.ErrorDetailResponse']:
        """Error details in last backup"""
        return pulumi.get(self, "last_backup_error_detail")

    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[str]:
        """Last backup operation status. Possible values: Healthy, Unhealthy."""
        return pulumi.get(self, "last_backup_status")

    @property
    @pulumi.getter(name="lastBackupTime")
    def last_backup_time(self) -> Optional[str]:
        """Timestamp of the last backup operation on this backup item."""
        return pulumi.get(self, "last_backup_time")

    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[str]:
        """Timestamp when the last (latest) backup copy was created for this backup item."""
        return pulumi.get(self, "last_recovery_point")

    @property
    @pulumi.getter(name="parentName")
    def parent_name(self) -> Optional[str]:
        """Parent name of the DB such as Instance or Availability Group."""
        return pulumi.get(self, "parent_name")

    @property
    @pulumi.getter(name="parentType")
    def parent_type(self) -> Optional[str]:
        """Parent type of protected item, example: for a DB, standalone server or distributed"""
        return pulumi.get(self, "parent_type")

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[str]:
        """ID of the backup policy with which this item is backed up."""
        return pulumi.get(self, "policy_id")

    @property
    @pulumi.getter(name="protectedItemDataSourceId")
    def protected_item_data_source_id(self) -> Optional[str]:
        """Data ID of the protected item."""
        return pulumi.get(self, "protected_item_data_source_id")

    @property
    @pulumi.getter(name="protectedItemHealthStatus")
    def protected_item_health_status(self) -> Optional[str]:
        """Health status of the backup item, evaluated based on last heartbeat received"""
        return pulumi.get(self, "protected_item_health_status")

    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[str]:
        """Backup state of this backup item."""
        return pulumi.get(self, "protection_state")

    @property
    @pulumi.getter(name="protectionStatus")
    def protection_status(self) -> Optional[str]:
        """Backup status of this backup item."""
        return pulumi.get(self, "protection_status")

    @property
    @pulumi.getter(name="serverName")
    def server_name(self) -> Optional[str]:
        """Host/Cluster Name for instance or AG"""
        return pulumi.get(self, "server_name")

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[str]:
        """ARM ID of the resource to be backed up."""
        return pulumi.get(self, "source_resource_id")

    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[str]:
        """Type of workload this item represents."""
        return pulumi.get(self, "workload_type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class AzureWorkloadContainerExtendedInfoResponse(dict):
    """
    Extended information about a workload container.
    """
    def __init__(__self__, *,
                 host_server_name: Optional[str] = None,
                 inquiry_info: Optional['outputs.InquiryInfoResponse'] = None,
                 nodes_list: Optional[Sequence['outputs.DistributedNodesInfoResponse']] = None):
        """
        Extended information about a workload container.
        :param str host_server_name: Host OS name (stand-alone) or cluster name (distributed container).
        :param 'InquiryInfoResponseArgs' inquiry_info: Inquiry status for the container.
        :param Sequence['DistributedNodesInfoResponseArgs'] nodes_list: Nodes belonging to a distributed container.
        """
        # Persist only the values that were actually supplied.
        for key, value in (("host_server_name", host_server_name),
                           ("inquiry_info", inquiry_info),
                           ("nodes_list", nodes_list)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="hostServerName")
    def host_server_name(self) -> Optional[str]:
        """Host OS name (stand-alone) or cluster name (distributed container)."""
        return pulumi.get(self, "host_server_name")

    @property
    @pulumi.getter(name="inquiryInfo")
    def inquiry_info(self) -> Optional['outputs.InquiryInfoResponse']:
        """Inquiry status for the container."""
        return pulumi.get(self, "inquiry_info")

    @property
    @pulumi.getter(name="nodesList")
    def nodes_list(self) -> Optional[Sequence['outputs.DistributedNodesInfoResponse']]:
        """Nodes belonging to a distributed container."""
        return pulumi.get(self, "nodes_list")

    def _translate_property(self, prop):
        # Map camelCase wire names back to the snake_case attributes used here.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class AzureWorkloadContainerResponse(dict):
    """
    Container for the workloads running inside Azure Compute or Classic Compute.
    """
    def __init__(__self__, *,
                 backup_management_type: Optional[str] = None,
                 container_type: Optional[str] = None,
                 extended_info: Optional['outputs.AzureWorkloadContainerExtendedInfoResponse'] = None,
                 friendly_name: Optional[str] = None,
                 health_status: Optional[str] = None,
                 last_updated_time: Optional[str] = None,
                 operation_type: Optional[str] = None,
                 registration_status: Optional[str] = None,
                 source_resource_id: Optional[str] = None,
                 workload_type: Optional[str] = None):
        """
        Container for the workloads running inside Azure Compute or Classic Compute.
        :param str backup_management_type: Type of backup management for the container.
        :param str container_type: Type of the container. The value of this property for: 1. Compute Azure VM is Microsoft.Compute/virtualMachines 2.
               Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines 3. Windows machines (like MAB, DPM etc) is
               Windows 4. Azure SQL instance is AzureSqlContainer. 5. Storage containers is StorageContainer. 6. Azure workload
               Backup is VMAppContainer
        :param 'AzureWorkloadContainerExtendedInfoResponseArgs' extended_info: Additional details of a workload container.
        :param str friendly_name: Friendly name of the container.
        :param str health_status: Status of health of the container.
        :param str last_updated_time: Time stamp when this container was updated.
        :param str operation_type: Re-Do Operation
        :param str registration_status: Status of registration of the container with the Recovery Services Vault.
        :param str source_resource_id: ARM ID of the virtual machine represented by this Azure Workload Container
        :param str workload_type: Workload type for which registration was sent.
        """
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if container_type is not None:
            # The discriminator is forced to this container kind regardless of
            # the value the caller supplied (generated-SDK behavior).
            pulumi.set(__self__, "container_type", 'AzureWorkloadContainer')
        # Remaining optional fields are stored only when supplied.
        for key, value in (("extended_info", extended_info),
                           ("friendly_name", friendly_name),
                           ("health_status", health_status),
                           ("last_updated_time", last_updated_time),
                           ("operation_type", operation_type),
                           ("registration_status", registration_status),
                           ("source_resource_id", source_resource_id),
                           ("workload_type", workload_type)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """Type of backup management for the container."""
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="containerType")
    def container_type(self) -> Optional[str]:
        """
        Type of the container. The value of this property for: 1. Compute Azure VM is Microsoft.Compute/virtualMachines 2.
        Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines 3. Windows machines (like MAB, DPM etc) is
        Windows 4. Azure SQL instance is AzureSqlContainer. 5. Storage containers is StorageContainer. 6. Azure workload
        Backup is VMAppContainer
        """
        return pulumi.get(self, "container_type")

    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional['outputs.AzureWorkloadContainerExtendedInfoResponse']:
        """Additional details of a workload container."""
        return pulumi.get(self, "extended_info")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """Friendly name of the container."""
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="healthStatus")
    def health_status(self) -> Optional[str]:
        """Health status of the container."""
        return pulumi.get(self, "health_status")

    @property
    @pulumi.getter(name="lastUpdatedTime")
    def last_updated_time(self) -> Optional[str]:
        """Timestamp when this container was updated."""
        return pulumi.get(self, "last_updated_time")

    @property
    @pulumi.getter(name="operationType")
    def operation_type(self) -> Optional[str]:
        """Re-Do Operation"""
        return pulumi.get(self, "operation_type")

    @property
    @pulumi.getter(name="registrationStatus")
    def registration_status(self) -> Optional[str]:
        """Status of registration of the container with the Recovery Services Vault."""
        return pulumi.get(self, "registration_status")

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[str]:
        """ARM ID of the virtual machine represented by this Azure Workload Container."""
        return pulumi.get(self, "source_resource_id")

    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[str]:
        """Workload type for which registration was sent."""
        return pulumi.get(self, "workload_type")

    def _translate_property(self, prop):
        # Map camelCase wire names back to the snake_case attributes used here.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class ContainerIdentityInfoResponse(dict):
    """
    Container identity information.
    """
    def __init__(__self__, *,
                 aad_tenant_id: Optional[str] = None,
                 audience: Optional[str] = None,
                 service_principal_client_id: Optional[str] = None,
                 unique_name: Optional[str] = None):
        """
        Container identity information.
        :param str aad_tenant_id: Protection container identity - AAD Tenant
        :param str audience: Protection container identity - Audience
        :param str service_principal_client_id: Protection container identity - AAD Service Principal
        :param str unique_name: Unique name of the container
        """
        # Persist only the values that were actually supplied.
        for key, value in (("aad_tenant_id", aad_tenant_id),
                           ("audience", audience),
                           ("service_principal_client_id", service_principal_client_id),
                           ("unique_name", unique_name)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="aadTenantId")
    def aad_tenant_id(self) -> Optional[str]:
        """Protection container identity - AAD Tenant."""
        return pulumi.get(self, "aad_tenant_id")

    @property
    @pulumi.getter
    def audience(self) -> Optional[str]:
        """Protection container identity - Audience."""
        return pulumi.get(self, "audience")

    @property
    @pulumi.getter(name="servicePrincipalClientId")
    def service_principal_client_id(self) -> Optional[str]:
        """Protection container identity - AAD Service Principal."""
        return pulumi.get(self, "service_principal_client_id")

    @property
    @pulumi.getter(name="uniqueName")
    def unique_name(self) -> Optional[str]:
        """Unique name of the container."""
        return pulumi.get(self, "unique_name")

    def _translate_property(self, prop):
        # Map camelCase wire names back to the snake_case attributes used here.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class CurrentJobDetailsResponse(dict):
    """
    Current job details of the migration item.
    """
    def __init__(__self__, *,
                 job_id: Optional[str] = None,
                 job_name: Optional[str] = None,
                 start_time: Optional[str] = None):
        """
        Current job details of the migration item.
        :param str job_id: The ARM Id of the job being executed.
        :param str job_name: The job name.
        :param str start_time: The start time of the job.
        """
        # Persist only the values that were actually supplied.
        for key, value in (("job_id", job_id),
                           ("job_name", job_name),
                           ("start_time", start_time)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="jobId")
    def job_id(self) -> Optional[str]:
        """The ARM Id of the job being executed."""
        return pulumi.get(self, "job_id")

    @property
    @pulumi.getter(name="jobName")
    def job_name(self) -> Optional[str]:
        """The job name."""
        return pulumi.get(self, "job_name")

    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> Optional[str]:
        """The start time of the job."""
        return pulumi.get(self, "start_time")

    def _translate_property(self, prop):
        # Map camelCase wire names back to the snake_case attributes used here.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class CurrentScenarioDetailsResponse(dict):
    """
    Current scenario details of the protected entity.
    """
    def __init__(__self__, *,
                 job_id: Optional[str] = None,
                 scenario_name: Optional[str] = None,
                 start_time: Optional[str] = None):
        """
        Current scenario details of the protected entity.
        :param str job_id: ARM Id of the job being executed.
        :param str scenario_name: Scenario name.
        :param str start_time: Start time of the workflow.
        """
        # Persist only the values that were actually supplied.
        for key, value in (("job_id", job_id),
                           ("scenario_name", scenario_name),
                           ("start_time", start_time)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="jobId")
    def job_id(self) -> Optional[str]:
        """ARM Id of the job being executed."""
        return pulumi.get(self, "job_id")

    @property
    @pulumi.getter(name="scenarioName")
    def scenario_name(self) -> Optional[str]:
        """Scenario name."""
        return pulumi.get(self, "scenario_name")

    @property
    @pulumi.getter(name="startTime")
    def start_time(self) -> Optional[str]:
        """Start time of the workflow."""
        return pulumi.get(self, "start_time")

    def _translate_property(self, prop):
        # Map camelCase wire names back to the snake_case attributes used here.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DPMContainerExtendedInfoResponse(dict):
    """
    Additional information about a DPMContainer.
    """
    def __init__(__self__, *,
                 last_refreshed_at: Optional[str] = None):
        """
        Additional information about a DPMContainer.
        :param str last_refreshed_at: Last refresh time of the DPMContainer.
        """
        # Persist the value only when it was actually supplied.
        if last_refreshed_at is not None:
            pulumi.set(__self__, "last_refreshed_at", last_refreshed_at)

    @property
    @pulumi.getter(name="lastRefreshedAt")
    def last_refreshed_at(self) -> Optional[str]:
        """Last refresh time of the DPMContainer."""
        refreshed = pulumi.get(self, "last_refreshed_at")
        return refreshed

    def _translate_property(self, prop):
        # Map camelCase wire names back to the snake_case attributes used here.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DPMProtectedItemExtendedInfoResponse(dict):
    """
    Additional information of a DPM protected item.
    """
    def __init__(__self__, *,
                 disk_storage_used_in_bytes: Optional[str] = None,
                 is_collocated: Optional[bool] = None,
                 is_present_on_cloud: Optional[bool] = None,
                 last_backup_status: Optional[str] = None,
                 last_refreshed_at: Optional[str] = None,
                 oldest_recovery_point: Optional[str] = None,
                 on_premise_latest_recovery_point: Optional[str] = None,
                 on_premise_oldest_recovery_point: Optional[str] = None,
                 on_premise_recovery_point_count: Optional[int] = None,
                 protectable_object_load_path: Optional[Mapping[str, str]] = None,
                 protected: Optional[bool] = None,
                 protection_group_name: Optional[str] = None,
                 recovery_point_count: Optional[int] = None,
                 total_disk_storage_size_in_bytes: Optional[str] = None):
        """
        Additional information of a DPM protected item.
        :param str disk_storage_used_in_bytes: Used disk storage in bytes.
        :param bool is_collocated: Whether the backup item is collocated.
        :param bool is_present_on_cloud: Whether the backup item is cloud protected.
        :param str last_backup_status: Last backup status information on the backup item.
        :param str last_refreshed_at: Last refresh time on the backup item.
        :param str oldest_recovery_point: Oldest cloud recovery point time.
        :param str on_premise_latest_recovery_point: Latest disk recovery point time.
        :param str on_premise_oldest_recovery_point: Oldest disk recovery point time.
        :param int on_premise_recovery_point_count: Disk recovery point count.
        :param Mapping[str, str] protectable_object_load_path: Attribute providing information on various DBs.
        :param bool protected: Whether the backup item is disk protected.
        :param str protection_group_name: Protection group name of the backup item.
        :param int recovery_point_count: Cloud recovery point count.
        :param str total_disk_storage_size_in_bytes: Total disk storage in bytes.
        """
        # Persist only the values that were actually supplied, in declaration order.
        for key, value in (("disk_storage_used_in_bytes", disk_storage_used_in_bytes),
                           ("is_collocated", is_collocated),
                           ("is_present_on_cloud", is_present_on_cloud),
                           ("last_backup_status", last_backup_status),
                           ("last_refreshed_at", last_refreshed_at),
                           ("oldest_recovery_point", oldest_recovery_point),
                           ("on_premise_latest_recovery_point", on_premise_latest_recovery_point),
                           ("on_premise_oldest_recovery_point", on_premise_oldest_recovery_point),
                           ("on_premise_recovery_point_count", on_premise_recovery_point_count),
                           ("protectable_object_load_path", protectable_object_load_path),
                           ("protected", protected),
                           ("protection_group_name", protection_group_name),
                           ("recovery_point_count", recovery_point_count),
                           ("total_disk_storage_size_in_bytes", total_disk_storage_size_in_bytes)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="diskStorageUsedInBytes")
    def disk_storage_used_in_bytes(self) -> Optional[str]:
        """Used disk storage in bytes."""
        return pulumi.get(self, "disk_storage_used_in_bytes")

    @property
    @pulumi.getter(name="isCollocated")
    def is_collocated(self) -> Optional[bool]:
        """Whether the backup item is collocated."""
        return pulumi.get(self, "is_collocated")

    @property
    @pulumi.getter(name="isPresentOnCloud")
    def is_present_on_cloud(self) -> Optional[bool]:
        """Whether the backup item is cloud protected."""
        return pulumi.get(self, "is_present_on_cloud")

    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[str]:
        """Last backup status information on the backup item."""
        return pulumi.get(self, "last_backup_status")

    @property
    @pulumi.getter(name="lastRefreshedAt")
    def last_refreshed_at(self) -> Optional[str]:
        """Last refresh time on the backup item."""
        return pulumi.get(self, "last_refreshed_at")

    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[str]:
        """Oldest cloud recovery point time."""
        return pulumi.get(self, "oldest_recovery_point")

    @property
    @pulumi.getter(name="onPremiseLatestRecoveryPoint")
    def on_premise_latest_recovery_point(self) -> Optional[str]:
        """Latest disk recovery point time."""
        return pulumi.get(self, "on_premise_latest_recovery_point")

    @property
    @pulumi.getter(name="onPremiseOldestRecoveryPoint")
    def on_premise_oldest_recovery_point(self) -> Optional[str]:
        """Oldest disk recovery point time."""
        return pulumi.get(self, "on_premise_oldest_recovery_point")

    @property
    @pulumi.getter(name="onPremiseRecoveryPointCount")
    def on_premise_recovery_point_count(self) -> Optional[int]:
        """Disk recovery point count."""
        return pulumi.get(self, "on_premise_recovery_point_count")

    @property
    @pulumi.getter(name="protectableObjectLoadPath")
    def protectable_object_load_path(self) -> Optional[Mapping[str, str]]:
        """Attribute providing information on various DBs."""
        return pulumi.get(self, "protectable_object_load_path")

    @property
    @pulumi.getter
    def protected(self) -> Optional[bool]:
        """Whether the backup item is disk protected."""
        return pulumi.get(self, "protected")

    @property
    @pulumi.getter(name="protectionGroupName")
    def protection_group_name(self) -> Optional[str]:
        """Protection group name of the backup item."""
        return pulumi.get(self, "protection_group_name")

    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[int]:
        """Cloud recovery point count."""
        return pulumi.get(self, "recovery_point_count")

    @property
    @pulumi.getter(name="totalDiskStorageSizeInBytes")
    def total_disk_storage_size_in_bytes(self) -> Optional[str]:
        """Total disk storage in bytes."""
        return pulumi.get(self, "total_disk_storage_size_in_bytes")

    def _translate_property(self, prop):
        # Map camelCase wire names back to the snake_case attributes used here.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DPMProtectedItemResponse(dict):
    """
    Additional information on a Backup-engine-specific backup item.
    """
    def __init__(__self__, *,
                 protected_item_type: str,
                 backup_engine_name: Optional[str] = None,
                 backup_management_type: Optional[str] = None,
                 backup_set_name: Optional[str] = None,
                 container_name: Optional[str] = None,
                 create_mode: Optional[str] = None,
                 deferred_delete_time_in_utc: Optional[str] = None,
                 deferred_delete_time_remaining: Optional[str] = None,
                 extended_info: Optional['outputs.DPMProtectedItemExtendedInfoResponse'] = None,
                 friendly_name: Optional[str] = None,
                 is_deferred_delete_schedule_upcoming: Optional[bool] = None,
                 is_rehydrate: Optional[bool] = None,
                 is_scheduled_for_deferred_delete: Optional[bool] = None,
                 last_recovery_point: Optional[str] = None,
                 policy_id: Optional[str] = None,
                 protection_state: Optional[str] = None,
                 source_resource_id: Optional[str] = None,
                 workload_type: Optional[str] = None):
        """
        Additional information on a Backup-engine-specific backup item.
        :param str protected_item_type: backup item type.
        :param str backup_engine_name: Backup Management server protecting this backup item
        :param str backup_management_type: Type of backup management for the backed up item.
        :param str backup_set_name: Name of the backup set the backup item belongs to
        :param str container_name: Unique name of container
        :param str create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param str deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param str deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param 'DPMProtectedItemExtendedInfoResponseArgs' extended_info: Extended info of the backup item.
        :param str friendly_name: Friendly name of the managed item
        :param bool is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param bool is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param bool is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param str last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param str policy_id: ID of the backup policy with which this item is backed up.
        :param str protection_state: Protection state of the backup engine
        :param str source_resource_id: ARM ID of the resource to be backed up.
        :param str workload_type: Type of workload this item represents.
        """
        # The discriminator is always the fixed literal for this item kind,
        # regardless of the value supplied by the caller (generated-SDK behavior).
        pulumi.set(__self__, "protected_item_type", 'DPMProtectedItem')
        # Remaining optional fields are stored only when supplied, in declaration order.
        for key, value in (("backup_engine_name", backup_engine_name),
                           ("backup_management_type", backup_management_type),
                           ("backup_set_name", backup_set_name),
                           ("container_name", container_name),
                           ("create_mode", create_mode),
                           ("deferred_delete_time_in_utc", deferred_delete_time_in_utc),
                           ("deferred_delete_time_remaining", deferred_delete_time_remaining),
                           ("extended_info", extended_info),
                           ("friendly_name", friendly_name),
                           ("is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming),
                           ("is_rehydrate", is_rehydrate),
                           ("is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete),
                           ("last_recovery_point", last_recovery_point),
                           ("policy_id", policy_id),
                           ("protection_state", protection_state),
                           ("source_resource_id", source_resource_id),
                           ("workload_type", workload_type)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> str:
        """Backup item type (always the DPM discriminator for this class)."""
        return pulumi.get(self, "protected_item_type")

    @property
    @pulumi.getter(name="backupEngineName")
    def backup_engine_name(self) -> Optional[str]:
        """Backup Management server protecting this backup item."""
        return pulumi.get(self, "backup_engine_name")

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """Type of backup management for the backed up item."""
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[str]:
        """Name of the backup set the backup item belongs to."""
        return pulumi.get(self, "backup_set_name")

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[str]:
        """Unique name of the container."""
        return pulumi.get(self, "container_name")

    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[str]:
        """Create mode: recovery of an existing soft-deleted data source, or creation of a new one."""
        return pulumi.get(self, "create_mode")

    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[str]:
        """Time for deferred deletion in UTC."""
        return pulumi.get(self, "deferred_delete_time_in_utc")

    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[str]:
        """Time remaining before the DS marked for deferred delete is permanently deleted."""
        return pulumi.get(self, "deferred_delete_time_remaining")

    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional['outputs.DPMProtectedItemExtendedInfoResponse']:
        """Extended info of the backup item."""
        return pulumi.get(self, "extended_info")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """Friendly name of the managed item."""
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[bool]:
        """Whether the deferred-deleted DS is to be purged soon."""
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")

    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[bool]:
        """Whether the deferred-deleted DS is to be moved into the Pause state."""
        return pulumi.get(self, "is_rehydrate")

    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[bool]:
        """Whether the DS is scheduled for deferred delete."""
        return pulumi.get(self, "is_scheduled_for_deferred_delete")

    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[str]:
        """Timestamp at which the latest backup copy was created for this item."""
        return pulumi.get(self, "last_recovery_point")

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[str]:
        """ID of the backup policy this item is backed up with."""
        return pulumi.get(self, "policy_id")

    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[str]:
        """Protection state of the backup engine."""
        return pulumi.get(self, "protection_state")

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[str]:
        """ARM ID of the resource to be backed up."""
        return pulumi.get(self, "source_resource_id")

    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[str]:
        """Type of workload this item represents."""
        return pulumi.get(self, "workload_type")

    def _translate_property(self, prop):
        # Map camelCase wire names back to the snake_case attributes used here.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DailyRetentionFormatResponse(dict):
    """
    Daily retention format.
    """
    def __init__(__self__, *,
                 days_of_the_month: Optional[Sequence['outputs.DayResponse']] = None):
        """
        Daily retention format.
        :param Sequence['DayResponseArgs'] days_of_the_month: List of days of the month.
        """
        # Persist the value only when it was actually supplied.
        if days_of_the_month is not None:
            pulumi.set(__self__, "days_of_the_month", days_of_the_month)

    @property
    @pulumi.getter(name="daysOfTheMonth")
    def days_of_the_month(self) -> Optional[Sequence['outputs.DayResponse']]:
        """List of days of the month."""
        days = pulumi.get(self, "days_of_the_month")
        return days

    def _translate_property(self, prop):
        # Map camelCase wire names back to the snake_case attributes used here.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DailyRetentionScheduleResponse(dict):
    """
    Daily retention schedule.
    """
    def __init__(__self__, *,
                 retention_duration: Optional['outputs.RetentionDurationResponse'] = None,
                 retention_times: Optional[Sequence[str]] = None):
        """
        Daily retention schedule.
        :param 'RetentionDurationResponseArgs' retention_duration: The retention duration of the retention policy.
        :param Sequence[str] retention_times: The retention times of the retention policy.
        """
        # Persist only the values that were actually supplied.
        for key, value in (("retention_duration", retention_duration),
                           ("retention_times", retention_times)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="retentionDuration")
    def retention_duration(self) -> Optional['outputs.RetentionDurationResponse']:
        """The retention duration of the retention policy."""
        return pulumi.get(self, "retention_duration")

    @property
    @pulumi.getter(name="retentionTimes")
    def retention_times(self) -> Optional[Sequence[str]]:
        """The retention times of the retention policy."""
        return pulumi.get(self, "retention_times")

    def _translate_property(self, prop):
        # Map camelCase wire names back to the snake_case attributes used here.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DataStoreResponse(dict):
    """
    The data store details of the MT.
    """
    def __init__(__self__, *,
                 capacity: Optional[str] = None,
                 free_space: Optional[str] = None,
                 symbolic_name: Optional[str] = None,
                 type: Optional[str] = None,
                 uuid: Optional[str] = None):
        """
        The data store details of the MT.
        :param str capacity: The capacity of the data store in GBs.
        :param str free_space: The free space of the data store in GBs.
        :param str symbolic_name: The symbolic name of the data store.
        :param str type: The type of the data store.
        :param str uuid: The uuid of the data store.
        """
        # Persist only the values that were actually supplied.
        for key, value in (("capacity", capacity),
                           ("free_space", free_space),
                           ("symbolic_name", symbolic_name),
                           ("type", type),
                           ("uuid", uuid)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def capacity(self) -> Optional[str]:
        """The capacity of the data store in GBs."""
        return pulumi.get(self, "capacity")

    @property
    @pulumi.getter(name="freeSpace")
    def free_space(self) -> Optional[str]:
        """The free space of the data store in GBs."""
        return pulumi.get(self, "free_space")

    @property
    @pulumi.getter(name="symbolicName")
    def symbolic_name(self) -> Optional[str]:
        """The symbolic name of the data store."""
        return pulumi.get(self, "symbolic_name")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """The type of the data store."""
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def uuid(self) -> Optional[str]:
        """The uuid of the data store."""
        return pulumi.get(self, "uuid")

    def _translate_property(self, prop):
        # Map camelCase wire names back to the snake_case attributes used here.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DayResponse(dict):
    """
    Day of the week.
    """
    def __init__(__self__, *,
                 date: Optional[int] = None,
                 is_last: Optional[bool] = None):
        """
        Day of the week.
        """
        # Persist only the values that were actually supplied.
        # NOTE(review): upstream spec provides no descriptions for these
        # fields; presumably `date` is a day number and `is_last` marks the
        # last day — confirm against the service schema before documenting.
        for key, value in (("date", date), ("is_last", is_last)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def date(self) -> Optional[int]:
        return pulumi.get(self, "date")

    @property
    @pulumi.getter(name="isLast")
    def is_last(self) -> Optional[bool]:
        return pulumi.get(self, "is_last")

    def _translate_property(self, prop):
        # Map camelCase wire names back to the snake_case attributes used here.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DiskDetailsResponse(dict):
    """
    On-prem disk details data.
    """
    def __init__(__self__, *,
                 max_size_mb: Optional[int] = None,
                 vhd_id: Optional[str] = None,
                 vhd_name: Optional[str] = None,
                 vhd_type: Optional[str] = None):
        """
        On-prem disk details data.
        :param int max_size_mb: The hard disk max size in MB.
        :param str vhd_id: The VHD Id.
        :param str vhd_name: The VHD name.
        :param str vhd_type: The type of the volume.
        """
        # Record only the fields the caller supplied.
        for field, value in (("max_size_mb", max_size_mb),
                             ("vhd_id", vhd_id),
                             ("vhd_name", vhd_name),
                             ("vhd_type", vhd_type)):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter(name="maxSizeMB")
    def max_size_mb(self) -> Optional[int]:
        """
        The hard disk max size in MB.
        """
        return pulumi.get(self, "max_size_mb")

    @property
    @pulumi.getter(name="vhdId")
    def vhd_id(self) -> Optional[str]:
        """
        The VHD Id.
        """
        return pulumi.get(self, "vhd_id")

    @property
    @pulumi.getter(name="vhdName")
    def vhd_name(self) -> Optional[str]:
        """
        The VHD name.
        """
        return pulumi.get(self, "vhd_name")

    @property
    @pulumi.getter(name="vhdType")
    def vhd_type(self) -> Optional[str]:
        """
        The type of the volume.
        """
        return pulumi.get(self, "vhd_type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name, if known.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class DiskExclusionPropertiesResponse(dict):
    def __init__(__self__, *,
                 disk_lun_list: Optional[Sequence[int]] = None,
                 is_inclusion_list: Optional[bool] = None):
        """
        :param Sequence[int] disk_lun_list: List of Disks' Logical Unit Numbers (LUN) to be used for VM Protection.
        :param bool is_inclusion_list: Flag to indicate whether DiskLunList is to be included/ excluded from backup.
        """
        # Store only fields that were actually provided.
        for field, value in (("disk_lun_list", disk_lun_list),
                             ("is_inclusion_list", is_inclusion_list)):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter(name="diskLunList")
    def disk_lun_list(self) -> Optional[Sequence[int]]:
        """
        List of Disks' Logical Unit Numbers (LUN) to be used for VM Protection.
        """
        return pulumi.get(self, "disk_lun_list")

    @property
    @pulumi.getter(name="isInclusionList")
    def is_inclusion_list(self) -> Optional[bool]:
        """
        Flag to indicate whether DiskLunList is to be included/ excluded from backup.
        """
        return pulumi.get(self, "is_inclusion_list")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name, if known.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class DistributedNodesInfoResponse(dict):
    """
    This is used to represent the various nodes of the distributed container.
    """
    def __init__(__self__, *,
                 error_detail: Optional['outputs.ErrorDetailResponse'] = None,
                 node_name: Optional[str] = None,
                 status: Optional[str] = None):
        """
        This is used to represent the various nodes of the distributed container.
        :param 'ErrorDetailResponseArgs' error_detail: Error Details if the Status is non-success.
        :param str node_name: Name of the node under a distributed container.
        :param str status: Status of this Node.
               Failed | Succeeded
        """
        # Store only fields that were actually provided.
        for field, value in (("error_detail", error_detail),
                             ("node_name", node_name),
                             ("status", status)):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter(name="errorDetail")
    def error_detail(self) -> Optional['outputs.ErrorDetailResponse']:
        """
        Error Details if the Status is non-success.
        """
        return pulumi.get(self, "error_detail")

    @property
    @pulumi.getter(name="nodeName")
    def node_name(self) -> Optional[str]:
        """
        Name of the node under a distributed container.
        """
        return pulumi.get(self, "node_name")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """
        Status of this Node.
        Failed | Succeeded
        """
        return pulumi.get(self, "status")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name, if known.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class DpmContainerResponse(dict):
    """
    DPM workload-specific protection container.
    """
    def __init__(__self__, *,
                 backup_management_type: Optional[str] = None,
                 can_re_register: Optional[bool] = None,
                 container_id: Optional[str] = None,
                 container_type: Optional[str] = None,
                 dpm_agent_version: Optional[str] = None,
                 dpm_servers: Optional[Sequence[str]] = None,
                 extended_info: Optional['outputs.DPMContainerExtendedInfoResponse'] = None,
                 friendly_name: Optional[str] = None,
                 health_status: Optional[str] = None,
                 protected_item_count: Optional[int] = None,
                 protection_status: Optional[str] = None,
                 registration_status: Optional[str] = None,
                 upgrade_available: Optional[bool] = None):
        """
        DPM workload-specific protection container.
        :param str backup_management_type: Type of backup management for the container.
        :param bool can_re_register: Specifies whether the container is re-registrable.
        :param str container_id: ID of container.
        :param str container_type: Type of the container. The value of this property for: 1. Compute Azure VM is Microsoft.Compute/virtualMachines 2.
               Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines 3. Windows machines (like MAB, DPM etc) is
               Windows 4. Azure SQL instance is AzureSqlContainer. 5. Storage containers is StorageContainer. 6. Azure workload
               Backup is VMAppContainer
        :param str dpm_agent_version: Backup engine Agent version
        :param Sequence[str] dpm_servers: List of BackupEngines protecting the container
        :param 'DPMContainerExtendedInfoResponseArgs' extended_info: Extended Info of the container.
        :param str friendly_name: Friendly name of the container.
        :param str health_status: Status of health of the container.
        :param int protected_item_count: Number of protected items in the BackupEngine
        :param str protection_status: Protection status of the container.
        :param str registration_status: Status of registration of the container with the Recovery Services Vault.
        :param bool upgrade_available: To check if upgrade available
        """
        # Store only the fields the caller actually supplied.
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if can_re_register is not None:
            pulumi.set(__self__, "can_re_register", can_re_register)
        if container_id is not None:
            pulumi.set(__self__, "container_id", container_id)
        if container_type is not None:
            # NOTE(review): type discriminator — the stored value is always the
            # constant 'DPMContainer', regardless of what was passed in;
            # presumably intentional codegen behavior.
            pulumi.set(__self__, "container_type", 'DPMContainer')
        if dpm_agent_version is not None:
            pulumi.set(__self__, "dpm_agent_version", dpm_agent_version)
        if dpm_servers is not None:
            pulumi.set(__self__, "dpm_servers", dpm_servers)
        if extended_info is not None:
            pulumi.set(__self__, "extended_info", extended_info)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if health_status is not None:
            pulumi.set(__self__, "health_status", health_status)
        if protected_item_count is not None:
            pulumi.set(__self__, "protected_item_count", protected_item_count)
        if protection_status is not None:
            pulumi.set(__self__, "protection_status", protection_status)
        if registration_status is not None:
            pulumi.set(__self__, "registration_status", registration_status)
        if upgrade_available is not None:
            pulumi.set(__self__, "upgrade_available", upgrade_available)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """
        Type of backup management for the container.
        """
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="canReRegister")
    def can_re_register(self) -> Optional[bool]:
        """
        Specifies whether the container is re-registrable.
        """
        return pulumi.get(self, "can_re_register")

    @property
    @pulumi.getter(name="containerId")
    def container_id(self) -> Optional[str]:
        """
        ID of container.
        """
        return pulumi.get(self, "container_id")

    @property
    @pulumi.getter(name="containerType")
    def container_type(self) -> Optional[str]:
        """
        Type of the container. The value of this property for: 1. Compute Azure VM is Microsoft.Compute/virtualMachines 2.
        Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines 3. Windows machines (like MAB, DPM etc) is
        Windows 4. Azure SQL instance is AzureSqlContainer. 5. Storage containers is StorageContainer. 6. Azure workload
        Backup is VMAppContainer
        """
        return pulumi.get(self, "container_type")

    @property
    @pulumi.getter(name="dpmAgentVersion")
    def dpm_agent_version(self) -> Optional[str]:
        """
        Backup engine Agent version
        """
        return pulumi.get(self, "dpm_agent_version")

    @property
    @pulumi.getter(name="dpmServers")
    def dpm_servers(self) -> Optional[Sequence[str]]:
        """
        List of BackupEngines protecting the container
        """
        return pulumi.get(self, "dpm_servers")

    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional['outputs.DPMContainerExtendedInfoResponse']:
        """
        Extended Info of the container.
        """
        return pulumi.get(self, "extended_info")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """
        Friendly name of the container.
        """
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="healthStatus")
    def health_status(self) -> Optional[str]:
        """
        Status of health of the container.
        """
        return pulumi.get(self, "health_status")

    @property
    @pulumi.getter(name="protectedItemCount")
    def protected_item_count(self) -> Optional[int]:
        """
        Number of protected items in the BackupEngine
        """
        return pulumi.get(self, "protected_item_count")

    @property
    @pulumi.getter(name="protectionStatus")
    def protection_status(self) -> Optional[str]:
        """
        Protection status of the container.
        """
        return pulumi.get(self, "protection_status")

    @property
    @pulumi.getter(name="registrationStatus")
    def registration_status(self) -> Optional[str]:
        """
        Status of registration of the container with the Recovery Services Vault.
        """
        return pulumi.get(self, "registration_status")

    @property
    @pulumi.getter(name="upgradeAvailable")
    def upgrade_available(self) -> Optional[bool]:
        """
        To check if upgrade available
        """
        return pulumi.get(self, "upgrade_available")

    def _translate_property(self, prop):
        # Map a camelCase wire property name to its snake_case Python name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class DraDetailsResponse(dict):
    """
    DRA details.
    """
    def __init__(__self__, *,
                 health: str,
                 health_errors: Sequence['outputs.HealthErrorResponse'],
                 id: str,
                 last_heartbeat_utc: str,
                 name: str,
                 version: str):
        """
        DRA details.
        :param str health: The health of the DRA.
        :param Sequence['HealthErrorResponseArgs'] health_errors: The health errors.
        :param str id: The DRA Id.
        :param str last_heartbeat_utc: The last heartbeat received from the DRA.
        :param str name: The DRA name.
        :param str version: The DRA version.
        """
        # All fields are required on this response type, so every one is stored.
        for field, value in (("health", health),
                             ("health_errors", health_errors),
                             ("id", id),
                             ("last_heartbeat_utc", last_heartbeat_utc),
                             ("name", name),
                             ("version", version)):
            pulumi.set(__self__, field, value)

    @property
    @pulumi.getter
    def health(self) -> str:
        """
        The health of the DRA.
        """
        return pulumi.get(self, "health")

    @property
    @pulumi.getter(name="healthErrors")
    def health_errors(self) -> Sequence['outputs.HealthErrorResponse']:
        """
        The health errors.
        """
        return pulumi.get(self, "health_errors")

    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The DRA Id.
        """
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="lastHeartbeatUtc")
    def last_heartbeat_utc(self) -> str:
        """
        The last heartbeat received from the DRA.
        """
        return pulumi.get(self, "last_heartbeat_utc")

    @property
    @pulumi.getter
    def name(self) -> str:
        """
        The DRA name.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def version(self) -> str:
        """
        The DRA version.
        """
        return pulumi.get(self, "version")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name, if known.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class EncryptionDetailsResponse(dict):
    """
    Encryption details for the fabric.
    """
    def __init__(__self__, *,
                 kek_cert_expiry_date: Optional[str] = None,
                 kek_cert_thumbprint: Optional[str] = None,
                 kek_state: Optional[str] = None):
        """
        Encryption details for the fabric.
        :param str kek_cert_expiry_date: The key encryption key certificate expiry date.
        :param str kek_cert_thumbprint: The key encryption key certificate thumbprint.
        :param str kek_state: The key encryption key state for the Vmm.
        """
        # Store only fields that were actually provided.
        for field, value in (("kek_cert_expiry_date", kek_cert_expiry_date),
                             ("kek_cert_thumbprint", kek_cert_thumbprint),
                             ("kek_state", kek_state)):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter(name="kekCertExpiryDate")
    def kek_cert_expiry_date(self) -> Optional[str]:
        """
        The key encryption key certificate expiry date.
        """
        return pulumi.get(self, "kek_cert_expiry_date")

    @property
    @pulumi.getter(name="kekCertThumbprint")
    def kek_cert_thumbprint(self) -> Optional[str]:
        """
        The key encryption key certificate thumbprint.
        """
        return pulumi.get(self, "kek_cert_thumbprint")

    @property
    @pulumi.getter(name="kekState")
    def kek_state(self) -> Optional[str]:
        """
        The key encryption key state for the Vmm.
        """
        return pulumi.get(self, "kek_state")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name, if known.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ErrorDetailResponse(dict):
    """
    Error Detail class which encapsulates Code, Message and Recommendations.
    """
    def __init__(__self__, *,
                 code: str,
                 message: str,
                 recommendations: Sequence[str]):
        """
        Error Detail class which encapsulates Code, Message and Recommendations.
        :param str code: Error code.
        :param str message: Error Message related to the Code.
        :param Sequence[str] recommendations: List of recommendation strings.
        """
        # Every field is required on this response type.
        for field, value in (("code", code),
                             ("message", message),
                             ("recommendations", recommendations)):
            pulumi.set(__self__, field, value)

    @property
    @pulumi.getter
    def code(self) -> str:
        """
        Error code.
        """
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def message(self) -> str:
        """
        Error Message related to the Code.
        """
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def recommendations(self) -> Sequence[str]:
        """
        List of recommendation strings.
        """
        return pulumi.get(self, "recommendations")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name, if known.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ExtendedPropertiesResponse(dict):
    """
    Extended Properties for Azure IaasVM Backup.
    """
    def __init__(__self__, *,
                 disk_exclusion_properties: Optional['outputs.DiskExclusionPropertiesResponse'] = None):
        """
        Extended Properties for Azure IaasVM Backup.
        :param 'DiskExclusionPropertiesResponseArgs' disk_exclusion_properties: Extended Properties for Disk Exclusion.
        """
        # Guard clause: nothing to record when the field was not supplied.
        if disk_exclusion_properties is None:
            return
        pulumi.set(__self__, "disk_exclusion_properties", disk_exclusion_properties)

    @property
    @pulumi.getter(name="diskExclusionProperties")
    def disk_exclusion_properties(self) -> Optional['outputs.DiskExclusionPropertiesResponse']:
        """
        Extended Properties for Disk Exclusion.
        """
        return pulumi.get(self, "disk_exclusion_properties")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name, if known.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class FabricPropertiesResponse(dict):
    """
    Fabric properties.
    """
    def __init__(__self__, *,
                 bcdr_state: Optional[str] = None,
                 custom_details: Optional[Any] = None,
                 encryption_details: Optional['outputs.EncryptionDetailsResponse'] = None,
                 friendly_name: Optional[str] = None,
                 health: Optional[str] = None,
                 health_error_details: Optional[Sequence['outputs.HealthErrorResponse']] = None,
                 internal_identifier: Optional[str] = None,
                 rollover_encryption_details: Optional['outputs.EncryptionDetailsResponse'] = None):
        """
        Fabric properties.
        :param str bcdr_state: BCDR state of the fabric.
        :param Union['AzureFabricSpecificDetailsResponseArgs', 'HyperVSiteDetailsResponseArgs', 'InMageRcmFabricSpecificDetailsResponseArgs', 'VMwareDetailsResponseArgs', 'VMwareV2FabricSpecificDetailsResponseArgs', 'VmmDetailsResponseArgs'] custom_details: Fabric specific settings.
        :param 'EncryptionDetailsResponseArgs' encryption_details: Encryption details for the fabric.
        :param str friendly_name: Friendly name of the fabric.
        :param str health: Health of fabric.
        :param Sequence['HealthErrorResponseArgs'] health_error_details: Fabric health error details.
        :param str internal_identifier: Dra Registration Id.
        :param 'EncryptionDetailsResponseArgs' rollover_encryption_details: Rollover encryption details for the fabric.
        """
        # Store only fields the caller actually supplied.
        if bcdr_state is not None:
            pulumi.set(__self__, "bcdr_state", bcdr_state)
        if custom_details is not None:
            # custom_details is typed Any: a union of fabric-specific detail
            # responses (see the docstring above).
            pulumi.set(__self__, "custom_details", custom_details)
        if encryption_details is not None:
            pulumi.set(__self__, "encryption_details", encryption_details)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if health is not None:
            pulumi.set(__self__, "health", health)
        if health_error_details is not None:
            pulumi.set(__self__, "health_error_details", health_error_details)
        if internal_identifier is not None:
            pulumi.set(__self__, "internal_identifier", internal_identifier)
        if rollover_encryption_details is not None:
            pulumi.set(__self__, "rollover_encryption_details", rollover_encryption_details)

    @property
    @pulumi.getter(name="bcdrState")
    def bcdr_state(self) -> Optional[str]:
        """
        BCDR state of the fabric.
        """
        return pulumi.get(self, "bcdr_state")

    @property
    @pulumi.getter(name="customDetails")
    def custom_details(self) -> Optional[Any]:
        """
        Fabric specific settings.
        """
        return pulumi.get(self, "custom_details")

    @property
    @pulumi.getter(name="encryptionDetails")
    def encryption_details(self) -> Optional['outputs.EncryptionDetailsResponse']:
        """
        Encryption details for the fabric.
        """
        return pulumi.get(self, "encryption_details")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """
        Friendly name of the fabric.
        """
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter
    def health(self) -> Optional[str]:
        """
        Health of fabric.
        """
        return pulumi.get(self, "health")

    @property
    @pulumi.getter(name="healthErrorDetails")
    def health_error_details(self) -> Optional[Sequence['outputs.HealthErrorResponse']]:
        """
        Fabric health error details.
        """
        return pulumi.get(self, "health_error_details")

    @property
    @pulumi.getter(name="internalIdentifier")
    def internal_identifier(self) -> Optional[str]:
        """
        Dra Registration Id.
        """
        return pulumi.get(self, "internal_identifier")

    @property
    @pulumi.getter(name="rolloverEncryptionDetails")
    def rollover_encryption_details(self) -> Optional['outputs.EncryptionDetailsResponse']:
        """
        Rollover encryption details for the fabric.
        """
        return pulumi.get(self, "rollover_encryption_details")

    def _translate_property(self, prop):
        # Map a camelCase wire property name to its snake_case Python name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class GenericContainerExtendedInfoResponse(dict):
    """
    Container extended information
    """
    def __init__(__self__, *,
                 container_identity_info: Optional['outputs.ContainerIdentityInfoResponse'] = None,
                 raw_cert_data: Optional[str] = None,
                 service_endpoints: Optional[Mapping[str, str]] = None):
        """
        Container extended information
        :param 'ContainerIdentityInfoResponseArgs' container_identity_info: Container identity information
        :param str raw_cert_data: Public key of container cert
        :param Mapping[str, str] service_endpoints: Azure Backup Service Endpoints for the container
        """
        # Store only fields that were actually provided.
        for field, value in (("container_identity_info", container_identity_info),
                             ("raw_cert_data", raw_cert_data),
                             ("service_endpoints", service_endpoints)):
            if value is not None:
                pulumi.set(__self__, field, value)

    @property
    @pulumi.getter(name="containerIdentityInfo")
    def container_identity_info(self) -> Optional['outputs.ContainerIdentityInfoResponse']:
        """
        Container identity information
        """
        return pulumi.get(self, "container_identity_info")

    @property
    @pulumi.getter(name="rawCertData")
    def raw_cert_data(self) -> Optional[str]:
        """
        Public key of container cert
        """
        return pulumi.get(self, "raw_cert_data")

    @property
    @pulumi.getter(name="serviceEndpoints")
    def service_endpoints(self) -> Optional[Mapping[str, str]]:
        """
        Azure Backup Service Endpoints for the container
        """
        return pulumi.get(self, "service_endpoints")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case Python name, if known.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class GenericContainerResponse(dict):
    """
    Base class for generic container of backup items
    """
    def __init__(__self__, *,
                 backup_management_type: Optional[str] = None,
                 container_type: Optional[str] = None,
                 extended_information: Optional['outputs.GenericContainerExtendedInfoResponse'] = None,
                 fabric_name: Optional[str] = None,
                 friendly_name: Optional[str] = None,
                 health_status: Optional[str] = None,
                 registration_status: Optional[str] = None):
        """
        Base class for generic container of backup items
        :param str backup_management_type: Type of backup management for the container.
        :param str container_type: Type of the container. The value of this property for: 1. Compute Azure VM is Microsoft.Compute/virtualMachines 2.
               Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines 3. Windows machines (like MAB, DPM etc) is
               Windows 4. Azure SQL instance is AzureSqlContainer. 5. Storage containers is StorageContainer. 6. Azure workload
               Backup is VMAppContainer
        :param 'GenericContainerExtendedInfoResponseArgs' extended_information: Extended information (not returned in List container API calls)
        :param str fabric_name: Name of the container's fabric
        :param str friendly_name: Friendly name of the container.
        :param str health_status: Status of health of the container.
        :param str registration_status: Status of registration of the container with the Recovery Services Vault.
        """
        # Store only the fields the caller actually supplied.
        if backup_management_type is not None:
            pulumi.set(__self__, "backup_management_type", backup_management_type)
        if container_type is not None:
            # NOTE(review): type discriminator — the stored value is always the
            # constant 'GenericContainer', regardless of what was passed in;
            # presumably intentional codegen behavior.
            pulumi.set(__self__, "container_type", 'GenericContainer')
        if extended_information is not None:
            pulumi.set(__self__, "extended_information", extended_information)
        if fabric_name is not None:
            pulumi.set(__self__, "fabric_name", fabric_name)
        if friendly_name is not None:
            pulumi.set(__self__, "friendly_name", friendly_name)
        if health_status is not None:
            pulumi.set(__self__, "health_status", health_status)
        if registration_status is not None:
            pulumi.set(__self__, "registration_status", registration_status)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """
        Type of backup management for the container.
        """
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="containerType")
    def container_type(self) -> Optional[str]:
        """
        Type of the container. The value of this property for: 1. Compute Azure VM is Microsoft.Compute/virtualMachines 2.
        Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines 3. Windows machines (like MAB, DPM etc) is
        Windows 4. Azure SQL instance is AzureSqlContainer. 5. Storage containers is StorageContainer. 6. Azure workload
        Backup is VMAppContainer
        """
        return pulumi.get(self, "container_type")

    @property
    @pulumi.getter(name="extendedInformation")
    def extended_information(self) -> Optional['outputs.GenericContainerExtendedInfoResponse']:
        """
        Extended information (not returned in List container API calls)
        """
        return pulumi.get(self, "extended_information")

    @property
    @pulumi.getter(name="fabricName")
    def fabric_name(self) -> Optional[str]:
        """
        Name of the container's fabric
        """
        return pulumi.get(self, "fabric_name")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """
        Friendly name of the container.
        """
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="healthStatus")
    def health_status(self) -> Optional[str]:
        """
        Status of health of the container.
        """
        return pulumi.get(self, "health_status")

    @property
    @pulumi.getter(name="registrationStatus")
    def registration_status(self) -> Optional[str]:
        """
        Status of registration of the container with the Recovery Services Vault.
        """
        return pulumi.get(self, "registration_status")

    def _translate_property(self, prop):
        # Map a camelCase wire property name to its snake_case Python name.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class GenericProtectedItemResponse(dict):
"""
Base class for backup items.
"""
def __init__(__self__, *,
protected_item_type: str,
backup_management_type: Optional[str] = None,
backup_set_name: Optional[str] = None,
container_name: Optional[str] = None,
create_mode: Optional[str] = None,
deferred_delete_time_in_utc: Optional[str] = None,
deferred_delete_time_remaining: Optional[str] = None,
fabric_name: Optional[str] = None,
friendly_name: Optional[str] = None,
is_deferred_delete_schedule_upcoming: Optional[bool] = None,
is_rehydrate: Optional[bool] = None,
is_scheduled_for_deferred_delete: Optional[bool] = None,
last_recovery_point: Optional[str] = None,
policy_id: Optional[str] = None,
policy_state: Optional[str] = None,
protected_item_id: Optional[int] = None,
protection_state: Optional[str] = None,
source_associations: Optional[Mapping[str, str]] = None,
source_resource_id: Optional[str] = None,
workload_type: Optional[str] = None):
"""
Base class for backup items.
:param str protected_item_type: backup item type.
:param str backup_management_type: Type of backup management for the backed up item.
:param str backup_set_name: Name of the backup set the backup item belongs to
:param str container_name: Unique name of container
:param str create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
:param str deferred_delete_time_in_utc: Time for deferred deletion in UTC
:param str deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
:param str fabric_name: Name of this backup item's fabric.
:param str friendly_name: Friendly name of the container.
:param bool is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
:param bool is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
:param bool is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
:param str last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
:param str policy_id: ID of the backup policy with which this item is backed up.
:param str policy_state: Indicates consistency of policy object and policy applied to this backup item.
:param int protected_item_id: Data Plane Service ID of the protected item.
:param str protection_state: Backup state of this backup item.
:param Mapping[str, str] source_associations: Loosely coupled (type, value) associations (example - parent of a protected item)
:param str source_resource_id: ARM ID of the resource to be backed up.
:param str workload_type: Type of workload this item represents.
"""
pulumi.set(__self__, "protected_item_type", 'GenericProtectedItem')
if backup_management_type is not None:
pulumi.set(__self__, "backup_management_type", backup_management_type)
if backup_set_name is not None:
pulumi.set(__self__, "backup_set_name", backup_set_name)
if container_name is not None:
pulumi.set(__self__, "container_name", container_name)
if create_mode is not None:
pulumi.set(__self__, "create_mode", create_mode)
if deferred_delete_time_in_utc is not None:
pulumi.set(__self__, "deferred_delete_time_in_utc", deferred_delete_time_in_utc)
if deferred_delete_time_remaining is not None:
pulumi.set(__self__, "deferred_delete_time_remaining", deferred_delete_time_remaining)
if fabric_name is not None:
pulumi.set(__self__, "fabric_name", fabric_name)
if friendly_name is not None:
pulumi.set(__self__, "friendly_name", friendly_name)
if is_deferred_delete_schedule_upcoming is not None:
pulumi.set(__self__, "is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming)
if is_rehydrate is not None:
pulumi.set(__self__, "is_rehydrate", is_rehydrate)
if is_scheduled_for_deferred_delete is not None:
pulumi.set(__self__, "is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete)
if last_recovery_point is not None:
pulumi.set(__self__, "last_recovery_point", last_recovery_point)
if policy_id is not None:
pulumi.set(__self__, "policy_id", policy_id)
if policy_state is not None:
pulumi.set(__self__, "policy_state", policy_state)
if protected_item_id is not None:
pulumi.set(__self__, "protected_item_id", protected_item_id)
if protection_state is not None:
pulumi.set(__self__, "protection_state", protection_state)
if source_associations is not None:
pulumi.set(__self__, "source_associations", source_associations)
if source_resource_id is not None:
pulumi.set(__self__, "source_resource_id", source_resource_id)
if workload_type is not None:
pulumi.set(__self__, "workload_type", workload_type)
@property
@pulumi.getter(name="protectedItemType")
def protected_item_type(self) -> str:
"""
backup item type.
"""
return pulumi.get(self, "protected_item_type")
@property
@pulumi.getter(name="backupManagementType")
def backup_management_type(self) -> Optional[str]:
"""
Type of backup management for the backed up item.
"""
return pulumi.get(self, "backup_management_type")
@property
@pulumi.getter(name="backupSetName")
def backup_set_name(self) -> Optional[str]:
"""
Name of the backup set the backup item belongs to
"""
return pulumi.get(self, "backup_set_name")
@property
@pulumi.getter(name="containerName")
def container_name(self) -> Optional[str]:
"""
Unique name of container
"""
return pulumi.get(self, "container_name")
@property
@pulumi.getter(name="createMode")
def create_mode(self) -> Optional[str]:
"""
Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
"""
return pulumi.get(self, "create_mode")
@property
@pulumi.getter(name="deferredDeleteTimeInUTC")
def deferred_delete_time_in_utc(self) -> Optional[str]:
"""
Time for deferred deletion in UTC
"""
return pulumi.get(self, "deferred_delete_time_in_utc")
@property
@pulumi.getter(name="deferredDeleteTimeRemaining")
def deferred_delete_time_remaining(self) -> Optional[str]:
"""
Time remaining before the DS marked for deferred delete is permanently deleted
"""
return pulumi.get(self, "deferred_delete_time_remaining")
@property
@pulumi.getter(name="fabricName")
def fabric_name(self) -> Optional[str]:
"""
Name of this backup item's fabric.
"""
return pulumi.get(self, "fabric_name")
@property
@pulumi.getter(name="friendlyName")
def friendly_name(self) -> Optional[str]:
"""
Friendly name of the container.
"""
return pulumi.get(self, "friendly_name")
@property
@pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
def is_deferred_delete_schedule_upcoming(self) -> Optional[bool]:
"""
Flag to identify whether the deferred deleted DS is to be purged soon
"""
return pulumi.get(self, "is_deferred_delete_schedule_upcoming")
@property
@pulumi.getter(name="isRehydrate")
def is_rehydrate(self) -> Optional[bool]:
"""
Flag to identify that deferred deleted DS is to be moved into Pause state
"""
return pulumi.get(self, "is_rehydrate")
@property
@pulumi.getter(name="isScheduledForDeferredDelete")
def is_scheduled_for_deferred_delete(self) -> Optional[bool]:
"""
Flag to identify whether the DS is scheduled for deferred delete
"""
return pulumi.get(self, "is_scheduled_for_deferred_delete")
@property
@pulumi.getter(name="lastRecoveryPoint")
def last_recovery_point(self) -> Optional[str]:
"""
Timestamp when the last (latest) backup copy was created for this backup item.
"""
return pulumi.get(self, "last_recovery_point")
@property
@pulumi.getter(name="policyId")
def policy_id(self) -> Optional[str]:
"""
ID of the backup policy with which this item is backed up.
"""
return pulumi.get(self, "policy_id")
@property
@pulumi.getter(name="policyState")
def policy_state(self) -> Optional[str]:
    """Indicates consistency of policy object and policy applied to this backup item."""
    value = pulumi.get(self, "policy_state")
    return value
@property
@pulumi.getter(name="protectedItemId")
def protected_item_id(self) -> Optional[int]:
    """Data Plane Service ID of the protected item."""
    value = pulumi.get(self, "protected_item_id")
    return value
@property
@pulumi.getter(name="protectionState")
def protection_state(self) -> Optional[str]:
    """Backup state of this backup item."""
    value = pulumi.get(self, "protection_state")
    return value
@property
@pulumi.getter(name="sourceAssociations")
def source_associations(self) -> Optional[Mapping[str, str]]:
    """Loosely coupled (type, value) associations (example - parent of a protected item)."""
    value = pulumi.get(self, "source_associations")
    return value
@property
@pulumi.getter(name="sourceResourceId")
def source_resource_id(self) -> Optional[str]:
    """ARM ID of the resource to be backed up."""
    value = pulumi.get(self, "source_resource_id")
    return value
@property
@pulumi.getter(name="workloadType")
def workload_type(self) -> Optional[str]:
    """Type of workload this item represents."""
    value = pulumi.get(self, "workload_type")
    return value
def _translate_property(self, prop):
    """Map a camelCase wire property name to its snake_case Python name, falling back to the input."""
    translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
    return translated if translated else prop
@pulumi.output_type
class HealthErrorResponse(dict):
    """
    Health Error.
    """
    def __init__(__self__, *,
                 creation_time_utc: Optional[str] = None,
                 customer_resolvability: Optional[str] = None,
                 entity_id: Optional[str] = None,
                 error_category: Optional[str] = None,
                 error_code: Optional[str] = None,
                 error_id: Optional[str] = None,
                 error_level: Optional[str] = None,
                 error_message: Optional[str] = None,
                 error_source: Optional[str] = None,
                 error_type: Optional[str] = None,
                 inner_health_errors: Optional[Sequence['outputs.InnerHealthErrorResponse']] = None,
                 possible_causes: Optional[str] = None,
                 recommended_action: Optional[str] = None,
                 recovery_provider_error_message: Optional[str] = None,
                 summary_message: Optional[str] = None):
        """
        Health Error.
        :param str creation_time_utc: Error creation time (UTC)
        :param str customer_resolvability: Value indicating whether the health error is customer resolvable.
        :param str entity_id: ID of the entity.
        :param str error_category: Category of error.
        :param str error_code: Error code.
        :param str error_id: The health error unique id.
        :param str error_level: Level of error.
        :param str error_message: Error message.
        :param str error_source: Source of error.
        :param str error_type: Type of error.
        :param Sequence['InnerHealthErrorResponseArgs'] inner_health_errors: The inner health errors. HealthError having a list of HealthError as child errors is problematic. InnerHealthError is used because this will prevent an infinite loop of structures when Hydra tries to auto-generate the contract. We are exposing the related health errors as inner health errors and all API consumers can utilize this in the same fashion as Exception -> InnerException.
        :param str possible_causes: Possible causes of error.
        :param str recommended_action: Recommended action to resolve error.
        :param str recovery_provider_error_message: DRA error message.
        :param str summary_message: Summary message of the entity.
        """
        supplied = {
            "creation_time_utc": creation_time_utc,
            "customer_resolvability": customer_resolvability,
            "entity_id": entity_id,
            "error_category": error_category,
            "error_code": error_code,
            "error_id": error_id,
            "error_level": error_level,
            "error_message": error_message,
            "error_source": error_source,
            "error_type": error_type,
            "inner_health_errors": inner_health_errors,
            "possible_causes": possible_causes,
            "recommended_action": recommended_action,
            "recovery_provider_error_message": recovery_provider_error_message,
            "summary_message": summary_message,
        }
        # Persist only the fields the caller actually provided.
        for field_name, field_value in supplied.items():
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)
    @property
    @pulumi.getter(name="creationTimeUtc")
    def creation_time_utc(self) -> Optional[str]:
        """Error creation time (UTC)."""
        return pulumi.get(self, "creation_time_utc")
    @property
    @pulumi.getter(name="customerResolvability")
    def customer_resolvability(self) -> Optional[str]:
        """Value indicating whether the health error is customer resolvable."""
        return pulumi.get(self, "customer_resolvability")
    @property
    @pulumi.getter(name="entityId")
    def entity_id(self) -> Optional[str]:
        """ID of the entity."""
        return pulumi.get(self, "entity_id")
    @property
    @pulumi.getter(name="errorCategory")
    def error_category(self) -> Optional[str]:
        """Category of error."""
        return pulumi.get(self, "error_category")
    @property
    @pulumi.getter(name="errorCode")
    def error_code(self) -> Optional[str]:
        """Error code."""
        return pulumi.get(self, "error_code")
    @property
    @pulumi.getter(name="errorId")
    def error_id(self) -> Optional[str]:
        """The health error unique id."""
        return pulumi.get(self, "error_id")
    @property
    @pulumi.getter(name="errorLevel")
    def error_level(self) -> Optional[str]:
        """Level of error."""
        return pulumi.get(self, "error_level")
    @property
    @pulumi.getter(name="errorMessage")
    def error_message(self) -> Optional[str]:
        """Error message."""
        return pulumi.get(self, "error_message")
    @property
    @pulumi.getter(name="errorSource")
    def error_source(self) -> Optional[str]:
        """Source of error."""
        return pulumi.get(self, "error_source")
    @property
    @pulumi.getter(name="errorType")
    def error_type(self) -> Optional[str]:
        """Type of error."""
        return pulumi.get(self, "error_type")
    @property
    @pulumi.getter(name="innerHealthErrors")
    def inner_health_errors(self) -> Optional[Sequence['outputs.InnerHealthErrorResponse']]:
        """
        The inner health errors. HealthError having a list of HealthError as child errors is problematic. InnerHealthError is used because this will prevent an infinite loop of structures when Hydra tries to auto-generate the contract. We are exposing the related health errors as inner health errors and all API consumers can utilize this in the same fashion as Exception -> InnerException.
        """
        return pulumi.get(self, "inner_health_errors")
    @property
    @pulumi.getter(name="possibleCauses")
    def possible_causes(self) -> Optional[str]:
        """Possible causes of error."""
        return pulumi.get(self, "possible_causes")
    @property
    @pulumi.getter(name="recommendedAction")
    def recommended_action(self) -> Optional[str]:
        """Recommended action to resolve error."""
        return pulumi.get(self, "recommended_action")
    @property
    @pulumi.getter(name="recoveryProviderErrorMessage")
    def recovery_provider_error_message(self) -> Optional[str]:
        """DRA error message."""
        return pulumi.get(self, "recovery_provider_error_message")
    @property
    @pulumi.getter(name="summaryMessage")
    def summary_message(self) -> Optional[str]:
        """Summary message of the entity."""
        return pulumi.get(self, "summary_message")
    def _translate_property(self, prop):
        """Map a camelCase wire property name to its snake_case Python name, falling back to the input."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class HyperVReplicaAzurePolicyDetailsResponse(dict):
    """
    Hyper-V Replica Azure specific protection profile details.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 active_storage_account_id: Optional[str] = None,
                 application_consistent_snapshot_frequency_in_hours: Optional[int] = None,
                 encryption: Optional[str] = None,
                 online_replication_start_time: Optional[str] = None,
                 recovery_point_history_duration_in_hours: Optional[int] = None,
                 replication_interval: Optional[int] = None):
        """
        Hyper-V Replica Azure specific protection profile details.
        :param str instance_type: Gets the class type. Overridden in derived classes.
        :param str active_storage_account_id: The active storage account Id.
        :param int application_consistent_snapshot_frequency_in_hours: The interval (in hours) at which Hyper-V Replica should create an application consistent snapshot within the VM.
        :param str encryption: A value indicating whether encryption is enabled for virtual machines in this cloud.
        :param str online_replication_start_time: The scheduled start time for the initial replication. If this parameter is Null, the initial replication starts immediately.
        :param int recovery_point_history_duration_in_hours: The duration (in hours) to which point the recovery history needs to be maintained.
        :param int replication_interval: The replication interval.
        """
        # The discriminator is always the fixed literal for this variant,
        # regardless of what was passed in.
        pulumi.set(__self__, "instance_type", 'HyperVReplicaAzure')
        supplied = {
            "active_storage_account_id": active_storage_account_id,
            "application_consistent_snapshot_frequency_in_hours": application_consistent_snapshot_frequency_in_hours,
            "encryption": encryption,
            "online_replication_start_time": online_replication_start_time,
            "recovery_point_history_duration_in_hours": recovery_point_history_duration_in_hours,
            "replication_interval": replication_interval,
        }
        # Persist only the fields the caller actually provided.
        for field_name, field_value in supplied.items():
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)
    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Gets the class type. Overridden in derived classes."""
        return pulumi.get(self, "instance_type")
    @property
    @pulumi.getter(name="activeStorageAccountId")
    def active_storage_account_id(self) -> Optional[str]:
        """The active storage account Id."""
        return pulumi.get(self, "active_storage_account_id")
    @property
    @pulumi.getter(name="applicationConsistentSnapshotFrequencyInHours")
    def application_consistent_snapshot_frequency_in_hours(self) -> Optional[int]:
        """The interval (in hours) at which Hyper-V Replica should create an application consistent snapshot within the VM."""
        return pulumi.get(self, "application_consistent_snapshot_frequency_in_hours")
    @property
    @pulumi.getter
    def encryption(self) -> Optional[str]:
        """A value indicating whether encryption is enabled for virtual machines in this cloud."""
        return pulumi.get(self, "encryption")
    @property
    @pulumi.getter(name="onlineReplicationStartTime")
    def online_replication_start_time(self) -> Optional[str]:
        """The scheduled start time for the initial replication. If this parameter is Null, the initial replication starts immediately."""
        return pulumi.get(self, "online_replication_start_time")
    @property
    @pulumi.getter(name="recoveryPointHistoryDurationInHours")
    def recovery_point_history_duration_in_hours(self) -> Optional[int]:
        """The duration (in hours) to which point the recovery history needs to be maintained."""
        return pulumi.get(self, "recovery_point_history_duration_in_hours")
    @property
    @pulumi.getter(name="replicationInterval")
    def replication_interval(self) -> Optional[int]:
        """The replication interval."""
        return pulumi.get(self, "replication_interval")
    def _translate_property(self, prop):
        """Map a camelCase wire property name to its snake_case Python name, falling back to the input."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class HyperVReplicaAzureReplicationDetailsResponse(dict):
    """
    Hyper V Replica Azure provider specific settings.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 azure_vm_disk_details: Optional[Sequence['outputs.AzureVmDiskDetailsResponse']] = None,
                 enable_rdp_on_target_option: Optional[str] = None,
                 encryption: Optional[str] = None,
                 initial_replication_details: Optional['outputs.InitialReplicationDetailsResponse'] = None,
                 last_replicated_time: Optional[str] = None,
                 last_rpo_calculated_time: Optional[str] = None,
                 license_type: Optional[str] = None,
                 o_s_details: Optional['outputs.OSDetailsResponse'] = None,
                 recovery_availability_set_id: Optional[str] = None,
                 recovery_azure_log_storage_account_id: Optional[str] = None,
                 recovery_azure_resource_group_id: Optional[str] = None,
                 recovery_azure_storage_account: Optional[str] = None,
                 recovery_azure_vm_size: Optional[str] = None,
                 recovery_azure_vm_name: Optional[str] = None,
                 rpo_in_seconds: Optional[int] = None,
                 selected_recovery_azure_network_id: Optional[str] = None,
                 selected_source_nic_id: Optional[str] = None,
                 source_vm_cpu_count: Optional[int] = None,
                 source_vm_ram_size_in_mb: Optional[int] = None,
                 target_availability_zone: Optional[str] = None,
                 target_proximity_placement_group_id: Optional[str] = None,
                 use_managed_disks: Optional[str] = None,
                 vm_id: Optional[str] = None,
                 vm_nics: Optional[Sequence['outputs.VMNicDetailsResponse']] = None,
                 vm_protection_state: Optional[str] = None,
                 vm_protection_state_description: Optional[str] = None):
        """
        Hyper V Replica Azure provider specific settings.
        :param str instance_type: Gets the Instance type.
        :param Sequence['AzureVmDiskDetailsResponseArgs'] azure_vm_disk_details: Azure VM Disk details.
        :param str enable_rdp_on_target_option: The selected option to enable RDP/SSH on target vm after failover. String value of {SrsDataContract.EnableRDPOnTargetOption} enum.
        :param str encryption: The encryption info.
        :param 'InitialReplicationDetailsResponseArgs' initial_replication_details: Initial replication details.
        :param str last_replicated_time: The Last replication time.
        :param str last_rpo_calculated_time: The last RPO calculated time.
        :param str license_type: License Type of the VM to be used.
        :param 'OSDetailsResponseArgs' o_s_details: The operating system info.
        :param str recovery_availability_set_id: The recovery availability set Id.
        :param str recovery_azure_log_storage_account_id: The ARM id of the log storage account used for replication. This will be set to null if no log storage account was provided during enable protection.
        :param str recovery_azure_resource_group_id: The target resource group Id.
        :param str recovery_azure_storage_account: The recovery Azure storage account.
        :param str recovery_azure_vm_size: The Recovery Azure VM size.
        :param str recovery_azure_vm_name: Recovery Azure given name.
        :param int rpo_in_seconds: Last RPO value.
        :param str selected_recovery_azure_network_id: The selected recovery azure network Id.
        :param str selected_source_nic_id: The selected source nic Id which will be used as the primary nic during failover.
        :param int source_vm_cpu_count: The CPU count of the VM on the primary side.
        :param int source_vm_ram_size_in_mb: The RAM size of the VM on the primary side.
        :param str target_availability_zone: The target availability zone.
        :param str target_proximity_placement_group_id: The target proximity placement group Id.
        :param str use_managed_disks: A value indicating whether managed disks should be used during failover.
        :param str vm_id: The virtual machine Id.
        :param Sequence['VMNicDetailsResponseArgs'] vm_nics: The PE Network details.
        :param str vm_protection_state: The protection state for the vm.
        :param str vm_protection_state_description: The protection state description for the vm.
        """
        # The discriminator is always the fixed literal for this variant,
        # regardless of what was passed in.
        pulumi.set(__self__, "instance_type", 'HyperVReplicaAzure')
        supplied = {
            "azure_vm_disk_details": azure_vm_disk_details,
            "enable_rdp_on_target_option": enable_rdp_on_target_option,
            "encryption": encryption,
            "initial_replication_details": initial_replication_details,
            "last_replicated_time": last_replicated_time,
            "last_rpo_calculated_time": last_rpo_calculated_time,
            "license_type": license_type,
            "o_s_details": o_s_details,
            "recovery_availability_set_id": recovery_availability_set_id,
            "recovery_azure_log_storage_account_id": recovery_azure_log_storage_account_id,
            "recovery_azure_resource_group_id": recovery_azure_resource_group_id,
            "recovery_azure_storage_account": recovery_azure_storage_account,
            "recovery_azure_vm_size": recovery_azure_vm_size,
            "recovery_azure_vm_name": recovery_azure_vm_name,
            "rpo_in_seconds": rpo_in_seconds,
            "selected_recovery_azure_network_id": selected_recovery_azure_network_id,
            "selected_source_nic_id": selected_source_nic_id,
            "source_vm_cpu_count": source_vm_cpu_count,
            "source_vm_ram_size_in_mb": source_vm_ram_size_in_mb,
            "target_availability_zone": target_availability_zone,
            "target_proximity_placement_group_id": target_proximity_placement_group_id,
            "use_managed_disks": use_managed_disks,
            "vm_id": vm_id,
            "vm_nics": vm_nics,
            "vm_protection_state": vm_protection_state,
            "vm_protection_state_description": vm_protection_state_description,
        }
        # Persist only the fields the caller actually provided.
        for field_name, field_value in supplied.items():
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)
    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Gets the Instance type."""
        return pulumi.get(self, "instance_type")
    @property
    @pulumi.getter(name="azureVmDiskDetails")
    def azure_vm_disk_details(self) -> Optional[Sequence['outputs.AzureVmDiskDetailsResponse']]:
        """Azure VM Disk details."""
        return pulumi.get(self, "azure_vm_disk_details")
    @property
    @pulumi.getter(name="enableRdpOnTargetOption")
    def enable_rdp_on_target_option(self) -> Optional[str]:
        """The selected option to enable RDP/SSH on target vm after failover. String value of {SrsDataContract.EnableRDPOnTargetOption} enum."""
        return pulumi.get(self, "enable_rdp_on_target_option")
    @property
    @pulumi.getter
    def encryption(self) -> Optional[str]:
        """The encryption info."""
        return pulumi.get(self, "encryption")
    @property
    @pulumi.getter(name="initialReplicationDetails")
    def initial_replication_details(self) -> Optional['outputs.InitialReplicationDetailsResponse']:
        """Initial replication details."""
        return pulumi.get(self, "initial_replication_details")
    @property
    @pulumi.getter(name="lastReplicatedTime")
    def last_replicated_time(self) -> Optional[str]:
        """The Last replication time."""
        return pulumi.get(self, "last_replicated_time")
    @property
    @pulumi.getter(name="lastRpoCalculatedTime")
    def last_rpo_calculated_time(self) -> Optional[str]:
        """The last RPO calculated time."""
        return pulumi.get(self, "last_rpo_calculated_time")
    @property
    @pulumi.getter(name="licenseType")
    def license_type(self) -> Optional[str]:
        """License Type of the VM to be used."""
        return pulumi.get(self, "license_type")
    @property
    @pulumi.getter(name="oSDetails")
    def o_s_details(self) -> Optional['outputs.OSDetailsResponse']:
        """The operating system info."""
        return pulumi.get(self, "o_s_details")
    @property
    @pulumi.getter(name="recoveryAvailabilitySetId")
    def recovery_availability_set_id(self) -> Optional[str]:
        """The recovery availability set Id."""
        return pulumi.get(self, "recovery_availability_set_id")
    @property
    @pulumi.getter(name="recoveryAzureLogStorageAccountId")
    def recovery_azure_log_storage_account_id(self) -> Optional[str]:
        """The ARM id of the log storage account used for replication. This will be set to null if no log storage account was provided during enable protection."""
        return pulumi.get(self, "recovery_azure_log_storage_account_id")
    @property
    @pulumi.getter(name="recoveryAzureResourceGroupId")
    def recovery_azure_resource_group_id(self) -> Optional[str]:
        """The target resource group Id."""
        return pulumi.get(self, "recovery_azure_resource_group_id")
    @property
    @pulumi.getter(name="recoveryAzureStorageAccount")
    def recovery_azure_storage_account(self) -> Optional[str]:
        """The recovery Azure storage account."""
        return pulumi.get(self, "recovery_azure_storage_account")
    @property
    @pulumi.getter(name="recoveryAzureVMSize")
    def recovery_azure_vm_size(self) -> Optional[str]:
        """The Recovery Azure VM size."""
        return pulumi.get(self, "recovery_azure_vm_size")
    @property
    @pulumi.getter(name="recoveryAzureVmName")
    def recovery_azure_vm_name(self) -> Optional[str]:
        """Recovery Azure given name."""
        return pulumi.get(self, "recovery_azure_vm_name")
    @property
    @pulumi.getter(name="rpoInSeconds")
    def rpo_in_seconds(self) -> Optional[int]:
        """Last RPO value."""
        return pulumi.get(self, "rpo_in_seconds")
    @property
    @pulumi.getter(name="selectedRecoveryAzureNetworkId")
    def selected_recovery_azure_network_id(self) -> Optional[str]:
        """The selected recovery azure network Id."""
        return pulumi.get(self, "selected_recovery_azure_network_id")
    @property
    @pulumi.getter(name="selectedSourceNicId")
    def selected_source_nic_id(self) -> Optional[str]:
        """The selected source nic Id which will be used as the primary nic during failover."""
        return pulumi.get(self, "selected_source_nic_id")
    @property
    @pulumi.getter(name="sourceVmCpuCount")
    def source_vm_cpu_count(self) -> Optional[int]:
        """The CPU count of the VM on the primary side."""
        return pulumi.get(self, "source_vm_cpu_count")
    @property
    @pulumi.getter(name="sourceVmRamSizeInMB")
    def source_vm_ram_size_in_mb(self) -> Optional[int]:
        """The RAM size of the VM on the primary side."""
        return pulumi.get(self, "source_vm_ram_size_in_mb")
    @property
    @pulumi.getter(name="targetAvailabilityZone")
    def target_availability_zone(self) -> Optional[str]:
        """The target availability zone."""
        return pulumi.get(self, "target_availability_zone")
    @property
    @pulumi.getter(name="targetProximityPlacementGroupId")
    def target_proximity_placement_group_id(self) -> Optional[str]:
        """The target proximity placement group Id."""
        return pulumi.get(self, "target_proximity_placement_group_id")
    @property
    @pulumi.getter(name="useManagedDisks")
    def use_managed_disks(self) -> Optional[str]:
        """A value indicating whether managed disks should be used during failover."""
        return pulumi.get(self, "use_managed_disks")
    @property
    @pulumi.getter(name="vmId")
    def vm_id(self) -> Optional[str]:
        """The virtual machine Id."""
        return pulumi.get(self, "vm_id")
    @property
    @pulumi.getter(name="vmNics")
    def vm_nics(self) -> Optional[Sequence['outputs.VMNicDetailsResponse']]:
        """The PE Network details."""
        return pulumi.get(self, "vm_nics")
    @property
    @pulumi.getter(name="vmProtectionState")
    def vm_protection_state(self) -> Optional[str]:
        """The protection state for the vm."""
        return pulumi.get(self, "vm_protection_state")
    @property
    @pulumi.getter(name="vmProtectionStateDescription")
    def vm_protection_state_description(self) -> Optional[str]:
        """The protection state description for the vm."""
        return pulumi.get(self, "vm_protection_state_description")
    def _translate_property(self, prop):
        """Map a camelCase wire property name to its snake_case Python name, falling back to the input."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class HyperVReplicaBasePolicyDetailsResponse(dict):
    """
    Base class for HyperVReplica policy details.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 allowed_authentication_type: Optional[int] = None,
                 application_consistent_snapshot_frequency_in_hours: Optional[int] = None,
                 compression: Optional[str] = None,
                 initial_replication_method: Optional[str] = None,
                 offline_replication_export_path: Optional[str] = None,
                 offline_replication_import_path: Optional[str] = None,
                 online_replication_start_time: Optional[str] = None,
                 recovery_points: Optional[int] = None,
                 replica_deletion_option: Optional[str] = None,
                 replication_port: Optional[int] = None):
        """
        Base class for HyperVReplica policy details.
        :param str instance_type: Gets the class type. Overridden in derived classes.
        :param int allowed_authentication_type: A value indicating the authentication type.
        :param int application_consistent_snapshot_frequency_in_hours: A value indicating the application consistent frequency.
        :param str compression: A value indicating whether compression has to be enabled.
        :param str initial_replication_method: A value indicating whether IR is online.
        :param str offline_replication_export_path: A value indicating the offline IR export path.
        :param str offline_replication_import_path: A value indicating the offline IR import path.
        :param str online_replication_start_time: A value indicating the online IR start time.
        :param int recovery_points: A value indicating the number of recovery points.
        :param str replica_deletion_option: A value indicating whether the VM has to be auto deleted. Supported Values: String.Empty, None, OnRecoveryCloud
        :param int replication_port: A value indicating the recovery HTTPS port.
        """
        # The discriminator is always the fixed literal for this variant,
        # regardless of what was passed in.
        pulumi.set(__self__, "instance_type", 'HyperVReplicaBasePolicyDetails')
        supplied = {
            "allowed_authentication_type": allowed_authentication_type,
            "application_consistent_snapshot_frequency_in_hours": application_consistent_snapshot_frequency_in_hours,
            "compression": compression,
            "initial_replication_method": initial_replication_method,
            "offline_replication_export_path": offline_replication_export_path,
            "offline_replication_import_path": offline_replication_import_path,
            "online_replication_start_time": online_replication_start_time,
            "recovery_points": recovery_points,
            "replica_deletion_option": replica_deletion_option,
            "replication_port": replication_port,
        }
        # Persist only the fields the caller actually provided.
        for field_name, field_value in supplied.items():
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)
    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Gets the class type. Overridden in derived classes."""
        return pulumi.get(self, "instance_type")
    @property
    @pulumi.getter(name="allowedAuthenticationType")
    def allowed_authentication_type(self) -> Optional[int]:
        """A value indicating the authentication type."""
        return pulumi.get(self, "allowed_authentication_type")
    @property
    @pulumi.getter(name="applicationConsistentSnapshotFrequencyInHours")
    def application_consistent_snapshot_frequency_in_hours(self) -> Optional[int]:
        """A value indicating the application consistent frequency."""
        return pulumi.get(self, "application_consistent_snapshot_frequency_in_hours")
    @property
    @pulumi.getter
    def compression(self) -> Optional[str]:
        """A value indicating whether compression has to be enabled."""
        return pulumi.get(self, "compression")
    @property
    @pulumi.getter(name="initialReplicationMethod")
    def initial_replication_method(self) -> Optional[str]:
        """A value indicating whether IR is online."""
        return pulumi.get(self, "initial_replication_method")
    @property
    @pulumi.getter(name="offlineReplicationExportPath")
    def offline_replication_export_path(self) -> Optional[str]:
        """A value indicating the offline IR export path."""
        return pulumi.get(self, "offline_replication_export_path")
    @property
    @pulumi.getter(name="offlineReplicationImportPath")
    def offline_replication_import_path(self) -> Optional[str]:
        """A value indicating the offline IR import path."""
        return pulumi.get(self, "offline_replication_import_path")
    @property
    @pulumi.getter(name="onlineReplicationStartTime")
    def online_replication_start_time(self) -> Optional[str]:
        """A value indicating the online IR start time."""
        return pulumi.get(self, "online_replication_start_time")
    @property
    @pulumi.getter(name="recoveryPoints")
    def recovery_points(self) -> Optional[int]:
        """A value indicating the number of recovery points."""
        return pulumi.get(self, "recovery_points")
    @property
    @pulumi.getter(name="replicaDeletionOption")
    def replica_deletion_option(self) -> Optional[str]:
        """A value indicating whether the VM has to be auto deleted. Supported Values: String.Empty, None, OnRecoveryCloud"""
        return pulumi.get(self, "replica_deletion_option")
    @property
    @pulumi.getter(name="replicationPort")
    def replication_port(self) -> Optional[int]:
        """A value indicating the recovery HTTPS port."""
        return pulumi.get(self, "replication_port")
    def _translate_property(self, prop):
        """Map a camelCase wire property name to its snake_case Python name, falling back to the input."""
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated if translated else prop
@pulumi.output_type
class HyperVReplicaBaseReplicationDetailsResponse(dict):
"""
Hyper V replica provider specific settings base class.
"""
def __init__(__self__, *,
instance_type: str,
initial_replication_details: Optional['outputs.InitialReplicationDetailsResponse'] = None,
last_replicated_time: Optional[str] = None,
v_m_disk_details: Optional[Sequence['outputs.DiskDetailsResponse']] = None,
vm_id: Optional[str] = None,
vm_nics: Optional[Sequence['outputs.VMNicDetailsResponse']] = None,
vm_protection_state: Optional[str] = None,
vm_protection_state_description: Optional[str] = None):
"""
Hyper V replica provider specific settings base class.
:param str instance_type: Gets the Instance type.
:param 'InitialReplicationDetailsResponseArgs' initial_replication_details: Initial replication details.
:param str last_replicated_time: The Last replication time.
:param Sequence['DiskDetailsResponseArgs'] v_m_disk_details: VM disk details.
:param str vm_id: The virtual machine Id.
:param Sequence['VMNicDetailsResponseArgs'] vm_nics: The PE Network details.
:param str vm_protection_state: The protection state for the vm.
:param str vm_protection_state_description: The protection state description for the vm.
"""
pulumi.set(__self__, "instance_type", 'HyperVReplicaBaseReplicationDetails')
if initial_replication_details is not None:
pulumi.set(__self__, "initial_replication_details", initial_replication_details)
if last_replicated_time is not None:
pulumi.set(__self__, "last_replicated_time", last_replicated_time)
if v_m_disk_details is not None:
pulumi.set(__self__, "v_m_disk_details", v_m_disk_details)
if vm_id is not None:
pulumi.set(__self__, "vm_id", vm_id)
if vm_nics is not None:
pulumi.set(__self__, "vm_nics", vm_nics)
if vm_protection_state is not None:
pulumi.set(__self__, "vm_protection_state", vm_protection_state)
if vm_protection_state_description is not None:
pulumi.set(__self__, "vm_protection_state_description", vm_protection_state_description)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> str:
"""
Gets the Instance type.
"""
return pulumi.get(self, "instance_type")
@property
@pulumi.getter(name="initialReplicationDetails")
def initial_replication_details(self) -> Optional['outputs.InitialReplicationDetailsResponse']:
"""
Initial replication details.
"""
return pulumi.get(self, "initial_replication_details")
@property
@pulumi.getter(name="lastReplicatedTime")
def last_replicated_time(self) -> Optional[str]:
    """The time of the last replication."""
    value = pulumi.get(self, "last_replicated_time")
    return value
@property
@pulumi.getter(name="vMDiskDetails")
def v_m_disk_details(self) -> Optional[Sequence['outputs.DiskDetailsResponse']]:
    """Details of the VM disks."""
    value = pulumi.get(self, "v_m_disk_details")
    return value
@property
@pulumi.getter(name="vmId")
def vm_id(self) -> Optional[str]:
    """The Id of the virtual machine."""
    value = pulumi.get(self, "vm_id")
    return value
@property
@pulumi.getter(name="vmNics")
def vm_nics(self) -> Optional[Sequence['outputs.VMNicDetailsResponse']]:
    """The PE network details."""
    value = pulumi.get(self, "vm_nics")
    return value
@property
@pulumi.getter(name="vmProtectionState")
def vm_protection_state(self) -> Optional[str]:
    """The protection state of the VM."""
    value = pulumi.get(self, "vm_protection_state")
    return value
@property
@pulumi.getter(name="vmProtectionStateDescription")
def vm_protection_state_description(self) -> Optional[str]:
    """A description of the VM's protection state."""
    value = pulumi.get(self, "vm_protection_state_description")
    return value
def _translate_property(self, prop):
    # Map a camelCase wire name to its snake_case attribute name,
    # falling back to the input when no (truthy) mapping exists.
    snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
    return snake_name if snake_name else prop
@pulumi.output_type
class HyperVReplicaBluePolicyDetailsResponse(dict):
    """
    Hyper-V Replica Blue specific protection profile details.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 allowed_authentication_type: Optional[int] = None,
                 application_consistent_snapshot_frequency_in_hours: Optional[int] = None,
                 compression: Optional[str] = None,
                 initial_replication_method: Optional[str] = None,
                 offline_replication_export_path: Optional[str] = None,
                 offline_replication_import_path: Optional[str] = None,
                 online_replication_start_time: Optional[str] = None,
                 recovery_points: Optional[int] = None,
                 replica_deletion_option: Optional[str] = None,
                 replication_frequency_in_seconds: Optional[int] = None,
                 replication_port: Optional[int] = None):
        """
        Hyper-V Replica Blue specific protection profile details.

        :param str instance_type: Gets the class type. Overridden in derived classes.
        :param int allowed_authentication_type: The authentication type.
        :param int application_consistent_snapshot_frequency_in_hours: The application consistent snapshot frequency.
        :param str compression: Whether compression has to be enabled.
        :param str initial_replication_method: Whether IR is online.
        :param str offline_replication_export_path: The offline IR export path.
        :param str offline_replication_import_path: The offline IR import path.
        :param str online_replication_start_time: The online IR start time.
        :param int recovery_points: The number of recovery points.
        :param str replica_deletion_option: Whether the VM has to be auto deleted. Supported Values: String.Empty, None, OnRecoveryCloud
        :param int replication_frequency_in_seconds: The replication interval.
        :param int replication_port: The recovery HTTPS port.
        """
        # The discriminator is always forced to the 2012 R2 constant,
        # regardless of the ``instance_type`` value supplied by the caller.
        pulumi.set(__self__, "instance_type", 'HyperVReplica2012R2')
        # Record only the optional fields that were actually supplied.
        optional_fields = {
            "allowed_authentication_type": allowed_authentication_type,
            "application_consistent_snapshot_frequency_in_hours": application_consistent_snapshot_frequency_in_hours,
            "compression": compression,
            "initial_replication_method": initial_replication_method,
            "offline_replication_export_path": offline_replication_export_path,
            "offline_replication_import_path": offline_replication_import_path,
            "online_replication_start_time": online_replication_start_time,
            "recovery_points": recovery_points,
            "replica_deletion_option": replica_deletion_option,
            "replication_frequency_in_seconds": replication_frequency_in_seconds,
            "replication_port": replication_port,
        }
        for field_name, field_value in optional_fields.items():
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """The class type. Overridden in derived classes."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="allowedAuthenticationType")
    def allowed_authentication_type(self) -> Optional[int]:
        """The authentication type."""
        return pulumi.get(self, "allowed_authentication_type")

    @property
    @pulumi.getter(name="applicationConsistentSnapshotFrequencyInHours")
    def application_consistent_snapshot_frequency_in_hours(self) -> Optional[int]:
        """The application consistent snapshot frequency."""
        return pulumi.get(self, "application_consistent_snapshot_frequency_in_hours")

    @property
    @pulumi.getter
    def compression(self) -> Optional[str]:
        """Whether compression has to be enabled."""
        return pulumi.get(self, "compression")

    @property
    @pulumi.getter(name="initialReplicationMethod")
    def initial_replication_method(self) -> Optional[str]:
        """Whether IR is online."""
        return pulumi.get(self, "initial_replication_method")

    @property
    @pulumi.getter(name="offlineReplicationExportPath")
    def offline_replication_export_path(self) -> Optional[str]:
        """The offline IR export path."""
        return pulumi.get(self, "offline_replication_export_path")

    @property
    @pulumi.getter(name="offlineReplicationImportPath")
    def offline_replication_import_path(self) -> Optional[str]:
        """The offline IR import path."""
        return pulumi.get(self, "offline_replication_import_path")

    @property
    @pulumi.getter(name="onlineReplicationStartTime")
    def online_replication_start_time(self) -> Optional[str]:
        """The online IR start time."""
        return pulumi.get(self, "online_replication_start_time")

    @property
    @pulumi.getter(name="recoveryPoints")
    def recovery_points(self) -> Optional[int]:
        """The number of recovery points."""
        return pulumi.get(self, "recovery_points")

    @property
    @pulumi.getter(name="replicaDeletionOption")
    def replica_deletion_option(self) -> Optional[str]:
        """Whether the VM has to be auto deleted. Supported Values: String.Empty, None, OnRecoveryCloud"""
        return pulumi.get(self, "replica_deletion_option")

    @property
    @pulumi.getter(name="replicationFrequencyInSeconds")
    def replication_frequency_in_seconds(self) -> Optional[int]:
        """The replication interval."""
        return pulumi.get(self, "replication_frequency_in_seconds")

    @property
    @pulumi.getter(name="replicationPort")
    def replication_port(self) -> Optional[int]:
        """The recovery HTTPS port."""
        return pulumi.get(self, "replication_port")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class HyperVReplicaBlueReplicationDetailsResponse(dict):
    """
    HyperV replica 2012 R2 (Blue) replication details.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 initial_replication_details: Optional['outputs.InitialReplicationDetailsResponse'] = None,
                 last_replicated_time: Optional[str] = None,
                 v_m_disk_details: Optional[Sequence['outputs.DiskDetailsResponse']] = None,
                 vm_id: Optional[str] = None,
                 vm_nics: Optional[Sequence['outputs.VMNicDetailsResponse']] = None,
                 vm_protection_state: Optional[str] = None,
                 vm_protection_state_description: Optional[str] = None):
        """
        HyperV replica 2012 R2 (Blue) replication details.

        :param str instance_type: Gets the Instance type.
        :param 'InitialReplicationDetailsResponseArgs' initial_replication_details: Details of the initial replication.
        :param str last_replicated_time: The time of the last replication.
        :param Sequence['DiskDetailsResponseArgs'] v_m_disk_details: Details of the VM disks.
        :param str vm_id: The Id of the virtual machine.
        :param Sequence['VMNicDetailsResponseArgs'] vm_nics: The PE network details.
        :param str vm_protection_state: The protection state of the VM.
        :param str vm_protection_state_description: A description of the VM's protection state.
        """
        # The discriminator is always forced to the 2012 R2 constant,
        # regardless of the ``instance_type`` value supplied by the caller.
        pulumi.set(__self__, "instance_type", 'HyperVReplica2012R2')
        # Record only the optional fields that were actually supplied.
        optional_fields = {
            "initial_replication_details": initial_replication_details,
            "last_replicated_time": last_replicated_time,
            "v_m_disk_details": v_m_disk_details,
            "vm_id": vm_id,
            "vm_nics": vm_nics,
            "vm_protection_state": vm_protection_state,
            "vm_protection_state_description": vm_protection_state_description,
        }
        for field_name, field_value in optional_fields.items():
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """The instance type."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="initialReplicationDetails")
    def initial_replication_details(self) -> Optional['outputs.InitialReplicationDetailsResponse']:
        """Details of the initial replication, if recorded."""
        return pulumi.get(self, "initial_replication_details")

    @property
    @pulumi.getter(name="lastReplicatedTime")
    def last_replicated_time(self) -> Optional[str]:
        """The time of the last replication."""
        return pulumi.get(self, "last_replicated_time")

    @property
    @pulumi.getter(name="vMDiskDetails")
    def v_m_disk_details(self) -> Optional[Sequence['outputs.DiskDetailsResponse']]:
        """Details of the VM disks."""
        return pulumi.get(self, "v_m_disk_details")

    @property
    @pulumi.getter(name="vmId")
    def vm_id(self) -> Optional[str]:
        """The Id of the virtual machine."""
        return pulumi.get(self, "vm_id")

    @property
    @pulumi.getter(name="vmNics")
    def vm_nics(self) -> Optional[Sequence['outputs.VMNicDetailsResponse']]:
        """The PE network details."""
        return pulumi.get(self, "vm_nics")

    @property
    @pulumi.getter(name="vmProtectionState")
    def vm_protection_state(self) -> Optional[str]:
        """The protection state of the VM."""
        return pulumi.get(self, "vm_protection_state")

    @property
    @pulumi.getter(name="vmProtectionStateDescription")
    def vm_protection_state_description(self) -> Optional[str]:
        """A description of the VM's protection state."""
        return pulumi.get(self, "vm_protection_state_description")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class HyperVReplicaPolicyDetailsResponse(dict):
    """
    Hyper-V Replica Blue specific protection profile details.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 allowed_authentication_type: Optional[int] = None,
                 application_consistent_snapshot_frequency_in_hours: Optional[int] = None,
                 compression: Optional[str] = None,
                 initial_replication_method: Optional[str] = None,
                 offline_replication_export_path: Optional[str] = None,
                 offline_replication_import_path: Optional[str] = None,
                 online_replication_start_time: Optional[str] = None,
                 recovery_points: Optional[int] = None,
                 replica_deletion_option: Optional[str] = None,
                 replication_port: Optional[int] = None):
        """
        Hyper-V Replica Blue specific protection profile details.

        :param str instance_type: Gets the class type. Overridden in derived classes.
        :param int allowed_authentication_type: The authentication type.
        :param int application_consistent_snapshot_frequency_in_hours: The application consistent snapshot frequency.
        :param str compression: Whether compression has to be enabled.
        :param str initial_replication_method: Whether IR is online.
        :param str offline_replication_export_path: The offline IR export path.
        :param str offline_replication_import_path: The offline IR import path.
        :param str online_replication_start_time: The online IR start time.
        :param int recovery_points: The number of recovery points.
        :param str replica_deletion_option: Whether the VM has to be auto deleted. Supported Values: String.Empty, None, OnRecoveryCloud
        :param int replication_port: The recovery HTTPS port.
        """
        # The discriminator is always forced to the 2012 constant,
        # regardless of the ``instance_type`` value supplied by the caller.
        pulumi.set(__self__, "instance_type", 'HyperVReplica2012')
        # Record only the optional fields that were actually supplied.
        optional_fields = {
            "allowed_authentication_type": allowed_authentication_type,
            "application_consistent_snapshot_frequency_in_hours": application_consistent_snapshot_frequency_in_hours,
            "compression": compression,
            "initial_replication_method": initial_replication_method,
            "offline_replication_export_path": offline_replication_export_path,
            "offline_replication_import_path": offline_replication_import_path,
            "online_replication_start_time": online_replication_start_time,
            "recovery_points": recovery_points,
            "replica_deletion_option": replica_deletion_option,
            "replication_port": replication_port,
        }
        for field_name, field_value in optional_fields.items():
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """The class type. Overridden in derived classes."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="allowedAuthenticationType")
    def allowed_authentication_type(self) -> Optional[int]:
        """The authentication type."""
        return pulumi.get(self, "allowed_authentication_type")

    @property
    @pulumi.getter(name="applicationConsistentSnapshotFrequencyInHours")
    def application_consistent_snapshot_frequency_in_hours(self) -> Optional[int]:
        """The application consistent snapshot frequency."""
        return pulumi.get(self, "application_consistent_snapshot_frequency_in_hours")

    @property
    @pulumi.getter
    def compression(self) -> Optional[str]:
        """Whether compression has to be enabled."""
        return pulumi.get(self, "compression")

    @property
    @pulumi.getter(name="initialReplicationMethod")
    def initial_replication_method(self) -> Optional[str]:
        """Whether IR is online."""
        return pulumi.get(self, "initial_replication_method")

    @property
    @pulumi.getter(name="offlineReplicationExportPath")
    def offline_replication_export_path(self) -> Optional[str]:
        """The offline IR export path."""
        return pulumi.get(self, "offline_replication_export_path")

    @property
    @pulumi.getter(name="offlineReplicationImportPath")
    def offline_replication_import_path(self) -> Optional[str]:
        """The offline IR import path."""
        return pulumi.get(self, "offline_replication_import_path")

    @property
    @pulumi.getter(name="onlineReplicationStartTime")
    def online_replication_start_time(self) -> Optional[str]:
        """The online IR start time."""
        return pulumi.get(self, "online_replication_start_time")

    @property
    @pulumi.getter(name="recoveryPoints")
    def recovery_points(self) -> Optional[int]:
        """The number of recovery points."""
        return pulumi.get(self, "recovery_points")

    @property
    @pulumi.getter(name="replicaDeletionOption")
    def replica_deletion_option(self) -> Optional[str]:
        """Whether the VM has to be auto deleted. Supported Values: String.Empty, None, OnRecoveryCloud"""
        return pulumi.get(self, "replica_deletion_option")

    @property
    @pulumi.getter(name="replicationPort")
    def replication_port(self) -> Optional[int]:
        """The recovery HTTPS port."""
        return pulumi.get(self, "replication_port")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class HyperVReplicaReplicationDetailsResponse(dict):
    """
    HyperV replica 2012 replication details.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 initial_replication_details: Optional['outputs.InitialReplicationDetailsResponse'] = None,
                 last_replicated_time: Optional[str] = None,
                 v_m_disk_details: Optional[Sequence['outputs.DiskDetailsResponse']] = None,
                 vm_id: Optional[str] = None,
                 vm_nics: Optional[Sequence['outputs.VMNicDetailsResponse']] = None,
                 vm_protection_state: Optional[str] = None,
                 vm_protection_state_description: Optional[str] = None):
        """
        HyperV replica 2012 replication details.

        :param str instance_type: Gets the Instance type.
        :param 'InitialReplicationDetailsResponseArgs' initial_replication_details: Details of the initial replication.
        :param str last_replicated_time: The time of the last replication.
        :param Sequence['DiskDetailsResponseArgs'] v_m_disk_details: Details of the VM disks.
        :param str vm_id: The Id of the virtual machine.
        :param Sequence['VMNicDetailsResponseArgs'] vm_nics: The PE network details.
        :param str vm_protection_state: The protection state of the VM.
        :param str vm_protection_state_description: A description of the VM's protection state.
        """
        # The discriminator is always forced to the 2012 constant,
        # regardless of the ``instance_type`` value supplied by the caller.
        pulumi.set(__self__, "instance_type", 'HyperVReplica2012')
        # Record only the optional fields that were actually supplied.
        optional_fields = {
            "initial_replication_details": initial_replication_details,
            "last_replicated_time": last_replicated_time,
            "v_m_disk_details": v_m_disk_details,
            "vm_id": vm_id,
            "vm_nics": vm_nics,
            "vm_protection_state": vm_protection_state,
            "vm_protection_state_description": vm_protection_state_description,
        }
        for field_name, field_value in optional_fields.items():
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """The instance type."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="initialReplicationDetails")
    def initial_replication_details(self) -> Optional['outputs.InitialReplicationDetailsResponse']:
        """Details of the initial replication, if recorded."""
        return pulumi.get(self, "initial_replication_details")

    @property
    @pulumi.getter(name="lastReplicatedTime")
    def last_replicated_time(self) -> Optional[str]:
        """The time of the last replication."""
        return pulumi.get(self, "last_replicated_time")

    @property
    @pulumi.getter(name="vMDiskDetails")
    def v_m_disk_details(self) -> Optional[Sequence['outputs.DiskDetailsResponse']]:
        """Details of the VM disks."""
        return pulumi.get(self, "v_m_disk_details")

    @property
    @pulumi.getter(name="vmId")
    def vm_id(self) -> Optional[str]:
        """The Id of the virtual machine."""
        return pulumi.get(self, "vm_id")

    @property
    @pulumi.getter(name="vmNics")
    def vm_nics(self) -> Optional[Sequence['outputs.VMNicDetailsResponse']]:
        """The PE network details."""
        return pulumi.get(self, "vm_nics")

    @property
    @pulumi.getter(name="vmProtectionState")
    def vm_protection_state(self) -> Optional[str]:
        """The protection state of the VM."""
        return pulumi.get(self, "vm_protection_state")

    @property
    @pulumi.getter(name="vmProtectionStateDescription")
    def vm_protection_state_description(self) -> Optional[str]:
        """A description of the VM's protection state."""
        return pulumi.get(self, "vm_protection_state_description")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class HyperVSiteDetailsResponse(dict):
    """
    HyperVSite fabric specific details.
    """
    def __init__(__self__, *,
                 instance_type: str):
        """
        HyperVSite fabric specific details.

        :param str instance_type: Gets the class type. Overridden in derived classes.
        """
        # The discriminator is always forced to the 'HyperVSite' constant,
        # regardless of the ``instance_type`` value supplied by the caller.
        pulumi.set(__self__, "instance_type", 'HyperVSite')

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """The class type. Overridden in derived classes."""
        return pulumi.get(self, "instance_type")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class IPConfigResponse(dict):
    """
    IP configuration details.
    """
    def __init__(__self__, *,
                 l_b_backend_address_pool_ids: Optional[Sequence[str]] = None,
                 public_ip_address_id: Optional[str] = None,
                 static_ip_address: Optional[str] = None):
        """
        IP configuration details.

        :param Sequence[str] l_b_backend_address_pool_ids: The backend address pools associated with the IP configuration.
        :param str public_ip_address_id: The Id of the public IP address associated with the IP configuration.
        :param str static_ip_address: The static IP address of the IP configuration.
        """
        # Record only the optional fields that were actually supplied.
        optional_fields = {
            "l_b_backend_address_pool_ids": l_b_backend_address_pool_ids,
            "public_ip_address_id": public_ip_address_id,
            "static_ip_address": static_ip_address,
        }
        for field_name, field_value in optional_fields.items():
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="lBBackendAddressPoolIds")
    def l_b_backend_address_pool_ids(self) -> Optional[Sequence[str]]:
        """The backend address pools associated with the IP configuration."""
        return pulumi.get(self, "l_b_backend_address_pool_ids")

    @property
    @pulumi.getter(name="publicIpAddressId")
    def public_ip_address_id(self) -> Optional[str]:
        """The Id of the public IP address associated with the IP configuration."""
        return pulumi.get(self, "public_ip_address_id")

    @property
    @pulumi.getter(name="staticIPAddress")
    def static_ip_address(self) -> Optional[str]:
        """The static IP address of the IP configuration."""
        return pulumi.get(self, "static_ip_address")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class IaaSVMContainerResponse(dict):
    """
    IaaS VM workload-specific container.
    """
    def __init__(__self__, *,
                 backup_management_type: Optional[str] = None,
                 container_type: Optional[str] = None,
                 friendly_name: Optional[str] = None,
                 health_status: Optional[str] = None,
                 registration_status: Optional[str] = None,
                 resource_group: Optional[str] = None,
                 virtual_machine_id: Optional[str] = None,
                 virtual_machine_version: Optional[str] = None):
        """
        IaaS VM workload-specific container.

        :param str backup_management_type: Type of backup management for the container.
        :param str container_type: Type of the container. The value of this property for: 1. Compute Azure VM is Microsoft.Compute/virtualMachines 2.
               Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines 3. Windows machines (like MAB, DPM etc) is
               Windows 4. Azure SQL instance is AzureSqlContainer. 5. Storage containers is StorageContainer. 6. Azure workload
               Backup is VMAppContainer
        :param str friendly_name: Friendly name of the container.
        :param str health_status: Status of health of the container.
        :param str registration_status: Status of registration of the container with the Recovery Services Vault.
        :param str resource_group: Resource group name of Recovery Services Vault.
        :param str virtual_machine_id: Fully qualified ARM url of the virtual machine represented by this Azure IaaS VM container.
        :param str virtual_machine_version: Specifies whether the container represents a Classic or an Azure Resource Manager VM.
        """
        # NOTE(review): when a ``container_type`` is supplied it is normalized to
        # the 'IaaSVMContainer' discriminator constant and the caller's exact
        # string is discarded — presumably the generated discriminator pattern;
        # preserved as-is.
        field_values = (
            ("backup_management_type", backup_management_type),
            ("container_type", 'IaaSVMContainer' if container_type is not None else None),
            ("friendly_name", friendly_name),
            ("health_status", health_status),
            ("registration_status", registration_status),
            ("resource_group", resource_group),
            ("virtual_machine_id", virtual_machine_id),
            ("virtual_machine_version", virtual_machine_version),
        )
        # Record only the optional fields that were actually supplied.
        for field_name, field_value in field_values:
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """Type of backup management for the container."""
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="containerType")
    def container_type(self) -> Optional[str]:
        """
        Type of the container. The value of this property for: 1. Compute Azure VM is Microsoft.Compute/virtualMachines 2.
        Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines 3. Windows machines (like MAB, DPM etc) is
        Windows 4. Azure SQL instance is AzureSqlContainer. 5. Storage containers is StorageContainer. 6. Azure workload
        Backup is VMAppContainer
        """
        return pulumi.get(self, "container_type")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """Friendly name of the container."""
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="healthStatus")
    def health_status(self) -> Optional[str]:
        """Status of health of the container."""
        return pulumi.get(self, "health_status")

    @property
    @pulumi.getter(name="registrationStatus")
    def registration_status(self) -> Optional[str]:
        """Status of registration of the container with the Recovery Services Vault."""
        return pulumi.get(self, "registration_status")

    @property
    @pulumi.getter(name="resourceGroup")
    def resource_group(self) -> Optional[str]:
        """Resource group name of Recovery Services Vault."""
        return pulumi.get(self, "resource_group")

    @property
    @pulumi.getter(name="virtualMachineId")
    def virtual_machine_id(self) -> Optional[str]:
        """Fully qualified ARM url of the virtual machine represented by this Azure IaaS VM container."""
        return pulumi.get(self, "virtual_machine_id")

    @property
    @pulumi.getter(name="virtualMachineVersion")
    def virtual_machine_version(self) -> Optional[str]:
        """Specifies whether the container represents a Classic or an Azure Resource Manager VM."""
        return pulumi.get(self, "virtual_machine_version")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class IdentityDataResponse(dict):
    """
    Identity for the resource.
    """
    def __init__(__self__, *,
                 principal_id: str,
                 tenant_id: str,
                 type: str):
        """
        Identity for the resource.

        :param str principal_id: The principal ID of resource identity.
        :param str tenant_id: The tenant ID of resource.
        :param str type: The identity type.
        """
        # All three fields are required, so each is stored unconditionally.
        required_fields = (
            ("principal_id", principal_id),
            ("tenant_id", tenant_id),
            ("type", type),
        )
        for field_name, field_value in required_fields:
            pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="principalId")
    def principal_id(self) -> str:
        """The principal ID of resource identity."""
        return pulumi.get(self, "principal_id")

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> str:
        """The tenant ID of resource."""
        return pulumi.get(self, "tenant_id")

    @property
    @pulumi.getter
    def type(self) -> str:
        """The identity type."""
        return pulumi.get(self, "type")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class IdentityProviderDetailsResponse(dict):
    """
    Identity provider details.
    """
    def __init__(__self__, *,
                 aad_authority: Optional[str] = None,
                 application_id: Optional[str] = None,
                 audience: Optional[str] = None,
                 object_id: Optional[str] = None,
                 tenant_id: Optional[str] = None):
        """
        Identity provider details.

        :param str aad_authority: The base authority for Azure Active Directory authentication.
        :param str application_id: The application/client Id for the service principal with which the on-premise management/data plane components would communicate with our Azure services.
        :param str audience: The intended Audience of the service principal with which the on-premise management/data plane components would communicate with our Azure services.
        :param str object_id: The object Id of the service principal with which the on-premise management/data plane components would communicate with our Azure services.
        :param str tenant_id: The tenant Id for the service principal with which the on-premise management/data plane components would communicate with our Azure services.
        """
        # Record only the optional fields that were actually supplied.
        optional_fields = {
            "aad_authority": aad_authority,
            "application_id": application_id,
            "audience": audience,
            "object_id": object_id,
            "tenant_id": tenant_id,
        }
        for field_name, field_value in optional_fields.items():
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="aadAuthority")
    def aad_authority(self) -> Optional[str]:
        """The base authority for Azure Active Directory authentication."""
        return pulumi.get(self, "aad_authority")

    @property
    @pulumi.getter(name="applicationId")
    def application_id(self) -> Optional[str]:
        """The application/client Id for the service principal with which the on-premise management/data plane components would communicate with our Azure services."""
        return pulumi.get(self, "application_id")

    @property
    @pulumi.getter
    def audience(self) -> Optional[str]:
        """The intended Audience of the service principal with which the on-premise management/data plane components would communicate with our Azure services."""
        return pulumi.get(self, "audience")

    @property
    @pulumi.getter(name="objectId")
    def object_id(self) -> Optional[str]:
        """The object Id of the service principal with which the on-premise management/data plane components would communicate with our Azure services."""
        return pulumi.get(self, "object_id")

    @property
    @pulumi.getter(name="tenantId")
    def tenant_id(self) -> Optional[str]:
        """The tenant Id for the service principal with which the on-premise management/data plane components would communicate with our Azure services."""
        return pulumi.get(self, "tenant_id")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class InMageAgentDetailsResponse(dict):
    """
    The details of the InMage agent.
    """
    def __init__(__self__, *,
                 agent_expiry_date: Optional[str] = None,
                 agent_update_status: Optional[str] = None,
                 agent_version: Optional[str] = None,
                 post_update_reboot_status: Optional[str] = None):
        """
        The details of the InMage agent.

        :param str agent_expiry_date: Agent expiry date.
        :param str agent_update_status: Whether the installed agent needs to be updated.
        :param str agent_version: The agent version.
        :param str post_update_reboot_status: Whether a reboot is required after the update is applied.
        """
        # Record only the optional fields that were actually supplied.
        optional_fields = {
            "agent_expiry_date": agent_expiry_date,
            "agent_update_status": agent_update_status,
            "agent_version": agent_version,
            "post_update_reboot_status": post_update_reboot_status,
        }
        for field_name, field_value in optional_fields.items():
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="agentExpiryDate")
    def agent_expiry_date(self) -> Optional[str]:
        """Agent expiry date."""
        return pulumi.get(self, "agent_expiry_date")

    @property
    @pulumi.getter(name="agentUpdateStatus")
    def agent_update_status(self) -> Optional[str]:
        """Whether the installed agent needs to be updated."""
        return pulumi.get(self, "agent_update_status")

    @property
    @pulumi.getter(name="agentVersion")
    def agent_version(self) -> Optional[str]:
        """The agent version."""
        return pulumi.get(self, "agent_version")

    @property
    @pulumi.getter(name="postUpdateRebootStatus")
    def post_update_reboot_status(self) -> Optional[str]:
        """Whether a reboot is required after the update is applied."""
        return pulumi.get(self, "post_update_reboot_status")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class InMageAzureV2ManagedDiskDetailsResponse(dict):
    """InMageAzureV2 Managed disk details."""

    def __init__(__self__, *,
                 disk_id: Optional[str] = None,
                 replica_disk_type: Optional[str] = None,
                 seed_managed_disk_id: Optional[str] = None):
        """
        InMageAzureV2 Managed disk details.

        :param str disk_id: The disk id.
        :param str replica_disk_type: The replica disk type.
        :param str seed_managed_disk_id: Seed managed disk Id.
        """
        # Record only the fields that were actually supplied.
        for key, value in (
            ("disk_id", disk_id),
            ("replica_disk_type", replica_disk_type),
            ("seed_managed_disk_id", seed_managed_disk_id),
        ):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="diskId")
    def disk_id(self) -> Optional[str]:
        """The disk id."""
        return pulumi.get(self, "disk_id")

    @property
    @pulumi.getter(name="replicaDiskType")
    def replica_disk_type(self) -> Optional[str]:
        """The replica disk type."""
        return pulumi.get(self, "replica_disk_type")

    @property
    @pulumi.getter(name="seedManagedDiskId")
    def seed_managed_disk_id(self) -> Optional[str]:
        """Seed managed disk Id."""
        return pulumi.get(self, "seed_managed_disk_id")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class InMageAzureV2PolicyDetailsResponse(dict):
    """InMage Azure v2 specific protection profile details."""

    def __init__(__self__, *,
                 instance_type: str,
                 app_consistent_frequency_in_minutes: Optional[int] = None,
                 crash_consistent_frequency_in_minutes: Optional[int] = None,
                 multi_vm_sync_status: Optional[str] = None,
                 recovery_point_history: Optional[int] = None,
                 recovery_point_threshold_in_minutes: Optional[int] = None):
        """
        InMage Azure v2 specific protection profile details.

        :param str instance_type: Gets the class type. Overridden in derived classes.
        :param int app_consistent_frequency_in_minutes: The app consistent snapshot frequency in minutes.
        :param int crash_consistent_frequency_in_minutes: The crash consistent snapshot frequency in minutes.
        :param str multi_vm_sync_status: A value indicating whether multi-VM sync has to be enabled.
        :param int recovery_point_history: The duration in minutes until which the recovery points need to be stored.
        :param int recovery_point_threshold_in_minutes: The recovery point threshold in minutes.
        """
        # The discriminator is pinned to this variant's literal, regardless of
        # the value that was passed in.
        pulumi.set(__self__, "instance_type", 'InMageAzureV2')
        # Record only the optional fields that were actually supplied.
        for key, value in (
            ("app_consistent_frequency_in_minutes", app_consistent_frequency_in_minutes),
            ("crash_consistent_frequency_in_minutes", crash_consistent_frequency_in_minutes),
            ("multi_vm_sync_status", multi_vm_sync_status),
            ("recovery_point_history", recovery_point_history),
            ("recovery_point_threshold_in_minutes", recovery_point_threshold_in_minutes),
        ):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Gets the class type. Overridden in derived classes."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="appConsistentFrequencyInMinutes")
    def app_consistent_frequency_in_minutes(self) -> Optional[int]:
        """The app consistent snapshot frequency in minutes."""
        return pulumi.get(self, "app_consistent_frequency_in_minutes")

    @property
    @pulumi.getter(name="crashConsistentFrequencyInMinutes")
    def crash_consistent_frequency_in_minutes(self) -> Optional[int]:
        """The crash consistent snapshot frequency in minutes."""
        return pulumi.get(self, "crash_consistent_frequency_in_minutes")

    @property
    @pulumi.getter(name="multiVmSyncStatus")
    def multi_vm_sync_status(self) -> Optional[str]:
        """A value indicating whether multi-VM sync has to be enabled."""
        return pulumi.get(self, "multi_vm_sync_status")

    @property
    @pulumi.getter(name="recoveryPointHistory")
    def recovery_point_history(self) -> Optional[int]:
        """The duration in minutes until which the recovery points need to be stored."""
        return pulumi.get(self, "recovery_point_history")

    @property
    @pulumi.getter(name="recoveryPointThresholdInMinutes")
    def recovery_point_threshold_in_minutes(self) -> Optional[int]:
        """The recovery point threshold in minutes."""
        return pulumi.get(self, "recovery_point_threshold_in_minutes")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class InMageAzureV2ProtectedDiskDetailsResponse(dict):
    """InMageAzureV2 protected disk details."""

    def __init__(__self__, *,
                 disk_capacity_in_bytes: Optional[int] = None,
                 disk_id: Optional[str] = None,
                 disk_name: Optional[str] = None,
                 disk_resized: Optional[str] = None,
                 file_system_capacity_in_bytes: Optional[int] = None,
                 health_error_code: Optional[str] = None,
                 last_rpo_calculated_time: Optional[str] = None,
                 protection_stage: Optional[str] = None,
                 ps_data_in_mega_bytes: Optional[float] = None,
                 resync_duration_in_seconds: Optional[int] = None,
                 resync_progress_percentage: Optional[int] = None,
                 resync_required: Optional[str] = None,
                 rpo_in_seconds: Optional[int] = None,
                 source_data_in_mega_bytes: Optional[float] = None,
                 target_data_in_mega_bytes: Optional[float] = None):
        """
        InMageAzureV2 protected disk details.

        :param int disk_capacity_in_bytes: The disk capacity in bytes.
        :param str disk_id: The disk id.
        :param str disk_name: The disk name.
        :param str disk_resized: A value indicating whether disk is resized.
        :param int file_system_capacity_in_bytes: The disk file system capacity in bytes.
        :param str health_error_code: The health error code for the disk.
        :param str last_rpo_calculated_time: The last RPO calculated time.
        :param str protection_stage: The protection stage.
        :param float ps_data_in_mega_bytes: The PS data transit in MB.
        :param int resync_duration_in_seconds: The resync duration in seconds.
        :param int resync_progress_percentage: The resync progress percentage.
        :param str resync_required: A value indicating whether resync is required for this disk.
        :param int rpo_in_seconds: The RPO in seconds.
        :param float source_data_in_mega_bytes: The source data transit in MB.
        :param float target_data_in_mega_bytes: The target data transit in MB.
        """
        # Record only the fields that were actually supplied.
        for key, value in (
            ("disk_capacity_in_bytes", disk_capacity_in_bytes),
            ("disk_id", disk_id),
            ("disk_name", disk_name),
            ("disk_resized", disk_resized),
            ("file_system_capacity_in_bytes", file_system_capacity_in_bytes),
            ("health_error_code", health_error_code),
            ("last_rpo_calculated_time", last_rpo_calculated_time),
            ("protection_stage", protection_stage),
            ("ps_data_in_mega_bytes", ps_data_in_mega_bytes),
            ("resync_duration_in_seconds", resync_duration_in_seconds),
            ("resync_progress_percentage", resync_progress_percentage),
            ("resync_required", resync_required),
            ("rpo_in_seconds", rpo_in_seconds),
            ("source_data_in_mega_bytes", source_data_in_mega_bytes),
            ("target_data_in_mega_bytes", target_data_in_mega_bytes),
        ):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="diskCapacityInBytes")
    def disk_capacity_in_bytes(self) -> Optional[int]:
        """The disk capacity in bytes."""
        return pulumi.get(self, "disk_capacity_in_bytes")

    @property
    @pulumi.getter(name="diskId")
    def disk_id(self) -> Optional[str]:
        """The disk id."""
        return pulumi.get(self, "disk_id")

    @property
    @pulumi.getter(name="diskName")
    def disk_name(self) -> Optional[str]:
        """The disk name."""
        return pulumi.get(self, "disk_name")

    @property
    @pulumi.getter(name="diskResized")
    def disk_resized(self) -> Optional[str]:
        """A value indicating whether disk is resized."""
        return pulumi.get(self, "disk_resized")

    @property
    @pulumi.getter(name="fileSystemCapacityInBytes")
    def file_system_capacity_in_bytes(self) -> Optional[int]:
        """The disk file system capacity in bytes."""
        return pulumi.get(self, "file_system_capacity_in_bytes")

    @property
    @pulumi.getter(name="healthErrorCode")
    def health_error_code(self) -> Optional[str]:
        """The health error code for the disk."""
        return pulumi.get(self, "health_error_code")

    @property
    @pulumi.getter(name="lastRpoCalculatedTime")
    def last_rpo_calculated_time(self) -> Optional[str]:
        """The last RPO calculated time."""
        return pulumi.get(self, "last_rpo_calculated_time")

    @property
    @pulumi.getter(name="protectionStage")
    def protection_stage(self) -> Optional[str]:
        """The protection stage."""
        return pulumi.get(self, "protection_stage")

    @property
    @pulumi.getter(name="psDataInMegaBytes")
    def ps_data_in_mega_bytes(self) -> Optional[float]:
        """The PS data transit in MB."""
        return pulumi.get(self, "ps_data_in_mega_bytes")

    @property
    @pulumi.getter(name="resyncDurationInSeconds")
    def resync_duration_in_seconds(self) -> Optional[int]:
        """The resync duration in seconds."""
        return pulumi.get(self, "resync_duration_in_seconds")

    @property
    @pulumi.getter(name="resyncProgressPercentage")
    def resync_progress_percentage(self) -> Optional[int]:
        """The resync progress percentage."""
        return pulumi.get(self, "resync_progress_percentage")

    @property
    @pulumi.getter(name="resyncRequired")
    def resync_required(self) -> Optional[str]:
        """A value indicating whether resync is required for this disk."""
        return pulumi.get(self, "resync_required")

    @property
    @pulumi.getter(name="rpoInSeconds")
    def rpo_in_seconds(self) -> Optional[int]:
        """The RPO in seconds."""
        return pulumi.get(self, "rpo_in_seconds")

    @property
    @pulumi.getter(name="sourceDataInMegaBytes")
    def source_data_in_mega_bytes(self) -> Optional[float]:
        """The source data transit in MB."""
        return pulumi.get(self, "source_data_in_mega_bytes")

    @property
    @pulumi.getter(name="targetDataInMegaBytes")
    def target_data_in_mega_bytes(self) -> Optional[float]:
        """The target data transit in MB."""
        return pulumi.get(self, "target_data_in_mega_bytes")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake if snake else prop
@pulumi.output_type
class InMageAzureV2ReplicationDetailsResponse(dict):
"""
InMageAzureV2 provider specific settings
"""
def __init__(__self__, *,
instance_type: str,
process_server_name: str,
agent_expiry_date: Optional[str] = None,
agent_version: Optional[str] = None,
azure_vm_disk_details: Optional[Sequence['outputs.AzureVmDiskDetailsResponse']] = None,
compressed_data_rate_in_mb: Optional[float] = None,
datastores: Optional[Sequence[str]] = None,
discovery_type: Optional[str] = None,
disk_resized: Optional[str] = None,
enable_rdp_on_target_option: Optional[str] = None,
infrastructure_vm_id: Optional[str] = None,
ip_address: Optional[str] = None,
is_agent_update_required: Optional[str] = None,
is_reboot_after_update_required: Optional[str] = None,
last_heartbeat: Optional[str] = None,
last_rpo_calculated_time: Optional[str] = None,
last_update_received_time: Optional[str] = None,
license_type: Optional[str] = None,
master_target_id: Optional[str] = None,
multi_vm_group_id: Optional[str] = None,
multi_vm_group_name: Optional[str] = None,
multi_vm_sync_status: Optional[str] = None,
os_disk_id: Optional[str] = None,
os_type: Optional[str] = None,
os_version: Optional[str] = None,
process_server_id: Optional[str] = None,
protected_disks: Optional[Sequence['outputs.InMageAzureV2ProtectedDiskDetailsResponse']] = None,
protected_managed_disks: Optional[Sequence['outputs.InMageAzureV2ManagedDiskDetailsResponse']] = None,
protection_stage: Optional[str] = None,
recovery_availability_set_id: Optional[str] = None,
recovery_azure_log_storage_account_id: Optional[str] = None,
recovery_azure_resource_group_id: Optional[str] = None,
recovery_azure_storage_account: Optional[str] = None,
recovery_azure_vm_name: Optional[str] = None,
recovery_azure_vm_size: Optional[str] = None,
replica_id: Optional[str] = None,
resync_progress_percentage: Optional[int] = None,
rpo_in_seconds: Optional[int] = None,
selected_recovery_azure_network_id: Optional[str] = None,
selected_source_nic_id: Optional[str] = None,
selected_tfo_azure_network_id: Optional[str] = None,
source_vm_cpu_count: Optional[int] = None,
source_vm_ram_size_in_mb: Optional[int] = None,
target_availability_zone: Optional[str] = None,
target_proximity_placement_group_id: Optional[str] = None,
target_vm_id: Optional[str] = None,
uncompressed_data_rate_in_mb: Optional[float] = None,
use_managed_disks: Optional[str] = None,
v_center_infrastructure_id: Optional[str] = None,
validation_errors: Optional[Sequence['outputs.HealthErrorResponse']] = None,
vhd_name: Optional[str] = None,
vm_id: Optional[str] = None,
vm_nics: Optional[Sequence['outputs.VMNicDetailsResponse']] = None,
vm_protection_state: Optional[str] = None,
vm_protection_state_description: Optional[str] = None):
"""
InMageAzureV2 provider specific settings
:param str instance_type: Gets the Instance type.
:param str process_server_name: The process server name.
:param str agent_expiry_date: Agent expiry date.
:param str agent_version: The agent version.
:param Sequence['AzureVmDiskDetailsResponseArgs'] azure_vm_disk_details: Azure VM Disk details.
:param float compressed_data_rate_in_mb: The compressed data change rate in MB.
:param Sequence[str] datastores: The data stores of the on-premise machine. Value can be list of strings that contain data store names.
:param str discovery_type: A value indicating the discovery type of the machine. Value can be vCenter or physical.
:param str disk_resized: A value indicating whether any disk is resized for this VM.
:param str enable_rdp_on_target_option: The selected option to enable RDP\SSH on target vm after failover. String value of {SrsDataContract.EnableRDPOnTargetOption} enum.
:param str infrastructure_vm_id: The infrastructure VM Id.
:param str ip_address: The source IP address.
:param str is_agent_update_required: A value indicating whether installed agent needs to be updated.
:param str is_reboot_after_update_required: A value indicating whether the source server requires a restart after update.
:param str last_heartbeat: The last heartbeat received from the source server.
:param str last_rpo_calculated_time: The last RPO calculated time.
:param str last_update_received_time: The last update time received from on-prem components.
:param str license_type: License Type of the VM to be used.
:param str master_target_id: The master target Id.
:param str multi_vm_group_id: The multi vm group Id.
:param str multi_vm_group_name: The multi vm group name.
:param str multi_vm_sync_status: A value indicating whether multi vm sync is enabled or disabled.
:param str os_disk_id: The id of the disk containing the OS.
:param str os_type: The type of the OS on the VM.
:param str os_version: The OS Version of the protected item.
:param str process_server_id: The process server Id.
:param Sequence['InMageAzureV2ProtectedDiskDetailsResponseArgs'] protected_disks: The list of protected disks.
:param Sequence['InMageAzureV2ManagedDiskDetailsResponseArgs'] protected_managed_disks: The list of protected managed disks.
:param str protection_stage: The protection stage.
:param str recovery_availability_set_id: The recovery availability set Id.
:param str recovery_azure_log_storage_account_id: The ARM id of the log storage account used for replication. This will be set to null if no log storage account was provided during enable protection.
:param str recovery_azure_resource_group_id: The target resource group Id.
:param str recovery_azure_storage_account: The recovery Azure storage account.
:param str recovery_azure_vm_name: Recovery Azure given name.
:param str recovery_azure_vm_size: The Recovery Azure VM size.
:param str replica_id: The replica id of the protected item.
:param int resync_progress_percentage: The resync progress percentage.
:param int rpo_in_seconds: The RPO in seconds.
:param str selected_recovery_azure_network_id: The selected recovery azure network Id.
:param str selected_source_nic_id: The selected source nic Id which will be used as the primary nic during failover.
:param str selected_tfo_azure_network_id: The test failover virtual network.
:param int source_vm_cpu_count: The CPU count of the VM on the primary side.
:param int source_vm_ram_size_in_mb: The RAM size of the VM on the primary side.
:param str target_availability_zone: The target availability zone.
:param str target_proximity_placement_group_id: The target proximity placement group Id.
:param str target_vm_id: The ARM Id of the target Azure VM. This value will be null until the VM is failed over. Only after failure it will be populated with the ARM Id of the Azure VM.
:param float uncompressed_data_rate_in_mb: The uncompressed data change rate in MB.
:param str use_managed_disks: A value indicating whether managed disks should be used during failover.
:param str v_center_infrastructure_id: The vCenter infrastructure Id.
:param Sequence['HealthErrorResponseArgs'] validation_errors: The validation errors of the on-premise machine Value can be list of validation errors.
:param str vhd_name: The OS disk VHD name.
:param str vm_id: The virtual machine Id.
:param Sequence['VMNicDetailsResponseArgs'] vm_nics: The PE Network details.
:param str vm_protection_state: The protection state for the vm.
:param str vm_protection_state_description: The protection state description for the vm.
"""
pulumi.set(__self__, "instance_type", 'InMageAzureV2')
pulumi.set(__self__, "process_server_name", process_server_name)
if agent_expiry_date is not None:
pulumi.set(__self__, "agent_expiry_date", agent_expiry_date)
if agent_version is not None:
pulumi.set(__self__, "agent_version", agent_version)
if azure_vm_disk_details is not None:
pulumi.set(__self__, "azure_vm_disk_details", azure_vm_disk_details)
if compressed_data_rate_in_mb is not None:
pulumi.set(__self__, "compressed_data_rate_in_mb", compressed_data_rate_in_mb)
if datastores is not None:
pulumi.set(__self__, "datastores", datastores)
if discovery_type is not None:
pulumi.set(__self__, "discovery_type", discovery_type)
if disk_resized is not None:
pulumi.set(__self__, "disk_resized", disk_resized)
if enable_rdp_on_target_option is not None:
pulumi.set(__self__, "enable_rdp_on_target_option", enable_rdp_on_target_option)
if infrastructure_vm_id is not None:
pulumi.set(__self__, "infrastructure_vm_id", infrastructure_vm_id)
if ip_address is not None:
pulumi.set(__self__, "ip_address", ip_address)
if is_agent_update_required is not None:
pulumi.set(__self__, "is_agent_update_required", is_agent_update_required)
if is_reboot_after_update_required is not None:
pulumi.set(__self__, "is_reboot_after_update_required", is_reboot_after_update_required)
if last_heartbeat is not None:
pulumi.set(__self__, "last_heartbeat", last_heartbeat)
if last_rpo_calculated_time is not None:
pulumi.set(__self__, "last_rpo_calculated_time", last_rpo_calculated_time)
if last_update_received_time is not None:
pulumi.set(__self__, "last_update_received_time", last_update_received_time)
if license_type is not None:
pulumi.set(__self__, "license_type", license_type)
if master_target_id is not None:
pulumi.set(__self__, "master_target_id", master_target_id)
if multi_vm_group_id is not None:
pulumi.set(__self__, "multi_vm_group_id", multi_vm_group_id)
if multi_vm_group_name is not None:
pulumi.set(__self__, "multi_vm_group_name", multi_vm_group_name)
if multi_vm_sync_status is not None:
pulumi.set(__self__, "multi_vm_sync_status", multi_vm_sync_status)
if os_disk_id is not None:
pulumi.set(__self__, "os_disk_id", os_disk_id)
if os_type is not None:
pulumi.set(__self__, "os_type", os_type)
if os_version is not None:
pulumi.set(__self__, "os_version", os_version)
if process_server_id is not None:
pulumi.set(__self__, "process_server_id", process_server_id)
if protected_disks is not None:
pulumi.set(__self__, "protected_disks", protected_disks)
if protected_managed_disks is not None:
pulumi.set(__self__, "protected_managed_disks", protected_managed_disks)
if protection_stage is not None:
pulumi.set(__self__, "protection_stage", protection_stage)
if recovery_availability_set_id is not None:
pulumi.set(__self__, "recovery_availability_set_id", recovery_availability_set_id)
if recovery_azure_log_storage_account_id is not None:
pulumi.set(__self__, "recovery_azure_log_storage_account_id", recovery_azure_log_storage_account_id)
if recovery_azure_resource_group_id is not None:
pulumi.set(__self__, "recovery_azure_resource_group_id", recovery_azure_resource_group_id)
if recovery_azure_storage_account is not None:
pulumi.set(__self__, "recovery_azure_storage_account", recovery_azure_storage_account)
if recovery_azure_vm_name is not None:
pulumi.set(__self__, "recovery_azure_vm_name", recovery_azure_vm_name)
if recovery_azure_vm_size is not None:
pulumi.set(__self__, "recovery_azure_vm_size", recovery_azure_vm_size)
if replica_id is not None:
pulumi.set(__self__, "replica_id", replica_id)
if resync_progress_percentage is not None:
pulumi.set(__self__, "resync_progress_percentage", resync_progress_percentage)
if rpo_in_seconds is not None:
pulumi.set(__self__, "rpo_in_seconds", rpo_in_seconds)
if selected_recovery_azure_network_id is not None:
pulumi.set(__self__, "selected_recovery_azure_network_id", selected_recovery_azure_network_id)
if selected_source_nic_id is not None:
pulumi.set(__self__, "selected_source_nic_id", selected_source_nic_id)
if selected_tfo_azure_network_id is not None:
pulumi.set(__self__, "selected_tfo_azure_network_id", selected_tfo_azure_network_id)
if source_vm_cpu_count is not None:
pulumi.set(__self__, "source_vm_cpu_count", source_vm_cpu_count)
if source_vm_ram_size_in_mb is not None:
pulumi.set(__self__, "source_vm_ram_size_in_mb", source_vm_ram_size_in_mb)
if target_availability_zone is not None:
pulumi.set(__self__, "target_availability_zone", target_availability_zone)
if target_proximity_placement_group_id is not None:
pulumi.set(__self__, "target_proximity_placement_group_id", target_proximity_placement_group_id)
if target_vm_id is not None:
pulumi.set(__self__, "target_vm_id", target_vm_id)
if uncompressed_data_rate_in_mb is not None:
pulumi.set(__self__, "uncompressed_data_rate_in_mb", uncompressed_data_rate_in_mb)
if use_managed_disks is not None:
pulumi.set(__self__, "use_managed_disks", use_managed_disks)
if v_center_infrastructure_id is not None:
pulumi.set(__self__, "v_center_infrastructure_id", v_center_infrastructure_id)
if validation_errors is not None:
pulumi.set(__self__, "validation_errors", validation_errors)
if vhd_name is not None:
pulumi.set(__self__, "vhd_name", vhd_name)
if vm_id is not None:
pulumi.set(__self__, "vm_id", vm_id)
if vm_nics is not None:
pulumi.set(__self__, "vm_nics", vm_nics)
if vm_protection_state is not None:
pulumi.set(__self__, "vm_protection_state", vm_protection_state)
if vm_protection_state_description is not None:
pulumi.set(__self__, "vm_protection_state_description", vm_protection_state_description)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> str:
"""
Gets the Instance type.
"""
return pulumi.get(self, "instance_type")
@property
@pulumi.getter(name="processServerName")
def process_server_name(self) -> str:
"""
The process server name.
"""
return pulumi.get(self, "process_server_name")
@property
@pulumi.getter(name="agentExpiryDate")
def agent_expiry_date(self) -> Optional[str]:
"""
Agent expiry date.
"""
return pulumi.get(self, "agent_expiry_date")
@property
@pulumi.getter(name="agentVersion")
def agent_version(self) -> Optional[str]:
"""
The agent version.
"""
return pulumi.get(self, "agent_version")
@property
@pulumi.getter(name="azureVMDiskDetails")
def azure_vm_disk_details(self) -> Optional[Sequence['outputs.AzureVmDiskDetailsResponse']]:
"""
Azure VM Disk details.
"""
return pulumi.get(self, "azure_vm_disk_details")
@property
@pulumi.getter(name="compressedDataRateInMB")
def compressed_data_rate_in_mb(self) -> Optional[float]:
"""
The compressed data change rate in MB.
"""
return pulumi.get(self, "compressed_data_rate_in_mb")
@property
@pulumi.getter
def datastores(self) -> Optional[Sequence[str]]:
"""
The data stores of the on-premise machine. Value can be list of strings that contain data store names.
"""
return pulumi.get(self, "datastores")
@property
@pulumi.getter(name="discoveryType")
def discovery_type(self) -> Optional[str]:
"""
A value indicating the discovery type of the machine. Value can be vCenter or physical.
"""
return pulumi.get(self, "discovery_type")
@property
@pulumi.getter(name="diskResized")
def disk_resized(self) -> Optional[str]:
"""
A value indicating whether any disk is resized for this VM.
"""
return pulumi.get(self, "disk_resized")
@property
@pulumi.getter(name="enableRdpOnTargetOption")
def enable_rdp_on_target_option(self) -> Optional[str]:
"""
The selected option to enable RDP\SSH on target vm after failover. String value of {SrsDataContract.EnableRDPOnTargetOption} enum.
"""
return pulumi.get(self, "enable_rdp_on_target_option")
@property
@pulumi.getter(name="infrastructureVmId")
def infrastructure_vm_id(self) -> Optional[str]:
"""
The infrastructure VM Id.
"""
return pulumi.get(self, "infrastructure_vm_id")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[str]:
"""
The source IP address.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="isAgentUpdateRequired")
def is_agent_update_required(self) -> Optional[str]:
"""
A value indicating whether installed agent needs to be updated.
"""
return pulumi.get(self, "is_agent_update_required")
@property
@pulumi.getter(name="isRebootAfterUpdateRequired")
def is_reboot_after_update_required(self) -> Optional[str]:
"""
A value indicating whether the source server requires a restart after update.
"""
return pulumi.get(self, "is_reboot_after_update_required")
@property
@pulumi.getter(name="lastHeartbeat")
def last_heartbeat(self) -> Optional[str]:
"""
The last heartbeat received from the source server.
"""
return pulumi.get(self, "last_heartbeat")
@property
@pulumi.getter(name="lastRpoCalculatedTime")
def last_rpo_calculated_time(self) -> Optional[str]:
"""
The last RPO calculated time.
"""
return pulumi.get(self, "last_rpo_calculated_time")
@property
@pulumi.getter(name="lastUpdateReceivedTime")
def last_update_received_time(self) -> Optional[str]:
"""
The last update time received from on-prem components.
"""
return pulumi.get(self, "last_update_received_time")
@property
@pulumi.getter(name="licenseType")
def license_type(self) -> Optional[str]:
"""
License Type of the VM to be used.
"""
return pulumi.get(self, "license_type")
@property
@pulumi.getter(name="masterTargetId")
def master_target_id(self) -> Optional[str]:
"""
The master target Id.
"""
return pulumi.get(self, "master_target_id")
@property
@pulumi.getter(name="multiVmGroupId")
def multi_vm_group_id(self) -> Optional[str]:
"""
The multi vm group Id.
"""
return pulumi.get(self, "multi_vm_group_id")
@property
@pulumi.getter(name="multiVmGroupName")
def multi_vm_group_name(self) -> Optional[str]:
"""
The multi vm group name.
"""
return pulumi.get(self, "multi_vm_group_name")
@property
@pulumi.getter(name="multiVmSyncStatus")
def multi_vm_sync_status(self) -> Optional[str]:
"""
A value indicating whether multi vm sync is enabled or disabled.
"""
return pulumi.get(self, "multi_vm_sync_status")
@property
@pulumi.getter(name="osDiskId")
def os_disk_id(self) -> Optional[str]:
"""
The id of the disk containing the OS.
"""
return pulumi.get(self, "os_disk_id")
@property
@pulumi.getter(name="osType")
def os_type(self) -> Optional[str]:
"""
The type of the OS on the VM.
"""
return pulumi.get(self, "os_type")
@property
@pulumi.getter(name="osVersion")
def os_version(self) -> Optional[str]:
"""
The OS Version of the protected item.
"""
return pulumi.get(self, "os_version")
@property
@pulumi.getter(name="processServerId")
def process_server_id(self) -> Optional[str]:
"""
The process server Id.
"""
return pulumi.get(self, "process_server_id")
@property
@pulumi.getter(name="protectedDisks")
def protected_disks(self) -> Optional[Sequence['outputs.InMageAzureV2ProtectedDiskDetailsResponse']]:
"""
The list of protected disks.
"""
return pulumi.get(self, "protected_disks")
@property
@pulumi.getter(name="protectedManagedDisks")
def protected_managed_disks(self) -> Optional[Sequence['outputs.InMageAzureV2ManagedDiskDetailsResponse']]:
"""
The list of protected managed disks.
"""
return pulumi.get(self, "protected_managed_disks")
@property
@pulumi.getter(name="protectionStage")
def protection_stage(self) -> Optional[str]:
"""
The protection stage.
"""
return pulumi.get(self, "protection_stage")
@property
@pulumi.getter(name="recoveryAvailabilitySetId")
def recovery_availability_set_id(self) -> Optional[str]:
"""
The recovery availability set Id.
"""
return pulumi.get(self, "recovery_availability_set_id")
@property
@pulumi.getter(name="recoveryAzureLogStorageAccountId")
def recovery_azure_log_storage_account_id(self) -> Optional[str]:
"""
The ARM id of the log storage account used for replication. This will be set to null if no log storage account was provided during enable protection.
"""
return pulumi.get(self, "recovery_azure_log_storage_account_id")
@property
@pulumi.getter(name="recoveryAzureResourceGroupId")
def recovery_azure_resource_group_id(self) -> Optional[str]:
"""
The target resource group Id.
"""
return pulumi.get(self, "recovery_azure_resource_group_id")
@property
@pulumi.getter(name="recoveryAzureStorageAccount")
def recovery_azure_storage_account(self) -> Optional[str]:
"""
The recovery Azure storage account.
"""
return pulumi.get(self, "recovery_azure_storage_account")
@property
@pulumi.getter(name="recoveryAzureVMName")
def recovery_azure_vm_name(self) -> Optional[str]:
"""
Recovery Azure given name.
"""
return pulumi.get(self, "recovery_azure_vm_name")
@property
@pulumi.getter(name="recoveryAzureVMSize")
def recovery_azure_vm_size(self) -> Optional[str]:
"""
The Recovery Azure VM size.
"""
return pulumi.get(self, "recovery_azure_vm_size")
@property
@pulumi.getter(name="replicaId")
def replica_id(self) -> Optional[str]:
"""
The replica id of the protected item.
"""
return pulumi.get(self, "replica_id")
@property
@pulumi.getter(name="resyncProgressPercentage")
def resync_progress_percentage(self) -> Optional[int]:
"""
The resync progress percentage.
"""
return pulumi.get(self, "resync_progress_percentage")
@property
@pulumi.getter(name="rpoInSeconds")
def rpo_in_seconds(self) -> Optional[int]:
"""
The RPO in seconds.
"""
return pulumi.get(self, "rpo_in_seconds")
@property
@pulumi.getter(name="selectedRecoveryAzureNetworkId")
def selected_recovery_azure_network_id(self) -> Optional[str]:
"""
The selected recovery azure network Id.
"""
return pulumi.get(self, "selected_recovery_azure_network_id")
@property
@pulumi.getter(name="selectedSourceNicId")
def selected_source_nic_id(self) -> Optional[str]:
"""
The selected source nic Id which will be used as the primary nic during failover.
"""
return pulumi.get(self, "selected_source_nic_id")
@property
@pulumi.getter(name="selectedTfoAzureNetworkId")
def selected_tfo_azure_network_id(self) -> Optional[str]:
    """The test failover virtual network."""
    tfo_network_id = pulumi.get(self, "selected_tfo_azure_network_id")
    return tfo_network_id
@property
@pulumi.getter(name="sourceVmCpuCount")
def source_vm_cpu_count(self) -> Optional[int]:
    """The CPU count of the VM on the primary side."""
    cpu_count = pulumi.get(self, "source_vm_cpu_count")
    return cpu_count
@property
@pulumi.getter(name="sourceVmRamSizeInMB")
def source_vm_ram_size_in_mb(self) -> Optional[int]:
    """The RAM size of the VM on the primary side."""
    ram_size = pulumi.get(self, "source_vm_ram_size_in_mb")
    return ram_size
@property
@pulumi.getter(name="targetAvailabilityZone")
def target_availability_zone(self) -> Optional[str]:
    """The target availability zone."""
    zone = pulumi.get(self, "target_availability_zone")
    return zone
@property
@pulumi.getter(name="targetProximityPlacementGroupId")
def target_proximity_placement_group_id(self) -> Optional[str]:
    """The target proximity placement group Id."""
    ppg_id = pulumi.get(self, "target_proximity_placement_group_id")
    return ppg_id
@property
@pulumi.getter(name="targetVmId")
def target_vm_id(self) -> Optional[str]:
    """The ARM Id of the target Azure VM. This value will be null until the VM is failed over. Only after failure it will be populated with the ARM Id of the Azure VM."""
    vm_arm_id = pulumi.get(self, "target_vm_id")
    return vm_arm_id
@property
@pulumi.getter(name="uncompressedDataRateInMB")
def uncompressed_data_rate_in_mb(self) -> Optional[float]:
    """The uncompressed data change rate in MB."""
    data_rate = pulumi.get(self, "uncompressed_data_rate_in_mb")
    return data_rate
@property
@pulumi.getter(name="useManagedDisks")
def use_managed_disks(self) -> Optional[str]:
    """A value indicating whether managed disks should be used during failover."""
    managed_disks_flag = pulumi.get(self, "use_managed_disks")
    return managed_disks_flag
@property
@pulumi.getter(name="vCenterInfrastructureId")
def v_center_infrastructure_id(self) -> Optional[str]:
    """The vCenter infrastructure Id."""
    infra_id = pulumi.get(self, "v_center_infrastructure_id")
    return infra_id
@property
@pulumi.getter(name="validationErrors")
def validation_errors(self) -> Optional[Sequence['outputs.HealthErrorResponse']]:
    """The validation errors of the on-premise machine Value can be list of validation errors."""
    errors = pulumi.get(self, "validation_errors")
    return errors
@property
@pulumi.getter(name="vhdName")
def vhd_name(self) -> Optional[str]:
    """The OS disk VHD name."""
    name = pulumi.get(self, "vhd_name")
    return name
@property
@pulumi.getter(name="vmId")
def vm_id(self) -> Optional[str]:
    """The virtual machine Id."""
    machine_id = pulumi.get(self, "vm_id")
    return machine_id
@property
@pulumi.getter(name="vmNics")
def vm_nics(self) -> Optional[Sequence['outputs.VMNicDetailsResponse']]:
    """The PE Network details."""
    nics = pulumi.get(self, "vm_nics")
    return nics
@property
@pulumi.getter(name="vmProtectionState")
def vm_protection_state(self) -> Optional[str]:
    """The protection state for the vm."""
    state = pulumi.get(self, "vm_protection_state")
    return state
@property
@pulumi.getter(name="vmProtectionStateDescription")
def vm_protection_state_description(self) -> Optional[str]:
    """The protection state description for the vm."""
    description = pulumi.get(self, "vm_protection_state_description")
    return description
def _translate_property(self, prop):
    # Prefer the snake_case translation of the wire name; any falsy lookup
    # result falls back to the raw property name unchanged.
    snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
    return snake_name or prop
@pulumi.output_type
class InMageBasePolicyDetailsResponse(dict):
    """
    Base class for the policies of providers using InMage replication.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 app_consistent_frequency_in_minutes: Optional[int] = None,
                 multi_vm_sync_status: Optional[str] = None,
                 recovery_point_history: Optional[int] = None,
                 recovery_point_threshold_in_minutes: Optional[int] = None):
        """
        Base class for the policies of providers using InMage replication.
        :param str instance_type: Gets the class type. Overridden in derived classes.
        :param int app_consistent_frequency_in_minutes: The app consistent snapshot frequency in minutes.
        :param str multi_vm_sync_status: A value indicating whether multi-VM sync has to be enabled.
        :param int recovery_point_history: The duration in minutes until which the recovery points need to be stored.
        :param int recovery_point_threshold_in_minutes: The recovery point threshold in minutes.
        """
        # The discriminator is always stored as the fixed type tag; the
        # incoming `instance_type` argument is intentionally not consulted.
        # Optional fields are recorded only when a value was supplied.
        for field_name, field_value in (
            ("instance_type", 'InMageBasePolicyDetails'),
            ("app_consistent_frequency_in_minutes", app_consistent_frequency_in_minutes),
            ("multi_vm_sync_status", multi_vm_sync_status),
            ("recovery_point_history", recovery_point_history),
            ("recovery_point_threshold_in_minutes", recovery_point_threshold_in_minutes),
        ):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Gets the class type. Overridden in derived classes."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="appConsistentFrequencyInMinutes")
    def app_consistent_frequency_in_minutes(self) -> Optional[int]:
        """The app consistent snapshot frequency in minutes."""
        return pulumi.get(self, "app_consistent_frequency_in_minutes")

    @property
    @pulumi.getter(name="multiVmSyncStatus")
    def multi_vm_sync_status(self) -> Optional[str]:
        """A value indicating whether multi-VM sync has to be enabled."""
        return pulumi.get(self, "multi_vm_sync_status")

    @property
    @pulumi.getter(name="recoveryPointHistory")
    def recovery_point_history(self) -> Optional[int]:
        """The duration in minutes until which the recovery points need to be stored."""
        return pulumi.get(self, "recovery_point_history")

    @property
    @pulumi.getter(name="recoveryPointThresholdInMinutes")
    def recovery_point_threshold_in_minutes(self) -> Optional[int]:
        """The recovery point threshold in minutes."""
        return pulumi.get(self, "recovery_point_threshold_in_minutes")

    def _translate_property(self, prop):
        # Fall back to the raw name when no snake_case translation exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InMagePolicyDetailsResponse(dict):
    """
    InMage specific protection profile details.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 app_consistent_frequency_in_minutes: Optional[int] = None,
                 multi_vm_sync_status: Optional[str] = None,
                 recovery_point_history: Optional[int] = None,
                 recovery_point_threshold_in_minutes: Optional[int] = None):
        """
        InMage specific protection profile details.
        :param str instance_type: Gets the class type. Overridden in derived classes.
        :param int app_consistent_frequency_in_minutes: The app consistent snapshot frequency in minutes.
        :param str multi_vm_sync_status: A value indicating whether multi-VM sync has to be enabled.
        :param int recovery_point_history: The duration in minutes until which the recovery points need to be stored.
        :param int recovery_point_threshold_in_minutes: The recovery point threshold in minutes.
        """
        # The discriminator is always stored as the fixed 'InMage' tag; the
        # incoming `instance_type` argument is intentionally not consulted.
        # Optional fields are recorded only when a value was supplied.
        for field_name, field_value in (
            ("instance_type", 'InMage'),
            ("app_consistent_frequency_in_minutes", app_consistent_frequency_in_minutes),
            ("multi_vm_sync_status", multi_vm_sync_status),
            ("recovery_point_history", recovery_point_history),
            ("recovery_point_threshold_in_minutes", recovery_point_threshold_in_minutes),
        ):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Gets the class type. Overridden in derived classes."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="appConsistentFrequencyInMinutes")
    def app_consistent_frequency_in_minutes(self) -> Optional[int]:
        """The app consistent snapshot frequency in minutes."""
        return pulumi.get(self, "app_consistent_frequency_in_minutes")

    @property
    @pulumi.getter(name="multiVmSyncStatus")
    def multi_vm_sync_status(self) -> Optional[str]:
        """A value indicating whether multi-VM sync has to be enabled."""
        return pulumi.get(self, "multi_vm_sync_status")

    @property
    @pulumi.getter(name="recoveryPointHistory")
    def recovery_point_history(self) -> Optional[int]:
        """The duration in minutes until which the recovery points need to be stored."""
        return pulumi.get(self, "recovery_point_history")

    @property
    @pulumi.getter(name="recoveryPointThresholdInMinutes")
    def recovery_point_threshold_in_minutes(self) -> Optional[int]:
        """The recovery point threshold in minutes."""
        return pulumi.get(self, "recovery_point_threshold_in_minutes")

    def _translate_property(self, prop):
        # Fall back to the raw name when no snake_case translation exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InMageProtectedDiskDetailsResponse(dict):
    """
    InMage protected disk details.
    """
    def __init__(__self__, *,
                 disk_capacity_in_bytes: Optional[int] = None,
                 disk_id: Optional[str] = None,
                 disk_name: Optional[str] = None,
                 disk_resized: Optional[str] = None,
                 file_system_capacity_in_bytes: Optional[int] = None,
                 health_error_code: Optional[str] = None,
                 last_rpo_calculated_time: Optional[str] = None,
                 protection_stage: Optional[str] = None,
                 ps_data_in_mb: Optional[float] = None,
                 resync_duration_in_seconds: Optional[int] = None,
                 resync_progress_percentage: Optional[int] = None,
                 resync_required: Optional[str] = None,
                 rpo_in_seconds: Optional[int] = None,
                 source_data_in_mb: Optional[float] = None,
                 target_data_in_mb: Optional[float] = None):
        """
        InMage protected disk details.
        :param int disk_capacity_in_bytes: The disk capacity in bytes.
        :param str disk_id: The disk id.
        :param str disk_name: The disk name.
        :param str disk_resized: A value indicating whether disk is resized.
        :param int file_system_capacity_in_bytes: The file system capacity in bytes.
        :param str health_error_code: The health error code for the disk.
        :param str last_rpo_calculated_time: The last RPO calculated time.
        :param str protection_stage: The protection stage.
        :param float ps_data_in_mb: The PS data transit in MB.
        :param int resync_duration_in_seconds: The resync duration in seconds.
        :param int resync_progress_percentage: The resync progress percentage.
        :param str resync_required: A value indicating whether resync is required for this disk.
        :param int rpo_in_seconds: The RPO in seconds.
        :param float source_data_in_mb: The source data transit in MB.
        :param float target_data_in_mb: The target data transit in MB.
        """
        # Every field is optional; only supplied (non-None) values are stored.
        for field_name, field_value in (
            ("disk_capacity_in_bytes", disk_capacity_in_bytes),
            ("disk_id", disk_id),
            ("disk_name", disk_name),
            ("disk_resized", disk_resized),
            ("file_system_capacity_in_bytes", file_system_capacity_in_bytes),
            ("health_error_code", health_error_code),
            ("last_rpo_calculated_time", last_rpo_calculated_time),
            ("protection_stage", protection_stage),
            ("ps_data_in_mb", ps_data_in_mb),
            ("resync_duration_in_seconds", resync_duration_in_seconds),
            ("resync_progress_percentage", resync_progress_percentage),
            ("resync_required", resync_required),
            ("rpo_in_seconds", rpo_in_seconds),
            ("source_data_in_mb", source_data_in_mb),
            ("target_data_in_mb", target_data_in_mb),
        ):
            if field_value is not None:
                pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="diskCapacityInBytes")
    def disk_capacity_in_bytes(self) -> Optional[int]:
        """The disk capacity in bytes."""
        return pulumi.get(self, "disk_capacity_in_bytes")

    @property
    @pulumi.getter(name="diskId")
    def disk_id(self) -> Optional[str]:
        """The disk id."""
        return pulumi.get(self, "disk_id")

    @property
    @pulumi.getter(name="diskName")
    def disk_name(self) -> Optional[str]:
        """The disk name."""
        return pulumi.get(self, "disk_name")

    @property
    @pulumi.getter(name="diskResized")
    def disk_resized(self) -> Optional[str]:
        """A value indicating whether disk is resized."""
        return pulumi.get(self, "disk_resized")

    @property
    @pulumi.getter(name="fileSystemCapacityInBytes")
    def file_system_capacity_in_bytes(self) -> Optional[int]:
        """The file system capacity in bytes."""
        return pulumi.get(self, "file_system_capacity_in_bytes")

    @property
    @pulumi.getter(name="healthErrorCode")
    def health_error_code(self) -> Optional[str]:
        """The health error code for the disk."""
        return pulumi.get(self, "health_error_code")

    @property
    @pulumi.getter(name="lastRpoCalculatedTime")
    def last_rpo_calculated_time(self) -> Optional[str]:
        """The last RPO calculated time."""
        return pulumi.get(self, "last_rpo_calculated_time")

    @property
    @pulumi.getter(name="protectionStage")
    def protection_stage(self) -> Optional[str]:
        """The protection stage."""
        return pulumi.get(self, "protection_stage")

    @property
    @pulumi.getter(name="psDataInMB")
    def ps_data_in_mb(self) -> Optional[float]:
        """The PS data transit in MB."""
        return pulumi.get(self, "ps_data_in_mb")

    @property
    @pulumi.getter(name="resyncDurationInSeconds")
    def resync_duration_in_seconds(self) -> Optional[int]:
        """The resync duration in seconds."""
        return pulumi.get(self, "resync_duration_in_seconds")

    @property
    @pulumi.getter(name="resyncProgressPercentage")
    def resync_progress_percentage(self) -> Optional[int]:
        """The resync progress percentage."""
        return pulumi.get(self, "resync_progress_percentage")

    @property
    @pulumi.getter(name="resyncRequired")
    def resync_required(self) -> Optional[str]:
        """A value indicating whether resync is required for this disk."""
        return pulumi.get(self, "resync_required")

    @property
    @pulumi.getter(name="rpoInSeconds")
    def rpo_in_seconds(self) -> Optional[int]:
        """The RPO in seconds."""
        return pulumi.get(self, "rpo_in_seconds")

    @property
    @pulumi.getter(name="sourceDataInMB")
    def source_data_in_mb(self) -> Optional[float]:
        """The source data transit in MB."""
        return pulumi.get(self, "source_data_in_mb")

    @property
    @pulumi.getter(name="targetDataInMB")
    def target_data_in_mb(self) -> Optional[float]:
        """The target data transit in MB."""
        return pulumi.get(self, "target_data_in_mb")

    def _translate_property(self, prop):
        # Fall back to the raw name when no snake_case translation exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InMageRcmAgentUpgradeBlockingErrorDetailsResponse(dict):
    """
    InMageRcm source agent upgrade blocking error details.
    """
    def __init__(__self__, *,
                 error_code: str,
                 error_message: str,
                 error_message_parameters: Mapping[str, str],
                 error_tags: Mapping[str, str],
                 possible_causes: str,
                 recommended_action: str):
        """
        InMageRcm source agent upgrade blocking error details.
        :param str error_code: The error code.
        :param str error_message: The error message.
        :param Mapping[str, str] error_message_parameters: The error message parameters.
        :param Mapping[str, str] error_tags: The error tags.
        :param str possible_causes: The possible causes.
        :param str recommended_action: The recommended action.
        """
        # All fields are required and stored verbatim, in declaration order.
        for field_name, field_value in (
            ("error_code", error_code),
            ("error_message", error_message),
            ("error_message_parameters", error_message_parameters),
            ("error_tags", error_tags),
            ("possible_causes", possible_causes),
            ("recommended_action", recommended_action),
        ):
            pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="errorCode")
    def error_code(self) -> str:
        """The error code."""
        return pulumi.get(self, "error_code")

    @property
    @pulumi.getter(name="errorMessage")
    def error_message(self) -> str:
        """The error message."""
        return pulumi.get(self, "error_message")

    @property
    @pulumi.getter(name="errorMessageParameters")
    def error_message_parameters(self) -> Mapping[str, str]:
        """The error message parameters."""
        return pulumi.get(self, "error_message_parameters")

    @property
    @pulumi.getter(name="errorTags")
    def error_tags(self) -> Mapping[str, str]:
        """The error tags."""
        return pulumi.get(self, "error_tags")

    @property
    @pulumi.getter(name="possibleCauses")
    def possible_causes(self) -> str:
        """The possible causes."""
        return pulumi.get(self, "possible_causes")

    @property
    @pulumi.getter(name="recommendedAction")
    def recommended_action(self) -> str:
        """The recommended action."""
        return pulumi.get(self, "recommended_action")

    def _translate_property(self, prop):
        # Fall back to the raw name when no snake_case translation exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InMageRcmFabricSpecificDetailsResponse(dict):
    """
    InMageRcm fabric specific details.
    """
    def __init__(__self__, *,
                 agent_details: Sequence['outputs.AgentDetailsResponse'],
                 control_plane_uri: str,
                 data_plane_uri: str,
                 dras: Sequence['outputs.DraDetailsResponse'],
                 instance_type: str,
                 physical_site_id: str,
                 process_servers: Sequence['outputs.ProcessServerDetailsResponse'],
                 push_installers: Sequence['outputs.PushInstallerDetailsResponse'],
                 rcm_proxies: Sequence['outputs.RcmProxyDetailsResponse'],
                 replication_agents: Sequence['outputs.ReplicationAgentDetailsResponse'],
                 reprotect_agents: Sequence['outputs.ReprotectAgentDetailsResponse'],
                 service_container_id: str,
                 service_endpoint: str,
                 service_resource_id: str,
                 vmware_site_id: str):
        """
        InMageRcm fabric specific details.
        :param Sequence['AgentDetailsResponseArgs'] agent_details: The list of agent details.
        :param str control_plane_uri: The control plane Uri.
        :param str data_plane_uri: The data plane Uri.
        :param Sequence['DraDetailsResponseArgs'] dras: The list of DRAs.
        :param str instance_type: Gets the class type. Overridden in derived classes.
        :param str physical_site_id: The ARM Id of the physical site.
        :param Sequence['ProcessServerDetailsResponseArgs'] process_servers: The list of process servers.
        :param Sequence['PushInstallerDetailsResponseArgs'] push_installers: The list of push installers.
        :param Sequence['RcmProxyDetailsResponseArgs'] rcm_proxies: The list of RCM proxies.
        :param Sequence['ReplicationAgentDetailsResponseArgs'] replication_agents: The list of replication agents.
        :param Sequence['ReprotectAgentDetailsResponseArgs'] reprotect_agents: The list of reprotect agents.
        :param str service_container_id: The service container Id.
        :param str service_endpoint: The service endpoint.
        :param str service_resource_id: The service resource Id.
        :param str vmware_site_id: The ARM Id of the VMware site.
        """
        # All fields are stored in declaration order; the discriminator is
        # always the fixed 'InMageRcm' tag, regardless of the argument.
        for field_name, field_value in (
            ("agent_details", agent_details),
            ("control_plane_uri", control_plane_uri),
            ("data_plane_uri", data_plane_uri),
            ("dras", dras),
            ("instance_type", 'InMageRcm'),
            ("physical_site_id", physical_site_id),
            ("process_servers", process_servers),
            ("push_installers", push_installers),
            ("rcm_proxies", rcm_proxies),
            ("replication_agents", replication_agents),
            ("reprotect_agents", reprotect_agents),
            ("service_container_id", service_container_id),
            ("service_endpoint", service_endpoint),
            ("service_resource_id", service_resource_id),
            ("vmware_site_id", vmware_site_id),
        ):
            pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="agentDetails")
    def agent_details(self) -> Sequence['outputs.AgentDetailsResponse']:
        """The list of agent details."""
        return pulumi.get(self, "agent_details")

    @property
    @pulumi.getter(name="controlPlaneUri")
    def control_plane_uri(self) -> str:
        """The control plane Uri."""
        return pulumi.get(self, "control_plane_uri")

    @property
    @pulumi.getter(name="dataPlaneUri")
    def data_plane_uri(self) -> str:
        """The data plane Uri."""
        return pulumi.get(self, "data_plane_uri")

    @property
    @pulumi.getter
    def dras(self) -> Sequence['outputs.DraDetailsResponse']:
        """The list of DRAs."""
        return pulumi.get(self, "dras")

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Gets the class type. Overridden in derived classes."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="physicalSiteId")
    def physical_site_id(self) -> str:
        """The ARM Id of the physical site."""
        return pulumi.get(self, "physical_site_id")

    @property
    @pulumi.getter(name="processServers")
    def process_servers(self) -> Sequence['outputs.ProcessServerDetailsResponse']:
        """The list of process servers."""
        return pulumi.get(self, "process_servers")

    @property
    @pulumi.getter(name="pushInstallers")
    def push_installers(self) -> Sequence['outputs.PushInstallerDetailsResponse']:
        """The list of push installers."""
        return pulumi.get(self, "push_installers")

    @property
    @pulumi.getter(name="rcmProxies")
    def rcm_proxies(self) -> Sequence['outputs.RcmProxyDetailsResponse']:
        """The list of RCM proxies."""
        return pulumi.get(self, "rcm_proxies")

    @property
    @pulumi.getter(name="replicationAgents")
    def replication_agents(self) -> Sequence['outputs.ReplicationAgentDetailsResponse']:
        """The list of replication agents."""
        return pulumi.get(self, "replication_agents")

    @property
    @pulumi.getter(name="reprotectAgents")
    def reprotect_agents(self) -> Sequence['outputs.ReprotectAgentDetailsResponse']:
        """The list of reprotect agents."""
        return pulumi.get(self, "reprotect_agents")

    @property
    @pulumi.getter(name="serviceContainerId")
    def service_container_id(self) -> str:
        """The service container Id."""
        return pulumi.get(self, "service_container_id")

    @property
    @pulumi.getter(name="serviceEndpoint")
    def service_endpoint(self) -> str:
        """The service endpoint."""
        return pulumi.get(self, "service_endpoint")

    @property
    @pulumi.getter(name="serviceResourceId")
    def service_resource_id(self) -> str:
        """The service resource Id."""
        return pulumi.get(self, "service_resource_id")

    @property
    @pulumi.getter(name="vmwareSiteId")
    def vmware_site_id(self) -> str:
        """The ARM Id of the VMware site."""
        return pulumi.get(self, "vmware_site_id")

    def _translate_property(self, prop):
        # Fall back to the raw name when no snake_case translation exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InMageRcmLastAgentUpgradeErrorDetailsResponse(dict):
    """
    InMageRcm last source agent upgrade error details.
    """
    def __init__(__self__, *,
                 error_code: str,
                 error_message: str,
                 error_message_parameters: Mapping[str, str],
                 error_tags: Mapping[str, str],
                 possible_causes: str,
                 recommended_action: str):
        """
        InMageRcm last source agent upgrade error details.
        :param str error_code: The error code.
        :param str error_message: The error message.
        :param Mapping[str, str] error_message_parameters: The error message parameters.
        :param Mapping[str, str] error_tags: The error tags.
        :param str possible_causes: The possible causes.
        :param str recommended_action: The recommended action.
        """
        # All fields are required and stored verbatim, in declaration order.
        for field_name, field_value in (
            ("error_code", error_code),
            ("error_message", error_message),
            ("error_message_parameters", error_message_parameters),
            ("error_tags", error_tags),
            ("possible_causes", possible_causes),
            ("recommended_action", recommended_action),
        ):
            pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="errorCode")
    def error_code(self) -> str:
        """The error code."""
        return pulumi.get(self, "error_code")

    @property
    @pulumi.getter(name="errorMessage")
    def error_message(self) -> str:
        """The error message."""
        return pulumi.get(self, "error_message")

    @property
    @pulumi.getter(name="errorMessageParameters")
    def error_message_parameters(self) -> Mapping[str, str]:
        """The error message parameters."""
        return pulumi.get(self, "error_message_parameters")

    @property
    @pulumi.getter(name="errorTags")
    def error_tags(self) -> Mapping[str, str]:
        """The error tags."""
        return pulumi.get(self, "error_tags")

    @property
    @pulumi.getter(name="possibleCauses")
    def possible_causes(self) -> str:
        """The possible causes."""
        return pulumi.get(self, "possible_causes")

    @property
    @pulumi.getter(name="recommendedAction")
    def recommended_action(self) -> str:
        """The recommended action."""
        return pulumi.get(self, "recommended_action")

    def _translate_property(self, prop):
        # Fall back to the raw name when no snake_case translation exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InMageRcmMobilityAgentDetailsResponse(dict):
    """
    InMageRcm mobility agent details.
    """
    def __init__(__self__, *,
                 agent_version_expiry_date: str,
                 driver_version: str,
                 driver_version_expiry_date: str,
                 is_upgradeable: str,
                 last_heartbeat_utc: str,
                 latest_upgradable_version_without_reboot: str,
                 latest_version: str,
                 reasons_blocking_upgrade: Sequence[str],
                 version: str):
        """
        InMageRcm mobility agent details.
        :param str agent_version_expiry_date: The agent version expiry date.
        :param str driver_version: The driver version.
        :param str driver_version_expiry_date: The driver version expiry date.
        :param str is_upgradeable: A value indicating whether agent is upgradeable or not.
        :param str last_heartbeat_utc: The time of the last heartbeat received from the agent.
        :param str latest_upgradable_version_without_reboot: The latest upgradeable version available without reboot.
        :param str latest_version: The latest agent version available.
        :param Sequence[str] reasons_blocking_upgrade: The whether update is possible or not.
        :param str version: The agent version.
        """
        # All fields are required and stored verbatim, in declaration order.
        for field_name, field_value in (
            ("agent_version_expiry_date", agent_version_expiry_date),
            ("driver_version", driver_version),
            ("driver_version_expiry_date", driver_version_expiry_date),
            ("is_upgradeable", is_upgradeable),
            ("last_heartbeat_utc", last_heartbeat_utc),
            ("latest_upgradable_version_without_reboot", latest_upgradable_version_without_reboot),
            ("latest_version", latest_version),
            ("reasons_blocking_upgrade", reasons_blocking_upgrade),
            ("version", version),
        ):
            pulumi.set(__self__, field_name, field_value)

    @property
    @pulumi.getter(name="agentVersionExpiryDate")
    def agent_version_expiry_date(self) -> str:
        """The agent version expiry date."""
        return pulumi.get(self, "agent_version_expiry_date")

    @property
    @pulumi.getter(name="driverVersion")
    def driver_version(self) -> str:
        """The driver version."""
        return pulumi.get(self, "driver_version")

    @property
    @pulumi.getter(name="driverVersionExpiryDate")
    def driver_version_expiry_date(self) -> str:
        """The driver version expiry date."""
        return pulumi.get(self, "driver_version_expiry_date")

    @property
    @pulumi.getter(name="isUpgradeable")
    def is_upgradeable(self) -> str:
        """A value indicating whether agent is upgradeable or not."""
        return pulumi.get(self, "is_upgradeable")

    @property
    @pulumi.getter(name="lastHeartbeatUtc")
    def last_heartbeat_utc(self) -> str:
        """The time of the last heartbeat received from the agent."""
        return pulumi.get(self, "last_heartbeat_utc")

    @property
    @pulumi.getter(name="latestUpgradableVersionWithoutReboot")
    def latest_upgradable_version_without_reboot(self) -> str:
        """The latest upgradeable version available without reboot."""
        return pulumi.get(self, "latest_upgradable_version_without_reboot")

    @property
    @pulumi.getter(name="latestVersion")
    def latest_version(self) -> str:
        """The latest agent version available."""
        return pulumi.get(self, "latest_version")

    @property
    @pulumi.getter(name="reasonsBlockingUpgrade")
    def reasons_blocking_upgrade(self) -> Sequence[str]:
        """The whether update is possible or not."""
        return pulumi.get(self, "reasons_blocking_upgrade")

    @property
    @pulumi.getter
    def version(self) -> str:
        """The agent version."""
        return pulumi.get(self, "version")

    def _translate_property(self, prop):
        # Fall back to the raw name when no snake_case translation exists.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InMageRcmNicDetailsResponse(dict):
"""
InMageRcm NIC details.
"""
def __init__(__self__, *,
is_primary_nic: str,
is_selected_for_failover: str,
nic_id: str,
source_ip_address: str,
source_ip_address_type: str,
source_network_id: str,
source_subnet_name: str,
target_ip_address: str,
target_ip_address_type: str,
target_subnet_name: str,
test_ip_address: str,
test_ip_address_type: str,
test_subnet_name: str):
"""
InMageRcm NIC details.
:param str is_primary_nic: A value indicating whether this is the primary NIC.
:param str is_selected_for_failover: A value indicating whether this NIC is selected for failover.
:param str nic_id: The NIC Id.
:param str source_ip_address: The source IP address.
:param str source_ip_address_type: The source IP address type.
:param str source_network_id: Source network Id.
:param str source_subnet_name: Source subnet name.
:param str target_ip_address: The target IP address.
:param str target_ip_address_type: The target IP address type.
:param str target_subnet_name: Target subnet name.
:param str test_ip_address: The test IP address.
:param str test_ip_address_type: The test IP address type.
:param str test_subnet_name: Test subnet name.
"""
pulumi.set(__self__, "is_primary_nic", is_primary_nic)
pulumi.set(__self__, "is_selected_for_failover", is_selected_for_failover)
pulumi.set(__self__, "nic_id", nic_id)
pulumi.set(__self__, "source_ip_address", source_ip_address)
pulumi.set(__self__, "source_ip_address_type", source_ip_address_type)
pulumi.set(__self__, "source_network_id", source_network_id)
pulumi.set(__self__, "source_subnet_name", source_subnet_name)
pulumi.set(__self__, "target_ip_address", target_ip_address)
pulumi.set(__self__, "target_ip_address_type", target_ip_address_type)
pulumi.set(__self__, "target_subnet_name", target_subnet_name)
pulumi.set(__self__, "test_ip_address", test_ip_address)
pulumi.set(__self__, "test_ip_address_type", test_ip_address_type)
pulumi.set(__self__, "test_subnet_name", test_subnet_name)
@property
@pulumi.getter(name="isPrimaryNic")
def is_primary_nic(self) -> str:
"""
A value indicating whether this is the primary NIC.
"""
return pulumi.get(self, "is_primary_nic")
@property
@pulumi.getter(name="isSelectedForFailover")
def is_selected_for_failover(self) -> str:
"""
A value indicating whether this NIC is selected for failover.
"""
return pulumi.get(self, "is_selected_for_failover")
@property
@pulumi.getter(name="nicId")
def nic_id(self) -> str:
"""
The NIC Id.
"""
return pulumi.get(self, "nic_id")
@property
@pulumi.getter(name="sourceIPAddress")
def source_ip_address(self) -> str:
"""
The source IP address.
"""
return pulumi.get(self, "source_ip_address")
@property
@pulumi.getter(name="sourceIPAddressType")
def source_ip_address_type(self) -> str:
"""
The source IP address type.
"""
return pulumi.get(self, "source_ip_address_type")
@property
@pulumi.getter(name="sourceNetworkId")
def source_network_id(self) -> str:
"""
Source network Id.
"""
return pulumi.get(self, "source_network_id")
@property
@pulumi.getter(name="sourceSubnetName")
def source_subnet_name(self) -> str:
"""
Source subnet name.
"""
return pulumi.get(self, "source_subnet_name")
@property
@pulumi.getter(name="targetIPAddress")
def target_ip_address(self) -> str:
"""
The target IP address.
"""
return pulumi.get(self, "target_ip_address")
@property
@pulumi.getter(name="targetIPAddressType")
def target_ip_address_type(self) -> str:
"""
The target IP address type.
"""
return pulumi.get(self, "target_ip_address_type")
@property
@pulumi.getter(name="targetSubnetName")
def target_subnet_name(self) -> str:
"""
Target subnet name.
"""
return pulumi.get(self, "target_subnet_name")
@property
@pulumi.getter(name="testIPAddress")
def test_ip_address(self) -> str:
"""
The test IP address.
"""
return pulumi.get(self, "test_ip_address")
@property
@pulumi.getter(name="testIPAddressType")
def test_ip_address_type(self) -> str:
"""
The test IP address type.
"""
return pulumi.get(self, "test_ip_address_type")
@property
@pulumi.getter(name="testSubnetName")
def test_subnet_name(self) -> str:
"""
Test subnet name.
"""
return pulumi.get(self, "test_subnet_name")
def _translate_property(self, prop):
    """Map a camelCase wire property name to its snake_case Python name, falling back to the input."""
    translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
    return translated if translated else prop
@pulumi.output_type
class InMageRcmPolicyDetailsResponse(dict):
    """InMageRcm specific policy details."""

    def __init__(__self__, *,
                 app_consistent_frequency_in_minutes: int,
                 crash_consistent_frequency_in_minutes: int,
                 enable_multi_vm_sync: str,
                 instance_type: str,
                 recovery_point_history_in_minutes: int):
        """
        InMageRcm specific policy details.

        :param int app_consistent_frequency_in_minutes: The app consistent snapshot frequency in minutes.
        :param int crash_consistent_frequency_in_minutes: The crash consistent snapshot frequency in minutes.
        :param str enable_multi_vm_sync: A value indicating whether multi-VM sync has to be enabled.
        :param str instance_type: Gets the class type. Overridden in derived classes.
        :param int recovery_point_history_in_minutes: The duration in minutes until which the recovery points need to be stored.
        """
        # The discriminator is always stored as the literal 'InMageRcm',
        # regardless of the instance_type argument (codegen convention).
        for key, value in (
            ("app_consistent_frequency_in_minutes", app_consistent_frequency_in_minutes),
            ("crash_consistent_frequency_in_minutes", crash_consistent_frequency_in_minutes),
            ("enable_multi_vm_sync", enable_multi_vm_sync),
            ("instance_type", 'InMageRcm'),
            ("recovery_point_history_in_minutes", recovery_point_history_in_minutes),
        ):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="appConsistentFrequencyInMinutes")
    def app_consistent_frequency_in_minutes(self) -> int:
        """The app consistent snapshot frequency in minutes."""
        return pulumi.get(self, "app_consistent_frequency_in_minutes")

    @property
    @pulumi.getter(name="crashConsistentFrequencyInMinutes")
    def crash_consistent_frequency_in_minutes(self) -> int:
        """The crash consistent snapshot frequency in minutes."""
        return pulumi.get(self, "crash_consistent_frequency_in_minutes")

    @property
    @pulumi.getter(name="enableMultiVmSync")
    def enable_multi_vm_sync(self) -> str:
        """A value indicating whether multi-VM sync has to be enabled."""
        return pulumi.get(self, "enable_multi_vm_sync")

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Gets the class type. Overridden in derived classes."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="recoveryPointHistoryInMinutes")
    def recovery_point_history_in_minutes(self) -> int:
        """The duration in minutes until which the recovery points need to be stored."""
        return pulumi.get(self, "recovery_point_history_in_minutes")

    def _translate_property(self, prop):
        """Map a camelCase wire property name to its snake_case Python name, falling back to the input."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InMageRcmProtectedDiskDetailsResponse(dict):
    """InMageRcm protected disk details."""

    def __init__(__self__, *,
                 capacity_in_bytes: int,
                 disk_encryption_set_id: str,
                 disk_id: str,
                 disk_name: str,
                 disk_type: str,
                 is_os_disk: str,
                 log_storage_account_id: str,
                 seed_managed_disk_id: str,
                 target_managed_disk_id: str):
        """
        InMageRcm protected disk details.

        :param int capacity_in_bytes: The disk capacity in bytes.
        :param str disk_encryption_set_id: The disk encryption set ARM Id.
        :param str disk_id: The disk Id.
        :param str disk_name: The disk name.
        :param str disk_type: The disk type.
        :param str is_os_disk: A value indicating whether the disk is the OS disk.
        :param str log_storage_account_id: The log storage account ARM Id.
        :param str seed_managed_disk_id: The ARM Id of the seed managed disk.
        :param str target_managed_disk_id: The ARM Id of the target managed disk.
        """
        for key, value in (
            ("capacity_in_bytes", capacity_in_bytes),
            ("disk_encryption_set_id", disk_encryption_set_id),
            ("disk_id", disk_id),
            ("disk_name", disk_name),
            ("disk_type", disk_type),
            ("is_os_disk", is_os_disk),
            ("log_storage_account_id", log_storage_account_id),
            ("seed_managed_disk_id", seed_managed_disk_id),
            ("target_managed_disk_id", target_managed_disk_id),
        ):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="capacityInBytes")
    def capacity_in_bytes(self) -> int:
        """The disk capacity in bytes."""
        return pulumi.get(self, "capacity_in_bytes")

    @property
    @pulumi.getter(name="diskEncryptionSetId")
    def disk_encryption_set_id(self) -> str:
        """The disk encryption set ARM Id."""
        return pulumi.get(self, "disk_encryption_set_id")

    @property
    @pulumi.getter(name="diskId")
    def disk_id(self) -> str:
        """The disk Id."""
        return pulumi.get(self, "disk_id")

    @property
    @pulumi.getter(name="diskName")
    def disk_name(self) -> str:
        """The disk name."""
        return pulumi.get(self, "disk_name")

    @property
    @pulumi.getter(name="diskType")
    def disk_type(self) -> str:
        """The disk type."""
        return pulumi.get(self, "disk_type")

    @property
    @pulumi.getter(name="isOSDisk")
    def is_os_disk(self) -> str:
        """A value indicating whether the disk is the OS disk."""
        return pulumi.get(self, "is_os_disk")

    @property
    @pulumi.getter(name="logStorageAccountId")
    def log_storage_account_id(self) -> str:
        """The log storage account ARM Id."""
        return pulumi.get(self, "log_storage_account_id")

    @property
    @pulumi.getter(name="seedManagedDiskId")
    def seed_managed_disk_id(self) -> str:
        """The ARM Id of the seed managed disk."""
        return pulumi.get(self, "seed_managed_disk_id")

    @property
    @pulumi.getter(name="targetManagedDiskId")
    def target_managed_disk_id(self) -> str:
        """The ARM Id of the target managed disk."""
        return pulumi.get(self, "target_managed_disk_id")

    def _translate_property(self, prop):
        """Map a camelCase wire property name to its snake_case Python name, falling back to the input."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InMageRcmReplicationDetailsResponse(dict):
    """InMageRcm provider specific details."""

    def __init__(__self__, *,
                 agent_upgrade_blocking_error_details: Sequence['outputs.InMageRcmAgentUpgradeBlockingErrorDetailsResponse'],
                 agent_upgrade_state: str,
                 allocated_memory_in_mb: float,
                 discovery_type: str,
                 fabric_discovery_machine_id: str,
                 failover_recovery_point_id: str,
                 firmware_type: str,
                 initial_replication_processed_bytes: int,
                 initial_replication_progress_percentage: int,
                 initial_replication_transferred_bytes: int,
                 instance_type: str,
                 internal_identifier: str,
                 is_last_upgrade_successful: str,
                 last_agent_upgrade_error_details: Sequence['outputs.InMageRcmLastAgentUpgradeErrorDetailsResponse'],
                 last_agent_upgrade_failed_job_id: str,
                 last_agent_upgrade_type: str,
                 last_recovery_point_id: str,
                 last_recovery_point_received: str,
                 last_rpo_calculated_time: str,
                 last_rpo_in_seconds: int,
                 license_type: str,
                 mobility_agent_details: 'outputs.InMageRcmMobilityAgentDetailsResponse',
                 multi_vm_group_name: str,
                 os_type: str,
                 process_server_id: str,
                 process_server_name: str,
                 processor_core_count: int,
                 protected_disks: Sequence['outputs.InMageRcmProtectedDiskDetailsResponse'],
                 resync_processed_bytes: int,
                 resync_progress_percentage: int,
                 resync_required: str,
                 resync_state: str,
                 resync_transferred_bytes: int,
                 run_as_account_id: str,
                 target_availability_set_id: str,
                 target_availability_zone: str,
                 target_boot_diagnostics_storage_account_id: str,
                 target_generation: str,
                 target_network_id: str,
                 target_proximity_placement_group_id: str,
                 target_resource_group_id: str,
                 target_vm_name: str,
                 target_vm_size: str,
                 test_network_id: str,
                 vm_nics: Sequence['outputs.InMageRcmNicDetailsResponse']):
        """
        InMageRcm provider specific details.

        :param Sequence['InMageRcmAgentUpgradeBlockingErrorDetailsResponseArgs'] agent_upgrade_blocking_error_details: The agent upgrade blocking error information.
        :param str agent_upgrade_state: The agent auto upgrade state.
        :param float allocated_memory_in_mb: The allocated memory in MB.
        :param str discovery_type: The type of the discovered VM.
        :param str fabric_discovery_machine_id: The ARM Id of the discovered VM.
        :param str failover_recovery_point_id: The recovery point Id to which the VM was failed over.
        :param str firmware_type: The firmware type.
        :param int initial_replication_processed_bytes: The initial replication processed bytes. This includes sum of total bytes transferred and matched bytes on all selected disks in source VM.
        :param int initial_replication_progress_percentage: The initial replication progress percentage. This is calculated based on total bytes processed for all disks in the source VM.
        :param int initial_replication_transferred_bytes: The initial replication transferred bytes from source VM to azure for all selected disks on source VM.
        :param str instance_type: Gets the Instance type.
        :param str internal_identifier: The virtual machine internal identifier.
        :param str is_last_upgrade_successful: A value indicating whether last agent upgrade was successful or not.
        :param Sequence['InMageRcmLastAgentUpgradeErrorDetailsResponseArgs'] last_agent_upgrade_error_details: The last agent upgrade error information.
        :param str last_agent_upgrade_failed_job_id: The last agent upgrade failed or cancelled job Id.
        :param str last_agent_upgrade_type: The last agent upgrade type.
        :param str last_recovery_point_id: The last recovery point Id.
        :param str last_recovery_point_received: The last recovery point received time.
        :param str last_rpo_calculated_time: The last recovery point objective calculated time.
        :param int last_rpo_in_seconds: The last recovery point objective value.
        :param str license_type: License Type of the VM to be used.
        :param 'InMageRcmMobilityAgentDetailsResponseArgs' mobility_agent_details: The mobility agent information.
        :param str multi_vm_group_name: The multi VM group name.
        :param str os_type: The type of the OS on the VM.
        :param str process_server_id: The process server Id.
        :param str process_server_name: The process server name.
        :param int processor_core_count: The processor core count.
        :param Sequence['InMageRcmProtectedDiskDetailsResponseArgs'] protected_disks: The list of protected disks.
        :param int resync_processed_bytes: The resync processed bytes. This includes sum of total bytes transferred and matched bytes on all selected disks in source VM.
        :param int resync_progress_percentage: The resync progress percentage. This is calculated based on total bytes processed for all disks in the source VM.
        :param str resync_required: A value indicating whether resync is required.
        :param str resync_state: The resync state.
        :param int resync_transferred_bytes: The resync transferred bytes from source VM to azure for all selected disks on source VM.
        :param str run_as_account_id: The run-as account Id.
        :param str target_availability_set_id: The target availability set Id.
        :param str target_availability_zone: The target availability zone.
        :param str target_boot_diagnostics_storage_account_id: The target boot diagnostics storage account ARM Id.
        :param str target_generation: The target generation.
        :param str target_network_id: The target network Id.
        :param str target_proximity_placement_group_id: The target proximity placement group Id.
        :param str target_resource_group_id: The target resource group Id.
        :param str target_vm_name: Target VM name.
        :param str target_vm_size: The target VM size.
        :param str test_network_id: The test network Id.
        :param Sequence['InMageRcmNicDetailsResponseArgs'] vm_nics: The network details.
        """
        # The discriminator is always stored as the literal 'InMageRcm',
        # regardless of the instance_type argument (codegen convention).
        for key, value in (
            ("agent_upgrade_blocking_error_details", agent_upgrade_blocking_error_details),
            ("agent_upgrade_state", agent_upgrade_state),
            ("allocated_memory_in_mb", allocated_memory_in_mb),
            ("discovery_type", discovery_type),
            ("fabric_discovery_machine_id", fabric_discovery_machine_id),
            ("failover_recovery_point_id", failover_recovery_point_id),
            ("firmware_type", firmware_type),
            ("initial_replication_processed_bytes", initial_replication_processed_bytes),
            ("initial_replication_progress_percentage", initial_replication_progress_percentage),
            ("initial_replication_transferred_bytes", initial_replication_transferred_bytes),
            ("instance_type", 'InMageRcm'),
            ("internal_identifier", internal_identifier),
            ("is_last_upgrade_successful", is_last_upgrade_successful),
            ("last_agent_upgrade_error_details", last_agent_upgrade_error_details),
            ("last_agent_upgrade_failed_job_id", last_agent_upgrade_failed_job_id),
            ("last_agent_upgrade_type", last_agent_upgrade_type),
            ("last_recovery_point_id", last_recovery_point_id),
            ("last_recovery_point_received", last_recovery_point_received),
            ("last_rpo_calculated_time", last_rpo_calculated_time),
            ("last_rpo_in_seconds", last_rpo_in_seconds),
            ("license_type", license_type),
            ("mobility_agent_details", mobility_agent_details),
            ("multi_vm_group_name", multi_vm_group_name),
            ("os_type", os_type),
            ("process_server_id", process_server_id),
            ("process_server_name", process_server_name),
            ("processor_core_count", processor_core_count),
            ("protected_disks", protected_disks),
            ("resync_processed_bytes", resync_processed_bytes),
            ("resync_progress_percentage", resync_progress_percentage),
            ("resync_required", resync_required),
            ("resync_state", resync_state),
            ("resync_transferred_bytes", resync_transferred_bytes),
            ("run_as_account_id", run_as_account_id),
            ("target_availability_set_id", target_availability_set_id),
            ("target_availability_zone", target_availability_zone),
            ("target_boot_diagnostics_storage_account_id", target_boot_diagnostics_storage_account_id),
            ("target_generation", target_generation),
            ("target_network_id", target_network_id),
            ("target_proximity_placement_group_id", target_proximity_placement_group_id),
            ("target_resource_group_id", target_resource_group_id),
            ("target_vm_name", target_vm_name),
            ("target_vm_size", target_vm_size),
            ("test_network_id", test_network_id),
            ("vm_nics", vm_nics),
        ):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="agentUpgradeBlockingErrorDetails")
    def agent_upgrade_blocking_error_details(self) -> Sequence['outputs.InMageRcmAgentUpgradeBlockingErrorDetailsResponse']:
        """The agent upgrade blocking error information."""
        return pulumi.get(self, "agent_upgrade_blocking_error_details")

    @property
    @pulumi.getter(name="agentUpgradeState")
    def agent_upgrade_state(self) -> str:
        """The agent auto upgrade state."""
        return pulumi.get(self, "agent_upgrade_state")

    @property
    @pulumi.getter(name="allocatedMemoryInMB")
    def allocated_memory_in_mb(self) -> float:
        """The allocated memory in MB."""
        return pulumi.get(self, "allocated_memory_in_mb")

    @property
    @pulumi.getter(name="discoveryType")
    def discovery_type(self) -> str:
        """The type of the discovered VM."""
        return pulumi.get(self, "discovery_type")

    @property
    @pulumi.getter(name="fabricDiscoveryMachineId")
    def fabric_discovery_machine_id(self) -> str:
        """The ARM Id of the discovered VM."""
        return pulumi.get(self, "fabric_discovery_machine_id")

    @property
    @pulumi.getter(name="failoverRecoveryPointId")
    def failover_recovery_point_id(self) -> str:
        """The recovery point Id to which the VM was failed over."""
        return pulumi.get(self, "failover_recovery_point_id")

    @property
    @pulumi.getter(name="firmwareType")
    def firmware_type(self) -> str:
        """The firmware type."""
        return pulumi.get(self, "firmware_type")

    @property
    @pulumi.getter(name="initialReplicationProcessedBytes")
    def initial_replication_processed_bytes(self) -> int:
        """The initial replication processed bytes. This includes sum of total bytes transferred and matched bytes on all selected disks in source VM."""
        return pulumi.get(self, "initial_replication_processed_bytes")

    @property
    @pulumi.getter(name="initialReplicationProgressPercentage")
    def initial_replication_progress_percentage(self) -> int:
        """The initial replication progress percentage. This is calculated based on total bytes processed for all disks in the source VM."""
        return pulumi.get(self, "initial_replication_progress_percentage")

    @property
    @pulumi.getter(name="initialReplicationTransferredBytes")
    def initial_replication_transferred_bytes(self) -> int:
        """The initial replication transferred bytes from source VM to azure for all selected disks on source VM."""
        return pulumi.get(self, "initial_replication_transferred_bytes")

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Gets the Instance type."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="internalIdentifier")
    def internal_identifier(self) -> str:
        """The virtual machine internal identifier."""
        return pulumi.get(self, "internal_identifier")

    @property
    @pulumi.getter(name="isLastUpgradeSuccessful")
    def is_last_upgrade_successful(self) -> str:
        """A value indicating whether last agent upgrade was successful or not."""
        return pulumi.get(self, "is_last_upgrade_successful")

    @property
    @pulumi.getter(name="lastAgentUpgradeErrorDetails")
    def last_agent_upgrade_error_details(self) -> Sequence['outputs.InMageRcmLastAgentUpgradeErrorDetailsResponse']:
        """The last agent upgrade error information."""
        return pulumi.get(self, "last_agent_upgrade_error_details")

    @property
    @pulumi.getter(name="lastAgentUpgradeFailedJobId")
    def last_agent_upgrade_failed_job_id(self) -> str:
        """The last agent upgrade failed or cancelled job Id."""
        return pulumi.get(self, "last_agent_upgrade_failed_job_id")

    @property
    @pulumi.getter(name="lastAgentUpgradeType")
    def last_agent_upgrade_type(self) -> str:
        """The last agent upgrade type."""
        return pulumi.get(self, "last_agent_upgrade_type")

    @property
    @pulumi.getter(name="lastRecoveryPointId")
    def last_recovery_point_id(self) -> str:
        """The last recovery point Id."""
        return pulumi.get(self, "last_recovery_point_id")

    @property
    @pulumi.getter(name="lastRecoveryPointReceived")
    def last_recovery_point_received(self) -> str:
        """The last recovery point received time."""
        return pulumi.get(self, "last_recovery_point_received")

    @property
    @pulumi.getter(name="lastRpoCalculatedTime")
    def last_rpo_calculated_time(self) -> str:
        """The last recovery point objective calculated time."""
        return pulumi.get(self, "last_rpo_calculated_time")

    @property
    @pulumi.getter(name="lastRpoInSeconds")
    def last_rpo_in_seconds(self) -> int:
        """The last recovery point objective value."""
        return pulumi.get(self, "last_rpo_in_seconds")

    @property
    @pulumi.getter(name="licenseType")
    def license_type(self) -> str:
        """License Type of the VM to be used."""
        return pulumi.get(self, "license_type")

    @property
    @pulumi.getter(name="mobilityAgentDetails")
    def mobility_agent_details(self) -> 'outputs.InMageRcmMobilityAgentDetailsResponse':
        """The mobility agent information."""
        return pulumi.get(self, "mobility_agent_details")

    @property
    @pulumi.getter(name="multiVmGroupName")
    def multi_vm_group_name(self) -> str:
        """The multi VM group name."""
        return pulumi.get(self, "multi_vm_group_name")

    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> str:
        """The type of the OS on the VM."""
        return pulumi.get(self, "os_type")

    @property
    @pulumi.getter(name="processServerId")
    def process_server_id(self) -> str:
        """The process server Id."""
        return pulumi.get(self, "process_server_id")

    @property
    @pulumi.getter(name="processServerName")
    def process_server_name(self) -> str:
        """The process server name."""
        return pulumi.get(self, "process_server_name")

    @property
    @pulumi.getter(name="processorCoreCount")
    def processor_core_count(self) -> int:
        """The processor core count."""
        return pulumi.get(self, "processor_core_count")

    @property
    @pulumi.getter(name="protectedDisks")
    def protected_disks(self) -> Sequence['outputs.InMageRcmProtectedDiskDetailsResponse']:
        """The list of protected disks."""
        return pulumi.get(self, "protected_disks")

    @property
    @pulumi.getter(name="resyncProcessedBytes")
    def resync_processed_bytes(self) -> int:
        """The resync processed bytes. This includes sum of total bytes transferred and matched bytes on all selected disks in source VM."""
        return pulumi.get(self, "resync_processed_bytes")

    @property
    @pulumi.getter(name="resyncProgressPercentage")
    def resync_progress_percentage(self) -> int:
        """The resync progress percentage. This is calculated based on total bytes processed for all disks in the source VM."""
        return pulumi.get(self, "resync_progress_percentage")

    @property
    @pulumi.getter(name="resyncRequired")
    def resync_required(self) -> str:
        """A value indicating whether resync is required."""
        return pulumi.get(self, "resync_required")

    @property
    @pulumi.getter(name="resyncState")
    def resync_state(self) -> str:
        """The resync state."""
        return pulumi.get(self, "resync_state")

    @property
    @pulumi.getter(name="resyncTransferredBytes")
    def resync_transferred_bytes(self) -> int:
        """The resync transferred bytes from source VM to azure for all selected disks on source VM."""
        return pulumi.get(self, "resync_transferred_bytes")

    @property
    @pulumi.getter(name="runAsAccountId")
    def run_as_account_id(self) -> str:
        """The run-as account Id."""
        return pulumi.get(self, "run_as_account_id")

    @property
    @pulumi.getter(name="targetAvailabilitySetId")
    def target_availability_set_id(self) -> str:
        """The target availability set Id."""
        return pulumi.get(self, "target_availability_set_id")

    @property
    @pulumi.getter(name="targetAvailabilityZone")
    def target_availability_zone(self) -> str:
        """The target availability zone."""
        return pulumi.get(self, "target_availability_zone")

    @property
    @pulumi.getter(name="targetBootDiagnosticsStorageAccountId")
    def target_boot_diagnostics_storage_account_id(self) -> str:
        """The target boot diagnostics storage account ARM Id."""
        return pulumi.get(self, "target_boot_diagnostics_storage_account_id")

    @property
    @pulumi.getter(name="targetGeneration")
    def target_generation(self) -> str:
        """The target generation."""
        return pulumi.get(self, "target_generation")

    @property
    @pulumi.getter(name="targetNetworkId")
    def target_network_id(self) -> str:
        """The target network Id."""
        return pulumi.get(self, "target_network_id")

    @property
    @pulumi.getter(name="targetProximityPlacementGroupId")
    def target_proximity_placement_group_id(self) -> str:
        """The target proximity placement group Id."""
        return pulumi.get(self, "target_proximity_placement_group_id")

    @property
    @pulumi.getter(name="targetResourceGroupId")
    def target_resource_group_id(self) -> str:
        """The target resource group Id."""
        return pulumi.get(self, "target_resource_group_id")

    @property
    @pulumi.getter(name="targetVmName")
    def target_vm_name(self) -> str:
        """Target VM name."""
        return pulumi.get(self, "target_vm_name")

    @property
    @pulumi.getter(name="targetVmSize")
    def target_vm_size(self) -> str:
        """The target VM size."""
        return pulumi.get(self, "target_vm_size")

    @property
    @pulumi.getter(name="testNetworkId")
    def test_network_id(self) -> str:
        """The test network Id."""
        return pulumi.get(self, "test_network_id")

    @property
    @pulumi.getter(name="vmNics")
    def vm_nics(self) -> Sequence['outputs.InMageRcmNicDetailsResponse']:
        """The network details."""
        return pulumi.get(self, "vm_nics")

    def _translate_property(self, prop):
        """Map a camelCase wire property name to its snake_case Python name, falling back to the input."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InMageReplicationDetailsResponse(dict):
"""
InMage provider specific settings
"""
def __init__(__self__, *,
             instance_type: str,
             active_site_type: Optional[str] = None,
             agent_details: Optional['outputs.InMageAgentDetailsResponse'] = None,
             azure_storage_account_id: Optional[str] = None,
             compressed_data_rate_in_mb: Optional[float] = None,
             consistency_points: Optional[Mapping[str, str]] = None,
             datastores: Optional[Sequence[str]] = None,
             discovery_type: Optional[str] = None,
             disk_resized: Optional[str] = None,
             infrastructure_vm_id: Optional[str] = None,
             ip_address: Optional[str] = None,
             last_heartbeat: Optional[str] = None,
             last_rpo_calculated_time: Optional[str] = None,
             last_update_received_time: Optional[str] = None,
             master_target_id: Optional[str] = None,
             multi_vm_group_id: Optional[str] = None,
             multi_vm_group_name: Optional[str] = None,
             multi_vm_sync_status: Optional[str] = None,
             os_details: Optional['outputs.OSDiskDetailsResponse'] = None,
             os_version: Optional[str] = None,
             process_server_id: Optional[str] = None,
             protected_disks: Optional[Sequence['outputs.InMageProtectedDiskDetailsResponse']] = None,
             protection_stage: Optional[str] = None,
             reboot_after_update_status: Optional[str] = None,
             replica_id: Optional[str] = None,
             resync_details: Optional['outputs.InitialReplicationDetailsResponse'] = None,
             retention_window_end: Optional[str] = None,
             retention_window_start: Optional[str] = None,
             rpo_in_seconds: Optional[int] = None,
             source_vm_cpu_count: Optional[int] = None,
             source_vm_ram_size_in_mb: Optional[int] = None,
             uncompressed_data_rate_in_mb: Optional[float] = None,
             v_center_infrastructure_id: Optional[str] = None,
             validation_errors: Optional[Sequence['outputs.HealthErrorResponse']] = None,
             vm_id: Optional[str] = None,
             vm_nics: Optional[Sequence['outputs.VMNicDetailsResponse']] = None,
             vm_protection_state: Optional[str] = None,
             vm_protection_state_description: Optional[str] = None):
    """
    InMage provider specific settings

    :param str instance_type: Gets the Instance type.
    :param str active_site_type: The active location of the VM. If the VM is being protected from Azure, this field will take values from { Azure, OnPrem }. If the VM is being protected between two data-centers, this field will be OnPrem always.
    :param 'InMageAgentDetailsResponseArgs' agent_details: The agent details.
    :param str azure_storage_account_id: A value indicating the underlying Azure storage account. If the VM is not running in Azure, this value shall be set to null.
    :param float compressed_data_rate_in_mb: The compressed data change rate in MB.
    :param Mapping[str, str] consistency_points: The collection of Consistency points.
    :param Sequence[str] datastores: The data stores of the on-premise machine Value can be list of strings that contain data store names
    :param str discovery_type: A value indicating the discovery type of the machine.
    :param str disk_resized: A value indicating whether any disk is resized for this VM.
    :param str infrastructure_vm_id: The infrastructure VM Id.
    :param str ip_address: The source IP address.
    :param str last_heartbeat: The last heartbeat received from the source server.
    :param str last_rpo_calculated_time: The last RPO calculated time.
    :param str last_update_received_time: The last update time received from on-prem components.
    :param str master_target_id: The master target Id.
    :param str multi_vm_group_id: The multi vm group Id, if any.
    :param str multi_vm_group_name: The multi vm group name, if any.
    :param str multi_vm_sync_status: A value indicating whether the multi vm sync is enabled or disabled.
    :param 'OSDiskDetailsResponseArgs' os_details: The OS details.
    :param str os_version: The OS Version of the protected item.
    :param str process_server_id: The process server Id.
    :param Sequence['InMageProtectedDiskDetailsResponseArgs'] protected_disks: The list of protected disks.
    :param str protection_stage: The protection stage.
    :param str reboot_after_update_status: A value indicating whether the source server requires a restart after update.
    :param str replica_id: The replica id of the protected item.
    :param 'InitialReplicationDetailsResponseArgs' resync_details: The resync details of the machine
    :param str retention_window_end: The retention window end time.
    :param str retention_window_start: The retention window start time.
    :param int rpo_in_seconds: The RPO in seconds.
    :param int source_vm_cpu_count: The CPU count of the VM on the primary side.
    :param int source_vm_ram_size_in_mb: The RAM size of the VM on the primary side.
    :param float uncompressed_data_rate_in_mb: The uncompressed data change rate in MB.
    :param str v_center_infrastructure_id: The vCenter infrastructure Id.
    :param Sequence['HealthErrorResponseArgs'] validation_errors: The validation errors of the on-premise machine Value can be list of validation errors
    :param str vm_id: The virtual machine Id.
    :param Sequence['VMNicDetailsResponseArgs'] vm_nics: The PE Network details.
    :param str vm_protection_state: The protection state for the vm.
    :param str vm_protection_state_description: The protection state description for the vm.
    """
    # The discriminator is always stored as the literal 'InMage',
    # regardless of the instance_type argument (codegen convention).
    pulumi.set(__self__, "instance_type", 'InMage')
    # Optional fields are recorded only when explicitly supplied (not None),
    # in the same order the generated code sets them.
    for key, value in (
        ("active_site_type", active_site_type),
        ("agent_details", agent_details),
        ("azure_storage_account_id", azure_storage_account_id),
        ("compressed_data_rate_in_mb", compressed_data_rate_in_mb),
        ("consistency_points", consistency_points),
        ("datastores", datastores),
        ("discovery_type", discovery_type),
        ("disk_resized", disk_resized),
        ("infrastructure_vm_id", infrastructure_vm_id),
        ("ip_address", ip_address),
        ("last_heartbeat", last_heartbeat),
        ("last_rpo_calculated_time", last_rpo_calculated_time),
        ("last_update_received_time", last_update_received_time),
        ("master_target_id", master_target_id),
        ("multi_vm_group_id", multi_vm_group_id),
        ("multi_vm_group_name", multi_vm_group_name),
        ("multi_vm_sync_status", multi_vm_sync_status),
        ("os_details", os_details),
        ("os_version", os_version),
        ("process_server_id", process_server_id),
        ("protected_disks", protected_disks),
        ("protection_stage", protection_stage),
        ("reboot_after_update_status", reboot_after_update_status),
        ("replica_id", replica_id),
        ("resync_details", resync_details),
        ("retention_window_end", retention_window_end),
        ("retention_window_start", retention_window_start),
        ("rpo_in_seconds", rpo_in_seconds),
        ("source_vm_cpu_count", source_vm_cpu_count),
        ("source_vm_ram_size_in_mb", source_vm_ram_size_in_mb),
        ("uncompressed_data_rate_in_mb", uncompressed_data_rate_in_mb),
        ("v_center_infrastructure_id", v_center_infrastructure_id),
        ("validation_errors", validation_errors),
        ("vm_id", vm_id),
        ("vm_nics", vm_nics),
        ("vm_protection_state", vm_protection_state),
        ("vm_protection_state_description", vm_protection_state_description),
    ):
        if value is not None:
            pulumi.set(__self__, key, value)
@property
@pulumi.getter(name="instanceType")
def instance_type(self) -> str:
"""
Gets the Instance type.
"""
return pulumi.get(self, "instance_type")
@property
@pulumi.getter(name="activeSiteType")
def active_site_type(self) -> Optional[str]:
"""
The active location of the VM. If the VM is being protected from Azure, this field will take values from { Azure, OnPrem }. If the VM is being protected between two data-centers, this field will be OnPrem always.
"""
return pulumi.get(self, "active_site_type")
@property
@pulumi.getter(name="agentDetails")
def agent_details(self) -> Optional['outputs.InMageAgentDetailsResponse']:
"""
The agent details.
"""
return pulumi.get(self, "agent_details")
@property
@pulumi.getter(name="azureStorageAccountId")
def azure_storage_account_id(self) -> Optional[str]:
"""
A value indicating the underlying Azure storage account. If the VM is not running in Azure, this value shall be set to null.
"""
return pulumi.get(self, "azure_storage_account_id")
@property
@pulumi.getter(name="compressedDataRateInMB")
def compressed_data_rate_in_mb(self) -> Optional[float]:
"""
The compressed data change rate in MB.
"""
return pulumi.get(self, "compressed_data_rate_in_mb")
@property
@pulumi.getter(name="consistencyPoints")
def consistency_points(self) -> Optional[Mapping[str, str]]:
"""
The collection of Consistency points.
"""
return pulumi.get(self, "consistency_points")
@property
@pulumi.getter
def datastores(self) -> Optional[Sequence[str]]:
"""
The data stores of the on-premise machine Value can be list of strings that contain data store names
"""
return pulumi.get(self, "datastores")
@property
@pulumi.getter(name="discoveryType")
def discovery_type(self) -> Optional[str]:
"""
A value indicating the discovery type of the machine.
"""
return pulumi.get(self, "discovery_type")
@property
@pulumi.getter(name="diskResized")
def disk_resized(self) -> Optional[str]:
"""
A value indicating whether any disk is resized for this VM.
"""
return pulumi.get(self, "disk_resized")
@property
@pulumi.getter(name="infrastructureVmId")
def infrastructure_vm_id(self) -> Optional[str]:
"""
The infrastructure VM Id.
"""
return pulumi.get(self, "infrastructure_vm_id")
@property
@pulumi.getter(name="ipAddress")
def ip_address(self) -> Optional[str]:
"""
The source IP address.
"""
return pulumi.get(self, "ip_address")
@property
@pulumi.getter(name="lastHeartbeat")
def last_heartbeat(self) -> Optional[str]:
"""
The last heartbeat received from the source server.
"""
return pulumi.get(self, "last_heartbeat")
@property
@pulumi.getter(name="lastRpoCalculatedTime")
def last_rpo_calculated_time(self) -> Optional[str]:
"""
The last RPO calculated time.
"""
return pulumi.get(self, "last_rpo_calculated_time")
@property
@pulumi.getter(name="lastUpdateReceivedTime")
def last_update_received_time(self) -> Optional[str]:
"""
The last update time received from on-prem components.
"""
return pulumi.get(self, "last_update_received_time")
@property
@pulumi.getter(name="masterTargetId")
def master_target_id(self) -> Optional[str]:
"""
The master target Id.
"""
return pulumi.get(self, "master_target_id")
@property
@pulumi.getter(name="multiVmGroupId")
def multi_vm_group_id(self) -> Optional[str]:
"""
The multi vm group Id, if any.
"""
return pulumi.get(self, "multi_vm_group_id")
@property
@pulumi.getter(name="multiVmGroupName")
def multi_vm_group_name(self) -> Optional[str]:
"""
The multi vm group name, if any.
"""
return pulumi.get(self, "multi_vm_group_name")
@property
@pulumi.getter(name="multiVmSyncStatus")
def multi_vm_sync_status(self) -> Optional[str]:
"""
A value indicating whether the multi vm sync is enabled or disabled.
"""
return pulumi.get(self, "multi_vm_sync_status")
@property
@pulumi.getter(name="osDetails")
def os_details(self) -> Optional['outputs.OSDiskDetailsResponse']:
"""
The OS details.
"""
return pulumi.get(self, "os_details")
@property
@pulumi.getter(name="osVersion")
def os_version(self) -> Optional[str]:
"""
The OS Version of the protected item.
"""
return pulumi.get(self, "os_version")
@property
@pulumi.getter(name="processServerId")
def process_server_id(self) -> Optional[str]:
"""
The process server Id.
"""
return pulumi.get(self, "process_server_id")
@property
@pulumi.getter(name="protectedDisks")
def protected_disks(self) -> Optional[Sequence['outputs.InMageProtectedDiskDetailsResponse']]:
"""
The list of protected disks.
"""
return pulumi.get(self, "protected_disks")
@property
@pulumi.getter(name="protectionStage")
def protection_stage(self) -> Optional[str]:
"""
The protection stage.
"""
return pulumi.get(self, "protection_stage")
@property
@pulumi.getter(name="rebootAfterUpdateStatus")
def reboot_after_update_status(self) -> Optional[str]:
"""
A value indicating whether the source server requires a restart after update.
"""
return pulumi.get(self, "reboot_after_update_status")
@property
@pulumi.getter(name="replicaId")
def replica_id(self) -> Optional[str]:
"""
The replica id of the protected item.
"""
return pulumi.get(self, "replica_id")
@property
@pulumi.getter(name="resyncDetails")
def resync_details(self) -> Optional['outputs.InitialReplicationDetailsResponse']:
"""
The resync details of the machine
"""
return pulumi.get(self, "resync_details")
@property
@pulumi.getter(name="retentionWindowEnd")
def retention_window_end(self) -> Optional[str]:
"""
The retention window end time.
"""
return pulumi.get(self, "retention_window_end")
@property
@pulumi.getter(name="retentionWindowStart")
def retention_window_start(self) -> Optional[str]:
"""
The retention window start time.
"""
return pulumi.get(self, "retention_window_start")
@property
@pulumi.getter(name="rpoInSeconds")
def rpo_in_seconds(self) -> Optional[int]:
"""
The RPO in seconds.
"""
return pulumi.get(self, "rpo_in_seconds")
@property
@pulumi.getter(name="sourceVmCpuCount")
def source_vm_cpu_count(self) -> Optional[int]:
"""
The CPU count of the VM on the primary side.
"""
return pulumi.get(self, "source_vm_cpu_count")
@property
@pulumi.getter(name="sourceVmRamSizeInMB")
def source_vm_ram_size_in_mb(self) -> Optional[int]:
"""
The RAM size of the VM on the primary side.
"""
return pulumi.get(self, "source_vm_ram_size_in_mb")
@property
@pulumi.getter(name="uncompressedDataRateInMB")
def uncompressed_data_rate_in_mb(self) -> Optional[float]:
"""
The uncompressed data change rate in MB.
"""
return pulumi.get(self, "uncompressed_data_rate_in_mb")
@property
@pulumi.getter(name="vCenterInfrastructureId")
def v_center_infrastructure_id(self) -> Optional[str]:
"""
The vCenter infrastructure Id.
"""
return pulumi.get(self, "v_center_infrastructure_id")
@property
@pulumi.getter(name="validationErrors")
def validation_errors(self) -> Optional[Sequence['outputs.HealthErrorResponse']]:
"""
The validation errors of the on-premise machine Value can be list of validation errors
"""
return pulumi.get(self, "validation_errors")
@property
@pulumi.getter(name="vmId")
def vm_id(self) -> Optional[str]:
"""
The virtual machine Id.
"""
return pulumi.get(self, "vm_id")
@property
@pulumi.getter(name="vmNics")
def vm_nics(self) -> Optional[Sequence['outputs.VMNicDetailsResponse']]:
"""
The PE Network details.
"""
return pulumi.get(self, "vm_nics")
@property
@pulumi.getter(name="vmProtectionState")
def vm_protection_state(self) -> Optional[str]:
"""
The protection state for the vm.
"""
return pulumi.get(self, "vm_protection_state")
@property
@pulumi.getter(name="vmProtectionStateDescription")
def vm_protection_state_description(self) -> Optional[str]:
"""
The protection state description for the vm.
"""
return pulumi.get(self, "vm_protection_state_description")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InitialReplicationDetailsResponse(dict):
    """Initial replication details."""

    def __init__(__self__, *,
                 initial_replication_progress_percentage: Optional[str] = None,
                 initial_replication_type: Optional[str] = None):
        """
        :param str initial_replication_progress_percentage: Progress percentage of the initial replication.
        :param str initial_replication_type: Type of the initial replication.
        """
        # Store only the fields that were actually supplied.
        provided = (
            ("initial_replication_progress_percentage", initial_replication_progress_percentage),
            ("initial_replication_type", initial_replication_type),
        )
        for key, value in provided:
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="initialReplicationProgressPercentage")
    def initial_replication_progress_percentage(self) -> Optional[str]:
        """Progress percentage of the initial replication."""
        return pulumi.get(self, "initial_replication_progress_percentage")

    @property
    @pulumi.getter(name="initialReplicationType")
    def initial_replication_type(self) -> Optional[str]:
        """Type of the initial replication."""
        return pulumi.get(self, "initial_replication_type")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case equivalent, if known."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InnerHealthErrorResponse(dict):
    """
    Implements the InnerHealthError class. A HealthError object carries a list of
    InnerHealthErrors as child errors; the separate type prevents an infinite loop of
    structures when Hydra auto-generates the contract. Consumers use it the same way
    as Exception -> InnerException.
    """

    def __init__(__self__, *,
                 creation_time_utc: Optional[str] = None,
                 entity_id: Optional[str] = None,
                 error_category: Optional[str] = None,
                 error_code: Optional[str] = None,
                 error_level: Optional[str] = None,
                 error_message: Optional[str] = None,
                 error_source: Optional[str] = None,
                 error_type: Optional[str] = None,
                 possible_causes: Optional[str] = None,
                 recommended_action: Optional[str] = None,
                 recovery_provider_error_message: Optional[str] = None,
                 summary_message: Optional[str] = None):
        """
        :param str creation_time_utc: Error creation time (UTC).
        :param str entity_id: ID of the entity.
        :param str error_category: Category of the error.
        :param str error_code: Error code.
        :param str error_level: Level of the error.
        :param str error_message: Error message.
        :param str error_source: Source of the error.
        :param str error_type: Type of the error.
        :param str possible_causes: Possible causes of the error.
        :param str recommended_action: Recommended action to resolve the error.
        :param str recovery_provider_error_message: DRA error message.
        :param str summary_message: Summary message of the entity.
        """
        # Store only the fields that were actually supplied.
        provided = (
            ("creation_time_utc", creation_time_utc),
            ("entity_id", entity_id),
            ("error_category", error_category),
            ("error_code", error_code),
            ("error_level", error_level),
            ("error_message", error_message),
            ("error_source", error_source),
            ("error_type", error_type),
            ("possible_causes", possible_causes),
            ("recommended_action", recommended_action),
            ("recovery_provider_error_message", recovery_provider_error_message),
            ("summary_message", summary_message),
        )
        for key, value in provided:
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="creationTimeUtc")
    def creation_time_utc(self) -> Optional[str]:
        """Error creation time (UTC)."""
        return pulumi.get(self, "creation_time_utc")

    @property
    @pulumi.getter(name="entityId")
    def entity_id(self) -> Optional[str]:
        """ID of the entity."""
        return pulumi.get(self, "entity_id")

    @property
    @pulumi.getter(name="errorCategory")
    def error_category(self) -> Optional[str]:
        """Category of the error."""
        return pulumi.get(self, "error_category")

    @property
    @pulumi.getter(name="errorCode")
    def error_code(self) -> Optional[str]:
        """Error code."""
        return pulumi.get(self, "error_code")

    @property
    @pulumi.getter(name="errorLevel")
    def error_level(self) -> Optional[str]:
        """Level of the error."""
        return pulumi.get(self, "error_level")

    @property
    @pulumi.getter(name="errorMessage")
    def error_message(self) -> Optional[str]:
        """Error message."""
        return pulumi.get(self, "error_message")

    @property
    @pulumi.getter(name="errorSource")
    def error_source(self) -> Optional[str]:
        """Source of the error."""
        return pulumi.get(self, "error_source")

    @property
    @pulumi.getter(name="errorType")
    def error_type(self) -> Optional[str]:
        """Type of the error."""
        return pulumi.get(self, "error_type")

    @property
    @pulumi.getter(name="possibleCauses")
    def possible_causes(self) -> Optional[str]:
        """Possible causes of the error."""
        return pulumi.get(self, "possible_causes")

    @property
    @pulumi.getter(name="recommendedAction")
    def recommended_action(self) -> Optional[str]:
        """Recommended action to resolve the error."""
        return pulumi.get(self, "recommended_action")

    @property
    @pulumi.getter(name="recoveryProviderErrorMessage")
    def recovery_provider_error_message(self) -> Optional[str]:
        """DRA error message."""
        return pulumi.get(self, "recovery_provider_error_message")

    @property
    @pulumi.getter(name="summaryMessage")
    def summary_message(self) -> Optional[str]:
        """Summary message of the entity."""
        return pulumi.get(self, "summary_message")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case equivalent, if known."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InputEndpointResponse(dict):
    """Azure VM input endpoint details."""

    def __init__(__self__, *,
                 endpoint_name: Optional[str] = None,
                 private_port: Optional[int] = None,
                 protocol: Optional[str] = None,
                 public_port: Optional[int] = None):
        """
        :param str endpoint_name: Name of the input endpoint.
        :param int private_port: Private port of the input endpoint.
        :param str protocol: Protocol of the input endpoint.
        :param int public_port: Public port of the input endpoint.
        """
        # Store only the fields that were actually supplied.
        provided = (
            ("endpoint_name", endpoint_name),
            ("private_port", private_port),
            ("protocol", protocol),
            ("public_port", public_port),
        )
        for key, value in provided:
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="endpointName")
    def endpoint_name(self) -> Optional[str]:
        """Name of the input endpoint."""
        return pulumi.get(self, "endpoint_name")

    @property
    @pulumi.getter(name="privatePort")
    def private_port(self) -> Optional[int]:
        """Private port of the input endpoint."""
        return pulumi.get(self, "private_port")

    @property
    @pulumi.getter
    def protocol(self) -> Optional[str]:
        """Protocol of the input endpoint."""
        return pulumi.get(self, "protocol")

    @property
    @pulumi.getter(name="publicPort")
    def public_port(self) -> Optional[int]:
        """Public port of the input endpoint."""
        return pulumi.get(self, "public_port")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case equivalent, if known."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InquiryInfoResponse(dict):
    """Details about inquired protectable items under a given container."""

    def __init__(__self__, *,
                 error_detail: Optional['outputs.ErrorDetailResponse'] = None,
                 inquiry_details: Optional[Sequence['outputs.WorkloadInquiryDetailsResponse']] = None,
                 status: Optional[str] = None):
        """
        :param 'ErrorDetailResponseArgs' error_detail: Error details if the status is non-success.
        :param Sequence['WorkloadInquiryDetailsResponseArgs'] inquiry_details: Workload-specific inquiry details
               (e.g. SQL and Oracle carry different details).
        :param str status: Inquiry status for this container, such as InProgress | Failed | Succeeded.
        """
        # Store only the fields that were actually supplied.
        provided = (
            ("error_detail", error_detail),
            ("inquiry_details", inquiry_details),
            ("status", status),
        )
        for key, value in provided:
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="errorDetail")
    def error_detail(self) -> Optional['outputs.ErrorDetailResponse']:
        """Error details if the status is non-success."""
        return pulumi.get(self, "error_detail")

    @property
    @pulumi.getter(name="inquiryDetails")
    def inquiry_details(self) -> Optional[Sequence['outputs.WorkloadInquiryDetailsResponse']]:
        """Workload-specific inquiry details (e.g. SQL and Oracle carry different details)."""
        return pulumi.get(self, "inquiry_details")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """Inquiry status for this container, such as InProgress | Failed | Succeeded."""
        return pulumi.get(self, "status")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case equivalent, if known."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class InquiryValidationResponse(dict):
    """Validation for inquired protectable items under a given container."""

    def __init__(__self__, *,
                 additional_detail: str,
                 error_detail: Optional['outputs.ErrorDetailResponse'] = None,
                 status: Optional[str] = None):
        """
        :param str additional_detail: Error additional detail in case the status is non-success.
        :param 'ErrorDetailResponseArgs' error_detail: Error detail in case the status is non-success.
        :param str status: Status for the inquiry validation.
        """
        # additional_detail is required, so it is always stored.
        pulumi.set(__self__, "additional_detail", additional_detail)
        # The remaining fields are stored only when supplied.
        provided = (
            ("error_detail", error_detail),
            ("status", status),
        )
        for key, value in provided:
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="additionalDetail")
    def additional_detail(self) -> str:
        """Error additional detail in case the status is non-success."""
        return pulumi.get(self, "additional_detail")

    @property
    @pulumi.getter(name="errorDetail")
    def error_detail(self) -> Optional['outputs.ErrorDetailResponse']:
        """Error detail in case the status is non-success."""
        return pulumi.get(self, "error_detail")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """Status for the inquiry validation."""
        return pulumi.get(self, "status")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case equivalent, if known."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class KPIResourceHealthDetailsResponse(dict):
    """KPI Resource Health Details."""

    def __init__(__self__, *,
                 resource_health_details: Optional[Sequence['outputs.ResourceHealthDetailsResponse']] = None,
                 resource_health_status: Optional[str] = None):
        """
        :param Sequence['ResourceHealthDetailsResponseArgs'] resource_health_details: Resource health status details.
        :param str resource_health_status: Resource health status.
        """
        # Store only the fields that were actually supplied.
        provided = (
            ("resource_health_details", resource_health_details),
            ("resource_health_status", resource_health_status),
        )
        for key, value in provided:
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="resourceHealthDetails")
    def resource_health_details(self) -> Optional[Sequence['outputs.ResourceHealthDetailsResponse']]:
        """Resource health status details."""
        return pulumi.get(self, "resource_health_details")

    @property
    @pulumi.getter(name="resourceHealthStatus")
    def resource_health_status(self) -> Optional[str]:
        """Resource health status."""
        return pulumi.get(self, "resource_health_status")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case equivalent, if known."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class LongTermRetentionPolicyResponse(dict):
    """Long-term retention policy."""

    def __init__(__self__, *,
                 daily_schedule: Optional['outputs.DailyRetentionScheduleResponse'] = None,
                 monthly_schedule: Optional['outputs.MonthlyRetentionScheduleResponse'] = None,
                 retention_policy_type: Optional[str] = None,
                 weekly_schedule: Optional['outputs.WeeklyRetentionScheduleResponse'] = None,
                 yearly_schedule: Optional['outputs.YearlyRetentionScheduleResponse'] = None):
        """
        :param 'DailyRetentionScheduleResponseArgs' daily_schedule: Daily retention schedule of the backup policy.
        :param 'MonthlyRetentionScheduleResponseArgs' monthly_schedule: Monthly retention schedule of the backup policy.
        :param str retention_policy_type: Discriminator for deciding the specific type in the polymorphic chain of types.
        :param 'WeeklyRetentionScheduleResponseArgs' weekly_schedule: Weekly retention schedule of the backup policy.
        :param 'YearlyRetentionScheduleResponseArgs' yearly_schedule: Yearly retention schedule of the backup policy.
        """
        # Store only the fields that were actually supplied.
        provided = (
            ("daily_schedule", daily_schedule),
            ("monthly_schedule", monthly_schedule),
        )
        for key, value in provided:
            if value is not None:
                pulumi.set(__self__, key, value)
        # Discriminator field: always stored as the fixed type tag, never the caller's value.
        if retention_policy_type is not None:
            pulumi.set(__self__, "retention_policy_type", 'LongTermRetentionPolicy')
        provided = (
            ("weekly_schedule", weekly_schedule),
            ("yearly_schedule", yearly_schedule),
        )
        for key, value in provided:
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="dailySchedule")
    def daily_schedule(self) -> Optional['outputs.DailyRetentionScheduleResponse']:
        """Daily retention schedule of the backup policy."""
        return pulumi.get(self, "daily_schedule")

    @property
    @pulumi.getter(name="monthlySchedule")
    def monthly_schedule(self) -> Optional['outputs.MonthlyRetentionScheduleResponse']:
        """Monthly retention schedule of the backup policy."""
        return pulumi.get(self, "monthly_schedule")

    @property
    @pulumi.getter(name="retentionPolicyType")
    def retention_policy_type(self) -> Optional[str]:
        """Discriminator for deciding the specific type in the polymorphic chain of types."""
        return pulumi.get(self, "retention_policy_type")

    @property
    @pulumi.getter(name="weeklySchedule")
    def weekly_schedule(self) -> Optional['outputs.WeeklyRetentionScheduleResponse']:
        """Weekly retention schedule of the backup policy."""
        return pulumi.get(self, "weekly_schedule")

    @property
    @pulumi.getter(name="yearlySchedule")
    def yearly_schedule(self) -> Optional['outputs.YearlyRetentionScheduleResponse']:
        """Yearly retention schedule of the backup policy."""
        return pulumi.get(self, "yearly_schedule")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case equivalent, if known."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class LongTermSchedulePolicyResponse(dict):
    """Long-term policy schedule."""

    def __init__(__self__, *,
                 schedule_policy_type: Optional[str] = None):
        """
        :param str schedule_policy_type: Discriminator for deciding the specific type in the polymorphic chain of types.
        """
        # Discriminator field: always stored as the fixed type tag, never the caller's value.
        if schedule_policy_type is not None:
            pulumi.set(__self__, "schedule_policy_type", 'LongTermSchedulePolicy')

    @property
    @pulumi.getter(name="schedulePolicyType")
    def schedule_policy_type(self) -> Optional[str]:
        """Discriminator for deciding the specific type in the polymorphic chain of types."""
        return pulumi.get(self, "schedule_policy_type")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case equivalent, if known."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class MABContainerHealthDetailsResponse(dict):
    """MAB workload-specific health details."""

    def __init__(__self__, *,
                 code: Optional[int] = None,
                 message: Optional[str] = None,
                 recommendations: Optional[Sequence[str]] = None,
                 title: Optional[str] = None):
        """
        :param int code: Health code.
        :param str message: Health message.
        :param Sequence[str] recommendations: Health recommended actions.
        :param str title: Health title.
        """
        # Store only the fields that were actually supplied.
        provided = (
            ("code", code),
            ("message", message),
            ("recommendations", recommendations),
            ("title", title),
        )
        for key, value in provided:
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def code(self) -> Optional[int]:
        """Health code."""
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def message(self) -> Optional[str]:
        """Health message."""
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def recommendations(self) -> Optional[Sequence[str]]:
        """Health recommended actions."""
        return pulumi.get(self, "recommendations")

    @property
    @pulumi.getter
    def title(self) -> Optional[str]:
        """Health title."""
        return pulumi.get(self, "title")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case equivalent, if known."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class MabContainerExtendedInfoResponse(dict):
    """Additional information of the container."""

    def __init__(__self__, *,
                 backup_item_type: Optional[str] = None,
                 backup_items: Optional[Sequence[str]] = None,
                 last_backup_status: Optional[str] = None,
                 last_refreshed_at: Optional[str] = None,
                 policy_name: Optional[str] = None):
        """
        :param str backup_item_type: Type of the backup items associated with this container.
        :param Sequence[str] backup_items: List of backup items associated with this container.
        :param str last_backup_status: Latest backup status of this container.
        :param str last_refreshed_at: Time stamp when this container was refreshed.
        :param str policy_name: Backup policy associated with this container.
        """
        # Store only the fields that were actually supplied.
        provided = (
            ("backup_item_type", backup_item_type),
            ("backup_items", backup_items),
            ("last_backup_status", last_backup_status),
            ("last_refreshed_at", last_refreshed_at),
            ("policy_name", policy_name),
        )
        for key, value in provided:
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="backupItemType")
    def backup_item_type(self) -> Optional[str]:
        """Type of the backup items associated with this container."""
        return pulumi.get(self, "backup_item_type")

    @property
    @pulumi.getter(name="backupItems")
    def backup_items(self) -> Optional[Sequence[str]]:
        """List of backup items associated with this container."""
        return pulumi.get(self, "backup_items")

    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[str]:
        """Latest backup status of this container."""
        return pulumi.get(self, "last_backup_status")

    @property
    @pulumi.getter(name="lastRefreshedAt")
    def last_refreshed_at(self) -> Optional[str]:
        """Time stamp when this container was refreshed."""
        return pulumi.get(self, "last_refreshed_at")

    @property
    @pulumi.getter(name="policyName")
    def policy_name(self) -> Optional[str]:
        """Backup policy associated with this container."""
        return pulumi.get(self, "policy_name")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case equivalent, if known."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class MabContainerResponse(dict):
    """Container with items backed up using the MAB backup engine."""

    def __init__(__self__, *,
                 agent_version: Optional[str] = None,
                 backup_management_type: Optional[str] = None,
                 can_re_register: Optional[bool] = None,
                 container_health_state: Optional[str] = None,
                 container_id: Optional[int] = None,
                 container_type: Optional[str] = None,
                 extended_info: Optional['outputs.MabContainerExtendedInfoResponse'] = None,
                 friendly_name: Optional[str] = None,
                 health_status: Optional[str] = None,
                 mab_container_health_details: Optional[Sequence['outputs.MABContainerHealthDetailsResponse']] = None,
                 protected_item_count: Optional[int] = None,
                 registration_status: Optional[str] = None):
        """
        :param str agent_version: Agent version of this container.
        :param str backup_management_type: Type of backup management for the container.
        :param bool can_re_register: Whether the container can be registered one more time.
        :param str container_health_state: Health state of the MAB container.
        :param int container_id: ContainerID representing the container.
        :param str container_type: Type of the container. The value of this property for:
               1. Compute Azure VM is Microsoft.Compute/virtualMachines,
               2. Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines,
               3. Windows machines (like MAB, DPM etc) is Windows,
               4. Azure SQL instance is AzureSqlContainer,
               5. Storage containers is StorageContainer,
               6. Azure workload Backup is VMAppContainer.
        :param 'MabContainerExtendedInfoResponseArgs' extended_info: Additional information for this container.
        :param str friendly_name: Friendly name of the container.
        :param str health_status: Status of the container's health.
        :param Sequence['MABContainerHealthDetailsResponseArgs'] mab_container_health_details: Health details on this MAB container.
        :param int protected_item_count: Number of items backed up in this container.
        :param str registration_status: Status of registration of the container with the Recovery Services Vault.
        """
        # Store only the fields that were actually supplied.
        provided = (
            ("agent_version", agent_version),
            ("backup_management_type", backup_management_type),
            ("can_re_register", can_re_register),
            ("container_health_state", container_health_state),
            ("container_id", container_id),
        )
        for key, value in provided:
            if value is not None:
                pulumi.set(__self__, key, value)
        # Discriminator field: always stored as the fixed type tag, never the caller's value.
        if container_type is not None:
            pulumi.set(__self__, "container_type", 'Windows')
        provided = (
            ("extended_info", extended_info),
            ("friendly_name", friendly_name),
            ("health_status", health_status),
            ("mab_container_health_details", mab_container_health_details),
            ("protected_item_count", protected_item_count),
            ("registration_status", registration_status),
        )
        for key, value in provided:
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="agentVersion")
    def agent_version(self) -> Optional[str]:
        """Agent version of this container."""
        return pulumi.get(self, "agent_version")

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """Type of backup management for the container."""
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="canReRegister")
    def can_re_register(self) -> Optional[bool]:
        """Whether the container can be registered one more time."""
        return pulumi.get(self, "can_re_register")

    @property
    @pulumi.getter(name="containerHealthState")
    def container_health_state(self) -> Optional[str]:
        """Health state of the MAB container."""
        return pulumi.get(self, "container_health_state")

    @property
    @pulumi.getter(name="containerId")
    def container_id(self) -> Optional[int]:
        """ContainerID representing the container."""
        return pulumi.get(self, "container_id")

    @property
    @pulumi.getter(name="containerType")
    def container_type(self) -> Optional[str]:
        """
        Type of the container. The value of this property for:
        1. Compute Azure VM is Microsoft.Compute/virtualMachines,
        2. Classic Compute Azure VM is Microsoft.ClassicCompute/virtualMachines,
        3. Windows machines (like MAB, DPM etc) is Windows,
        4. Azure SQL instance is AzureSqlContainer,
        5. Storage containers is StorageContainer,
        6. Azure workload Backup is VMAppContainer.
        """
        return pulumi.get(self, "container_type")

    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional['outputs.MabContainerExtendedInfoResponse']:
        """Additional information for this container."""
        return pulumi.get(self, "extended_info")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """Friendly name of the container."""
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="healthStatus")
    def health_status(self) -> Optional[str]:
        """Status of the container's health."""
        return pulumi.get(self, "health_status")

    @property
    @pulumi.getter(name="mabContainerHealthDetails")
    def mab_container_health_details(self) -> Optional[Sequence['outputs.MABContainerHealthDetailsResponse']]:
        """Health details on this MAB container."""
        return pulumi.get(self, "mab_container_health_details")

    @property
    @pulumi.getter(name="protectedItemCount")
    def protected_item_count(self) -> Optional[int]:
        """Number of items backed up in this container."""
        return pulumi.get(self, "protected_item_count")

    @property
    @pulumi.getter(name="registrationStatus")
    def registration_status(self) -> Optional[str]:
        """Status of registration of the container with the Recovery Services Vault."""
        return pulumi.get(self, "registration_status")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case equivalent, if known."""
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class MabFileFolderProtectedItemExtendedInfoResponse(dict):
    """
    Additional information on the backed up item.
    """
    def __init__(__self__, *,
                 last_refreshed_at: Optional[str] = None,
                 oldest_recovery_point: Optional[str] = None,
                 recovery_point_count: Optional[int] = None):
        """
        Additional information on the backed up item.
        :param str last_refreshed_at: Last time when the agent data synced to service.
        :param str oldest_recovery_point: The oldest backup copy available.
        :param int recovery_point_count: Number of backup copies associated with the backup item.
        """
        # Only store the fields the service actually returned; fields left as
        # None are omitted entirely rather than stored as null.
        for attr, value in [
            ("last_refreshed_at", last_refreshed_at),
            ("oldest_recovery_point", oldest_recovery_point),
            ("recovery_point_count", recovery_point_count),
        ]:
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="lastRefreshedAt")
    def last_refreshed_at(self) -> Optional[str]:
        """Last time when the agent data synced to service."""
        return pulumi.get(self, "last_refreshed_at")

    @property
    @pulumi.getter(name="oldestRecoveryPoint")
    def oldest_recovery_point(self) -> Optional[str]:
        """The oldest backup copy available."""
        return pulumi.get(self, "oldest_recovery_point")

    @property
    @pulumi.getter(name="recoveryPointCount")
    def recovery_point_count(self) -> Optional[int]:
        """Number of backup copies associated with the backup item."""
        return pulumi.get(self, "recovery_point_count")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute; fall back to the input."""
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class MabFileFolderProtectedItemResponse(dict):
    """
    MAB workload-specific backup item.
    """
    def __init__(__self__, *,
                 protected_item_type: str,
                 backup_management_type: Optional[str] = None,
                 backup_set_name: Optional[str] = None,
                 computer_name: Optional[str] = None,
                 container_name: Optional[str] = None,
                 create_mode: Optional[str] = None,
                 deferred_delete_sync_time_in_utc: Optional[int] = None,
                 deferred_delete_time_in_utc: Optional[str] = None,
                 deferred_delete_time_remaining: Optional[str] = None,
                 extended_info: Optional['outputs.MabFileFolderProtectedItemExtendedInfoResponse'] = None,
                 friendly_name: Optional[str] = None,
                 is_deferred_delete_schedule_upcoming: Optional[bool] = None,
                 is_rehydrate: Optional[bool] = None,
                 is_scheduled_for_deferred_delete: Optional[bool] = None,
                 last_backup_status: Optional[str] = None,
                 last_backup_time: Optional[str] = None,
                 last_recovery_point: Optional[str] = None,
                 policy_id: Optional[str] = None,
                 protection_state: Optional[str] = None,
                 source_resource_id: Optional[str] = None,
                 workload_type: Optional[str] = None):
        """
        MAB workload-specific backup item.
        :param str protected_item_type: backup item type.
        :param str backup_management_type: Type of backup management for the backed up item.
        :param str backup_set_name: Name of the backup set the backup item belongs to
        :param str computer_name: Name of the computer associated with this backup item.
        :param str container_name: Unique name of container
        :param str create_mode: Create mode to indicate recovery of existing soft deleted data source or creation of new data source.
        :param int deferred_delete_sync_time_in_utc: Sync time for deferred deletion in UTC
        :param str deferred_delete_time_in_utc: Time for deferred deletion in UTC
        :param str deferred_delete_time_remaining: Time remaining before the DS marked for deferred delete is permanently deleted
        :param 'MabFileFolderProtectedItemExtendedInfoResponseArgs' extended_info: Additional information with this backup item.
        :param str friendly_name: Friendly name of this backup item.
        :param bool is_deferred_delete_schedule_upcoming: Flag to identify whether the deferred deleted DS is to be purged soon
        :param bool is_rehydrate: Flag to identify that deferred deleted DS is to be moved into Pause state
        :param bool is_scheduled_for_deferred_delete: Flag to identify whether the DS is scheduled for deferred delete
        :param str last_backup_status: Status of last backup operation.
        :param str last_backup_time: Timestamp of the last backup operation on this backup item.
        :param str last_recovery_point: Timestamp when the last (latest) backup copy was created for this backup item.
        :param str policy_id: ID of the backup policy with which this item is backed up.
        :param str protection_state: Protected, ProtectionStopped, IRPending or ProtectionError
        :param str source_resource_id: ARM ID of the resource to be backed up.
        :param str workload_type: Type of workload this item represents.
        """
        # Discriminator: always stored as the fixed literal, independent of the
        # `protected_item_type` argument (generated-code convention).
        pulumi.set(__self__, "protected_item_type", 'MabFileFolderProtectedItem')
        # Only store the optional fields the service actually returned.
        for attr, value in [
            ("backup_management_type", backup_management_type),
            ("backup_set_name", backup_set_name),
            ("computer_name", computer_name),
            ("container_name", container_name),
            ("create_mode", create_mode),
            ("deferred_delete_sync_time_in_utc", deferred_delete_sync_time_in_utc),
            ("deferred_delete_time_in_utc", deferred_delete_time_in_utc),
            ("deferred_delete_time_remaining", deferred_delete_time_remaining),
            ("extended_info", extended_info),
            ("friendly_name", friendly_name),
            ("is_deferred_delete_schedule_upcoming", is_deferred_delete_schedule_upcoming),
            ("is_rehydrate", is_rehydrate),
            ("is_scheduled_for_deferred_delete", is_scheduled_for_deferred_delete),
            ("last_backup_status", last_backup_status),
            ("last_backup_time", last_backup_time),
            ("last_recovery_point", last_recovery_point),
            ("policy_id", policy_id),
            ("protection_state", protection_state),
            ("source_resource_id", source_resource_id),
            ("workload_type", workload_type),
        ]:
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> str:
        """backup item type."""
        return pulumi.get(self, "protected_item_type")

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """Type of backup management for the backed up item."""
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="backupSetName")
    def backup_set_name(self) -> Optional[str]:
        """Name of the backup set the backup item belongs to"""
        return pulumi.get(self, "backup_set_name")

    @property
    @pulumi.getter(name="computerName")
    def computer_name(self) -> Optional[str]:
        """Name of the computer associated with this backup item."""
        return pulumi.get(self, "computer_name")

    @property
    @pulumi.getter(name="containerName")
    def container_name(self) -> Optional[str]:
        """Unique name of container"""
        return pulumi.get(self, "container_name")

    @property
    @pulumi.getter(name="createMode")
    def create_mode(self) -> Optional[str]:
        """Create mode to indicate recovery of existing soft deleted data source or creation of new data source."""
        return pulumi.get(self, "create_mode")

    @property
    @pulumi.getter(name="deferredDeleteSyncTimeInUTC")
    def deferred_delete_sync_time_in_utc(self) -> Optional[int]:
        """Sync time for deferred deletion in UTC"""
        return pulumi.get(self, "deferred_delete_sync_time_in_utc")

    @property
    @pulumi.getter(name="deferredDeleteTimeInUTC")
    def deferred_delete_time_in_utc(self) -> Optional[str]:
        """Time for deferred deletion in UTC"""
        return pulumi.get(self, "deferred_delete_time_in_utc")

    @property
    @pulumi.getter(name="deferredDeleteTimeRemaining")
    def deferred_delete_time_remaining(self) -> Optional[str]:
        """Time remaining before the DS marked for deferred delete is permanently deleted"""
        return pulumi.get(self, "deferred_delete_time_remaining")

    @property
    @pulumi.getter(name="extendedInfo")
    def extended_info(self) -> Optional['outputs.MabFileFolderProtectedItemExtendedInfoResponse']:
        """Additional information with this backup item."""
        return pulumi.get(self, "extended_info")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """Friendly name of this backup item."""
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="isDeferredDeleteScheduleUpcoming")
    def is_deferred_delete_schedule_upcoming(self) -> Optional[bool]:
        """Flag to identify whether the deferred deleted DS is to be purged soon"""
        return pulumi.get(self, "is_deferred_delete_schedule_upcoming")

    @property
    @pulumi.getter(name="isRehydrate")
    def is_rehydrate(self) -> Optional[bool]:
        """Flag to identify that deferred deleted DS is to be moved into Pause state"""
        return pulumi.get(self, "is_rehydrate")

    @property
    @pulumi.getter(name="isScheduledForDeferredDelete")
    def is_scheduled_for_deferred_delete(self) -> Optional[bool]:
        """Flag to identify whether the DS is scheduled for deferred delete"""
        return pulumi.get(self, "is_scheduled_for_deferred_delete")

    @property
    @pulumi.getter(name="lastBackupStatus")
    def last_backup_status(self) -> Optional[str]:
        """Status of last backup operation."""
        return pulumi.get(self, "last_backup_status")

    @property
    @pulumi.getter(name="lastBackupTime")
    def last_backup_time(self) -> Optional[str]:
        """Timestamp of the last backup operation on this backup item."""
        return pulumi.get(self, "last_backup_time")

    @property
    @pulumi.getter(name="lastRecoveryPoint")
    def last_recovery_point(self) -> Optional[str]:
        """Timestamp when the last (latest) backup copy was created for this backup item."""
        return pulumi.get(self, "last_recovery_point")

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[str]:
        """ID of the backup policy with which this item is backed up."""
        return pulumi.get(self, "policy_id")

    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[str]:
        """Protected, ProtectionStopped, IRPending or ProtectionError"""
        return pulumi.get(self, "protection_state")

    @property
    @pulumi.getter(name="sourceResourceId")
    def source_resource_id(self) -> Optional[str]:
        """ARM ID of the resource to be backed up."""
        return pulumi.get(self, "source_resource_id")

    @property
    @pulumi.getter(name="workloadType")
    def workload_type(self) -> Optional[str]:
        """Type of workload this item represents."""
        return pulumi.get(self, "workload_type")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute; fall back to the input."""
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class MabProtectionPolicyResponse(dict):
    """
    The backup policy for the file or folder container.
    """
    def __init__(__self__, *,
                 backup_management_type: Optional[str] = None,
                 protected_items_count: Optional[int] = None,
                 retention_policy: Optional[Any] = None,
                 schedule_policy: Optional[Any] = None):
        """
        The backup policy for the file or folder container.
        :param str backup_management_type: This property is used as the discriminator for deciding the specific types in the polymorphic chain of types.
        :param int protected_items_count: The number of items associated with this policy.
        :param Union['LongTermRetentionPolicyResponseArgs', 'SimpleRetentionPolicyResponseArgs'] retention_policy: The details specified in the Retention policy.
        :param Union['LongTermSchedulePolicyResponseArgs', 'SimpleSchedulePolicyResponseArgs'] schedule_policy: The schedule specified in the backup policy.
        """
        if backup_management_type is not None:
            # Discriminator: when supplied, the stored value is always the
            # fixed literal 'MAB', not the argument (generated-code convention).
            pulumi.set(__self__, "backup_management_type", 'MAB')
        # Only store the remaining optional fields that were actually supplied.
        for attr, value in [
            ("protected_items_count", protected_items_count),
            ("retention_policy", retention_policy),
            ("schedule_policy", schedule_policy),
        ]:
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="backupManagementType")
    def backup_management_type(self) -> Optional[str]:
        """This property is used as the discriminator for deciding the specific types in the polymorphic chain of types."""
        return pulumi.get(self, "backup_management_type")

    @property
    @pulumi.getter(name="protectedItemsCount")
    def protected_items_count(self) -> Optional[int]:
        """The number of items associated with this policy."""
        return pulumi.get(self, "protected_items_count")

    @property
    @pulumi.getter(name="retentionPolicy")
    def retention_policy(self) -> Optional[Any]:
        """The details specified in the Retention policy."""
        return pulumi.get(self, "retention_policy")

    @property
    @pulumi.getter(name="schedulePolicy")
    def schedule_policy(self) -> Optional[Any]:
        """The schedule specified in the backup policy."""
        return pulumi.get(self, "schedule_policy")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute; fall back to the input."""
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class MasterTargetServerResponse(dict):
    """
    Details of a Master Target Server.
    """
    def __init__(__self__, *,
                 agent_expiry_date: Optional[str] = None,
                 agent_version: Optional[str] = None,
                 agent_version_details: Optional['outputs.VersionDetailsResponse'] = None,
                 data_stores: Optional[Sequence['outputs.DataStoreResponse']] = None,
                 disk_count: Optional[int] = None,
                 health_errors: Optional[Sequence['outputs.HealthErrorResponse']] = None,
                 id: Optional[str] = None,
                 ip_address: Optional[str] = None,
                 last_heartbeat: Optional[str] = None,
                 mars_agent_expiry_date: Optional[str] = None,
                 mars_agent_version: Optional[str] = None,
                 mars_agent_version_details: Optional['outputs.VersionDetailsResponse'] = None,
                 name: Optional[str] = None,
                 os_type: Optional[str] = None,
                 os_version: Optional[str] = None,
                 retention_volumes: Optional[Sequence['outputs.RetentionVolumeResponse']] = None,
                 validation_errors: Optional[Sequence['outputs.HealthErrorResponse']] = None,
                 version_status: Optional[str] = None):
        """
        Details of a Master Target Server.
        :param str agent_expiry_date: Agent expiry date.
        :param str agent_version: The version of the scout component on the server.
        :param 'VersionDetailsResponseArgs' agent_version_details: Agent version details.
        :param Sequence['DataStoreResponseArgs'] data_stores: The list of data stores in the fabric.
        :param int disk_count: Disk count of the master target.
        :param Sequence['HealthErrorResponseArgs'] health_errors: Health errors.
        :param str id: The server Id.
        :param str ip_address: The IP address of the server.
        :param str last_heartbeat: The last heartbeat received from the server.
        :param str mars_agent_expiry_date: MARS agent expiry date.
        :param str mars_agent_version: MARS agent version.
        :param 'VersionDetailsResponseArgs' mars_agent_version_details: Mars agent version details.
        :param str name: The server name.
        :param str os_type: The OS type of the server.
        :param str os_version: OS Version of the master target.
        :param Sequence['RetentionVolumeResponseArgs'] retention_volumes: The retention volumes of Master target Server.
        :param Sequence['HealthErrorResponseArgs'] validation_errors: Validation errors.
        :param str version_status: Version status
        """
        # Only store the fields the service actually returned.
        for attr, value in [
            ("agent_expiry_date", agent_expiry_date),
            ("agent_version", agent_version),
            ("agent_version_details", agent_version_details),
            ("data_stores", data_stores),
            ("disk_count", disk_count),
            ("health_errors", health_errors),
            ("id", id),
            ("ip_address", ip_address),
            ("last_heartbeat", last_heartbeat),
            ("mars_agent_expiry_date", mars_agent_expiry_date),
            ("mars_agent_version", mars_agent_version),
            ("mars_agent_version_details", mars_agent_version_details),
            ("name", name),
            ("os_type", os_type),
            ("os_version", os_version),
            ("retention_volumes", retention_volumes),
            ("validation_errors", validation_errors),
            ("version_status", version_status),
        ]:
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="agentExpiryDate")
    def agent_expiry_date(self) -> Optional[str]:
        """Agent expiry date."""
        return pulumi.get(self, "agent_expiry_date")

    @property
    @pulumi.getter(name="agentVersion")
    def agent_version(self) -> Optional[str]:
        """The version of the scout component on the server."""
        return pulumi.get(self, "agent_version")

    @property
    @pulumi.getter(name="agentVersionDetails")
    def agent_version_details(self) -> Optional['outputs.VersionDetailsResponse']:
        """Agent version details."""
        return pulumi.get(self, "agent_version_details")

    @property
    @pulumi.getter(name="dataStores")
    def data_stores(self) -> Optional[Sequence['outputs.DataStoreResponse']]:
        """The list of data stores in the fabric."""
        return pulumi.get(self, "data_stores")

    @property
    @pulumi.getter(name="diskCount")
    def disk_count(self) -> Optional[int]:
        """Disk count of the master target."""
        return pulumi.get(self, "disk_count")

    @property
    @pulumi.getter(name="healthErrors")
    def health_errors(self) -> Optional[Sequence['outputs.HealthErrorResponse']]:
        """Health errors."""
        return pulumi.get(self, "health_errors")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """The server Id."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[str]:
        """The IP address of the server."""
        return pulumi.get(self, "ip_address")

    @property
    @pulumi.getter(name="lastHeartbeat")
    def last_heartbeat(self) -> Optional[str]:
        """The last heartbeat received from the server."""
        return pulumi.get(self, "last_heartbeat")

    @property
    @pulumi.getter(name="marsAgentExpiryDate")
    def mars_agent_expiry_date(self) -> Optional[str]:
        """MARS agent expiry date."""
        return pulumi.get(self, "mars_agent_expiry_date")

    @property
    @pulumi.getter(name="marsAgentVersion")
    def mars_agent_version(self) -> Optional[str]:
        """MARS agent version."""
        return pulumi.get(self, "mars_agent_version")

    @property
    @pulumi.getter(name="marsAgentVersionDetails")
    def mars_agent_version_details(self) -> Optional['outputs.VersionDetailsResponse']:
        """Mars agent version details."""
        return pulumi.get(self, "mars_agent_version_details")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """The server name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[str]:
        """The OS type of the server."""
        return pulumi.get(self, "os_type")

    @property
    @pulumi.getter(name="osVersion")
    def os_version(self) -> Optional[str]:
        """OS Version of the master target."""
        return pulumi.get(self, "os_version")

    @property
    @pulumi.getter(name="retentionVolumes")
    def retention_volumes(self) -> Optional[Sequence['outputs.RetentionVolumeResponse']]:
        """The retention volumes of Master target Server."""
        return pulumi.get(self, "retention_volumes")

    @property
    @pulumi.getter(name="validationErrors")
    def validation_errors(self) -> Optional[Sequence['outputs.HealthErrorResponse']]:
        """Validation errors."""
        return pulumi.get(self, "validation_errors")

    @property
    @pulumi.getter(name="versionStatus")
    def version_status(self) -> Optional[str]:
        """Version status"""
        return pulumi.get(self, "version_status")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute; fall back to the input."""
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class MigrationItemPropertiesResponse(dict):
    """
    Migration item properties.
    """
    def __init__(__self__, *,
                 allowed_operations: Sequence[str],
                 current_job: 'outputs.CurrentJobDetailsResponse',
                 health: str,
                 health_errors: Sequence['outputs.HealthErrorResponse'],
                 machine_name: str,
                 migration_state: str,
                 migration_state_description: str,
                 policy_friendly_name: str,
                 policy_id: str,
                 recovery_services_provider_id: str,
                 test_migrate_state: str,
                 test_migrate_state_description: str,
                 provider_specific_details: Optional['outputs.VMwareCbtMigrationDetailsResponse'] = None):
        """
        Migration item properties.
        :param Sequence[str] allowed_operations: The allowed operations on the migration item, based on the current migration state of the item.
        :param 'CurrentJobDetailsResponseArgs' current_job: The current job details.
        :param str health: The consolidated health.
        :param Sequence['HealthErrorResponseArgs'] health_errors: The list of health errors.
        :param str machine_name: The on-premise virtual machine name.
        :param str migration_state: The migration status.
        :param str migration_state_description: The migration state description.
        :param str policy_friendly_name: The name of policy governing this item.
        :param str policy_id: The ARM Id of policy governing this item.
        :param str recovery_services_provider_id: The recovery services provider ARM Id.
        :param str test_migrate_state: The test migrate state.
        :param str test_migrate_state_description: The test migrate state description.
        :param 'VMwareCbtMigrationDetailsResponseArgs' provider_specific_details: The migration provider custom settings.
        """
        # Required fields are stored unconditionally, in declaration order.
        for attr, value in [
            ("allowed_operations", allowed_operations),
            ("current_job", current_job),
            ("health", health),
            ("health_errors", health_errors),
            ("machine_name", machine_name),
            ("migration_state", migration_state),
            ("migration_state_description", migration_state_description),
            ("policy_friendly_name", policy_friendly_name),
            ("policy_id", policy_id),
            ("recovery_services_provider_id", recovery_services_provider_id),
            ("test_migrate_state", test_migrate_state),
            ("test_migrate_state_description", test_migrate_state_description),
        ]:
            pulumi.set(__self__, attr, value)
        # The provider-specific payload is optional and omitted when absent.
        if provider_specific_details is not None:
            pulumi.set(__self__, "provider_specific_details", provider_specific_details)

    @property
    @pulumi.getter(name="allowedOperations")
    def allowed_operations(self) -> Sequence[str]:
        """The allowed operations on the migration item, based on the current migration state of the item."""
        return pulumi.get(self, "allowed_operations")

    @property
    @pulumi.getter(name="currentJob")
    def current_job(self) -> 'outputs.CurrentJobDetailsResponse':
        """The current job details."""
        return pulumi.get(self, "current_job")

    @property
    @pulumi.getter
    def health(self) -> str:
        """The consolidated health."""
        return pulumi.get(self, "health")

    @property
    @pulumi.getter(name="healthErrors")
    def health_errors(self) -> Sequence['outputs.HealthErrorResponse']:
        """The list of health errors."""
        return pulumi.get(self, "health_errors")

    @property
    @pulumi.getter(name="machineName")
    def machine_name(self) -> str:
        """The on-premise virtual machine name."""
        return pulumi.get(self, "machine_name")

    @property
    @pulumi.getter(name="migrationState")
    def migration_state(self) -> str:
        """The migration status."""
        return pulumi.get(self, "migration_state")

    @property
    @pulumi.getter(name="migrationStateDescription")
    def migration_state_description(self) -> str:
        """The migration state description."""
        return pulumi.get(self, "migration_state_description")

    @property
    @pulumi.getter(name="policyFriendlyName")
    def policy_friendly_name(self) -> str:
        """The name of policy governing this item."""
        return pulumi.get(self, "policy_friendly_name")

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> str:
        """The ARM Id of policy governing this item."""
        return pulumi.get(self, "policy_id")

    @property
    @pulumi.getter(name="recoveryServicesProviderId")
    def recovery_services_provider_id(self) -> str:
        """The recovery services provider ARM Id."""
        return pulumi.get(self, "recovery_services_provider_id")

    @property
    @pulumi.getter(name="testMigrateState")
    def test_migrate_state(self) -> str:
        """The test migrate state."""
        return pulumi.get(self, "test_migrate_state")

    @property
    @pulumi.getter(name="testMigrateStateDescription")
    def test_migrate_state_description(self) -> str:
        """The test migrate state description."""
        return pulumi.get(self, "test_migrate_state_description")

    @property
    @pulumi.getter(name="providerSpecificDetails")
    def provider_specific_details(self) -> Optional['outputs.VMwareCbtMigrationDetailsResponse']:
        """The migration provider custom settings."""
        return pulumi.get(self, "provider_specific_details")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute; fall back to the input."""
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class MobilityServiceUpdateResponse(dict):
    """
    The Mobility Service update details.
    """
    def __init__(__self__, *,
                 os_type: Optional[str] = None,
                 reboot_status: Optional[str] = None,
                 version: Optional[str] = None):
        """
        The Mobility Service update details.
        :param str os_type: The OS type.
        :param str reboot_status: The reboot status of the update - whether it is required or not.
        :param str version: The version of the latest update.
        """
        # Only store the fields that were actually supplied.
        for attr, value in [
            ("os_type", os_type),
            ("reboot_status", reboot_status),
            ("version", version),
        ]:
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[str]:
        """The OS type."""
        return pulumi.get(self, "os_type")

    @property
    @pulumi.getter(name="rebootStatus")
    def reboot_status(self) -> Optional[str]:
        """The reboot status of the update - whether it is required or not."""
        return pulumi.get(self, "reboot_status")

    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """The version of the latest update."""
        return pulumi.get(self, "version")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute; fall back to the input."""
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class MonthlyRetentionScheduleResponse(dict):
    """
    The monthly retention schedule.
    """
    def __init__(__self__, *,
                 retention_duration: Optional['outputs.RetentionDurationResponse'] = None,
                 retention_schedule_daily: Optional['outputs.DailyRetentionFormatResponse'] = None,
                 retention_schedule_format_type: Optional[str] = None,
                 retention_schedule_weekly: Optional['outputs.WeeklyRetentionFormatResponse'] = None,
                 retention_times: Optional[Sequence[str]] = None):
        """
        The monthly retention schedule.
        :param 'RetentionDurationResponseArgs' retention_duration: Retention duration of the retention policy.
        :param 'DailyRetentionFormatResponseArgs' retention_schedule_daily: Daily retention format for the monthly retention policy.
        :param str retention_schedule_format_type: Retention schedule format type for monthly retention policy.
        :param 'WeeklyRetentionFormatResponseArgs' retention_schedule_weekly: Weekly retention format for the monthly retention policy.
        :param Sequence[str] retention_times: Retention times of the retention policy.
        """
        # Only store the fields that were actually supplied.
        for attr, value in [
            ("retention_duration", retention_duration),
            ("retention_schedule_daily", retention_schedule_daily),
            ("retention_schedule_format_type", retention_schedule_format_type),
            ("retention_schedule_weekly", retention_schedule_weekly),
            ("retention_times", retention_times),
        ]:
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="retentionDuration")
    def retention_duration(self) -> Optional['outputs.RetentionDurationResponse']:
        """Retention duration of the retention policy."""
        return pulumi.get(self, "retention_duration")

    @property
    @pulumi.getter(name="retentionScheduleDaily")
    def retention_schedule_daily(self) -> Optional['outputs.DailyRetentionFormatResponse']:
        """Daily retention format for the monthly retention policy."""
        return pulumi.get(self, "retention_schedule_daily")

    @property
    @pulumi.getter(name="retentionScheduleFormatType")
    def retention_schedule_format_type(self) -> Optional[str]:
        """Retention schedule format type for monthly retention policy."""
        return pulumi.get(self, "retention_schedule_format_type")

    @property
    @pulumi.getter(name="retentionScheduleWeekly")
    def retention_schedule_weekly(self) -> Optional['outputs.WeeklyRetentionFormatResponse']:
        """Weekly retention format for the monthly retention policy."""
        return pulumi.get(self, "retention_schedule_weekly")

    @property
    @pulumi.getter(name="retentionTimes")
    def retention_times(self) -> Optional[Sequence[str]]:
        """Retention times of the retention policy."""
        return pulumi.get(self, "retention_times")

    def _translate_property(self, prop):
        """Map a camelCase wire name to its snake_case attribute; fall back to the input."""
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
@pulumi.output_type
class NetworkMappingPropertiesResponse(dict):
    """
    Properties of a network mapping.
    """
    def __init__(__self__, *,
                 fabric_specific_settings: Optional[Any] = None,
                 primary_fabric_friendly_name: Optional[str] = None,
                 primary_network_friendly_name: Optional[str] = None,
                 primary_network_id: Optional[str] = None,
                 recovery_fabric_arm_id: Optional[str] = None,
                 recovery_fabric_friendly_name: Optional[str] = None,
                 recovery_network_friendly_name: Optional[str] = None,
                 recovery_network_id: Optional[str] = None,
                 state: Optional[str] = None):
        """
        Properties of a network mapping.

        :param Union['AzureToAzureNetworkMappingSettingsResponseArgs', 'VmmToAzureNetworkMappingSettingsResponseArgs', 'VmmToVmmNetworkMappingSettingsResponseArgs'] fabric_specific_settings: The fabric specific settings.
        :param str primary_fabric_friendly_name: The primary fabric friendly name.
        :param str primary_network_friendly_name: The primary network friendly name.
        :param str primary_network_id: The primary network id for network mapping.
        :param str recovery_fabric_arm_id: The recovery fabric ARM id.
        :param str recovery_fabric_friendly_name: The recovery fabric friendly name.
        :param str recovery_network_friendly_name: The recovery network friendly name.
        :param str recovery_network_id: The recovery network id for network mapping.
        :param str state: The pairing state for network mapping.
        """
        # Every field is optional on the wire; store only the ones supplied.
        for attr, value in (
                ("fabric_specific_settings", fabric_specific_settings),
                ("primary_fabric_friendly_name", primary_fabric_friendly_name),
                ("primary_network_friendly_name", primary_network_friendly_name),
                ("primary_network_id", primary_network_id),
                ("recovery_fabric_arm_id", recovery_fabric_arm_id),
                ("recovery_fabric_friendly_name", recovery_fabric_friendly_name),
                ("recovery_network_friendly_name", recovery_network_friendly_name),
                ("recovery_network_id", recovery_network_id),
                ("state", state)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="fabricSpecificSettings")
    def fabric_specific_settings(self) -> Optional[Any]:
        """The fabric specific settings."""
        return pulumi.get(self, "fabric_specific_settings")

    @property
    @pulumi.getter(name="primaryFabricFriendlyName")
    def primary_fabric_friendly_name(self) -> Optional[str]:
        """The primary fabric friendly name."""
        return pulumi.get(self, "primary_fabric_friendly_name")

    @property
    @pulumi.getter(name="primaryNetworkFriendlyName")
    def primary_network_friendly_name(self) -> Optional[str]:
        """The primary network friendly name."""
        return pulumi.get(self, "primary_network_friendly_name")

    @property
    @pulumi.getter(name="primaryNetworkId")
    def primary_network_id(self) -> Optional[str]:
        """The primary network id for network mapping."""
        return pulumi.get(self, "primary_network_id")

    @property
    @pulumi.getter(name="recoveryFabricArmId")
    def recovery_fabric_arm_id(self) -> Optional[str]:
        """The recovery fabric ARM id."""
        return pulumi.get(self, "recovery_fabric_arm_id")

    @property
    @pulumi.getter(name="recoveryFabricFriendlyName")
    def recovery_fabric_friendly_name(self) -> Optional[str]:
        """The recovery fabric friendly name."""
        return pulumi.get(self, "recovery_fabric_friendly_name")

    @property
    @pulumi.getter(name="recoveryNetworkFriendlyName")
    def recovery_network_friendly_name(self) -> Optional[str]:
        """The recovery network friendly name."""
        return pulumi.get(self, "recovery_network_friendly_name")

    @property
    @pulumi.getter(name="recoveryNetworkId")
    def recovery_network_id(self) -> Optional[str]:
        """The recovery network id for network mapping."""
        return pulumi.get(self, "recovery_network_id")

    @property
    @pulumi.getter
    def state(self) -> Optional[str]:
        """The pairing state for network mapping."""
        return pulumi.get(self, "state")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name, input as fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class OSDetailsResponse(dict):
    """
    Disk Details.
    """
    def __init__(__self__, *,
                 o_s_major_version: Optional[str] = None,
                 o_s_minor_version: Optional[str] = None,
                 o_s_version: Optional[str] = None,
                 os_edition: Optional[str] = None,
                 os_type: Optional[str] = None,
                 product_type: Optional[str] = None):
        """
        Disk Details.

        :param str o_s_major_version: The OS Major Version.
        :param str o_s_minor_version: The OS Minor Version.
        :param str o_s_version: The OS Version.
        :param str os_edition: The OSEdition.
        :param str os_type: VM Disk details.
        :param str product_type: Product type.
        """
        # All fields are optional; persist only those that were provided.
        for attr, value in (
                ("o_s_major_version", o_s_major_version),
                ("o_s_minor_version", o_s_minor_version),
                ("o_s_version", o_s_version),
                ("os_edition", os_edition),
                ("os_type", os_type),
                ("product_type", product_type)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="oSMajorVersion")
    def o_s_major_version(self) -> Optional[str]:
        """The OS Major Version."""
        return pulumi.get(self, "o_s_major_version")

    @property
    @pulumi.getter(name="oSMinorVersion")
    def o_s_minor_version(self) -> Optional[str]:
        """The OS Minor Version."""
        return pulumi.get(self, "o_s_minor_version")

    @property
    @pulumi.getter(name="oSVersion")
    def o_s_version(self) -> Optional[str]:
        """The OS Version."""
        return pulumi.get(self, "o_s_version")

    @property
    @pulumi.getter(name="osEdition")
    def os_edition(self) -> Optional[str]:
        """The OSEdition."""
        return pulumi.get(self, "os_edition")

    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[str]:
        """VM Disk details."""
        return pulumi.get(self, "os_type")

    @property
    @pulumi.getter(name="productType")
    def product_type(self) -> Optional[str]:
        """Product type."""
        return pulumi.get(self, "product_type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name, input as fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class OSDiskDetailsResponse(dict):
    """
    Details of the OS Disk.
    """
    def __init__(__self__, *,
                 os_type: Optional[str] = None,
                 os_vhd_id: Optional[str] = None,
                 vhd_name: Optional[str] = None):
        """
        Details of the OS Disk.

        :param str os_type: The type of the OS on the VM.
        :param str os_vhd_id: The id of the disk containing the OS.
        :param str vhd_name: The OS disk VHD name.
        """
        # All fields are optional; persist only those that were provided.
        for attr, value in (
                ("os_type", os_type),
                ("os_vhd_id", os_vhd_id),
                ("vhd_name", vhd_name)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[str]:
        """The type of the OS on the VM."""
        return pulumi.get(self, "os_type")

    @property
    @pulumi.getter(name="osVhdId")
    def os_vhd_id(self) -> Optional[str]:
        """The id of the disk containing the OS."""
        return pulumi.get(self, "os_vhd_id")

    @property
    @pulumi.getter(name="vhdName")
    def vhd_name(self) -> Optional[str]:
        """The OS disk VHD name."""
        return pulumi.get(self, "vhd_name")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name, input as fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class PolicyPropertiesResponse(dict):
    """
    Protection profile custom data details.
    """
    def __init__(__self__, *,
                 friendly_name: Optional[str] = None,
                 provider_specific_details: Optional[Any] = None):
        """
        Protection profile custom data details.

        :param str friendly_name: The FriendlyName.
        :param Union['A2APolicyDetailsResponseArgs', 'HyperVReplicaAzurePolicyDetailsResponseArgs', 'HyperVReplicaBasePolicyDetailsResponseArgs', 'HyperVReplicaBluePolicyDetailsResponseArgs', 'HyperVReplicaPolicyDetailsResponseArgs', 'InMageAzureV2PolicyDetailsResponseArgs', 'InMageBasePolicyDetailsResponseArgs', 'InMagePolicyDetailsResponseArgs', 'InMageRcmPolicyDetailsResponseArgs', 'RcmAzureMigrationPolicyDetailsResponseArgs', 'VmwareCbtPolicyDetailsResponseArgs'] provider_specific_details: The ReplicationChannelSetting.
        """
        # Both fields are optional; persist only those that were provided.
        for attr, value in (
                ("friendly_name", friendly_name),
                ("provider_specific_details", provider_specific_details)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """The FriendlyName."""
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="providerSpecificDetails")
    def provider_specific_details(self) -> Optional[Any]:
        """The ReplicationChannelSetting."""
        return pulumi.get(self, "provider_specific_details")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name, input as fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class PrivateEndpointConnectionResponse(dict):
    """
    Private Endpoint Connection Response Properties.
    """
    def __init__(__self__, *,
                 private_endpoint: 'outputs.PrivateEndpointResponse',
                 private_link_service_connection_state: 'outputs.PrivateLinkServiceConnectionStateResponse',
                 provisioning_state: str):
        """
        Private Endpoint Connection Response Properties.

        :param 'PrivateEndpointResponseArgs' private_endpoint: The Private Endpoint network resource that is linked to the Private Endpoint connection.
        :param 'PrivateLinkServiceConnectionStateResponseArgs' private_link_service_connection_state: Gets or sets private link service connection state.
        :param str provisioning_state: Gets or sets provisioning state of the private endpoint connection.
        """
        # All three fields are required and stored unconditionally.
        for attr, value in (
                ("private_endpoint", private_endpoint),
                ("private_link_service_connection_state", private_link_service_connection_state),
                ("provisioning_state", provisioning_state)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="privateEndpoint")
    def private_endpoint(self) -> 'outputs.PrivateEndpointResponse':
        """The Private Endpoint network resource that is linked to the Private Endpoint connection."""
        return pulumi.get(self, "private_endpoint")

    @property
    @pulumi.getter(name="privateLinkServiceConnectionState")
    def private_link_service_connection_state(self) -> 'outputs.PrivateLinkServiceConnectionStateResponse':
        """Gets or sets private link service connection state."""
        return pulumi.get(self, "private_link_service_connection_state")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """Gets or sets provisioning state of the private endpoint connection."""
        return pulumi.get(self, "provisioning_state")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name, input as fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class PrivateEndpointConnectionVaultPropertiesResponse(dict):
    """
    Information to be stored in Vault properties as an element of privateEndpointConnections List.
    """
    def __init__(__self__, *,
                 id: str,
                 properties: 'outputs.PrivateEndpointConnectionResponse'):
        """
        Information to be stored in Vault properties as an element of privateEndpointConnections List.

        :param str id: Format of id subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.[Service]/{resource}/{resourceName}/privateEndpointConnections/{connectionName}.
        :param 'PrivateEndpointConnectionResponseArgs' properties: Private Endpoint Connection Response Properties.
        """
        # Both fields are required and stored unconditionally.
        for attr, value in (("id", id), ("properties", properties)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def id(self) -> str:
        """Format of id subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.[Service]/{resource}/{resourceName}/privateEndpointConnections/{connectionName}."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter
    def properties(self) -> 'outputs.PrivateEndpointConnectionResponse':
        """Private Endpoint Connection Response Properties."""
        return pulumi.get(self, "properties")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name, input as fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class PrivateEndpointResponse(dict):
    """
    The Private Endpoint network resource that is linked to the Private Endpoint connection.
    """
    def __init__(__self__, *,
                 id: str):
        """
        The Private Endpoint network resource that is linked to the Private Endpoint connection.

        :param str id: Gets or sets id.
        """
        # The id is required and stored unconditionally.
        pulumi.set(__self__, "id", id)

    @property
    @pulumi.getter
    def id(self) -> str:
        """Gets or sets id."""
        return pulumi.get(self, "id")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name, input as fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class PrivateLinkServiceConnectionStateResponse(dict):
    """
    Gets or sets private link service connection state.
    """
    def __init__(__self__, *,
                 actions_required: str,
                 description: str,
                 status: str):
        """
        Gets or sets private link service connection state.

        :param str actions_required: Gets or sets actions required.
        :param str description: Gets or sets description.
        :param str status: Gets or sets the status.
        """
        # All three fields are required and stored unconditionally.
        for attr, value in (
                ("actions_required", actions_required),
                ("description", description),
                ("status", status)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="actionsRequired")
    def actions_required(self) -> str:
        """Gets or sets actions required."""
        return pulumi.get(self, "actions_required")

    @property
    @pulumi.getter
    def description(self) -> str:
        """Gets or sets description."""
        return pulumi.get(self, "description")

    @property
    @pulumi.getter
    def status(self) -> str:
        """Gets or sets the status."""
        return pulumi.get(self, "status")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name, input as fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class ProcessServerDetailsResponse(dict):
    """
    Process server details.
    """
    def __init__(__self__, *,
                 available_memory_in_bytes: int,
                 available_space_in_bytes: int,
                 free_space_percentage: float,
                 health: str,
                 health_errors: Sequence['outputs.HealthErrorResponse'],
                 historic_health: str,
                 id: str,
                 last_heartbeat_utc: str,
                 memory_usage_percentage: float,
                 name: str,
                 processor_usage_percentage: float,
                 throughput_in_bytes: int,
                 throughput_upload_pending_data_in_bytes: int,
                 total_memory_in_bytes: int,
                 total_space_in_bytes: int,
                 used_memory_in_bytes: int,
                 used_space_in_bytes: int,
                 version: str):
        """
        Process server details.

        :param int available_memory_in_bytes: The available memory.
        :param int available_space_in_bytes: The available disk space.
        :param float free_space_percentage: The free disk space percentage.
        :param str health: The health of the process server.
        :param Sequence['HealthErrorResponseArgs'] health_errors: The health errors.
        :param str historic_health: The historic health of the process server based on the health in last 24 hours.
        :param str id: The process server Id.
        :param str last_heartbeat_utc: The last heartbeat received from the process server.
        :param float memory_usage_percentage: The memory usage percentage.
        :param str name: The process server name.
        :param float processor_usage_percentage: The processor usage percentage.
        :param int throughput_in_bytes: The throughput in bytes.
        :param int throughput_upload_pending_data_in_bytes: The uploading pending data in bytes.
        :param int total_memory_in_bytes: The total memory.
        :param int total_space_in_bytes: The total disk space.
        :param int used_memory_in_bytes: The used memory.
        :param int used_space_in_bytes: The used disk space.
        :param str version: The process server version.
        """
        # Every field is required and stored unconditionally.
        for attr, value in (
                ("available_memory_in_bytes", available_memory_in_bytes),
                ("available_space_in_bytes", available_space_in_bytes),
                ("free_space_percentage", free_space_percentage),
                ("health", health),
                ("health_errors", health_errors),
                ("historic_health", historic_health),
                ("id", id),
                ("last_heartbeat_utc", last_heartbeat_utc),
                ("memory_usage_percentage", memory_usage_percentage),
                ("name", name),
                ("processor_usage_percentage", processor_usage_percentage),
                ("throughput_in_bytes", throughput_in_bytes),
                ("throughput_upload_pending_data_in_bytes", throughput_upload_pending_data_in_bytes),
                ("total_memory_in_bytes", total_memory_in_bytes),
                ("total_space_in_bytes", total_space_in_bytes),
                ("used_memory_in_bytes", used_memory_in_bytes),
                ("used_space_in_bytes", used_space_in_bytes),
                ("version", version)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="availableMemoryInBytes")
    def available_memory_in_bytes(self) -> int:
        """The available memory."""
        return pulumi.get(self, "available_memory_in_bytes")

    @property
    @pulumi.getter(name="availableSpaceInBytes")
    def available_space_in_bytes(self) -> int:
        """The available disk space."""
        return pulumi.get(self, "available_space_in_bytes")

    @property
    @pulumi.getter(name="freeSpacePercentage")
    def free_space_percentage(self) -> float:
        """The free disk space percentage."""
        return pulumi.get(self, "free_space_percentage")

    @property
    @pulumi.getter
    def health(self) -> str:
        """The health of the process server."""
        return pulumi.get(self, "health")

    @property
    @pulumi.getter(name="healthErrors")
    def health_errors(self) -> Sequence['outputs.HealthErrorResponse']:
        """The health errors."""
        return pulumi.get(self, "health_errors")

    @property
    @pulumi.getter(name="historicHealth")
    def historic_health(self) -> str:
        """The historic health of the process server based on the health in last 24 hours."""
        return pulumi.get(self, "historic_health")

    @property
    @pulumi.getter
    def id(self) -> str:
        """The process server Id."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="lastHeartbeatUtc")
    def last_heartbeat_utc(self) -> str:
        """The last heartbeat received from the process server."""
        return pulumi.get(self, "last_heartbeat_utc")

    @property
    @pulumi.getter(name="memoryUsagePercentage")
    def memory_usage_percentage(self) -> float:
        """The memory usage percentage."""
        return pulumi.get(self, "memory_usage_percentage")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The process server name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="processorUsagePercentage")
    def processor_usage_percentage(self) -> float:
        """The processor usage percentage."""
        return pulumi.get(self, "processor_usage_percentage")

    @property
    @pulumi.getter(name="throughputInBytes")
    def throughput_in_bytes(self) -> int:
        """The throughput in bytes."""
        return pulumi.get(self, "throughput_in_bytes")

    @property
    @pulumi.getter(name="throughputUploadPendingDataInBytes")
    def throughput_upload_pending_data_in_bytes(self) -> int:
        """The uploading pending data in bytes."""
        return pulumi.get(self, "throughput_upload_pending_data_in_bytes")

    @property
    @pulumi.getter(name="totalMemoryInBytes")
    def total_memory_in_bytes(self) -> int:
        """The total memory."""
        return pulumi.get(self, "total_memory_in_bytes")

    @property
    @pulumi.getter(name="totalSpaceInBytes")
    def total_space_in_bytes(self) -> int:
        """The total disk space."""
        return pulumi.get(self, "total_space_in_bytes")

    @property
    @pulumi.getter(name="usedMemoryInBytes")
    def used_memory_in_bytes(self) -> int:
        """The used memory."""
        return pulumi.get(self, "used_memory_in_bytes")

    @property
    @pulumi.getter(name="usedSpaceInBytes")
    def used_space_in_bytes(self) -> int:
        """The used disk space."""
        return pulumi.get(self, "used_space_in_bytes")

    @property
    @pulumi.getter
    def version(self) -> str:
        """The process server version."""
        return pulumi.get(self, "version")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name, input as fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class ProcessServerResponse(dict):
    """
    Details of the Process Server.
    """
    def __init__(__self__, *,
                 health: str,
                 mars_communication_status: str,
                 mars_registration_status: str,
                 ps_stats_refresh_time: str,
                 throughput_in_bytes: int,
                 throughput_in_m_bps: int,
                 throughput_status: str,
                 throughput_upload_pending_data_in_bytes: int,
                 agent_expiry_date: Optional[str] = None,
                 agent_version: Optional[str] = None,
                 agent_version_details: Optional['outputs.VersionDetailsResponse'] = None,
                 available_memory_in_bytes: Optional[int] = None,
                 available_space_in_bytes: Optional[int] = None,
                 cpu_load: Optional[str] = None,
                 cpu_load_status: Optional[str] = None,
                 friendly_name: Optional[str] = None,
                 health_errors: Optional[Sequence['outputs.HealthErrorResponse']] = None,
                 host_id: Optional[str] = None,
                 id: Optional[str] = None,
                 ip_address: Optional[str] = None,
                 last_heartbeat: Optional[str] = None,
                 machine_count: Optional[str] = None,
                 memory_usage_status: Optional[str] = None,
                 mobility_service_updates: Optional[Sequence['outputs.MobilityServiceUpdateResponse']] = None,
                 os_type: Optional[str] = None,
                 os_version: Optional[str] = None,
                 ps_service_status: Optional[str] = None,
                 replication_pair_count: Optional[str] = None,
                 space_usage_status: Optional[str] = None,
                 ssl_cert_expiry_date: Optional[str] = None,
                 ssl_cert_expiry_remaining_days: Optional[int] = None,
                 system_load: Optional[str] = None,
                 system_load_status: Optional[str] = None,
                 total_memory_in_bytes: Optional[int] = None,
                 total_space_in_bytes: Optional[int] = None,
                 version_status: Optional[str] = None):
        """
        Details of the Process Server.

        :param str health: The health of Process Server.
        :param str mars_communication_status: The MARS communication status.
        :param str mars_registration_status: The MARS registration status.
        :param str ps_stats_refresh_time: The process server stats refresh time.
        :param int throughput_in_bytes: The throughput in bytes.
        :param int throughput_in_m_bps: The throughput in MBps.
        :param str throughput_status: The throughput status.
        :param int throughput_upload_pending_data_in_bytes: The uploading pending data in bytes.
        :param str agent_expiry_date: Agent expiry date.
        :param str agent_version: The version of the scout component on the server.
        :param 'VersionDetailsResponseArgs' agent_version_details: The agent version details.
        :param int available_memory_in_bytes: The available memory.
        :param int available_space_in_bytes: The available space.
        :param str cpu_load: The percentage of the CPU load.
        :param str cpu_load_status: The CPU load status.
        :param str friendly_name: The Process Server's friendly name.
        :param Sequence['HealthErrorResponseArgs'] health_errors: Health errors.
        :param str host_id: The agent generated Id.
        :param str id: The Process Server Id.
        :param str ip_address: The IP address of the server.
        :param str last_heartbeat: The last heartbeat received from the server.
        :param str machine_count: The servers configured with this PS.
        :param str memory_usage_status: The memory usage status.
        :param Sequence['MobilityServiceUpdateResponseArgs'] mobility_service_updates: The list of the mobility service updates available on the Process Server.
        :param str os_type: The OS type of the server.
        :param str os_version: OS Version of the process server. Note: This will get populated if user has CS version greater than 9.12.0.0.
        :param str ps_service_status: The PS service status.
        :param str replication_pair_count: The number of replication pairs configured in this PS.
        :param str space_usage_status: The space usage status.
        :param str ssl_cert_expiry_date: The PS SSL cert expiry date.
        :param int ssl_cert_expiry_remaining_days: CS SSL cert expiry date.
        :param str system_load: The percentage of the system load.
        :param str system_load_status: The system load status.
        :param int total_memory_in_bytes: The total memory.
        :param int total_space_in_bytes: The total space.
        :param str version_status: Version status
        """
        # Required fields are stored unconditionally.
        for attr, value in (
                ("health", health),
                ("mars_communication_status", mars_communication_status),
                ("mars_registration_status", mars_registration_status),
                ("ps_stats_refresh_time", ps_stats_refresh_time),
                ("throughput_in_bytes", throughput_in_bytes),
                ("throughput_in_m_bps", throughput_in_m_bps),
                ("throughput_status", throughput_status),
                ("throughput_upload_pending_data_in_bytes", throughput_upload_pending_data_in_bytes)):
            pulumi.set(__self__, attr, value)
        # Optional fields are stored only when a value was supplied.
        for attr, value in (
                ("agent_expiry_date", agent_expiry_date),
                ("agent_version", agent_version),
                ("agent_version_details", agent_version_details),
                ("available_memory_in_bytes", available_memory_in_bytes),
                ("available_space_in_bytes", available_space_in_bytes),
                ("cpu_load", cpu_load),
                ("cpu_load_status", cpu_load_status),
                ("friendly_name", friendly_name),
                ("health_errors", health_errors),
                ("host_id", host_id),
                ("id", id),
                ("ip_address", ip_address),
                ("last_heartbeat", last_heartbeat),
                ("machine_count", machine_count),
                ("memory_usage_status", memory_usage_status),
                ("mobility_service_updates", mobility_service_updates),
                ("os_type", os_type),
                ("os_version", os_version),
                ("ps_service_status", ps_service_status),
                ("replication_pair_count", replication_pair_count),
                ("space_usage_status", space_usage_status),
                ("ssl_cert_expiry_date", ssl_cert_expiry_date),
                ("ssl_cert_expiry_remaining_days", ssl_cert_expiry_remaining_days),
                ("system_load", system_load),
                ("system_load_status", system_load_status),
                ("total_memory_in_bytes", total_memory_in_bytes),
                ("total_space_in_bytes", total_space_in_bytes),
                ("version_status", version_status)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter
    def health(self) -> str:
        """The health of Process Server."""
        return pulumi.get(self, "health")

    @property
    @pulumi.getter(name="marsCommunicationStatus")
    def mars_communication_status(self) -> str:
        """The MARS communication status."""
        return pulumi.get(self, "mars_communication_status")

    @property
    @pulumi.getter(name="marsRegistrationStatus")
    def mars_registration_status(self) -> str:
        """The MARS registration status."""
        return pulumi.get(self, "mars_registration_status")

    @property
    @pulumi.getter(name="psStatsRefreshTime")
    def ps_stats_refresh_time(self) -> str:
        """The process server stats refresh time."""
        return pulumi.get(self, "ps_stats_refresh_time")

    @property
    @pulumi.getter(name="throughputInBytes")
    def throughput_in_bytes(self) -> int:
        """The throughput in bytes."""
        return pulumi.get(self, "throughput_in_bytes")

    @property
    @pulumi.getter(name="throughputInMBps")
    def throughput_in_m_bps(self) -> int:
        """The throughput in MBps."""
        return pulumi.get(self, "throughput_in_m_bps")

    @property
    @pulumi.getter(name="throughputStatus")
    def throughput_status(self) -> str:
        """The throughput status."""
        return pulumi.get(self, "throughput_status")

    @property
    @pulumi.getter(name="throughputUploadPendingDataInBytes")
    def throughput_upload_pending_data_in_bytes(self) -> int:
        """The uploading pending data in bytes."""
        return pulumi.get(self, "throughput_upload_pending_data_in_bytes")

    @property
    @pulumi.getter(name="agentExpiryDate")
    def agent_expiry_date(self) -> Optional[str]:
        """Agent expiry date."""
        return pulumi.get(self, "agent_expiry_date")

    @property
    @pulumi.getter(name="agentVersion")
    def agent_version(self) -> Optional[str]:
        """The version of the scout component on the server."""
        return pulumi.get(self, "agent_version")

    @property
    @pulumi.getter(name="agentVersionDetails")
    def agent_version_details(self) -> Optional['outputs.VersionDetailsResponse']:
        """The agent version details."""
        return pulumi.get(self, "agent_version_details")

    @property
    @pulumi.getter(name="availableMemoryInBytes")
    def available_memory_in_bytes(self) -> Optional[int]:
        """The available memory."""
        return pulumi.get(self, "available_memory_in_bytes")

    @property
    @pulumi.getter(name="availableSpaceInBytes")
    def available_space_in_bytes(self) -> Optional[int]:
        """The available space."""
        return pulumi.get(self, "available_space_in_bytes")

    @property
    @pulumi.getter(name="cpuLoad")
    def cpu_load(self) -> Optional[str]:
        """The percentage of the CPU load."""
        return pulumi.get(self, "cpu_load")

    @property
    @pulumi.getter(name="cpuLoadStatus")
    def cpu_load_status(self) -> Optional[str]:
        """The CPU load status."""
        return pulumi.get(self, "cpu_load_status")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """The Process Server's friendly name."""
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="healthErrors")
    def health_errors(self) -> Optional[Sequence['outputs.HealthErrorResponse']]:
        """Health errors."""
        return pulumi.get(self, "health_errors")

    @property
    @pulumi.getter(name="hostId")
    def host_id(self) -> Optional[str]:
        """The agent generated Id."""
        return pulumi.get(self, "host_id")

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """The Process Server Id."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[str]:
        """The IP address of the server."""
        return pulumi.get(self, "ip_address")

    @property
    @pulumi.getter(name="lastHeartbeat")
    def last_heartbeat(self) -> Optional[str]:
        """The last heartbeat received from the server."""
        return pulumi.get(self, "last_heartbeat")

    @property
    @pulumi.getter(name="machineCount")
    def machine_count(self) -> Optional[str]:
        """The servers configured with this PS."""
        return pulumi.get(self, "machine_count")

    @property
    @pulumi.getter(name="memoryUsageStatus")
    def memory_usage_status(self) -> Optional[str]:
        """The memory usage status."""
        return pulumi.get(self, "memory_usage_status")

    @property
    @pulumi.getter(name="mobilityServiceUpdates")
    def mobility_service_updates(self) -> Optional[Sequence['outputs.MobilityServiceUpdateResponse']]:
        """The list of the mobility service updates available on the Process Server."""
        return pulumi.get(self, "mobility_service_updates")

    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> Optional[str]:
        """The OS type of the server."""
        return pulumi.get(self, "os_type")

    @property
    @pulumi.getter(name="osVersion")
    def os_version(self) -> Optional[str]:
        """OS Version of the process server. Note: This will get populated if user has CS version greater than 9.12.0.0."""
        return pulumi.get(self, "os_version")

    @property
    @pulumi.getter(name="psServiceStatus")
    def ps_service_status(self) -> Optional[str]:
        """The PS service status."""
        return pulumi.get(self, "ps_service_status")

    @property
    @pulumi.getter(name="replicationPairCount")
    def replication_pair_count(self) -> Optional[str]:
        """The number of replication pairs configured in this PS."""
        return pulumi.get(self, "replication_pair_count")

    @property
    @pulumi.getter(name="spaceUsageStatus")
    def space_usage_status(self) -> Optional[str]:
        """The space usage status."""
        return pulumi.get(self, "space_usage_status")

    @property
    @pulumi.getter(name="sslCertExpiryDate")
    def ssl_cert_expiry_date(self) -> Optional[str]:
        """The PS SSL cert expiry date."""
        return pulumi.get(self, "ssl_cert_expiry_date")

    @property
    @pulumi.getter(name="sslCertExpiryRemainingDays")
    def ssl_cert_expiry_remaining_days(self) -> Optional[int]:
        """CS SSL cert expiry date."""
        return pulumi.get(self, "ssl_cert_expiry_remaining_days")

    @property
    @pulumi.getter(name="systemLoad")
    def system_load(self) -> Optional[str]:
        """The percentage of the system load."""
        return pulumi.get(self, "system_load")

    @property
    @pulumi.getter(name="systemLoadStatus")
    def system_load_status(self) -> Optional[str]:
        """The system load status."""
        return pulumi.get(self, "system_load_status")

    @property
    @pulumi.getter(name="totalMemoryInBytes")
    def total_memory_in_bytes(self) -> Optional[int]:
        """The total memory."""
        return pulumi.get(self, "total_memory_in_bytes")

    @property
    @pulumi.getter(name="totalSpaceInBytes")
    def total_space_in_bytes(self) -> Optional[int]:
        """The total space."""
        return pulumi.get(self, "total_space_in_bytes")

    @property
    @pulumi.getter(name="versionStatus")
    def version_status(self) -> Optional[str]:
        """Version status"""
        return pulumi.get(self, "version_status")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute name, input as fallback.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class ProtectionContainerMappingPropertiesResponse(dict):
    """Properties of a protection container mapping."""

    def __init__(__self__, *,
                 health: Optional[str] = None,
                 health_error_details: Optional[Sequence['outputs.HealthErrorResponse']] = None,
                 policy_friendly_name: Optional[str] = None,
                 policy_id: Optional[str] = None,
                 provider_specific_details: Optional[Any] = None,
                 source_fabric_friendly_name: Optional[str] = None,
                 source_protection_container_friendly_name: Optional[str] = None,
                 state: Optional[str] = None,
                 target_fabric_friendly_name: Optional[str] = None,
                 target_protection_container_friendly_name: Optional[str] = None,
                 target_protection_container_id: Optional[str] = None):
        """
        Properties of a protection container mapping.

        :param str health: Health of the pairing.
        :param Sequence['HealthErrorResponseArgs'] health_error_details: Health errors.
        :param str policy_friendly_name: Friendly name of the replication policy.
        :param str policy_id: ARM Id of the policy.
        :param Union['A2AProtectionContainerMappingDetailsResponseArgs', 'VMwareCbtProtectionContainerMappingDetailsResponseArgs'] provider_specific_details: Provider specific details.
        :param str source_fabric_friendly_name: Friendly name of the source fabric.
        :param str source_protection_container_friendly_name: Friendly name of the source protection container.
        :param str state: Association status.
        :param str target_fabric_friendly_name: Friendly name of the target fabric.
        :param str target_protection_container_friendly_name: Friendly name of the paired container.
        :param str target_protection_container_id: ARM Id of the paired protection container.
        """
        # Record only the attributes that were actually supplied.
        for key, value in (
                ("health", health),
                ("health_error_details", health_error_details),
                ("policy_friendly_name", policy_friendly_name),
                ("policy_id", policy_id),
                ("provider_specific_details", provider_specific_details),
                ("source_fabric_friendly_name", source_fabric_friendly_name),
                ("source_protection_container_friendly_name", source_protection_container_friendly_name),
                ("state", state),
                ("target_fabric_friendly_name", target_fabric_friendly_name),
                ("target_protection_container_friendly_name", target_protection_container_friendly_name),
                ("target_protection_container_id", target_protection_container_id)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def health(self) -> Optional[str]:
        """Health of the pairing."""
        return pulumi.get(self, "health")

    @property
    @pulumi.getter(name="healthErrorDetails")
    def health_error_details(self) -> Optional[Sequence['outputs.HealthErrorResponse']]:
        """Health errors."""
        return pulumi.get(self, "health_error_details")

    @property
    @pulumi.getter(name="policyFriendlyName")
    def policy_friendly_name(self) -> Optional[str]:
        """Friendly name of the replication policy."""
        return pulumi.get(self, "policy_friendly_name")

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[str]:
        """ARM Id of the policy."""
        return pulumi.get(self, "policy_id")

    @property
    @pulumi.getter(name="providerSpecificDetails")
    def provider_specific_details(self) -> Optional[Any]:
        """Provider specific details."""
        return pulumi.get(self, "provider_specific_details")

    @property
    @pulumi.getter(name="sourceFabricFriendlyName")
    def source_fabric_friendly_name(self) -> Optional[str]:
        """Friendly name of the source fabric."""
        return pulumi.get(self, "source_fabric_friendly_name")

    @property
    @pulumi.getter(name="sourceProtectionContainerFriendlyName")
    def source_protection_container_friendly_name(self) -> Optional[str]:
        """Friendly name of the source protection container."""
        return pulumi.get(self, "source_protection_container_friendly_name")

    @property
    @pulumi.getter
    def state(self) -> Optional[str]:
        """Association status."""
        return pulumi.get(self, "state")

    @property
    @pulumi.getter(name="targetFabricFriendlyName")
    def target_fabric_friendly_name(self) -> Optional[str]:
        """Friendly name of the target fabric."""
        return pulumi.get(self, "target_fabric_friendly_name")

    @property
    @pulumi.getter(name="targetProtectionContainerFriendlyName")
    def target_protection_container_friendly_name(self) -> Optional[str]:
        """Friendly name of the paired container."""
        return pulumi.get(self, "target_protection_container_friendly_name")

    @property
    @pulumi.getter(name="targetProtectionContainerId")
    def target_protection_container_id(self) -> Optional[str]:
        """ARM Id of the paired protection container."""
        return pulumi.get(self, "target_protection_container_id")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class PushInstallerDetailsResponse(dict):
    """Details of a push installer."""

    def __init__(__self__, *,
                 health: str,
                 health_errors: Sequence['outputs.HealthErrorResponse'],
                 id: str,
                 last_heartbeat_utc: str,
                 name: str,
                 version: str):
        """
        Details of a push installer.

        :param str health: Health of the push installer.
        :param Sequence['HealthErrorResponseArgs'] health_errors: Health errors.
        :param str id: Id of the push installer.
        :param str last_heartbeat_utc: Last heartbeat received from the push installer.
        :param str name: Name of the push installer.
        :param str version: Version of the push installer.
        """
        # Every field is required, so record all of them unconditionally.
        for key, value in (
                ("health", health),
                ("health_errors", health_errors),
                ("id", id),
                ("last_heartbeat_utc", last_heartbeat_utc),
                ("name", name),
                ("version", version)):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def health(self) -> str:
        """Health of the push installer."""
        return pulumi.get(self, "health")

    @property
    @pulumi.getter(name="healthErrors")
    def health_errors(self) -> Sequence['outputs.HealthErrorResponse']:
        """Health errors."""
        return pulumi.get(self, "health_errors")

    @property
    @pulumi.getter
    def id(self) -> str:
        """Id of the push installer."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="lastHeartbeatUtc")
    def last_heartbeat_utc(self) -> str:
        """Last heartbeat received from the push installer."""
        return pulumi.get(self, "last_heartbeat_utc")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Name of the push installer."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def version(self) -> str:
        """Version of the push installer."""
        return pulumi.get(self, "version")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class RcmAzureMigrationPolicyDetailsResponse(dict):
    """Policy details specific to RCM based Azure migration."""

    def __init__(__self__, *,
                 instance_type: str,
                 app_consistent_frequency_in_minutes: Optional[int] = None,
                 crash_consistent_frequency_in_minutes: Optional[int] = None,
                 multi_vm_sync_status: Optional[str] = None,
                 recovery_point_history: Optional[int] = None,
                 recovery_point_threshold_in_minutes: Optional[int] = None):
        """
        Policy details specific to RCM based Azure migration.

        :param str instance_type: The class type; overridden in derived classes.
        :param int app_consistent_frequency_in_minutes: App consistent snapshot frequency in minutes.
        :param int crash_consistent_frequency_in_minutes: Crash consistent snapshot frequency in minutes.
        :param str multi_vm_sync_status: Whether multi-VM sync has to be enabled.
        :param int recovery_point_history: Duration in minutes until which recovery points are stored.
        :param int recovery_point_threshold_in_minutes: Recovery point threshold in minutes.
        """
        # The discriminator is pinned to this subtype's literal regardless of input.
        pulumi.set(__self__, "instance_type", 'RcmAzureMigration')
        for key, value in (
                ("app_consistent_frequency_in_minutes", app_consistent_frequency_in_minutes),
                ("crash_consistent_frequency_in_minutes", crash_consistent_frequency_in_minutes),
                ("multi_vm_sync_status", multi_vm_sync_status),
                ("recovery_point_history", recovery_point_history),
                ("recovery_point_threshold_in_minutes", recovery_point_threshold_in_minutes)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """The class type; overridden in derived classes."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="appConsistentFrequencyInMinutes")
    def app_consistent_frequency_in_minutes(self) -> Optional[int]:
        """App consistent snapshot frequency in minutes."""
        return pulumi.get(self, "app_consistent_frequency_in_minutes")

    @property
    @pulumi.getter(name="crashConsistentFrequencyInMinutes")
    def crash_consistent_frequency_in_minutes(self) -> Optional[int]:
        """Crash consistent snapshot frequency in minutes."""
        return pulumi.get(self, "crash_consistent_frequency_in_minutes")

    @property
    @pulumi.getter(name="multiVmSyncStatus")
    def multi_vm_sync_status(self) -> Optional[str]:
        """Whether multi-VM sync has to be enabled."""
        return pulumi.get(self, "multi_vm_sync_status")

    @property
    @pulumi.getter(name="recoveryPointHistory")
    def recovery_point_history(self) -> Optional[int]:
        """Duration in minutes until which recovery points are stored."""
        return pulumi.get(self, "recovery_point_history")

    @property
    @pulumi.getter(name="recoveryPointThresholdInMinutes")
    def recovery_point_threshold_in_minutes(self) -> Optional[int]:
        """Recovery point threshold in minutes."""
        return pulumi.get(self, "recovery_point_threshold_in_minutes")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class RcmProxyDetailsResponse(dict):
    """Details of an RCM proxy."""

    def __init__(__self__, *,
                 health: str,
                 health_errors: Sequence['outputs.HealthErrorResponse'],
                 id: str,
                 last_heartbeat_utc: str,
                 name: str,
                 version: str):
        """
        Details of an RCM proxy.

        :param str health: Health of the RCM proxy.
        :param Sequence['HealthErrorResponseArgs'] health_errors: Health errors.
        :param str id: Id of the RCM proxy.
        :param str last_heartbeat_utc: Last heartbeat received from the RCM proxy.
        :param str name: Name of the RCM proxy.
        :param str version: Version of the RCM proxy.
        """
        # Every field is required, so record all of them unconditionally.
        for key, value in (
                ("health", health),
                ("health_errors", health_errors),
                ("id", id),
                ("last_heartbeat_utc", last_heartbeat_utc),
                ("name", name),
                ("version", version)):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def health(self) -> str:
        """Health of the RCM proxy."""
        return pulumi.get(self, "health")

    @property
    @pulumi.getter(name="healthErrors")
    def health_errors(self) -> Sequence['outputs.HealthErrorResponse']:
        """Health errors."""
        return pulumi.get(self, "health_errors")

    @property
    @pulumi.getter
    def id(self) -> str:
        """Id of the RCM proxy."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="lastHeartbeatUtc")
    def last_heartbeat_utc(self) -> str:
        """Last heartbeat received from the RCM proxy."""
        return pulumi.get(self, "last_heartbeat_utc")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Name of the RCM proxy."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def version(self) -> str:
        """Version of the RCM proxy."""
        return pulumi.get(self, "version")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class RecoveryPlanA2ADetailsResponse(dict):
    """A2A specific details of a recovery plan."""

    def __init__(__self__, *,
                 instance_type: str,
                 primary_zone: Optional[str] = None,
                 recovery_zone: Optional[str] = None):
        """
        A2A specific details of a recovery plan.

        :param str instance_type: The instance type.
        :param str primary_zone: The primary zone.
        :param str recovery_zone: The recovery zone.
        """
        # The discriminator is pinned to this subtype's literal regardless of input.
        pulumi.set(__self__, "instance_type", 'A2A')
        for key, value in (("primary_zone", primary_zone),
                           ("recovery_zone", recovery_zone)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """The instance type."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="primaryZone")
    def primary_zone(self) -> Optional[str]:
        """The primary zone."""
        return pulumi.get(self, "primary_zone")

    @property
    @pulumi.getter(name="recoveryZone")
    def recovery_zone(self) -> Optional[str]:
        """The recovery zone."""
        return pulumi.get(self, "recovery_zone")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class RecoveryPlanActionResponse(dict):
    """Details of a recovery plan action."""

    def __init__(__self__, *,
                 action_name: str,
                 custom_details: Any,
                 failover_directions: Sequence[str],
                 failover_types: Sequence[str]):
        """
        Details of a recovery plan action.

        :param str action_name: Name of the action.
        :param Union['RecoveryPlanAutomationRunbookActionDetailsResponseArgs', 'RecoveryPlanManualActionDetailsResponseArgs', 'RecoveryPlanScriptActionDetailsResponseArgs'] custom_details: The custom details.
        :param Sequence[str] failover_directions: The failover directions.
        :param Sequence[str] failover_types: The failover types.
        """
        # Every field is required, so record all of them unconditionally.
        for key, value in (
                ("action_name", action_name),
                ("custom_details", custom_details),
                ("failover_directions", failover_directions),
                ("failover_types", failover_types)):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="actionName")
    def action_name(self) -> str:
        """Name of the action."""
        return pulumi.get(self, "action_name")

    @property
    @pulumi.getter(name="customDetails")
    def custom_details(self) -> Any:
        """The custom details."""
        return pulumi.get(self, "custom_details")

    @property
    @pulumi.getter(name="failoverDirections")
    def failover_directions(self) -> Sequence[str]:
        """The failover directions."""
        return pulumi.get(self, "failover_directions")

    @property
    @pulumi.getter(name="failoverTypes")
    def failover_types(self) -> Sequence[str]:
        """The failover types."""
        return pulumi.get(self, "failover_types")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class RecoveryPlanAutomationRunbookActionDetailsResponse(dict):
    """Details of an Automation runbook action in a recovery plan."""

    def __init__(__self__, *,
                 fabric_location: str,
                 instance_type: str,
                 runbook_id: Optional[str] = None,
                 timeout: Optional[str] = None):
        """
        Details of an Automation runbook action in a recovery plan.

        :param str fabric_location: The fabric location.
        :param str instance_type: Type of action details (see RecoveryPlanActionDetailsTypes enum for possible values).
        :param str runbook_id: ARM Id of the runbook.
        :param str timeout: The runbook timeout.
        """
        pulumi.set(__self__, "fabric_location", fabric_location)
        # The discriminator is pinned to this subtype's literal regardless of input.
        pulumi.set(__self__, "instance_type", 'AutomationRunbookActionDetails')
        for key, value in (("runbook_id", runbook_id), ("timeout", timeout)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="fabricLocation")
    def fabric_location(self) -> str:
        """The fabric location."""
        return pulumi.get(self, "fabric_location")

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Type of action details (see RecoveryPlanActionDetailsTypes enum for possible values)."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="runbookId")
    def runbook_id(self) -> Optional[str]:
        """ARM Id of the runbook."""
        return pulumi.get(self, "runbook_id")

    @property
    @pulumi.getter
    def timeout(self) -> Optional[str]:
        """The runbook timeout."""
        return pulumi.get(self, "timeout")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class RecoveryPlanGroupResponse(dict):
    """Details of a recovery plan group."""

    def __init__(__self__, *,
                 group_type: str,
                 end_group_actions: Optional[Sequence['outputs.RecoveryPlanActionResponse']] = None,
                 replication_protected_items: Optional[Sequence['outputs.RecoveryPlanProtectedItemResponse']] = None,
                 start_group_actions: Optional[Sequence['outputs.RecoveryPlanActionResponse']] = None):
        """
        Details of a recovery plan group.

        :param str group_type: Type of the group.
        :param Sequence['RecoveryPlanActionResponseArgs'] end_group_actions: The end group actions.
        :param Sequence['RecoveryPlanProtectedItemResponseArgs'] replication_protected_items: The protected items.
        :param Sequence['RecoveryPlanActionResponseArgs'] start_group_actions: The start group actions.
        """
        pulumi.set(__self__, "group_type", group_type)
        # Record only the optional attributes that were actually supplied.
        for key, value in (
                ("end_group_actions", end_group_actions),
                ("replication_protected_items", replication_protected_items),
                ("start_group_actions", start_group_actions)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="groupType")
    def group_type(self) -> str:
        """Type of the group."""
        return pulumi.get(self, "group_type")

    @property
    @pulumi.getter(name="endGroupActions")
    def end_group_actions(self) -> Optional[Sequence['outputs.RecoveryPlanActionResponse']]:
        """The end group actions."""
        return pulumi.get(self, "end_group_actions")

    @property
    @pulumi.getter(name="replicationProtectedItems")
    def replication_protected_items(self) -> Optional[Sequence['outputs.RecoveryPlanProtectedItemResponse']]:
        """The protected items."""
        return pulumi.get(self, "replication_protected_items")

    @property
    @pulumi.getter(name="startGroupActions")
    def start_group_actions(self) -> Optional[Sequence['outputs.RecoveryPlanActionResponse']]:
        """The start group actions."""
        return pulumi.get(self, "start_group_actions")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class RecoveryPlanManualActionDetailsResponse(dict):
    """Details of a manual action in a recovery plan."""

    def __init__(__self__, *,
                 instance_type: str,
                 description: Optional[str] = None):
        """
        Details of a manual action in a recovery plan.

        :param str instance_type: Type of action details (see RecoveryPlanActionDetailsTypes enum for possible values).
        :param str description: Description of the manual action.
        """
        # The discriminator is pinned to this subtype's literal regardless of input.
        pulumi.set(__self__, "instance_type", 'ManualActionDetails')
        if description is not None:
            pulumi.set(__self__, "description", description)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Type of action details (see RecoveryPlanActionDetailsTypes enum for possible values)."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """Description of the manual action."""
        return pulumi.get(self, "description")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class RecoveryPlanPropertiesResponse(dict):
    """Custom details of a recovery plan."""

    def __init__(__self__, *,
                 provider_specific_details: Sequence['outputs.RecoveryPlanA2ADetailsResponse'],
                 allowed_operations: Optional[Sequence[str]] = None,
                 current_scenario: Optional['outputs.CurrentScenarioDetailsResponse'] = None,
                 current_scenario_status: Optional[str] = None,
                 current_scenario_status_description: Optional[str] = None,
                 failover_deployment_model: Optional[str] = None,
                 friendly_name: Optional[str] = None,
                 groups: Optional[Sequence['outputs.RecoveryPlanGroupResponse']] = None,
                 last_planned_failover_time: Optional[str] = None,
                 last_test_failover_time: Optional[str] = None,
                 last_unplanned_failover_time: Optional[str] = None,
                 primary_fabric_friendly_name: Optional[str] = None,
                 primary_fabric_id: Optional[str] = None,
                 recovery_fabric_friendly_name: Optional[str] = None,
                 recovery_fabric_id: Optional[str] = None,
                 replication_providers: Optional[Sequence[str]] = None):
        """
        Custom details of a recovery plan.

        :param Sequence['RecoveryPlanA2ADetailsResponseArgs'] provider_specific_details: Provider id and provider specific details.
        :param Sequence[str] allowed_operations: The allowed operations.
        :param 'CurrentScenarioDetailsResponseArgs' current_scenario: Details of the current scenario.
        :param str current_scenario_status: Status of the recovery plan.
        :param str current_scenario_status_description: Status description of the recovery plan.
        :param str failover_deployment_model: The failover deployment model.
        :param str friendly_name: The friendly name.
        :param Sequence['RecoveryPlanGroupResponseArgs'] groups: Groups of the recovery plan.
        :param str last_planned_failover_time: Start time of the last planned failover.
        :param str last_test_failover_time: Start time of the last test failover.
        :param str last_unplanned_failover_time: Start time of the last unplanned failover.
        :param str primary_fabric_friendly_name: Friendly name of the primary fabric.
        :param str primary_fabric_id: Id of the primary fabric.
        :param str recovery_fabric_friendly_name: Friendly name of the recovery fabric.
        :param str recovery_fabric_id: Id of the recovery fabric.
        :param Sequence[str] replication_providers: The replication providers.
        """
        pulumi.set(__self__, "provider_specific_details", provider_specific_details)
        # Record only the optional attributes that were actually supplied.
        for key, value in (
                ("allowed_operations", allowed_operations),
                ("current_scenario", current_scenario),
                ("current_scenario_status", current_scenario_status),
                ("current_scenario_status_description", current_scenario_status_description),
                ("failover_deployment_model", failover_deployment_model),
                ("friendly_name", friendly_name),
                ("groups", groups),
                ("last_planned_failover_time", last_planned_failover_time),
                ("last_test_failover_time", last_test_failover_time),
                ("last_unplanned_failover_time", last_unplanned_failover_time),
                ("primary_fabric_friendly_name", primary_fabric_friendly_name),
                ("primary_fabric_id", primary_fabric_id),
                ("recovery_fabric_friendly_name", recovery_fabric_friendly_name),
                ("recovery_fabric_id", recovery_fabric_id),
                ("replication_providers", replication_providers)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="providerSpecificDetails")
    def provider_specific_details(self) -> Sequence['outputs.RecoveryPlanA2ADetailsResponse']:
        """Provider id and provider specific details."""
        return pulumi.get(self, "provider_specific_details")

    @property
    @pulumi.getter(name="allowedOperations")
    def allowed_operations(self) -> Optional[Sequence[str]]:
        """The allowed operations."""
        return pulumi.get(self, "allowed_operations")

    @property
    @pulumi.getter(name="currentScenario")
    def current_scenario(self) -> Optional['outputs.CurrentScenarioDetailsResponse']:
        """Details of the current scenario."""
        return pulumi.get(self, "current_scenario")

    @property
    @pulumi.getter(name="currentScenarioStatus")
    def current_scenario_status(self) -> Optional[str]:
        """Status of the recovery plan."""
        return pulumi.get(self, "current_scenario_status")

    @property
    @pulumi.getter(name="currentScenarioStatusDescription")
    def current_scenario_status_description(self) -> Optional[str]:
        """Status description of the recovery plan."""
        return pulumi.get(self, "current_scenario_status_description")

    @property
    @pulumi.getter(name="failoverDeploymentModel")
    def failover_deployment_model(self) -> Optional[str]:
        """The failover deployment model."""
        return pulumi.get(self, "failover_deployment_model")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """The friendly name."""
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter
    def groups(self) -> Optional[Sequence['outputs.RecoveryPlanGroupResponse']]:
        """Groups of the recovery plan."""
        return pulumi.get(self, "groups")

    @property
    @pulumi.getter(name="lastPlannedFailoverTime")
    def last_planned_failover_time(self) -> Optional[str]:
        """Start time of the last planned failover."""
        return pulumi.get(self, "last_planned_failover_time")

    @property
    @pulumi.getter(name="lastTestFailoverTime")
    def last_test_failover_time(self) -> Optional[str]:
        """Start time of the last test failover."""
        return pulumi.get(self, "last_test_failover_time")

    @property
    @pulumi.getter(name="lastUnplannedFailoverTime")
    def last_unplanned_failover_time(self) -> Optional[str]:
        """Start time of the last unplanned failover."""
        return pulumi.get(self, "last_unplanned_failover_time")

    @property
    @pulumi.getter(name="primaryFabricFriendlyName")
    def primary_fabric_friendly_name(self) -> Optional[str]:
        """Friendly name of the primary fabric."""
        return pulumi.get(self, "primary_fabric_friendly_name")

    @property
    @pulumi.getter(name="primaryFabricId")
    def primary_fabric_id(self) -> Optional[str]:
        """Id of the primary fabric."""
        return pulumi.get(self, "primary_fabric_id")

    @property
    @pulumi.getter(name="recoveryFabricFriendlyName")
    def recovery_fabric_friendly_name(self) -> Optional[str]:
        """Friendly name of the recovery fabric."""
        return pulumi.get(self, "recovery_fabric_friendly_name")

    @property
    @pulumi.getter(name="recoveryFabricId")
    def recovery_fabric_id(self) -> Optional[str]:
        """Id of the recovery fabric."""
        return pulumi.get(self, "recovery_fabric_id")

    @property
    @pulumi.getter(name="replicationProviders")
    def replication_providers(self) -> Optional[Sequence[str]]:
        """The replication providers."""
        return pulumi.get(self, "replication_providers")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class RecoveryPlanProtectedItemResponse(dict):
    """A protected item within a recovery plan."""

    def __init__(__self__, *,
                 id: Optional[str] = None,
                 virtual_machine_id: Optional[str] = None):
        """
        A protected item within a recovery plan.

        :param str id: ARM Id of the recovery plan protected item.
        :param str virtual_machine_id: Id of the virtual machine.
        """
        # Record only the attributes that were actually supplied.
        for key, value in (("id", id),
                           ("virtual_machine_id", virtual_machine_id)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter
    def id(self) -> Optional[str]:
        """ARM Id of the recovery plan protected item."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="virtualMachineId")
    def virtual_machine_id(self) -> Optional[str]:
        """Id of the virtual machine."""
        return pulumi.get(self, "virtual_machine_id")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class RecoveryPlanScriptActionDetailsResponse(dict):
    """Details of a script action in a recovery plan."""

    def __init__(__self__, *,
                 fabric_location: str,
                 instance_type: str,
                 path: str,
                 timeout: Optional[str] = None):
        """
        Details of a script action in a recovery plan.

        :param str fabric_location: The fabric location.
        :param str instance_type: Type of action details (see RecoveryPlanActionDetailsTypes enum for possible values).
        :param str path: Path of the script.
        :param str timeout: The script timeout.
        """
        pulumi.set(__self__, "fabric_location", fabric_location)
        # The discriminator is pinned to this subtype's literal regardless of input.
        pulumi.set(__self__, "instance_type", 'ScriptActionDetails')
        pulumi.set(__self__, "path", path)
        if timeout is not None:
            pulumi.set(__self__, "timeout", timeout)

    @property
    @pulumi.getter(name="fabricLocation")
    def fabric_location(self) -> str:
        """The fabric location."""
        return pulumi.get(self, "fabric_location")

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Type of action details (see RecoveryPlanActionDetailsTypes enum for possible values)."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter
    def path(self) -> str:
        """Path of the script."""
        return pulumi.get(self, "path")

    @property
    @pulumi.getter
    def timeout(self) -> Optional[str]:
        """The script timeout."""
        return pulumi.get(self, "timeout")

    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute name.
        snake = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake or prop
@pulumi.output_type
class RecoveryServicesProviderPropertiesResponse(dict):
    """
    Recovery services provider properties.
    """
    def __init__(__self__, *,
                 allowed_scenarios: Optional[Sequence[str]] = None,
                 authentication_identity_details: Optional['outputs.IdentityProviderDetailsResponse'] = None,
                 connection_status: Optional[str] = None,
                 dra_identifier: Optional[str] = None,
                 fabric_friendly_name: Optional[str] = None,
                 fabric_type: Optional[str] = None,
                 friendly_name: Optional[str] = None,
                 health_error_details: Optional[Sequence['outputs.HealthErrorResponse']] = None,
                 last_heart_beat: Optional[str] = None,
                 protected_item_count: Optional[int] = None,
                 provider_version: Optional[str] = None,
                 provider_version_details: Optional['outputs.VersionDetailsResponse'] = None,
                 provider_version_expiry_date: Optional[str] = None,
                 provider_version_state: Optional[str] = None,
                 resource_access_identity_details: Optional['outputs.IdentityProviderDetailsResponse'] = None,
                 server_version: Optional[str] = None):
        """
        Recovery services provider properties.

        :param Sequence[str] allowed_scenarios: The scenarios allowed on this provider.
        :param 'IdentityProviderDetailsResponseArgs' authentication_identity_details: The authentication identity details.
        :param str connection_status: A value indicating whether the DRA is responsive.
        :param str dra_identifier: The DRA Id.
        :param str fabric_friendly_name: The fabric friendly name.
        :param str fabric_type: Type of the site.
        :param str friendly_name: Friendly name of the DRA.
        :param Sequence['HealthErrorResponseArgs'] health_error_details: The provider health error details.
        :param str last_heart_beat: Time when the last heartbeat was sent by the DRA.
        :param int protected_item_count: Number of protected VMs currently managed by the DRA.
        :param str provider_version: The provider version.
        :param 'VersionDetailsResponseArgs' provider_version_details: The provider version details.
        :param str provider_version_expiry_date: Expiry date of the version.
        :param str provider_version_state: DRA version status.
        :param 'IdentityProviderDetailsResponseArgs' resource_access_identity_details: The resource access identity details.
        :param str server_version: The fabric provider.
        """
        # Only fields that were actually supplied are stored; order matters for
        # the underlying dict, so the tuple below mirrors the declaration order.
        for attr_name, attr_value in (
            ("allowed_scenarios", allowed_scenarios),
            ("authentication_identity_details", authentication_identity_details),
            ("connection_status", connection_status),
            ("dra_identifier", dra_identifier),
            ("fabric_friendly_name", fabric_friendly_name),
            ("fabric_type", fabric_type),
            ("friendly_name", friendly_name),
            ("health_error_details", health_error_details),
            ("last_heart_beat", last_heart_beat),
            ("protected_item_count", protected_item_count),
            ("provider_version", provider_version),
            ("provider_version_details", provider_version_details),
            ("provider_version_expiry_date", provider_version_expiry_date),
            ("provider_version_state", provider_version_state),
            ("resource_access_identity_details", resource_access_identity_details),
            ("server_version", server_version),
        ):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="allowedScenarios")
    def allowed_scenarios(self) -> Optional[Sequence[str]]:
        """The scenarios allowed on this provider."""
        return pulumi.get(self, "allowed_scenarios")

    @property
    @pulumi.getter(name="authenticationIdentityDetails")
    def authentication_identity_details(self) -> Optional['outputs.IdentityProviderDetailsResponse']:
        """The authentication identity details."""
        return pulumi.get(self, "authentication_identity_details")

    @property
    @pulumi.getter(name="connectionStatus")
    def connection_status(self) -> Optional[str]:
        """A value indicating whether the DRA is responsive."""
        return pulumi.get(self, "connection_status")

    @property
    @pulumi.getter(name="draIdentifier")
    def dra_identifier(self) -> Optional[str]:
        """The DRA Id."""
        return pulumi.get(self, "dra_identifier")

    @property
    @pulumi.getter(name="fabricFriendlyName")
    def fabric_friendly_name(self) -> Optional[str]:
        """The fabric friendly name."""
        return pulumi.get(self, "fabric_friendly_name")

    @property
    @pulumi.getter(name="fabricType")
    def fabric_type(self) -> Optional[str]:
        """Type of the site."""
        return pulumi.get(self, "fabric_type")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """Friendly name of the DRA."""
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="healthErrorDetails")
    def health_error_details(self) -> Optional[Sequence['outputs.HealthErrorResponse']]:
        """The recovery services provider health error details."""
        return pulumi.get(self, "health_error_details")

    @property
    @pulumi.getter(name="lastHeartBeat")
    def last_heart_beat(self) -> Optional[str]:
        """Time when the last heartbeat was sent by the DRA."""
        return pulumi.get(self, "last_heart_beat")

    @property
    @pulumi.getter(name="protectedItemCount")
    def protected_item_count(self) -> Optional[int]:
        """Number of protected VMs currently managed by the DRA."""
        return pulumi.get(self, "protected_item_count")

    @property
    @pulumi.getter(name="providerVersion")
    def provider_version(self) -> Optional[str]:
        """The provider version."""
        return pulumi.get(self, "provider_version")

    @property
    @pulumi.getter(name="providerVersionDetails")
    def provider_version_details(self) -> Optional['outputs.VersionDetailsResponse']:
        """The provider version details."""
        return pulumi.get(self, "provider_version_details")

    @property
    @pulumi.getter(name="providerVersionExpiryDate")
    def provider_version_expiry_date(self) -> Optional[str]:
        """Expiry date of the version."""
        return pulumi.get(self, "provider_version_expiry_date")

    @property
    @pulumi.getter(name="providerVersionState")
    def provider_version_state(self) -> Optional[str]:
        """DRA version status."""
        return pulumi.get(self, "provider_version_state")

    @property
    @pulumi.getter(name="resourceAccessIdentityDetails")
    def resource_access_identity_details(self) -> Optional['outputs.IdentityProviderDetailsResponse']:
        """The resource access identity details."""
        return pulumi.get(self, "resource_access_identity_details")

    @property
    @pulumi.getter(name="serverVersion")
    def server_version(self) -> Optional[str]:
        """The fabric provider."""
        return pulumi.get(self, "server_version")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ReplicationAgentDetailsResponse(dict):
    """
    Replication agent details.
    """
    def __init__(__self__, *,
                 health: str,
                 health_errors: Sequence['outputs.HealthErrorResponse'],
                 id: str,
                 last_heartbeat_utc: str,
                 name: str,
                 version: str):
        """
        Replication agent details.

        :param str health: The health of the replication agent.
        :param Sequence['HealthErrorResponseArgs'] health_errors: The health errors.
        :param str id: The replication agent Id.
        :param str last_heartbeat_utc: The last heartbeat received from the replication agent.
        :param str name: The replication agent name.
        :param str version: The replication agent version.
        """
        # All fields are required; store them in declaration order.
        for attr_name, attr_value in (
            ("health", health),
            ("health_errors", health_errors),
            ("id", id),
            ("last_heartbeat_utc", last_heartbeat_utc),
            ("name", name),
            ("version", version),
        ):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def health(self) -> str:
        """The health of the replication agent."""
        return pulumi.get(self, "health")

    @property
    @pulumi.getter(name="healthErrors")
    def health_errors(self) -> Sequence['outputs.HealthErrorResponse']:
        """The health errors."""
        return pulumi.get(self, "health_errors")

    @property
    @pulumi.getter
    def id(self) -> str:
        """The replication agent Id."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="lastHeartbeatUtc")
    def last_heartbeat_utc(self) -> str:
        """The last heartbeat received from the replication agent."""
        return pulumi.get(self, "last_heartbeat_utc")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The replication agent name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def version(self) -> str:
        """The replication agent version."""
        return pulumi.get(self, "version")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ReplicationProtectedItemPropertiesResponse(dict):
    """
    Replication protected item custom data details.
    """
    def __init__(__self__, *,
                 active_location: Optional[str] = None,
                 allowed_operations: Optional[Sequence[str]] = None,
                 current_scenario: Optional['outputs.CurrentScenarioDetailsResponse'] = None,
                 failover_health: Optional[str] = None,
                 failover_recovery_point_id: Optional[str] = None,
                 friendly_name: Optional[str] = None,
                 health_errors: Optional[Sequence['outputs.HealthErrorResponse']] = None,
                 last_successful_failover_time: Optional[str] = None,
                 last_successful_test_failover_time: Optional[str] = None,
                 policy_friendly_name: Optional[str] = None,
                 policy_id: Optional[str] = None,
                 primary_fabric_friendly_name: Optional[str] = None,
                 primary_fabric_provider: Optional[str] = None,
                 primary_protection_container_friendly_name: Optional[str] = None,
                 protectable_item_id: Optional[str] = None,
                 protected_item_type: Optional[str] = None,
                 protection_state: Optional[str] = None,
                 protection_state_description: Optional[str] = None,
                 provider_specific_details: Optional[Any] = None,
                 recovery_container_id: Optional[str] = None,
                 recovery_fabric_friendly_name: Optional[str] = None,
                 recovery_fabric_id: Optional[str] = None,
                 recovery_protection_container_friendly_name: Optional[str] = None,
                 recovery_services_provider_id: Optional[str] = None,
                 replication_health: Optional[str] = None,
                 test_failover_state: Optional[str] = None,
                 test_failover_state_description: Optional[str] = None):
        """
        Replication protected item custom data details.

        :param str active_location: The Current active location of the PE.
        :param Sequence[str] allowed_operations: The allowed operations on the Replication protected item.
        :param 'CurrentScenarioDetailsResponseArgs' current_scenario: The current scenario.
        :param str failover_health: The consolidated failover health for the VM.
        :param str failover_recovery_point_id: The recovery point ARM Id to which the Vm was failed over.
        :param str friendly_name: The name.
        :param Sequence['HealthErrorResponseArgs'] health_errors: List of health errors.
        :param str last_successful_failover_time: The Last successful failover time.
        :param str last_successful_test_failover_time: The Last successful test failover time.
        :param str policy_friendly_name: The name of Policy governing this PE.
        :param str policy_id: The ID of Policy governing this PE.
        :param str primary_fabric_friendly_name: The friendly name of the primary fabric.
        :param str primary_fabric_provider: The fabric provider of the primary fabric.
        :param str primary_protection_container_friendly_name: The name of primary protection container friendly name.
        :param str protectable_item_id: The protected item ARM Id.
        :param str protected_item_type: The type of protected item type.
        :param str protection_state: The protection status.
        :param str protection_state_description: The protection state description.
        :param provider_specific_details: The Replication provider custom settings — one of the
               provider-specific *ReplicationDetailsResponseArgs types (A2A, HyperVReplica,
               HyperVReplicaAzure, HyperVReplicaBlue, InMage, InMageAzureV2 or InMageRcm).
        :param str recovery_container_id: The recovery container Id.
        :param str recovery_fabric_friendly_name: The friendly name of recovery fabric.
        :param str recovery_fabric_id: The Arm Id of recovery fabric.
        :param str recovery_protection_container_friendly_name: The name of recovery container friendly name.
        :param str recovery_services_provider_id: The recovery provider ARM Id.
        :param str replication_health: The consolidated protection health for the VM taking any
               issues with SRS as well as all the replication units associated with the VM's
               replication group into account. This is a string representation of the
               ProtectionHealth enumeration.
        :param str test_failover_state: The Test failover state.
        :param str test_failover_state_description: The Test failover state description.
        """
        # Every field is optional; only supplied values are stored. The tuple
        # mirrors the declaration order so dict insertion order is unchanged.
        for attr_name, attr_value in (
            ("active_location", active_location),
            ("allowed_operations", allowed_operations),
            ("current_scenario", current_scenario),
            ("failover_health", failover_health),
            ("failover_recovery_point_id", failover_recovery_point_id),
            ("friendly_name", friendly_name),
            ("health_errors", health_errors),
            ("last_successful_failover_time", last_successful_failover_time),
            ("last_successful_test_failover_time", last_successful_test_failover_time),
            ("policy_friendly_name", policy_friendly_name),
            ("policy_id", policy_id),
            ("primary_fabric_friendly_name", primary_fabric_friendly_name),
            ("primary_fabric_provider", primary_fabric_provider),
            ("primary_protection_container_friendly_name", primary_protection_container_friendly_name),
            ("protectable_item_id", protectable_item_id),
            ("protected_item_type", protected_item_type),
            ("protection_state", protection_state),
            ("protection_state_description", protection_state_description),
            ("provider_specific_details", provider_specific_details),
            ("recovery_container_id", recovery_container_id),
            ("recovery_fabric_friendly_name", recovery_fabric_friendly_name),
            ("recovery_fabric_id", recovery_fabric_id),
            ("recovery_protection_container_friendly_name", recovery_protection_container_friendly_name),
            ("recovery_services_provider_id", recovery_services_provider_id),
            ("replication_health", replication_health),
            ("test_failover_state", test_failover_state),
            ("test_failover_state_description", test_failover_state_description),
        ):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="activeLocation")
    def active_location(self) -> Optional[str]:
        """The Current active location of the PE."""
        return pulumi.get(self, "active_location")

    @property
    @pulumi.getter(name="allowedOperations")
    def allowed_operations(self) -> Optional[Sequence[str]]:
        """The allowed operations on the Replication protected item."""
        return pulumi.get(self, "allowed_operations")

    @property
    @pulumi.getter(name="currentScenario")
    def current_scenario(self) -> Optional['outputs.CurrentScenarioDetailsResponse']:
        """The current scenario."""
        return pulumi.get(self, "current_scenario")

    @property
    @pulumi.getter(name="failoverHealth")
    def failover_health(self) -> Optional[str]:
        """The consolidated failover health for the VM."""
        return pulumi.get(self, "failover_health")

    @property
    @pulumi.getter(name="failoverRecoveryPointId")
    def failover_recovery_point_id(self) -> Optional[str]:
        """The recovery point ARM Id to which the Vm was failed over."""
        return pulumi.get(self, "failover_recovery_point_id")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """The name."""
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="healthErrors")
    def health_errors(self) -> Optional[Sequence['outputs.HealthErrorResponse']]:
        """List of health errors."""
        return pulumi.get(self, "health_errors")

    @property
    @pulumi.getter(name="lastSuccessfulFailoverTime")
    def last_successful_failover_time(self) -> Optional[str]:
        """The Last successful failover time."""
        return pulumi.get(self, "last_successful_failover_time")

    @property
    @pulumi.getter(name="lastSuccessfulTestFailoverTime")
    def last_successful_test_failover_time(self) -> Optional[str]:
        """The Last successful test failover time."""
        return pulumi.get(self, "last_successful_test_failover_time")

    @property
    @pulumi.getter(name="policyFriendlyName")
    def policy_friendly_name(self) -> Optional[str]:
        """The name of Policy governing this PE."""
        return pulumi.get(self, "policy_friendly_name")

    @property
    @pulumi.getter(name="policyId")
    def policy_id(self) -> Optional[str]:
        """The ID of Policy governing this PE."""
        return pulumi.get(self, "policy_id")

    @property
    @pulumi.getter(name="primaryFabricFriendlyName")
    def primary_fabric_friendly_name(self) -> Optional[str]:
        """The friendly name of the primary fabric."""
        return pulumi.get(self, "primary_fabric_friendly_name")

    @property
    @pulumi.getter(name="primaryFabricProvider")
    def primary_fabric_provider(self) -> Optional[str]:
        """The fabric provider of the primary fabric."""
        return pulumi.get(self, "primary_fabric_provider")

    @property
    @pulumi.getter(name="primaryProtectionContainerFriendlyName")
    def primary_protection_container_friendly_name(self) -> Optional[str]:
        """The name of primary protection container friendly name."""
        return pulumi.get(self, "primary_protection_container_friendly_name")

    @property
    @pulumi.getter(name="protectableItemId")
    def protectable_item_id(self) -> Optional[str]:
        """The protected item ARM Id."""
        return pulumi.get(self, "protectable_item_id")

    @property
    @pulumi.getter(name="protectedItemType")
    def protected_item_type(self) -> Optional[str]:
        """The type of protected item type."""
        return pulumi.get(self, "protected_item_type")

    @property
    @pulumi.getter(name="protectionState")
    def protection_state(self) -> Optional[str]:
        """The protection status."""
        return pulumi.get(self, "protection_state")

    @property
    @pulumi.getter(name="protectionStateDescription")
    def protection_state_description(self) -> Optional[str]:
        """The protection state description."""
        return pulumi.get(self, "protection_state_description")

    @property
    @pulumi.getter(name="providerSpecificDetails")
    def provider_specific_details(self) -> Optional[Any]:
        """The Replication provider custom settings."""
        return pulumi.get(self, "provider_specific_details")

    @property
    @pulumi.getter(name="recoveryContainerId")
    def recovery_container_id(self) -> Optional[str]:
        """The recovery container Id."""
        return pulumi.get(self, "recovery_container_id")

    @property
    @pulumi.getter(name="recoveryFabricFriendlyName")
    def recovery_fabric_friendly_name(self) -> Optional[str]:
        """The friendly name of recovery fabric."""
        return pulumi.get(self, "recovery_fabric_friendly_name")

    @property
    @pulumi.getter(name="recoveryFabricId")
    def recovery_fabric_id(self) -> Optional[str]:
        """The Arm Id of recovery fabric."""
        return pulumi.get(self, "recovery_fabric_id")

    @property
    @pulumi.getter(name="recoveryProtectionContainerFriendlyName")
    def recovery_protection_container_friendly_name(self) -> Optional[str]:
        """The name of recovery container friendly name."""
        return pulumi.get(self, "recovery_protection_container_friendly_name")

    @property
    @pulumi.getter(name="recoveryServicesProviderId")
    def recovery_services_provider_id(self) -> Optional[str]:
        """The recovery provider ARM Id."""
        return pulumi.get(self, "recovery_services_provider_id")

    @property
    @pulumi.getter(name="replicationHealth")
    def replication_health(self) -> Optional[str]:
        """
        The consolidated protection health for the VM taking any issues with SRS as well as all
        the replication units associated with the VM's replication group into account. This is a
        string representation of the ProtectionHealth enumeration.
        """
        return pulumi.get(self, "replication_health")

    @property
    @pulumi.getter(name="testFailoverState")
    def test_failover_state(self) -> Optional[str]:
        """The Test failover state."""
        return pulumi.get(self, "test_failover_state")

    @property
    @pulumi.getter(name="testFailoverStateDescription")
    def test_failover_state_description(self) -> Optional[str]:
        """The Test failover state description."""
        return pulumi.get(self, "test_failover_state_description")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ReprotectAgentDetailsResponse(dict):
    """
    Reprotect agent details.
    """
    def __init__(__self__, *,
                 health: str,
                 health_errors: Sequence['outputs.HealthErrorResponse'],
                 id: str,
                 last_heartbeat_utc: str,
                 name: str,
                 version: str):
        """
        Reprotect agent details.

        :param str health: The health of the reprotect agent.
        :param Sequence['HealthErrorResponseArgs'] health_errors: The health errors.
        :param str id: The reprotect agent Id.
        :param str last_heartbeat_utc: The last heartbeat received from the reprotect agent.
        :param str name: The reprotect agent name.
        :param str version: The version.
        """
        # All fields are required; store them in declaration order.
        for attr_name, attr_value in (
            ("health", health),
            ("health_errors", health_errors),
            ("id", id),
            ("last_heartbeat_utc", last_heartbeat_utc),
            ("name", name),
            ("version", version),
        ):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def health(self) -> str:
        """The health of the reprotect agent."""
        return pulumi.get(self, "health")

    @property
    @pulumi.getter(name="healthErrors")
    def health_errors(self) -> Sequence['outputs.HealthErrorResponse']:
        """The health errors."""
        return pulumi.get(self, "health_errors")

    @property
    @pulumi.getter
    def id(self) -> str:
        """The reprotect agent Id."""
        return pulumi.get(self, "id")

    @property
    @pulumi.getter(name="lastHeartbeatUtc")
    def last_heartbeat_utc(self) -> str:
        """The last heartbeat received from the reprotect agent."""
        return pulumi.get(self, "last_heartbeat_utc")

    @property
    @pulumi.getter
    def name(self) -> str:
        """The reprotect agent name."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def version(self) -> str:
        """The version."""
        return pulumi.get(self, "version")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class ResourceHealthDetailsResponse(dict):
    """
    Health Details for backup items.
    """
    def __init__(__self__, *,
                 code: int,
                 message: str,
                 recommendations: Sequence[str],
                 title: str):
        """
        Health Details for backup items.

        :param int code: Health Code.
        :param str message: Health Message.
        :param Sequence[str] recommendations: Health Recommended Actions.
        :param str title: Health Title.
        """
        # All fields are required; store them in declaration order.
        for attr_name, attr_value in (
            ("code", code),
            ("message", message),
            ("recommendations", recommendations),
            ("title", title),
        ):
            pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def code(self) -> int:
        """Health Code."""
        return pulumi.get(self, "code")

    @property
    @pulumi.getter
    def message(self) -> str:
        """Health Message."""
        return pulumi.get(self, "message")

    @property
    @pulumi.getter
    def recommendations(self) -> Sequence[str]:
        """Health Recommended Actions."""
        return pulumi.get(self, "recommendations")

    @property
    @pulumi.getter
    def title(self) -> str:
        """Health Title."""
        return pulumi.get(self, "title")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class RetentionDurationResponse(dict):
    """
    Retention duration.
    """
    def __init__(__self__, *,
                 count: Optional[int] = None,
                 duration_type: Optional[str] = None):
        """
        Retention duration.

        :param int count: Count of the duration types. Retention duration is determined by
               combining the Count times and durationType. For example, if Count = 3 and
               durationType = Weeks, then the retention duration is three weeks.
        :param str duration_type: The retention duration type of the retention policy.
        """
        # Store only the fields that were supplied.
        for attr_name, attr_value in (
            ("count", count),
            ("duration_type", duration_type),
        ):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter
    def count(self) -> Optional[int]:
        """
        Count of the duration types. Retention duration is determined by combining the Count
        times and durationType. For example, if Count = 3 and durationType = Weeks, then the
        retention duration is three weeks.
        """
        return pulumi.get(self, "count")

    @property
    @pulumi.getter(name="durationType")
    def duration_type(self) -> Optional[str]:
        """The retention duration type of the retention policy."""
        return pulumi.get(self, "duration_type")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class RetentionVolumeResponse(dict):
    """
    The retention details of the MT.
    """
    def __init__(__self__, *,
                 capacity_in_bytes: Optional[int] = None,
                 free_space_in_bytes: Optional[int] = None,
                 threshold_percentage: Optional[int] = None,
                 volume_name: Optional[str] = None):
        """
        The retention details of the MT.

        :param int capacity_in_bytes: The volume capacity.
        :param int free_space_in_bytes: The free space available in this volume.
        :param int threshold_percentage: The threshold percentage.
        :param str volume_name: The volume name.
        """
        # Store only the fields that were supplied, in declaration order.
        for attr_name, attr_value in (
            ("capacity_in_bytes", capacity_in_bytes),
            ("free_space_in_bytes", free_space_in_bytes),
            ("threshold_percentage", threshold_percentage),
            ("volume_name", volume_name),
        ):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="capacityInBytes")
    def capacity_in_bytes(self) -> Optional[int]:
        """The volume capacity."""
        return pulumi.get(self, "capacity_in_bytes")

    @property
    @pulumi.getter(name="freeSpaceInBytes")
    def free_space_in_bytes(self) -> Optional[int]:
        """The free space available in this volume."""
        return pulumi.get(self, "free_space_in_bytes")

    @property
    @pulumi.getter(name="thresholdPercentage")
    def threshold_percentage(self) -> Optional[int]:
        """The threshold percentage."""
        return pulumi.get(self, "threshold_percentage")

    @property
    @pulumi.getter(name="volumeName")
    def volume_name(self) -> Optional[str]:
        """The volume name."""
        return pulumi.get(self, "volume_name")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class RunAsAccountResponse(dict):
    """
    CS Accounts Details.
    """
    def __init__(__self__, *,
                 account_id: Optional[str] = None,
                 account_name: Optional[str] = None):
        """
        CS Accounts Details.

        :param str account_id: The CS RunAs account Id.
        :param str account_name: The CS RunAs account name.
        """
        # Store only the fields that were supplied.
        for attr_name, attr_value in (
            ("account_id", account_id),
            ("account_name", account_name),
        ):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="accountId")
    def account_id(self) -> Optional[str]:
        """The CS RunAs account Id."""
        return pulumi.get(self, "account_id")

    @property
    @pulumi.getter(name="accountName")
    def account_name(self) -> Optional[str]:
        """The CS RunAs account name."""
        return pulumi.get(self, "account_name")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class SimpleRetentionPolicyResponse(dict):
    """
    Simple policy retention.
    """
    def __init__(__self__, *,
                 retention_duration: Optional['outputs.RetentionDurationResponse'] = None,
                 retention_policy_type: Optional[str] = None):
        """
        Simple policy retention.

        :param 'RetentionDurationResponseArgs' retention_duration: Retention duration of the
               protection policy.
        :param str retention_policy_type: Discriminator for the polymorphic retention-policy
               chain of types.
        """
        if retention_duration is not None:
            pulumi.set(__self__, "retention_duration", retention_duration)
        if retention_policy_type is not None:
            # Discriminator is pinned to this concrete variant regardless of the argument.
            pulumi.set(__self__, "retention_policy_type", 'SimpleRetentionPolicy')

    @property
    @pulumi.getter(name="retentionDuration")
    def retention_duration(self) -> Optional['outputs.RetentionDurationResponse']:
        """Retention duration of the protection policy."""
        return pulumi.get(self, "retention_duration")

    @property
    @pulumi.getter(name="retentionPolicyType")
    def retention_policy_type(self) -> Optional[str]:
        """Discriminator for the polymorphic retention-policy chain of types."""
        return pulumi.get(self, "retention_policy_type")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class SimpleSchedulePolicyResponse(dict):
    """
    Simple policy schedule.
    """
    def __init__(__self__, *,
                 schedule_policy_type: Optional[str] = None,
                 schedule_run_days: Optional[Sequence[str]] = None,
                 schedule_run_frequency: Optional[str] = None,
                 schedule_run_times: Optional[Sequence[str]] = None,
                 schedule_weekly_frequency: Optional[int] = None):
        """
        Simple policy schedule.

        :param str schedule_policy_type: Discriminator for the polymorphic schedule-policy
               chain of types.
        :param Sequence[str] schedule_run_days: The days of the week when the schedule runs.
        :param str schedule_run_frequency: The frequency interval (daily or weekly) for the
               schedule policy.
        :param Sequence[str] schedule_run_times: List of times, during a day, when the
               schedule runs.
        :param int schedule_weekly_frequency: The number of times per week the schedule runs.
        """
        if schedule_policy_type is not None:
            # Discriminator is pinned to this concrete variant regardless of the argument.
            pulumi.set(__self__, "schedule_policy_type", 'SimpleSchedulePolicy')
        # Remaining fields are stored only when supplied, in declaration order.
        for attr_name, attr_value in (
            ("schedule_run_days", schedule_run_days),
            ("schedule_run_frequency", schedule_run_frequency),
            ("schedule_run_times", schedule_run_times),
            ("schedule_weekly_frequency", schedule_weekly_frequency),
        ):
            if attr_value is not None:
                pulumi.set(__self__, attr_name, attr_value)

    @property
    @pulumi.getter(name="schedulePolicyType")
    def schedule_policy_type(self) -> Optional[str]:
        """Discriminator for the polymorphic schedule-policy chain of types."""
        return pulumi.get(self, "schedule_policy_type")

    @property
    @pulumi.getter(name="scheduleRunDays")
    def schedule_run_days(self) -> Optional[Sequence[str]]:
        """The days of the week when the schedule runs."""
        return pulumi.get(self, "schedule_run_days")

    @property
    @pulumi.getter(name="scheduleRunFrequency")
    def schedule_run_frequency(self) -> Optional[str]:
        """The frequency interval (daily or weekly) for the schedule policy."""
        return pulumi.get(self, "schedule_run_frequency")

    @property
    @pulumi.getter(name="scheduleRunTimes")
    def schedule_run_times(self) -> Optional[Sequence[str]]:
        """List of times, during a day, when the schedule runs."""
        return pulumi.get(self, "schedule_run_times")

    @property
    @pulumi.getter(name="scheduleWeeklyFrequency")
    def schedule_weekly_frequency(self) -> Optional[int]:
        """The number of times per week the schedule runs."""
        return pulumi.get(self, "schedule_weekly_frequency")

    def _translate_property(self, prop):
        # Map camelCase wire names to snake_case attribute names.
        translated = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return translated or prop
@pulumi.output_type
class SkuResponse(dict):
    """
    Identifies the unique system identifier for each Azure resource.
    """
    def __init__(__self__, *,
                 name: str):
        """
        Identifies the unique system identifier for each Azure resource.

        :param str name: The Sku name.
        """
        pulumi.set(__self__, "name", name)

    @property
    @pulumi.getter
    def name(self) -> str:
        """The Sku name."""
        return pulumi.get(self, "name")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class StorageClassificationMappingPropertiesResponse(dict):
    """
    Storage mapping properties.
    """
    def __init__(__self__, *,
                 target_storage_classification_id: Optional[str] = None):
        """
        Storage mapping properties.

        :param str target_storage_classification_id: Target storage object Id.
        """
        # Stored only when explicitly supplied.
        if target_storage_classification_id is not None:
            pulumi.set(__self__, "target_storage_classification_id", target_storage_classification_id)

    @property
    @pulumi.getter(name="targetStorageClassificationId")
    def target_storage_classification_id(self) -> Optional[str]:
        """Target storage object Id."""
        return pulumi.get(self, "target_storage_classification_id")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class UpgradeDetailsResponse(dict):
    """
    Details for upgrading vault.
    """
    def __init__(__self__, *,
                 end_time_utc: str,
                 last_updated_time_utc: str,
                 message: str,
                 operation_id: str,
                 previous_resource_id: str,
                 start_time_utc: str,
                 status: str,
                 trigger_type: str,
                 upgraded_resource_id: str):
        """
        Details for upgrading vault.

        :param str end_time_utc: UTC time at which the upgrade operation ended.
        :param str last_updated_time_utc: UTC time at which the upgrade operation status was last updated.
        :param str message: Message to the user describing the upgrade operation.
        :param str operation_id: ID of the vault upgrade operation.
        :param str previous_resource_id: Resource ID of the vault before the upgrade.
        :param str start_time_utc: UTC time at which the upgrade operation started.
        :param str status: Status of the vault upgrade operation.
        :param str trigger_type: How the vault upgrade was triggered.
        :param str upgraded_resource_id: Resource ID of the upgraded vault.
        """
        # All fields are required; record each one under its snake_case key.
        for key, value in (
                ("end_time_utc", end_time_utc),
                ("last_updated_time_utc", last_updated_time_utc),
                ("message", message),
                ("operation_id", operation_id),
                ("previous_resource_id", previous_resource_id),
                ("start_time_utc", start_time_utc),
                ("status", status),
                ("trigger_type", trigger_type),
                ("upgraded_resource_id", upgraded_resource_id)):
            pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="endTimeUtc")
    def end_time_utc(self) -> str:
        """UTC time at which the upgrade operation ended."""
        return pulumi.get(self, "end_time_utc")

    @property
    @pulumi.getter(name="lastUpdatedTimeUtc")
    def last_updated_time_utc(self) -> str:
        """UTC time at which the upgrade operation status was last updated."""
        return pulumi.get(self, "last_updated_time_utc")

    @property
    @pulumi.getter
    def message(self) -> str:
        """Message to the user describing the upgrade operation."""
        return pulumi.get(self, "message")

    @property
    @pulumi.getter(name="operationId")
    def operation_id(self) -> str:
        """ID of the vault upgrade operation."""
        return pulumi.get(self, "operation_id")

    @property
    @pulumi.getter(name="previousResourceId")
    def previous_resource_id(self) -> str:
        """Resource ID of the vault before the upgrade."""
        return pulumi.get(self, "previous_resource_id")

    @property
    @pulumi.getter(name="startTimeUtc")
    def start_time_utc(self) -> str:
        """UTC time at which the upgrade operation started."""
        return pulumi.get(self, "start_time_utc")

    @property
    @pulumi.getter
    def status(self) -> str:
        """Status of the vault upgrade operation."""
        return pulumi.get(self, "status")

    @property
    @pulumi.getter(name="triggerType")
    def trigger_type(self) -> str:
        """How the vault upgrade was triggered."""
        return pulumi.get(self, "trigger_type")

    @property
    @pulumi.getter(name="upgradedResourceId")
    def upgraded_resource_id(self) -> str:
        """Resource ID of the upgraded vault."""
        return pulumi.get(self, "upgraded_resource_id")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VCenterPropertiesResponse(dict):
    """
    vCenter properties.
    """
    def __init__(__self__, *,
                 discovery_status: Optional[str] = None,
                 fabric_arm_resource_name: Optional[str] = None,
                 friendly_name: Optional[str] = None,
                 health_errors: Optional[Sequence['outputs.HealthErrorResponse']] = None,
                 infrastructure_id: Optional[str] = None,
                 internal_id: Optional[str] = None,
                 ip_address: Optional[str] = None,
                 last_heartbeat: Optional[str] = None,
                 port: Optional[str] = None,
                 process_server_id: Optional[str] = None,
                 run_as_account_id: Optional[str] = None):
        """
        vCenter properties.

        :param str discovery_status: The VCenter discovery status.
        :param str fabric_arm_resource_name: ARM resource name of the fabric containing this VCenter.
        :param str friendly_name: Friendly name of the vCenter.
        :param Sequence['HealthErrorResponseArgs'] health_errors: Health errors for this VCenter.
        :param str infrastructure_id: The infrastructure Id of vCenter.
        :param str internal_id: VCenter internal ID.
        :param str ip_address: The IP address of the vCenter.
        :param str last_heartbeat: Time the last heartbeat was received by vCenter.
        :param str port: The port number for discovery.
        :param str process_server_id: The process server Id.
        :param str run_as_account_id: Account Id that has privileges to discover the vCenter.
        """
        # Every field is optional; record only what was explicitly supplied.
        for key, value in (
                ("discovery_status", discovery_status),
                ("fabric_arm_resource_name", fabric_arm_resource_name),
                ("friendly_name", friendly_name),
                ("health_errors", health_errors),
                ("infrastructure_id", infrastructure_id),
                ("internal_id", internal_id),
                ("ip_address", ip_address),
                ("last_heartbeat", last_heartbeat),
                ("port", port),
                ("process_server_id", process_server_id),
                ("run_as_account_id", run_as_account_id)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="discoveryStatus")
    def discovery_status(self) -> Optional[str]:
        """The VCenter discovery status."""
        return pulumi.get(self, "discovery_status")

    @property
    @pulumi.getter(name="fabricArmResourceName")
    def fabric_arm_resource_name(self) -> Optional[str]:
        """ARM resource name of the fabric containing this VCenter."""
        return pulumi.get(self, "fabric_arm_resource_name")

    @property
    @pulumi.getter(name="friendlyName")
    def friendly_name(self) -> Optional[str]:
        """Friendly name of the vCenter."""
        return pulumi.get(self, "friendly_name")

    @property
    @pulumi.getter(name="healthErrors")
    def health_errors(self) -> Optional[Sequence['outputs.HealthErrorResponse']]:
        """Health errors for this VCenter."""
        return pulumi.get(self, "health_errors")

    @property
    @pulumi.getter(name="infrastructureId")
    def infrastructure_id(self) -> Optional[str]:
        """The infrastructure Id of vCenter."""
        return pulumi.get(self, "infrastructure_id")

    @property
    @pulumi.getter(name="internalId")
    def internal_id(self) -> Optional[str]:
        """VCenter internal ID."""
        return pulumi.get(self, "internal_id")

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[str]:
        """The IP address of the vCenter."""
        return pulumi.get(self, "ip_address")

    @property
    @pulumi.getter(name="lastHeartbeat")
    def last_heartbeat(self) -> Optional[str]:
        """Time the last heartbeat was received by vCenter."""
        return pulumi.get(self, "last_heartbeat")

    @property
    @pulumi.getter
    def port(self) -> Optional[str]:
        """The port number for discovery."""
        return pulumi.get(self, "port")

    @property
    @pulumi.getter(name="processServerId")
    def process_server_id(self) -> Optional[str]:
        """The process server Id."""
        return pulumi.get(self, "process_server_id")

    @property
    @pulumi.getter(name="runAsAccountId")
    def run_as_account_id(self) -> Optional[str]:
        """Account Id that has privileges to discover the vCenter."""
        return pulumi.get(self, "run_as_account_id")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VMNicDetailsResponse(dict):
    """
    Hyper V VM network details.
    """
    def __init__(__self__, *,
                 enable_accelerated_networking_on_recovery: Optional[bool] = None,
                 enable_accelerated_networking_on_tfo: Optional[bool] = None,
                 ip_address_type: Optional[str] = None,
                 nic_id: Optional[str] = None,
                 primary_nic_static_ip_address: Optional[str] = None,
                 recovery_lb_backend_address_pool_ids: Optional[Sequence[str]] = None,
                 recovery_network_security_group_id: Optional[str] = None,
                 recovery_nic_ip_address_type: Optional[str] = None,
                 recovery_nic_name: Optional[str] = None,
                 recovery_nic_resource_group_name: Optional[str] = None,
                 recovery_public_ip_address_id: Optional[str] = None,
                 recovery_vm_network_id: Optional[str] = None,
                 recovery_vm_subnet_name: Optional[str] = None,
                 replica_nic_id: Optional[str] = None,
                 replica_nic_static_ip_address: Optional[str] = None,
                 reuse_existing_nic: Optional[bool] = None,
                 selection_type: Optional[str] = None,
                 source_nic_arm_id: Optional[str] = None,
                 tfo_ip_configs: Optional[Sequence['outputs.IPConfigResponse']] = None,
                 tfo_network_security_group_id: Optional[str] = None,
                 tfo_recovery_nic_name: Optional[str] = None,
                 tfo_recovery_nic_resource_group_name: Optional[str] = None,
                 tfo_reuse_existing_nic: Optional[bool] = None,
                 tfo_vm_network_id: Optional[str] = None,
                 tfo_vm_subnet_name: Optional[str] = None,
                 v_m_network_name: Optional[str] = None,
                 v_m_subnet_name: Optional[str] = None):
        """
        Hyper V VM network details.

        :param bool enable_accelerated_networking_on_recovery: Whether the NIC has accelerated networking enabled.
        :param bool enable_accelerated_networking_on_tfo: Whether the test failover NIC has accelerated networking enabled.
        :param str ip_address_type: Ip address type.
        :param str nic_id: The nic Id.
        :param str primary_nic_static_ip_address: Primary nic static IP address.
        :param Sequence[str] recovery_lb_backend_address_pool_ids: Target backend address pools for the NIC.
        :param str recovery_network_security_group_id: Id of the NSG associated with the NIC.
        :param str recovery_nic_ip_address_type: IP allocation type for recovery VM.
        :param str recovery_nic_name: Name of the NIC to be used when creating target NICs.
        :param str recovery_nic_resource_group_name: Resource group of the NIC to be used when creating target NICs.
        :param str recovery_public_ip_address_id: Id of the public IP address resource associated with the NIC.
        :param str recovery_vm_network_id: Recovery VM network Id.
        :param str recovery_vm_subnet_name: Recovery VM subnet name.
        :param str replica_nic_id: The replica nic Id.
        :param str replica_nic_static_ip_address: Replica nic static IP address.
        :param bool reuse_existing_nic: Whether an existing NIC may be reused during failover, subject to availability.
        :param str selection_type: Selection type for failover.
        :param str source_nic_arm_id: The source nic ARM Id.
        :param Sequence['IPConfigResponseArgs'] tfo_ip_configs: IP configurations used by the NIC during test failover.
        :param str tfo_network_security_group_id: NSG used by the NIC during test failover.
        :param str tfo_recovery_nic_name: Name of the NIC to be used when creating target NICs in TFO.
        :param str tfo_recovery_nic_resource_group_name: Resource group of the NIC to be used when creating target NICs in TFO.
        :param bool tfo_reuse_existing_nic: Whether an existing NIC may be reused during test failover, subject to availability.
        :param str tfo_vm_network_id: Network used by the NIC during test failover.
        :param str tfo_vm_subnet_name: Subnet used by the NIC during test failover.
        :param str v_m_network_name: VM network name.
        :param str v_m_subnet_name: VM subnet name.
        """
        # Every field is optional; record only what was explicitly supplied.
        for key, value in (
                ("enable_accelerated_networking_on_recovery", enable_accelerated_networking_on_recovery),
                ("enable_accelerated_networking_on_tfo", enable_accelerated_networking_on_tfo),
                ("ip_address_type", ip_address_type),
                ("nic_id", nic_id),
                ("primary_nic_static_ip_address", primary_nic_static_ip_address),
                ("recovery_lb_backend_address_pool_ids", recovery_lb_backend_address_pool_ids),
                ("recovery_network_security_group_id", recovery_network_security_group_id),
                ("recovery_nic_ip_address_type", recovery_nic_ip_address_type),
                ("recovery_nic_name", recovery_nic_name),
                ("recovery_nic_resource_group_name", recovery_nic_resource_group_name),
                ("recovery_public_ip_address_id", recovery_public_ip_address_id),
                ("recovery_vm_network_id", recovery_vm_network_id),
                ("recovery_vm_subnet_name", recovery_vm_subnet_name),
                ("replica_nic_id", replica_nic_id),
                ("replica_nic_static_ip_address", replica_nic_static_ip_address),
                ("reuse_existing_nic", reuse_existing_nic),
                ("selection_type", selection_type),
                ("source_nic_arm_id", source_nic_arm_id),
                ("tfo_ip_configs", tfo_ip_configs),
                ("tfo_network_security_group_id", tfo_network_security_group_id),
                ("tfo_recovery_nic_name", tfo_recovery_nic_name),
                ("tfo_recovery_nic_resource_group_name", tfo_recovery_nic_resource_group_name),
                ("tfo_reuse_existing_nic", tfo_reuse_existing_nic),
                ("tfo_vm_network_id", tfo_vm_network_id),
                ("tfo_vm_subnet_name", tfo_vm_subnet_name),
                ("v_m_network_name", v_m_network_name),
                ("v_m_subnet_name", v_m_subnet_name)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="enableAcceleratedNetworkingOnRecovery")
    def enable_accelerated_networking_on_recovery(self) -> Optional[bool]:
        """Whether the NIC has accelerated networking enabled."""
        return pulumi.get(self, "enable_accelerated_networking_on_recovery")

    @property
    @pulumi.getter(name="enableAcceleratedNetworkingOnTfo")
    def enable_accelerated_networking_on_tfo(self) -> Optional[bool]:
        """Whether the test failover NIC has accelerated networking enabled."""
        return pulumi.get(self, "enable_accelerated_networking_on_tfo")

    @property
    @pulumi.getter(name="ipAddressType")
    def ip_address_type(self) -> Optional[str]:
        """Ip address type."""
        return pulumi.get(self, "ip_address_type")

    @property
    @pulumi.getter(name="nicId")
    def nic_id(self) -> Optional[str]:
        """The nic Id."""
        return pulumi.get(self, "nic_id")

    @property
    @pulumi.getter(name="primaryNicStaticIPAddress")
    def primary_nic_static_ip_address(self) -> Optional[str]:
        """Primary nic static IP address."""
        return pulumi.get(self, "primary_nic_static_ip_address")

    @property
    @pulumi.getter(name="recoveryLBBackendAddressPoolIds")
    def recovery_lb_backend_address_pool_ids(self) -> Optional[Sequence[str]]:
        """Target backend address pools for the NIC."""
        return pulumi.get(self, "recovery_lb_backend_address_pool_ids")

    @property
    @pulumi.getter(name="recoveryNetworkSecurityGroupId")
    def recovery_network_security_group_id(self) -> Optional[str]:
        """Id of the NSG associated with the NIC."""
        return pulumi.get(self, "recovery_network_security_group_id")

    @property
    @pulumi.getter(name="recoveryNicIpAddressType")
    def recovery_nic_ip_address_type(self) -> Optional[str]:
        """IP allocation type for recovery VM."""
        return pulumi.get(self, "recovery_nic_ip_address_type")

    @property
    @pulumi.getter(name="recoveryNicName")
    def recovery_nic_name(self) -> Optional[str]:
        """Name of the NIC to be used when creating target NICs."""
        return pulumi.get(self, "recovery_nic_name")

    @property
    @pulumi.getter(name="recoveryNicResourceGroupName")
    def recovery_nic_resource_group_name(self) -> Optional[str]:
        """Resource group of the NIC to be used when creating target NICs."""
        return pulumi.get(self, "recovery_nic_resource_group_name")

    @property
    @pulumi.getter(name="recoveryPublicIpAddressId")
    def recovery_public_ip_address_id(self) -> Optional[str]:
        """Id of the public IP address resource associated with the NIC."""
        return pulumi.get(self, "recovery_public_ip_address_id")

    @property
    @pulumi.getter(name="recoveryVMNetworkId")
    def recovery_vm_network_id(self) -> Optional[str]:
        """Recovery VM network Id."""
        return pulumi.get(self, "recovery_vm_network_id")

    @property
    @pulumi.getter(name="recoveryVMSubnetName")
    def recovery_vm_subnet_name(self) -> Optional[str]:
        """Recovery VM subnet name."""
        return pulumi.get(self, "recovery_vm_subnet_name")

    @property
    @pulumi.getter(name="replicaNicId")
    def replica_nic_id(self) -> Optional[str]:
        """The replica nic Id."""
        return pulumi.get(self, "replica_nic_id")

    @property
    @pulumi.getter(name="replicaNicStaticIPAddress")
    def replica_nic_static_ip_address(self) -> Optional[str]:
        """Replica nic static IP address."""
        return pulumi.get(self, "replica_nic_static_ip_address")

    @property
    @pulumi.getter(name="reuseExistingNic")
    def reuse_existing_nic(self) -> Optional[bool]:
        """Whether an existing NIC may be reused during failover, subject to availability."""
        return pulumi.get(self, "reuse_existing_nic")

    @property
    @pulumi.getter(name="selectionType")
    def selection_type(self) -> Optional[str]:
        """Selection type for failover."""
        return pulumi.get(self, "selection_type")

    @property
    @pulumi.getter(name="sourceNicArmId")
    def source_nic_arm_id(self) -> Optional[str]:
        """The source nic ARM Id."""
        return pulumi.get(self, "source_nic_arm_id")

    @property
    @pulumi.getter(name="tfoIPConfigs")
    def tfo_ip_configs(self) -> Optional[Sequence['outputs.IPConfigResponse']]:
        """IP configurations used by the NIC during test failover."""
        return pulumi.get(self, "tfo_ip_configs")

    @property
    @pulumi.getter(name="tfoNetworkSecurityGroupId")
    def tfo_network_security_group_id(self) -> Optional[str]:
        """NSG used by the NIC during test failover."""
        return pulumi.get(self, "tfo_network_security_group_id")

    @property
    @pulumi.getter(name="tfoRecoveryNicName")
    def tfo_recovery_nic_name(self) -> Optional[str]:
        """Name of the NIC to be used when creating target NICs in TFO."""
        return pulumi.get(self, "tfo_recovery_nic_name")

    @property
    @pulumi.getter(name="tfoRecoveryNicResourceGroupName")
    def tfo_recovery_nic_resource_group_name(self) -> Optional[str]:
        """Resource group of the NIC to be used when creating target NICs in TFO."""
        return pulumi.get(self, "tfo_recovery_nic_resource_group_name")

    @property
    @pulumi.getter(name="tfoReuseExistingNic")
    def tfo_reuse_existing_nic(self) -> Optional[bool]:
        """Whether an existing NIC may be reused during test failover, subject to availability."""
        return pulumi.get(self, "tfo_reuse_existing_nic")

    @property
    @pulumi.getter(name="tfoVMNetworkId")
    def tfo_vm_network_id(self) -> Optional[str]:
        """Network used by the NIC during test failover."""
        return pulumi.get(self, "tfo_vm_network_id")

    @property
    @pulumi.getter(name="tfoVMSubnetName")
    def tfo_vm_subnet_name(self) -> Optional[str]:
        """Subnet used by the NIC during test failover."""
        return pulumi.get(self, "tfo_vm_subnet_name")

    @property
    @pulumi.getter(name="vMNetworkName")
    def v_m_network_name(self) -> Optional[str]:
        """VM network name."""
        return pulumi.get(self, "v_m_network_name")

    @property
    @pulumi.getter(name="vMSubnetName")
    def v_m_subnet_name(self) -> Optional[str]:
        """VM subnet name."""
        return pulumi.get(self, "v_m_subnet_name")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VMwareCbtMigrationDetailsResponse(dict):
    """
    VMwareCbt provider specific settings
    """
    def __init__(__self__, *,
                 data_mover_run_as_account_id: str,
                 instance_type: str,
                 last_recovery_point_received: str,
                 migration_recovery_point_id: str,
                 os_type: str,
                 snapshot_run_as_account_id: str,
                 target_location: str,
                 vmware_machine_id: str,
                 license_type: Optional[str] = None,
                 protected_disks: Optional[Sequence['outputs.VMwareCbtProtectedDiskDetailsResponse']] = None,
                 target_availability_set_id: Optional[str] = None,
                 target_boot_diagnostics_storage_account_id: Optional[str] = None,
                 target_network_id: Optional[str] = None,
                 target_resource_group_id: Optional[str] = None,
                 target_vm_name: Optional[str] = None,
                 target_vm_size: Optional[str] = None,
                 vm_nics: Optional[Sequence['outputs.VMwareCbtNicDetailsResponse']] = None):
        """
        VMwareCbt provider specific settings

        :param str data_mover_run_as_account_id: The data mover RunAs account Id.
        :param str instance_type: Discriminator; the stored value is always the constant 'VMwareCbt'.
        :param str last_recovery_point_received: The last recovery point received time.
        :param str migration_recovery_point_id: The recovery point Id to which the VM was migrated.
        :param str os_type: The type of the OS on the VM.
        :param str snapshot_run_as_account_id: The snapshot RunAs account Id.
        :param str target_location: The target location.
        :param str vmware_machine_id: The ARM Id of the VM discovered in VMware.
        :param str license_type: License Type of the VM to be used.
        :param Sequence['VMwareCbtProtectedDiskDetailsResponseArgs'] protected_disks: The list of protected disks.
        :param str target_availability_set_id: The target availability set Id.
        :param str target_boot_diagnostics_storage_account_id: The target boot diagnostics storage account ARM Id.
        :param str target_network_id: The target network Id.
        :param str target_resource_group_id: The target resource group Id.
        :param str target_vm_name: Target VM name.
        :param str target_vm_size: The target VM size.
        :param Sequence['VMwareCbtNicDetailsResponseArgs'] vm_nics: The network details.
        """
        # Required fields; the discriminator is pinned to the constant provider name.
        for key, value in (
                ("data_mover_run_as_account_id", data_mover_run_as_account_id),
                ("instance_type", 'VMwareCbt'),
                ("last_recovery_point_received", last_recovery_point_received),
                ("migration_recovery_point_id", migration_recovery_point_id),
                ("os_type", os_type),
                ("snapshot_run_as_account_id", snapshot_run_as_account_id),
                ("target_location", target_location),
                ("vmware_machine_id", vmware_machine_id)):
            pulumi.set(__self__, key, value)
        # Optional fields are recorded only when explicitly supplied.
        for key, value in (
                ("license_type", license_type),
                ("protected_disks", protected_disks),
                ("target_availability_set_id", target_availability_set_id),
                ("target_boot_diagnostics_storage_account_id", target_boot_diagnostics_storage_account_id),
                ("target_network_id", target_network_id),
                ("target_resource_group_id", target_resource_group_id),
                ("target_vm_name", target_vm_name),
                ("target_vm_size", target_vm_size),
                ("vm_nics", vm_nics)):
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="dataMoverRunAsAccountId")
    def data_mover_run_as_account_id(self) -> str:
        """The data mover RunAs account Id."""
        return pulumi.get(self, "data_mover_run_as_account_id")

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Gets the instance type."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="lastRecoveryPointReceived")
    def last_recovery_point_received(self) -> str:
        """The last recovery point received time."""
        return pulumi.get(self, "last_recovery_point_received")

    @property
    @pulumi.getter(name="migrationRecoveryPointId")
    def migration_recovery_point_id(self) -> str:
        """The recovery point Id to which the VM was migrated."""
        return pulumi.get(self, "migration_recovery_point_id")

    @property
    @pulumi.getter(name="osType")
    def os_type(self) -> str:
        """The type of the OS on the VM."""
        return pulumi.get(self, "os_type")

    @property
    @pulumi.getter(name="snapshotRunAsAccountId")
    def snapshot_run_as_account_id(self) -> str:
        """The snapshot RunAs account Id."""
        return pulumi.get(self, "snapshot_run_as_account_id")

    @property
    @pulumi.getter(name="targetLocation")
    def target_location(self) -> str:
        """The target location."""
        return pulumi.get(self, "target_location")

    @property
    @pulumi.getter(name="vmwareMachineId")
    def vmware_machine_id(self) -> str:
        """The ARM Id of the VM discovered in VMware."""
        return pulumi.get(self, "vmware_machine_id")

    @property
    @pulumi.getter(name="licenseType")
    def license_type(self) -> Optional[str]:
        """License Type of the VM to be used."""
        return pulumi.get(self, "license_type")

    @property
    @pulumi.getter(name="protectedDisks")
    def protected_disks(self) -> Optional[Sequence['outputs.VMwareCbtProtectedDiskDetailsResponse']]:
        """The list of protected disks."""
        return pulumi.get(self, "protected_disks")

    @property
    @pulumi.getter(name="targetAvailabilitySetId")
    def target_availability_set_id(self) -> Optional[str]:
        """The target availability set Id."""
        return pulumi.get(self, "target_availability_set_id")

    @property
    @pulumi.getter(name="targetBootDiagnosticsStorageAccountId")
    def target_boot_diagnostics_storage_account_id(self) -> Optional[str]:
        """The target boot diagnostics storage account ARM Id."""
        return pulumi.get(self, "target_boot_diagnostics_storage_account_id")

    @property
    @pulumi.getter(name="targetNetworkId")
    def target_network_id(self) -> Optional[str]:
        """The target network Id."""
        return pulumi.get(self, "target_network_id")

    @property
    @pulumi.getter(name="targetResourceGroupId")
    def target_resource_group_id(self) -> Optional[str]:
        """The target resource group Id."""
        return pulumi.get(self, "target_resource_group_id")

    @property
    @pulumi.getter(name="targetVmName")
    def target_vm_name(self) -> Optional[str]:
        """Target VM name."""
        return pulumi.get(self, "target_vm_name")

    @property
    @pulumi.getter(name="targetVmSize")
    def target_vm_size(self) -> Optional[str]:
        """The target VM size."""
        return pulumi.get(self, "target_vm_size")

    @property
    @pulumi.getter(name="vmNics")
    def vm_nics(self) -> Optional[Sequence['outputs.VMwareCbtNicDetailsResponse']]:
        """The network details."""
        return pulumi.get(self, "vm_nics")

    def _translate_property(self, prop):
        # Map camelCase wire names back to snake_case attribute names.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VMwareCbtNicDetailsResponse(dict):
"""
VMwareCbt NIC details.
"""
def __init__(__self__, *,
nic_id: str,
source_ip_address: str,
source_ip_address_type: str,
source_network_id: str,
is_primary_nic: Optional[str] = None,
is_selected_for_migration: Optional[str] = None,
target_ip_address: Optional[str] = None,
target_ip_address_type: Optional[str] = None,
target_subnet_name: Optional[str] = None):
"""
VMwareCbt NIC details.
:param str nic_id: The NIC Id.
:param str source_ip_address: The source IP address.
:param str source_ip_address_type: The source IP address type.
:param str source_network_id: Source network Id.
:param str is_primary_nic: A value indicating whether this is the primary NIC.
:param str is_selected_for_migration: A value indicating whether this NIC is selected for migration.
:param str target_ip_address: The target IP address.
:param str target_ip_address_type: The target IP address type.
:param str target_subnet_name: Target subnet name.
"""
pulumi.set(__self__, "nic_id", nic_id)
pulumi.set(__self__, "source_ip_address", source_ip_address)
pulumi.set(__self__, "source_ip_address_type", source_ip_address_type)
pulumi.set(__self__, "source_network_id", source_network_id)
if is_primary_nic is not None:
pulumi.set(__self__, "is_primary_nic", is_primary_nic)
if is_selected_for_migration is not None:
pulumi.set(__self__, "is_selected_for_migration", is_selected_for_migration)
if target_ip_address is not None:
pulumi.set(__self__, "target_ip_address", target_ip_address)
if target_ip_address_type is not None:
pulumi.set(__self__, "target_ip_address_type", target_ip_address_type)
if target_subnet_name is not None:
pulumi.set(__self__, "target_subnet_name", target_subnet_name)
@property
@pulumi.getter(name="nicId")
def nic_id(self) -> str:
"""
The NIC Id.
"""
return pulumi.get(self, "nic_id")
@property
@pulumi.getter(name="sourceIPAddress")
def source_ip_address(self) -> str:
"""
The source IP address.
"""
return pulumi.get(self, "source_ip_address")
@property
@pulumi.getter(name="sourceIPAddressType")
def source_ip_address_type(self) -> str:
"""
The source IP address type.
"""
return pulumi.get(self, "source_ip_address_type")
@property
@pulumi.getter(name="sourceNetworkId")
def source_network_id(self) -> str:
"""
Source network Id.
"""
return pulumi.get(self, "source_network_id")
@property
@pulumi.getter(name="isPrimaryNic")
def is_primary_nic(self) -> Optional[str]:
"""
A value indicating whether this is the primary NIC.
"""
return pulumi.get(self, "is_primary_nic")
@property
@pulumi.getter(name="isSelectedForMigration")
def is_selected_for_migration(self) -> Optional[str]:
"""
A value indicating whether this NIC is selected for migration.
"""
return pulumi.get(self, "is_selected_for_migration")
@property
@pulumi.getter(name="targetIPAddress")
def target_ip_address(self) -> Optional[str]:
"""
The target IP address.
"""
return pulumi.get(self, "target_ip_address")
    @property
    @pulumi.getter(name="targetIPAddressType")
    def target_ip_address_type(self) -> Optional[str]:
        """
        The target IP address type. Optional; ``None`` when the field was not supplied.
        """
        return pulumi.get(self, "target_ip_address_type")
    @property
    @pulumi.getter(name="targetSubnetName")
    def target_subnet_name(self) -> Optional[str]:
        """
        Target subnet name. Optional; ``None`` when the field was not supplied.
        """
        return pulumi.get(self, "target_subnet_name")
    def _translate_property(self, prop):
        # Map a camelCase wire name to its snake_case attribute; fall back to
        # the name itself when no mapping exists in the table.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VMwareCbtProtectedDiskDetailsResponse(dict):
    """
    VMwareCbt protected disk details.
    """
    def __init__(__self__, *,
                 capacity_in_bytes: int,
                 disk_id: str,
                 disk_name: str,
                 disk_path: str,
                 is_os_disk: str,
                 log_storage_account_id: str,
                 log_storage_account_sas_secret_name: str,
                 seed_managed_disk_id: str,
                 target_managed_disk_id: str,
                 disk_type: Optional[str] = None):
        """
        VMwareCbt protected disk details.

        :param int capacity_in_bytes: Disk capacity, in bytes.
        :param str disk_id: Identifier of the disk.
        :param str disk_name: Name of the disk.
        :param str disk_path: Path of the disk.
        :param str is_os_disk: A value indicating whether the disk is the OS disk.
        :param str log_storage_account_id: ARM Id of the log storage account.
        :param str log_storage_account_sas_secret_name: Key vault secret name of the log storage account.
        :param str seed_managed_disk_id: ARM Id of the seed managed disk.
        :param str target_managed_disk_id: ARM Id of the target managed disk.
        :param str disk_type: Type of the disk, when known.
        """
        required_fields = (
            ("capacity_in_bytes", capacity_in_bytes),
            ("disk_id", disk_id),
            ("disk_name", disk_name),
            ("disk_path", disk_path),
            ("is_os_disk", is_os_disk),
            ("log_storage_account_id", log_storage_account_id),
            ("log_storage_account_sas_secret_name", log_storage_account_sas_secret_name),
            ("seed_managed_disk_id", seed_managed_disk_id),
            ("target_managed_disk_id", target_managed_disk_id),
        )
        for attr, value in required_fields:
            pulumi.set(__self__, attr, value)
        # Optional field: only recorded when a value was actually supplied.
        if disk_type is not None:
            pulumi.set(__self__, "disk_type", disk_type)

    @property
    @pulumi.getter(name="capacityInBytes")
    def capacity_in_bytes(self) -> int:
        """
        Disk capacity, in bytes.
        """
        return pulumi.get(self, "capacity_in_bytes")

    @property
    @pulumi.getter(name="diskId")
    def disk_id(self) -> str:
        """
        Identifier of the disk.
        """
        return pulumi.get(self, "disk_id")

    @property
    @pulumi.getter(name="diskName")
    def disk_name(self) -> str:
        """
        Name of the disk.
        """
        return pulumi.get(self, "disk_name")

    @property
    @pulumi.getter(name="diskPath")
    def disk_path(self) -> str:
        """
        Path of the disk.
        """
        return pulumi.get(self, "disk_path")

    @property
    @pulumi.getter(name="isOSDisk")
    def is_os_disk(self) -> str:
        """
        A value indicating whether the disk is the OS disk.
        """
        return pulumi.get(self, "is_os_disk")

    @property
    @pulumi.getter(name="logStorageAccountId")
    def log_storage_account_id(self) -> str:
        """
        ARM Id of the log storage account.
        """
        return pulumi.get(self, "log_storage_account_id")

    @property
    @pulumi.getter(name="logStorageAccountSasSecretName")
    def log_storage_account_sas_secret_name(self) -> str:
        """
        Key vault secret name of the log storage account.
        """
        return pulumi.get(self, "log_storage_account_sas_secret_name")

    @property
    @pulumi.getter(name="seedManagedDiskId")
    def seed_managed_disk_id(self) -> str:
        """
        ARM Id of the seed managed disk.
        """
        return pulumi.get(self, "seed_managed_disk_id")

    @property
    @pulumi.getter(name="targetManagedDiskId")
    def target_managed_disk_id(self) -> str:
        """
        ARM Id of the target managed disk.
        """
        return pulumi.get(self, "target_managed_disk_id")

    @property
    @pulumi.getter(name="diskType")
    def disk_type(self) -> Optional[str]:
        """
        Type of the disk, when known.
        """
        return pulumi.get(self, "disk_type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VMwareCbtProtectionContainerMappingDetailsResponse(dict):
    """
    VMwareCbt provider specific container mapping details.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 key_vault_id: str,
                 key_vault_uri: str,
                 service_bus_connection_string_secret_name: str,
                 storage_account_id: str,
                 storage_account_sas_secret_name: str,
                 target_location: str):
        """
        VMwareCbt provider specific container mapping details.

        :param str instance_type: Gets the class type. Overridden in derived classes.
        :param str key_vault_id: ARM Id of the target key vault.
        :param str key_vault_uri: URI of the target key vault.
        :param str service_bus_connection_string_secret_name: Secret name of the service bus connection string.
        :param str storage_account_id: ARM Id of the storage account.
        :param str storage_account_sas_secret_name: Secret name of the storage account.
        :param str target_location: The target location.
        """
        # The discriminator is fixed for this type; the incoming argument is ignored.
        pulumi.set(__self__, "instance_type", 'VMwareCbt')
        for attr, value in (
                ("key_vault_id", key_vault_id),
                ("key_vault_uri", key_vault_uri),
                ("service_bus_connection_string_secret_name", service_bus_connection_string_secret_name),
                ("storage_account_id", storage_account_id),
                ("storage_account_sas_secret_name", storage_account_sas_secret_name),
                ("target_location", target_location)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """
        Gets the class type. Overridden in derived classes.
        """
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="keyVaultId")
    def key_vault_id(self) -> str:
        """
        ARM Id of the target key vault.
        """
        return pulumi.get(self, "key_vault_id")

    @property
    @pulumi.getter(name="keyVaultUri")
    def key_vault_uri(self) -> str:
        """
        URI of the target key vault.
        """
        return pulumi.get(self, "key_vault_uri")

    @property
    @pulumi.getter(name="serviceBusConnectionStringSecretName")
    def service_bus_connection_string_secret_name(self) -> str:
        """
        Secret name of the service bus connection string.
        """
        return pulumi.get(self, "service_bus_connection_string_secret_name")

    @property
    @pulumi.getter(name="storageAccountId")
    def storage_account_id(self) -> str:
        """
        ARM Id of the storage account.
        """
        return pulumi.get(self, "storage_account_id")

    @property
    @pulumi.getter(name="storageAccountSasSecretName")
    def storage_account_sas_secret_name(self) -> str:
        """
        Secret name of the storage account.
        """
        return pulumi.get(self, "storage_account_sas_secret_name")

    @property
    @pulumi.getter(name="targetLocation")
    def target_location(self) -> str:
        """
        The target location.
        """
        return pulumi.get(self, "target_location")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VMwareDetailsResponse(dict):
    """
    Store the fabric details specific to the VMware fabric.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 agent_count: Optional[str] = None,
                 agent_expiry_date: Optional[str] = None,
                 agent_version: Optional[str] = None,
                 agent_version_details: Optional['outputs.VersionDetailsResponse'] = None,
                 available_memory_in_bytes: Optional[int] = None,
                 available_space_in_bytes: Optional[int] = None,
                 cpu_load: Optional[str] = None,
                 cpu_load_status: Optional[str] = None,
                 cs_service_status: Optional[str] = None,
                 database_server_load: Optional[str] = None,
                 database_server_load_status: Optional[str] = None,
                 host_name: Optional[str] = None,
                 ip_address: Optional[str] = None,
                 last_heartbeat: Optional[str] = None,
                 master_target_servers: Optional[Sequence['outputs.MasterTargetServerResponse']] = None,
                 memory_usage_status: Optional[str] = None,
                 process_server_count: Optional[str] = None,
                 process_servers: Optional[Sequence['outputs.ProcessServerResponse']] = None,
                 protected_servers: Optional[str] = None,
                 ps_template_version: Optional[str] = None,
                 replication_pair_count: Optional[str] = None,
                 run_as_accounts: Optional[Sequence['outputs.RunAsAccountResponse']] = None,
                 space_usage_status: Optional[str] = None,
                 ssl_cert_expiry_date: Optional[str] = None,
                 ssl_cert_expiry_remaining_days: Optional[int] = None,
                 system_load: Optional[str] = None,
                 system_load_status: Optional[str] = None,
                 total_memory_in_bytes: Optional[int] = None,
                 total_space_in_bytes: Optional[int] = None,
                 version_status: Optional[str] = None,
                 web_load: Optional[str] = None,
                 web_load_status: Optional[str] = None):
        """
        Store the fabric details specific to the VMware fabric.

        :param str instance_type: Gets the class type. Overridden in derived classes.
        :param str agent_count: Number of source and target servers configured to talk to this CS.
        :param str agent_expiry_date: Agent expiry date.
        :param str agent_version: Version of the agent.
        :param 'VersionDetailsResponseArgs' agent_version_details: Details of the agent version.
        :param int available_memory_in_bytes: Available memory.
        :param int available_space_in_bytes: Available space.
        :param str cpu_load: Percentage of the CPU load.
        :param str cpu_load_status: CPU load status.
        :param str cs_service_status: CS service status.
        :param str database_server_load: Database server load.
        :param str database_server_load_status: Database server load status.
        :param str host_name: Host name.
        :param str ip_address: IP address.
        :param str last_heartbeat: Last heartbeat received from the CS server.
        :param Sequence['MasterTargetServerResponseArgs'] master_target_servers: Master Target servers associated with the fabric.
        :param str memory_usage_status: Memory usage status.
        :param str process_server_count: Number of process servers.
        :param Sequence['ProcessServerResponseArgs'] process_servers: Process Servers associated with the fabric.
        :param str protected_servers: Number of protected servers.
        :param str ps_template_version: PS template version.
        :param str replication_pair_count: Number of replication pairs configured in this CS.
        :param Sequence['RunAsAccountResponseArgs'] run_as_accounts: Run as accounts created on the server.
        :param str space_usage_status: Space usage status.
        :param str ssl_cert_expiry_date: CS SSL cert expiry date.
        :param int ssl_cert_expiry_remaining_days: Days remaining until the CS SSL cert expires.
        :param str system_load: Percentage of the system load.
        :param str system_load_status: System load status.
        :param int total_memory_in_bytes: Total memory.
        :param int total_space_in_bytes: Total space.
        :param str version_status: Version status.
        :param str web_load: Web load.
        :param str web_load_status: Web load status.
        """
        # The discriminator is fixed for this type; the incoming argument is ignored.
        pulumi.set(__self__, "instance_type", 'VMware')
        # Every other field is optional: only record the ones actually supplied.
        optional_fields = {
            "agent_count": agent_count,
            "agent_expiry_date": agent_expiry_date,
            "agent_version": agent_version,
            "agent_version_details": agent_version_details,
            "available_memory_in_bytes": available_memory_in_bytes,
            "available_space_in_bytes": available_space_in_bytes,
            "cpu_load": cpu_load,
            "cpu_load_status": cpu_load_status,
            "cs_service_status": cs_service_status,
            "database_server_load": database_server_load,
            "database_server_load_status": database_server_load_status,
            "host_name": host_name,
            "ip_address": ip_address,
            "last_heartbeat": last_heartbeat,
            "master_target_servers": master_target_servers,
            "memory_usage_status": memory_usage_status,
            "process_server_count": process_server_count,
            "process_servers": process_servers,
            "protected_servers": protected_servers,
            "ps_template_version": ps_template_version,
            "replication_pair_count": replication_pair_count,
            "run_as_accounts": run_as_accounts,
            "space_usage_status": space_usage_status,
            "ssl_cert_expiry_date": ssl_cert_expiry_date,
            "ssl_cert_expiry_remaining_days": ssl_cert_expiry_remaining_days,
            "system_load": system_load,
            "system_load_status": system_load_status,
            "total_memory_in_bytes": total_memory_in_bytes,
            "total_space_in_bytes": total_space_in_bytes,
            "version_status": version_status,
            "web_load": web_load,
            "web_load_status": web_load_status,
        }
        for attr, value in optional_fields.items():
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """
        Gets the class type. Overridden in derived classes.
        """
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="agentCount")
    def agent_count(self) -> Optional[str]:
        """
        Number of source and target servers configured to talk to this CS.
        """
        return pulumi.get(self, "agent_count")

    @property
    @pulumi.getter(name="agentExpiryDate")
    def agent_expiry_date(self) -> Optional[str]:
        """
        Agent expiry date.
        """
        return pulumi.get(self, "agent_expiry_date")

    @property
    @pulumi.getter(name="agentVersion")
    def agent_version(self) -> Optional[str]:
        """
        Version of the agent.
        """
        return pulumi.get(self, "agent_version")

    @property
    @pulumi.getter(name="agentVersionDetails")
    def agent_version_details(self) -> Optional['outputs.VersionDetailsResponse']:
        """
        Details of the agent version.
        """
        return pulumi.get(self, "agent_version_details")

    @property
    @pulumi.getter(name="availableMemoryInBytes")
    def available_memory_in_bytes(self) -> Optional[int]:
        """
        Available memory.
        """
        return pulumi.get(self, "available_memory_in_bytes")

    @property
    @pulumi.getter(name="availableSpaceInBytes")
    def available_space_in_bytes(self) -> Optional[int]:
        """
        Available space.
        """
        return pulumi.get(self, "available_space_in_bytes")

    @property
    @pulumi.getter(name="cpuLoad")
    def cpu_load(self) -> Optional[str]:
        """
        Percentage of the CPU load.
        """
        return pulumi.get(self, "cpu_load")

    @property
    @pulumi.getter(name="cpuLoadStatus")
    def cpu_load_status(self) -> Optional[str]:
        """
        CPU load status.
        """
        return pulumi.get(self, "cpu_load_status")

    @property
    @pulumi.getter(name="csServiceStatus")
    def cs_service_status(self) -> Optional[str]:
        """
        CS service status.
        """
        return pulumi.get(self, "cs_service_status")

    @property
    @pulumi.getter(name="databaseServerLoad")
    def database_server_load(self) -> Optional[str]:
        """
        Database server load.
        """
        return pulumi.get(self, "database_server_load")

    @property
    @pulumi.getter(name="databaseServerLoadStatus")
    def database_server_load_status(self) -> Optional[str]:
        """
        Database server load status.
        """
        return pulumi.get(self, "database_server_load_status")

    @property
    @pulumi.getter(name="hostName")
    def host_name(self) -> Optional[str]:
        """
        Host name.
        """
        return pulumi.get(self, "host_name")

    @property
    @pulumi.getter(name="ipAddress")
    def ip_address(self) -> Optional[str]:
        """
        IP address.
        """
        return pulumi.get(self, "ip_address")

    @property
    @pulumi.getter(name="lastHeartbeat")
    def last_heartbeat(self) -> Optional[str]:
        """
        Last heartbeat received from the CS server.
        """
        return pulumi.get(self, "last_heartbeat")

    @property
    @pulumi.getter(name="masterTargetServers")
    def master_target_servers(self) -> Optional[Sequence['outputs.MasterTargetServerResponse']]:
        """
        Master Target servers associated with the fabric.
        """
        return pulumi.get(self, "master_target_servers")

    @property
    @pulumi.getter(name="memoryUsageStatus")
    def memory_usage_status(self) -> Optional[str]:
        """
        Memory usage status.
        """
        return pulumi.get(self, "memory_usage_status")

    @property
    @pulumi.getter(name="processServerCount")
    def process_server_count(self) -> Optional[str]:
        """
        Number of process servers.
        """
        return pulumi.get(self, "process_server_count")

    @property
    @pulumi.getter(name="processServers")
    def process_servers(self) -> Optional[Sequence['outputs.ProcessServerResponse']]:
        """
        Process Servers associated with the fabric.
        """
        return pulumi.get(self, "process_servers")

    @property
    @pulumi.getter(name="protectedServers")
    def protected_servers(self) -> Optional[str]:
        """
        Number of protected servers.
        """
        return pulumi.get(self, "protected_servers")

    @property
    @pulumi.getter(name="psTemplateVersion")
    def ps_template_version(self) -> Optional[str]:
        """
        PS template version.
        """
        return pulumi.get(self, "ps_template_version")

    @property
    @pulumi.getter(name="replicationPairCount")
    def replication_pair_count(self) -> Optional[str]:
        """
        Number of replication pairs configured in this CS.
        """
        return pulumi.get(self, "replication_pair_count")

    @property
    @pulumi.getter(name="runAsAccounts")
    def run_as_accounts(self) -> Optional[Sequence['outputs.RunAsAccountResponse']]:
        """
        Run as accounts created on the server.
        """
        return pulumi.get(self, "run_as_accounts")

    @property
    @pulumi.getter(name="spaceUsageStatus")
    def space_usage_status(self) -> Optional[str]:
        """
        Space usage status.
        """
        return pulumi.get(self, "space_usage_status")

    @property
    @pulumi.getter(name="sslCertExpiryDate")
    def ssl_cert_expiry_date(self) -> Optional[str]:
        """
        CS SSL cert expiry date.
        """
        return pulumi.get(self, "ssl_cert_expiry_date")

    @property
    @pulumi.getter(name="sslCertExpiryRemainingDays")
    def ssl_cert_expiry_remaining_days(self) -> Optional[int]:
        """
        Days remaining until the CS SSL cert expires.
        """
        return pulumi.get(self, "ssl_cert_expiry_remaining_days")

    @property
    @pulumi.getter(name="systemLoad")
    def system_load(self) -> Optional[str]:
        """
        Percentage of the system load.
        """
        return pulumi.get(self, "system_load")

    @property
    @pulumi.getter(name="systemLoadStatus")
    def system_load_status(self) -> Optional[str]:
        """
        System load status.
        """
        return pulumi.get(self, "system_load_status")

    @property
    @pulumi.getter(name="totalMemoryInBytes")
    def total_memory_in_bytes(self) -> Optional[int]:
        """
        Total memory.
        """
        return pulumi.get(self, "total_memory_in_bytes")

    @property
    @pulumi.getter(name="totalSpaceInBytes")
    def total_space_in_bytes(self) -> Optional[int]:
        """
        Total space.
        """
        return pulumi.get(self, "total_space_in_bytes")

    @property
    @pulumi.getter(name="versionStatus")
    def version_status(self) -> Optional[str]:
        """
        Version status.
        """
        return pulumi.get(self, "version_status")

    @property
    @pulumi.getter(name="webLoad")
    def web_load(self) -> Optional[str]:
        """
        Web load.
        """
        return pulumi.get(self, "web_load")

    @property
    @pulumi.getter(name="webLoadStatus")
    def web_load_status(self) -> Optional[str]:
        """
        Web load status.
        """
        return pulumi.get(self, "web_load_status")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VMwareV2FabricSpecificDetailsResponse(dict):
    """
    VMwareV2 fabric specific details.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 migration_solution_id: str,
                 service_endpoint: str,
                 service_resource_id: str,
                 vmware_site_id: str):
        """
        VMwareV2 fabric specific details.

        :param str instance_type: Gets the class type. Overridden in derived classes.
        :param str migration_solution_id: ARM Id of the Migration solution.
        :param str service_endpoint: The service endpoint.
        :param str service_resource_id: Resource Id of the service.
        :param str vmware_site_id: ARM Id of the VMware site.
        """
        # The discriminator is fixed for this type; the incoming argument is ignored.
        pulumi.set(__self__, "instance_type", 'VMwareV2')
        for attr, value in (
                ("migration_solution_id", migration_solution_id),
                ("service_endpoint", service_endpoint),
                ("service_resource_id", service_resource_id),
                ("vmware_site_id", vmware_site_id)):
            pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """
        Gets the class type. Overridden in derived classes.
        """
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="migrationSolutionId")
    def migration_solution_id(self) -> str:
        """
        ARM Id of the Migration solution.
        """
        return pulumi.get(self, "migration_solution_id")

    @property
    @pulumi.getter(name="serviceEndpoint")
    def service_endpoint(self) -> str:
        """
        The service endpoint.
        """
        return pulumi.get(self, "service_endpoint")

    @property
    @pulumi.getter(name="serviceResourceId")
    def service_resource_id(self) -> str:
        """
        Resource Id of the service.
        """
        return pulumi.get(self, "service_resource_id")

    @property
    @pulumi.getter(name="vmwareSiteId")
    def vmware_site_id(self) -> str:
        """
        ARM Id of the VMware site.
        """
        return pulumi.get(self, "vmware_site_id")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VaultPropertiesResponse(dict):
    """
    Properties of the vault.
    """
    def __init__(__self__, *,
                 private_endpoint_connections: Sequence['outputs.PrivateEndpointConnectionVaultPropertiesResponse'],
                 private_endpoint_state_for_backup: str,
                 private_endpoint_state_for_site_recovery: str,
                 provisioning_state: str,
                 upgrade_details: Optional['outputs.UpgradeDetailsResponse'] = None):
        """
        Properties of the vault.

        :param Sequence['PrivateEndpointConnectionVaultPropertiesResponseArgs'] private_endpoint_connections: List of private endpoint connection.
        :param str private_endpoint_state_for_backup: Private endpoint state for backup.
        :param str private_endpoint_state_for_site_recovery: Private endpoint state for site recovery.
        :param str provisioning_state: Provisioning State.
        :param 'UpgradeDetailsResponseArgs' upgrade_details: Details for upgrading vault.
        """
        for attr, value in (
                ("private_endpoint_connections", private_endpoint_connections),
                ("private_endpoint_state_for_backup", private_endpoint_state_for_backup),
                ("private_endpoint_state_for_site_recovery", private_endpoint_state_for_site_recovery),
                ("provisioning_state", provisioning_state)):
            pulumi.set(__self__, attr, value)
        # Optional field: only recorded when a value was actually supplied.
        if upgrade_details is not None:
            pulumi.set(__self__, "upgrade_details", upgrade_details)

    @property
    @pulumi.getter(name="privateEndpointConnections")
    def private_endpoint_connections(self) -> Sequence['outputs.PrivateEndpointConnectionVaultPropertiesResponse']:
        """
        List of private endpoint connection.
        """
        return pulumi.get(self, "private_endpoint_connections")

    @property
    @pulumi.getter(name="privateEndpointStateForBackup")
    def private_endpoint_state_for_backup(self) -> str:
        """
        Private endpoint state for backup.
        """
        return pulumi.get(self, "private_endpoint_state_for_backup")

    @property
    @pulumi.getter(name="privateEndpointStateForSiteRecovery")
    def private_endpoint_state_for_site_recovery(self) -> str:
        """
        Private endpoint state for site recovery.
        """
        return pulumi.get(self, "private_endpoint_state_for_site_recovery")

    @property
    @pulumi.getter(name="provisioningState")
    def provisioning_state(self) -> str:
        """
        Provisioning State.
        """
        return pulumi.get(self, "provisioning_state")

    @property
    @pulumi.getter(name="upgradeDetails")
    def upgrade_details(self) -> Optional['outputs.UpgradeDetailsResponse']:
        """
        Details for upgrading vault.
        """
        return pulumi.get(self, "upgrade_details")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VersionDetailsResponse(dict):
    """
    Version related details.
    """
    def __init__(__self__, *,
                 expiry_date: Optional[str] = None,
                 status: Optional[str] = None,
                 version: Optional[str] = None):
        """
        Version related details.

        :param str expiry_date: Version expiry date.
        :param str status: A value indicating whether security update required.
        :param str version: The agent version.
        """
        # All fields are optional: only record the ones actually supplied.
        for attr, value in (
                ("expiry_date", expiry_date),
                ("status", status),
                ("version", version)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="expiryDate")
    def expiry_date(self) -> Optional[str]:
        """
        Version expiry date.
        """
        return pulumi.get(self, "expiry_date")

    @property
    @pulumi.getter
    def status(self) -> Optional[str]:
        """
        A value indicating whether security update required.
        """
        return pulumi.get(self, "status")

    @property
    @pulumi.getter
    def version(self) -> Optional[str]:
        """
        The agent version.
        """
        return pulumi.get(self, "version")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VmmDetailsResponse(dict):
    """
    VMM fabric specific details.
    """
    def __init__(__self__, *,
                 instance_type: str):
        """
        VMM fabric specific details.

        :param str instance_type: Gets the class type. Overridden in derived classes.
        """
        # The discriminator is fixed for this type; the incoming argument is ignored.
        pulumi.set(__self__, "instance_type", 'VMM')

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """
        Gets the class type. Overridden in derived classes.
        """
        return pulumi.get(self, "instance_type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VmmToAzureNetworkMappingSettingsResponse(dict):
    """
    E2A Network Mapping fabric specific settings.
    """
    def __init__(__self__, *,
                 instance_type: str):
        """
        E2A Network Mapping fabric specific settings.

        :param str instance_type: Gets the Instance type.
        """
        # The discriminator is fixed for this type; the incoming argument is ignored.
        pulumi.set(__self__, "instance_type", 'VmmToAzure')

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """
        Gets the Instance type.
        """
        return pulumi.get(self, "instance_type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VmmToVmmNetworkMappingSettingsResponse(dict):
    """
    E2E Network Mapping fabric specific settings.
    """
    def __init__(__self__, *,
                 instance_type: str):
        """
        E2E Network Mapping fabric specific settings.

        :param str instance_type: Gets the Instance type.
        """
        # The discriminator is fixed for this type; the incoming argument is ignored.
        pulumi.set(__self__, "instance_type", 'VmmToVmm')

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """
        Gets the Instance type.
        """
        return pulumi.get(self, "instance_type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class VmwareCbtPolicyDetailsResponse(dict):
    """
    VMware Cbt specific policy details.
    """
    def __init__(__self__, *,
                 instance_type: str,
                 app_consistent_frequency_in_minutes: Optional[int] = None,
                 crash_consistent_frequency_in_minutes: Optional[int] = None,
                 recovery_point_history_in_minutes: Optional[int] = None):
        """
        VMware Cbt specific policy details.

        :param str instance_type: Gets the class type. Overridden in derived classes.
        :param int app_consistent_frequency_in_minutes: App consistent snapshot frequency, in minutes.
        :param int crash_consistent_frequency_in_minutes: Crash consistent snapshot frequency, in minutes.
        :param int recovery_point_history_in_minutes: Duration in minutes until which the recovery points need to be stored.
        """
        # The discriminator is fixed for this type; the incoming argument is ignored.
        pulumi.set(__self__, "instance_type", 'VMwareCbt')
        # Optional fields: only record the ones actually supplied.
        for attr, value in (
                ("app_consistent_frequency_in_minutes", app_consistent_frequency_in_minutes),
                ("crash_consistent_frequency_in_minutes", crash_consistent_frequency_in_minutes),
                ("recovery_point_history_in_minutes", recovery_point_history_in_minutes)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """
        Gets the class type. Overridden in derived classes.
        """
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="appConsistentFrequencyInMinutes")
    def app_consistent_frequency_in_minutes(self) -> Optional[int]:
        """
        App consistent snapshot frequency, in minutes.
        """
        return pulumi.get(self, "app_consistent_frequency_in_minutes")

    @property
    @pulumi.getter(name="crashConsistentFrequencyInMinutes")
    def crash_consistent_frequency_in_minutes(self) -> Optional[int]:
        """
        Crash consistent snapshot frequency, in minutes.
        """
        return pulumi.get(self, "crash_consistent_frequency_in_minutes")

    @property
    @pulumi.getter(name="recoveryPointHistoryInMinutes")
    def recovery_point_history_in_minutes(self) -> Optional[int]:
        """
        Duration in minutes until which the recovery points need to be stored.
        """
        return pulumi.get(self, "recovery_point_history_in_minutes")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class WeeklyRetentionFormatResponse(dict):
    """
    Weekly retention format.
    """
    def __init__(__self__, *,
                 days_of_the_week: Optional[Sequence[str]] = None,
                 weeks_of_the_month: Optional[Sequence[str]] = None):
        """
        Weekly retention format.

        :param Sequence[str] days_of_the_week: List of days of the week.
        :param Sequence[str] weeks_of_the_month: List of weeks of the month.
        """
        # Both fields are optional: only record the ones actually supplied.
        for attr, value in (
                ("days_of_the_week", days_of_the_week),
                ("weeks_of_the_month", weeks_of_the_month)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="daysOfTheWeek")
    def days_of_the_week(self) -> Optional[Sequence[str]]:
        """
        List of days of the week.
        """
        return pulumi.get(self, "days_of_the_week")

    @property
    @pulumi.getter(name="weeksOfTheMonth")
    def weeks_of_the_month(self) -> Optional[Sequence[str]]:
        """
        List of weeks of the month.
        """
        return pulumi.get(self, "weeks_of_the_month")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class WeeklyRetentionScheduleResponse(dict):
    """
    Weekly retention schedule.
    """
    def __init__(__self__, *,
                 days_of_the_week: Optional[Sequence[str]] = None,
                 retention_duration: Optional['outputs.RetentionDurationResponse'] = None,
                 retention_times: Optional[Sequence[str]] = None):
        """
        Weekly retention schedule.

        :param Sequence[str] days_of_the_week: Days of the week for the weekly retention policy.
        :param 'RetentionDurationResponseArgs' retention_duration: Retention duration of the retention policy.
        :param Sequence[str] retention_times: Retention times of the retention policy.
        """
        # All fields are optional: only record the ones actually supplied.
        for attr, value in (
                ("days_of_the_week", days_of_the_week),
                ("retention_duration", retention_duration),
                ("retention_times", retention_times)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="daysOfTheWeek")
    def days_of_the_week(self) -> Optional[Sequence[str]]:
        """
        Days of the week for the weekly retention policy.
        """
        return pulumi.get(self, "days_of_the_week")

    @property
    @pulumi.getter(name="retentionDuration")
    def retention_duration(self) -> Optional['outputs.RetentionDurationResponse']:
        """
        Retention duration of the retention policy.
        """
        return pulumi.get(self, "retention_duration")

    @property
    @pulumi.getter(name="retentionTimes")
    def retention_times(self) -> Optional[Sequence[str]]:
        """
        Retention times of the retention policy.
        """
        return pulumi.get(self, "retention_times")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class WorkloadInquiryDetailsResponse(dict):
    """
    Details of an inquired protectable item.
    """
    def __init__(__self__, *,
                 inquiry_validation: Optional['outputs.InquiryValidationResponse'] = None,
                 item_count: Optional[int] = None,
                 type: Optional[str] = None):
        """
        Details of an inquired protectable item.

        :param 'InquiryValidationResponseArgs' inquiry_validation: Inquiry validation such as permissions and other backup validations.
        :param int item_count: Count of protectable items inside this Container.
        :param str type: Type of the Workload such as SQL, Oracle etc.
        """
        # All fields are optional: only record the ones actually supplied.
        for attr, value in (
                ("inquiry_validation", inquiry_validation),
                ("item_count", item_count),
                ("type", type)):
            if value is not None:
                pulumi.set(__self__, attr, value)

    @property
    @pulumi.getter(name="inquiryValidation")
    def inquiry_validation(self) -> Optional['outputs.InquiryValidationResponse']:
        """
        Inquiry validation such as permissions and other backup validations.
        """
        return pulumi.get(self, "inquiry_validation")

    @property
    @pulumi.getter(name="itemCount")
    def item_count(self) -> Optional[int]:
        """
        Count of protectable items inside this Container.
        """
        return pulumi.get(self, "item_count")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        Type of the Workload such as SQL, Oracle etc.
        """
        return pulumi.get(self, "type")

    def _translate_property(self, prop):
        # camelCase wire name -> snake_case attribute; identity when unmapped.
        return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class YearlyRetentionScheduleResponse(dict):
    """
    Yearly retention schedule.
    """
    def __init__(__self__, *,
                 months_of_year: Optional[Sequence[str]] = None,
                 retention_duration: Optional['outputs.RetentionDurationResponse'] = None,
                 retention_schedule_daily: Optional['outputs.DailyRetentionFormatResponse'] = None,
                 retention_schedule_format_type: Optional[str] = None,
                 retention_schedule_weekly: Optional['outputs.WeeklyRetentionFormatResponse'] = None,
                 retention_times: Optional[Sequence[str]] = None):
        """
        Yearly retention schedule.
        :param Sequence[str] months_of_year: List of the months of year for the yearly retention policy.
        :param 'RetentionDurationResponseArgs' retention_duration: Retention duration for the retention policy.
        :param 'DailyRetentionFormatResponseArgs' retention_schedule_daily: Daily retention format for the yearly retention policy.
        :param str retention_schedule_format_type: Retention schedule format for the yearly retention policy.
        :param 'WeeklyRetentionFormatResponseArgs' retention_schedule_weekly: Weekly retention format for the yearly retention policy.
        :param Sequence[str] retention_times: Retention times for the retention policy.
        """
        # Record only the arguments that were explicitly supplied; absent
        # optional fields are simply not stored on the output object.
        supplied = (
            ('months_of_year', months_of_year),
            ('retention_duration', retention_duration),
            ('retention_schedule_daily', retention_schedule_daily),
            ('retention_schedule_format_type', retention_schedule_format_type),
            ('retention_schedule_weekly', retention_schedule_weekly),
            ('retention_times', retention_times),
        )
        for key, value in supplied:
            if value is not None:
                pulumi.set(__self__, key, value)

    @property
    @pulumi.getter(name="monthsOfYear")
    def months_of_year(self) -> Optional[Sequence[str]]:
        """
        List of the months of year for the yearly retention policy.
        """
        return pulumi.get(self, "months_of_year")

    @property
    @pulumi.getter(name="retentionDuration")
    def retention_duration(self) -> Optional['outputs.RetentionDurationResponse']:
        """
        Retention duration for the retention policy.
        """
        return pulumi.get(self, "retention_duration")

    @property
    @pulumi.getter(name="retentionScheduleDaily")
    def retention_schedule_daily(self) -> Optional['outputs.DailyRetentionFormatResponse']:
        """
        Daily retention format for the yearly retention policy.
        """
        return pulumi.get(self, "retention_schedule_daily")

    @property
    @pulumi.getter(name="retentionScheduleFormatType")
    def retention_schedule_format_type(self) -> Optional[str]:
        """
        Retention schedule format for the yearly retention policy.
        """
        return pulumi.get(self, "retention_schedule_format_type")

    @property
    @pulumi.getter(name="retentionScheduleWeekly")
    def retention_schedule_weekly(self) -> Optional['outputs.WeeklyRetentionFormatResponse']:
        """
        Weekly retention format for the yearly retention policy.
        """
        return pulumi.get(self, "retention_schedule_weekly")

    @property
    @pulumi.getter(name="retentionTimes")
    def retention_times(self) -> Optional[Sequence[str]]:
        """
        Retention times for the retention policy.
        """
        return pulumi.get(self, "retention_times")

    def _translate_property(self, prop):
        # Map the camelCase wire name to its snake_case attribute name,
        # falling back to the original name when no (truthy) mapping exists.
        snake_name = _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop)
        return snake_name if snake_name else prop
| 39.947321 | 529 | 0.659061 | 80,173 | 702,194 | 5.471281 | 0.021641 | 0.034155 | 0.038379 | 0.056093 | 0.862188 | 0.809492 | 0.764721 | 0.720531 | 0.690536 | 0.661709 | 0 | 0.000426 | 0.251751 | 702,194 | 17,577 | 530 | 39.949593 | 0.834434 | 0.246836 | 0 | 0.709534 | 1 | 0 | 0.17076 | 0.094625 | 0 | 0 | 0 | 0 | 0 | 1 | 0.165375 | false | 0 | 0.002479 | 0.016011 | 0.33323 | 0.00062 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
5e98c7e15dbe989d6a094c3ed9209a447f37b679 | 18,582 | py | Python | benchmarks/convergence_benchmarks.py | Goodpaster/QSoME | 8b24d58dfab5ac0d90fd84b8519b25864eee6f74 | [
"Apache-2.0"
] | 7 | 2018-09-28T21:40:08.000Z | 2021-06-10T10:44:39.000Z | benchmarks/convergence_benchmarks.py | Goodpaster/QSoME | 8b24d58dfab5ac0d90fd84b8519b25864eee6f74 | [
"Apache-2.0"
] | 1 | 2021-07-06T12:28:32.000Z | 2021-07-29T20:34:13.000Z | benchmarks/convergence_benchmarks.py | Goodpaster/QSoME | 8b24d58dfab5ac0d90fd84b8519b25864eee6f74 | [
"Apache-2.0"
] | 1 | 2021-04-08T12:28:44.000Z | 2021-04-08T12:28:44.000Z | from pyscf import gto
import time
from qsome import interaction_mediator, cluster_subsystem, cluster_supersystem
from copy import deepcopy as copy
#All mol objects which will be used for testing.
# Each benchmark system is a pair of pyscf molecules (subsystem A, subsystem B);
# the *_mol3/*_mol4 (and *3/*4) variants are alternative partitionings of the
# same total system as the *1/*2 pair.
# Coordinates are Cartesian; presumably in Angstrom (pyscf's default unit) --
# TODO confirm against the original geometry source.

# SN2-reaction system: chloride/alkyl fragment vs. the remaining alkyl chain.
sn2_mol1 = gto.M()
sn2_mol1.atom = '''
Cl 3.51109 -0.30375 0.00020
C 1.97915 0.64190 0.00018
H 2.00892 1.27702 -0.88401
H 2.00850 1.27637 0.88486
'''
sn2_mol2 = gto.M()
sn2_mol2.atom = '''
C 0.77452 -0.25849 -0.00044
H 0.80952 -0.91242 -0.87692
H 0.80921 -0.91317 0.87550
C -0.51363 0.54349 -0.00030
H -0.53565 1.20393 0.87514
H -0.53562 1.20434 -0.87545
C -1.75043 -0.33137 -0.00050
H -1.72894 -0.99275 -0.87536
H -1.72864 -0.99348 0.87379
C -3.04233 0.46077 0.00008
H -3.05786 1.12092 0.87408
H -3.05853 1.12117 -0.87372
C -4.26820 -0.42758 0.00047
H -4.28002 -1.07524 0.88008
H -5.19301 0.14965 0.00102
H -4.28083 -1.07487 -0.87940
'''
# Same SN2 system, partitioned further down the chain.
sn2_mol3 = gto.M()
sn2_mol3.atom = '''
Cl 3.51109 -0.30375 0.00020
C 1.97915 0.64190 0.00018
H 2.00892 1.27702 -0.88401
H 2.00850 1.27637 0.88486
C 0.77452 -0.25849 -0.00044
H 0.80952 -0.91242 -0.87692
H 0.80921 -0.91317 0.87550
C -0.51363 0.54349 -0.00030
H -0.53565 1.20393 0.87514
H -0.53562 1.20434 -0.87545
'''
sn2_mol4 = gto.M()
sn2_mol4.atom = '''
C -1.75043 -0.33137 -0.00050
H -1.72894 -0.99275 -0.87536
H -1.72864 -0.99348 0.87379
C -3.04233 0.46077 0.00008
H -3.05786 1.12092 0.87408
H -3.05853 1.12117 -0.87372
C -4.26820 -0.42758 0.00047
H -4.28002 -1.07524 0.88008
H -5.19301 0.14965 0.00102
H -4.28083 -1.07487 -0.87940
'''
# Bond-cleavage system: two CH2 radical-like fragments vs. the chain ends.
bond_cleave1 = gto.M()
bond_cleave1.atom = '''
C 0.84878 1.53039 0.00000
H 1.50962 1.58000 0.87454
H 1.50962 1.58000 -0.87454
C -0.84878 -1.53039 0.00000
H -1.50962 -1.58000 -0.87454
H -1.50962 -1.58000 0.87454
'''
bond_cleave2 = gto.M()
bond_cleave2.atom = '''
C -0.46985 7.76776 0.00000
H 0.06673 8.71696 0.00000
H -1.11758 7.75217 0.87964
H -1.11758 7.75217 -0.87964
C 0.46985 6.58062 0.00000
H 1.12915 6.62528 0.87377
H 1.12915 6.62528 -0.87377
C -0.26441 5.25506 0.00000
H -0.92565 5.20629 0.87450
H -0.92565 5.20629 -0.87450
C 0.65932 4.05507 0.00000
H 1.32017 4.10471 0.87457
H 1.32017 4.10471 -0.87457
C -0.07512 2.73049 0.00000
H -0.73597 2.68085 -0.87453
H -0.73597 2.68085 0.87453
C 0.07512 -2.73049 0.00000
H 0.73597 -2.68085 0.87453
H 0.73597 -2.68085 -0.87453
C -0.65932 -4.05507 0.00000
H -1.32017 -4.10471 -0.87457
H -1.32017 -4.10471 0.87457
C 0.26441 -5.25506 0.00000
H 0.92565 -5.20629 0.87450
H 0.92565 -5.20629 -0.87450
C -0.46985 -6.58062 0.00000
H -1.12915 -6.62528 -0.87377
H -1.12915 -6.62528 0.87377
C 0.46985 -7.76776 0.00000
H 1.11758 -7.75217 0.87964
H -0.06673 -8.71696 0.00000
H 1.11758 -7.75217 -0.87964
'''
# Same bond-cleavage system with a larger active region around the broken bond.
bond_cleave3 = gto.M()
bond_cleave3.atom = '''
C 0.65932 4.05507 0.00000
H 1.32017 4.10471 0.87457
H 1.32017 4.10471 -0.87457
C -0.07512 2.73049 0.00000
H -0.73597 2.68085 -0.87453
H -0.73597 2.68085 0.87453
C 0.84878 1.53039 0.00000
H 1.50962 1.58000 0.87454
H 1.50962 1.58000 -0.87454
C -0.84878 -1.53039 0.00000
H -1.50962 -1.58000 -0.87454
H -1.50962 -1.58000 0.87454
C 0.07512 -2.73049 0.00000
H 0.73597 -2.68085 0.87453
H 0.73597 -2.68085 -0.87453
C -0.65932 -4.05507 0.00000
H -1.32017 -4.10471 -0.87457
H -1.32017 -4.10471 0.87457
'''
bond_cleave4 = gto.M()
bond_cleave4.atom = '''
C -0.46985 7.76776 0.00000
H 0.06673 8.71696 0.00000
H -1.11758 7.75217 0.87964
H -1.11758 7.75217 -0.87964
C 0.46985 6.58062 0.00000
H 1.12915 6.62528 0.87377
H 1.12915 6.62528 -0.87377
C -0.26441 5.25506 0.00000
H -0.92565 5.20629 0.87450
H -0.92565 5.20629 -0.87450
C 0.26441 -5.25506 0.00000
H 0.92565 -5.20629 0.87450
H 0.92565 -5.20629 -0.87450
C -0.46985 -6.58062 0.00000
H -1.12915 -6.62528 -0.87377
H -1.12915 -6.62528 0.87377
C 0.46985 -7.76776 0.00000
H 1.11758 -7.75217 0.87964
H -0.06673 -8.71696 0.00000
H 1.11758 -7.75217 -0.87964
'''
# Thiol system: CH2-S fragment vs. the alkyl remainder.
thiol1 = gto.M()
thiol1.atom = '''
C -1.39013 0.60272 0.00001
H -1.36123 1.27811 -0.87485
H -1.36123 1.27809 0.87489
S -3.02131 -0.17073 -0.00001
'''
thiol2 = gto.M()
thiol2.atom = '''
C 3.63982 0.17268 -0.00001
H 4.49175 -0.52133 0.00002
H 3.73370 0.81761 0.88742
H 3.73370 0.81754 -0.88749
C 2.31212 -0.56204 0.00001
H 2.24759 -1.22456 0.88028
H 2.24758 -1.22460 -0.88022
C 1.11974 0.37903 -0.00001
H 1.17755 1.04420 0.88131
H 1.17756 1.04418 -0.88134
C -0.21248 -0.35144 -0.00000
H -0.28014 -1.01161 0.88083
H -0.28015 -1.01160 -0.88084
'''
# Same thiol system with a larger sulfur-side subsystem.
thiol3 = gto.M()
thiol3.atom = '''
C -1.39013 0.60272 0.00001
H -1.36123 1.27811 -0.87485
H -1.36123 1.27809 0.87489
S -3.02131 -0.17073 -0.00001
C 1.11974 0.37903 -0.00001
H 1.17755 1.04420 0.88131
H 1.17756 1.04418 -0.88134
C -0.21248 -0.35144 -0.00000
H -0.28014 -1.01161 0.88083
H -0.28015 -1.01160 -0.88084
'''
thiol4 = gto.M()
thiol4.atom = '''
C 3.63982 0.17268 -0.00001
H 4.49175 -0.52133 0.00002
H 3.73370 0.81761 0.88742
H 3.73370 0.81754 -0.88749
C 2.31212 -0.56204 0.00001
H 2.24759 -1.22456 0.88028
H 2.24758 -1.22460 -0.88022
'''
# MOF-like system: Fe center vs. the surrounding carboxylate ligand cage.
mof1 = gto.M()
mof1.atom = '''
Fe 0.00000 -0.00000 0.05961
'''
mof2 = gto.M()
mof2.atom = '''
O 1.38352 1.42088 -0.29729
O 1.38352 -1.42088 -0.29729
O -1.38352 -1.42088 -0.29729
C 1.21957 -2.62333 -0.64080
C -0.00000 -3.27203 -0.81174
C -1.21957 -2.62333 -0.64080
O -1.38352 1.42088 -0.29729
C -1.21957 2.62333 -0.64080
H -2.13506 -3.20415 -0.82549
H 2.13506 -3.20415 -0.82549
C 1.21957 2.62333 -0.64080
C 0.00000 3.27203 -0.81174
H 2.13506 3.20415 -0.82549
H -2.13506 3.20415 -0.82549
C 0.00000 -0.00000 2.11025
O 0.00000 -0.00000 3.24357
H -0.00000 4.30911 -1.11232
H -0.00000 -4.30911 -1.11232
'''
# Same MOF system partitioned as FeO4 core vs. the carbon framework.
mof3 = gto.M()
mof3.atom = '''
Fe 0.00000 -0.00000 0.05961
O 1.38352 1.42088 -0.29729
O 1.38352 -1.42088 -0.29729
O -1.38352 -1.42088 -0.29729
O -1.38352 1.42088 -0.29729
'''
mof4 = gto.M()
mof4.atom = '''
C 1.21957 -2.62333 -0.64080
C -0.00000 -3.27203 -0.81174
C -1.21957 -2.62333 -0.64080
C -1.21957 2.62333 -0.64080
H -2.13506 -3.20415 -0.82549
H 2.13506 -3.20415 -0.82549
C 1.21957 2.62333 -0.64080
C 0.00000 3.27203 -0.81174
H 2.13506 3.20415 -0.82549
H -2.13506 3.20415 -0.82549
C 0.00000 -0.00000 2.11025
O 0.00000 -0.00000 3.24357
H -0.00000 4.30911 -1.11232
H -0.00000 -4.30911 -1.11232
'''
# Consecutive pairs (2i, 2i+1) in mol_list form one subsystem-A/subsystem-B
# benchmark system; the charge/spin lists below are indexed the same way.
mol_list = [sn2_mol1, sn2_mol2, sn2_mol3, sn2_mol4, bond_cleave1, bond_cleave2, bond_cleave3, bond_cleave4, thiol1, thiol2, thiol3, thiol4, mof1, mof2, mof3, mof4]
# Basis sets swept over by the benchmarks (smallest to largest).
basis_set_list = ['3-21g', '6-31g', 'cc-pVDZ', 'aug-cc-pVDZ', 'cc-pVTZ']
# Per-molecule charges for the closed-shell pass (one entry per mol_list item).
charges_cs = [-1, 1, -1, 1, -2, 2, -2, 2, -2, 1, -2, 1, 2, -2, -6, 6]
# Per-molecule charges for the open-shell pass.
charges_os = [0, 0, 0, 0, 0, 0, 0, 0, -1, 1, -1, 1, 2, -2, -6, 6]
# Per-molecule spins for the open-shell pass; presumably pyscf's 2S
# (N_alpha - N_beta) convention -- TODO confirm.
spin_os = [1, -1, 1, -1, 0, 0, 0, 0, 1, 0, 1, 0, 4, 0, 4, 0]
# DFT integration-grid levels to benchmark.
grid_points = [3,4]
# Freeze-and-thaw initial-guess schemes (remaining options currently disabled).
init_guess = ['atom', 'h1e']#, 'super', 'sub', 'localsuper']
# Exchange-correlation functionals to benchmark.
xc_fun = ['lda', 'pbe', 'b3lyp', 'm06']
# Density-damping factors for the simple-damping sweep (reduced to one value).
#simple_damping_values = [0.3, 0.4, 0.5]
simple_damping_values = [0.4]
class MolObjects:
    """Odometer-style iterator over all benchmark subsystem pairs.

    Iterates the cartesian product of (molecule pair from ``mol_list``,
    basis set from ``basis_set_list``, charge regime closed-/open-shell),
    yielding a 2-tuple of *copies* of built pyscf molecules each step.
    Exhaustion is signalled by returning ``None`` from ``__next__`` rather
    than raising ``StopIteration``; callers loop with ``while x is not None``.

    NOTE(review): the iterator mutates the shared module-level ``gto.M``
    objects in ``mol_list`` in place (basis/charge/spin) and rebuilds them,
    which is why copies of the previously built pair are handed out.
    """

    def __iter__(self):
        # Odometer digits: molecule pair (step 2), basis set, charge regime.
        self.mol_index = 0
        self.basis_index = 0
        self.charges_index = 0
        self.grid_index = 0
        # Prepare the first pair (closed-shell charges, first basis set).
        mol1 = mol_list[0]
        mol1.basis = basis_set_list[0]
        mol1.charge = charges_cs[0]
        mol2 = mol_list[1]
        mol2.basis = basis_set_list[0]
        mol2.charge = charges_cs[1]
        mol1.build()
        mol2.build()
        self.subs = (mol1, mol2)
        return self

    def __next__(self):
        # Hand out copies of the pair prepared on the previous call, so the
        # in-place mutation below cannot affect what the caller received.
        temp1 = gto.mole.copy(self.subs[0])
        temp2 = gto.mole.copy(self.subs[1])
        x = (temp1, temp2)
        # Advance the odometer: pair -> basis -> charge regime.
        self.mol_index += 2
        if self.mol_index >= len(mol_list):
            self.mol_index = 0
            self.basis_index += 1
            if self.basis_index >= len(basis_set_list):
                self.basis_index = 0
                self.charges_index += 1
                if self.charges_index >= 2:
                    # Both charge regimes exhausted: signal end with None
                    # (callers test `x is not None` instead of StopIteration).
                    return None
        # Prepare (mutate + rebuild) the next pair for the following call.
        mol1 = mol_list[self.mol_index]
        mol1.basis = basis_set_list[self.basis_index]
        if self.charges_index > 0:
            # Open-shell pass: apply open-shell charges and spins.
            mol1.charge = charges_os[self.mol_index]
            mol1.spin = spin_os[self.mol_index]
        else:
            mol1.charge = charges_cs[self.mol_index]
        mol2 = mol_list[self.mol_index + 1]
        mol2.basis = basis_set_list[self.basis_index]
        if self.charges_index > 0:
            mol2.charge = charges_os[self.mol_index + 1]
            mol2.spin = spin_os[self.mol_index + 1]
        else:
            mol2.charge = charges_cs[self.mol_index + 1]
        mol1.build()
        mol2.build()
        self.subs = (mol1, mol2)
        return x
def simple_density_damping():
    """Benchmark freeze-and-thaw convergence with fixed density damping.

    For every benchmark system yielded by ``MolObjects``, sweeps all
    combinations of grid level, initial guess, XC functional, Fock/projector
    update flags and damping factor, runs the embedding freeze-and-thaw
    cycle, and appends cycle counts, timings and subsystem energies to
    ``simple_density_damping_results.out``.  Systems with non-zero spin are
    additionally run unrestricted.
    """
    output_filename = 'simple_density_damping_results.out'
    molO = MolObjects()
    moliter = iter(molO)
    x = next(moliter)
    num = 0
    while x is not None:
        for gp in grid_points:
            for ig in init_guess:
                for xc in xc_fun:
                    for fud in range(2):
                        # pud starts at fud: the projector is never updated
                        # less often than the Fock matrix.
                        for pud in range(fud, 2):
                            for dp in simple_damping_values:
                                header_string = f"molnum: {num}\nbasis: {x[0].basis}\ncharge: {x[0].charge}\ngridsize: {gp}\ninitguess: {ig}\nxc_fun: {xc}\nfock_update: {fud}\nproj_update: {pud}\ndamping: {dp}\n"
                                with open(output_filename, 'a') as fout:
                                    fout.write(header_string)
                                sub1 = cluster_subsystem.ClusterEnvSubSystem(x[0], xc)
                                sub2 = cluster_subsystem.ClusterEnvSubSystem(x[1], xc)
                                sup = cluster_supersystem.ClusterSuperSystem([sub1, sub2], xc, fs_grid_level=gp, ft_cycles=100, ft_initguess=ig, ft_updatefock=fud, ft_updateproj=pud, ft_diis=None, ft_damp=dp)
                                sup.init_density()
                                start_time = time.time()
                                sup.freeze_and_thaw()
                                end_time = time.time()
                                elapsed_time = end_time - start_time
                                cycles = sup.ft_iter
                                write_string = f" FT cycles: {cycles}\n Elapsed Time: {elapsed_time}\n Average time per cycle: {elapsed_time/float(cycles)}\n Sub1 E: {sub1.get_env_energy()}\n Sub2 E: {sub2.get_env_energy()}\n\n"
                                with open(output_filename, 'a') as fout:
                                    fout.write(write_string)
                                if x[0].spin != 0 or x[1].spin != 0:
                                    # Open-shell system: repeat unrestricted.
                                    sub1 = cluster_subsystem.ClusterEnvSubSystem(x[0], xc, unrestricted=True)
                                    sub2 = cluster_subsystem.ClusterEnvSubSystem(x[1], xc, unrestricted=True)
                                    sup = cluster_supersystem.ClusterSuperSystem([sub1, sub2], xc, fs_grid_level=gp, ft_cycles=100, ft_initguess=ig, ft_updatefock=fud, ft_updateproj=pud, fs_unrestricted=True, ft_unrestricted=True, ft_damp=dp, ft_diis=None)
                                    sup.init_density()
                                    start_time = time.time()
                                    sup.freeze_and_thaw()
                                    end_time = time.time()
                                    elapsed_time = end_time - start_time
                                    cycles = sup.ft_iter
                                    write_string = f"Unrestricted\n FT cycles: {cycles}\n Elapsed Time: {elapsed_time}\n Average time per cycle: {elapsed_time/float(cycles)}\n Sub1 E: {sub1.get_env_energy()}\n Sub2 E: {sub2.get_env_energy()}\n\n"
                                    # BUG FIX: the unrestricted results were composed but
                                    # never written to the output file; persist them just
                                    # like the restricted results above.
                                    with open(output_filename, 'a') as fout:
                                        fout.write(write_string)
        num += 2
        x = next(moliter)
        print("Progress")
        print(f"{num/2} Done Total")
def optimal_density_damping():
    """Benchmark freeze-and-thaw convergence with optimal (damp=-1) damping.

    Identical sweep to ``simple_density_damping`` but without the damping-
    factor loop: subsystems are created with ``damp=-1`` (optimal damping)
    and results are appended to ``optimal_density_damping_results.out``.
    Systems with non-zero spin are additionally run unrestricted.
    """
    output_filename = 'optimal_density_damping_results.out'
    molO = MolObjects()
    moliter = iter(molO)
    x = next(moliter)
    num = 0
    while x is not None:
        for gp in grid_points:
            for ig in init_guess:
                for xc in xc_fun:
                    for fud in range(2):
                        # pud starts at fud: the projector is never updated
                        # less often than the Fock matrix.
                        for pud in range(fud, 2):
                            header_string = f"molnum: {num}\nbasis: {x[0].basis}\ncharge: {x[0].charge}\ngridsize: {gp}\ninitguess: {ig}\nxc_fun: {xc}\nfock_update: {fud}\nproj_update: {pud}\n"
                            with open(output_filename, 'a') as fout:
                                fout.write(header_string)
                            sub1 = cluster_subsystem.ClusterEnvSubSystem(x[0], xc, damp=-1)
                            sub2 = cluster_subsystem.ClusterEnvSubSystem(x[1], xc, damp=-1)
                            sup = cluster_supersystem.ClusterSuperSystem([sub1, sub2], xc, fs_grid_level=gp, ft_cycles=100, ft_initguess=ig, ft_updatefock=fud, ft_updateproj=pud, ft_diis=None)
                            sup.init_density()
                            start_time = time.time()
                            sup.freeze_and_thaw()
                            end_time = time.time()
                            elapsed_time = end_time - start_time
                            cycles = sup.ft_iter
                            write_string = f" FT cycles: {cycles}\n Elapsed Time: {elapsed_time}\n Average time per cycle: {elapsed_time/float(cycles)}\n Sub1 E: {sub1.get_env_energy()}\n Sub2 E: {sub2.get_env_energy()}\n\n"
                            with open(output_filename, 'a') as fout:
                                fout.write(write_string)
                            if x[0].spin != 0 or x[1].spin != 0:
                                # Open-shell system: repeat unrestricted.
                                sub1 = cluster_subsystem.ClusterEnvSubSystem(x[0], xc, unrestricted=True, damp=-1)
                                sub2 = cluster_subsystem.ClusterEnvSubSystem(x[1], xc, unrestricted=True, damp=-1)
                                sup = cluster_supersystem.ClusterSuperSystem([sub1, sub2], xc, fs_grid_level=gp, ft_cycles=100, ft_initguess=ig, ft_updatefock=fud, ft_updateproj=pud, fs_unrestricted=True, ft_unrestricted=True, ft_diis=None)
                                sup.init_density()
                                start_time = time.time()
                                sup.freeze_and_thaw()
                                end_time = time.time()
                                elapsed_time = end_time - start_time
                                cycles = sup.ft_iter
                                write_string = f"Unrestricted\n FT cycles: {cycles}\n Elapsed Time: {elapsed_time}\n Average time per cycle: {elapsed_time/float(cycles)}\n Sub1 E: {sub1.get_env_energy()}\n Sub2 E: {sub2.get_env_energy()}\n\n"
                                # BUG FIX: the unrestricted results were composed but
                                # never written to the output file; persist them just
                                # like the restricted results above.
                                with open(output_filename, 'a') as fout:
                                    fout.write(write_string)
        num += 2
        x = next(moliter)
        print("Progress")
        print(f"{num/2} Done Total")
# Script entry point: only the simple-damping sweep is currently enabled;
# uncomment the line below to also run the optimal-damping sweep.
#optimal_density_damping()
simple_density_damping()
| 44.561151 | 256 | 0.461307 | 2,490 | 18,582 | 3.344177 | 0.133735 | 0.036027 | 0.025219 | 0.017293 | 0.853248 | 0.837877 | 0.813498 | 0.795124 | 0.78936 | 0.768824 | 0 | 0.34505 | 0.431977 | 18,582 | 416 | 257 | 44.668269 | 0.443865 | 0.007642 | 0 | 0.743456 | 0 | 0.015707 | 0.568236 | 0.026416 | 0 | 0 | 0 | 0 | 0 | 1 | 0.010471 | false | 0 | 0.010471 | 0 | 0.031414 | 0.010471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 9 |
5ea55e7482bcfa67c509bcddad17bed710be6647 | 96 | py | Python | src/creds.py | NickElefth/instaPYgram | 8e6b7de15dd5737b0782e4cdf7d916aee9249d0e | [
"Apache-2.0"
] | null | null | null | src/creds.py | NickElefth/instaPYgram | 8e6b7de15dd5737b0782e4cdf7d916aee9249d0e | [
"Apache-2.0"
] | null | null | null | src/creds.py | NickElefth/instaPYgram | 8e6b7de15dd5737b0782e4cdf7d916aee9249d0e | [
"Apache-2.0"
] | null | null | null |
def getUsername():
    """Return the Instagram account username.

    Reads the ``INSTA_USERNAME`` environment variable when set, falling back
    to the historical hard-coded value for backward compatibility.
    """
    import os
    # SECURITY: credentials should not live in source control; prefer
    # setting INSTA_USERNAME in the environment over the fallback below.
    return os.environ.get("INSTA_USERNAME", "renovinft")
def getPassword():
    """Return the Instagram account password.

    Reads the ``INSTA_PASSWORD`` environment variable when set, falling back
    to the historical hard-coded value for backward compatibility.
    """
    import os
    # SECURITY: a plaintext password is committed here; rotate it and supply
    # INSTA_PASSWORD via the environment (or a secrets manager) instead.
    return os.environ.get("INSTA_PASSWORD", "RenoviNFT4/06/2021")
| 12 | 31 | 0.677083 | 10 | 96 | 6.5 | 0.8 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 0.197917 | 96 | 7 | 32 | 13.714286 | 0.753247 | 0 | 0 | 0 | 0 | 0 | 0.284211 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | true | 0.25 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 1 | 0 | 0 | 7 |
0de1211fb44e5878a66bd75855aa9aa36a453800 | 11,131 | py | Python | pack/metrics.py | yippp/SY-GNN | 65a1e2566dce549724cef080dfac7efc00fbe91b | [
"MIT"
] | 6 | 2020-05-04T03:37:24.000Z | 2021-04-21T15:15:54.000Z | pack/metrics.py | yippp/SY-GNN | 65a1e2566dce549724cef080dfac7efc00fbe91b | [
"MIT"
] | null | null | null | pack/metrics.py | yippp/SY-GNN | 65a1e2566dce549724cef080dfac7efc00fbe91b | [
"MIT"
] | null | null | null | import numpy as np
import torch
from pack.data import Structure
__all__ = ['ModelBias', 'MeanSquaredError', 'RootMeanSquaredError', 'MeanAbsoluteError', 'RootMeanSquaredErrorInt',
'MeanAbsoluteErrorInt']
class Metric:
    r"""
    Abstract base class for all metrics.

    Metrics measure the performance during the training and evaluation.

    Args:
        name (str): name used in logging for this metric. If set to `None`,
            the concrete class name is used (Default: None)
    """

    def __init__(self, name=None):
        # Default the logging name to the concrete subclass name.
        self.name = type(self).__name__ if name is None else name

    def add_batch(self, batch, result):
        """Add a batch to calculate the metric on."""
        raise NotImplementedError

    def aggregate(self):
        """Aggregate metric over all previously added batches."""
        raise NotImplementedError

    def reset(self):
        """Reset the metric after aggregation to collect new batches."""
        pass
class ModelBias(Metric):
    r"""
    Calculates the bias of the model. For non-scalar quantities, the mean of
    all components is taken.

    Args:
        target (str): name of target property
        model_output (int, str): index or key, in case of multiple outputs (Default: None)
        name (str): name used in logging for this metric. If set to `None`,
            `Bias_[target]` will be used (Default: None)
        element_wise (bool): set to True if the model output is an
            element-wise property (forces, positions, ...)
    """

    def __init__(self, target, model_output=None, name=None, element_wise=False):
        if name is None:
            name = 'Bias_' + target
        super().__init__(name)
        self.target = target
        self.model_output = model_output
        self.element_wise = element_wise
        # Running sum of signed residuals and count of contributing entries.
        self.l2loss = 0.
        self.n_entries = 0.

    def reset(self):
        """Discard all accumulated batches."""
        self.l2loss = 0.
        self.n_entries = 0.

    def _get_diff(self, y, yp):
        # Signed residual (reference minus prediction).
        return y - yp

    def add_batch(self, batch, result):
        """Accumulate the signed error of one batch."""
        reference = batch[self.target]
        prediction = result
        if self.model_output is not None:
            # Drill into nested model outputs via a key path or single key.
            if type(self.model_output) is list:
                for key in self.model_output:
                    prediction = prediction[key]
            else:
                prediction = prediction[self.model_output]
        residual = self._get_diff(reference, prediction)
        self.l2loss += torch.sum(residual.view(-1)).detach().cpu().data.numpy()
        if self.element_wise:
            # Count only real atoms (mask out padding) times components.
            self.n_entries += torch.sum(batch[Structure.atom_mask]) * reference.shape[-1]
        else:
            self.n_entries += np.prod(reference.shape)

    def aggregate(self):
        """Return the mean signed error over everything added so far."""
        return self.l2loss / self.n_entries
class MeanSquaredError(Metric):
    r"""
    Metric for mean square error. For non-scalar quantities, the mean of all
    components is taken.

    Args:
        target (str): name of target property
        model_output (int, str): index or key, in case of multiple outputs (Default: None)
        name (str): name used in logging for this metric. If set to `None`,
            `MSE_[target]` will be used (Default: None)
        element_wise (bool): set to True if the model output is an
            element-wise property (forces, positions, ...)
    """

    def __init__(self, target, model_output=None, name=None, element_wise=False,
                 mean=None, stddev=None):
        if name is None:
            name = 'MSE_' + target
        super().__init__(name)
        self.target = target
        self.model_output = model_output
        self.element_wise = element_wise
        # Running sum of squared residuals and count of contributing entries.
        self.l2loss = 0.
        self.n_entries = 0.
        # NOTE(review): mean/stddev are stored but currently unused
        # (denormalization of predictions is disabled below).
        self.mean = mean
        self.stddev = stddev

    def reset(self):
        """Discard all accumulated batches."""
        self.l2loss = 0.
        self.n_entries = 0.

    def _get_diff(self, y, yp):
        # Residual (reference minus prediction).
        return y - yp

    def add_batch(self, batch, result):
        """Accumulate the squared error of one batch."""
        reference = batch[self.target].squeeze()
        prediction = result
        if self.model_output is not None:
            # Drill into nested model outputs via a key path or single key.
            if type(self.model_output) is list:
                for key in self.model_output:
                    prediction = prediction[key]
            else:
                prediction = prediction[self.model_output]
        residual = self._get_diff(reference, prediction)
        self.l2loss += torch.sum(residual.view(-1) ** 2).detach().cpu().data.numpy()
        if self.element_wise:
            # Count only real atoms (mask out padding) times components.
            self.n_entries += torch.sum(batch[Structure.atom_mask]) * reference.shape[-1]
        else:
            self.n_entries += np.prod(reference.shape)

    def aggregate(self):
        """Return the mean squared error over everything added so far."""
        return self.l2loss / self.n_entries
class RootMeanSquaredError(MeanSquaredError):
    r"""
    Metric for root mean square error. For non-scalar quantities, the mean of
    all components is taken.

    Args:
        target (str): name of target property
        model_output (int, str): index or key, in case of multiple outputs (Default: None)
        name (str): name used in logging for this metric. If set to `None`,
            `RMSE_[target]` will be used (Default: None)
        element_wise (bool): set to True if the model output is an
            element-wise property (forces, positions, ...)
    """

    def __init__(self, target, model_output=None, name=None, element_wise=False,
                 mean=None, stddev=None):
        if name is None:
            name = 'RMSE_' + target
        super().__init__(target, model_output, name, element_wise=element_wise,
                         mean=mean, stddev=stddev)

    def aggregate(self):
        """Return the square root of the accumulated mean squared error."""
        mse = self.l2loss / self.n_entries
        return np.sqrt(mse)
class MeanAbsoluteError(Metric):
    r"""
    Metric for mean absolute error. For non-scalar quantities, the mean of all
    components is taken.

    Args:
        target (str): name of target property
        model_output (int, str): index or key, in case of multiple outputs (Default: None)
        name (str): name used in logging for this metric. If set to `None`,
            `MAE_[target]` will be used (Default: None)
        element_wise (bool): set to True if the model output is an
            element-wise property (forces, positions, ...)
    """

    def __init__(self, target, model_output=None, name=None, element_wise=False,
                 mean=None, stddev=None):
        if name is None:
            name = 'MAE_' + target
        super().__init__(name)
        self.target = target
        self.element_wise = element_wise
        self.model_output = model_output
        # Running sum of absolute residuals and count of contributing entries.
        self.l1loss = 0.
        self.n_entries = 0.
        # NOTE(review): mean/stddev are stored but currently unused
        # (denormalization of predictions is disabled below).
        self.mean = mean
        self.stddev = stddev

    def reset(self):
        """Discard all accumulated batches."""
        self.l1loss = 0.
        self.n_entries = 0.

    def _get_diff(self, y, yp):
        # Residual (reference minus prediction).
        return y - yp

    def add_batch(self, batch, result):
        """Accumulate the absolute error of one batch."""
        reference = batch[self.target].squeeze()
        prediction = result
        if self.model_output is not None:
            # Drill into nested model outputs via a key path or single key.
            if type(self.model_output) is list:
                for key in self.model_output:
                    prediction = prediction[key]
            else:
                prediction = prediction[self.model_output]
        residual = self._get_diff(reference, prediction)
        self.l1loss += torch.sum(torch.abs(residual).view(-1), 0).detach().cpu().data.numpy()
        if self.element_wise:
            # Count only real atoms (mask out padding) times components.
            self.n_entries += torch.sum(batch[Structure.atom_mask]) * reference.shape[-1]
        else:
            self.n_entries += np.prod(reference.shape)

    def aggregate(self):
        """Return the mean absolute error over everything added so far."""
        return self.l1loss / self.n_entries
class ClassPrecentageError(Metric):
    r"""
    Metric for classification percentage error (fraction of misclassified
    samples). For non-scalar quantities, the mean of all components is taken.

    NOTE(review): the class name keeps its historical misspelling
    ("Precentage") so existing callers keep working.

    Args:
        target (str): name of target property
        name (str): name used in logging for this metric. If set to `None`,
            `CE_[target]` will be used (Default: None)
    """

    def __init__(self, target, name=None):
        if name is None:
            name = 'CE_' + target
        super().__init__(name)
        self.target = target
        # Running count of misclassifications and of scored samples.
        self.precentloss = 0.
        self.n_entries = 0.

    def reset(self):
        """Discard all accumulated batches."""
        self.precentloss = 0.
        self.n_entries = 0.

    def _get_diff(self, y, yp):
        # Hard prediction: index of the maximum score along the class axis.
        predicted = yp.max(dim=1)[1]
        mismatches = (predicted != y.squeeze(dim=1).long()).sum().float()
        return mismatches

    def add_batch(self, batch, result):
        """Accumulate the misclassification count of one batch."""
        reference = batch[self.target]
        scores = result['y'] if type(result) is dict else result
        mismatches = self._get_diff(reference, scores)
        self.precentloss += mismatches.detach().cpu().data.numpy()
        self.n_entries += np.prod(reference.shape)

    def aggregate(self):
        """Return the fraction of misclassified entries added so far."""
        return self.precentloss / self.n_entries
class MeanAbsoluteErrorInt(MeanAbsoluteError):
    r"""
    Round the prediction (clamped to be non-negative) to the nearest integer
    before calculating the MAE.
    """

    def __init__(self, target, model_output=None, name=None, element_wise=False):
        super(MeanAbsoluteErrorInt, self).__init__(target, model_output, name,
                                                   element_wise=element_wise)

    def add_batch(self, batch, result):
        """Accumulate the absolute error of one batch on rounded predictions."""
        y = batch[self.target]
        if self.model_output is None:
            # Clamp negatives to zero, then round to the nearest integer.
            yp = torch.round(result.clamp(min=0.))
        else:
            # Drill into nested model outputs via a key path or single key.
            if type(self.model_output) is list:
                for idx in self.model_output:
                    result = result[idx]
            else:
                result = result[self.model_output]
            yp = torch.round(result.clamp(min=0.))
        diff = self._get_diff(y, yp)
        # BUG FIX: this previously accumulated diff ** 2 (copy-paste from the
        # RMSE variant), making an "absolute error" metric report squared
        # errors since aggregate() takes no square root.  Accumulate |diff|
        # instead, consistent with MeanAbsoluteError.add_batch.
        self.l1loss += torch.sum(torch.abs(diff).view(-1)).detach().cpu().data.numpy()
        if self.element_wise:
            # Count only real atoms (mask out padding) times components.
            self.n_entries += torch.sum(batch[Structure.atom_mask]) * y.shape[-1]
        else:
            self.n_entries += np.prod(y.shape)
class RootMeanSquaredErrorInt(RootMeanSquaredError):
    r"""
    Round the prediction (clamped to be non-negative) to the nearest integer
    before calculating the RMSE.
    """

    def __init__(self, target, model_output=None, name=None, element_wise=False):
        super(RootMeanSquaredErrorInt, self).__init__(target, model_output, name,
                                                      element_wise=element_wise)

    def add_batch(self, batch, result):
        """Accumulate the squared error of one batch on rounded predictions."""
        reference = batch[self.target]
        prediction = result
        if self.model_output is not None:
            # Drill into nested model outputs via a key path or single key.
            if type(self.model_output) is list:
                for key in self.model_output:
                    prediction = prediction[key]
            else:
                prediction = prediction[self.model_output]
        # Clamp negatives to zero, then round to the nearest integer.
        rounded = torch.round(prediction.clamp(min=0.))
        residual = self._get_diff(reference, rounded)
        self.l2loss += torch.sum(residual.view(-1) ** 2).detach().cpu().data.numpy()
        if self.element_wise:
            # Count only real atoms (mask out padding) times components.
            self.n_entries += torch.sum(batch[Structure.atom_mask]) * reference.shape[-1]
        else:
            self.n_entries += np.prod(reference.shape)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.