hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e0253228836a46896b07cf2ddc20895d4265270a
| 37,413
|
py
|
Python
|
PROGRAMA_APS.py
|
rafaaelm/APS_2Semestre
|
c00090969f7fa98eb567248661a115d5cfd26f0f
|
[
"MIT"
] | null | null | null |
PROGRAMA_APS.py
|
rafaaelm/APS_2Semestre
|
c00090969f7fa98eb567248661a115d5cfd26f0f
|
[
"MIT"
] | null | null | null |
PROGRAMA_APS.py
|
rafaaelm/APS_2Semestre
|
c00090969f7fa98eb567248661a115d5cfd26f0f
|
[
"MIT"
] | null | null | null |
# ATIVIDADE PRÁTICA SUPERVISIONADAS (APS)
# Funções por tipos de materiais
# Plasticos
def MaterialEscolhidoPlastico(z):
    """Print plastic types and nearby collection points for the location bucket *z*.

    z -- sum of latitude and longitude, used as a crude location bucket.
    Returns z when it falls inside a known bucket; None for gaps outside -291..280.
    """
    x = z
    if x >= -291 and x <= -150:
        print('', ['sacola de supermercado, saco de lixo, embalagem de leite', 'Tipo de plastico: PEBD (Polietileno de Baixa Densidade)',
                   ' Ponto de coleta: Rua Serra Dourada 165, Campinas, SP, 13100-312'], '\n',
              ['Garrafa de alvejante, Garrafa detergente, frascos shampoo,', 'Tipo: plasticos: PEAD (Polietileno de Alta Densidade)',
               'ponto de coleta: Rua Celso soares Couto s/n°, Pq. Itajaí'])
        return x
    elif x >= -149 and x <= -100:
        print('', ['Garrafas Pet', 'embalagem de cosméticos', 'Tipo de plastico: PET (Tereftalato de Polietileno)', 'Local de coleta: Av. São José dos campos s/ n°, Vl Campos Sales' ], '\n',
              ['pote de sorvete, partes internas de geladeira, brinquedo lego', 'Tipo plastico: PS (Poliestireno)',
               'Local de coleta: Rua josé martins lourenço, esq. com Rua Geraldo Bretas, jd. São Gabriel'])
        return x
    elif x > -100 and x <= 0:
        print('', ['Pote de sorvete', 'Tipo plastico: PP (Polipropileno)', 'Av. Marechal Rondon, esq. com Rua José Manuel Veiga, Jd. Eulina'], '\n',
              ['PVC', 'Tipo de platico: PVC (Policloreto de Vinila)', 'Ponto de coleta: Rua Manoel Gomes Ferreira, esq. com Rua José Ramos Catarino'])
        return x
    elif x > 0 and x <= 90:
        print('', ['pote de sorvete, partes internas de geladeira, brinquedo lego', 'Tipo plastico: PS (Poliestireno)',
                   'Local de coleta: Rua josé martins lourenço, esq. com Rua Geraldo Bretas, jd. São Gabriel'], '\n',
              ['Garrafas Pet', 'embalagem de cosméticos', 'Tipo de plastico: PET (Tereftalato de Polietileno)', 'Local de coleta: Av. São José dos campos s/ n°, Vl Campos Sales' ])
        return x
    elif x > 90 and x <= 149:
        print('', ['pote de sorvete, partes internas de geladeira, brinquedo lego', 'Tipo plastico: PS (Poliestireno)',
                   'Local de coleta: Rua josé martins lourenço, esq. com Rua Geraldo Bretas, jd. São Gabriel'], '\n',
              ['PVC', 'Tipo de platico: PVC (Policloreto de Vinila)',
               'Ponto de coleta: Rua Manoel Gomes Ferreira, esq. com Rua José Ramos Catarino'] )
        return x
    elif x > 149 and x <= 210:
        print(['sacola de supermercado, saco de lixo, embalagem de leite', 'Tipo de plastico: PEBD (Polietileno de Baixa Densidade)',
               ' Ponto de coleta: Rua Serra Dourada 165, Campinas, SP, 13100-312'], '\n',
              ['PVC', 'Tipo de platico: PVC (Policloreto de Vinila)', 'Ponto de coleta: Rua Manoel Gomes Ferreira, esq. com Rua José Ramos Catarino'], '\n',
              ['Garrafas Pet', 'embalagem de cosméticos', 'Tipo de plastico: PET (Tereftalato de Polietileno)', 'Local de coleta: Av. São José dos campos s/ n°, Vl Campos Sales' ])
        return x
    elif x > 210 and x <= 280:
        print('', ['Garrafa de alvejante, Garrafa detergente, frascos shampoo,', 'Tipo: plasticos: PEAD (Polietileno de Alta Densidade)',
                   'ponto de coleta: Rua Celso soares Couto s/n°, Pq. Itajaí'], '\n',
              ['sacola de supermercado, saco de lixo, embalagem de leite', 'Tipo de plastico: PEBD (Polietileno de Baixa Densidade)',
               ' Ponto de coleta: Rua Serra Dourada 165, Campinas, SP, 13100-312'])
        # BUG FIX: this branch previously fell through and returned None,
        # unlike every other bucket; return x for consistency.
        return x
# Vidro
def MaterialEscolhidoVidro(z):
    """Print glass types and nearby collection points for the location bucket *z*.

    z -- sum of latitude and longitude, used as a crude location bucket.
    Returns z when it falls inside a known bucket; None for gaps (e.g. 150 < x <= 151).
    """
    x = z
    if x >= -291 and x <= -120:
        # BUG FIX: removed a stray trailing comma that turned this print
        # statement into a one-element tuple expression.
        print('', ['temperado, laminado, duplo', 'Ponto de coleta proximo: Ponto verde - Vila Costa e Silva Rua Saldanha da Gama, 77 Ecoponto'], '\n',
              ['Termoendurecido, Lacado, Controle Acústico, Controle Solar', 'Pontos de coleta proximo: Ponto verde - Vila Costa e Silva Rua Saldanha da Gama, 77 Ecoponto'])
        return x
    elif x > -120 and x <= -90:
        print('', ['Vidros especiais: Corta fogo, Auto limpeza, Anti-Bala / Blindado','Ponto de coleta proximo: ',
                   ' Ponto Verde Av. Santa Isabel, 2300, Ecoponto Jardim EulinaAv. Mal. Rondon'])
        return x
    elif x > -90 and x <= -20:
        print('', ['temperado, laminado, duplo', 'Ponto de coleta proximo: Ponto verde - Vila Costa e Silva Rua Saldanha da Gama, 77 Ecoponto'], '\n' ,
              ['Vidros especiais: Vidro para Pavimentos, Decorativo / Vidro Texturado, espelho',
               'Ponto de coleta proximo: Ponto-Verde Av. Santa Isabel, 2300 Ecoponto Jardim Eulina Av. Mal. Rondon'])
        return x
    elif x > -20 and x <= 50:
        print('', ['temperado, laminado, duplo', 'Ponto de coleta proximo: Ponto verde - Vila Costa e Silva Rua Saldanha da Gama, 77 Ecoponto'])
        return x
    elif x > 50 and x <= 100:
        # BUG FIX: a missing comma implicitly concatenated the description and
        # the address into one string; the parallel branch above keeps them as
        # two separate list elements, so restore the comma.
        print('', ['Termoendurecido, Lacado, Controle Acústico, Controle Solar', 'Pontos de coleta proximo: Ponto verde - Vila Costa e Silva Rua Saldanha da Gama, 77 Ecoponto'], '\n',
              ['Vidros especiais: Vidro para Pavimentos, Decorativo / Vidro Texturado, espelho',
               'Ponto de coleta proximo: Ponto-Verde Av. Santa Isabel, 2300 Ecoponto Jardim Eulina Av. Mal. Rondon'])
        return x
    elif x > 100 and x <= 150:
        print('', ['Vidros especiais: Corta fogo, Auto limpeza, Anti-Bala / Blindado','Ponto de coleta proximo: ',
                   ' Ponto Verde Av. Santa Isabel, 2300, Ecoponto Jardim EulinaAv. Mal. Rondon'])
        return x
    elif x > 151 and x <= 219:
        print('', ['Vidros especiais: Corta fogo, Auto limpeza, Anti-Bala / Blindado','Ponto de coleta proximo: ',
                   ' Ponto Verde Av. Santa Isabel, 2300, Ecoponto Jardim EulinaAv. Mal. Rondon'], '\n',
              ['Termoendurecido, Lacado, Controle Acústico, Controle Solar',
               'Pontos de coleta proximo: Ponto verde - Vila Costa e Silva Rua Saldanha da Gama, 77 Ecoponto'])
        return x
    elif x > 219 and x <= 280:
        print('', ['Vidros especiais: Corta fogo, Auto limpeza, Anti-Bala / Blindado','Ponto de coleta proximo: ',
                   ' Ponto Verde Av. Santa Isabel, 2300, Ecoponto Jardim EulinaAv. Mal. Rondon'], '\n',
              ['temperado, laminado, duplo', 'Ponto de coleta proximo: Ponto verde - Vila Costa e Silva Rua Saldanha da Gama, 77 Ecoponto'])
        return x
#Papel
def MaterialEscolhidoPapel(z):
    """Print paper types and nearby collection points for the location bucket *z*.

    z -- sum of latitude and longitude, used as a crude location bucket.
    Returns z when it falls inside a known bucket; None outside -291..280.
    """
    x = z
    if x >= -291 and x <= -120:
        print('', ['lista telefonica, jornal, revista', 'tipo: papel imprensa',
                   'Ponto de coleta: (Papeis zambelli)R Leonor Ponessi Cappelli, 210, Parque Rural Fazenda Santa Cândida - Campinas, SP'], '\n',
              ['A4, A3, A2, A1', 'Tipo: papel para imprimir',
               'Ponto de coleta: (comercio de sucatas rodrigues) R Itacuruçá, 480 - Jardim Aeroporto de Viracopos - Campinas, SP - CEP: 13056-094'])
        # BUG FIX: this branch was the only one without a return statement.
        return x
    elif x > -120 and x <= -90:
        print('', ['Papelão', 'Tipo: papel ondulado',
                   'Ponto de coleta: (Marapara maravalhas aparas de papel papelão e sucatas) R São José do Rio Preto, 1101 - Jardim Nova Europa - Campinas, SP - CEP: 13040-060'], ' \n',
              ['embalagens de alimentos e remedios', 'tipo: papel cartão', 'Ponto de coleta: (Latem) Av. Orozimbo Maia, 2062 - Cambuí - Campinas, SP - CEP: 13024-045'], '\n',
              ['pepel higienico, papel toalha, guarda napos', 'Tipo: papel sanitário', 'Ponto de coleta: ops: esse tipo de papel não pode ser reciclado ;-)'])
        return x
    elif x > -90 and x <= 0:
        print('', ['A4, A3, A2, A1', 'Tipo: papel para imprimir',
                   'Ponto de coleta: (comercio de sucatas rodrigues) R Itacuruçá, 480 - Jardim Aeroporto de Viracopos - Campinas, SP - CEP: 13056-094'], '\n',
              ['embalagens de alimentos e remedios', 'tipo: papel cartão', 'Ponto de coleta: (Latem) Av. Orozimbo Maia, 2062 - Cambuí - Campinas, SP - CEP: 13024-045'])
        return x
    elif x > 0 and x <= 60:
        print('', ['Papelão', 'Tipo: papel ondulado',
                   'Ponto de coleta: (Marapara maravalhas aparas de papel papelão e sucatas) R São José do Rio Preto, 1101 - Jardim Nova Europa - Campinas, SP - CEP: 13040-060'], '\n',
              ['lista telefonica, jornal, revista', 'tipo: papel imprensa',
               'Ponto de coleta: (Papeis zambelli)R Leonor Ponessi Cappelli, 210, Parque Rural Fazenda Santa Cândida - Campinas, SP'])
        return x
    elif x > 60 and x <= 110:
        print('', ['Papelão', 'Tipo: papel ondulado',
                   'Ponto de coleta: (Marapara maravalhas aparas de papel papelão e sucatas) R São José do Rio Preto, 1101 - Jardim Nova Europa - Campinas, SP - CEP: 13040-060'], '\n',
              ['lista telefonica, jornal, revista', 'tipo: papel imprensa',
               'Ponto de coleta: (Papeis zambelli)R Leonor Ponessi Cappelli, 210, Parque Rural Fazenda Santa Cândida - Campinas, SP'], '\n',
              ['A4, A3, A2, A1', 'Tipo: papel para imprimir',
               'Ponto de coleta: (comercio de sucatas rodrigues) R Itacuruçá, 480 - Jardim Aeroporto de Viracopos - Campinas, SP - CEP: 13056-094'])
        return x
    elif x > 110 and x <= 170:
        print('', ['lista telefonica, jornal, revista', 'tipo: papel imprensa',
                   'Ponto de coleta: (Papeis zambelli)R Leonor Ponessi Cappelli, 210, Parque Rural Fazenda Santa Cândida - Campinas, SP'], '\n',
              ['embalagens de alimentos e remedios', 'tipo: papel cartão', 'Ponto de coleta: (Latem) Av. Orozimbo Maia, 2062 - Cambuí - Campinas, SP - CEP: 13024-045'])
        return x
    elif x > 170 and x <= 220:
        print('', ['embalagens de alimentos e remedios', 'tipo: papel cartão', 'Ponto de coleta: (Latem) Av. Orozimbo Maia, 2062 - Cambuí - Campinas, SP - CEP: 13024-045'], '\n',
              ['A4, A3, A2, A1', 'Tipo: papel para imprimir',
               'Ponto de coleta: (comercio de sucatas rodrigues) R Itacuruçá, 480 - Jardim Aeroporto de Viracopos - Campinas, SP - CEP: 13056-094'])
        return x
    elif x > 220 and x <= 280:
        print('', ['lista telefonica, jornal, revista', 'tipo: papel imprensa',
                   'Ponto de coleta: (Papeis zambelli)R Leonor Ponessi Cappelli, 210, Parque Rural Fazenda Santa Cândida - Campinas, SP'], '\n',
              ['embalagens de alimentos e remedios', 'tipo: papel cartão', 'Ponto de coleta: (Latem) Av. Orozimbo Maia, 2062 - Cambuí - Campinas, SP - CEP: 13024-045'], '\n',
              ['Papelão', 'Tipo: papel ondulado',
               'Ponto de coleta: (Marapara maravalhas aparas de papel papelão e sucatas) R São José do Rio Preto, 1101 - Jardim Nova Europa - Campinas, SP - CEP: 13040-060'])
        return x
#Metal
def MaterialEscolhidoMetal(z):
    """Print metal types and nearby collection points for the location bucket *z*.

    z -- sum of latitude and longitude, used as a crude location bucket.
    Returns z when it falls inside a known bucket; None outside (-291, 280].
    """
    x = z
    if x > -291 and x <= -200:
        print('', ['ferro, aço', 'Tipo: ferroso. Ponto de coleta: Ferro Velho Roberto, Compra E Venda De Sucatas: R. Marquês de Abrantes, 600 - Jardim Santa Genebra, Campinas - SP, 13080-220'], '\n',
              ['alumínio, cobre', 'tipo: não ferroso. Ponto de coleta: FERRO VELHO CLEBER: R. Pref. Celso Daniel, 161 - Vila San Martin, Campinas - SP, 13069-079 '], '\n',
              ['chumbo, níquel, mercúrio',
               'tipo: metal pesado. Ponto de coleta: Sucatas e Metais 2 Irmãos | Ferro Velho em Campinas | Sucatas em Campinas: R. Guaiçara, 354 - Jardim Itatinga, Campinas - SP, 13052-442 '])
        return x
    elif x > -200 and x <= -160:
        print('não tem metal reciclável proximo')
        return x
    elif x > -160 and x <= -110:
        print('', ['alumínio, cobre', 'tipo: não ferroso. Ponto de coleta: FERRO VELHO CLEBER: R. Pref. Celso Daniel, 161 - Vila San Martin, Campinas - SP, 13069-079 '])
        return x
    elif x > -110 and x <= -60:
        print('', ['chumbo, níquel, mercúrio',
                   'tipo: metal pesado. Ponto de coleta: Sucatas e Metais 2 Irmãos | Ferro Velho em Campinas | Sucatas em Campinas: R. Guaiçara, 354 - Jardim Itatinga, Campinas - SP, 13052-442 '], '\n',
              ['ferro, aço',
               'Tipo: ferroso. Ponto de coleta: Ferro Velho Roberto, Compra E Venda De Sucatas: R. Marquês de Abrantes, 600 - Jardim Santa Genebra, Campinas - SP, 13080-220'])
        return x
    elif x > -60 and x <= 0:
        print('', ['chumbo, níquel, mercúrio',
                   'tipo: metal pesado. Ponto de coleta: Sucatas e Metais 2 Irmãos | Ferro Velho em Campinas | Sucatas em Campinas: R. Guaiçara, 354 - Jardim Itatinga, Campinas - SP, 13052-442 '])
        return x
    elif x > 0 and x <= 60:
        print('', ['ferro, aço',
                   'Tipo: ferroso. Ponto de coleta: Ferro Velho Roberto, Compra E Venda De Sucatas: R. Marquês de Abrantes, 600 - Jardim Santa Genebra, Campinas - SP, 13080-220'], '\n',
              ['chumbo, níquel, mercúrio',
               'tipo: metal pesado. Ponto de coleta: Sucatas e Metais 2 Irmãos | Ferro Velho em Campinas | Sucatas em Campinas: R. Guaiçara, 354 - Jardim Itatinga, Campinas - SP, 13052-442 '])
        return x
    elif x > 60 and x <= 110:
        print('', ['ferro, aço',
                   'Tipo: ferroso. Ponto de coleta: Ferro Velho Roberto, Compra E Venda De Sucatas: R. Marquês de Abrantes, 600 - Jardim Santa Genebra, Campinas - SP, 13080-220'])
        return x
    elif x > 110 and x <= 170:
        print('', ['alumínio, cobre', 'tipo: não ferroso. Ponto de coleta: FERRO VELHO CLEBER: R. Pref. Celso Daniel, 161 - Vila San Martin, Campinas - SP, 13069-079 '],'\n',
              ['ferro, aço',
               'Tipo: ferroso. Ponto de coleta: Ferro Velho Roberto, Compra E Venda De Sucatas: R. Marquês de Abrantes, 600 - Jardim Santa Genebra, Campinas - SP, 13080-220'])
        # BUG FIX: branch previously fell through without returning.
        return x
    elif x > 170 and x <= 220:
        print('', ['alumínio, cobre', 'tipo: não ferroso. Ponto de coleta: FERRO VELHO CLEBER: R. Pref. Celso Daniel, 161 - Vila San Martin, Campinas - SP, 13069-079 '], '\n',
              ['ferro, aço',
               'Tipo: ferroso. Ponto de coleta: Ferro Velho Roberto, Compra E Venda De Sucatas: R. Marquês de Abrantes, 600 - Jardim Santa Genebra, Campinas - SP, 13080-220'])
        # BUG FIX: branch previously fell through without returning.
        return x
    elif x > 220 and x <= 280:
        print('não tem metal reciclável proximo')
        return x
#Eletrônicos
def MaterialEscolhidoEletro(z):
    """Print e-waste types and nearby collection points for the location bucket *z*.

    z -- sum of latitude and longitude, used as a crude location bucket.
    Returns z when it falls inside a known bucket; None outside (-291, 280].
    """
    x = z
    if x > -291 and x <= -200:
        print('', ['Computador, televisor, monitor, notebook, celular, tablet',
                   'Ponto de coleta: (Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'], '\n',
              ['telefones, impressora, vídeo, cassete, aparelho DVD', '(Ecoponto / Ponto Verde) fica na Av. Santa Isabel, 2300 - Barão Geraldo, Campinas -SP, 13084-012'])
        return x
    elif x > -200 and x <= -130:
        print('', ['microondas, modem, caixa de som',
                   'Ponto de coleta: (Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'], '\n',
              ['no Break, estabilizador, cabos de energia', 'Ponto de coleta: (GMV Recycle) fica na Rod. Lix da Cunha, 911 - Jardim do Lago II, Campinas - SP, 13051-051'])
        return x
    elif x > -130 and x <= -85:
        print(['telefones, impressora, vídeo, cassete, aparelho DVD', '(Ecoponto / Ponto Verde) fica na Av. Santa Isabel, 2300 - Barão Geraldo, Campinas -SP, 13084-012'], '\n',
              ['microondas, modem, caixa de som',
               'Ponto de coleta: (Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'])
        return x
    elif x > -85 and x <= -20:
        print(['telefones, impressora, vídeo, cassete, aparelho DVD', '(Ecoponto / Ponto Verde) fica na Av. Santa Isabel, 2300 - Barão Geraldo, Campinas -SP, 13084-012'], '\n',
              ['no Break, estabilizador, cabos de energia', 'Ponto de coleta: (GMV Recycle) fica na Rod. Lix da Cunha, 911 - Jardim do Lago II, Campinas - SP, 13051-051'])
        # BUG FIX: branch previously fell through without returning.
        return x
    elif x > -20 and x <= 20:
        print('não tem lixo eletrônico ao redor')
        return x
    elif x > 20 and x <= 80:
        print(['microondas, modem, caixa de som',
               'Ponto de coleta: (Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'], '\n',
              ['Computador, televisor, monitor, notebook, celular, tablet',
               'Ponto de coleta: (Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'])
        return x
    elif x > 80 and x <= 130:
        print(['Computador, televisor, monitor, notebook, celular, tablet',
               'Ponto de coleta: (Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'], '\n',
              ['telefones, impressora, vídeo, cassete, aparelho DVD', '(Ecoponto / Ponto Verde) fica na Av. Santa Isabel, 2300 - Barão Geraldo, Campinas -SP, 13084-012'], '\n',
              ['microondas, modem, caixa de som',
               'Ponto de coleta: (Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'])
        return x
    elif x > 130 and x <= 180:
        print(['telefones, impressora, vídeo, cassete, aparelho DVD', '(Ecoponto / Ponto Verde) fica na Av. Santa Isabel, 2300 - Barão Geraldo, Campinas -SP, 13084-012'])
        return x
    elif x > 180 and x <= 230:
        print(['Computador, televisor, monitor, notebook, celular, tablet',
               'Ponto de coleta: (Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'], '\n',
              ['no Break, estabilizador, cabos de energia', 'Ponto de coleta: (GMV Recycle) fica na Rod. Lix da Cunha, 911 - Jardim do Lago II, Campinas - SP, 13051-051'])
        return x
    elif x > 230 and x <= 280:
        print(['Computador, televisor, monitor, notebook, celular, tablet',
               'Ponto de coleta: (Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'])
        return x
#Converte uma opção do menu (1, 2, 3, etc.)
def MaterialSelecionado(latitude, longitude, opcao):
    """Validate the coordinates and dispatch to the handler for *opcao*.

    latitude  -- must lie in -90..90
    longitude -- must lie in -180..180
    opcao     -- menu option: 1 plastico, 2 metal, 3 vidro, 4 papel, 5 eletronicos
    Returns the handler's result, 'coordenadas invalidas' for out-of-range
    coordinates, or None for an unrecognized option (silent — matches callers).
    """
    if latitude < -90 or latitude > 90 or longitude < -180 or longitude > 180:
        return 'coordenadas invalidas'
    elif (-90 <= latitude <= 90) and (-180 <= longitude <= 180):
        # BUG FIX: the float() results were previously computed and discarded;
        # keep them so z is always a float sum of the two coordinates.
        latitude = float(latitude)
        longitude = float(longitude)
        z = latitude + longitude
        if opcao == 1:
            return MaterialEscolhidoPlastico(z)
        elif opcao == 2:
            return MaterialEscolhidoMetal(z)
        elif opcao == 3:
            return MaterialEscolhidoVidro(z)
        elif opcao == 4:
            return MaterialEscolhidoPapel(z)
        elif opcao == 5:
            return MaterialEscolhidoEletro(z)
# teste condicional coordenadas para material reciclável proximo,
def coordenada_la(latitude, longitude):
    """Return the recyclable materials near the given coordinates.

    The position is bucketed by x = latitude + longitude; each bucket maps to
    a fixed list of nearby materials.
    Returns a list of material-name strings, or 'coordenadas invalidas' when
    the coordinates are out of range.

    BUG FIX: every range test was written `a >= x <= b`, which Python chains
    as `(a >= x) and (x <= b)` — i.e. `x <= min(a, b)` — so almost every input
    matched the wrong bucket. The intended interval test is `a <= x <= b`.
    """
    if latitude < -90 or latitude > 90 or longitude < -180 or longitude > 180:
        return 'coordenadas invalidas'
    elif -90 <= latitude <= 90 and -180 <= longitude <= 180:
        # BUG FIX: keep the float() results instead of discarding them.
        latitude = float(latitude)
        longitude = float(longitude)
        # simple method to bucket the coordinates into a decimal value
        x = latitude + longitude
        if -291 <= x <= -230:
            x = ['no Break', 'cobre', 'níquel', 'Pote de sorvete', 'Garrafas Pet', 'embalagem de cosméticos']
            return x
        elif -229 <= x <= -200:
            x = ['embalagens de alimentos ', 'guarda napos', 'Papelão']
            return x
        elif -199 <= x <= -152:
            x = ['aparelho DVD, telefones, frascos shampoo, embalagem de leite']
            return x
        elif -151 <= x <= -116:
            x = ['televisor, revista, Garrafa de alvejante, mercúrio']
            return x
        elif -115 <= x <= -94:
            x = ['PVC, embalagem de cosméticos, embalagem de leite, Garrafa detergente ']
            return x
        elif -93 <= x <= -79:
            x = ['cabos de energia, embalagens de alimentos, notebook ']
            return x
        elif -78 <= x <= -50:
            x = ['caixa de som, impressora, monitor, tablet, mercúrio ']
            return x
        elif -49 <= x <= -1:
            x = ['Vidro para Pavimentos, vidro temperado, brinquedo lego']
            return x
        elif -0 <= x <= 30:
            x = ['impressora, cabos de energia, frascos shampoo, alumínio']
            return x
        elif 31 <= x <= 51:
            x = ['estabilizador, revista, partes internas de geladeira']
            return x
        elif 52 <= x <= 92:
            x = ['Computador, ferro, cobre, brinquedo lego']
            return x
        elif 93 <= x <= 110:
            x = ['brinquedo lego, remedios, tablet, lista telefonica']
            return x
        elif 111 <= x <= 135:
            x = ['sacola de supermercado, microondas, papel para imprimir, televisor']
            return x
        elif 136 <= x <= 157:
            x = ['brinquedos de plastico, garrafa de vidro, telefones, aparelho DVD']
            return x
        elif 158 <= x <= 178:
            x = ['painel de vidro, baterias, peças de carro, notebook']
            return x
        elif 179 <= x <= 200:
            x = ['vidro termoendurecido, vidro acústico, janela, vidro temperado']
            return x
        elif 201 <= x <= 220:
            x = ['sacola plastica, garrafa pet, pote de alimento, caixa de remedio']
            return x
        elif 221 <= x <= 240:
            # NOTE: the two adjacent string literals were implicitly
            # concatenated in the original; the single merged element is kept.
            x = ['vidro termoendurecido, vidro lacado, vidro controle Acústico, vidro controle Solar, pepel higienico, papel toalha, guarda napos']
            return x
        elif 241 <= x <= 251:
            x = ['sacola plastica, garrafa pet, pote de alimento, caixa de remedio, caixa de som, impressora']
            return x
        elif 252 <= x <= 260:
            x = ['Computador, ferro, baterias, peças de carro']
            return x
        elif 261 <= x <= 265:
            x = ['embalagens de alimentos e remedios, lista telefonica, jornal']
            return x
        elif 230 <= x <= 280:
            # Reached only for 266..280 — earlier branches cover 230..265.
            x = ['baterias, celulares e telefones']
            return x
        else:
            # Fractional gaps between buckets (e.g. -49.5) land here.
            x = ['painel de vidro, baterias, peças de carro, notebook, caixa de som, impressora, monitor, tablet, mercúrio']
            return x
#Pontos de coleta
def PontosColeta(latitude, longitude):
    """Print the collection points near the given coordinates.

    The position is bucketed by x = latitude + longitude; each bucket prints a
    fixed set of nearby collection-point addresses.
    Returns the (latitude, longitude) tuple on success, or an error string for
    out-of-range coordinates.

    BUG FIX: every range test was written `a >= x <= b`, which Python chains
    as `(a >= x) and (x <= b)` and selects the wrong bucket; the intended
    interval test is `a <= x <= b`. The validation bounds were also off by one
    (-91/-181) compared with the sibling validators; normalized to -90/-180.
    """
    if latitude < -90 or latitude > 90 or longitude < -180 or longitude > 180:
        return 'coordenadas invalidas, tente novamente'
    elif -90 <= latitude <= 90 and -180 <= longitude <= 180:
        # BUG FIX: keep the float() results instead of discarding them.
        latitude = float(latitude)
        longitude = float(longitude)
        # simple method of bucketing nearby places
        x = latitude + longitude
        if -291 <= x <= -240:
            print('', ['(GMV Recycle) fica na Rod. Lix da Cunha, 911 - Jardim do Lago II, Campinas - SP, 13051-051'],
                  '\n',
                  ['(FERRO VELHO CLEBER) R. Pref. Celso Daniel, 161 - Vila San Martin, Campinas - SP, 13069-079'])
            return latitude, longitude
        elif -239 <= x <= -215:
            print('', ['(GMV Recycle) fica na Rod. Lix da Cunha, 911 - Jardim do Lago II, Campinas - SP, 13051-051'], '\n',
                  ['Rua Manoel Gomes Ferreira, esq. com Rua José Ramos Catarino'], '\n',
                  ['Sucatas e Metais 2 Irmãos | Ferro Velho em Campinas | Sucatas em Campinas: R. Guaiçara, 354 - Jardim Itatinga, Campinas - SP, 13052-442'])
            return latitude, longitude
        # NOTE(review): the next two branches repeat the -239..-215 condition
        # of the branch above and are therefore unreachable (dead code).
        # Kept as-is because the intended ranges cannot be inferred — confirm
        # with the author which intervals these were meant to cover.
        elif -239 <= x <= -215:
            print('', ['(Latem) Av. Orozimbo Maia, 2062 - Cambuí - Campinas, SP - CEP: 13024-045'], '\n',
                  ['Av. São José dos campos s/ n°, Vl Campos Sales'], '\n',
                  ['(Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'])
            return latitude, longitude
        elif -239 <= x <= -215:
            print('', ['Rua josé martins lourenço, esq. com Rua Geraldo Bretas, jd. São Gabriel'], '\n',
                  ['Ponto verde - Vila Costa e Silva Rua Saldanha da Gama, 77 Ecoponto'], '\n',
                  ['(Papeis zambelli)R Leonor Ponessi Cappelli, 210, Parque Rural Fazenda Santa Cândida - Campinas, SP'], '\n',
                  ['FERRO VELHO CLEBER: R. Pref. Celso Daniel, 161 - Vila San Martin, Campinas - SP, 13069-079'])
            return latitude, longitude
        elif -214 <= x <= -190:
            print('', ['Rua Serra Dourada 165, Campinas, SP, 13100-312'], '\n',
                  ['Rua Celso soares Couto s/n°, Pq. Itajaí'], '\n',
                  ['Ferro Velho Roberto, Compra E Venda De Sucatas: R. Marquês de Abrantes, 600 - Jardim Santa Genebra, Campinas - SP, 13080-220'], '\n',
                  ['Ponto Verde Av. Santa Isabel, 2300, Ecoponto Jardim EulinaAv. Mal. Rondon'])
            return latitude, longitude
        elif -189 <= x <= -169:
            print('', ['(Papeis zambelli)R Leonor Ponessi Cappelli, 210, Parque Rural Fazenda Santa Cândida - Campinas, SP'], '\n',
                  ['Ponto verde - Vila Costa e Silva Rua Saldanha da Gama, 77 Ecoponto'], '\n',
                  ['(Ecoponto / Ponto Verde) fica na Av. Santa Isabel, 2300 - Barão Geraldo, Campinas -SP, 13084-012'], '\n',
                  ['R São José do Rio Preto, 1101 - Jardim Nova Europa - Campinas, SP - CEP: 13040-060'])
            return latitude, longitude
        elif -168 <= x <= -148:
            print('', ['Ponto de coleta: Rua Serra Dourada 165, Campinas, SP, 13100-312'], '\n',
                  ['Ponto-Verde Av. Santa Isabel, 2300 Ecoponto Jardim Eulina Av. Mal. Rondon'], '\n',
                  ['(Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'], '\n',
                  ['(Latem) Av. Orozimbo Maia, 2062 - Cambuí - Campinas, SP - CEP: 13024-045'])
            return latitude, longitude
        elif -147 <= x <= -120:
            print('', ['Ferro Velho Roberto, Compra E Venda De Sucatas: R. Marquês de Abrantes, 600 - Jardim Santa Genebra, Campinas - SP, 13080-220'], '\n',
                  ['Av. Marechal Rondon, esq. com Rua josé Manuel Veiga, jd. Eulina'], '\n',
                  ['(Ecoponto / Ponto Verde) fica na Av. Santa Isabel, 2300 - Barão Geraldo, Campinas -SP, 13084-012'], '\n',
                  ['Rua Manoel Gomes Ferreira, esq. com Rua José Ramos Catarino'])
            return latitude, longitude
        elif -119 <= x <= -100:
            print('', ['(Papeis zambelli)R Leonor Ponessi Cappelli, 210, Parque Rural Fazenda Santa Cândida - Campinas, SP'], '\n',
                  ['(Latem) Av. Orozimbo Maia, 2062 - Cambuí - Campinas, SP - CEP: 13024-045'], '\n',
                  ['(GMV Recycle) fica na Rod. Lix da Cunha, 911 - Jardim do Lago II, Campinas - SP, 13051-051'], '\n',
                  ['Ponto verde - Vila Costa e Silva Rua Saldanha da Gama, 77 Ecoponto'])
            return latitude, longitude
        elif -99 <= x <= -84:
            print('', ['Rua Celso soares Couto s/n°, Pq. Itajaí'], '\n',
                  ['Ponto verde - Vila Costa e Silva Rua Saldanha da Gama, 77 Ecoponto'], '\n',
                  ['Rua josé martins lourenço, esq. com Rua Geraldo Bretas, jd. São Gabriel'])
            return latitude, longitude
        elif -83 <= x <= -64:
            print('', ['(Papeis zambelli)R Leonor Ponessi Cappelli, 210, Parque Rural Fazenda Santa Cândida -- Campinas, SP'], '\n',
                  ['(comercio de sucatas rodrigues) R Itacuruçá, 480 - Jardim Aeroporto de Viracopos - Campinas, SP - CEP: 13056-094'], '\n',
                  ['R São José do Rio Preto, 1101 - Jardim Nova Europa - Campinas, SP - CEP: 13040-060'], '\n',
                  ['Ferro Velho Roberto, Compra E Venda De Sucatas: R. Marquês de Abrantes, 600 - Jardim Santa Genebra, Campinas - SP, 13080-220'])
            return latitude, longitude
        elif -63 <= x <= -20:
            print('', ['(Marapara maravalhas aparas de papel papelão e sucatas) R São José do Rio Preto, 1101 - Jardim Nova Europa - Campinas, SP - CEP: 13040-060'], '\n',
                  ['Ponto verde - Vila Costa e Silva Rua Saldanha da Gama, 77 Ecoponto'], '\n',
                  ['FERRO VELHO CLEBER: R. Pref. Celso Daniel, 161 - Vila San Martin, Campinas - SP, 13069-079'], '\n',
                  ['Rua josé martins lourenço, esq. com Rua Geraldo Bretas, jd. São Gabriel'])
            return latitude, longitude
        elif -19 <= x <= 2:
            print('', ['R São José do Rio Preto, 1101 - Jardim Nova Europa - Campinas, SP - CEP: 13040-060'], '\n',
                  ['Ponto-Verde Av. Santa Isabel, 2300 Ecoponto Jardim Eulina Av. Mal. Rondon'], '\n',
                  ['(Ecoponto / Ponto Verde) fica na Av. Santa Isabel, 2300 - Barão Geraldo, Campinas -SP, 13084-012'], '\n',
                  ['FERRO VELHO CLEBER: R. Pref. Celso Daniel, 161 - Vila San Martin, Campinas - SP, 13069-079'])
            return latitude, longitude
        elif 3 <= x <= 26:
            print('', ['(Latem) Av. Orozimbo Maia, 2062 - Cambuí - Campinas, SP - CEP: 13024-045'], '\n',
                  ['Rua Serra Dourada 165, Campinas, SP, 13100-312'], '\n',
                  ['FERRO VELHO CLEBER: R. Pref. Celso Daniel, 161 - Vila San Martin, Campinas - SP, 13069-079'], '\n',
                  ['Ponto-Verde Av. Santa Isabel, 2300 Ecoponto Jardim Eulina Av. Mal. Rondon'])
            return latitude, longitude
        elif 27 <= x <= 57:
            print('', ['Rua Manoel Gomes Ferreira, esq. com Rua José Ramos Catarino'], '\n',
                  ['(Ecoponto / Ponto Verde) fica na Av. Santa Isabel, 2300 - Barão Geraldo, Campinas -SP, 13084-012'], '\n',
                  ['(Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'])
            return latitude, longitude
        elif 58 <= x <= 90:
            print('', ['(Marapara maravalhas aparas de papel papelão e sucatas) R São José do Rio Preto, 1101 - Jardim Nova Europa - Campinas, SP - CEP: 13040-060'], '\n',
                  ['Ferro Velho Roberto, Compra E Venda De Sucatas: R. Marquês de Abrantes, 600 - Jardim Santa Genebra, Campinas - SP, 13080-220'])
            return latitude, longitude
        elif 91 <= x <= 130:
            print('', ['Ponto verde - Vila Costa e Silva Rua Saldanha da Gama, 77 Ecoponto'], '\n',
                  ['Ferro Velho Roberto, Compra E Venda De Sucatas: R. Marquês de Abrantes, 600 - Jardim Santa Genebra, Campinas - SP, 13080-220'], '\n',
                  ['(GMV Recycle) fica na Rod. Lix da Cunha, 911 - Jardim do Lago II, Campinas - SP, 13051-051'], '\n',
                  ['(Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'])
            return latitude, longitude
        elif 131 <= x <= 163:
            print('', ['Ponto verde - Vila Costa e Silva Rua Saldanha da Gama, 77 Ecoponto'], '\n',
                  ['(Papeis zambelli)R Leonor Ponessi Cappelli, 210, Parque Rural Fazenda Santa Cândida - Campinas, SP'], '\n',
                  ['(comercio de sucatas rodrigues) R Itacuruçá, 480 - Jardim Aeroporto de Viracopos - Campinas, SP - CEP: 13056-094'], '\n',)
            return latitude, longitude
        elif 164 <= x <= 200:
            print('', ['Av. São José dos campos s/ n°, Vl Campos Sales'], '\n',
                  ['Ponto-Verde Av. Santa Isabel, 2300 Ecoponto Jardim Eulina Av. Mal. Rondon'], '\n',
                  ['Av. Orozimbo Maia, 2062 - Cambuí - Campinas, SP - CEP: 13024-045'], '\n',
                  ['R São José do Rio Preto, 1101 - Jardim Nova Europa - Campinas, SP - CEP: 13040-060'])
            return latitude, longitude
        elif 201 <= x <= 231:
            print('', ['Av. Santa Isabel, 2300 - Barão Geraldo, Campinas -SP, 13084-012'], '\n',
                  ['(Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'], '\n',
                  ['Ponto-Verde Av. Santa Isabel, 2300 Ecoponto Jardim Eulina Av. Mal. Rondon'])
            return latitude, longitude
        elif 232 <= x <= 260:
            print('', ['Rua josé martins lourenço, esq. com Rua Geraldo Bretas, jd. São Gabriel'], '\n',
                  ['Sucatas em Campinas: R. Guaiçara, 354 - Jardim Itatinga, Campinas - SP, 13052-442'])
            return latitude, longitude
        elif 261 <= x <= 300:
            print('', ['(Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'], '\n',
                  ['Av. Marechal Rondon, esq. com Rua josé Manuel Veiga, jd. Eulina'], '\n',
                  ['Ponto verde - Vila Costa e Silva Rua Saldanha da Gama, 77 Ecoponto'], '\n',
                  ['Rua josé martins lourenço, esq. com Rua Geraldo Bretas, jd. São Gabriel'])
            return latitude, longitude
        elif x <= 270:
            # Catch-all for the fractional gaps between buckets (x is always
            # within -270..270 once the coordinates passed validation).
            print('', [
                '(Reversis - Reciclagem de Eletrônicos e Informática) fica na R. da Abolição, 1900 - Pte. Preta, Campinas - SP, 13041-445'],
                '\n',
                ['Av. Marechal Rondon, esq. com Rua josé Manuel Veiga, jd. Eulina'], '\n',
                ['Av. Orozimbo Maia, 2062 - Cambuí - Campinas, SP - CEP: 13024-045'])
            return latitude, longitude
        else:
            return 'Coordenadas inválidas'
print('Seja bem-vindo!')
print("""Este programa tem como finalidade localizar tipos de materiais e os pontos de coleta
para a reciclagem próximos de você.
Observação: coordenada latitude começa de -90 até 90 e longitude de -180 até 180, cuidado com as informações que for digitar
""")
opcao = 1
#Menu de opções
while opcao != 9:
print("""
Menu de opções:
[1] - Plastico
[2] - Metal
[3] - Vidro
[4] - Papel
[5] - Eletrônicos
[6] - Materiais proximos para coletar
[7] - Pontos para coleta proximos
[9] - Encerrar programa""")
opcao = float(input('Digite a opção desejada: '))
z = opcao
if z == 1:
print('Você selecionou "Plastico" \nlocalizando pontos de coleta proximos')
latitude = float(input('latitude: '))
longitude = float(input('longitude: '))
MaterialSelecionado(latitude, longitude, opcao)
print("""Sobre: São formados por polímeros(Tereftalato de Polietileno, Polietileno de Alta Densidade
Poli cloreto de Vinila, Polietileno de Baixa Densidade, Polipropileno e Poliestireno),
divididos em grupos de termoplásticos(podem ser reciclado) e termorrígidos (não são recicláveis).
Reutilização:
* vaso para plantas utilizando PETS
* Porta lápis
* Fazer decoração para casa
* casa de pássaros
* brinquedos """)
elif z == 2:
print('Você selecionou "Metal" \nlocalizando pontos de coleta proximos')
latitude = float(input('latitude: '))
longitude = float(input('longitude: '))
MaterialSelecionado(latitude, longitude, opcao)
print("""Sobre: Separados por ferrosos e não ferrosos. Ferrosos possuem propriedades magnéticas,
já os não ferrosos não possuem quantia grande de ferro.
Reutilização:
São derretidos para recriar outros tipos de materiais.
Os metais mais utilizados são alumínio, ferro, aço e cobre.""")
elif z == 3:
print('Você selecionou "Vidro" \nlocalizando pontos de coleta proximos')
latitude = float(input('latitude: '))
longitude = float(input('longitude: '))
MaterialSelecionado(latitude, longitude, opcao)
print("""Sobre: é uma misturara de produtos inorgânicos: dióxido de silício,
barrilha ou soda (carbonato de sódio - Na2CO3) e calcário (carbonato de cálcio – CaCO3).
a mistura dos três podem criar o vidro.
Reutilização:
O vidro no seu processo quando reaproveitado, é utilizado para criar outros tipos de materiais feito de vidro """)
elif z == 4:
print('Você selecionou "Papel" \nlocalizando pontos de coleta proximos')
latitude = float(input('latitude: '))
longitude = float(input('longitude: '))
MaterialSelecionado(latitude, longitude, opcao)
print("""Sobre: O papel comum tem como matéria prima, fibras de celulose extraídas de
arvores, como o pinheiro e o eucalipto.
Reutilização:
* fazer embrulho para presentes;
* fatiado serve para forro de gaiola;
* possível fazer fogueira, para acender churrasqueira;
* recortes serve para fazer artesanato;
* papeis velhos é possível criar uma folha nova reciclada""")
elif z == 5:
print('Você selecionou "Eletrônicos" \nlocalizando pontos de coleta proximos')
latitude = float(input('latitude: '))
longitude = float(input('longitude: '))
MaterialSelecionado(latitude, longitude, opcao)
print("""
Sobre: são componentes que controlados por circuitos elétricos, esse circuito
pode se manifestar de diversas maneiras, como visual, sonora, mecânica, emitir ondas de rádio e muito mais.
Reutilização:
* recriar novos equipamentos;
* vidro pode ser vendido para empresas que usam matéria prima; """)
elif z == 6:
print('localizando materiais reciclaveis proximos')
la = float(input('latitude: '))
lon = float(input('longitude: '))
print(coordenada_la(la, lon))
elif z == 7:
print('localizando pontos de coleta proximos')
latitude = float(input('latitude: '))
longitude = float(input('longitude: '))
print(PontosColeta(latitude, longitude))
elif z == 9:
print("""Autores:
Lucas Alves de Souza Marques Timoteo - N555696
Leonardo Slonzo Alvares Portes – F3079A9
Nathan Duarte da Silva – N6188C2
Rafael Mascarenhas – F19DAD3
Vinicius de Campos da Silva – F206BA7
""")
print('Encerrando programa!!')
exit()
else:
print('valor invalido, tente novamente')
| 60.538835
| 199
| 0.610857
| 4,811
| 37,413
| 4.752858
| 0.123675
| 0.044608
| 0.036386
| 0.017843
| 0.794236
| 0.787195
| 0.779279
| 0.769658
| 0.764672
| 0.750809
| 0
| 0.064645
| 0.269398
| 37,413
| 617
| 200
| 60.636953
| 0.771384
| 0.009088
| 0
| 0.508137
| 0
| 0.164557
| 0.645079
| 0.000567
| 0
| 0
| 0
| 0.001621
| 0
| 1
| 0.014467
| false
| 0
| 0
| 0
| 0.18264
| 0.155515
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
0ec8072a94e1f2ab553792dd9fa919a2820b9957
| 184
|
py
|
Python
|
page_counter/__init__.py
|
bircow/page-counter
|
aca721fdbb7bec2d0352136a32065d8950154495
|
[
"MIT"
] | 2
|
2018-05-03T21:06:07.000Z
|
2018-05-03T21:06:10.000Z
|
page_counter/__init__.py
|
bircow/page-counter
|
aca721fdbb7bec2d0352136a32065d8950154495
|
[
"MIT"
] | 2
|
2018-05-03T12:45:45.000Z
|
2018-05-03T20:14:06.000Z
|
page_counter/__init__.py
|
bircow/page-counter
|
aca721fdbb7bec2d0352136a32065d8950154495
|
[
"MIT"
] | null | null | null |
from page_counter.dialects import dialect_choices, dialect_names
from page_counter.page_counter import PageCounter
__all__ = [
'PageCounter', 'dialect_choices', 'dialect_names'
]
| 26.285714
| 64
| 0.809783
| 22
| 184
| 6.272727
| 0.454545
| 0.23913
| 0.217391
| 0.376812
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11413
| 184
| 6
| 65
| 30.666667
| 0.846626
| 0
| 0
| 0
| 0
| 0
| 0.211957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 0.4
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
0ecda381a4fad88bafeed2b8177bda8d90adaad0
| 3,340
|
py
|
Python
|
test_twitter.py
|
asyncy/asyncy-twitter
|
ecc1a62e3acb32c36277782c9813ff3b094ed6a4
|
[
"MIT"
] | 2
|
2019-11-09T05:26:01.000Z
|
2020-07-19T07:06:02.000Z
|
test_twitter.py
|
asyncy/asyncy-twitter
|
ecc1a62e3acb32c36277782c9813ff3b094ed6a4
|
[
"MIT"
] | 2
|
2018-08-07T05:29:39.000Z
|
2019-05-20T13:05:36.000Z
|
test_twitter.py
|
asyncy/asyncy-twitter
|
ecc1a62e3acb32c36277782c9813ff3b094ed6a4
|
[
"MIT"
] | 2
|
2019-06-28T07:16:51.000Z
|
2019-08-30T11:01:14.000Z
|
from http import HTTPStatus
import sys
import os
from stream import Stream
def test_tweet_request(client):
data = {"status": "Mock status"}
url = "/tweet"
response = client.post(url, json=data)
assert response.status_code == HTTPStatus.OK
def test_tweet_request_fail(client):
data = {"status": ""}
url = "/tweet"
response = client.post(url, json=data)
assert response.status_code == HTTPStatus.BAD_REQUEST
def test_retweet_request(client):
data = {"tweet": os.environ["RETWEET_ID"]}
url = "/retweet"
response = client.post(url, json=data)
assert response.status_code == HTTPStatus.OK
def test_retweet_request_fail(client):
data = {"id": ""}
url = "/retweet"
response = client.post(url, json=data)
assert response.status_code == HTTPStatus.BAD_REQUEST
def test_follow_user_id_request(client):
data = {"id": os.environ["USER_ID"]}
url = "/follow"
response = client.post(url, json=data)
assert response.status_code == HTTPStatus.OK
def test_follow_screen_name_request(client):
data = {"handle": os.environ["SCREEN_NAME"]}
url = "/follow"
response = client.post(url, json=data)
assert response.status_code == HTTPStatus.OK
def test_follow_request_fail(client):
data = {}
url = "/follow"
response = client.post(url, json=data)
assert response.status_code == HTTPStatus.BAD_REQUEST
def test_unfollow_id_request(client):
data = {"id": os.environ["USER_ID"]}
url = "/unfollow"
response = client.post(url, json=data)
assert response.status_code == HTTPStatus.OK
def test_unfollow_screen_name_request(client):
data = {"screen_name": os.environ["SCREEN_NAME"]}
url = "/unfollow"
response = client.post(url, json=data)
assert response.status_code == HTTPStatus.OK
def test_unfollow_request_fail(client):
data = {}
url = "/unfollow"
response = client.post(url, json=data)
assert response.status_code == HTTPStatus.BAD_REQUEST
def test_followers_id_request(client):
data = {"user": os.environ["RETWEET_ID"]}
url = "/followers"
response = client.post(url, json=data)
assert response.status_code == HTTPStatus.OK
def test_followers_screen_name_request(client):
data = {"screen_name": os.environ["SCREEN_NAME"]}
url = "/followers"
response = client.post(url, json=data)
assert response.status_code == HTTPStatus.OK
def test_followers_request_fail(client):
data = {}
url = "/followers"
response = client.post(url, json=data)
assert response.status_code == HTTPStatus.BAD_REQUEST
def test_subscribe_request(client):
data = {
"data": {"track": "Article370", "isTesting": True},
"id": "307d6a9a-60da-4915-9ee5-bae3c0238874",
"endpoint": "https://webhook.site/#!/832c4ebb-8b80-4330-bbfb-337aea98a579",
}
url = "/stream/subscribe"
response = client.post(url, json=data)
assert response.status_code == HTTPStatus.OK
def test_subscribe_request_fail(client):
data = {
"data": {"isTesting": True},
"id": "307d6a9a-60da-4915-9ee5-bae3c023887f",
"endpoint": "https://webhook.site/#!/832c4ebb-8b80-4330-bbfb-337aea98a579",
}
url = "/stream/subscribe"
response = client.post(url, json=data)
assert response.status_code == HTTPStatus.BAD_REQUEST
| 28.305085
| 83
| 0.682635
| 416
| 3,340
| 5.298077
| 0.129808
| 0.047641
| 0.122505
| 0.142922
| 0.84392
| 0.779038
| 0.779038
| 0.747278
| 0.747278
| 0.747278
| 0
| 0.030004
| 0.181737
| 3,340
| 117
| 84
| 28.547009
| 0.776436
| 0
| 0
| 0.643678
| 0
| 0
| 0.157485
| 0.021557
| 0
| 0
| 0
| 0
| 0.172414
| 1
| 0.172414
| false
| 0
| 0.045977
| 0
| 0.218391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
161d07ab1232b0a05380410d4438fa4281cc2323
| 21
|
py
|
Python
|
Feature/hessian_matrix_det.py
|
Joevaen/Scikit-image_On_CT
|
e3bf0eeadc50691041b4b7c44a19d07546a85001
|
[
"Apache-2.0"
] | null | null | null |
Feature/hessian_matrix_det.py
|
Joevaen/Scikit-image_On_CT
|
e3bf0eeadc50691041b4b7c44a19d07546a85001
|
[
"Apache-2.0"
] | null | null | null |
Feature/hessian_matrix_det.py
|
Joevaen/Scikit-image_On_CT
|
e3bf0eeadc50691041b4b7c44a19d07546a85001
|
[
"Apache-2.0"
] | null | null | null |
# 计算图像上的近似Hessian行列式。
| 21
| 21
| 0.857143
| 1
| 21
| 18
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047619
| 21
| 1
| 21
| 21
| 0.9
| 0.904762
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
162445e74600be1a9947649c0cfc4437b53878a0
| 12,083
|
py
|
Python
|
Tests/Plot/test_Lam_Mag_inset_plot.py
|
mxgnsr/pyleecan
|
2b0a04e4ae67c073a91362ab42332908fef53bdd
|
[
"Apache-2.0"
] | 2
|
2019-06-08T15:04:39.000Z
|
2020-09-07T13:32:22.000Z
|
Tests/Plot/test_Lam_Mag_inset_plot.py
|
mxgnsr/pyleecan
|
2b0a04e4ae67c073a91362ab42332908fef53bdd
|
[
"Apache-2.0"
] | null | null | null |
Tests/Plot/test_Lam_Mag_inset_plot.py
|
mxgnsr/pyleecan
|
2b0a04e4ae67c073a91362ab42332908fef53bdd
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from os.path import join
from unittest import TestCase
import matplotlib.pyplot as plt
from numpy import pi
from pyleecan.Classes.Frame import Frame
from pyleecan.Classes.LamSlotMag import LamSlotMag
from pyleecan.Classes.Lamination import Lamination
from pyleecan.Classes.MagnetType10 import MagnetType10
from pyleecan.Classes.MagnetType11 import MagnetType11
from pyleecan.Classes.MagnetType12 import MagnetType12
from pyleecan.Classes.MagnetType13 import MagnetType13
from pyleecan.Classes.MagnetType14 import MagnetType14
from pyleecan.Classes.Shaft import Shaft
from pyleecan.Classes.VentilationCirc import VentilationCirc
from pyleecan.Classes.VentilationTrap import VentilationTrap
from pyleecan.Classes.MatMagnetics import MatMagnetics
from pyleecan.Classes.SlotMFlat import SlotMFlat
from pyleecan.Classes.SlotMPolar import SlotMPolar
from Tests import save_plot_path as save_path
class test_Lam_Mag_inset_plot(TestCase):
"""unittest for Lamination with inset magnet plot"""
def test_Lam_Mag_10_inset(self):
"""Test machine plot with Magnet 10 inset"""
plt.close("all")
rotor = LamSlotMag(
Rint=40e-3,
Rext=100e-3,
is_internal=True,
is_stator=False,
L1=0.45,
Nrvd=1,
Wrvd=0.05,
)
magnet = [MagnetType10(Lmag=0.5, Hmag=0.02, Wmag=0.04)]
rotor.slot = SlotMFlat(Zs=4, W0=0.04, H0=0.02, magnet=magnet)
rotor.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
rotor.axial_vent.append(VentilationCirc(Zh=4, Alpha0=0, D0=2.5e-3, H0=50e-3))
rotor.axial_vent.append(VentilationCirc(Zh=8, Alpha0=0, D0=5e-3, H0=60e-3))
rotor.axial_vent.append(VentilationCirc(Zh=12, Alpha0=0, D0=10e-3, H0=70e-3))
stator = LamSlotMag(
Rint=110e-3,
Rext=200e-3,
is_internal=False,
is_stator=True,
L1=0.45,
Nrvd=1,
Wrvd=0.05,
)
magnet2 = [MagnetType10(Lmag=0.5, Hmag=0.02, Wmag=0.04)]
stator.slot = SlotMFlat(Zs=8, W0=0.04, W3=2 * pi / 64, H0=0.02, magnet=magnet2)
stator.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
stator.axial_vent.append(
VentilationTrap(Zh=6, Alpha0=pi / 6, W1=10e-3, W2=20e-3, D0=0.02, H0=0.140)
)
stator.axial_vent.append(
VentilationTrap(Zh=6, Alpha0=pi / 6, W1=20e-3, W2=40e-3, D0=0.02, H0=0.170)
)
rotor.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 30)
fig.savefig(join(save_path, "test_Lam_Mag_10i_2-Rotor.png"))
stator.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 22)
fig.savefig(join(save_path, "test_Lam_Mag_10i_3-Stator.png"))
rotor.slot.magnet = []
rotor.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 26)
fig.savefig(join(save_path, "test_Lam_Mag_10i_4-Rotor_no_mag.png"))
def test_Lam_Mag_10_inset_2_mag(self):
"""Test machine plot with Magnet 10 inset with two magnet in the slot"""
plt.close("all")
rotor = LamSlotMag(
Rint=40e-3,
Rext=100e-3,
is_internal=True,
is_stator=False,
L1=0.45,
Nrvd=1,
Wrvd=0.05,
)
rotor.slot = SlotMFlat(
Zs=4,
W0=0.03,
H0=0.02,
W3=2 * pi / 60,
magnet=[
MagnetType10(Lmag=0.5, Hmag=0.015, Wmag=0.03),
MagnetType10(Lmag=0.5, Hmag=0.015, Wmag=0.03),
],
)
rotor.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
rotor.axial_vent.append(VentilationCirc(Zh=4, Alpha0=0, D0=2.5e-3, H0=50e-3))
rotor.axial_vent.append(VentilationCirc(Zh=8, Alpha0=0, D0=5e-3, H0=60e-3))
rotor.axial_vent.append(VentilationCirc(Zh=12, Alpha0=0, D0=10e-3, H0=70e-3))
stator = LamSlotMag(
Rint=110e-3,
Rext=200e-3,
is_internal=False,
is_stator=True,
L1=0.45,
Nrvd=1,
Wrvd=0.05,
)
stator.slot = SlotMFlat(
Zs=8,
W0=0.03,
W3=2 * pi / 64,
H0=0.02,
magnet=[
MagnetType10(Lmag=0.5, Hmag=0.025, Wmag=0.03),
MagnetType10(Lmag=0.5, Hmag=0.025, Wmag=0.03),
],
)
stator.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
stator.axial_vent.append(
VentilationTrap(Zh=6, Alpha0=pi / 6, W1=10e-3, W2=20e-3, D0=0.02, H0=0.140)
)
stator.axial_vent.append(
VentilationTrap(Zh=6, Alpha0=pi / 6, W1=20e-3, W2=40e-3, D0=0.02, H0=0.170)
)
rotor.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 34)
fig.savefig(join(save_path, "test_Lam_Mag_10i_2_Mag_2-Rotor.png"))
stator.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 30)
fig.savefig(join(save_path, "test_Lam_Mag_10i_3_Mag_2-Stator.png"))
def test_Lam_Mag_11_inset(self):
"""Test machine plot with Magnet 11 inset"""
plt.close("all")
rotor = LamSlotMag(
Rint=40e-3,
Rext=90e-3,
is_internal=True,
is_stator=False,
L1=0.4,
Nrvd=2,
Wrvd=0.05,
)
rotor.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
magnet = [MagnetType11(Lmag=0.5, Hmag=0.01, Wmag=pi / 8)]
rotor.slot = SlotMPolar(Zs=8, W0=pi / 8, H0=0.01, W3=2 * pi / 64, magnet=magnet)
stator = LamSlotMag(
Rint=115e-3,
Rext=200e-3,
is_internal=False,
is_stator=True,
L1=0.4,
Nrvd=2,
Wrvd=0.05,
)
magnet2 = [MagnetType11(Lmag=0.35, Hmag=0.03, Wmag=pi / 4)]
stator.slot = SlotMPolar(
Zs=4, W0=pi / 4, H0=0.02, W3=2 * pi / 64, magnet=magnet2
)
stator.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
rotor.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 10)
fig.savefig(join(save_path, "test_Lam_Mag_11i_2-Rotor.png"))
stator.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 6)
fig.savefig(join(save_path, "test_Lam_Mag_11i_3-Stator.png"))
rotor.slot.magnet = []
rotor.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 2)
fig.savefig(join(save_path, "test_Lam_Mag_11i_4-Rotor_no_mag.png"))
def test_Lam_Mag_11_inset_2_mag(self):
"""Test machine plot with Magnet 11 inset with two magnet in the slot"""
plt.close("all")
rotor = LamSlotMag(
Rint=40e-3,
Rext=90e-3,
is_internal=True,
is_stator=False,
L1=0.4,
Nrvd=2,
Wrvd=0.05,
)
rotor.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
rotor.slot = SlotMPolar(
Zs=8,
W0=pi / 12,
H0=0.01,
W3=pi / 18,
magnet=[
MagnetType11(Lmag=0.5, Hmag=0.01, Wmag=pi / 12),
MagnetType11(Lmag=0.5, Hmag=0.01, Wmag=pi / 12),
],
)
stator = LamSlotMag(
Rint=115e-3,
Rext=200e-3,
is_internal=False,
is_stator=True,
L1=0.4,
Nrvd=2,
Wrvd=0.05,
)
stator.slot = SlotMPolar(
Zs=4,
W0=pi / 10,
H0=0.02,
W3=2 * pi / 50,
magnet=[
MagnetType11(Lmag=0.35, Hmag=0.03, Wmag=pi / 10),
MagnetType11(Lmag=0.35, Hmag=0.03, Wmag=pi / 10),
],
)
stator.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
rotor.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 18)
fig.savefig(join(save_path, "test_Lam_Mag_11i_2_Mag_2-Rotor.png"))
stator.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 10)
fig.savefig(join(save_path, "test_Lam_Mag_11i_3_Mag_2-Stator.png"))
def test_Lam_Mag_12_inset(self):
"""Test machine plot with Magnet 12 inset"""
plt.close("all")
rotor = LamSlotMag(
Rint=40e-3,
Rext=90e-3,
is_internal=True,
is_stator=False,
L1=0.35,
Nrvd=3,
Wrvd=0.05,
)
magnet = [MagnetType12(Lmag=0.5, Hmag=0.02, Wmag=0.04)]
rotor.slot = SlotMFlat(Zs=8, W0=0.04, H0=0.02, W3=2 * pi / 64, magnet=magnet)
rotor.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
stator = LamSlotMag(
Rint=110e-3,
Rext=200e-3,
is_internal=False,
is_stator=True,
L1=0.35,
Nrvd=3,
Wrvd=0.05,
)
magnet2 = [MagnetType12(Lmag=0.5, Hmag=0.03, Wmag=0.04)]
stator.slot = SlotMFlat(Zs=4, W0=0.04, H0=0.02, W3=2 * pi / 64, magnet=magnet2)
stator.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
rotor.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 10)
fig.savefig(join(save_path, "test_Lam_Mag_12i_2-Rotor.png"))
stator.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 6)
fig.savefig(join(save_path, "test_Lam_Mag_12i_3-Stator.png"))
def test_Lam_Mag_13_inset(self):
"""Test machine plot with Magnet 12 inset"""
plt.close("all")
rotor = LamSlotMag(
Rint=40e-3,
Rext=90e-3,
is_internal=True,
is_stator=False,
L1=0.42,
Nrvd=4,
Wrvd=0.02,
)
magnet = [MagnetType13(Lmag=0.5, Hmag=0.02, Wmag=0.04, Rtop=0.04)]
rotor.slot = SlotMFlat(Zs=8, W0=0.04, H0=0.02, W3=2 * pi / 64, magnet=magnet)
rotor.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
stator = LamSlotMag(
Rint=110e-3,
Rext=200e-3,
is_internal=False,
is_stator=True,
L1=0.42,
Nrvd=4,
Wrvd=0.02,
)
magnet2 = [MagnetType13(Lmag=0.5, Hmag=0.02, Wmag=0.04, Rtop=0.04)]
stator.slot = SlotMFlat(Zs=4, W0=0.04, H0=0.025, W3=2 * pi / 64, magnet=magnet2)
stator.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
rotor.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 10)
fig.savefig(join(save_path, "test_Lam_Mag_13i_2-Rotor.png"))
stator.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 6)
fig.savefig(join(save_path, "test_Lam_Mag_13i_3-Stator.png"))
def test_Lam_Mag_14_inset(self):
"""Test machine plot with Magnet 14 inset"""
plt.close("all")
rotor = LamSlotMag(
Rint=40e-3,
Rext=90e-3,
is_internal=True,
is_stator=False,
L1=0.4,
Nrvd=5,
Wrvd=0.02,
)
magnet = [MagnetType14(Lmag=0.5, Hmag=0.02, Wmag=0.628, Rtop=0.04)]
rotor.slot = SlotMPolar(Zs=4, W0=0.628, H0=0.02, magnet=magnet)
rotor.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
stator = Lamination(
Rint=130e-3,
Rext=0.2,
is_internal=False,
is_stator=True,
L1=0.4,
Nrvd=5,
Wrvd=0.02,
)
stator.mat_type.mag = MatMagnetics(Wlam=0.5e-3)
rotor.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 6)
fig.savefig(join(save_path, "test_Lam_Mag_14i_2-Rotor.png"))
stator.plot()
fig = plt.gcf()
self.assertEqual(len(fig.axes[0].patches), 2)
fig.savefig(join(save_path, "test_Lam_Mag_14i_3-Stator.png"))
| 32.394102
| 88
| 0.548953
| 1,691
| 12,083
| 3.814311
| 0.084565
| 0.026047
| 0.037209
| 0.032248
| 0.84186
| 0.837054
| 0.822946
| 0.77845
| 0.749612
| 0.707752
| 0
| 0.106731
| 0.317636
| 12,083
| 372
| 89
| 32.481183
| 0.675561
| 0.032939
| 0
| 0.688679
| 0
| 0
| 0.044143
| 0.042339
| 0
| 0
| 0
| 0
| 0.050314
| 1
| 0.022013
| false
| 0
| 0.059748
| 0
| 0.084906
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
16cd3b5edc1d2631d88c4bf703f2839919beeee8
| 23
|
py
|
Python
|
amktools/wav2brr/__init__.py
|
jimbo1qaz/amktools
|
25a65d7c9c09a2622065fcacdaed82e1f9d7fb2c
|
[
"BSD-3-Clause"
] | 2
|
2020-03-14T06:13:03.000Z
|
2022-03-03T17:53:51.000Z
|
amktools/wav2brr/__init__.py
|
nyanpasu64/amktools
|
25a65d7c9c09a2622065fcacdaed82e1f9d7fb2c
|
[
"BSD-3-Clause"
] | 14
|
2018-06-19T14:48:58.000Z
|
2018-10-28T07:02:27.000Z
|
amktools/wav2brr/__init__.py
|
jimbo1qaz/amktools
|
25a65d7c9c09a2622065fcacdaed82e1f9d7fb2c
|
[
"BSD-3-Clause"
] | null | null | null |
from .wav2brr import *
| 11.5
| 22
| 0.73913
| 3
| 23
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0.173913
| 23
| 1
| 23
| 23
| 0.842105
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc42d6c23c459984e76b67314546ca349cc4a276
| 970
|
py
|
Python
|
050_AnimeGANv2/01_float32/05_float16_quantization.py
|
IgiArdiyanto/PINTO_model_zoo
|
9247b56a7dff37f28a8a7822a7ef4dd9adf7234d
|
[
"MIT"
] | 1,529
|
2019-12-11T13:36:23.000Z
|
2022-03-31T18:38:27.000Z
|
050_AnimeGANv2/01_float32/05_float16_quantization.py
|
IgiArdiyanto/PINTO_model_zoo
|
9247b56a7dff37f28a8a7822a7ef4dd9adf7234d
|
[
"MIT"
] | 200
|
2020-01-06T09:24:42.000Z
|
2022-03-31T17:29:08.000Z
|
050_AnimeGANv2/01_float32/05_float16_quantization.py
|
IgiArdiyanto/PINTO_model_zoo
|
9247b56a7dff37f28a8a7822a7ef4dd9adf7234d
|
[
"MIT"
] | 288
|
2020-02-21T14:56:02.000Z
|
2022-03-30T03:00:35.000Z
|
### tensorflow==2.3.0
import tensorflow as tf
# Float16 Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_Hayao')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
with open('animeganv2_hayao_256x256_float16_quant.tflite', 'wb') as w:
w.write(tflite_quant_model)
print("Float16 Quantization complete! - animeganv2_hayao_256x256_float16_quant.tflite")
# Float16 Quantization - Input/Output=float32
converter = tf.lite.TFLiteConverter.from_saved_model('saved_model_Paprika')
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.target_spec.supported_types = [tf.float16]
tflite_quant_model = converter.convert()
with open('animeganv2_paprika_256x256_float16_quant.tflite', 'wb') as w:
w.write(tflite_quant_model)
print("Float16 Quantization complete! - animeganv2_paprika_256x256_float16_quant.tflite")
| 46.190476
| 89
| 0.819588
| 126
| 970
| 6.02381
| 0.309524
| 0.100132
| 0.084321
| 0.131752
| 0.940711
| 0.940711
| 0.843215
| 0.843215
| 0.843215
| 0.843215
| 0
| 0.061247
| 0.074227
| 970
| 21
| 89
| 46.190476
| 0.783964
| 0.108247
| 0
| 0.533333
| 0
| 0
| 0.337209
| 0.213953
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0.133333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bc671cb3e653d8c94ba3f041918cb3d7ad0be0e4
| 41
|
py
|
Python
|
comment_parser/__init__.py
|
DallogFheir/beginner-projects-bot
|
a19a2194f0c8fffb2a6098abd1242b18b1aebab8
|
[
"MIT"
] | null | null | null |
comment_parser/__init__.py
|
DallogFheir/beginner-projects-bot
|
a19a2194f0c8fffb2a6098abd1242b18b1aebab8
|
[
"MIT"
] | 1
|
2021-06-12T16:15:53.000Z
|
2021-08-12T21:32:22.000Z
|
comment_parser/__init__.py
|
DallogFheir/beginner-projects-bot
|
a19a2194f0c8fffb2a6098abd1242b18b1aebab8
|
[
"MIT"
] | null | null | null |
from .comment_parser import CommentParser
| 41
| 41
| 0.902439
| 5
| 41
| 7.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073171
| 41
| 1
| 41
| 41
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc7333e25a48def30179d76d4ea3dfb0cdb7dabd
| 21
|
py
|
Python
|
__init__.py
|
dustinmaurer/options-strategy-backtester
|
1974bfd672d163a39f928208f36a8470a99e1a48
|
[
"MIT"
] | null | null | null |
__init__.py
|
dustinmaurer/options-strategy-backtester
|
1974bfd672d163a39f928208f36a8470a99e1a48
|
[
"MIT"
] | null | null | null |
__init__.py
|
dustinmaurer/options-strategy-backtester
|
1974bfd672d163a39f928208f36a8470a99e1a48
|
[
"MIT"
] | 1
|
2021-04-11T07:18:55.000Z
|
2021-04-11T07:18:55.000Z
|
from . import option
| 10.5
| 20
| 0.761905
| 3
| 21
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
bc8275c83aefd5421141ae89d431747724b6b3a7
| 116
|
py
|
Python
|
UserServer/login/__init__.py
|
cxiaolong/XboxBackend
|
d42033c73d29a8ba43e359b32020fa55d1740da1
|
[
"MIT"
] | null | null | null |
UserServer/login/__init__.py
|
cxiaolong/XboxBackend
|
d42033c73d29a8ba43e359b32020fa55d1740da1
|
[
"MIT"
] | null | null | null |
UserServer/login/__init__.py
|
cxiaolong/XboxBackend
|
d42033c73d29a8ba43e359b32020fa55d1740da1
|
[
"MIT"
] | null | null | null |
from flask import Blueprint
login_bp = Blueprint('login', __name__, url_prefix='/user/login/')
from . import login
| 23.2
| 66
| 0.758621
| 16
| 116
| 5.125
| 0.625
| 0.341463
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 116
| 5
| 67
| 23.2
| 0.803922
| 0
| 0
| 0
| 0
| 0
| 0.145299
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0.666667
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
|
0
| 6
|
bca47effc513a0d266514a16536ecf8c6fad75f0
| 17,349
|
py
|
Python
|
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_business_card_from_url_async.py
|
rorezende/azure-sdk-for-python
|
efd65877b7d8895aeb1bcdd2175d757cd82136b3
|
[
"MIT"
] | 8
|
2021-01-13T23:44:08.000Z
|
2021-03-17T10:13:36.000Z
|
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_business_card_from_url_async.py
|
Co0olboi/azure-sdk-for-python
|
167351fb00f98de55f9a43828ca46394392cbdd0
|
[
"MIT"
] | null | null | null |
sdk/formrecognizer/azure-ai-formrecognizer/tests/test_business_card_from_url_async.py
|
Co0olboi/azure-sdk-for-python
|
167351fb00f98de55f9a43828ca46394392cbdd0
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import pytest
import functools
from datetime import date, time
from azure.core.exceptions import HttpResponseError, ServiceRequestError, ClientAuthenticationError
from azure.core.credentials import AzureKeyCredential
from azure.ai.formrecognizer._generated.models import AnalyzeOperationResult
from azure.ai.formrecognizer._response_handlers import prepare_prebuilt_models
from azure.ai.formrecognizer import FormRecognizerApiVersion
from azure.ai.formrecognizer.aio import FormRecognizerClient
from testcase import GlobalFormRecognizerAccountPreparer
from asynctestcase import AsyncFormRecognizerTest
from testcase import GlobalClientPreparer as _GlobalClientPreparer
GlobalClientPreparer = functools.partial(_GlobalClientPreparer, FormRecognizerClient)
class TestBusinessCardFromUrlAsync(AsyncFormRecognizerTest):
@GlobalFormRecognizerAccountPreparer()
async def test_polling_interval(self, resource_group, location, form_recognizer_account, form_recognizer_account_key):
client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential(form_recognizer_account_key), polling_interval=7)
self.assertEqual(client._client._config.polling_interval, 7)
async with client:
poller = await client.begin_recognize_business_cards_from_url(self.business_card_url_jpg, polling_interval=6)
await poller.wait()
self.assertEqual(poller._polling_method._timeout, 6)
poller2 = await client.begin_recognize_business_cards_from_url(self.business_card_url_jpg)
await poller2.wait()
self.assertEqual(poller2._polling_method._timeout, 7) # goes back to client default
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
async def test_business_cards_encoded_url(self, client):
async with client:
try:
poller = await client.begin_recognize_business_cards_from_url("https://fakeuri.com/blank%20space")
except HttpResponseError as e:
self.assertIn("https://fakeuri.com/blank%20space", e.response.request.body)
@GlobalFormRecognizerAccountPreparer()
async def test_business_card_url_bad_endpoint(self, resource_group, location, form_recognizer_account, form_recognizer_account_key):
with self.assertRaises(ServiceRequestError):
client = FormRecognizerClient("http://notreal.azure.com", AzureKeyCredential(form_recognizer_account_key))
async with client:
poller = await client.begin_recognize_business_cards_from_url(self.business_card_url_jpg)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
async def test_authentication_successful_key(self, client):
async with client:
poller = await client.begin_recognize_business_cards_from_url(self.business_card_url_jpg)
result = await poller.result()
@GlobalFormRecognizerAccountPreparer()
async def test_authentication_bad_key(self, resource_group, location, form_recognizer_account, form_recognizer_account_key):
client = FormRecognizerClient(form_recognizer_account, AzureKeyCredential("xxxx"))
async with client:
with self.assertRaises(ClientAuthenticationError):
poller = await client.begin_recognize_business_cards_from_url(self.business_card_url_jpg)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
async def test_business_card_bad_url(self, client):
async with client:
with self.assertRaises(HttpResponseError):
poller = await client.begin_recognize_business_cards_from_url("https://badurl.jpg")
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
async def test_business_card_url_pass_stream(self, client):
async with client:
with open(self.business_card_png, "rb") as business_card:
with self.assertRaises(HttpResponseError):
poller = await client.begin_recognize_business_cards_from_url(business_card)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
async def test_business_card_url_transform_png(self, client):
responses = []
def callback(raw_response, _, headers):
analyze_result = client._deserialize(AnalyzeOperationResult, raw_response)
extracted_business_card = prepare_prebuilt_models(analyze_result)
responses.append(analyze_result)
responses.append(extracted_business_card)
async with client:
poller = await client.begin_recognize_business_cards_from_url(
business_card_url=self.business_card_url_png,
include_field_elements=True,
cls=callback
)
result = await poller.result()
raw_response = responses[0]
returned_model = responses[1]
business_card = returned_model[0]
actual = raw_response.analyze_result.document_results[0].fields
read_results = raw_response.analyze_result.read_results
document_results = raw_response.analyze_result.document_results
# check dict values
self.assertFormFieldTransformCorrect(business_card.fields.get("ContactNames"), actual.get("ContactNames"), read_results)
self.assertFormFieldTransformCorrect(business_card.fields.get("JobTitles"), actual.get("JobTitles"), read_results)
self.assertFormFieldTransformCorrect(business_card.fields.get("Departments"), actual.get("Departments"), read_results)
self.assertFormFieldTransformCorrect(business_card.fields.get("Emails"), actual.get("Emails"), read_results)
self.assertFormFieldTransformCorrect(business_card.fields.get("Websites"), actual.get("Websites"), read_results)
self.assertFormFieldTransformCorrect(business_card.fields.get("MobilePhones"), actual.get("MobilePhones"), read_results)
self.assertFormFieldTransformCorrect(business_card.fields.get("OtherPhones"), actual.get("OtherPhones"), read_results)
self.assertFormFieldTransformCorrect(business_card.fields.get("Faxes"), actual.get("Faxes"), read_results)
self.assertFormFieldTransformCorrect(business_card.fields.get("Addresses"), actual.get("Addresses"), read_results)
self.assertFormFieldTransformCorrect(business_card.fields.get("CompanyNames"), actual.get("CompanyNames"), read_results)
# check page range
self.assertEqual(business_card.page_range.first_page_number, document_results[0].page_range[0])
self.assertEqual(business_card.page_range.last_page_number, document_results[0].page_range[1])
# Check page metadata
self.assertFormPagesTransformCorrect(business_card.pages, read_results)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
async def test_business_card_url_transform_jpg(self, client):
    """Recognize a JPG business card from a URL and verify the transformed
    RecognizedForm models agree with the raw AnalyzeOperationResult,
    field by field.
    """
    responses = []

    def callback(raw_response, _, headers):
        # Capture both the raw deserialized result and the transformed
        # models so they can be compared below.
        analyze_result = client._deserialize(AnalyzeOperationResult, raw_response)
        extracted_business_card = prepare_prebuilt_models(analyze_result)
        responses.append(analyze_result)
        responses.append(extracted_business_card)

    async with client:
        poller = await client.begin_recognize_business_cards_from_url(
            business_card_url=self.business_card_url_jpg,
            include_field_elements=True,
            cls=callback
        )
        # The return value is not needed; `responses` is populated by the
        # callback. (The unused `result` and `page_results` locals of the
        # original were removed.)
        await poller.result()

        raw_response = responses[0]
        returned_model = responses[1]
        business_card = returned_model[0]
        actual = raw_response.analyze_result.document_results[0].fields
        read_results = raw_response.analyze_result.read_results
        document_results = raw_response.analyze_result.document_results

        # check dict values — every prebuilt business-card field
        for field_name in (
            "ContactNames", "JobTitles", "Departments", "Emails", "Websites",
            "MobilePhones", "OtherPhones", "Faxes", "Addresses", "CompanyNames",
        ):
            self.assertFormFieldTransformCorrect(
                business_card.fields.get(field_name),
                actual.get(field_name),
                read_results,
            )

        # check page range
        self.assertEqual(business_card.page_range.first_page_number, document_results[0].page_range[0])
        self.assertEqual(business_card.page_range.last_page_number, document_results[0].page_range[1])

        # Check page metadata
        self.assertFormPagesTransformCorrect(business_card.pages, read_results)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
async def test_business_card_jpg(self, client):
    """Recognize the sample JPG business card from a URL and spot-check
    each populated field against its known value."""
    async with client:
        poller = await client.begin_recognize_business_cards_from_url(self.business_card_url_jpg)
        recognized = await poller.result()

        self.assertEqual(len(recognized), 1)
        card_fields = recognized[0].fields

        # Single contact, with first and last name split out.
        contact_names = card_fields.get("ContactNames")
        self.assertEqual(len(contact_names.value), 1)
        self.assertEqual(contact_names.value[0].value['FirstName'].value, 'Avery')
        self.assertEqual(contact_names.value[0].value['LastName'].value, 'Smith')

        # NOTE(review): 'Cloud & Al Department' ("Al", not "AI") is presumed
        # to be the service's literal OCR output for the sample image —
        # verify against the test recording before "fixing" it.
        single_valued_fields = [
            ("JobTitles", "Senior Researcher"),
            ("Departments", "Cloud & Al Department"),
            ("Emails", "avery.smith@contoso.com"),
            ("Websites", "https://www.contoso.com/"),
            ("Addresses", "2 Kingdom Street Paddington, London, W2 6BD"),
            ("CompanyNames", "Contoso"),
        ]
        for field_name, expected_value in single_valued_fields:
            field = card_fields.get(field_name)
            self.assertEqual(len(field.value), 1)
            self.assertEqual(field.value[0].value, expected_value)

        # TODO: uncomment https://github.com/Azure/azure-sdk-for-python/issues/14300
        # self.assertEqual(len(card_fields.get("MobilePhones").value), 1)
        # self.assertEqual(card_fields.get("MobilePhones").value[0].value, "https://www.contoso.com/")
        # self.assertEqual(len(card_fields.get("OtherPhones").value), 1)
        # self.assertEqual(card_fields.get("OtherPhones").value[0].value, "https://www.contoso.com/")
        # self.assertEqual(len(card_fields.get("Faxes").value), 1)
        # self.assertEqual(card_fields.get("Faxes").value[0].value, "https://www.contoso.com/")
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
async def test_business_card_png(self, client):
    """Recognize the sample PNG business card from a URL and spot-check
    each populated field against its known value."""
    async with client:
        poller = await client.begin_recognize_business_cards_from_url(self.business_card_url_png)
        recognized = await poller.result()

        self.assertEqual(len(recognized), 1)
        card_fields = recognized[0].fields

        # Single contact, with first and last name split out.
        contact_names = card_fields.get("ContactNames")
        self.assertEqual(len(contact_names.value), 1)
        self.assertEqual(contact_names.value[0].value['FirstName'].value, 'Avery')
        self.assertEqual(contact_names.value[0].value['LastName'].value, 'Smith')

        # NOTE(review): 'Cloud & Al Department' ("Al", not "AI") is presumed
        # to be the service's literal OCR output for the sample image —
        # verify against the test recording before "fixing" it.
        single_valued_fields = [
            ("JobTitles", "Senior Researcher"),
            ("Departments", "Cloud & Al Department"),
            ("Emails", "avery.smith@contoso.com"),
            ("Websites", "https://www.contoso.com/"),
            ("Addresses", "2 Kingdom Street Paddington, London, W2 6BD"),
            ("CompanyNames", "Contoso"),
        ]
        for field_name, expected_value in single_valued_fields:
            field = card_fields.get(field_name)
            self.assertEqual(len(field.value), 1)
            self.assertEqual(field.value[0].value, expected_value)

        # TODO: uncomment https://github.com/Azure/azure-sdk-for-python/issues/14300
        # self.assertEqual(len(card_fields.get("MobilePhones").value), 1)
        # self.assertEqual(card_fields.get("MobilePhones").value[0].value, "https://www.contoso.com/")
        # self.assertEqual(len(card_fields.get("OtherPhones").value), 1)
        # self.assertEqual(card_fields.get("OtherPhones").value[0].value, "https://www.contoso.com/")
        # self.assertEqual(len(card_fields.get("Faxes").value), 1)
        # self.assertEqual(card_fields.get("Faxes").value[0].value, "https://www.contoso.com/")
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
async def test_business_card_jpg_include_field_elements(self, client):
    """With include_field_elements=True, every non-container field carries
    populated field elements in its value_data."""
    async with client:
        poller = await client.begin_recognize_business_cards_from_url(
            self.business_card_url_jpg, include_field_elements=True
        )
        result = await poller.result()

        self.assertEqual(len(result), 1)
        business_card = result[0]
        self.assertFormPagesHasValues(business_card.pages)
        for name, field in business_card.fields.items():
            # Container fields (lists/dicts) don't carry value_data directly.
            if field.value_type not in ["list", "dictionary"]:
                # BUG FIX: the original referenced an undefined `receipt`
                # variable (copy-paste from the receipt tests), raising
                # NameError; `business_card` is the object under test.
                self.assertFieldElementsHasValues(
                    field.value_data.field_elements,
                    business_card.page_range.first_page_number,
                )
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
@pytest.mark.live_test_only
async def test_business_card_continuation_token(self, client):
    """A recognition operation can be resumed from its continuation token."""
    async with client:
        first_poller = await client.begin_recognize_business_cards_from_url(self.business_card_url_jpg)
        token = first_poller.continuation_token()
        resumed_poller = await client.begin_recognize_business_cards_from_url(
            self.business_card_url_jpg, continuation_token=token
        )
        self.assertIsNotNone(await resumed_poller.result())
        # necessary so azure-devtools doesn't throw assertion error
        await first_poller.wait()
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer(client_kwargs={"api_version": FormRecognizerApiVersion.V2_0})
async def test_business_card_v2(self, client):
    """Business-card recognition is rejected on API version 2.0."""
    expected_message = (
        "Method 'begin_recognize_business_cards_from_url' is only available "
        "for API version V2_1_PREVIEW and up"
    )
    with pytest.raises(ValueError) as e:
        async with client:
            await client.begin_recognize_business_cards_from_url(self.business_card_url_jpg)
    assert expected_message in str(e.value)
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
async def test_business_card_locale_specified(self, client):
    """The locale keyword is forwarded as the `locale` query parameter."""
    async with client:
        poller = await client.begin_recognize_business_cards_from_url(
            self.business_card_url_jpg, locale="en-IN"
        )
        sent_locale = poller._polling_method._initial_response.http_response.request.query['locale']
        assert sent_locale == 'en-IN'
        await poller.wait()
@GlobalFormRecognizerAccountPreparer()
@GlobalClientPreparer()
async def test_business_card_locale_error(self, client):
    """An invalid locale surfaces as an HttpResponseError mentioning 'locale'."""
    with pytest.raises(HttpResponseError) as e:
        async with client:
            await client.begin_recognize_business_cards_from_url(
                self.business_card_url_jpg, locale="not a locale"
            )
    assert "locale" in e.value.error.message
| 55.784566
| 136
| 0.731051
| 1,908
| 17,349
| 6.404612
| 0.121593
| 0.109002
| 0.092799
| 0.106547
| 0.812439
| 0.786088
| 0.764648
| 0.764648
| 0.742308
| 0.733879
| 0
| 0.006744
| 0.16243
| 17,349
| 310
| 137
| 55.964516
| 0.834217
| 0.089631
| 0
| 0.671111
| 0
| 0
| 0.080627
| 0.005519
| 0
| 0
| 0
| 0.003226
| 0.324444
| 1
| 0.008889
| false
| 0.004444
| 0.053333
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bcac76f70b5f42d37acc2ca18a9d3eb4a406e70b
| 1,026
|
py
|
Python
|
Ship.py
|
miethe/DnD-Spaceships
|
a37afdb4f329123fcf7b0e06ea89e2fe6e3ea7bc
|
[
"MIT"
] | null | null | null |
Ship.py
|
miethe/DnD-Spaceships
|
a37afdb4f329123fcf7b0e06ea89e2fe6e3ea7bc
|
[
"MIT"
] | null | null | null |
Ship.py
|
miethe/DnD-Spaceships
|
a37afdb4f329123fcf7b0e06ea89e2fe6e3ea7bc
|
[
"MIT"
] | null | null | null |
class ship:
    """A spaceship with a name and a location within the Maelstrom.

    The lowercase class name and the Java-style accessors are kept for
    backward compatibility with existing callers.
    """

    # Class-level defaults, kept for backward compatibility with any code
    # that reads them off the class. Instances get their own copies in
    # __init__ (the original only initialized name/location per instance,
    # leaving quadrant/sector as shared class attributes until the setter
    # was called).
    name = "Unknown"
    maelstrom_location_data = None
    quadrant = ''
    sector = ''

    def __init__(self, name="Unknown", maelstrom_location_data=None):
        """Create a ship with an optional name and Maelstrom location object."""
        self.name = name
        self.maelstrom_location_data = maelstrom_location_data
        self.quadrant = ''
        self.sector = ''

    def __repr__(self):
        return "ship(name={!r})".format(self.name)

    def getMaelstromLocationData(self):
        """Return the ship's current Maelstrom location object (may be None)."""
        return self.maelstrom_location_data

    def setMaelstromCoordinates(self, quadrant, sector):
        """Record the ship's quadrant and sector directly on the ship."""
        self.quadrant = quadrant
        self.sector = sector

    def setNewMaelstromLocation(self, maelstrom_location_data):
        """Replace the ship's Maelstrom location object."""
        self.maelstrom_location_data = maelstrom_location_data

    # The following delegate to the location object; they raise
    # AttributeError if no location has been set.
    def getShipCoordinates(self):
        return self.maelstrom_location_data.getCoordinates()

    def getShipQuadrant(self):
        return self.maelstrom_location_data.getQuadrant()

    def getShipSector(self):
        return self.maelstrom_location_data.getSector()

    def getMontressorPerceptionCheck(self, quadrant, sector):
        return self.maelstrom_location_data.getMontressorPerceptionCheck(quadrant, sector)
| 30.176471
| 90
| 0.72807
| 101
| 1,026
| 7.118812
| 0.237624
| 0.283727
| 0.350487
| 0.278164
| 0.47427
| 0.431154
| 0.1363
| 0.1363
| 0
| 0
| 0
| 0
| 0.199805
| 1,026
| 33
| 91
| 31.090909
| 0.875761
| 0
| 0
| 0.086957
| 0
| 0
| 0.013659
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.347826
| false
| 0
| 0
| 0.217391
| 0.782609
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
bcb268d5e85e7c448988d51f7d359b5623328c52
| 14,204
|
py
|
Python
|
tests/app/service_invite/test_service_invite_rest.py
|
alphagov/notifications-api
|
494a01ba57aeb46d3622b4895a1373a3cc47371c
|
[
"MIT"
] | 51
|
2016-04-03T23:36:17.000Z
|
2022-03-21T20:04:52.000Z
|
tests/app/service_invite/test_service_invite_rest.py
|
alphagov/notifications-api
|
494a01ba57aeb46d3622b4895a1373a3cc47371c
|
[
"MIT"
] | 1,335
|
2015-12-15T14:28:50.000Z
|
2022-03-30T16:24:27.000Z
|
tests/app/service_invite/test_service_invite_rest.py
|
alphagov/notifications-api
|
494a01ba57aeb46d3622b4895a1373a3cc47371c
|
[
"MIT"
] | 30
|
2016-01-08T19:05:32.000Z
|
2021-12-20T16:37:23.000Z
|
import json
import uuid
import pytest
from flask import current_app
from freezegun import freeze_time
from notifications_utils.url_safe_token import generate_token
from app.models import EMAIL_AUTH_TYPE, SMS_AUTH_TYPE, Notification
from tests import create_admin_authorization_header
from tests.app.db import create_invited_user
@pytest.mark.parametrize('extra_args, expected_start_of_invite_url', [
    ({}, 'http://localhost:6012/invitation/'),
    ({'invite_link_host': 'https://www.example.com'}, 'https://www.example.com/invitation/'),
])
def test_create_invited_user(
    admin_request,
    sample_service,
    mocker,
    invitation_email_template,
    extra_args,
    expected_start_of_invite_url,
):
    """Creating an invite persists it, renders the invitation email and
    queues it for delivery via celery."""
    deliver_email_mock = mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
    invited_email = 'invited_user@service.gov.uk'
    inviter = sample_service.users[0]

    payload = {
        'service': str(sample_service.id),
        'email_address': invited_email,
        'from_user': str(inviter.id),
        'permissions': 'send_messages,manage_service,manage_api_keys',
        'auth_type': EMAIL_AUTH_TYPE,
        'folder_permissions': ['folder_1', 'folder_2', 'folder_3'],
    }
    payload.update(extra_args)

    json_resp = admin_request.post(
        'service_invite.create_invited_user',
        service_id=sample_service.id,
        _data=payload,
        _expected_status=201
    )

    invite_data = json_resp['data']
    assert invite_data['service'] == str(sample_service.id)
    assert invite_data['email_address'] == invited_email
    assert invite_data['from_user'] == str(inviter.id)
    assert invite_data['permissions'] == 'send_messages,manage_service,manage_api_keys'
    assert invite_data['auth_type'] == EMAIL_AUTH_TYPE
    assert invite_data['id']
    assert invite_data['folder_permissions'] == ['folder_1', 'folder_2', 'folder_3']

    notification = Notification.query.first()
    assert notification.reply_to_text == inviter.email_address

    personalisation = notification.personalisation
    assert len(personalisation.keys()) == 3
    assert personalisation['service_name'] == 'Sample service'
    assert personalisation['user_name'] == 'Test User'
    assert personalisation['url'].startswith(expected_start_of_invite_url)
    assert len(personalisation['url']) > len(expected_start_of_invite_url)

    assert str(notification.template_id) == current_app.config['INVITATION_EMAIL_TEMPLATE_ID']
    deliver_email_mock.assert_called_once_with([(str(notification.id))], queue="notify-internal-tasks")
@pytest.mark.parametrize('extra_args, expected_start_of_invite_url', [
    ({}, 'http://localhost:6012/invitation/'),
    ({'invite_link_host': 'https://www.example.com'}, 'https://www.example.com/invitation/'),
])
def test_invited_user_for_broadcast_service_receives_broadcast_invite_email(
    admin_request,
    sample_broadcast_service,
    mocker,
    broadcast_invitation_email_template,
    extra_args,
    expected_start_of_invite_url,
):
    """Invites to a broadcast service use the broadcast invitation template."""
    deliver_email_mock = mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
    invited_email = 'invited_user@service.gov.uk'
    inviter = sample_broadcast_service.users[0]

    payload = {
        'service': str(sample_broadcast_service.id),
        'email_address': invited_email,
        'from_user': str(inviter.id),
        'permissions': 'send_messages,manage_service,manage_api_keys',
        'auth_type': EMAIL_AUTH_TYPE,
        'folder_permissions': ['folder_1', 'folder_2', 'folder_3'],
    }
    payload.update(extra_args)

    admin_request.post(
        'service_invite.create_invited_user',
        service_id=sample_broadcast_service.id,
        _data=payload,
        _expected_status=201
    )

    notification = Notification.query.first()
    assert notification.reply_to_text == inviter.email_address

    personalisation = notification.personalisation
    assert len(personalisation.keys()) == 3
    assert personalisation['service_name'] == 'Sample broadcast service'
    assert personalisation['user_name'] == 'Test User'
    assert personalisation['url'].startswith(expected_start_of_invite_url)
    assert len(personalisation['url']) > len(expected_start_of_invite_url)

    assert str(notification.template_id) == current_app.config['BROADCAST_INVITATION_EMAIL_TEMPLATE_ID']
    deliver_email_mock.assert_called_once_with([(str(notification.id))], queue="notify-internal-tasks")
def test_create_invited_user_without_auth_type(admin_request, sample_service, mocker, invitation_email_template):
    """When no auth_type is supplied, the invite defaults to SMS auth."""
    mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
    inviter = sample_service.users[0]

    payload = dict(
        service=str(sample_service.id),
        email_address='invited_user@service.gov.uk',
        from_user=str(inviter.id),
        permissions='send_messages,manage_service,manage_api_keys',
        folder_permissions=[],
    )
    json_resp = admin_request.post(
        'service_invite.create_invited_user',
        service_id=sample_service.id,
        _data=payload,
        _expected_status=201
    )
    assert json_resp['data']['auth_type'] == SMS_AUTH_TYPE
def test_create_invited_user_invalid_email(client, sample_service, mocker, fake_uuid):
    """An invalid email address is rejected with a 400 and no email is queued."""
    deliver_email_mock = mocker.patch('app.celery.provider_tasks.deliver_email.apply_async')
    inviter = sample_service.users[0]

    payload = json.dumps({
        'service': str(sample_service.id),
        'email_address': 'notanemail',
        'from_user': str(inviter.id),
        'permissions': 'send_messages,manage_service,manage_api_keys',
        'folder_permissions': [fake_uuid, fake_uuid],
    })
    response = client.post(
        f'/service/{sample_service.id}/invite',
        headers=[('Content-Type', 'application/json'), create_admin_authorization_header()],
        data=payload,
    )

    assert response.status_code == 400
    json_resp = json.loads(response.get_data(as_text=True))
    assert json_resp['result'] == 'error'
    assert json_resp['message'] == {'email_address': ['Not a valid email address']}
    assert deliver_email_mock.call_count == 0
def test_get_all_invited_users_by_service(client, notify_db, notify_db_session, sample_service):
    """Listing invites for a service returns every invite with the expected
    service, inviter and default auth type.
    """
    # Create five invites. (The original collected them into an unused
    # `invites` list, which has been removed.)
    for i in range(5):
        create_invited_user(sample_service, to_email_address='invited_user_{}@service.gov.uk'.format(i))

    url = '/service/{}/invite'.format(sample_service.id)
    auth_header = create_admin_authorization_header()
    response = client.get(
        url,
        headers=[('Content-Type', 'application/json'), auth_header]
    )
    assert response.status_code == 200
    json_resp = json.loads(response.get_data(as_text=True))

    # The original looped over the results without ever checking the count;
    # all five created invites should come back.
    assert len(json_resp['data']) == 5

    invite_from = sample_service.users[0]
    for invite in json_resp['data']:
        assert invite['service'] == str(sample_service.id)
        assert invite['from_user'] == str(invite_from.id)
        assert invite['auth_type'] == SMS_AUTH_TYPE
        assert invite['id']
def test_get_invited_users_by_service_with_no_invites(client, notify_db, notify_db_session, sample_service):
    """A service without invites returns an empty data list."""
    response = client.get(
        f'/service/{sample_service.id}/invite',
        headers=[('Content-Type', 'application/json'), create_admin_authorization_header()],
    )
    assert response.status_code == 200
    json_resp = json.loads(response.get_data(as_text=True))
    assert len(json_resp['data']) == 0
def test_get_invited_user_by_service(admin_request, sample_invited_user):
    """Fetching an invite through its own service returns the invited email."""
    response_json = admin_request.get(
        'service_invite.get_invited_user_by_service',
        service_id=sample_invited_user.service.id,
        invited_user_id=sample_invited_user.id,
    )
    assert response_json['data']['email_address'] == sample_invited_user.email_address
def test_get_invited_user_by_service_when_user_does_not_belong_to_the_service(
    admin_request,
    sample_invited_user,
    fake_uuid,
):
    """Requesting an invite through the wrong service 404s with an error body."""
    response_json = admin_request.get(
        'service_invite.get_invited_user_by_service',
        service_id=fake_uuid,
        invited_user_id=sample_invited_user.id,
        _expected_status=404,
    )
    assert response_json['result'] == 'error'
def test_update_invited_user_set_status_to_cancelled(client, sample_invited_user):
    """An invite's status can be updated to 'cancelled'."""
    url = f'/service/{sample_invited_user.service_id}/invite/{sample_invited_user.id}'
    response = client.post(
        url,
        data=json.dumps({'status': 'cancelled'}),
        headers=[('Content-Type', 'application/json'), create_admin_authorization_header()],
    )
    assert response.status_code == 200
    updated_invite = json.loads(response.get_data(as_text=True))['data']
    assert updated_invite['status'] == 'cancelled'
def test_update_invited_user_for_wrong_service_returns_404(client, sample_invited_user, fake_uuid):
    """Updating an invite via a service it doesn't belong to returns 404."""
    url = f'/service/{fake_uuid}/invite/{sample_invited_user.id}'
    response = client.post(
        url,
        data=json.dumps({'status': 'cancelled'}),
        headers=[('Content-Type', 'application/json'), create_admin_authorization_header()],
    )
    assert response.status_code == 404
    assert json.loads(response.get_data(as_text=True))['message'] == 'No result found'
def test_update_invited_user_for_invalid_data_returns_400(client, sample_invited_user):
    """An unrecognised status value is rejected with a 400."""
    url = f'/service/{sample_invited_user.service_id}/invite/{sample_invited_user.id}'
    response = client.post(
        url,
        data=json.dumps({'status': 'garbage'}),
        headers=[('Content-Type', 'application/json'), create_admin_authorization_header()],
    )
    assert response.status_code == 400
@pytest.mark.parametrize('endpoint_format_str', [
    '/invite/service/{}',
    '/invite/service/check/{}',
])
def test_validate_invitation_token_returns_200_when_token_valid(client, sample_invited_user, endpoint_format_str):
    """A validly signed token resolves to the full invited-user record on
    both validation endpoints."""
    token = generate_token(
        str(sample_invited_user.id),
        current_app.config['SECRET_KEY'],
        current_app.config['DANGEROUS_SALT'],
    )
    response = client.get(
        endpoint_format_str.format(token),
        headers=[('Content-Type', 'application/json'), create_admin_authorization_header()],
    )

    assert response.status_code == 200
    invite = json.loads(response.get_data(as_text=True))['data']
    assert invite['id'] == str(sample_invited_user.id)
    assert invite['email_address'] == sample_invited_user.email_address
    assert invite['from_user'] == str(sample_invited_user.user_id)
    assert invite['service'] == str(sample_invited_user.service_id)
    assert invite['status'] == sample_invited_user.status
    assert invite['permissions'] == sample_invited_user.permissions
    assert invite['folder_permissions'] == sample_invited_user.folder_permissions
def test_validate_invitation_token_for_expired_token_returns_400(client):
    """Tokens generated outside the validity window are rejected with 400."""
    # Generate the token far enough in the past that it has expired.
    with freeze_time('2016-01-01T12:00:00'):
        token = generate_token(
            str(uuid.uuid4()),
            current_app.config['SECRET_KEY'],
            current_app.config['DANGEROUS_SALT'],
        )
    response = client.get(
        f'/invite/service/{token}',
        headers=[('Content-Type', 'application/json'), create_admin_authorization_header()],
    )

    assert response.status_code == 400
    json_resp = json.loads(response.get_data(as_text=True))
    assert json_resp['result'] == 'error'
    assert json_resp['message'] == {
        'invitation': 'Your invitation to GOV.UK Notify has expired. '
                      'Please ask the person that invited you to send you another one'}
def test_validate_invitation_token_returns_400_when_invited_user_does_not_exist(client):
    """Validating a well-signed token that references no invite fails.

    NOTE(review): despite the ``returns_400`` in the test name, the endpoint
    (and this test) actually expects a 404 'No result found' response —
    consider renaming the test.
    """
    # Validly signed token wrapping a random UUID with no invite row behind it.
    token = generate_token(str(uuid.uuid4()), current_app.config['SECRET_KEY'],
                           current_app.config['DANGEROUS_SALT'])
    url = '/invite/service/{}'.format(token)
    auth_header = create_admin_authorization_header()
    response = client.get(url, headers=[('Content-Type', 'application/json'), auth_header])
    assert response.status_code == 404
    json_resp = json.loads(response.get_data(as_text=True))
    assert json_resp['result'] == 'error'
    assert json_resp['message'] == 'No result found'
def test_validate_invitation_token_returns_400_when_token_is_malformed(client):
    """A truncated token fails signature validation with a helpful message."""
    valid_token = generate_token(
        str(uuid.uuid4()),
        current_app.config['SECRET_KEY'],
        current_app.config['DANGEROUS_SALT']
    )
    truncated_token = valid_token[:-2]

    response = client.get(
        f'/invite/service/{truncated_token}',
        headers=[('Content-Type', 'application/json'), create_admin_authorization_header()],
    )

    assert response.status_code == 400
    json_resp = json.loads(response.get_data(as_text=True))
    assert json_resp['result'] == 'error'
    assert json_resp['message'] == {
        'invitation': 'Something’s wrong with this link. Make sure you’ve copied the whole thing.'
    }
def test_get_invited_user(admin_request, sample_invited_user):
    """The standalone invited-user endpoint returns the invite's details."""
    response_json = admin_request.get(
        'service_invite.get_invited_user',
        invited_user_id=sample_invited_user.id,
    )
    invite = response_json['data']
    assert invite['id'] == str(sample_invited_user.id)
    assert invite['email_address'] == sample_invited_user.email_address
    assert invite['service'] == str(sample_invited_user.service_id)
    assert invite['permissions'] == sample_invited_user.permissions
def test_get_invited_user_404s_if_invite_doesnt_exist(admin_request, sample_invited_user, fake_uuid):
    """An unknown invite id yields a 404 error response."""
    response_json = admin_request.get(
        'service_invite.get_invited_user',
        invited_user_id=fake_uuid,
        _expected_status=404,
    )
    assert response_json['result'] == 'error'
| 38.808743
| 114
| 0.712053
| 1,792
| 14,204
| 5.276228
| 0.107143
| 0.070968
| 0.045902
| 0.038075
| 0.836171
| 0.797567
| 0.759492
| 0.731571
| 0.678371
| 0.650026
| 0
| 0.009991
| 0.168474
| 14,204
| 365
| 115
| 38.915068
| 0.790534
| 0
| 0
| 0.59322
| 1
| 0
| 0.201633
| 0.073219
| 0
| 0
| 0
| 0
| 0.216949
| 1
| 0.057627
| false
| 0
| 0.030508
| 0
| 0.088136
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bcc01f39787a08644f7bee34b946c9547849f339
| 13,048
|
py
|
Python
|
src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py
|
RealImage/grpc
|
4a2f1b1c933eaa37e162f9b0448222caff728494
|
[
"BSD-3-Clause"
] | 1
|
2021-04-27T20:09:23.000Z
|
2021-04-27T20:09:23.000Z
|
src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py
|
RealImage/grpc
|
4a2f1b1c933eaa37e162f9b0448222caff728494
|
[
"BSD-3-Clause"
] | null | null | null |
src/python/grpcio_tests/tests/protoc_plugin/_split_definitions_test.py
|
RealImage/grpc
|
4a2f1b1c933eaa37e162f9b0448222caff728494
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2016, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
from concurrent import futures
import contextlib
import distutils.spawn
import errno
import importlib
import os
import os.path
import pkgutil
import shutil
import subprocess
import sys
import tempfile
import threading
import unittest
import grpc
from grpc_tools import protoc
from tests.unit.framework.common import test_constants
# Protobuf import statement for the split-definitions tests; presumably
# injected into a services .proto so it can reference messages generated
# from a separate file — usage is outside this chunk, TODO confirm.
_MESSAGES_IMPORT = b'import "messages.proto";'
@contextlib.contextmanager
def _system_path(path):
old_system_path = sys.path[:]
sys.path = sys.path[0:1] + path + sys.path[1:]
yield
sys.path = old_system_path
class DummySplitServicer(object):
    """Minimal TestService servicer: Call() returns a default response."""

    def __init__(self, request_class, response_class):
        # Remember both generated classes so Call can construct responses.
        self.request_class, self.response_class = request_class, response_class

    def Call(self, request, context):
        # The request is ignored; only a default-constructed response matters.
        return self.response_class()
class SeparateTestMixin(object):
    """Assertions for generated code whose messages (pb2) and services
    (pb2_grpc) live in *separate* output directories.

    Concrete subclasses must set: ``python_out_directory``,
    ``grpc_python_out_directory``, ``pb2_import``, ``pb2_grpc_import`` and
    ``should_find_services_in_pb2``.
    """

    def testImportAttributes(self):
        # Messages must be importable from the python_out directory.
        with _system_path([self.python_out_directory]):
            pb2 = importlib.import_module(self.pb2_import)
        # Bare attribute access asserts presence (AttributeError on failure).
        pb2.Request
        pb2.Response
        if self.should_find_services_in_pb2:
            pb2.TestServiceServicer
        else:
            with self.assertRaises(AttributeError):
                pb2.TestServiceServicer
        # Services must be importable from the grpc_python_out directory,
        # and messages must NOT leak into the grpc module.
        with _system_path([self.grpc_python_out_directory]):
            pb2_grpc = importlib.import_module(self.pb2_grpc_import)
        pb2_grpc.TestServiceServicer
        with self.assertRaises(AttributeError):
            pb2_grpc.Request
        with self.assertRaises(AttributeError):
            pb2_grpc.Response

    def testCall(self):
        # Round-trip a unary call through a real in-process server built
        # from the generated modules.
        with _system_path([self.python_out_directory]):
            pb2 = importlib.import_module(self.pb2_import)
        with _system_path([self.grpc_python_out_directory]):
            pb2_grpc = importlib.import_module(self.pb2_grpc_import)
        server = grpc.server(
            futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
        pb2_grpc.add_TestServiceServicer_to_server(
            DummySplitServicer(pb2.Request, pb2.Response), server)
        # Port 0 lets the OS pick a free port.
        port = server.add_insecure_port('[::]:0')
        server.start()
        channel = grpc.insecure_channel('localhost:{}'.format(port))
        stub = pb2_grpc.TestServiceStub(channel)
        request = pb2.Request()
        expected_response = pb2.Response()
        response = stub.Call(request)
        self.assertEqual(expected_response, response)
class CommonTestMixin(object):
    """Assertions for generated code whose messages and services share a
    *common* output directory.

    NOTE(review): the method bodies are byte-for-byte identical to
    SeparateTestMixin; only how subclasses configure the two directory
    attributes differs. Consider deduplicating.

    Concrete subclasses must set: ``python_out_directory``,
    ``grpc_python_out_directory``, ``pb2_import``, ``pb2_grpc_import`` and
    ``should_find_services_in_pb2``.
    """

    def testImportAttributes(self):
        # Messages must be importable from the python_out directory.
        with _system_path([self.python_out_directory]):
            pb2 = importlib.import_module(self.pb2_import)
        # Bare attribute access asserts presence (AttributeError on failure).
        pb2.Request
        pb2.Response
        if self.should_find_services_in_pb2:
            pb2.TestServiceServicer
        else:
            with self.assertRaises(AttributeError):
                pb2.TestServiceServicer
        # Services must be importable from the grpc_python_out directory,
        # and messages must NOT leak into the grpc module.
        with _system_path([self.grpc_python_out_directory]):
            pb2_grpc = importlib.import_module(self.pb2_grpc_import)
        pb2_grpc.TestServiceServicer
        with self.assertRaises(AttributeError):
            pb2_grpc.Request
        with self.assertRaises(AttributeError):
            pb2_grpc.Response

    def testCall(self):
        # Round-trip a unary call through a real in-process server built
        # from the generated modules.
        with _system_path([self.python_out_directory]):
            pb2 = importlib.import_module(self.pb2_import)
        with _system_path([self.grpc_python_out_directory]):
            pb2_grpc = importlib.import_module(self.pb2_grpc_import)
        server = grpc.server(
            futures.ThreadPoolExecutor(max_workers=test_constants.POOL_SIZE))
        pb2_grpc.add_TestServiceServicer_to_server(
            DummySplitServicer(pb2.Request, pb2.Response), server)
        # Port 0 lets the OS pick a free port.
        port = server.add_insecure_port('[::]:0')
        server.start()
        channel = grpc.insecure_channel('localhost:{}'.format(port))
        stub = pb2_grpc.TestServiceStub(channel)
        request = pb2.Request()
        expected_response = pb2.Response()
        response = stub.Call(request)
        self.assertEqual(expected_response, response)
class SameSeparateTest(unittest.TestCase, SeparateTestMixin):
    """Messages and services from one .proto, generated into separate
    python_out / grpc_python_out directories."""

    def setUp(self):
        same_proto_contents = pkgutil.get_data(
            'tests.protoc_plugin.protos.invocation_testing', 'same.proto')
        self.directory = tempfile.mkdtemp(suffix='same_separate', dir='.')
        self.proto_directory = os.path.join(self.directory, 'proto_path')
        self.python_out_directory = os.path.join(self.directory, 'python_out')
        self.grpc_python_out_directory = os.path.join(self.directory,
                                                      'grpc_python_out')
        os.makedirs(self.proto_directory)
        os.makedirs(self.python_out_directory)
        os.makedirs(self.grpc_python_out_directory)
        same_proto_file = os.path.join(self.proto_directory,
                                       'same_separate.proto')
        # BUG FIX: use context managers instead of bare open().write(),
        # which leaked file handles and relied on GC to flush the .proto
        # before protoc reads it.
        with open(same_proto_file, 'wb') as proto_file:
            proto_file.write(same_proto_contents)
        protoc_result = protoc.main([
            '',
            '--proto_path={}'.format(self.proto_directory),
            '--python_out={}'.format(self.python_out_directory),
            '--grpc_python_out=grpc_2_0:{}'.format(
                self.grpc_python_out_directory),
            same_proto_file,
        ])
        if protoc_result != 0:
            raise Exception("unexpected protoc error")
        # Make both output directories importable as packages.
        for out_directory in (self.grpc_python_out_directory,
                              self.python_out_directory):
            with open(os.path.join(out_directory, '__init__.py'),
                      'w') as init_file:
                init_file.write('')
        self.pb2_import = 'same_separate_pb2'
        self.pb2_grpc_import = 'same_separate_pb2_grpc'
        self.should_find_services_in_pb2 = False

    def tearDown(self):
        # Remove the whole temporary generation tree.
        shutil.rmtree(self.directory)
class SameCommonTest(unittest.TestCase, CommonTestMixin):
    """Generates code for same.proto with a shared output directory for
    both --python_out and --grpc_python_out, then runs the checks in
    CommonTestMixin against the generated modules."""

    def setUp(self):
        same_proto_contents = pkgutil.get_data(
            'tests.protoc_plugin.protos.invocation_testing', 'same.proto')
        self.directory = tempfile.mkdtemp(suffix='same_common', dir='.')
        self.proto_directory = os.path.join(self.directory, 'proto_path')
        self.python_out_directory = os.path.join(self.directory, 'python_out')
        self.grpc_python_out_directory = self.python_out_directory
        os.makedirs(self.proto_directory)
        os.makedirs(self.python_out_directory)
        same_proto_file = os.path.join(self.proto_directory,
                                       'same_common.proto')
        # Use context managers so the file handles are closed promptly
        # instead of being leaked until garbage collection.
        with open(same_proto_file, 'wb') as proto_file:
            proto_file.write(same_proto_contents)
        protoc_result = protoc.main([
            '',
            '--proto_path={}'.format(self.proto_directory),
            '--python_out={}'.format(self.python_out_directory),
            '--grpc_python_out={}'.format(self.grpc_python_out_directory),
            same_proto_file,
        ])
        if protoc_result != 0:
            raise Exception("unexpected protoc error")
        # Package marker makes the generated modules importable.
        with open(os.path.join(self.python_out_directory, '__init__.py'),
                  'w') as init_file:
            init_file.write('')
        self.pb2_import = 'same_common_pb2'
        self.pb2_grpc_import = 'same_common_pb2_grpc'
        self.should_find_services_in_pb2 = True

    def tearDown(self):
        shutil.rmtree(self.directory)
class SplitCommonTest(unittest.TestCase, CommonTestMixin):
    """Generates code for split services/messages protos with a shared
    output directory, then runs the CommonTestMixin checks."""

    def setUp(self):
        services_proto_contents = pkgutil.get_data(
            'tests.protoc_plugin.protos.invocation_testing.split_services',
            'services.proto')
        messages_proto_contents = pkgutil.get_data(
            'tests.protoc_plugin.protos.invocation_testing.split_messages',
            'messages.proto')
        self.directory = tempfile.mkdtemp(suffix='split_common', dir='.')
        self.proto_directory = os.path.join(self.directory, 'proto_path')
        self.python_out_directory = os.path.join(self.directory, 'python_out')
        self.grpc_python_out_directory = self.python_out_directory
        os.makedirs(self.proto_directory)
        os.makedirs(self.python_out_directory)
        services_proto_file = os.path.join(self.proto_directory,
                                           'split_common_services.proto')
        messages_proto_file = os.path.join(self.proto_directory,
                                           'split_common_messages.proto')
        # Rewrite the import statement so the services proto points at the
        # renamed messages proto; use context managers so the file handles
        # are closed promptly instead of being leaked.
        with open(services_proto_file, 'wb') as services_file:
            services_file.write(
                services_proto_contents.replace(
                    _MESSAGES_IMPORT, b'import "split_common_messages.proto";'))
        with open(messages_proto_file, 'wb') as messages_file:
            messages_file.write(messages_proto_contents)
        protoc_result = protoc.main([
            '',
            '--proto_path={}'.format(self.proto_directory),
            '--python_out={}'.format(self.python_out_directory),
            '--grpc_python_out={}'.format(self.grpc_python_out_directory),
            services_proto_file,
            messages_proto_file,
        ])
        if protoc_result != 0:
            raise Exception("unexpected protoc error")
        # Package marker makes the generated modules importable.
        with open(os.path.join(self.python_out_directory, '__init__.py'),
                  'w') as init_file:
            init_file.write('')
        self.pb2_import = 'split_common_messages_pb2'
        self.pb2_grpc_import = 'split_common_services_pb2_grpc'
        self.should_find_services_in_pb2 = False

    def tearDown(self):
        shutil.rmtree(self.directory)
class SplitSeparateTest(unittest.TestCase, SeparateTestMixin):
    """Generates code for split services/messages protos with separate
    --python_out and --grpc_python_out (grpc_2_0) directories, then runs
    the SeparateTestMixin checks."""

    def setUp(self):
        services_proto_contents = pkgutil.get_data(
            'tests.protoc_plugin.protos.invocation_testing.split_services',
            'services.proto')
        messages_proto_contents = pkgutil.get_data(
            'tests.protoc_plugin.protos.invocation_testing.split_messages',
            'messages.proto')
        self.directory = tempfile.mkdtemp(suffix='split_separate', dir='.')
        self.proto_directory = os.path.join(self.directory, 'proto_path')
        self.python_out_directory = os.path.join(self.directory, 'python_out')
        self.grpc_python_out_directory = os.path.join(self.directory,
                                                      'grpc_python_out')
        os.makedirs(self.proto_directory)
        os.makedirs(self.python_out_directory)
        os.makedirs(self.grpc_python_out_directory)
        services_proto_file = os.path.join(self.proto_directory,
                                           'split_separate_services.proto')
        messages_proto_file = os.path.join(self.proto_directory,
                                           'split_separate_messages.proto')
        # Rewrite the import statement so the services proto points at the
        # renamed messages proto; use context managers so the file handles
        # are closed promptly instead of being leaked.
        with open(services_proto_file, 'wb') as services_file:
            services_file.write(
                services_proto_contents.replace(
                    _MESSAGES_IMPORT,
                    b'import "split_separate_messages.proto";'))
        with open(messages_proto_file, 'wb') as messages_file:
            messages_file.write(messages_proto_contents)
        protoc_result = protoc.main([
            '',
            '--proto_path={}'.format(self.proto_directory),
            '--python_out={}'.format(self.python_out_directory),
            '--grpc_python_out=grpc_2_0:{}'.format(
                self.grpc_python_out_directory),
            services_proto_file,
            messages_proto_file,
        ])
        if protoc_result != 0:
            raise Exception("unexpected protoc error")
        # Package marker makes the generated modules importable.
        with open(os.path.join(self.python_out_directory, '__init__.py'),
                  'w') as init_file:
            init_file.write('')
        self.pb2_import = 'split_separate_messages_pb2'
        self.pb2_grpc_import = 'split_separate_services_pb2_grpc'
        self.should_find_services_in_pb2 = False

    def tearDown(self):
        shutil.rmtree(self.directory)
# Allow running this test module directly as a script.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| 41.686901
| 78
| 0.668378
| 1,503
| 13,048
| 5.51497
| 0.159015
| 0.055375
| 0.080347
| 0.058391
| 0.781397
| 0.770419
| 0.749186
| 0.737604
| 0.728073
| 0.728073
| 0
| 0.008239
| 0.237201
| 13,048
| 312
| 79
| 41.820513
| 0.824576
| 0.112738
| 0
| 0.753036
| 0
| 0
| 0.124004
| 0.060443
| 0
| 0
| 0
| 0
| 0.032389
| 1
| 0.060729
| false
| 0
| 0.157895
| 0.004049
| 0.251012
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bcc202a66986f9a47006c9f6ad16cf02ff69e076
| 18,640
|
py
|
Python
|
tests/swig/python/gpu/test_cuda_subagent.py
|
twinkarma/FLAMEGPU2_dev
|
733a6c2016c0fd15cd835c6bb2fcb6ded75c8d15
|
[
"MIT"
] | null | null | null |
tests/swig/python/gpu/test_cuda_subagent.py
|
twinkarma/FLAMEGPU2_dev
|
733a6c2016c0fd15cd835c6bb2fcb6ded75c8d15
|
[
"MIT"
] | 4
|
2020-10-19T18:31:24.000Z
|
2020-12-14T21:26:46.000Z
|
tests/swig/python/gpu/test_cuda_subagent.py
|
twinkarma/FLAMEGPU2_dev
|
733a6c2016c0fd15cd835c6bb2fcb6ded75c8d15
|
[
"MIT"
] | null | null | null |
import pytest
from unittest import TestCase
from pyflamegpu import *
from random import randint
# Population size shared by every test in this module.
AGENT_COUNT = 100;
# Model names used when constructing the (nested) sub-model hierarchy.
SUB_MODEL_NAME = "SubModel";
PROXY_SUB_MODEL_NAME = "ProxySubModel";
MODEL_NAME = "Model";
AGENT_NAME = "Agent";
# Agent variable names referenced both here and inside the RTC sources.
AGENT_VAR1_NAME = "AVar1";
AGENT_VAR2_NAME = "AVar2";
SUB_VAR1_NAME = "SubVar1";
AGENT_VAR_i = "i";
AGENT_VAR_t = "t";
# Agent state names; "mapped" states are shared with the sub-model,
# "unmapped" states exist only on one side of the binding.
MAPPED_STATE1 = "mapped1";
MAPPED_STATE2 = "mapped2";
UNMAPPED_STATE1 = "unmapped1";
UNMAPPED_STATE2 = "unmapped2";
# CUDA agent-function sources, compiled at runtime (RTC) by pyflamegpu.
# The triple-quoted bodies are runtime data handed to the C++ compiler;
# their text must not be edited for cosmetic reasons.

# Adds the per-agent counter "t" to AVar1, then increments "t".
AddT = """
FLAMEGPU_AGENT_FUNCTION(AddT, MsgNone, MsgNone) {
const unsigned int v = FLAMEGPU->getVariable<unsigned int>("AVar1");
const unsigned int t = FLAMEGPU->getVariable<unsigned int>("t");
FLAMEGPU->setVariable<unsigned int>("AVar1", v + t);
FLAMEGPU->setVariable<unsigned int>("t", t + 1);
return ALIVE;
}
"""
# Increments AVar1 by 1 only when SubVar1 holds an expected value
# (12: sub-model default, 599: set by a parent model); otherwise adds
# 100000 so a mapping error is obvious in the assertions.
AddOne = """
FLAMEGPU_AGENT_FUNCTION(AddOne, MsgNone, MsgNone) {
const unsigned int v = FLAMEGPU->getVariable<unsigned int>("AVar1");
const unsigned int sub_v = FLAMEGPU->getVariable<unsigned int>("SubVar1");
if (sub_v == 12) {
// sub_v should always be it's default value 12 if created in submodel, we never change it
FLAMEGPU->setVariable<unsigned int>("AVar1", v + 1);
} else if (sub_v == 599) {
// sub_v Agents created byproxysubmodel or above will have this value, so original agents set this
FLAMEGPU->setVariable<unsigned int>("AVar1", v + 1);
} else {
FLAMEGPU->setVariable<unsigned int>("AVar1", v + 100000);
}
return ALIVE;
}
"""
# Adds SubVar1 to AVar1, then doubles SubVar1.
AddSubVar = """
FLAMEGPU_AGENT_FUNCTION(AddSubVar, MsgNone, MsgNone) {
const unsigned int v = FLAMEGPU->getVariable<unsigned int>("AVar1");
const unsigned int sub_v = FLAMEGPU->getVariable<unsigned int>("SubVar1");
FLAMEGPU->setVariable<unsigned int>("AVar1", v + sub_v);
FLAMEGPU->setVariable<unsigned int>("SubVar1", sub_v * 2);
return ALIVE;
}
"""
# Unconditionally increments AVar1 by 1.
AddOne2 = """
FLAMEGPU_AGENT_FUNCTION(AddOne2, MsgNone, MsgNone) {
const unsigned int v = FLAMEGPU->getVariable<unsigned int>("AVar1");
FLAMEGPU->setVariable<unsigned int>("AVar1", v + 1);
return ALIVE;
}
"""
# Adds 10 to AVar1 and subtracts 1000 from AVar2 (AVar2 wraps around
# unsigned arithmetic, which the Python-side assertions mirror).
AddTen = """
FLAMEGPU_AGENT_FUNCTION(AddTen, MsgNone, MsgNone) {
const unsigned int v = FLAMEGPU->getVariable<unsigned int>("AVar1");
FLAMEGPU->setVariable<unsigned int>("AVar1", v + 10);
const unsigned int v2 = FLAMEGPU->getVariable<unsigned int>("AVar2");
FLAMEGPU->setVariable<unsigned int>("AVar2", v2 - 1000);
return ALIVE;
}
"""
# Triples "i", then kills agents whose pre-triple value was divisible by
# 4 (first iteration, detected via AVar2's range) or by 2 (second).
KillEven = """
FLAMEGPU_AGENT_FUNCTION(KillEven, MsgNone, MsgNone) {
const unsigned int v = FLAMEGPU->getVariable<unsigned int>("i");
FLAMEGPU->setVariable<unsigned int>("i", v * 3);
if (FLAMEGPU->getVariable<unsigned int>("AVar2") > UINT_MAX-1000) {
// First iteration
if (v % 4 == 0)
return DEAD;
} else {
// Second iteration
if (v % 2 == 0)
return DEAD;
}
return ALIVE;
}
"""
# Triples "i", then births a new agent for qualifying values of v,
# distinguishing first/second iteration via AVar2's range.
BirthEven = """
FLAMEGPU_AGENT_FUNCTION(BirthEven, MsgNone, MsgNone) {
const unsigned int v = FLAMEGPU->getVariable<unsigned int>("i");
FLAMEGPU->setVariable<unsigned int>("i", v * 3);
if (FLAMEGPU->getVariable<unsigned int>("AVar2") > UINT_MAX - 2000) {
// First iteration
if (v % 4 == 0) {
FLAMEGPU->agent_out.setVariable("i", v * 3);
FLAMEGPU->agent_out.setVariable("AVar2", 4000 + v);
}
} else if (FLAMEGPU->getVariable<unsigned int>("AVar2") > UINT_MAX - 4000) {
// Second iteration
if ((v / 3) % 4 == 0) {
FLAMEGPU->agent_out.setVariable("i", v * 3);
FLAMEGPU->agent_out.setVariable("AVar2", 4000 + v);
}
}
return ALIVE;
}
"""
# Function condition: only agents whose "i" is divisible by 4 pass.
AllowEven = """
FLAMEGPU_AGENT_FUNCTION_CONDITION(AllowEven) {
const unsigned int v = FLAMEGPU->getVariable<unsigned int>("i");
// First iteration
if (v % 4 == 0) {
return true;
}
return false;
}
"""
# Adds 100 to "i".
UpdateId100 = """
FLAMEGPU_AGENT_FUNCTION(UpdateId100, MsgNone, MsgNone) {
const unsigned int v = FLAMEGPU->getVariable<unsigned int>("i");
FLAMEGPU->setVariable<unsigned int>("i", v + 100);
return ALIVE;
}
"""
class ExitAlways(pyflamegpu.HostFunctionConditionCallback):
    """Host exit-condition callback that always requests termination.

    Attached to the sub-models so that each executes exactly one
    internal step per parent-model step.
    """

    def __init__(self):
        # The SWIG callback base class must be initialised explicitly.
        super().__init__()

    def run(self, FLAMEGPU):
        """Signal the sub-model to exit immediately."""
        return pyflamegpu.EXIT
# Host-function sources (runtime data, like the RTC strings above).

# Births one agent on the host with both variables explicitly set.
HostBirth = """
FLAMEGPU_HOST_FUNCTION(HostBirth) {
auto a = FLAMEGPU->newAgent(AGENT_NAME);
a.setVariable<unsigned int>(AGENT_VAR1_NAME, 5);
a.setVariable<unsigned int>(AGENT_VAR2_NAME, 500);
}
"""
# Births one agent on the host with only AVar1 set (AVar2 defaults).
HostBirth2 = """
FLAMEGPU_HOST_FUNCTION(HostBirth2) {
auto a = FLAMEGPU->newAgent(AGENT_NAME);
a.setVariable<unsigned int>(AGENT_VAR1_NAME, 5);
}
"""
# Marks host-born agents (AVar1 == 5) by rewriting AVar1 to 500.
HostBirthUpdate = """
FLAMEGPU_AGENT_FUNCTION(HostBirthUpdate, MsgNone, MsgNone) {
const unsigned int v = FLAMEGPU->getVariable<unsigned int>("AVar1");
if (v == 5) {
FLAMEGPU->setVariable<unsigned int>("AVar1", 500);
}
return ALIVE;
}
"""
def UINT_MAX():
    """Return the maximum value of a 32-bit unsigned integer.

    Mirrors C's UINT_MAX, which the RTC agent functions rely on, so the
    Python-side expected values wrap identically to device arithmetic.
    """
    return (1 << 32) - 1
class TestCUDASubAgent(TestCase):
    """Tests agent-variable mapping between a CUDA model and its
    sub-models (including agent death inside plain and nested/proxy
    sub-models). Each test runs two steps and checks mapped/unmapped
    variable arithmetic and, where relevant, the surviving population.
    """

    def test_simple(self):
        """A mapped variable is mutated by the sub-model; an unmapped
        one only by the parent's own agent functions."""
        # Tests whether a sub model is capable of changing an agents variable
        # Agents in same named state, with matching variables
        sm = pyflamegpu.ModelDescription(SUB_MODEL_NAME);
        # Define SubModel
        a = sm.newAgent(AGENT_NAME);
        a.newVariableUInt(AGENT_VAR1_NAME, 0);
        a.newVariableUInt(AGENT_VAR_t, 1);
        a.newVariableUInt(SUB_VAR1_NAME, 12);
        fn_1 = a.newRTCFunction("1", AddT);
        fn_2 = a.newRTCFunction("2", AddOne);
        sm.newLayer().addAgentFunction(fn_1);
        sm.newLayer().addAgentFunction(fn_2);
        exitcdn = ExitAlways()
        sm.addExitConditionCallback(exitcdn);
        m = pyflamegpu.ModelDescription(MODEL_NAME);
        ma = m.newAgent(AGENT_NAME);
        # Define Model
        ma.newVariableUInt(AGENT_VAR1_NAME, 1);
        ma.newVariableUInt(AGENT_VAR2_NAME, UINT_MAX());
        fn_3 = ma.newRTCFunction("3", AddTen);
        smd = m.newSubModel("sub", sm);
        smd.bindAgent(AGENT_NAME, AGENT_NAME, True, True);  # auto map vars and states
        m.newLayer().addAgentFunction(fn_3);
        m.newLayer().addSubModel("sub");
        m.newLayer().addAgentFunction(fn_3);
        # Init Agents
        pop = pyflamegpu.AgentPopulation(ma, AGENT_COUNT);
        for i in range(AGENT_COUNT):
            ai = pop.getNextInstance();
            # Vars all default init
        # Init Model
        c = pyflamegpu.CUDASimulation(m)
        c.SimulationConfig().steps = 1;
        c.applyConfig();
        c.setPopulationData(pop);
        # Run Model
        c.step();
        # Check result
        # Mapped var = init + af + submodel af + af
        mapped_result = 1 + 10 + 1 + 1 + 10;
        # Unmapped var = init + af + af
        unmapped_result = UINT_MAX() - 1000 - 1000;
        c.getPopulationData(pop);
        for i in range(AGENT_COUNT):
            ai = pop.getInstanceAt(i);
            assert ai.getVariableUInt(AGENT_VAR1_NAME) == mapped_result
            assert ai.getVariableUInt(AGENT_VAR2_NAME) == unmapped_result
        # Run Model
        c.step();
        # Check result
        # Mapped var = mapped_result + af + submodel af + af
        mapped_result2 = mapped_result + 10 + 1 + 1 + 10;
        # Unmapped var = unmapped_result + af + af
        unmapped_result2 = unmapped_result - 1000 - 1000;
        c.getPopulationData(pop);
        for i in range(AGENT_COUNT):
            ai = pop.getInstanceAt(i);
            assert ai.getVariableUInt(AGENT_VAR1_NAME) == mapped_result2
            assert ai.getVariableUInt(AGENT_VAR2_NAME) == unmapped_result2

    def test_AgentDeath_BeforeSubModel(self):
        """Agents killed by the parent model (before the sub-model runs)
        must not reappear after the sub-model executes."""
        sm = pyflamegpu.ModelDescription(SUB_MODEL_NAME);
        # Define SubModel
        a = sm.newAgent(AGENT_NAME);
        a.newVariableUInt(AGENT_VAR1_NAME, 0);
        a.newVariableUInt(SUB_VAR1_NAME, 12);
        fn_1 = a.newRTCFunction("1", AddOne);
        sm.newLayer().addAgentFunction(fn_1);
        exitcdn = ExitAlways()
        sm.addExitConditionCallback(exitcdn);
        m = pyflamegpu.ModelDescription(MODEL_NAME);
        ma = m.newAgent(AGENT_NAME);
        # Define Model
        ma.newVariableUInt(AGENT_VAR1_NAME, 1);
        ma.newVariableUInt(AGENT_VAR2_NAME, UINT_MAX());
        ma.newVariableUInt(AGENT_VAR_i);
        fn_2 = ma.newRTCFunction("2", KillEven);
        fn_2.setAllowAgentDeath(True);
        fn_3 = ma.newRTCFunction("3", AddTen);
        smd = m.newSubModel("sub", sm);
        smd.bindAgent(AGENT_NAME, AGENT_NAME, True, True);  # auto map vars and states
        m.newLayer().addAgentFunction(fn_2);
        m.newLayer().addAgentFunction(fn_3);
        m.newLayer().addSubModel("sub");
        m.newLayer().addAgentFunction(fn_3);
        # Init Agents
        pop = pyflamegpu.AgentPopulation(ma, AGENT_COUNT);
        for i in range(AGENT_COUNT):
            ai = pop.getNextInstance();
            ai.setVariableUInt(AGENT_VAR_i, i);
            ai.setVariableUInt(AGENT_VAR1_NAME, i);
            ai.setVariableUInt(AGENT_VAR2_NAME, UINT_MAX() - i);
            # Other vars all default init
        # Init Model
        c = pyflamegpu.CUDASimulation(m)
        c.SimulationConfig().steps = 1;
        c.applyConfig();
        c.setPopulationData(pop);
        # Run Model
        c.step();
        # Check result
        # Mapped var = init + af + submodel af + af
        mapped_result = 10 + 1 + 10;
        # Unmapped var = init + af + af
        unmapped_result = UINT_MAX() - 1000 - 1000;
        c.getPopulationData(pop);
        assert pop.getCurrentListSize() == int(AGENT_COUNT*0.75)  # if AGENT_COUNT > 1000 this test will fail
        for i in range(pop.getCurrentListSize()):
            ai = pop.getInstanceAt(i);
            _i = ai.getVariableUInt(AGENT_VAR_i);
            assert _i % 3 == 0  # Var divides cleanly by 3
            __i = int(_i/3);  # Calculate original value of AGENT_VAR_i
            assert __i % 4 != 0  # Agent doesn't have original AGENT_VAR_i that was supposed to be killed
            assert ai.getVariableUInt(AGENT_VAR1_NAME) == __i + mapped_result;
            assert ai.getVariableUInt(AGENT_VAR2_NAME) == unmapped_result - __i;
        # Run Model
        c.step();
        # Check result
        # Mapped var = mapped_result + af + submodel af + af
        mapped_result2 = mapped_result + 10 + 1 + 10;
        # Unmapped var = unmapped_result + af + af
        unmapped_result2 = unmapped_result - 1000 - 1000;
        c.getPopulationData(pop);
        assert pop.getCurrentListSize() == int(AGENT_COUNT/2)
        for i in range(pop.getCurrentListSize()):
            ai = pop.getInstanceAt(i);
            _i = ai.getVariableUInt(AGENT_VAR_i);
            assert _i % 9 == 0  # Var divides cleanly by 3
            # NOTE(review): true division yields a float here, unlike the
            # int(_i/3) used after step one — confirm intentional.
            __i = _i/9;  # Calculate original value of AGENT_VAR_i
            assert __i % 2 != 0  # Agent doesn't have original AGENT_VAR_i that was supposed to be killed
            assert ai.getVariableUInt(AGENT_VAR1_NAME) == __i + mapped_result2;
            assert ai.getVariableUInt(AGENT_VAR2_NAME) == unmapped_result2 - __i;

    def test_AgentDeath_InSubModel(self):
        """Agent death performed inside the sub-model must be reflected
        in the parent model's population after the step."""
        sm = pyflamegpu.ModelDescription(SUB_MODEL_NAME);
        # Define SubModel
        a = sm.newAgent(AGENT_NAME);
        a.newVariableUInt(AGENT_VAR1_NAME, 0);
        a.newVariableUInt(AGENT_VAR2_NAME, 0);
        a.newVariableUInt(AGENT_VAR_i, 0);
        fn_1 = a.newRTCFunction("1", KillEven);
        fn_1.setAllowAgentDeath(True);
        sm.newLayer().addAgentFunction(fn_1);
        exitcdn = ExitAlways()
        sm.addExitConditionCallback(exitcdn);
        m = pyflamegpu.ModelDescription(MODEL_NAME);
        ma = m.newAgent(AGENT_NAME);
        # Define Model
        ma.newVariableUInt(AGENT_VAR1_NAME, 1);
        ma.newVariableUInt(AGENT_VAR2_NAME, UINT_MAX());
        ma.newVariableUInt(AGENT_VAR_i);
        fn_2 = ma.newRTCFunction("2", AddOne2);
        fn_3 = ma.newRTCFunction("3", AddTen);
        smd = m.newSubModel("sub", sm);
        smd.bindAgent(AGENT_NAME, AGENT_NAME, True, True);  # auto map vars and states
        m.newLayer().addAgentFunction(fn_2);
        m.newLayer().addSubModel("sub");
        m.newLayer().addAgentFunction(fn_3);
        # Init Agents
        pop = pyflamegpu.AgentPopulation(ma, AGENT_COUNT);
        for i in range(AGENT_COUNT):
            ai = pop.getNextInstance();
            ai.setVariableUInt(AGENT_VAR_i, i);
            ai.setVariableUInt(AGENT_VAR1_NAME, i);
            ai.setVariableUInt(AGENT_VAR2_NAME, UINT_MAX() - i);
            # Other vars all default init
        # Init Model
        c = pyflamegpu.CUDASimulation(m)
        c.SimulationConfig().steps = 1;
        c.applyConfig();
        c.setPopulationData(pop);
        # Run Model
        c.step();
        # Check result
        # Mapped var = init + af + submodel af + af
        mapped_result = 1 + 10;
        # Unmapped var = init + af + af
        unmapped_result = UINT_MAX() - 1000;
        c.getPopulationData(pop);
        assert pop.getCurrentListSize() == int(AGENT_COUNT*0.75)  # if AGENT_COUNT > 1000 this test will fail
        for i in range(pop.getCurrentListSize()):
            ai = pop.getInstanceAt(i);
            _i = ai.getVariableUInt(AGENT_VAR_i);
            assert _i % 3 == 0  # Var divides cleanly by 3
            __i = int(_i/3);  # Calculate original value of AGENT_VAR_i
            assert __i % 4 != 0  # Agent doesn't have original AGENT_VAR_i that was supposed to be killed
            assert ai.getVariableUInt(AGENT_VAR1_NAME) == __i + mapped_result;
            assert ai.getVariableUInt(AGENT_VAR2_NAME) == unmapped_result - __i;
        # Run Model
        c.step();
        # Check result
        # Mapped var = mapped_result + af + submodel af + af
        mapped_result2 = mapped_result + 1 + 10;
        # Unmapped var = unmapped_result + af + af
        unmapped_result2 = unmapped_result - 1000;
        c.getPopulationData(pop);
        assert pop.getCurrentListSize() == int(AGENT_COUNT/2)
        for i in range(pop.getCurrentListSize()):
            ai = pop.getInstanceAt(i);
            _i = ai.getVariableUInt(AGENT_VAR_i);
            assert _i % 9 == 0  # Var divides cleanly by 3
            # NOTE(review): true division yields a float here, unlike the
            # int(_i/3) used after step one — confirm intentional.
            __i = _i/9;  # Calculate original value of AGENT_VAR_i
            assert __i % 2 != 0  # Agent doesn't have original AGENT_VAR_i that was supposed to be killed
            assert ai.getVariableUInt(AGENT_VAR1_NAME) == __i + mapped_result2;
            assert ai.getVariableUInt(AGENT_VAR2_NAME) == unmapped_result2 - __i;

    def test_AgentDeath_InNestedSubModel(self):
        """Agent death performed in a nested sub-model must propagate up
        through the proxy to the parent model."""
        sm = pyflamegpu.ModelDescription(SUB_MODEL_NAME);
        # Define SubModel
        a = sm.newAgent(AGENT_NAME);
        a.newVariableUInt(AGENT_VAR1_NAME, 0);
        a.newVariableUInt(AGENT_VAR2_NAME, 0);
        a.newVariableUInt(AGENT_VAR_i, 0);
        fn_1 = a.newRTCFunction("1", KillEven);
        fn_1.setAllowAgentDeath(True);
        sm.newLayer().addAgentFunction(fn_1);
        exitcdn = ExitAlways()
        sm.addExitConditionCallback(exitcdn);
        # Define Proxy SubModel
        psm = pyflamegpu.ModelDescription(SUB_MODEL_NAME);
        pa = psm.newAgent(AGENT_NAME);
        pa.newVariableUInt(AGENT_VAR1_NAME, 0);
        pa.newVariableUInt(AGENT_VAR2_NAME, 0);
        pa.newVariableUInt(AGENT_VAR_i, 0);
        psmd = psm.newSubModel("sub", sm);
        psmd.bindAgent(AGENT_NAME, AGENT_NAME, True, True);  # auto map vars and states
        psm.newLayer().addSubModel("sub");
        psm.addExitConditionCallback(exitcdn);
        m = pyflamegpu.ModelDescription(MODEL_NAME);
        ma = m.newAgent(AGENT_NAME);
        # Define Model
        ma.newVariableUInt(AGENT_VAR1_NAME, 1);
        ma.newVariableUInt(AGENT_VAR2_NAME, UINT_MAX());
        ma.newVariableUInt(AGENT_VAR_i);
        fn_2 = ma.newRTCFunction("2", AddOne2);
        fn_3 = ma.newRTCFunction("3", AddTen);
        # NOTE(review): binds 'sm' directly even though the proxy model
        # 'psm' was just built and is otherwise unused — possibly intended
        # to be m.newSubModel("proxysub", psm). Confirm against the C++
        # version of this test.
        smd = m.newSubModel("proxysub", sm);
        smd.bindAgent(AGENT_NAME, AGENT_NAME, True, True);  # auto map vars and states
        m.newLayer().addAgentFunction(fn_2);
        m.newLayer().addSubModel("proxysub");
        m.newLayer().addAgentFunction(fn_3);
        # Init Agents
        pop = pyflamegpu.AgentPopulation(ma, AGENT_COUNT);
        for i in range(AGENT_COUNT):
            ai = pop.getNextInstance();
            ai.setVariableUInt(AGENT_VAR_i, i);
            ai.setVariableUInt(AGENT_VAR1_NAME, i);
            ai.setVariableUInt(AGENT_VAR2_NAME, UINT_MAX() - i);
            # Other vars all default init
        # Init Model
        c = pyflamegpu.CUDASimulation(m)
        c.SimulationConfig().steps = 1;
        c.applyConfig();
        c.setPopulationData(pop);
        # Run Model
        c.step();
        # Check result
        # Mapped var = init + af + submodel af + af
        mapped_result = 1 + 10;
        # Unmapped var = init + af + af
        unmapped_result = UINT_MAX() - 1000;
        c.getPopulationData(pop);
        assert pop.getCurrentListSize() == int(AGENT_COUNT*0.75)  # if AGENT_COUNT > 1000 this test will fail
        for i in range(pop.getCurrentListSize()):
            ai = pop.getInstanceAt(i);
            _i = ai.getVariableUInt(AGENT_VAR_i);
            assert _i % 3 == 0  # Var divides cleanly by 3
            __i = int(_i/3);  # Calculate original value of AGENT_VAR_i
            assert __i % 4 != 0  # Agent doesn't have original AGENT_VAR_i that was supposed to be killed
            assert ai.getVariableUInt(AGENT_VAR1_NAME) == __i + mapped_result;
            assert ai.getVariableUInt(AGENT_VAR2_NAME) == unmapped_result - __i;
        # Run Model
        c.step();
        # Check result
        # Mapped var = mapped_result + af + submodel af + af
        mapped_result2 = mapped_result + 1 + 10;
        # Unmapped var = unmapped_result + af + af
        unmapped_result2 = unmapped_result - 1000;
        c.getPopulationData(pop);
        assert pop.getCurrentListSize() == int(AGENT_COUNT/2)
        for i in range(pop.getCurrentListSize()):
            ai = pop.getInstanceAt(i);
            _i = ai.getVariableUInt(AGENT_VAR_i);
            assert _i % 9 == 0  # Var divides cleanly by 3
            # NOTE(review): true division yields a float here, unlike the
            # int(_i/3) used after step one — confirm intentional.
            __i = _i/9;  # Calculate original value of AGENT_VAR_i
            assert __i % 2 != 0  # Agent doesn't have original AGENT_VAR_i that was supposed to be killed
            assert ai.getVariableUInt(AGENT_VAR1_NAME) == __i + mapped_result2;
            assert ai.getVariableUInt(AGENT_VAR2_NAME) == unmapped_result2 - __i;
| 39.914347
| 108
| 0.623283
| 2,208
| 18,640
| 5.067935
| 0.092391
| 0.047185
| 0.02252
| 0.045576
| 0.834138
| 0.811349
| 0.796157
| 0.791421
| 0.780161
| 0.775871
| 0
| 0.028921
| 0.263573
| 18,640
| 466
| 109
| 40
| 0.786261
| 0.127039
| 0
| 0.675393
| 0
| 0
| 0.259312
| 0.104207
| 0
| 0
| 0
| 0
| 0.089005
| 1
| 0.018325
| false
| 0
| 0.010471
| 0.005236
| 0.073298
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
bcf050175d8444e899f47b47449d5c9f1d3f1014
| 14,809
|
py
|
Python
|
tests/test_sort.py
|
adamrp/emperor
|
ee12881953cdd65e13325c09d5adf87db7e63afd
|
[
"MIT"
] | null | null | null |
tests/test_sort.py
|
adamrp/emperor
|
ee12881953cdd65e13325c09d5adf87db7e63afd
|
[
"MIT"
] | null | null | null |
tests/test_sort.py
|
adamrp/emperor
|
ee12881953cdd65e13325c09d5adf87db7e63afd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# File created on 20 Apr 2013
from __future__ import division
__author__ = "Yoshiki Vazquez Baeza"
__copyright__ = "Copyright 2013, The Emperor Project"
__credits__ = ["Yoshiki Vazquez Baeza"]
__license__ = "BSD"
__version__ = "0.9.3-dev"
__maintainer__ = "Yoshiki Vazquez Baeza"
__email__ = "yoshiki89@gmail.com"
__status__ = "Development"
from unittest import TestCase, main
from numpy import array
from numpy.testing import assert_almost_equal
from emperor.sort import (sort_taxa_table_by_pcoa_coords,
sort_comparison_filenames)
class TopLevelTests(TestCase):
def setUp(self):
    """Build shared fixtures: OTU headers/table, PCoA coordinate
    headers, and two lists of comparison file paths (one clean, one
    deliberately garbled) used by the sorting tests."""
    self.otu_headers = ['PC.636', 'PC.635', 'PC.356', 'PC.481', 'PC.354',
                        'PC.593', 'PC.355', 'PC.607', 'PC.634']
    # Relative-abundance table; rows are taxa, columns follow
    # self.otu_headers.
    self.otu_table = array([[0.02739726, 0.04697987, 0.02, 0.04697987, 0.01,
        0.02027027, 0.01360544, 0.01342282, 0.02666667], [0.00684932,
        0.02013423, 0.02, 0.00671141, 0., 0.00675676, 0., 0., 0.], [
        0.14383562, 0.27516779, 0.65333333, 0.52348993, 0.38926174,
        0.69594595, 0.28571429, 0.0738255, 0.19333333], [0., 0.02013423,
        0.03333333, 0.01342282, 0., 0.0472973, 0., 0., 0.], [0.78767123,
        0.45637584, 0.22, 0.39597315, 0.41610738, 0.20945946, 0.70068027,
        0.89932886, 0.77333333], [0.,0.02013423, 0.01333333, 0.00671141,
        0.03355705, 0.00675676, 0., 0., 0.],[0., 0., 0.01333333, 0., 0., 0.,
        0., 0., 0.], [0.03424658, 0.16107383, 0.02666667, 0.00671141,
        0.14765101, 0.01351351, 0., 0.01342282, 0.00666667]])
    # COORDS is a module-level fixture defined elsewhere in this file.
    self.coords = COORDS
    self.coords_header = ['PC.354','PC.356','PC.481','PC.593',
                          'PC.355','PC.607','PC.634', 'PC.636', 'PC.635']
    # Comparison-plot coordinate files with a sortable _qN suffix,
    # deliberately listed in lexicographic (not numeric) order.
    self.coord_fps = ['output_data/emperor/bray_curtis_pc_transformed_q1.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q10.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q11.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q12.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q13.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q14.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q15.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q16.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q17.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q18.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q19.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q2.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q20.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q21.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q22.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q23.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q24.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q25.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q26.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q27.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q28.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q29.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q3.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q4.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q5.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q6.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q7.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q8.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q9.txt']
    # Same idea, but mixed with filenames that do not match the
    # expected pattern, to exercise the sorter's fallback behaviour.
    self.coord_fps_garbage = [
        'output_data/emperor/bray_qurtis_pc_transformed_q1.txt',
        'output_data/emperor/bray_111urtis_q_transformed_q10.txt',
        'output_data/emperor/aaaaaaa.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q12.txt',
        'output_data/emperor/qqq2223_curtis_qc_transformed_q13.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q14.txt',
        'output_data/emperor/bray_curtis_pc_transformed_reference.txtoutput_data/emperor/bray_curtis_pc_transformed_q15.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q16.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q17.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q18.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q19.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q2.txt',
        'output_data/emperor/boom.txt',
        'output_data/emperor/another_file with some characters and stuff .txt',
        'output_data/emperor/some_other_file_that_foo_wants_to_compare.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q23.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q24.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q25.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q26.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q27.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q28.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q29.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q3.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q4.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q5.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q6.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q7.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q8.txt',
        'output_data/emperor/bray_curtis_pc_transformed_q9.txt']
def test_sort_taxa_table_by_pcoa_coords(self):
    """Make sure OTU table and coordinates are sorted equally.

    Checks a shuffled full input and a shuffled subset (fewer
    samples than the OTU table contains).
    """
    # case with shuffled inputs
    o_headers, o_otu_table = sort_taxa_table_by_pcoa_coords(
        self.coords_header, self.otu_table, self.otu_headers)
    # assertEqual: assertEquals is a deprecated alias removed in
    # Python 3.12.
    self.assertEqual(o_headers, ['PC.354','PC.356','PC.481','PC.593',
        'PC.355','PC.607','PC.634', 'PC.636', 'PC.635'])
    assert_almost_equal(o_otu_table, OTU_TABLE_A)
    # case with shuffled inputs and fewer samples
    o_headers, o_otu_table = sort_taxa_table_by_pcoa_coords(['PC.354',
        'PC.356','PC.635'], self.otu_table, self.otu_headers)
    self.assertEqual(o_headers, ['PC.354','PC.356','PC.635'])
    assert_almost_equal(o_otu_table, array([[ 0.01, 0.02, 0.04697987],[0.,
        0.02, 0.02013423], [0.38926174, 0.65333333, 0.27516779],[0.,
        0.03333333, 0.02013423],[0.41610738, 0.22, 0.45637584],[0.03355705,
        0.01333333, 0.02013423],[0., 0.01333333, 0.],[0.14765101,
        0.02666667, 0.16107383]]))
def test_sort_comparison_filenames_regular(self):
"""Check filenames are sorted correctly"""
# check it correctly sorts the files according to the suffix
out_sorted = sort_comparison_filenames(self.coord_fps)
self.assertEquals(out_sorted, [
'output_data/emperor/bray_curtis_pc_transformed_q1.txt',
'output_data/emperor/bray_curtis_pc_transformed_q2.txt',
'output_data/emperor/bray_curtis_pc_transformed_q3.txt',
'output_data/emperor/bray_curtis_pc_transformed_q4.txt',
'output_data/emperor/bray_curtis_pc_transformed_q5.txt',
'output_data/emperor/bray_curtis_pc_transformed_q6.txt',
'output_data/emperor/bray_curtis_pc_transformed_q7.txt',
'output_data/emperor/bray_curtis_pc_transformed_q8.txt',
'output_data/emperor/bray_curtis_pc_transformed_q9.txt',
'output_data/emperor/bray_curtis_pc_transformed_q10.txt',
'output_data/emperor/bray_curtis_pc_transformed_q11.txt',
'output_data/emperor/bray_curtis_pc_transformed_q12.txt',
'output_data/emperor/bray_curtis_pc_transformed_q13.txt',
'output_data/emperor/bray_curtis_pc_transformed_q14.txt',
'output_data/emperor/bray_curtis_pc_transformed_q15.txt',
'output_data/emperor/bray_curtis_pc_transformed_q16.txt',
'output_data/emperor/bray_curtis_pc_transformed_q17.txt',
'output_data/emperor/bray_curtis_pc_transformed_q18.txt',
'output_data/emperor/bray_curtis_pc_transformed_q19.txt',
'output_data/emperor/bray_curtis_pc_transformed_q20.txt',
'output_data/emperor/bray_curtis_pc_transformed_q21.txt',
'output_data/emperor/bray_curtis_pc_transformed_q22.txt',
'output_data/emperor/bray_curtis_pc_transformed_q23.txt',
'output_data/emperor/bray_curtis_pc_transformed_q24.txt',
'output_data/emperor/bray_curtis_pc_transformed_q25.txt',
'output_data/emperor/bray_curtis_pc_transformed_q26.txt',
'output_data/emperor/bray_curtis_pc_transformed_q27.txt',
'output_data/emperor/bray_curtis_pc_transformed_q28.txt',
'output_data/emperor/bray_curtis_pc_transformed_q29.txt'])
# if files with garbage are passed in, the sorting should be still
# consistent,putting the "garbaged" filenames at the beginning
out_sorted = sort_comparison_filenames(self.coord_fps_garbage)
self.assertEquals(out_sorted, ['output_data/emperor/aaaaaaa.txt',
'output_data/emperor/boom.txt',
'output_data/emperor/another_file with some characters and stuff .txt',
'output_data/emperor/some_other_file_that_foo_wants_to_compare.txt',
'output_data/emperor/bray_qurtis_pc_transformed_q1.txt',
'output_data/emperor/bray_curtis_pc_transformed_q2.txt',
'output_data/emperor/bray_curtis_pc_transformed_q3.txt',
'output_data/emperor/bray_curtis_pc_transformed_q4.txt',
'output_data/emperor/bray_curtis_pc_transformed_q5.txt',
'output_data/emperor/bray_curtis_pc_transformed_q6.txt',
'output_data/emperor/bray_curtis_pc_transformed_q7.txt',
'output_data/emperor/bray_curtis_pc_transformed_q8.txt',
'output_data/emperor/bray_curtis_pc_transformed_q9.txt',
'output_data/emperor/bray_111urtis_q_transformed_q10.txt',
'output_data/emperor/bray_curtis_pc_transformed_q12.txt',
'output_data/emperor/qqq2223_curtis_qc_transformed_q13.txt',
'output_data/emperor/bray_curtis_pc_transformed_q14.txt',
'output_data/emperor/bray_curtis_pc_transformed_reference.txtoutput_data/emperor/bray_curtis_pc_transformed_q15.txt',
'output_data/emperor/bray_curtis_pc_transformed_q16.txt',
'output_data/emperor/bray_curtis_pc_transformed_q17.txt',
'output_data/emperor/bray_curtis_pc_transformed_q18.txt',
'output_data/emperor/bray_curtis_pc_transformed_q19.txt',
'output_data/emperor/bray_curtis_pc_transformed_q23.txt',
'output_data/emperor/bray_curtis_pc_transformed_q24.txt',
'output_data/emperor/bray_curtis_pc_transformed_q25.txt',
'output_data/emperor/bray_curtis_pc_transformed_q26.txt',
'output_data/emperor/bray_curtis_pc_transformed_q27.txt',
'output_data/emperor/bray_curtis_pc_transformed_q28.txt',
'output_data/emperor/bray_curtis_pc_transformed_q29.txt'])
# tricky case with extensions in things that are not the filename
out_sorted = sort_comparison_filenames([
'output_data_q1.txt/emperor/bray_curtis_pc_transformed_q9.txt',
'output_data/emperorq11.txt/bray_curtis_pc_transformed_q2.txt',
'output_data_q44.txt/emperor/bray_curtis_pc_transformed_q11.txt',
'output_dataq-5.txt/emperor/bray_curtis_pc_transformed_q3.txt',
'output_data_q511.txt/emperor/bray_curtis_pc_transformed_q1.txt'])
self.assertEquals(out_sorted, [
'output_data_q511.txt/emperor/bray_curtis_pc_transformed_q1.txt',
'output_data/emperorq11.txt/bray_curtis_pc_transformed_q2.txt',
'output_dataq-5.txt/emperor/bray_curtis_pc_transformed_q3.txt',
'output_data_q1.txt/emperor/bray_curtis_pc_transformed_q9.txt',
'output_data_q44.txt/emperor/bray_curtis_pc_transformed_q11.txt'])
# make sure nothing happens when an empty list is passed
self.assertEquals(sort_comparison_filenames([]), [])
COORDS = array([[0.280399117569, -0.0060128286014, 0.0234854344148, -0.0468109474823, -0.146624450094, 0.00566979124596, -0.0354299634191, -0.255785794275, -4.84141986706e-09],
[0.228820399536, -0.130142097093, -0.287149447883, 0.0864498846421, 0.0442951919304, 0.20604260722, 0.0310003571386, 0.0719920436501, -4.84141986706e-09],
[0.0422628480532, -0.0139681511889, 0.0635314615517, -0.346120552134, -0.127813807608, 0.0139350721063, 0.0300206887328, 0.140147849223, -4.84141986706e-09],
[0.232872767451, 0.139788385269, 0.322871079774, 0.18334700682, 0.0204661596818, 0.0540589147147, -0.0366250872041, 0.0998235721267, -4.84141986706e-09],
[0.170517581885, -0.194113268955, -0.0308965283066, 0.0198086158783, 0.155100062794, -0.279923941712, 0.0576092515759, 0.0242481862127, -4.84141986706e-09],
[-0.0913299284215, 0.424147148265, -0.135627421345, -0.057519480907, 0.151363490722, -0.0253935675552, 0.0517306152066, -0.038738217609, -4.84141986706e-09],
[-0.349339228244, -0.120787589539, 0.115274502117, 0.0694953933826, -0.0253722182853, 0.067853201946, 0.244447634756, -0.0598827706386, -4.84141986706e-09],
[-0.276542163845, -0.144964375408, 0.0666467344429, -0.0677109454288, 0.176070269506, 0.072969390136, -0.229889463523, -0.0465989416581, -4.84141986706e-09],
[-0.237661393984, 0.0460527772512, -0.138135814766, 0.159061025229, -0.247484698646, -0.115211468101, -0.112864033263, 0.0647940729676, -4.84141986706e-09]])
OTU_TABLE_A = array([[ 0.01, 0.02, 0.04697987, 0.02027027, 0.01360544, 0.01342282, 0.02666667, 0.02739726, 0.04697987],
[ 0., 0.02, 0.00671141, 0.00675676, 0., 0., 0., 0.00684932, 0.02013423],
[ 0.38926174, 0.65333333, 0.52348993, 0.69594595, 0.28571429, 0.0738255, 0.19333333, 0.14383562, 0.27516779],
[ 0., 0.03333333, 0.01342282, 0.0472973, 0., 0., 0., 0., 0.02013423],
[ 0.41610738, 0.22, 0.39597315, 0.20945946, 0.70068027, 0.89932886, 0.77333333, 0.78767123, 0.45637584],
[ 0.03355705, 0.01333333, 0.00671141, 0.00675676, 0., 0., 0., 0., 0.02013423],
[ 0., 0.01333333, 0., 0., 0., 0., 0., 0., 0.],
[ 0.14765101, 0.02666667, 0.00671141, 0.01351351, 0., 0.01342282, 0.00666667, 0.03424658, 0.16107383]])
if __name__ == "__main__":
main()
| 62.75
| 176
| 0.708421
| 1,989
| 14,809
| 4.896933
| 0.150327
| 0.12731
| 0.157495
| 0.269199
| 0.768583
| 0.738604
| 0.719302
| 0.684394
| 0.655749
| 0.633162
| 0
| 0.203623
| 0.176244
| 14,809
| 235
| 177
| 63.017021
| 0.594803
| 0.034709
| 0
| 0.603015
| 0
| 0
| 0.508686
| 0.479406
| 0
| 0
| 0
| 0
| 0.045226
| 1
| 0.015075
| false
| 0
| 0.025126
| 0
| 0.045226
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4c18ded2923a45470de1489355d6ae0e2397c30d
| 615
|
py
|
Python
|
ui/python/run.py
|
calebbergman/scapix-example1
|
668bb5148d7fa014e8ec94d86806b68368f6834d
|
[
"MIT"
] | 1
|
2021-03-02T09:21:29.000Z
|
2021-03-02T09:21:29.000Z
|
ui/python/run.py
|
calebbergman/scapix-example1
|
668bb5148d7fa014e8ec94d86806b68368f6834d
|
[
"MIT"
] | null | null | null |
ui/python/run.py
|
calebbergman/scapix-example1
|
668bb5148d7fa014e8ec94d86806b68368f6834d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os, sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../../project/build/vs_python/Debug'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../../project/build/vs_python/Release'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../../project/build/xcode_mac_python/Debug'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../../project/build/xcode_mac_python/Release'))
sys.path.append(os.path.join(os.path.dirname(__file__), '../../project/build/default_python'))
import chatlib
c = chatlib.Contact()
c.name("hello world!")
print (c.name())
| 41
| 104
| 0.728455
| 95
| 615
| 4.431579
| 0.294737
| 0.142518
| 0.154394
| 0.178147
| 0.779097
| 0.779097
| 0.779097
| 0.779097
| 0.779097
| 0.779097
| 0
| 0.001701
| 0.043902
| 615
| 14
| 105
| 43.928571
| 0.714286
| 0.034146
| 0
| 0
| 0
| 0
| 0.344013
| 0.323777
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4c201eb6caec060e8859015af29d48b27740f872
| 40
|
py
|
Python
|
tests/test_worksheet.py
|
ecmascriptguru/openpyxl-templates
|
635a84cf1538f713a0aff29c5dfaa1eb578db6a9
|
[
"MIT"
] | 38
|
2017-03-29T09:05:16.000Z
|
2021-07-08T07:48:03.000Z
|
tests/test_worksheet.py
|
ecmascriptguru/openpyxl-templates
|
635a84cf1538f713a0aff29c5dfaa1eb578db6a9
|
[
"MIT"
] | 11
|
2018-01-06T11:34:32.000Z
|
2020-05-08T12:40:39.000Z
|
tests/test_worksheet.py
|
ecmascriptguru/openpyxl-templates
|
635a84cf1538f713a0aff29c5dfaa1eb578db6a9
|
[
"MIT"
] | 15
|
2018-01-06T11:48:39.000Z
|
2020-09-23T02:27:19.000Z
|
# TODO: Add tests for sheet base class
| 20
| 39
| 0.725
| 7
| 40
| 4.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.225
| 40
| 1
| 40
| 40
| 0.935484
| 0.9
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 1
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4c21729940bd99ea5c13710e70c003f64fd0b15a
| 891
|
py
|
Python
|
src/2021/may/27/kaprekars_constant.py
|
xaverrd/braingu-toy-problems
|
608030ab83d6f3161d70d782157f759677cc9d3e
|
[
"MIT"
] | null | null | null |
src/2021/may/27/kaprekars_constant.py
|
xaverrd/braingu-toy-problems
|
608030ab83d6f3161d70d782157f759677cc9d3e
|
[
"MIT"
] | null | null | null |
src/2021/may/27/kaprekars_constant.py
|
xaverrd/braingu-toy-problems
|
608030ab83d6f3161d70d782157f759677cc9d3e
|
[
"MIT"
] | 2
|
2021-05-27T14:23:04.000Z
|
2021-05-28T14:18:35.000Z
|
# Have the function KaprekarsConstant(num) take the num parameter being passed which will be a 4-digit number with at least two distinct digits. Your program should perform the following routine on the number: Arrange the digits in descending order and in ascending order (adding zeroes to fit it to a 4-digit number), and subtract the smaller number from the bigger number. Then repeat the previous step. Performing this routine will always cause you to reach a fixed number: 6174. Then performing the routine on 6174 will always give you 6174 (7641 - 1467 = 6174). Your program should return the number of times this routine must be performed until 6174 is reached. For example: if num is 3524 your program should return 3 because of the following steps: (1) 5432 - 2345 = 3087, (2) 8730 - 0378 = 8352, (3) 8532 - 2358 = 6174.
def kaprekars_constant(str):
# code goes here
return str
| 148.5
| 828
| 0.775533
| 149
| 891
| 4.630872
| 0.610738
| 0.047826
| 0.073913
| 0.037681
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.101093
| 0.178451
| 891
| 5
| 829
| 178.2
| 0.84153
| 0.943883
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
4c6471320c3445857ee2708b13ba434bf5865a77
| 17,721
|
py
|
Python
|
polygon/graphql/product/tests/test_product_minimal_variant_price.py
|
giocatron/Polygon
|
b3b0e0047bf20eb91379b594fd26603fcf279880
|
[
"CC-BY-4.0"
] | null | null | null |
polygon/graphql/product/tests/test_product_minimal_variant_price.py
|
giocatron/Polygon
|
b3b0e0047bf20eb91379b594fd26603fcf279880
|
[
"CC-BY-4.0"
] | null | null | null |
polygon/graphql/product/tests/test_product_minimal_variant_price.py
|
giocatron/Polygon
|
b3b0e0047bf20eb91379b594fd26603fcf279880
|
[
"CC-BY-4.0"
] | 2
|
2020-11-24T22:23:08.000Z
|
2020-11-24T23:41:36.000Z
|
from unittest.mock import patch
import graphene
from freezegun import freeze_time
from graphql_relay import from_global_id, to_global_id
from ...discount.enums import DiscountValueTypeEnum
from ...tests.utils import assert_negative_positive_decimal_value, get_graphql_content
@patch(
"polygon.graphql.product.mutations.products"
".update_product_minimal_variant_price_task"
)
def test_product_variant_create_updates_minimal_variant_price(
mock_update_product_minimal_variant_price_task,
staff_api_client,
product,
permission_manage_products,
):
query = """
mutation ProductVariantCreate(
$productId: ID!,
$sku: String!,
$price: PositiveDecimal,
$attributes: [AttributeValueInput]!,
) {
productVariantCreate(
input: {
product: $productId,
sku: $sku,
price: $price,
attributes: $attributes
}
) {
productVariant {
name
}
productErrors {
message
field
}
}
}
"""
product_id = to_global_id("Product", product.pk)
sku = "1"
price = "1.99"
variant_id = graphene.Node.to_global_id(
"Attribute", product.product_type.variant_attributes.first().pk
)
variant_value = "test-value"
variables = {
"productId": product_id,
"sku": sku,
"price": price,
"attributes": [{"id": variant_id, "values": [variant_value]}],
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
assert response.status_code == 200
content = get_graphql_content(response)
data = content["data"]["productVariantCreate"]
assert data["productErrors"] == []
mock_update_product_minimal_variant_price_task.delay.assert_called_once_with(
product.pk
)
@patch(
"polygon.graphql.product.mutations.products"
".update_product_minimal_variant_price_task"
)
def test_product_variant_update_updates_minimal_variant_price(
mock_update_product_minimal_variant_price_task,
staff_api_client,
product,
permission_manage_products,
):
query = """
mutation ProductVariantUpdate(
$id: ID!,
$price: PositiveDecimal,
) {
productVariantUpdate(
id: $id,
input: {
price: $price,
}
) {
productVariant {
name
}
errors {
message
field
}
}
}
"""
variant = product.variants.first()
variant_id = to_global_id("ProductVariant", variant.pk)
price = "1.99"
variables = {"id": variant_id, "price": price}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
assert response.status_code == 200
content = get_graphql_content(response)
data = content["data"]["productVariantUpdate"]
assert data["errors"] == []
mock_update_product_minimal_variant_price_task.delay.assert_called_once_with(
product.pk
)
@patch(
"polygon.graphql.product.mutations.products"
".update_product_minimal_variant_price_task"
)
def test_product_variant_update_updates_invalid_variant_price(
mock_update_product_minimal_variant_price_task,
staff_api_client,
product,
permission_manage_products,
):
query = """
mutation ProductVariantUpdate(
$id: ID!,
$price: PositiveDecimal,
) {
productVariantUpdate(
id: $id,
input: {
price: $price,
}
) {
productVariant {
name
}
productErrors {
field
message
code
}
}
}
"""
staff_api_client.user.user_permissions.add(permission_manage_products)
variant = product.variants.first()
variant_id = to_global_id("ProductVariant", variant.pk)
price = "-1.99"
variables = {"id": variant_id, "price": price}
response = staff_api_client.post_graphql(query, variables)
assert_negative_positive_decimal_value(response)
@patch(
"polygon.graphql.product.mutations.products"
".update_product_minimal_variant_price_task"
)
def test_product_variant_update_updates_invalid_cost_price(
mock_update_product_minimal_variant_price_task,
staff_api_client,
product,
permission_manage_products,
):
query = """
mutation ProductVariantUpdate(
$id: ID!,
$costPrice: PositiveDecimal,
) {
productVariantUpdate(
id: $id,
input: {
costPrice: $costPrice,
}
) {
productVariant {
name
}
productErrors {
field
message
code
}
}
}
"""
staff_api_client.user.user_permissions.add(permission_manage_products)
variant = product.variants.first()
variant_id = to_global_id("ProductVariant", variant.pk)
cost_price = "-1.99"
variables = {"id": variant_id, "costPrice": cost_price}
response = staff_api_client.post_graphql(query, variables)
assert_negative_positive_decimal_value(response)
@patch(
"polygon.graphql.product.mutations.products."
"update_product_minimal_variant_price_task"
)
def test_product_variant_delete_updates_minimal_variant_price(
mock_update_product_minimal_variant_price_task,
staff_api_client,
product,
permission_manage_products,
):
query = """
mutation ProductVariantDelete($id: ID!) {
productVariantDelete(id: $id) {
productVariant {
id
}
errors {
field
message
}
}
}
"""
variant = product.variants.first()
variant_id = to_global_id("ProductVariant", variant.pk)
variables = {"id": variant_id}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["productVariantDelete"]
assert data["errors"] == []
mock_update_product_minimal_variant_price_task.delay.assert_called_once_with(
product.pk
)
@patch("polygon.product.utils.update_products_minimal_variant_prices_task")
def test_category_delete_updates_minimal_variant_price(
mock_update_products_minimal_variant_prices_task,
staff_api_client,
categories_tree_with_published_products,
permission_manage_products,
):
parent = categories_tree_with_published_products
product_list = [parent.children.first().products.first(), parent.products.first()]
query = """
mutation CategoryDelete($id: ID!) {
categoryDelete(id: $id) {
category {
name
}
errors {
field
message
}
}
}
"""
variables = {"id": to_global_id("Category", parent.pk)}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
assert response.status_code == 200
content = get_graphql_content(response)
data = content["data"]["categoryDelete"]
assert data["errors"] == []
mock_update_products_minimal_variant_prices_task.delay.assert_called_once()
(
_call_args,
call_kwargs,
) = mock_update_products_minimal_variant_prices_task.delay.call_args
assert set(call_kwargs["product_ids"]) == set(p.pk for p in product_list)
for product in product_list:
product.refresh_from_db()
assert not product.category
@patch(
"polygon.graphql.product.mutations.products"
".update_products_minimal_variant_prices_of_catalogues_task"
)
def test_collection_add_products_updates_minimal_variant_price(
mock_update_minimal_variant_prices_task,
staff_api_client,
sale,
collection,
product_list,
permission_manage_products,
):
sale.collections.add(collection)
assert collection.products.count() == 0
query = """
mutation CollectionAddProducts($id: ID!, $products: [ID]!) {
collectionAddProducts(collectionId: $id, products: $products) {
collection {
products {
totalCount
}
}
errors {
field
message
}
}
}
"""
collection_id = to_global_id("Collection", collection.id)
product_ids = [to_global_id("Product", product.pk) for product in product_list]
variables = {"id": collection_id, "products": product_ids}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["collectionAddProducts"]
assert data["errors"] == []
mock_update_minimal_variant_prices_task.delay.assert_called_once_with(
product_ids=[p.pk for p in product_list]
)
@patch(
"polygon.graphql.product.mutations"
".products.update_products_minimal_variant_prices_of_catalogues_task"
)
def test_collection_remove_products_updates_minimal_variant_price(
mock_update_minimal_variant_prices_task,
staff_api_client,
sale,
collection,
product_list,
permission_manage_products,
):
sale.collections.add(collection)
assert collection.products.count() == 0
query = """
mutation CollectionRemoveProducts($id: ID!, $products: [ID]!) {
collectionRemoveProducts(collectionId: $id, products: $products) {
collection {
products {
totalCount
}
}
errors {
field
message
}
}
}
"""
collection_id = to_global_id("Collection", collection.id)
product_ids = [to_global_id("Product", product.pk) for product in product_list]
variables = {"id": collection_id, "products": product_ids}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_products]
)
content = get_graphql_content(response)
data = content["data"]["collectionRemoveProducts"]
assert data["errors"] == []
mock_update_minimal_variant_prices_task.delay.assert_called_once_with(
product_ids=[p.pk for p in product_list]
)
@freeze_time("2010-05-31 12:00:01")
@patch(
"polygon.graphql.discount.mutations"
".update_products_minimal_variant_prices_of_discount_task"
)
def test_sale_create_updates_products_minimal_variant_prices(
mock_update_minimal_variant_prices_task,
staff_api_client,
permission_manage_discounts,
):
query = """
mutation SaleCreate(
$name: String,
$type: DiscountValueTypeEnum,
$value: PositiveDecimal,
$products: [ID]
) {
saleCreate(input: {
name: $name,
type: $type,
value: $value,
products: $products
}) {
sale {
id
}
errors {
field
message
}
}
}
"""
variables = {
"name": "Half price product",
"type": DiscountValueTypeEnum.PERCENTAGE.name,
"value": "50",
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_discounts]
)
assert response.status_code == 200
content = get_graphql_content(response)
assert content["data"]["saleCreate"]["errors"] == []
relay_sale_id = content["data"]["saleCreate"]["sale"]["id"]
_sale_class_name, sale_id_str = from_global_id(relay_sale_id)
sale_id = int(sale_id_str)
mock_update_minimal_variant_prices_task.delay.assert_called_once_with(sale_id)
@patch(
"polygon.graphql.discount.mutations"
".update_products_minimal_variant_prices_of_discount_task"
)
def test_sale_update_updates_products_minimal_variant_prices(
mock_update_minimal_variant_prices_task,
staff_api_client,
sale,
permission_manage_discounts,
):
query = """
mutation SaleUpdate($id: ID!, $value: PositiveDecimal) {
saleUpdate(id: $id, input: {value: $value}) {
sale {
id
}
errors {
field
message
}
}
}
"""
variables = {"id": to_global_id("Sale", sale.pk), "value": "99"}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_discounts]
)
assert response.status_code == 200
content = get_graphql_content(response)
assert content["data"]["saleUpdate"]["errors"] == []
mock_update_minimal_variant_prices_task.delay.assert_called_once_with(sale.pk)
@patch(
"polygon.graphql.discount.mutations"
".update_products_minimal_variant_prices_of_discount_task"
)
def test_sale_delete_updates_products_minimal_variant_prices(
mock_update_minimal_variant_prices_task,
staff_api_client,
sale,
permission_manage_discounts,
):
query = """
mutation SaleDelete($id: ID!) {
saleDelete(id: $id) {
sale {
id
}
errors {
field
message
}
}
}
"""
variables = {"id": to_global_id("Sale", sale.pk)}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_discounts]
)
assert response.status_code == 200
content = get_graphql_content(response)
assert content["data"]["saleDelete"]["errors"] == []
mock_update_minimal_variant_prices_task.delay.assert_called_once_with(sale.pk)
@patch(
"polygon.graphql.discount.mutations"
".update_products_minimal_variant_prices_of_catalogues_task"
)
def test_sale_add_catalogues_updates_products_minimal_variant_prices(
mock_update_minimal_variant_prices_task,
staff_api_client,
sale,
product,
category,
collection,
permission_manage_discounts,
):
query = """
mutation SaleCataloguesAdd($id: ID!, $input: CatalogueInput!) {
saleCataloguesAdd(id: $id, input: $input) {
errors {
field
message
}
}
}
"""
sale_id = to_global_id("Sale", sale.pk)
product_id = to_global_id("Product", product.pk)
collection_id = to_global_id("Collection", collection.pk)
category_id = to_global_id("Category", category.pk)
variables = {
"id": sale_id,
"input": {
"products": [product_id],
"collections": [collection_id],
"categories": [category_id],
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_discounts]
)
assert response.status_code == 200
content = get_graphql_content(response)
assert content["data"]["saleCataloguesAdd"]["errors"] == []
mock_update_minimal_variant_prices_task.delay.assert_called_once_with(
product_ids=[product.pk],
category_ids=[category.pk],
collection_ids=[collection.pk],
)
@patch(
"polygon.graphql.discount.mutations"
".update_products_minimal_variant_prices_of_catalogues_task"
)
def test_sale_remove_catalogues_updates_products_minimal_variant_prices(
mock_update_minimal_variant_prices_task,
staff_api_client,
sale,
product,
category,
collection,
permission_manage_discounts,
):
assert product in sale.products.all()
assert category in sale.categories.all()
assert collection in sale.collections.all()
query = """
mutation SaleCataloguesRemove($id: ID!, $input: CatalogueInput!) {
saleCataloguesRemove(id: $id, input: $input) {
errors {
field
message
}
}
}
"""
sale_id = to_global_id("Sale", sale.pk)
product_id = to_global_id("Product", product.pk)
collection_id = to_global_id("Collection", collection.pk)
category_id = to_global_id("Category", category.pk)
variables = {
"id": sale_id,
"input": {
"products": [product_id],
"collections": [collection_id],
"categories": [category_id],
},
}
response = staff_api_client.post_graphql(
query, variables, permissions=[permission_manage_discounts]
)
assert response.status_code == 200
content = get_graphql_content(response)
assert content["data"]["saleCataloguesRemove"]["errors"] == []
mock_update_minimal_variant_prices_task.delay.assert_called_once_with( # noqa
product_ids=[product.pk],
category_ids=[category.pk],
collection_ids=[collection.pk],
)
| 29.339404
| 86
| 0.612494
| 1,702
| 17,721
| 5.998237
| 0.083431
| 0.067196
| 0.058772
| 0.022333
| 0.80047
| 0.77128
| 0.764326
| 0.750416
| 0.732393
| 0.732393
| 0
| 0.004561
| 0.294848
| 17,721
| 603
| 87
| 29.38806
| 0.81242
| 0.000226
| 0
| 0.624088
| 0
| 0
| 0.389726
| 0.094045
| 0
| 0
| 0
| 0
| 0.072993
| 1
| 0.023723
| false
| 0
| 0.010949
| 0
| 0.034672
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
d5d02269c5bcf35875605489578cfe382dc29db8
| 34
|
py
|
Python
|
test_esc.py
|
htlemke/calx
|
27417db00e7038cec63501b6888f5ab1bdfaf202
|
[
"MIT"
] | null | null | null |
test_esc.py
|
htlemke/calx
|
27417db00e7038cec63501b6888f5ab1bdfaf202
|
[
"MIT"
] | null | null | null |
test_esc.py
|
htlemke/calx
|
27417db00e7038cec63501b6888f5ab1bdfaf202
|
[
"MIT"
] | null | null | null |
def test_me():
print('test')
| 8.5
| 17
| 0.558824
| 5
| 34
| 3.6
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.235294
| 34
| 3
| 18
| 11.333333
| 0.692308
| 0
| 0
| 0
| 0
| 0
| 0.121212
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
d5de05304d8bf6293e708512f1020640efec40c8
| 108
|
py
|
Python
|
build_framework/clean.py
|
kdschlosser/wxAnimation
|
ad472719a77a081da5e51280d469cfd5d5bfcd3c
|
[
"MIT"
] | 2
|
2020-03-23T11:29:56.000Z
|
2021-11-24T22:10:07.000Z
|
build_framework/clean.py
|
kdschlosser/wxAnimation
|
ad472719a77a081da5e51280d469cfd5d5bfcd3c
|
[
"MIT"
] | null | null | null |
build_framework/clean.py
|
kdschlosser/wxAnimation
|
ad472719a77a081da5e51280d469cfd5d5bfcd3c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from distutils.command.clean import clean as _clean
class clean(_clean):
pass
| 15.428571
| 51
| 0.685185
| 15
| 108
| 4.8
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011364
| 0.185185
| 108
| 7
| 52
| 15.428571
| 0.806818
| 0.194444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 6
|
d5eda6215f206e12bf22c3a3533dbfd98a07f2e9
| 47
|
py
|
Python
|
terrascript/azure/d.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/azure/d.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
terrascript/azure/d.py
|
vutsalsinghal/python-terrascript
|
3b9fb5ad77453d330fb0cd03524154a342c5d5dc
|
[
"BSD-2-Clause"
] | null | null | null |
# terrascript/azure/d.py
import terrascript
| 9.4
| 25
| 0.765957
| 6
| 47
| 6
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148936
| 47
| 4
| 26
| 11.75
| 0.9
| 0.468085
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
911ad49f01b3f032ed887320e34c40fdf5ff573b
| 9,492
|
py
|
Python
|
dobbi/collections/emoticons.py
|
iaramer/dobbi
|
8057d38cc7e9af7b10a0a8dc648a9953151ed774
|
[
"Apache-2.0"
] | 23
|
2021-11-07T11:03:14.000Z
|
2022-03-18T15:29:26.000Z
|
dobbi/collections/emoticons.py
|
iaramer/dobbi
|
8057d38cc7e9af7b10a0a8dc648a9953151ed774
|
[
"Apache-2.0"
] | null | null | null |
dobbi/collections/emoticons.py
|
iaramer/dobbi
|
8057d38cc7e9af7b10a0a8dc648a9953151ed774
|
[
"Apache-2.0"
] | null | null | null |
# Map of emoticon patterns to human-readable descriptions.
# NOTE: keys are written as regex fragments (hence the backslashes on
# (, ), [, ], {, }, |, ^, *, $ and ?) so they can be compiled into
# patterns downstream; several keys use U+2011 non-breaking hyphens,
# which look like '-' but are distinct characters.
# Fixed two typos in values: "Surpised" -> "Surprised" and
# "Sad of Crying" -> "Sad or Crying".
EMOTICONS = {
    u":\)": "Happy face or smiley",
    u"=\)": "Happy face or smiley",
    u":‑\)": "Happy face or smiley",
    u":-\]": "Happy face or smiley",
    u":\]": "Happy face or smiley",
    u":-3": "Happy face smiley",
    u":3": "Happy face smiley",
    u":->": "Happy face smiley",
    u":>": "Happy face smiley",
    u"8-\)": "Happy face smiley",
    u":o\)": "Happy face smiley",
    u":-\}": "Happy face smiley",
    u":\}": "Happy face smiley",
    u":-\)": "Happy face smiley",
    u":c\)": "Happy face smiley",
    u":\^\)": "Happy face smiley",
    u"=\]": "Happy face smiley",
    u":‑D": "Laughing or big grin or laugh with glasses",
    u":D": "Laughing or big grin or laugh with glasses",
    u":d": "Laughing or big grin or laugh with glasses",
    u":-D": "Laughing or big grin or laugh with glasses",
    u":-d": "Laughing or big grin or laugh with glasses",
    u"8‑D": "Laughing or big grin or laugh with glasses",
    u"8D": "Laughing or big grin or laugh with glasses",
    u"X‑D": "Laughing or big grin or laugh with glasses",
    u"XD": "Laughing or big grin or laugh with glasses",
    u"=D": "Laughing or big grin or laugh with glasses",
    u"=3": "Laughing or big grin or laugh with glasses",
    u"B\^D": "Laughing or big grin or laugh with glasses",
    u":-\)\)": "Very happy",
    u":\)\)": "Very happy",
    u"=\)\)": "Very happy",
    u":‑\(": "Frown or sad or angry or pouting",
    u"=\(": "Frown or sad or angry or pouting",
    u":\(": "Frown or sad or angry or pouting",
    u":-\(": "Frown or sad or angry or pouting",
    u":‑c": "Frown or sad or angry or pouting",
    u":c": "Frown or sad or angry or pouting",
    u":‑<": "Frown or sad or angry or pouting",
    u":<": "Frown or sad or angry or pouting",
    u":‑\[": "Frown or sad or angry or pouting",
    u":\[": "Frown or sad or angry or pouting",
    u":-\|\|": "Frown or sad or angry or pouting",
    u">:\[": "Frown or sad or angry or pouting",
    u":\{": "Frown or sad or angry or pouting",
    u":@": "Frown or sad or angry or pouting",
    u">:\(": "Frown or sad or angry or pouting",
    u":'‑\(": "Crying",
    u":'\(": "Crying",
    u":'‑\)": "Tears of happiness",
    u":'\)": "Tears of happiness",
    u"D‑':": "Horror",
    u"D:<": "Disgust",
    u"D:": "Sadness",
    u"D8": "Great dismay",
    u"D;": "Great dismay",
    u"D=": "Great dismay",
    u"DX": "Great dismay",
    u":‑O": "Surprise",
    u"=O": "Surprise",
    u":‑o": "Surprise",
    u":o": "Surprise",
    u":-0": "Shock",
    u"8‑0": "Yawn",
    u">:O": "Yawn",
    u":-\*": "Kiss",
    u"=\*": "Kiss",
    u":\*": "Kiss",
    u":X": "Kiss",
    u";‑\)": "Wink or smirk",
    u";\)": "Wink or smirk",
    u"\*-\)": "Wink or smirk",
    u"\*\)": "Wink or smirk",
    u";‑\]": "Wink or smirk",
    u";\]": "Wink or smirk",
    u";\^\)": "Wink or smirk",
    u":‑,": "Wink or smirk",
    u";D": "Wink or smirk",
    u":‑P": "Tongue sticking out, cheeky, playful or blowing a raspberry",
    u":P": "Tongue sticking out, cheeky, playful or blowing a raspberry",
    u"X‑P": "Tongue sticking out, cheeky, playful or blowing a raspberry",
    u"XP": "Tongue sticking out, cheeky, playful or blowing a raspberry",
    u":‑Þ": "Tongue sticking out, cheeky, playful or blowing a raspberry",
    u":Þ": "Tongue sticking out, cheeky, playful or blowing a raspberry",
    u":b": "Tongue sticking out, cheeky, playful or blowing a raspberry",
    u"d:": "Tongue sticking out, cheeky, playful or blowing a raspberry",
    u"=p": "Tongue sticking out, cheeky, playful or blowing a raspberry",
    u">:P": "Tongue sticking out, cheeky, playful or blowing a raspberry",
    u":‑/": "Skeptical, annoyed, undecided, uneasy or hesitant",
    u":/": "Skeptical, annoyed, undecided, uneasy or hesitant",
    u":-\[.\]": "Skeptical, annoyed, undecided, uneasy or hesitant",
    u">:/": "Skeptical, annoyed, undecided, uneasy or hesitant",
    u"=/": "Skeptical, annoyed, undecided, uneasy or hesitant",
    u":L": "Skeptical, annoyed, undecided, uneasy or hesitant",
    u"=L": "Skeptical, annoyed, undecided, uneasy or hesitant",
    u":S": "Skeptical, annoyed, undecided, uneasy or hesitant",
    u":‑\|": "Straight face",
    u":\|": "Straight face",
    u":\$": "Embarrassed or blushing",
    u":‑x": "Sealed lips or wearing braces or tongue-tied",
    u":x": "Sealed lips or wearing braces or tongue-tied",
    u":‑#": "Sealed lips or wearing braces or tongue-tied",
    u":#": "Sealed lips or wearing braces or tongue-tied",
    u":‑&": "Sealed lips or wearing braces or tongue-tied",
    u":&": "Sealed lips or wearing braces or tongue-tied",
    u"O:‑\)": "Angel, saint or innocent",
    u"O:\)": "Angel, saint or innocent",
    u"0:‑3": "Angel, saint or innocent",
    u"0:3": "Angel, saint or innocent",
    u"0:‑\)": "Angel, saint or innocent",
    u"0:\)": "Angel, saint or innocent",
    u":‑b": "Tongue sticking out, cheeky, playful or blowing a raspberry",
    u"0;\^\)": "Angel, saint or innocent",
    u">:‑\)": "Evil or devilish",
    u">:\)": "Evil or devilish",
    u"\}:‑\)": "Evil or devilish",
    u"\}:\)": "Evil or devilish",
    u"3:‑\)": "Evil or devilish",
    u"3:\)": "Evil or devilish",
    u">;\)": "Evil or devilish",
    u"\|;‑\)": "Cool",
    u"\|‑O": "Bored",
    u":‑J": "Tongue in cheek",
    u"#‑\)": "Party all night",
    u"%‑\)": "Drunk or confused",
    u"%\)": "Drunk or confused",
    u":-###..": "Being sick",
    u":###..": "Being sick",
    u"<:‑\|": "Dump",
    u"\(>_<\)": "Troubled",
    u"\(>_<\)>": "Troubled",
    u"\(';'\)": "Baby",
    u"\(\^\^>``": "Nervous or Embarrassed or Troubled or Shy or Sweat drop",
    u"\(\^_\^;\)": "Nervous or Embarrassed or Troubled or Shy or Sweat drop",
    u"\(-_-;\)": "Nervous or Embarrassed or Troubled or Shy or Sweat drop",
    u"-_-": "Nervous or Embarrassed or Troubled or Shy or Sweat drop",
    u"\(~_~;\)": "Nervous or Embarrassed or Troubled or Shy or Sweat drop",
    u"\(・.・;\)": "Nervous or Embarrassed or Troubled or Shy or Sweat drop",
    u"\(-_-\)zzz": "Sleeping",
    u"\(\^_-\)": "Wink",
    u"\(\(+_+\)\)": "Confused",
    u"\(+o+\)": "Confused",
    u"\(o\|o\)": "Ultraman",
    u"\^_\^": "Joyful",
    u"\^\^": "Joyful",
    u"\(\^_\^\)/": "Joyful",
    u"\(\^O\^\)/": "Joyful",
    u"\(\^o\^\)/": "Joyful",
    u"\(__\)": "Kowtow as a sign of respect, or dogeza for apology",
    u"_\(._.\)_": "Kowtow as a sign of respect, or dogeza for apology",
    u"<\(_ _\)>": "Kowtow as a sign of respect, or dogeza for apology",
    u"<m\(__\)m>": "Kowtow as a sign of respect, or dogeza for apology",
    u"m\(__\)m": "Kowtow as a sign of respect, or dogeza for apology",
    u"m\(_ _\)m": "Kowtow as a sign of respect, or dogeza for apology",
    u"\('_'\)": "Sad or Crying",
    u"\(T_T\)": "Sad or Crying",
    u"\(;_;\)": "Sad or Crying",
    u"\(;_;": "Sad or Crying",  # fixed typo: was "Sad of Crying"
    u"\(;_:\)": "Sad or Crying",
    u"\(;O;\)": "Sad or Crying",
    u"\(:_;\)": "Sad or Crying",
    u"\(ToT\)": "Sad or Crying",
    u";_;": "Sad or Crying",
    u";-;": "Sad or Crying",
    u";n;": "Sad or Crying",
    u";;": "Sad or Crying",
    u"Q.Q": "Sad or Crying",
    u"T.T": "Sad or Crying",
    u"QQ": "Sad or Crying",
    u"Q_Q": "Sad or Crying",
    u"\(-.-\)": "Shame",
    u"\(-_-\)": "Shame",
    u"\(一一\)": "Shame",
    u"\(;一_一\)": "Shame",
    u"\(=_=\)": "Tired",
    u"\(=\^·\^=\)": "cat",
    u"\(=\^··\^=\)": "cat",
    u"=_\^= ": "cat",
    u"\(..\)": "Looking down",
    u"\(._.\)": "Looking down",
    u"\^m\^": "Giggling with hand covering mouth",
    u"\(・・\?": "Confusion",
    u"\(\?_\?\)": "Confusion",
    u">\^_\^<": "Normal Laugh",
    u"<\^!\^>": "Normal Laugh",
    u"\^/\^": "Normal Laugh",
    u"(\*\^_\^\*)": "Normal Laugh",
    u"\(\^<\^\)": "Normal Laugh",
    u"\(\^\^\)": "Normal Laugh",
    u"\(\^.\^\)": "Normal Laugh",
    u"\(\^_\^.\)": "Normal Laugh",
    u"\(\^_\^\)": "Normal Laugh",
    u"\(\^J\^\)": "Normal Laugh",
    u"\(\*\^.\^\*\)": "Normal Laugh",
    u"\(\^—\^)": "Normal Laugh",
    u"\(#\^.\^#\)": "Normal Laugh",
    u"(\^—\^)": "Waving",
    u"\(;_;\)/~~~": "Waving",
    u"\(\^.\^\)/~~~": "Waving",
    u"\(\$··\)/~~~": "Waving",
    u"\(-_-\)/~~~": "Waving",
    u"\(T_T\)/~~~": "Waving",
    u"\(ToT\)/~~~": "Waving",
    u"\(\*\^0\^\*\)": "Excited",
    u"\(\*_\*\)": "Amazed",
    u"\(\*_\*;": "Amazed",
    u"\(+_+\) \(@_@\)": "Amazed",
    u"\(\*\^\^\)v": "Laughing, Cheerful",
    u"\(\^_\^\)v": "Laughing, Cheerful",
    u"\(\(d\[-_-\]b\)\)": "Headphones, Listening to music",
    u'\(-"-\)': "Worried",
    u"\(ーー;\)": "Worried",
    u"\(\^0_0\^\)": "Eyeglasses",
    u"\(^v^\)": "Happy",
    u"\(^u^\)": "Happy",
    u"\(\^\)o\(\^\)": "Happy",
    u"\(\^O\^\)": "Happy",
    u"\(\^o\^\)": "Happy",
    u"\)\^o\^\(": "Happy",
    u":O": "Surprised",
    u"o_O": "Surprised",
    u"Oo": "Surprised",
    u"o_0": "Surprised",
    u"o.O": "Surprised",  # fixed typo: was "Surpised"
    u"\(o.o\)": "Surprised",
    u"oO": "Surprised",
    u"\(\* ̄m ̄\)": "Dissatisfied",
    u"\(‘A`\)": "Snubbed or Deflated",
    u"c====3": "Penis",
    u"c===3": "Penis",
    u"c==3": "Penis",
    u"c====8": "Penis",
    u"c===8": "Penis",
    u"c==8": "Penis",
    u"8====D": "Penis",
    u"8===D": "Penis",
    u"8==D": "Penis",
    u"<3": "Heart",
    u"</3": "Broken_heart",
}
def preprocess_emoticons(s: str) -> str:
    """Turn a human-readable emoticon description into a token string.

    Commas are dropped, spaces and hyphens become underscores, and the
    result is upper-cased and prefixed with ``TOKEN_EMOTICON_``.
    """
    table = str.maketrans({',': None, ' ': '_', '-': '_'})
    normalized = s.translate(table).upper()
    return f'TOKEN_EMOTICON_{normalized}'
EMOTICONS = {k: preprocess_emoticons(v) for k, v in EMOTICONS.items()}
| 37.666667
| 77
| 0.503898
| 1,328
| 9,492
| 3.594127
| 0.121988
| 0.031427
| 0.031427
| 0.037712
| 0.81961
| 0.790069
| 0.777289
| 0.725539
| 0.687199
| 0.650744
| 0
| 0.004614
| 0.223662
| 9,492
| 251
| 78
| 37.816733
| 0.636179
| 0
| 0
| 0.008097
| 0
| 0
| 0.647493
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.004049
| false
| 0
| 0
| 0.004049
| 0.008097
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
911d07cea42a61f93b94516da7a1671bf4bdd92c
| 9,523
|
py
|
Python
|
test_j3.py
|
freedomikeppp/makino-j3
|
c04cd37dc3d445cacee30ce96f732917babb7267
|
[
"Apache-2.0"
] | 5
|
2021-06-19T05:16:33.000Z
|
2022-01-18T14:08:56.000Z
|
test_j3.py
|
suntang3954/makino-j3
|
c04cd37dc3d445cacee30ce96f732917babb7267
|
[
"Apache-2.0"
] | null | null | null |
test_j3.py
|
suntang3954/makino-j3
|
c04cd37dc3d445cacee30ce96f732917babb7267
|
[
"Apache-2.0"
] | 6
|
2019-06-04T00:07:08.000Z
|
2021-11-12T23:32:38.000Z
|
# coding: utf-8
'''
本テストはマキノJ3のFanucテストスクリプトです。
※注意 デバイスの操作によって、物理的な機械が動く可能性があります。
必ず安全を確かめ、テストコード内の操作を理解した上で実行して下さい。
'''
import os
import sys
import unittest
from j3 import J3
class TestJ3(unittest.TestCase):
    """Functional tests for the Makino J3 Fanuc connection.

    NOTE(review): these tests talk to real hardware at a hard-coded IP;
    device writes may physically move the machine — run only when safe.
    """
    @classmethod
    def setUpClass(cls):
        """Called exactly once when the test class is initialized."""
        print('----- TestJ3 start ------')
        cls.j3 = J3.get_connection('192.168.48.*:8193')
    @classmethod
    def tearDownClass(cls):
        """Called exactly once when the test class is released."""
        cls.j3.close()
        print('----- TestJ3 end ------')
    def setUp(self):
        """Always runs before each individual test starts."""
        if not self.j3.is_open():
            self.skipTest('指定されたIPに接続できません。電源が入っていない可能性があります。')
    def test_d_dev_operation(self):
        """Read/write test for D devices."""
        # first. Initialize the devices to zero.
        self.j3.write_dev('D11600', 0)
        self.j3.write_dev('D11601', 0)
        self.j3.write_dev('D11602', 0)
        self.j3.write_dev('D11603', 0)
        self.j3.write_dev('D11604', 0)
        self.assertEqual(self.j3.read_dev('D11600'), 0)
        self.assertEqual(self.j3.read_dev('D11601'), 0)
        self.assertEqual(self.j3.read_dev('D11602'), 0)
        self.assertEqual(self.j3.read_dev('D11603'), 0)
        self.assertEqual(self.j3.read_dev('D11604'), 0)
        # 1. Write a 1-byte value to D11600 and check D11601 onward are unaffected.
        self.j3.write_dev('D11600', 1)
        self.assertEqual(self.j3.read_dev('D11600'), 1)
        self.assertEqual(self.j3.read_dev('D11601'), 0)
        self.j3.write_dev('D11600', 255)
        self.assertEqual(self.j3.read_dev('D11600'), 255)
        self.assertEqual(self.j3.read_dev('D11601'), 0)
        # 2. Write a value larger than 1 byte to D11600 and check an exception is raised.
        with self.assertRaises(TypeError):
            self.j3.write_dev('D11600', 256)
        # 3. Write a 2-byte value to D11600 and check D11602 onward are unaffected.
        self.j3.write_dev('D11600', 256, size=2)
        self.assertEqual(self.j3.read_dev('D11600', size=2), 256)
        self.assertEqual(self.j3.read_dev('D11600'), 0)
        self.assertEqual(self.j3.read_dev('D11601'), 1)
        self.assertEqual(self.j3.read_dev('D11602'), 0)
        self.assertEqual(self.j3.read_dev('D11603'), 0)
        self.j3.write_dev('D11600', 1000, size=2)
        self.assertEqual(self.j3.read_dev('D11600', size=2), 1000)
        self.assertEqual(self.j3.read_dev('D11600'), 232)
        self.assertEqual(self.j3.read_dev('D11601'), 3)
        self.assertEqual(self.j3.read_dev('D11602'), 0)
        self.assertEqual(self.j3.read_dev('D11603'), 0)
        self.j3.write_dev('D11600', -10, size=2)
        self.assertEqual(self.j3.read_dev('D11600', size=2), -10)
        self.assertEqual(self.j3.read_dev('D11600'), 246)
        self.assertEqual(self.j3.read_dev('D11601'), 255)
        self.assertEqual(self.j3.read_dev('D11602'), 0)
        self.assertEqual(self.j3.read_dev('D11603'), 0)
        # 4. Write a value larger than 2 bytes to D11600 and check an exception is raised.
        with self.assertRaises(TypeError):
            self.j3.write_dev('D11600', 65537)
        # 5. Write a 4-byte value to D11600 and check D11602 onward are unaffected.
        self.j3.write_dev('D11600', 65537, size=4)
        self.assertEqual(self.j3.read_dev('D11600', size=4), 65537)
        self.assertEqual(self.j3.read_dev('D11600'), 1)
        self.assertEqual(self.j3.read_dev('D11601'), 0)
        self.assertEqual(self.j3.read_dev('D11602'), 1)
        self.assertEqual(self.j3.read_dev('D11603'), 0)
        self.assertEqual(self.j3.read_dev('D11604'), 0)
        self.j3.write_dev('D11600', 16777472, size=4)
        self.assertEqual(self.j3.read_dev('D11600', size=4), 16777472)
        self.assertEqual(self.j3.read_dev('D11600'), 0)
        self.assertEqual(self.j3.read_dev('D11601'), 1)
        self.assertEqual(self.j3.read_dev('D11602'), 0)
        self.assertEqual(self.j3.read_dev('D11603'), 1)
        self.assertEqual(self.j3.read_dev('D11604'), 0)
        self.j3.write_dev('D11600', -10000, size=4)
        self.assertEqual(self.j3.read_dev('D11600', size=4), -10000)
        self.assertEqual(self.j3.read_dev('D11600'), 240)
        self.assertEqual(self.j3.read_dev('D11601'), 216)
        self.assertEqual(self.j3.read_dev('D11602'), 255)
        self.assertEqual(self.j3.read_dev('D11603'), 255)
        self.assertEqual(self.j3.read_dev('D11604'), 0)
        # 6. Write a value larger than 4 bytes to D11600 and check an exception is raised.
        with self.assertRaises(TypeError):
            self.j3.write_dev('D11600', 4294967297)
        # lastly. Zero the devices out again.
        self.j3.write_dev('D11600', 0)
        self.j3.write_dev('D11601', 0)
        self.j3.write_dev('D11602', 0)
        self.j3.write_dev('D11603', 0)
        self.j3.write_dev('D11604', 0)
    def test_r_dev_operation(self):
        """Read/write test for R devices."""
        # first. Initialize the device to zero.
        self.j3.write_dev('R6653', 0)
        self.assertEqual(self.j3.read_dev('R6653.0'), 0)
        self.assertEqual(self.j3.read_dev('R6653.1'), 0)
        self.assertEqual(self.j3.read_dev('R6653.2'), 0)
        self.assertEqual(self.j3.read_dev('R6653.3'), 0)
        self.assertEqual(self.j3.read_dev('R6653.4'), 0)
        self.assertEqual(self.j3.read_dev('R6653.5'), 0)
        self.assertEqual(self.j3.read_dev('R6653.6'), 0)
        self.assertEqual(self.j3.read_dev('R6653.7'), 0)
        self.assertEqual(self.j3.read_dev('R6653'), 0)
        # 1. Write single bits to R6653 via bit offsets and check the other offsets are unaffected.
        self.j3.write_dev('R6653.0',1)
        self.j3.write_dev('R6653.1',0)
        self.j3.write_dev('R6653.2',1)
        self.j3.write_dev('R6653.3',0)
        self.j3.write_dev('R6653.4',1)
        self.j3.write_dev('R6653.5',0)
        self.j3.write_dev('R6653.6',1)
        self.j3.write_dev('R6653.7',0)
        self.assertEqual(self.j3.read_dev('R6653.0'), 1)
        self.assertEqual(self.j3.read_dev('R6653.1'), 0)
        self.assertEqual(self.j3.read_dev('R6653.2'), 1)
        self.assertEqual(self.j3.read_dev('R6653.3'), 0)
        self.assertEqual(self.j3.read_dev('R6653.4'), 1)
        self.assertEqual(self.j3.read_dev('R6653.5'), 0)
        self.assertEqual(self.j3.read_dev('R6653.6'), 1)
        self.assertEqual(self.j3.read_dev('R6653.7'), 0)
        self.assertEqual(self.j3.read_dev('R6653'), 85)
        self.j3.write_dev('R6653.0',0)
        self.j3.write_dev('R6653.1',1)
        self.j3.write_dev('R6653.2',0)
        self.j3.write_dev('R6653.3',1)
        self.j3.write_dev('R6653.4',0)
        self.j3.write_dev('R6653.5',1)
        self.j3.write_dev('R6653.6',0)
        self.j3.write_dev('R6653.7',1)
        self.assertEqual(self.j3.read_dev('R6653.0'), 0)
        self.assertEqual(self.j3.read_dev('R6653.1'), 1)
        self.assertEqual(self.j3.read_dev('R6653.2'), 0)
        self.assertEqual(self.j3.read_dev('R6653.3'), 1)
        self.assertEqual(self.j3.read_dev('R6653.4'), 0)
        self.assertEqual(self.j3.read_dev('R6653.5'), 1)
        self.assertEqual(self.j3.read_dev('R6653.6'), 0)
        self.assertEqual(self.j3.read_dev('R6653.7'), 1)
        self.assertEqual(self.j3.read_dev('R6653'), 170)
        self.j3.write_dev('R6653.0',1)
        self.j3.write_dev('R6653.1',1)
        self.j3.write_dev('R6653.2',1)
        self.j3.write_dev('R6653.3',1)
        self.j3.write_dev('R6653.4',1)
        self.j3.write_dev('R6653.5',1)
        self.j3.write_dev('R6653.6',1)
        self.j3.write_dev('R6653.7',1)
        self.assertEqual(self.j3.read_dev('R6653.0'), 1)
        self.assertEqual(self.j3.read_dev('R6653.1'), 1)
        self.assertEqual(self.j3.read_dev('R6653.2'), 1)
        self.assertEqual(self.j3.read_dev('R6653.3'), 1)
        self.assertEqual(self.j3.read_dev('R6653.4'), 1)
        self.assertEqual(self.j3.read_dev('R6653.5'), 1)
        self.assertEqual(self.j3.read_dev('R6653.6'), 1)
        self.assertEqual(self.j3.read_dev('R6653.7'), 1)
        self.assertEqual(self.j3.read_dev('R6653'), 255)
        # 2. Write a byte value to R6653 and check it matches the bit offsets.
        self.j3.write_dev('R6653',240)
        self.assertEqual(self.j3.read_dev('R6653.0'), 0)
        self.assertEqual(self.j3.read_dev('R6653.1'), 0)
        self.assertEqual(self.j3.read_dev('R6653.2'), 0)
        self.assertEqual(self.j3.read_dev('R6653.3'), 0)
        self.assertEqual(self.j3.read_dev('R6653.4'), 1)
        self.assertEqual(self.j3.read_dev('R6653.5'), 1)
        self.assertEqual(self.j3.read_dev('R6653.6'), 1)
        self.assertEqual(self.j3.read_dev('R6653.7'), 1)
        self.assertEqual(self.j3.read_dev('R6653'), 240)
        # lastly. Zero the device out again.
        self.j3.write_dev('R6653', 0)
    def test_file_operation(self):
        """Machining file operation test."""
        # 1. Write a file, check it exists, read it back, then delete it.
        self.skipTest('不揮発性メモリのため、書き込みに回数制限があり、必要な時以外'\
'(write_file, read_file, exist_file, delete_fileの変更時)はskip。')
        data = b'O8990\nG4 X10.\nM30\n%'
        self.j3.write_file('//CNC_MEM/USER/LIBRARY/O8990', data)
        self.assertEqual(self.j3.exist_file('//CNC_MEM/USER/LIBRARY/O8990'), True)
        self.assertEqual(self.j3.read_file('//CNC_MEM/USER/LIBRARY/O8990'), b'O8990\nG4X10. \nM30')
        self.j3.delete_file('//CNC_MEM/USER/LIBRARY/O8990')
    def test_dir_operation(self):
        """Directory operation test."""
        dir_list = self.j3.find_dir('//CNC_MEM/')
        self.assertIsInstance(dir_list, list)
        for info_map in dir_list:
            self.assertIsInstance(info_map, dict)
            self.assertTrue('type' in info_map)
            self.assertTrue('name' in info_map)
            self.assertTrue('size' in info_map)
            self.assertTrue('comment' in info_map)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 44.293023
| 99
| 0.625013
| 1,335
| 9,523
| 4.326592
| 0.110861
| 0.146468
| 0.292763
| 0.32358
| 0.782029
| 0.760561
| 0.705506
| 0.654259
| 0.646988
| 0.631752
| 0
| 0.153623
| 0.204347
| 9,523
| 214
| 100
| 44.5
| 0.60842
| 0.069936
| 0
| 0.56
| 0
| 0
| 0.14125
| 0.0225
| 0
| 0
| 0
| 0
| 0.56
| 1
| 0.04
| false
| 0
| 0.022857
| 0
| 0.068571
| 0.011429
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
912fd4dde1051fd2ce2da51763ae0b7a9193eefc
| 120
|
py
|
Python
|
mergeit/core/config/__init__.py
|
insolite/mergeit
|
27ca0eacab9b1d2fe6bafe5a43184a80e6169cb5
|
[
"MIT"
] | 2
|
2016-07-04T13:32:30.000Z
|
2016-07-16T02:51:54.000Z
|
mergeit/core/config/__init__.py
|
insolite/mergeit
|
27ca0eacab9b1d2fe6bafe5a43184a80e6169cb5
|
[
"MIT"
] | 1
|
2016-08-06T12:47:28.000Z
|
2016-08-06T12:47:28.000Z
|
mergeit/core/config/__init__.py
|
insolite/mergeit
|
27ca0eacab9b1d2fe6bafe5a43184a80e6169cb5
|
[
"MIT"
] | null | null | null |
from .config import Config
from .config_source import ConfigSource
from .yaml_config_source import YamlFileConfigSource
| 30
| 52
| 0.875
| 15
| 120
| 6.8
| 0.466667
| 0.196078
| 0.352941
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 120
| 3
| 53
| 40
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e68617370c7bc144cde4cc5b1b859aee4c6a339a
| 115
|
py
|
Python
|
coptic/coptic/settings/__init__.py
|
CopticScriptorium/cts
|
7b53da7e6610a0ee4913b94b25af0cf15921af88
|
[
"Apache-2.0"
] | 1
|
2015-03-19T00:45:57.000Z
|
2015-03-19T00:45:57.000Z
|
coptic/coptic/settings/__init__.py
|
CopticScriptorium/cts
|
7b53da7e6610a0ee4913b94b25af0cf15921af88
|
[
"Apache-2.0"
] | 121
|
2015-01-06T18:32:06.000Z
|
2022-02-10T11:55:20.000Z
|
coptic/coptic/settings/__init__.py
|
CopticScriptorium/cts
|
7b53da7e6610a0ee4913b94b25af0cf15921af88
|
[
"Apache-2.0"
] | 4
|
2015-06-24T04:26:30.000Z
|
2019-09-21T03:03:55.000Z
|
from .base import *
from .secrets import *
# uncomment only one of these
from .dev import *
# from .prod import *
| 16.428571
| 29
| 0.704348
| 17
| 115
| 4.764706
| 0.647059
| 0.246914
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208696
| 115
| 6
| 30
| 19.166667
| 0.89011
| 0.408696
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e6974c48761029f4c649afd622b9caef9948f099
| 23
|
py
|
Python
|
app/__init__.py
|
AndrewLester/freedomap
|
8cb2142f8a46d800fad25fb22e01f5b5a164ce96
|
[
"MIT"
] | 6
|
2021-02-17T03:23:18.000Z
|
2021-04-09T14:35:42.000Z
|
app/__init__.py
|
AndrewLester/freedomap
|
8cb2142f8a46d800fad25fb22e01f5b5a164ce96
|
[
"MIT"
] | 6
|
2021-03-10T04:04:40.000Z
|
2021-12-17T08:13:45.000Z
|
app/__init__.py
|
AndrewLester/freedomap
|
8cb2142f8a46d800fad25fb22e01f5b5a164ce96
|
[
"MIT"
] | null | null | null |
from app.exts import *
| 11.5
| 22
| 0.73913
| 4
| 23
| 4.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e6a54815fa8763bcba84cf53bc9cd13e980c3afd
| 166
|
py
|
Python
|
tests/test_007.py
|
chingc/euler-python
|
e963752969cfa7a939ef6a8408f5628ce3c96cae
|
[
"MIT"
] | null | null | null |
tests/test_007.py
|
chingc/euler-python
|
e963752969cfa7a939ef6a8408f5628ce3c96cae
|
[
"MIT"
] | null | null | null |
tests/test_007.py
|
chingc/euler-python
|
e963752969cfa7a939ef6a8408f5628ce3c96cae
|
[
"MIT"
] | null | null | null |
"""https://projecteuler.net/problem=7"""
from euler.main import nth_prime
def test_007() -> None:
    """Check Project Euler problem 7: the 10001st prime is 104743."""
    expected = 104743
    assert nth_prime(10001) == expected
| 18.444444
| 40
| 0.668675
| 22
| 166
| 4.909091
| 0.863636
| 0.148148
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15
| 0.156627
| 166
| 8
| 41
| 20.75
| 0.621429
| 0.307229
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e6ccc01e5d94f74d784d057a264d2c901bed9994
| 82
|
py
|
Python
|
sssorm/__init__.py
|
troxellophilus/sssorm
|
07192c16f2cb0f5ed254f3864e414abcd80025d8
|
[
"MIT"
] | null | null | null |
sssorm/__init__.py
|
troxellophilus/sssorm
|
07192c16f2cb0f5ed254f3864e414abcd80025d8
|
[
"MIT"
] | null | null | null |
sssorm/__init__.py
|
troxellophilus/sssorm
|
07192c16f2cb0f5ed254f3864e414abcd80025d8
|
[
"MIT"
] | null | null | null |
from sssorm.helper import cursor
from sssorm.model import Model, connect_database
| 27.333333
| 48
| 0.853659
| 12
| 82
| 5.75
| 0.666667
| 0.289855
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109756
| 82
| 2
| 49
| 41
| 0.945205
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fc118bbcbf17acad69da9834d6db9d0a7711e86b
| 22
|
py
|
Python
|
src/plotioda/io/__init__.py
|
CoryMartin-NOAA/plotioda
|
e1c13e2dbcc393d9dd35687d3f7a18320b51c319
|
[
"Apache-2.0"
] | null | null | null |
src/plotioda/io/__init__.py
|
CoryMartin-NOAA/plotioda
|
e1c13e2dbcc393d9dd35687d3f7a18320b51c319
|
[
"Apache-2.0"
] | 5
|
2021-11-19T21:20:30.000Z
|
2021-12-08T17:43:52.000Z
|
src/plotioda/io/__init__.py
|
CoryMartin-NOAA/plotioda
|
e1c13e2dbcc393d9dd35687d3f7a18320b51c319
|
[
"Apache-2.0"
] | null | null | null |
from .iodaio import *
| 11
| 21
| 0.727273
| 3
| 22
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.181818
| 22
| 1
| 22
| 22
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fc1e2a67fa9c99daf9e61651418e9b96ef1bcd7e
| 52
|
py
|
Python
|
andromeda/modules/loans/serializers/__init__.py
|
sango09/andromeda_api_rest
|
b4a3267146f4f9a985fb3f512e652d4ff354bba2
|
[
"MIT"
] | 1
|
2021-09-08T18:58:16.000Z
|
2021-09-08T18:58:16.000Z
|
andromeda/modules/loans/serializers/__init__.py
|
sango09/andromeda_api_rest
|
b4a3267146f4f9a985fb3f512e652d4ff354bba2
|
[
"MIT"
] | null | null | null |
andromeda/modules/loans/serializers/__init__.py
|
sango09/andromeda_api_rest
|
b4a3267146f4f9a985fb3f512e652d4ff354bba2
|
[
"MIT"
] | null | null | null |
from .loans import *
from .inventory_loans import *
| 17.333333
| 30
| 0.769231
| 7
| 52
| 5.571429
| 0.571429
| 0.564103
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 52
| 2
| 31
| 26
| 0.886364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fc2ae1fc4aac003817870db4be601e5b2c99dcae
| 136
|
py
|
Python
|
API/models/__init__.py
|
garima-mahato/EVA4
|
e8efe40a4de2d5da3a04314f52a02610ecde16f1
|
[
"MIT"
] | null | null | null |
API/models/__init__.py
|
garima-mahato/EVA4
|
e8efe40a4de2d5da3a04314f52a02610ecde16f1
|
[
"MIT"
] | null | null | null |
API/models/__init__.py
|
garima-mahato/EVA4
|
e8efe40a4de2d5da3a04314f52a02610ecde16f1
|
[
"MIT"
] | 1
|
2020-05-02T17:15:22.000Z
|
2020-05-02T17:15:22.000Z
|
from .my_custom_model import *
from .resnet import *
from .QuizDNN import *
from .CustomResNet import *
from .CustomResidualNet import *
| 27.2
| 32
| 0.786765
| 17
| 136
| 6.176471
| 0.529412
| 0.380952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139706
| 136
| 5
| 32
| 27.2
| 0.897436
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fc69fd0a9ad443c48f9a496d4fe122891e04c770
| 506
|
py
|
Python
|
RecoMuon/CosmicMuonProducer/python/globalCosmicMuons_cff.py
|
samarendran23/cmssw
|
849dd9897db9b894ca83e1b630a3c1eecafd6097
|
[
"Apache-2.0"
] | 6
|
2017-09-08T14:12:56.000Z
|
2022-03-09T23:57:01.000Z
|
RecoMuon/CosmicMuonProducer/python/globalCosmicMuons_cff.py
|
samarendran23/cmssw
|
849dd9897db9b894ca83e1b630a3c1eecafd6097
|
[
"Apache-2.0"
] | 545
|
2017-09-19T17:10:19.000Z
|
2022-03-07T16:55:27.000Z
|
RecoMuon/CosmicMuonProducer/python/globalCosmicMuons_cff.py
|
p2l1pfp/cmssw
|
9bda22bf33ecf18dd19a3af2b3a8cbdb1de556a9
|
[
"Apache-2.0"
] | 14
|
2017-10-04T09:47:21.000Z
|
2019-10-23T18:04:45.000Z
|
import FWCore.ParameterSet.Config as cms
from RecoMuon.TransientTrackingRecHit.MuonTransientTrackingRecHitBuilder_cfi import *
from RecoTracker.TransientTrackingRecHit.TransientTrackingRecHitBuilder_cfi import *
from RecoLocalTracker.SiStripRecHitConverter.StripCPEfromTrackAngle_cfi import *
from RecoLocalTracker.SiPixelRecHits.PixelCPEParmError_cfi import *
from RecoLocalTracker.SiStripRecHitConverter.SiStripRecHitMatcher_cfi import *
from RecoMuon.CosmicMuonProducer.globalCosmicMuons_cfi import *
| 50.6
| 85
| 0.901186
| 42
| 506
| 10.714286
| 0.5
| 0.12
| 0.144444
| 0.193333
| 0.226667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.059289
| 506
| 9
| 86
| 56.222222
| 0.945378
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fc90dbc440aea4f45579d656a127cd657d2628be
| 14,737
|
py
|
Python
|
tests/func/test_monitoring.py
|
madtibo/temboard-agent
|
c63b82e243c9155e1878ba94e747b51761d62138
|
[
"PostgreSQL"
] | null | null | null |
tests/func/test_monitoring.py
|
madtibo/temboard-agent
|
c63b82e243c9155e1878ba94e747b51761d62138
|
[
"PostgreSQL"
] | null | null | null |
tests/func/test_monitoring.py
|
madtibo/temboard-agent
|
c63b82e243c9155e1878ba94e747b51761d62138
|
[
"PostgreSQL"
] | null | null | null |
import json
import os
import sys
from urllib2 import HTTPError
from test.temboard import temboard_request
from conftest import ENV
# Import spc
tbda_dir = os.path.realpath(os.path.join(__file__, '..', '..'))
if tbda_dir not in sys.path:
sys.path.insert(0, tbda_dir)
from temboardagent.spc import connector, error # noqa
XSESSION = ''
class TestMonitoring:
def _temboard_login(self):
(status, res) = temboard_request(
ENV['agent']['ssl_cert_file'],
method='POST',
url='https://%s:%s/login' % (
ENV['agent']['host'],
ENV['agent']['port']
),
headers={"Content-type": "application/json"},
data={
'username': ENV['agent']['user'],
'password': ENV['agent']['password']
}
)
return json.loads(res)['session']
def test_00_env_pg(self):
"""
[administration] 00: PostgreSQL instance is up & running
"""
conn = connector(
host=ENV['pg']['socket_dir'],
port=ENV['pg']['port'],
user=ENV['pg']['user'],
password=ENV['pg']['password'],
database='postgres'
)
try:
conn.connect()
conn.close()
global XSESSION
XSESSION = self._temboard_login()
assert True
except error:
assert False
def test_01_monitoring_session(self):
"""
[monitoring] 01: GET /monitoring/probe/sessions : Check HTTP code returned is 200
""" # noqa
status = 0
try:
(status, res) = temboard_request(
ENV['agent']['ssl_cert_file'],
method='GET',
url='https://%s:%s/monitoring/probe/sessions' % (
ENV['agent']['host'],
ENV['agent']['port']
),
headers={
"Content-type": "application/json",
"X-Session": XSESSION
}
)
except HTTPError as e:
status = e.code
assert status == 200
def test_02_monitoring_xacts(self):
"""
[monitoring] 02: GET /monitoring/probe/xacts : Check HTTP code returned is 200
""" # noqa
status = 0
try:
(status, res) = temboard_request(
ENV['agent']['ssl_cert_file'],
method='GET',
url='https://%s:%s/monitoring/probe/xacts' % (
ENV['agent']['host'],
ENV['agent']['port']
),
headers={
"Content-type": "application/json",
"X-Session": XSESSION
}
)
except HTTPError as e:
status = e.code
assert status == 200
def test_03_monitoring_locks(self):
"""
[monitoring] 03: GET /monitoring/probe/locks : Check HTTP code returned is 200
""" # noqa
status = 0
try:
(status, res) = temboard_request(
ENV['agent']['ssl_cert_file'],
method='GET',
url='https://%s:%s/monitoring/probe/locks' % (
ENV['agent']['host'],
ENV['agent']['port']
),
headers={
"Content-type": "application/json",
"X-Session": XSESSION
}
)
except HTTPError as e:
status = e.code
assert status == 200
def test_04_monitoring_blocks(self):
"""
[monitoring] 04: GET /monitoring/probe/blocks : Check HTTP code returned is 200
""" # noqa
status = 0
try:
(status, res) = temboard_request(
ENV['agent']['ssl_cert_file'],
method='GET',
url='https://%s:%s/monitoring/probe/blocks' % (
ENV['agent']['host'],
ENV['agent']['port']
),
headers={
"Content-type": "application/json",
"X-Session": XSESSION
}
)
except HTTPError as e:
status = e.code
assert status == 200
def test_05_monitoring_bgwriter(self):
    """
    [monitoring] 05: GET /monitoring/probe/bgwriter : Check HTTP code returned is 200
    """  # noqa
    probe_url = 'https://%s:%s/monitoring/probe/bgwriter' % (
        ENV['agent']['host'],
        ENV['agent']['port']
    )
    req_headers = {
        "Content-type": "application/json",
        "X-Session": XSESSION
    }
    status = 0
    try:
        status, _ = temboard_request(
            ENV['agent']['ssl_cert_file'],
            method='GET',
            url=probe_url,
            headers=req_headers
        )
    except HTTPError as err:
        status = err.code
    assert status == 200
def test_06_monitoring_db_size(self):
    """
    [monitoring] 06: GET /monitoring/probe/db_size : Check HTTP code returned is 200
    """  # noqa
    probe_url = 'https://%s:%s/monitoring/probe/db_size' % (
        ENV['agent']['host'],
        ENV['agent']['port']
    )
    req_headers = {
        "Content-type": "application/json",
        "X-Session": XSESSION
    }
    status = 0
    try:
        status, _ = temboard_request(
            ENV['agent']['ssl_cert_file'],
            method='GET',
            url=probe_url,
            headers=req_headers
        )
    except HTTPError as err:
        status = err.code
    assert status == 200
def test_07_monitoring_tblspc_size(self):
    """
    [monitoring] 07: GET /monitoring/probe/tblspc_size : Check HTTP code returned is 200
    """  # noqa
    probe_url = 'https://%s:%s/monitoring/probe/tblspc_size' % (
        ENV['agent']['host'],
        ENV['agent']['port']
    )
    req_headers = {
        "Content-type": "application/json",
        "X-Session": XSESSION
    }
    status = 0
    try:
        status, _ = temboard_request(
            ENV['agent']['ssl_cert_file'],
            method='GET',
            url=probe_url,
            headers=req_headers
        )
    except HTTPError as err:
        status = err.code
    assert status == 200
def test_08_monitoring_filesystems_size(self):
    """
    [monitoring] 08: GET /monitoring/probe/filesystems_size : Check HTTP code returned is 200
    """  # noqa
    probe_url = 'https://%s:%s/monitoring/probe/filesystems_size' % (
        ENV['agent']['host'],
        ENV['agent']['port']
    )
    req_headers = {
        "Content-type": "application/json",
        "X-Session": XSESSION
    }
    status = 0
    try:
        status, _ = temboard_request(
            ENV['agent']['ssl_cert_file'],
            method='GET',
            url=probe_url,
            headers=req_headers
        )
    except HTTPError as err:
        status = err.code
    assert status == 200
def test_09_monitoring_cpu(self):
    """
    [monitoring] 09: GET /monitoring/probe/cpu : Check HTTP code returned is 200
    """  # noqa
    probe_url = 'https://%s:%s/monitoring/probe/cpu' % (
        ENV['agent']['host'],
        ENV['agent']['port']
    )
    req_headers = {
        "Content-type": "application/json",
        "X-Session": XSESSION
    }
    status = 0
    try:
        status, _ = temboard_request(
            ENV['agent']['ssl_cert_file'],
            method='GET',
            url=probe_url,
            headers=req_headers
        )
    except HTTPError as err:
        status = err.code
    assert status == 200
def test_10_monitoring_process(self):
    """
    [monitoring] 10: GET /monitoring/probe/process : Check HTTP code returned is 200
    """  # noqa
    probe_url = 'https://%s:%s/monitoring/probe/process' % (
        ENV['agent']['host'],
        ENV['agent']['port']
    )
    req_headers = {
        "Content-type": "application/json",
        "X-Session": XSESSION
    }
    status = 0
    try:
        status, _ = temboard_request(
            ENV['agent']['ssl_cert_file'],
            method='GET',
            url=probe_url,
            headers=req_headers
        )
    except HTTPError as err:
        status = err.code
    assert status == 200
def test_11_monitoring_memory(self):
    """
    [monitoring] 11: GET /monitoring/probe/memory : Check HTTP code returned is 200
    """  # noqa
    probe_url = 'https://%s:%s/monitoring/probe/memory' % (
        ENV['agent']['host'],
        ENV['agent']['port']
    )
    req_headers = {
        "Content-type": "application/json",
        "X-Session": XSESSION
    }
    status = 0
    try:
        status, _ = temboard_request(
            ENV['agent']['ssl_cert_file'],
            method='GET',
            url=probe_url,
            headers=req_headers
        )
    except HTTPError as err:
        status = err.code
    assert status == 200
def test_12_monitoring_loadavg(self):
    """
    [monitoring] 12: GET /monitoring/probe/loadavg : Check HTTP code returned is 200
    """  # noqa
    probe_url = 'https://%s:%s/monitoring/probe/loadavg' % (
        ENV['agent']['host'],
        ENV['agent']['port']
    )
    req_headers = {
        "Content-type": "application/json",
        "X-Session": XSESSION
    }
    status = 0
    try:
        status, _ = temboard_request(
            ENV['agent']['ssl_cert_file'],
            method='GET',
            url=probe_url,
            headers=req_headers
        )
    except HTTPError as err:
        status = err.code
    assert status == 200
def test_13_monitoring_wal_files(self):
    """
    [monitoring] 13: GET /monitoring/probe/wal_files : Check HTTP code returned is 200
    """  # noqa
    probe_url = 'https://%s:%s/monitoring/probe/wal_files' % (
        ENV['agent']['host'],
        ENV['agent']['port']
    )
    req_headers = {
        "Content-type": "application/json",
        "X-Session": XSESSION
    }
    status = 0
    try:
        status, _ = temboard_request(
            ENV['agent']['ssl_cert_file'],
            method='GET',
            url=probe_url,
            headers=req_headers
        )
    except HTTPError as err:
        status = err.code
    assert status == 200
def test_14_monitoring_replication_lag(self):
    """
    [monitoring] 14: GET /monitoring/probe/replication_lag : Check HTTP code returned is 200
    """  # noqa
    probe_url = 'https://%s:%s/monitoring/probe/replication_lag' % (
        ENV['agent']['host'],
        ENV['agent']['port']
    )
    req_headers = {
        "Content-type": "application/json",
        "X-Session": XSESSION
    }
    status = 0
    try:
        status, _ = temboard_request(
            ENV['agent']['ssl_cert_file'],
            method='GET',
            url=probe_url,
            headers=req_headers
        )
    except HTTPError as err:
        status = err.code
    assert status == 200
def test_15_monitoring_temp_files_size_delta(self):
    """
    [monitoring] 15: GET /monitoring/probe/temp_files_size_delta : Check HTTP code returned is 200
    """  # noqa
    probe_url = 'https://%s:%s/monitoring/probe/temp_files_size_delta' % (
        ENV['agent']['host'],
        ENV['agent']['port']
    )
    req_headers = {
        "Content-type": "application/json",
        "X-Session": XSESSION
    }
    status = 0
    try:
        status, _ = temboard_request(
            ENV['agent']['ssl_cert_file'],
            method='GET',
            url=probe_url,
            headers=req_headers
        )
    except HTTPError as err:
        status = err.code
    assert status == 200
def test_16_monitoring_replication_connection(self):
    """
    [monitoring] 16: GET /monitoring/probe/replication_connection : Check HTTP code returned is 200
    """  # noqa
    probe_url = 'https://%s:%s/monitoring/probe/replication_connection' % (
        ENV['agent']['host'],
        ENV['agent']['port']
    )
    req_headers = {
        "Content-type": "application/json",
        "X-Session": XSESSION
    }
    status = 0
    try:
        status, _ = temboard_request(
            ENV['agent']['ssl_cert_file'],
            method='GET',
            url=probe_url,
            headers=req_headers
        )
    except HTTPError as err:
        status = err.code
    assert status == 200
def test_17_monitoring_heap_bloat(self):
    """
    [monitoring] 17: GET /monitoring/probe/heap_bloat : Check HTTP code returned is 200
    """  # noqa
    probe_url = 'https://%s:%s/monitoring/probe/heap_bloat' % (
        ENV['agent']['host'],
        ENV['agent']['port']
    )
    req_headers = {
        "Content-type": "application/json",
        "X-Session": XSESSION
    }
    status = 0
    try:
        status, _ = temboard_request(
            ENV['agent']['ssl_cert_file'],
            method='GET',
            url=probe_url,
            headers=req_headers
        )
    except HTTPError as err:
        status = err.code
    assert status == 200
def test_18_monitoring_btree_bloat(self):
    """
    [monitoring] 18: GET /monitoring/probe/btree_bloat : Check HTTP code returned is 200
    """  # noqa
    probe_url = 'https://%s:%s/monitoring/probe/btree_bloat' % (
        ENV['agent']['host'],
        ENV['agent']['port']
    )
    req_headers = {
        "Content-type": "application/json",
        "X-Session": XSESSION
    }
    status = 0
    try:
        status, _ = temboard_request(
            ENV['agent']['ssl_cert_file'],
            method='GET',
            url=probe_url,
            headers=req_headers
        )
    except HTTPError as err:
        status = err.code
    assert status == 200
| 32.603982
| 103
| 0.445138
| 1,335
| 14,737
| 4.794007
| 0.096629
| 0.07375
| 0.050469
| 0.07125
| 0.711563
| 0.711563
| 0.703594
| 0.703594
| 0.703594
| 0.703594
| 0
| 0.024283
| 0.429938
| 14,737
| 451
| 104
| 32.676275
| 0.737531
| 0.113456
| 0
| 0.702997
| 0
| 0
| 0.180917
| 0
| 0
| 0
| 0
| 0
| 0.054496
| 1
| 0.054496
| false
| 0.00545
| 0.019074
| 0
| 0.079019
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
fc99d82e2976159b0941ee645f72eeee37f261c9
| 20,634
|
py
|
Python
|
generated-libraries/python/netapp/name_mapping/__init__.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/netapp/name_mapping/__init__.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/netapp/name_mapping/__init__.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
from netapp.connection import NaConnection
from unix_user_info import UnixUserInfo # 5 properties
from name_mapping_direction import NameMappingDirection # 0 properties
from name_mapping_info import NameMappingInfo # 5 properties
from unix_group_info import UnixGroupInfo # 4 properties
from name_mapping_unix_user_get_iter_key_td import NameMappingUnixUserGetIterKeyTd # 2 properties
from name_mapping_unix_group_get_iter_key_td import NameMappingUnixGroupGetIterKeyTd # 2 properties
from unix_user_name import UnixUserName # 1 properties
from name_mapping_get_iter_key_td import NameMappingGetIterKeyTd # 3 properties
class NameMappingConnection(NaConnection):
    """ONTAP "name-mapping" API wrapper.

    Auto-generated facade over ``NaConnection.request()``: every method
    packs its arguments into ``[value, wire-element-name, [type, type-name],
    flag]`` entries and names the output attributes it expects back.
    NOTE(review): ``basestring`` is referenced throughout, so this module is
    Python 2 only.
    """

    def name_mapping_delete(self, position, direction):
        """
        Delete an existing name mapping entry.

        :param position: Position of an existing name mapping in the list of name mappings
            for this Vserver.
        :param direction: Direction in which the name mapping is applied.
            Possible values:
            <ul>
            <li> "krb_unix" - Kerberos principal name to UNIX user name
            mapping,
            <li> "win_unix" - Windows user name to UNIX user name
            mapping,
            <li> "unix_win" - UNIX user name to Windows user name mapping
            </ul>
        """
        return self.request( "name-mapping-delete", {
            'position': [ position, 'position', [ int, 'None' ], False ],
            'direction': [ direction, 'direction', [ basestring, 'name-mapping-direction' ], False ],
        }, {
        } )

    def name_mapping_unix_group_modify(self, group_name, group_id=None):
        """
        Modify the attributes of a UNIX group.

        :param group_name: Specifies UNIX group name.
        :param group_id: Specifies an identification number for the UNIX group.
        """
        return self.request( "name-mapping-unix-group-modify", {
            'group_name': [ group_name, 'group-name', [ basestring, 'None' ], False ],
            'group_id': [ group_id, 'group-id', [ int, 'None' ], False ],
        }, {
        } )

    def name_mapping_insert(self, position, direction, replacement, pattern):
        """
        Insert a name mapping into the table at a specified position.

        :param position: Position within the set of name mappings that this new mapping
            will take. If a mapping already exists at this position, it will
            be moved to the next position in the list.
        :param direction: Direction of the name mapping to be inserted.
            Possible values:
            <ul>
            <li> "krb_unix" - Kerberos principal name to UNIX user name
            mapping,
            <li> "win_unix" - Windows user name to UNIX user name
            mapping,
            <li> "unix_win" - UNIX user name to Windows user name mapping
            </ul>
        :param replacement: The name that is to be used as a replacement if the pattern
            associated with this entry matches. The replacement may be a
            string containing escape sequences representing subexpressions
            from the pattern, as in the UNIX 'sed' program.
        :param pattern: Pattern to use to match the name while searching for a name that
            can be used as a replacement. The pattern is a UNIX-style regular
            expression. Regular expressions are case-insensitive when mapping
            from Windows to UNIX, and they are case-sensitive for mappings
            from Kerberos to UNIX and UNIX to Windows.
        """
        return self.request( "name-mapping-insert", {
            'position': [ position, 'position', [ int, 'None' ], False ],
            'direction': [ direction, 'direction', [ basestring, 'name-mapping-direction' ], False ],
            'replacement': [ replacement, 'replacement', [ basestring, 'None' ], False ],
            'pattern': [ pattern, 'pattern', [ basestring, 'None' ], False ],
        }, {
        } )

    def name_mapping_unix_user_destroy(self, user_name):
        """
        Destroy an existing UNIX user.

        :param user_name: Specifies user's UNIX account name.
        """
        return self.request( "name-mapping-unix-user-destroy", {
            'user_name': [ user_name, 'user-name', [ basestring, 'None' ], False ],
        }, {
        } )

    def name_mapping_unix_group_get(self, group_name, desired_attributes=None):
        """
        Get the attributes of a UNIX group.

        :param group_name: Specifies UNIX group name.
        :param desired_attributes: Specify the attributes that should be returned.
            If not present, all attributes for which information is available
            will be returned.
            If present, only the desired attributes for which information is
            available will be returned.
        """
        return self.request( "name-mapping-unix-group-get", {
            'group_name': [ group_name, 'group-name', [ basestring, 'None' ], False ],
            'desired_attributes': [ desired_attributes, 'desired-attributes', [ UnixGroupInfo, 'None' ], False ],
        }, {
            'attributes': [ UnixGroupInfo, False ],
        } )

    def name_mapping_unix_group_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
        """
        Iterate over a list of UNIX groups.

        :param max_records: The maximum number of records to return in this call.
            Default: 20
        :param query: A query that specifies which objects to return.
            A query could be specified on any number of attributes in the
            UNIX group information object.
            All UNIX group information objects matching this query up to
            'max-records' will be returned.
        :param tag: Specify the tag from the last call.
            It is usually not specified for the first call. For subsequent
            calls, copy values from the 'next-tag' obtained from the previous
            call.
        :param desired_attributes: Specify the attributes that should be returned.
            If not present, all attributes for which information is available
            will be returned.
            If present, only the desired attributes for which information is
            available will be returned.
        """
        return self.request( "name-mapping-unix-group-get-iter", {
            'max_records': max_records,
            'query': [ query, 'query', [ UnixGroupInfo, 'None' ], False ],
            'tag': tag,
            'desired_attributes': [ desired_attributes, 'desired-attributes', [ UnixGroupInfo, 'None' ], False ],
        }, {
            'attributes-list': [ UnixGroupInfo, True ],
        } )

    def name_mapping_modify(self, position, direction, replacement=None, pattern=None):
        """
        Modify an existing name mapping entry.

        :param position: Position of an existing name mapping in the list of name mappings
            for this Vserver.
        :param direction: Direction in which the name mapping is applied.
            Possible values:
            <ul>
            <li> "krb_unix" - Kerberos principal name to UNIX user name
            mapping,
            <li> "win_unix" - Windows user name to UNIX user name
            mapping,
            <li> "unix_win" - UNIX user name to Windows user name mapping
            </ul>
        :param replacement: The name that is to be used as a replacement if the pattern
            associated with this entry matches. The replacement is a string
            containing escape sequences representing subexpressions from the
            pattern, as in the UNIX 'sed' program.
        :param pattern: Pattern to use to match the name while searching for a name that
            can be used as a replacement. The pattern is a UNIX-style regular
            expression. Regular expressions are case-insensitive when mapping
            from Windows to UNIX, and they are case-sensitive for mappings
            from Kerberos to UNIX and UNIX to Windows.
        """
        return self.request( "name-mapping-modify", {
            'position': [ position, 'position', [ int, 'None' ], False ],
            'direction': [ direction, 'direction', [ basestring, 'name-mapping-direction' ], False ],
            'replacement': [ replacement, 'replacement', [ basestring, 'None' ], False ],
            'pattern': [ pattern, 'pattern', [ basestring, 'None' ], False ],
        }, {
        } )

    def name_mapping_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
        """
        Retrieve the list of name mappings in the cluster.

        :param max_records: The maximum number of records to return in this call.
            Default: 20
        :param query: A query that specifies which objects to return.
            A query could be specified on any number of attributes in the
            name-mapping object.
            All name-mapping objects matching this query up to 'max-records'
            will be returned.
        :param tag: Specify the tag from the last call.
            It is usually not specified for the first call. For subsequent
            calls, copy values from the 'next-tag' obtained from the previous
            call.
        :param desired_attributes: Specify the attributes that should be returned.
            If not present, all attributes for which information is available
            will be returned.
            If present, only the desired attributes for which information is
            available will be returned.
        """
        return self.request( "name-mapping-get-iter", {
            'max_records': max_records,
            'query': [ query, 'query', [ NameMappingInfo, 'None' ], False ],
            'tag': tag,
            'desired_attributes': [ desired_attributes, 'desired-attributes', [ NameMappingInfo, 'None' ], False ],
        }, {
            'attributes-list': [ NameMappingInfo, True ],
        } )

    def name_mapping_unix_user_modify(self, user_name, full_name=None, user_id=None, group_id=None):
        """
        Modify the attributes of a UNIX user.

        :param user_name: Specifies user's UNIX account name.
        :param full_name: Specifies the full name of the UNIX user.
        :param user_id: Specifies an identification number for the UNIX user.
        :param group_id: Specifies the primary group identification number for the UNIX
            user.
        """
        return self.request( "name-mapping-unix-user-modify", {
            'full_name': [ full_name, 'full-name', [ basestring, 'None' ], False ],
            'user_name': [ user_name, 'user-name', [ basestring, 'None' ], False ],
            'user_id': [ user_id, 'user-id', [ int, 'None' ], False ],
            'group_id': [ group_id, 'group-id', [ int, 'None' ], False ],
        }, {
        } )

    def name_mapping_unix_user_get(self, user_name, desired_attributes=None):
        """
        Get the attributes of a UNIX user.

        :param user_name: Specifies user's UNIX account name.
        :param desired_attributes: Specify the attributes that should be returned.
            If not present, all attributes for which information is available
            will be returned.
            If present, only the desired attributes for which information is
            available will be returned.
        """
        return self.request( "name-mapping-unix-user-get", {
            'user_name': [ user_name, 'user-name', [ basestring, 'None' ], False ],
            'desired_attributes': [ desired_attributes, 'desired-attributes', [ UnixUserInfo, 'None' ], False ],
        }, {
            'attributes': [ UnixUserInfo, False ],
        } )

    def name_mapping_unix_group_create(self, group_name, group_id, return_record=None):
        """
        Create a new UNIX group.

        :param group_name: Specifies UNIX group name.
        :param group_id: Specifies an identification number for the UNIX group.
        :param return_record: If set to true, returns the UNIX group information on successful
            creation.
            Default: false
        """
        return self.request( "name-mapping-unix-group-create", {
            'group_name': [ group_name, 'group-name', [ basestring, 'None' ], False ],
            'group_id': [ group_id, 'group-id', [ int, 'None' ], False ],
            'return_record': [ return_record, 'return-record', [ bool, 'None' ], False ],
        }, {
            'result': [ UnixGroupInfo, False ],
        } )

    def name_mapping_unix_group_delete_user(self, group_name, user_name):
        """
        Delete a user from a UNIX group

        :param group_name: Specifies UNIX group name.
        :param user_name: Specifies user's UNIX account name.
        """
        return self.request( "name-mapping-unix-group-delete-user", {
            'group_name': [ group_name, 'group-name', [ basestring, 'None' ], False ],
            'user_name': [ user_name, 'user-name', [ basestring, 'None' ], False ],
        }, {
        } )

    def name_mapping_create(self, position, direction, replacement, pattern, return_record=None):
        """
        Create a new name mapping for a Vserver.

        :param position: Position of an existing name mapping in the list of name mappings
            for this Vserver.
        :param direction: Direction in which the name mapping is applied.
            Possible values:
            <ul>
            <li> "krb_unix" - Kerberos principal name to UNIX user name
            mapping,
            <li> "win_unix" - Windows user name to UNIX user name
            mapping,
            <li> "unix_win" - UNIX user name to Windows user name mapping
            </ul>
        :param replacement: The name that is to be used as a replacement if the pattern
            associated with this entry matches. The replacement is a string
            containing escape sequences representing subexpressions from the
            pattern, as in the UNIX 'sed' program.
        :param pattern: Pattern to use to match the name while searching for a name that
            can be used as a replacement. The pattern is a UNIX-style regular
            expression. Regular expressions are case-insensitive when mapping
            from Windows to UNIX, and they are case-sensitive for mappings
            from Kerberos to UNIX and UNIX to Windows.
        :param return_record: If set to true, returns the name-mapping on successful creation.
            Default: false
        """
        return self.request( "name-mapping-create", {
            'position': [ position, 'position', [ int, 'None' ], False ],
            'direction': [ direction, 'direction', [ basestring, 'name-mapping-direction' ], False ],
            'replacement': [ replacement, 'replacement', [ basestring, 'None' ], False ],
            'return_record': [ return_record, 'return-record', [ bool, 'None' ], False ],
            'pattern': [ pattern, 'pattern', [ basestring, 'None' ], False ],
        }, {
            'result': [ NameMappingInfo, False ],
        } )

    def name_mapping_unix_user_get_iter(self, max_records=None, query=None, tag=None, desired_attributes=None):
        """
        Iterate over a list of UNIX users.

        :param max_records: The maximum number of records to return in this call.
            Default: 20
        :param query: A query that specifies which objects to return.
            A query could be specified on any number of attributes in the
            UNIX user information object.
            All UNIX user information objects matching this query up to
            'max-records' will be returned.
        :param tag: Specify the tag from the last call.
            It is usually not specified for the first call. For subsequent
            calls, copy values from the 'next-tag' obtained from the previous
            call.
        :param desired_attributes: Specify the attributes that should be returned.
            If not present, all attributes for which information is available
            will be returned.
            If present, only the desired attributes for which information is
            available will be returned.
        """
        return self.request( "name-mapping-unix-user-get-iter", {
            'max_records': max_records,
            'query': [ query, 'query', [ UnixUserInfo, 'None' ], False ],
            'tag': tag,
            'desired_attributes': [ desired_attributes, 'desired-attributes', [ UnixUserInfo, 'None' ], False ],
        }, {
            'attributes-list': [ UnixUserInfo, True ],
        } )

    def name_mapping_swap(self, position, direction, with_position):
        """
        Swap the position of one name mapping with another. The position
        is the place in the sequence of name mappings in which the
        mappings are applied.

        :param position: Position of an existing name mapping in the list of name mappings
            for this Vserver.
        :param direction: Direction in which the name mapping is applied.
        :param with_position: Position of an existing name mapping entry in the list of name
            mappings for this Vserver. This entry will be swapped with the
            entry at 'position'.
        """
        return self.request( "name-mapping-swap", {
            'position': [ position, 'position', [ int, 'None' ], False ],
            'direction': [ direction, 'direction', [ basestring, 'None' ], False ],
            'with_position': [ with_position, 'with-position', [ int, 'None' ], False ],
        }, {
        } )

    def name_mapping_unix_group_destroy(self, group_name):
        """
        Destroy an existing UNIX group.

        :param group_name: Specifies UNIX group name.
        """
        return self.request( "name-mapping-unix-group-destroy", {
            'group_name': [ group_name, 'group-name', [ basestring, 'None' ], False ],
        }, {
        } )

    def name_mapping_unix_group_add_user(self, group_name, user_name):
        """
        Add a user to a UNIX group

        :param group_name: Specifies UNIX group name.
        :param user_name: Specifies user's UNIX account name.
        """
        return self.request( "name-mapping-unix-group-add-user", {
            'group_name': [ group_name, 'group-name', [ basestring, 'None' ], False ],
            'user_name': [ user_name, 'user-name', [ basestring, 'None' ], False ],
        }, {
        } )

    def name_mapping_unix_user_create(self, user_name, user_id, group_id, full_name=None, return_record=None):
        """
        Create a new UNIX user.

        :param user_name: Specifies user's UNIX account name.
        :param user_id: Specifies an identification number for the UNIX user.
        :param group_id: Specifies the primary group identification number for the UNIX
            user.
        :param full_name: Specifies the full name of the UNIX user.
        :param return_record: If set to true, returns the UNIX user information on successful
            creation.
            Default: false
        """
        return self.request( "name-mapping-unix-user-create", {
            'full_name': [ full_name, 'full-name', [ basestring, 'None' ], False ],
            'user_name': [ user_name, 'user-name', [ basestring, 'None' ], False ],
            'return_record': [ return_record, 'return-record', [ bool, 'None' ], False ],
            'user_id': [ user_id, 'user-id', [ int, 'None' ], False ],
            'group_id': [ group_id, 'group-id', [ int, 'None' ], False ],
        }, {
            'result': [ UnixUserInfo, False ],
        } )
| 46.895455
| 115
| 0.586314
| 2,318
| 20,634
| 5.118205
| 0.07981
| 0.069538
| 0.032873
| 0.031861
| 0.863368
| 0.829568
| 0.812879
| 0.784643
| 0.777141
| 0.744184
| 0
| 0.001009
| 0.327421
| 20,634
| 439
| 116
| 47.002278
| 0.853869
| 0.475235
| 0
| 0.583333
| 0
| 0
| 0.210081
| 0.054577
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.319444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5db2fbe1b1dff1ad79a142b2af3276d0139e2780
| 18,811
|
py
|
Python
|
appqos/tests/test_rest_pool.py
|
ppalucki/intel-cmt-cat
|
10689f14d77a800df41ac1f0a6c4875ee23555d6
|
[
"BSD-3-Clause"
] | null | null | null |
appqos/tests/test_rest_pool.py
|
ppalucki/intel-cmt-cat
|
10689f14d77a800df41ac1f0a6c4875ee23555d6
|
[
"BSD-3-Clause"
] | null | null | null |
appqos/tests/test_rest_pool.py
|
ppalucki/intel-cmt-cat
|
10689f14d77a800df41ac1f0a6c4875ee23555d6
|
[
"BSD-3-Clause"
] | null | null | null |
################################################################################
# BSD LICENSE
#
# Copyright(c) 2019-2020 Intel Corporation. All rights reserved.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
################################################################################
"""
Unit tests for rest module POOLs
"""
import json
from jsonschema import validate
import mock
import pytest
import common
from rest_common import get_config, load_json_schema, get_max_cos_id, REST, CONFIG, CONFIG_EMPTY
class TestPools:
    """Tests for the /pools collection endpoint."""

    @mock.patch("common.CONFIG_STORE.get_config", new=get_config)
    def test_get(self):
        resp = REST.get("/pools")
        payload = json.loads(resp.data.decode('utf-8'))
        assert resp.status_code == 200
        # validate get all pools response schema
        schema, resolver = load_json_schema('get_pool_all_response.json')
        validate(payload, schema, resolver=resolver)
        # assert 4 pools are returned
        # structure, types and required fields are validated using schema
        assert len(payload) == len(CONFIG['pools'])

    @mock.patch("common.CONFIG_STORE.get_config", mock.MagicMock(return_value=CONFIG_EMPTY))
    def test_get_empty(self):
        resp = REST.get("/pools")
        payload = json.loads(resp.data.decode('utf-8'))
        assert resp.status_code == 404
        assert "No pools in config" in payload["message"]

    @mock.patch("common.CONFIG_STORE.get_config", new=get_config)
    def test_get_invalid_id(self):
        resp = REST.get("/apps/5")
        payload = json.loads(resp.data.decode('utf-8'))
        assert resp.status_code == 404
        assert "not found in config" in payload["message"]
class TestPool_2:
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
def test_get(self):
    resp = REST.get("/pools/3")
    payload = json.loads(resp.data.decode('utf-8'))
    assert resp.status_code == 200
    # validate get 1 pool response schema
    schema, resolver = load_json_schema('get_pool_response.json')
    validate(payload, schema, resolver=resolver)
    # assert 1 pool is returned
    # structure, types and required fields are validated using schema
    assert payload['id'] == 3
@mock.patch("common.CONFIG_STORE.get_config", mock.MagicMock(return_value=CONFIG_EMPTY))
def test_get_empty(self):
    resp = REST.get("/pools/5")
    payload = json.loads(resp.data.decode('utf-8'))
    assert resp.status_code == 404
    assert "No pools in config" in payload["message"]
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
def test_get_invalid_id(self):
    resp = REST.get("/pools/5")
    payload = json.loads(resp.data.decode('utf-8'))
    assert resp.status_code == 404
    assert "not found in config" in payload["message"]
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
def test_delete(self):
    def _verify_saved(data):
        # pool 3 must be gone from the configuration being persisted
        for pool in data['pools']:
            assert pool['id'] != 3
    with mock.patch('common.CONFIG_STORE.set_config', side_effect=_verify_saved) as set_cfg:
        resp = REST.delete("/pools/3")
        set_cfg.assert_called_once()
        assert resp.status_code == 200
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
def test_delete_invalid_id(self):
    with mock.patch('common.CONFIG_STORE.set_config') as set_cfg:
        resp = REST.delete("/pools/10")
        set_cfg.assert_not_called()
        payload = json.loads(resp.data.decode('utf-8'))
        # assert 0 apps are returned
        assert resp.status_code == 404
        assert "POOL 10 not found in config" in payload["message"]
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
def test_delete_default(self):
    with mock.patch('common.CONFIG_STORE.set_config') as set_cfg:
        resp = REST.delete("/pools/0")
        set_cfg.assert_not_called()
        payload = json.loads(resp.data.decode('utf-8'))
        # assert 0 apps are returned
        assert resp.status_code == 400
        assert "is Default, cannot delete" in payload["message"]
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
def test_delete_not_empty(self):
    with mock.patch('common.CONFIG_STORE.set_config') as set_cfg:
        resp = REST.delete("/pools/1")
        set_cfg.assert_not_called()
        payload = json.loads(resp.data.decode('utf-8'))
        # assert 0 apps are returned
        assert resp.status_code == 400
        assert "POOL 1 is not empty" in payload["message"]
@mock.patch("common.CONFIG_STORE.get_config", mock.MagicMock(return_value=CONFIG_EMPTY))
def test_delete_empty_config(self):
    with mock.patch('common.CONFIG_STORE.set_config') as set_cfg:
        resp = REST.delete("/pools/1")
        set_cfg.assert_not_called()
        payload = json.loads(resp.data.decode('utf-8'))
        assert resp.status_code == 404
        assert "No pools in config" in payload["message"]
@mock.patch("common.CONFIG_STORE.get_config", mock.MagicMock(return_value=CONFIG_EMPTY))
def test_put_empty_config(self):
    with mock.patch('common.CONFIG_STORE.set_config') as set_cfg:
        resp = REST.put("/pools/1", {"cbm": "0xc"})
        set_cfg.assert_not_called()
        payload = json.loads(resp.data.decode('utf-8'))
        assert resp.status_code == 404
        assert "No pools in config" in payload["message"]
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("common.PQOS_API.get_max_cos_id", new=get_max_cos_id)
@mock.patch("caps.cat_supported", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_supported", mock.MagicMock(return_value=True))
@mock.patch("power.validate_power_profiles", mock.MagicMock(return_value=True))
def test_put_cbm(self):
    def _verify_saved(data):
        # the persisted config must carry the new CBM for pool 1
        for pool in data['pools']:
            if pool['id'] == 1:
                assert pool['cbm'] == 0xc
    with mock.patch('common.CONFIG_STORE.set_config', side_effect=_verify_saved) as set_cfg, \
            mock.patch('pid_ops.is_pid_valid', return_value=True):
        resp = REST.put("/pools/1", {"cbm": "0xc"})
        set_cfg.assert_called_once()
        assert resp.status_code == 200
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("common.PQOS_API.get_max_cos_id", new=get_max_cos_id)
@mock.patch("caps.cat_supported", mock.MagicMock(return_value=False))
@mock.patch("caps.mba_supported", mock.MagicMock(return_value=True))
@mock.patch("power.validate_power_profiles", mock.MagicMock(return_value=True))
def test_put_cbm_unsupported(self):
    with mock.patch('common.CONFIG_STORE.set_config') as set_cfg:
        resp = REST.put("/pools/0", {"cbm": 0x1})
        set_cfg.assert_not_called()
        payload = json.loads(resp.data.decode('utf-8'))
        assert resp.status_code == 400
        assert "System does not support CAT" in payload["message"]
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("common.PQOS_API.get_max_cos_id", new=get_max_cos_id)
@mock.patch("caps.cat_supported", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_supported", mock.MagicMock(return_value=True))
@mock.patch("power.validate_power_profiles", mock.MagicMock(return_value=True))
def test_put_mba(self):
def set_config(data):
for pool in data['pools']:
if pool['id'] == 1:
assert pool['mba'] == 30
with mock.patch('common.CONFIG_STORE.set_config', side_effect=set_config) as func_mock,\
mock.patch('pid_ops.is_pid_valid', return_value=True):
response = REST.put("/pools/1", {"mba": 30})
func_mock.assert_called_once()
assert response.status_code == 200
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("common.PQOS_API.get_max_cos_id", new=get_max_cos_id)
@mock.patch("caps.cat_supported", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_supported", mock.MagicMock(return_value=False))
@mock.patch("power.validate_power_profiles", mock.MagicMock(return_value=True))
def test_put_mba_unsupported(self):
with mock.patch('common.CONFIG_STORE.set_config') as func_mock:
response = REST.put("/pools/0", {"mba": 30})
func_mock.assert_not_called()
data = json.loads(response.data.decode('utf-8'))
assert response.status_code == 400
assert "System does not support MBA" in data["message"]
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
@mock.patch("common.PQOS_API.get_max_cos_id", new=get_max_cos_id)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_supported", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_supported", mock.MagicMock(return_value=True))
@mock.patch("power.validate_power_profiles", mock.MagicMock(return_value=True))
def test_put_cores(self):
def set_config(data):
for pool in data['pools']:
if pool['id'] == 2:
assert pool['cores'] == [2, 3, 11]
with mock.patch('common.CONFIG_STORE.set_config', side_effect=set_config) as func_mock,\
mock.patch('pid_ops.is_pid_valid', return_value=True):
response = REST.put("/pools/2", {"cores": [2, 3, 11]})
func_mock.assert_called_once()
assert response.status_code == 200
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
@mock.patch("common.PQOS_API.get_max_cos_id", new=get_max_cos_id)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_supported", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_supported", mock.MagicMock(return_value=True))
@mock.patch("power.validate_power_profiles", mock.MagicMock(return_value=True))
def test_put_name(self):
def set_config(data):
for pool in data['pools']:
if pool['id'] == 2:
assert pool['name'] == "test"
with mock.patch('common.CONFIG_STORE.set_config', side_effect=set_config) as func_mock,\
mock.patch('pid_ops.is_pid_valid', return_value=True):
response = REST.put("/pools/2", {"name": "test"})
func_mock.assert_called_once()
assert response.status_code == 200
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
@mock.patch("common.PQOS_API.get_max_cos_id", new=get_max_cos_id)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_supported", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_supported", mock.MagicMock(return_value=True))
def test_put_duplicate_cores(self):
with mock.patch('common.CONFIG_STORE.set_config') as func_mock:
response = REST.put("/pools/2", {"cores": [1, 2, 3, 11]})
func_mock.assert_not_called()
data = json.loads(response.data.decode('utf-8'))
assert response.status_code == 400
assert "already assigned to another pool" in data["message"]
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
@mock.patch("common.PQOS_API.get_max_cos_id", new=get_max_cos_id)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
def test_put_empty_cores(self):
with mock.patch('common.CONFIG_STORE.set_config') as func_mock:
response = REST.put("/pools/2", {"cores": []})
func_mock.assert_not_called()
assert response.status_code == 400
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
@mock.patch("common.PQOS_API.get_max_cos_id", new=get_max_cos_id)
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
def test_put_not_exist(self):
with mock.patch('common.CONFIG_STORE.set_config') as func_mock:
response = REST.put("/pools/10", {"cores": [2, 3, 11]})
func_mock.assert_not_called()
data = json.loads(response.data.decode('utf-8'))
assert response.status_code == 404
assert "not found in config" in data["message"]
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
@mock.patch("common.CONFIG_STORE.get_new_pool_id", mock.MagicMock(return_value=5))
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
@mock.patch("caps.cat_supported", mock.MagicMock(return_value=True))
@mock.patch("caps.mba_supported", mock.MagicMock(return_value=True))
@mock.patch("power.validate_power_profiles", mock.MagicMock(return_value=True))
@pytest.mark.parametrize("pool_config", [
{"cores":[11, 12], "cbm": "0xf"}, # no name
{"name":"hello", "cores":[13, 17], "cbm": "0xf"}, # cbm string
{"name":"hello", "cores":[11, 16], "cbm": 3}, # cbm int
{"name":"hello_mba", "cores":[6, 7], "mba": 50}, # mba
{"name":"hello_mba_cbm", "cores":[14, 18], "mba": 50, "cbm": "0xf0"} # cbm & mba
])
def test_post(self, pool_config):
with mock.patch('common.CONFIG_STORE.set_config') as func_mock,\
mock.patch('pid_ops.is_pid_valid', return_value=True):
response = REST.post("/pools", pool_config)
func_mock.assert_called_once()
data = json.loads(response.data.decode('utf-8'))
#validate add pool response schema
schema, resolver = load_json_schema('add_pool_response.json')
validate(data, schema, resolver=resolver)
assert response.status_code == 201
assert data['id'] == 5
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
@mock.patch("common.CONFIG_STORE.get_new_pool_id", mock.MagicMock(return_value=None))
def test_post_exceed_max_number(self):
with mock.patch('common.CONFIG_STORE.set_config') as func_mock:
response = REST.post("/pools", {"cores":[11, 12], "cbm": "0xf"})
func_mock.assert_not_called()
assert response.status_code == 500
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
@mock.patch("common.CONFIG_STORE.get_new_pool_id", mock.MagicMock(return_value=5))
@mock.patch("common.PQOS_API.check_core", mock.MagicMock(return_value=True))
def test_post_duplicate_core(self):
with mock.patch('common.CONFIG_STORE.set_config') as func_mock:
response = REST.post("/pools", {"cores":[1, 2, 3], "cbm": "0xf"})
func_mock.assert_not_called()
assert response.status_code == 400
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
def test_post_unknown_param(self):
with mock.patch('common.CONFIG_STORE.set_config') as func_mock:
response = REST.post("/pools", {"cores":[20], "cbm": "0xf", "unknown": 1})
func_mock.assert_not_called()
assert response.status_code == 400
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
@pytest.mark.parametrize("pool_config", [
{"cores": "invalid", "cbm": "0xf"},
{"cores": [20], "cbm": "invalid"},
{"cores": [20], "mba": "invalid"}
])
def test_post_invalid_value(self, pool_config):
with mock.patch('common.CONFIG_STORE.set_config') as func_mock:
response = REST.post("/pools", pool_config)
func_mock.assert_not_called()
assert response.status_code == 400
@mock.patch("common.CONFIG_STORE.get_config", new=get_config)
@pytest.mark.parametrize("no_req_fields_json", [
{"mba": 10}, # missing cores
{"cbm": "0xf"}, # missing cores
{"mba": 10, "cbm": "0xf"}, # missing cores
{"name":"hello", "mba": 10, "cbm": "0xf"}, # missing cores
{"name":"hello", "cbm": "0xf"}, # missing cores
{"cores":[3, 10]}, # missing at least one alloc technology
{"name":"hello", "cores":[3, 10]}, # missing at least one alloc technology
{"name":"hello"}, # missing at least one alloc technology and cores
{"cores":[3, 10], "cbm": "0xf", "apps":[1]} # extra property "apps"
])
def test_post_no_req_fields(self, no_req_fields_json):
with mock.patch('common.CONFIG_STORE.set_config') as func_mock:
response = REST.post("/pools", no_req_fields_json)
func_mock.assert_not_called()
assert response.status_code == 400
| 43.443418
| 96
| 0.657222
| 2,544
| 18,811
| 4.64544
| 0.103774
| 0.075393
| 0.090117
| 0.090624
| 0.827382
| 0.820443
| 0.812574
| 0.811389
| 0.792689
| 0.769673
| 0
| 0.015974
| 0.204614
| 18,811
| 432
| 97
| 43.543981
| 0.773894
| 0.117165
| 0
| 0.695502
| 0
| 0
| 0.235262
| 0.14488
| 0
| 0
| 0.00299
| 0
| 0.238754
| 1
| 0.110727
| false
| 0
| 0.020761
| 0
| 0.138408
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5dc2b4dc69e1bf9e1644f5df4169b958dab57801
| 3,700
|
py
|
Python
|
dialogue-engine/src/programy/storage/stores/sql/store/users.py
|
cotobadesign/cotoba-agent-oss
|
3833d56e79dcd7529c3e8b3a3a8a782d513d9b12
|
[
"MIT"
] | 104
|
2020-03-30T09:40:00.000Z
|
2022-03-06T22:34:25.000Z
|
dialogue-engine/src/programy/storage/stores/sql/store/users.py
|
cotobadesign/cotoba-agent-oss
|
3833d56e79dcd7529c3e8b3a3a8a782d513d9b12
|
[
"MIT"
] | 25
|
2020-06-12T01:36:35.000Z
|
2022-02-19T07:30:44.000Z
|
dialogue-engine/src/programy/storage/stores/sql/store/users.py
|
cotobadesign/cotoba-agent-oss
|
3833d56e79dcd7529c3e8b3a3a8a782d513d9b12
|
[
"MIT"
] | 10
|
2020-04-02T23:43:56.000Z
|
2021-05-14T13:47:01.000Z
|
"""
Copyright (c) 2020 COTOBA DESIGN, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
"""
Copyright (c) 2016-2019 Keith Sterling http://www.keithsterling.com
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the
Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from programy.storage.stores.sql.store.sqlstore import SQLStore
from programy.storage.entities.user import UserStore
from programy.storage.stores.sql.dao.user import User
class SQLUserStore(SQLStore, UserStore):
def __init__(self, storage_engine):
SQLStore.__init__(self, storage_engine)
def empty(self):
self._storage_engine.session.query(User).delete()
def add_user(self, userid, client):
user = User(userid=userid, client=client)
self._storage_engine.session.add(user)
return user
def exists(self, userid, clientid):
try:
self._storage_engine.session.query(User).filter(User.userid == userid, User.client == clientid).one()
return True
except Exception:
return False
def get_links(self, userid):
links = []
db_users = self._storage_engine.session.query(User).filter(User.userid == userid)
for user in db_users:
links.append(user.client)
return links
def remove_user(self, userid, clientid):
try:
self._storage_engine.session.query(User).filter(User.userid == userid, User.client == clientid).delete()
return True
except Exception:
pass
return False
def remove_user_from_all_clients(self, userid):
try:
self._storage_engine.session.query(User).filter(User.userid == userid).delete()
return True
except Exception:
pass
return False
| 45.121951
| 126
| 0.735405
| 521
| 3,700
| 5.163148
| 0.28023
| 0.065428
| 0.050558
| 0.053532
| 0.793309
| 0.772491
| 0.760223
| 0.760223
| 0.726022
| 0.726022
| 0
| 0.004058
| 0.200811
| 3,700
| 81
| 127
| 45.679012
| 0.905648
| 0.287027
| 0
| 0.368421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.184211
| false
| 0.052632
| 0.078947
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
5ddd904ed29ee95c6ae7dda623c7543fe63ce13f
| 40
|
py
|
Python
|
zemberek/core/utils/__init__.py
|
Loodos/zemberek-python
|
4f6b47abda98ed5a4d440738d39a92374d50ef6b
|
[
"Apache-2.0"
] | 52
|
2020-08-24T09:52:58.000Z
|
2022-03-19T05:02:06.000Z
|
zemberek/core/utils/__init__.py
|
Loodos/zemberek-python
|
4f6b47abda98ed5a4d440738d39a92374d50ef6b
|
[
"Apache-2.0"
] | 7
|
2020-09-07T09:02:33.000Z
|
2021-11-26T14:15:41.000Z
|
zemberek/core/utils/__init__.py
|
Loodos/zemberek-python
|
4f6b47abda98ed5a4d440738d39a92374d50ef6b
|
[
"Apache-2.0"
] | 7
|
2020-09-23T19:27:55.000Z
|
2022-03-14T09:02:41.000Z
|
from .thread_locks import ReadWriteLock
| 20
| 39
| 0.875
| 5
| 40
| 6.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 40
| 1
| 40
| 40
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
f8e68a8fcfb1b8b3f3c82f5acd6cf859e8a78dec
| 903
|
py
|
Python
|
sorted_nearest/__init__.py
|
endrebak/sorted_nearest
|
cf99b8498cada089ecdb4cf565f5e3304591be0f
|
[
"BSD-3-Clause"
] | null | null | null |
sorted_nearest/__init__.py
|
endrebak/sorted_nearest
|
cf99b8498cada089ecdb4cf565f5e3304591be0f
|
[
"BSD-3-Clause"
] | 3
|
2020-06-19T14:32:25.000Z
|
2021-05-24T14:29:53.000Z
|
sorted_nearest/__init__.py
|
endrebak/sorted_nearest
|
cf99b8498cada089ecdb4cf565f5e3304591be0f
|
[
"BSD-3-Clause"
] | 2
|
2020-01-20T12:39:00.000Z
|
2021-04-14T15:51:05.000Z
|
from sorted_nearest.src.sorted_nearest import (nearest_previous_nonoverlapping,
nearest_next_nonoverlapping,
nearest_nonoverlapping)
from sorted_nearest.src.k_nearest import k_nearest_previous_nonoverlapping, k_nearest_next_nonoverlapping
from sorted_nearest.src.k_nearest_ties import get_all_ties, get_different_ties
from sorted_nearest.src.tiles import maketiles
from sorted_nearest.src.clusters import find_clusters
from sorted_nearest.src.max_disjoint_intervals import max_disjoint
from sorted_nearest.src.introns import find_introns
from sorted_nearest.src.annotate_clusters import annotate_clusters
from sorted_nearest.src.cluster_by import cluster_by
from sorted_nearest.src.merge_by import merge_by
from sorted_nearest.src.windows import makewindows
from sorted_nearest.version import __version__
| 47.526316
| 105
| 0.801772
| 117
| 903
| 5.794872
| 0.239316
| 0.249263
| 0.300885
| 0.324484
| 0.271386
| 0.123894
| 0.123894
| 0
| 0
| 0
| 0
| 0
| 0.166113
| 903
| 18
| 106
| 50.166667
| 0.900398
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.857143
| 0
| 0.857143
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5d1c604eb4d4994fdea2e4d6ee023202a7971479
| 25
|
py
|
Python
|
starry_process/ops/eigh/__init__.py
|
arfon/starry_process
|
72b2a540e7e4fdb2e6af61507efa1c9861d5c919
|
[
"MIT"
] | 13
|
2020-04-14T17:47:28.000Z
|
2022-03-16T15:19:48.000Z
|
starry_process/ops/eigh/__init__.py
|
arfon/starry_process
|
72b2a540e7e4fdb2e6af61507efa1c9861d5c919
|
[
"MIT"
] | 22
|
2020-09-23T20:33:22.000Z
|
2022-02-07T17:38:09.000Z
|
starry_process/ops/eigh/__init__.py
|
arfon/starry_process
|
72b2a540e7e4fdb2e6af61507efa1c9861d5c919
|
[
"MIT"
] | 8
|
2020-04-14T17:47:44.000Z
|
2022-02-06T16:39:47.000Z
|
from .eigh import EighOp
| 12.5
| 24
| 0.8
| 4
| 25
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5d2dd88c740d9da768d0440c065601e19e80a454
| 43
|
py
|
Python
|
min2net/preprocessing/MW/__init__.py
|
deepanshi98/mlp-project
|
c4af1a86fad4e7d51ace0e183a5b02b32864e9e5
|
[
"Apache-2.0"
] | null | null | null |
min2net/preprocessing/MW/__init__.py
|
deepanshi98/mlp-project
|
c4af1a86fad4e7d51ace0e183a5b02b32864e9e5
|
[
"Apache-2.0"
] | null | null | null |
min2net/preprocessing/MW/__init__.py
|
deepanshi98/mlp-project
|
c4af1a86fad4e7d51ace0e183a5b02b32864e9e5
|
[
"Apache-2.0"
] | 1
|
2022-02-28T21:08:32.000Z
|
2022-02-28T21:08:32.000Z
|
from . import time_domain
from . import raw
| 21.5
| 25
| 0.790698
| 7
| 43
| 4.714286
| 0.714286
| 0.606061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 2
| 26
| 21.5
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
5d44333e2a702ebbce3da35cc15d915b7d524981
| 44
|
py
|
Python
|
mdpy/examples/boyan_chain.py
|
rldotai/mdpy
|
e4949c1a0fa7f5a1a0eaf028b180c851980284d7
|
[
"MIT"
] | 14
|
2017-02-08T04:11:37.000Z
|
2020-05-01T07:22:37.000Z
|
mdpy/examples/boyan_chain.py
|
rldotai/mdpy
|
e4949c1a0fa7f5a1a0eaf028b180c851980284d7
|
[
"MIT"
] | null | null | null |
mdpy/examples/boyan_chain.py
|
rldotai/mdpy
|
e4949c1a0fa7f5a1a0eaf028b180c851980284d7
|
[
"MIT"
] | 3
|
2018-08-13T13:56:31.000Z
|
2022-03-14T15:31:01.000Z
|
"""Boyan's Chain MDP."""
import numpy as np
| 14.666667
| 24
| 0.659091
| 8
| 44
| 3.625
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 44
| 2
| 25
| 22
| 0.783784
| 0.409091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5d638ee183cc9c732206781f41075be7975ecd4f
| 74
|
py
|
Python
|
n_utils/nameless-dt-enable-profile.py
|
Jalle19/nameless-deploy-tools
|
755b00ac4c87611cc57cabc3b593c8cafce2498b
|
[
"Apache-2.0"
] | 10
|
2017-02-12T17:04:56.000Z
|
2018-11-10T16:46:57.000Z
|
n_utils/nameless-dt-enable-profile.py
|
Jalle19/nameless-deploy-tools
|
755b00ac4c87611cc57cabc3b593c8cafce2498b
|
[
"Apache-2.0"
] | 22
|
2017-04-06T11:41:01.000Z
|
2019-09-18T05:45:03.000Z
|
n_utils/nameless-dt-enable-profile.py
|
Jalle19/nameless-deploy-tools
|
755b00ac4c87611cc57cabc3b593c8cafce2498b
|
[
"Apache-2.0"
] | 4
|
2020-02-25T11:08:47.000Z
|
2022-02-10T13:29:53.000Z
|
from n_utils.profile_util import cli_enable_profile
cli_enable_profile()
| 18.5
| 51
| 0.878378
| 12
| 74
| 4.916667
| 0.666667
| 0.305085
| 0.542373
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.081081
| 74
| 3
| 52
| 24.666667
| 0.867647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
539f18099231d1c60374443a6998aaef34d7dcc8
| 1,643
|
py
|
Python
|
hasty/templater/forms.py
|
NREL/hasty
|
3beb041813ee92ba8942121d8e33c6b025eea448
|
[
"BSD-3-Clause"
] | 4
|
2020-10-09T19:30:04.000Z
|
2020-11-23T10:05:43.000Z
|
hasty/templater/forms.py
|
NREL/hasty
|
3beb041813ee92ba8942121d8e33c6b025eea448
|
[
"BSD-3-Clause"
] | 29
|
2021-01-28T14:03:05.000Z
|
2022-02-03T18:21:09.000Z
|
hasty/templater/forms.py
|
NREL/haste
|
18e4941d933a90a53c162a64993ff46608a7128f
|
[
"BSD-3-Clause"
] | null | null | null |
from django import forms
from . import models
from mapp.models import HaystackPointType, HaystackEquipmentType, BrickPointType, BrickEquipmentType
class HaystackEquipmentTemplateForm(forms.ModelForm):
equipment_type = forms.ModelChoiceField(queryset=HaystackEquipmentType.objects.order_by('haystack_tagset'))
points = forms.ModelMultipleChoiceField(queryset=HaystackPointType.objects.all(), widget=forms.CheckboxSelectMultiple)
class Meta:
model = models.HaystackEquipmentTemplate
fields = ('__all__')
class BrickEquipmentTemplateForm(forms.ModelForm):
equipment_type = forms.ModelChoiceField(queryset=BrickEquipmentType.objects.order_by('brick_class'))
points = forms.ModelMultipleChoiceField(queryset=BrickPointType.objects.all(), widget=forms.CheckboxSelectMultiple)
class Meta:
model = models.BrickEquipmentTemplate
fields = ('__all__')
class HaystackFaultTemplateForm(forms.ModelForm):
equipment_type = forms.ModelChoiceField(queryset=HaystackEquipmentType.objects.order_by('haystack_tagset'))
points = forms.ModelMultipleChoiceField(queryset=HaystackPointType.objects.all(), widget=forms.CheckboxSelectMultiple)
class Meta:
model = models.HaystackFaultTemplate
fields = ('__all__')
class BrickFaultTemplateForm(forms.ModelForm):
equipment_type = forms.ModelChoiceField(queryset=BrickEquipmentType.objects.order_by('brick_class'))
points = forms.ModelMultipleChoiceField(queryset=BrickPointType.objects.all(), widget=forms.CheckboxSelectMultiple)
class Meta:
model = models.BrickFaultTemplate
fields = ('__all__')
| 41.075
| 122
| 0.785758
| 143
| 1,643
| 8.832168
| 0.265734
| 0.044339
| 0.072842
| 0.085511
| 0.706255
| 0.706255
| 0.706255
| 0.706255
| 0.706255
| 0.706255
| 0
| 0
| 0.125989
| 1,643
| 39
| 123
| 42.128205
| 0.879526
| 0
| 0
| 0.592593
| 0
| 0
| 0.048691
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.703704
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
53b8f2f2e36005bebff39507b2be8cb7f28ec9cf
| 85
|
py
|
Python
|
modules/tinyexr/config.py
|
codetorex/godot
|
3f6a7c74e3ebb3d8e0e204c8a68d110c5dc3991d
|
[
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 222
|
2018-09-10T17:52:03.000Z
|
2022-03-24T04:55:19.000Z
|
modules/tinyexr/config.py
|
codetorex/godot
|
3f6a7c74e3ebb3d8e0e204c8a68d110c5dc3991d
|
[
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 27
|
2022-01-06T06:31:25.000Z
|
2022-03-29T01:18:01.000Z
|
modules/tinyexr/config.py
|
codetorex/godot
|
3f6a7c74e3ebb3d8e0e204c8a68d110c5dc3991d
|
[
"CC-BY-3.0",
"Apache-2.0",
"MIT"
] | 26
|
2019-04-12T11:21:26.000Z
|
2022-03-11T00:20:53.000Z
|
def can_build(env, platform):
return env["tools"]
def configure(env):
pass
| 12.142857
| 29
| 0.658824
| 12
| 85
| 4.583333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.211765
| 85
| 6
| 30
| 14.166667
| 0.820896
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.25
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 6
|
54df5d7c78b9133e5d2fdb31ea7de15a265b112a
| 115
|
py
|
Python
|
fuse/utils/multiprocessing/__init__.py
|
alexgo1/fuse-med-ml
|
928375828ff321d2bf7b2084389e34e1db0682e9
|
[
"Apache-2.0"
] | null | null | null |
fuse/utils/multiprocessing/__init__.py
|
alexgo1/fuse-med-ml
|
928375828ff321d2bf7b2084389e34e1db0682e9
|
[
"Apache-2.0"
] | null | null | null |
fuse/utils/multiprocessing/__init__.py
|
alexgo1/fuse-med-ml
|
928375828ff321d2bf7b2084389e34e1db0682e9
|
[
"Apache-2.0"
] | null | null | null |
from .run_multiprocessed import run_multiprocessed, get_from_global_storage
from .helpers import get_chunks_ranges
| 38.333333
| 75
| 0.895652
| 16
| 115
| 6
| 0.625
| 0.354167
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.078261
| 115
| 2
| 76
| 57.5
| 0.90566
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
54e087ff6072702ad9c12b994ace0c66c8d8209d
| 43
|
py
|
Python
|
app/routes.py
|
xujl930/wechat-shift-car
|
db0d064d021b3f59a1c08bcd131e300a52f1035d
|
[
"MIT"
] | null | null | null |
app/routes.py
|
xujl930/wechat-shift-car
|
db0d064d021b3f59a1c08bcd131e300a52f1035d
|
[
"MIT"
] | null | null | null |
app/routes.py
|
xujl930/wechat-shift-car
|
db0d064d021b3f59a1c08bcd131e300a52f1035d
|
[
"MIT"
] | null | null | null |
# -*- coding:utf8 -*-
from . import page
| 8.6
| 21
| 0.55814
| 5
| 43
| 4.8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030303
| 0.232558
| 43
| 4
| 22
| 10.75
| 0.69697
| 0.44186
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
54e0920b8f2ae45ce30cb6e9e7e610aa42bff8da
| 68
|
py
|
Python
|
termy/__main__.py
|
dingusagar/termy
|
599185beaa15dbd1f188429e8426bfd1642452ee
|
[
"MIT"
] | 1
|
2022-01-13T19:45:28.000Z
|
2022-01-13T19:45:28.000Z
|
termy/__main__.py
|
dingusagar/termy
|
599185beaa15dbd1f188429e8426bfd1642452ee
|
[
"MIT"
] | null | null | null |
termy/__main__.py
|
dingusagar/termy
|
599185beaa15dbd1f188429e8426bfd1642452ee
|
[
"MIT"
] | null | null | null |
from termy.controller.controller import init_cli_app
init_cli_app()
| 22.666667
| 52
| 0.867647
| 11
| 68
| 5
| 0.636364
| 0.254545
| 0.363636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.073529
| 68
| 3
| 53
| 22.666667
| 0.873016
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
072b305252872a169ba4318f26c9d6af7d155712
| 11,691
|
py
|
Python
|
tests/callbacks/test_torch_scheduler.py
|
NunoEdgarGFlowHub/torchbearer
|
d2b21b8ffcabde5b505cb1c736e05af6ee4276ca
|
[
"MIT"
] | null | null | null |
tests/callbacks/test_torch_scheduler.py
|
NunoEdgarGFlowHub/torchbearer
|
d2b21b8ffcabde5b505cb1c736e05af6ee4276ca
|
[
"MIT"
] | null | null | null |
tests/callbacks/test_torch_scheduler.py
|
NunoEdgarGFlowHub/torchbearer
|
d2b21b8ffcabde5b505cb1c736e05af6ee4276ca
|
[
"MIT"
] | null | null | null |
from unittest import TestCase
from mock import patch, Mock
import warnings
import torchbearer
from torchbearer.callbacks import TorchScheduler, LambdaLR, StepLR, MultiStepLR, ExponentialLR, CosineAnnealingLR,\
ReduceLROnPlateau, CyclicLR
class TestTorchScheduler(TestCase):
def setUp(self):
super(TestTorchScheduler, self).setUp()
warnings.filterwarnings('always')
def tearDown(self):
super(TestTorchScheduler, self).tearDown()
warnings.filterwarnings('default')
def test_torch_scheduler_on_batch_with_monitor(self):
state = {torchbearer.EPOCH: 1, torchbearer.METRICS: {'test': 101}, torchbearer.OPTIMIZER: 'optimizer'}
mock_scheduler = Mock()
mock_scheduler.return_value = mock_scheduler
torch_scheduler = TorchScheduler(lambda opt: mock_scheduler(opt), monitor='test', step_on_batch=True)
torch_scheduler.on_start(state)
mock_scheduler.assert_called_once_with('optimizer')
mock_scheduler.reset_mock()
torch_scheduler.on_start_training(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_sample(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_step_training(state)
mock_scheduler.step.assert_called_once_with(101)
mock_scheduler.reset_mock()
torch_scheduler.on_end_epoch(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
def test_torch_scheduler_on_epoch_with_monitor(self):
state = {torchbearer.EPOCH: 1, torchbearer.METRICS: {'test': 101}, torchbearer.OPTIMIZER: 'optimizer',
torchbearer.DATA: None}
mock_scheduler = Mock()
mock_scheduler.return_value = mock_scheduler
torch_scheduler = TorchScheduler(lambda opt: mock_scheduler(opt), monitor='test', step_on_batch=False)
torch_scheduler.on_start(state)
mock_scheduler.assert_called_once_with('optimizer')
mock_scheduler.reset_mock()
torch_scheduler.on_start_training(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_sample(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_step_training(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_end_epoch(state)
mock_scheduler.step.assert_called_once_with(101, epoch=1)
mock_scheduler.reset_mock()
def test_torch_scheduler_on_batch_no_monitor(self):
state = {torchbearer.EPOCH: 1, torchbearer.OPTIMIZER: 'optimizer'}
mock_scheduler = Mock()
mock_scheduler.return_value = mock_scheduler
torch_scheduler = TorchScheduler(lambda opt: mock_scheduler(opt), monitor=None, step_on_batch=True)
torch_scheduler.on_start(state)
mock_scheduler.assert_called_once_with('optimizer')
mock_scheduler.reset_mock()
torch_scheduler.on_start_training(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_sample(state)
mock_scheduler.step.assert_called_once_with()
mock_scheduler.reset_mock()
torch_scheduler.on_step_training(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_end_epoch(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
def test_torch_scheduler_on_epoch_no_monitor(self):
state = {torchbearer.EPOCH: 1, torchbearer.OPTIMIZER: 'optimizer', torchbearer.METRICS: {}}
mock_scheduler = Mock()
mock_scheduler.return_value = mock_scheduler
torch_scheduler = TorchScheduler(lambda opt: mock_scheduler(opt), monitor=None, step_on_batch=False)
torch_scheduler.on_start(state)
mock_scheduler.assert_called_once_with('optimizer')
mock_scheduler.reset_mock()
torch_scheduler.on_sample(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_step_training(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
torch_scheduler.on_end_epoch(state)
mock_scheduler.assert_not_called()
mock_scheduler.reset_mock()
def test_monitor_not_found(self):
    """A missing monitored metric should warn at epoch end, not on validation start."""
    state = {torchbearer.EPOCH: 1, torchbearer.OPTIMIZER: 'optimizer', torchbearer.METRICS: {'not_test': 1.}}
    sched_mock = Mock()
    sched_mock.return_value = sched_mock
    callback = TorchScheduler(lambda opt: sched_mock(opt), monitor='test', step_on_batch=False)
    callback.on_start(state)

    with warnings.catch_warnings(record=True) as caught:
        callback.on_start_validation(state)
        self.assertTrue(len(caught) == 0)

    with warnings.catch_warnings(record=True) as caught:
        callback.on_end_epoch(state)
        # The callback warns (rather than raising) when the monitor key is absent.
        self.assertTrue('Failed to retrieve key `test`' in str(caught[0].message))
def test_monitor_found(self):
    """When the monitored metric exists no hook should emit a warning."""
    state = {torchbearer.EPOCH: 1, torchbearer.OPTIMIZER: 'optimizer', torchbearer.METRICS: {'test': 1.}}
    sched_mock = Mock()
    sched_mock.return_value = sched_mock
    callback = TorchScheduler(lambda opt: sched_mock(opt), monitor='test', step_on_batch=False)
    callback.on_start(state)

    with warnings.catch_warnings(record=True) as caught:
        callback.on_start_training(state)
        self.assertTrue(len(caught) == 0)

    with warnings.catch_warnings(record=True) as caught:
        callback.on_start_validation(state)
        self.assertTrue(len(caught) == 0)

    with warnings.catch_warnings(record=True) as caught:
        callback.on_end_epoch(state)
        self.assertTrue(len(caught) == 0)
def test_batch_monitor_not_found(self):
    """In batch mode a missing monitored metric warns on the training step."""
    state = {torchbearer.EPOCH: 1, torchbearer.OPTIMIZER: 'optimizer', torchbearer.METRICS: {'not_test': 1.}}
    sched_mock = Mock()
    sched_mock.return_value = sched_mock
    callback = TorchScheduler(lambda opt: sched_mock(opt), monitor='test', step_on_batch=True)
    callback.on_start(state)

    with warnings.catch_warnings(record=True) as caught:
        callback.on_step_training(state)
        self.assertTrue('Failed to retrieve key `test`' in str(caught[0].message))
def test_batch_monitor_found(self):
    """In batch mode a present monitored metric produces no warning."""
    state = {torchbearer.EPOCH: 1, torchbearer.OPTIMIZER: 'optimizer', torchbearer.METRICS: {'test': 1.}}
    sched_mock = Mock()
    sched_mock.return_value = sched_mock
    callback = TorchScheduler(lambda opt: sched_mock(opt), monitor='test', step_on_batch=True)
    callback.on_start(state)

    with warnings.catch_warnings(record=True) as caught:
        callback.on_step_training(state)
        self.assertTrue(len(caught) == 0)
class TestLambdaLR(TestCase):
    """Checks that the LambdaLR callback forwards its arguments to torch's scheduler."""

    @patch('torch.optim.lr_scheduler.LambdaLR')
    def test_lambda_lr(self, lr_mock):
        state = {torchbearer.OPTIMIZER: 'optimizer'}
        callback = LambdaLR(0.1, last_epoch=-4, step_on_batch='batch')
        callback.on_start(state)
        # The optimizer from state is passed through, followed by the user args.
        lr_mock.assert_called_once_with('optimizer', 0.1, last_epoch=-4)
        self.assertEqual(callback._step_on_batch, 'batch')
class TestStepLR(TestCase):
    """Checks that the StepLR callback forwards its arguments to torch's scheduler."""

    @patch('torch.optim.lr_scheduler.StepLR')
    def test_lambda_lr(self, lr_mock):
        state = {torchbearer.OPTIMIZER: 'optimizer'}
        callback = StepLR(10, gamma=0.4, last_epoch=-4, step_on_batch='batch')
        callback.on_start(state)
        lr_mock.assert_called_once_with('optimizer', 10, gamma=0.4, last_epoch=-4)
        self.assertEqual(callback._step_on_batch, 'batch')
class TestMultiStepLR(TestCase):
    """Checks that the MultiStepLR callback forwards its arguments to torch's scheduler."""

    @patch('torch.optim.lr_scheduler.MultiStepLR')
    def test_lambda_lr(self, lr_mock):
        state = {torchbearer.OPTIMIZER: 'optimizer'}
        callback = MultiStepLR(10, gamma=0.4, last_epoch=-4, step_on_batch='batch')
        callback.on_start(state)
        lr_mock.assert_called_once_with('optimizer', 10, gamma=0.4, last_epoch=-4)
        self.assertEqual(callback._step_on_batch, 'batch')
class TestExponentialLR(TestCase):
    """Checks that the ExponentialLR callback forwards its arguments to torch's scheduler."""

    @patch('torch.optim.lr_scheduler.ExponentialLR')
    def test_lambda_lr(self, lr_mock):
        state = {torchbearer.OPTIMIZER: 'optimizer'}
        callback = ExponentialLR(0.4, last_epoch=-4, step_on_batch='batch')
        callback.on_start(state)
        lr_mock.assert_called_once_with('optimizer', 0.4, last_epoch=-4)
        self.assertEqual(callback._step_on_batch, 'batch')
class TestCosineAnnealingLR(TestCase):
    """Checks that the CosineAnnealingLR callback forwards its arguments to torch's scheduler."""

    @patch('torch.optim.lr_scheduler.CosineAnnealingLR')
    def test_lambda_lr(self, lr_mock):
        state = {torchbearer.OPTIMIZER: 'optimizer'}
        callback = CosineAnnealingLR(4, eta_min=10, last_epoch=-4, step_on_batch='batch')
        callback.on_start(state)
        lr_mock.assert_called_once_with('optimizer', 4, eta_min=10, last_epoch=-4)
        self.assertEqual(callback._step_on_batch, 'batch')
class TestReduceLROnPlateau(TestCase):
    """Checks ReduceLROnPlateau forwards everything except `monitor` to torch's scheduler."""

    @patch('torch.optim.lr_scheduler.ReduceLROnPlateau')
    def test_lambda_lr(self, lr_mock):
        state = {torchbearer.OPTIMIZER: 'optimizer'}
        callback = ReduceLROnPlateau(monitor='test', mode='max', factor=0.2, patience=100, verbose=True, threshold=10,
                                     threshold_mode='thresh', cooldown=5, min_lr=0.1, eps=1e-4, step_on_batch='batch')
        callback.on_start(state)
        # `monitor` is consumed by the callback; the rest goes to the torch scheduler.
        lr_mock.assert_called_once_with('optimizer', mode='max', factor=0.2, patience=100, verbose=True, threshold=10,
                                        threshold_mode='thresh', cooldown=5, min_lr=0.1, eps=1e-4)
        self.assertEqual(callback._step_on_batch, 'batch')
        self.assertEqual(callback._monitor, 'test')
class TestCyclicLR(TestCase):
    """CyclicLR only exists in torch > 1.0.0; older versions should raise NotImplementedError."""

    def test_lambda_lr(self):
        from distutils.version import LooseVersion
        import torch
        # NOTE(review): the `is` identity check guards against mocked __version__
        # objects that are not real strings — falls back to "0.4.0" in that case.
        version = torch.__version__ if str(torch.__version__) is torch.__version__ else "0.4.0"
        if LooseVersion(version) > LooseVersion("1.0.0"):  # CyclicLR is implemented
            with patch('torch.optim.lr_scheduler.CyclicLR') as lr_mock:
                state = {torchbearer.OPTIMIZER: 'optimizer'}
                callback = CyclicLR(0.01, 0.1, monitor='test', step_size_up=200, step_size_down=None,
                                    mode='triangular', gamma=2., scale_fn=None, scale_mode='cycle',
                                    cycle_momentum=False, base_momentum=0.7, max_momentum=0.9,
                                    last_epoch=-1, step_on_batch='batch')
                callback.on_start(state)
                # `monitor` is consumed by the callback; everything else is forwarded.
                lr_mock.assert_called_once_with('optimizer', 0.01, 0.1, step_size_up=200, step_size_down=None,
                                                mode='triangular', gamma=2., scale_fn=None, scale_mode='cycle',
                                                cycle_momentum=False, base_momentum=0.7, max_momentum=0.9,
                                                last_epoch=-1)
                self.assertEqual(callback._step_on_batch, 'batch')
                self.assertEqual(callback._monitor, 'test')
        else:
            self.assertRaises(NotImplementedError,
                              lambda: CyclicLR(0.01, 0.1, monitor='test', step_size_up=200, step_size_down=None,
                                               mode='triangular', gamma=2., scale_fn=None, scale_mode='cycle',
                                               cycle_momentum=False, base_momentum=0.7, max_momentum=0.9,
                                               last_epoch=-1, step_on_batch='batch'))
| 41.457447
| 152
| 0.688649
| 1,424
| 11,691
| 5.338483
| 0.087781
| 0.119705
| 0.07156
| 0.054986
| 0.877006
| 0.870692
| 0.838858
| 0.828335
| 0.825178
| 0.812155
| 0
| 0.015529
| 0.206826
| 11,691
| 281
| 153
| 41.604982
| 0.80427
| 0.001967
| 0
| 0.668269
| 0
| 0
| 0.067204
| 0.021858
| 0
| 0
| 0
| 0
| 0.206731
| 1
| 0.081731
| false
| 0
| 0.033654
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
074a844ea59efc9bfea92299eaeabb23a03851f3
| 177
|
py
|
Python
|
cocutils/__init__.py
|
zhs007/cocutils
|
ca8fa1e21570793dfd732143cfc577accfa3a8da
|
[
"Apache-2.0"
] | null | null | null |
cocutils/__init__.py
|
zhs007/cocutils
|
ca8fa1e21570793dfd732143cfc577accfa3a8da
|
[
"Apache-2.0"
] | null | null | null |
cocutils/__init__.py
|
zhs007/cocutils
|
ca8fa1e21570793dfd732143cfc577accfa3a8da
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:utf-8 -*-
from cocutils.utils import procDataFrame
from cocutils.dumpsc import dumpsc
from cocutils.dumpsc2 import sc2png
from cocutils.pathutils import findAllFile
| 35.4
| 42
| 0.819209
| 23
| 177
| 6.304348
| 0.565217
| 0.331034
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018987
| 0.107345
| 177
| 5
| 42
| 35.4
| 0.898734
| 0.112994
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ab2e0812f377f79b7363bb522602eaf6240557e6
| 30
|
py
|
Python
|
schema_org/schema_org/sotools/__init__.py
|
DataONEorg/d1_ncei_adapter
|
34dd4ed9d581d259a70d7c9a884f520226dd2691
|
[
"Apache-2.0"
] | 1
|
2019-06-19T02:41:02.000Z
|
2019-06-19T02:41:02.000Z
|
schema_org/schema_org/sotools/__init__.py
|
DataONEorg/d1_ncei_adapter
|
34dd4ed9d581d259a70d7c9a884f520226dd2691
|
[
"Apache-2.0"
] | 7
|
2019-06-24T20:21:51.000Z
|
2022-01-07T13:06:07.000Z
|
schema_org/schema_org/sotools/__init__.py
|
DataONEorg/d1_ncei_adapter
|
34dd4ed9d581d259a70d7c9a884f520226dd2691
|
[
"Apache-2.0"
] | 3
|
2017-04-17T13:24:20.000Z
|
2019-05-28T18:32:27.000Z
|
"""
"""
from .common import *
| 7.5
| 21
| 0.533333
| 3
| 30
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 30
| 4
| 21
| 7.5
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
ab3c77e52b9ec1578c4973911fbb7a7854636ad3
| 4,792
|
py
|
Python
|
gesture_box.py
|
Grivois/Bibliomancy
|
38db6a25a652e722327cf7c80e56c41d6d18cd20
|
[
"MIT"
] | null | null | null |
gesture_box.py
|
Grivois/Bibliomancy
|
38db6a25a652e722327cf7c80e56c41d6d18cd20
|
[
"MIT"
] | null | null | null |
gesture_box.py
|
Grivois/Bibliomancy
|
38db6a25a652e722327cf7c80e56c41d6d18cd20
|
[
"MIT"
] | null | null | null |
from kivy.gesture import GestureDatabase
from kivy.uix.boxlayout import BoxLayout
from kivy.gesture import Gesture
gesture_strings = {
'left_to_right_line': 'eNq1mE1y3DYQhfe8iLXRFPofuICyTZUOkFLsKVllR5qSxkl8+zS7ZWWaFYfYaDajeQQf8PUDAVBXD18e/vx+uD++nL89H5dfXr9Pbbn6dILl9sPj3R/HD8sJ/U//ouXl9sPL+fnpy/HFf/Jy9fUky9V/mtxGs+Wkq5X5/aenh8fzeltfbxs/ue3XtdVyghzBOoTvfgvgctMOrQFpgz6odeEm1Nfx/L1ep+Xmuh2MGzJAM8HBXWV5+f3u/7vh6EaW+5/3cP/DvHU1Q0W2gebtd80DHSzNr+3AQm0gNGC3aXS8bvKvvWqXJm1oEzBk3bfvYT8m7QmVrI1GgJ3Jdu0xEkCYtG/aAQEVeus89iuPGPY0Zy8ioAjDBmjX1vftI1iUOXseStTfEoCJ6kS2aO/mH+HiZLiso1kfnYFJWPftKcKlyXAZGoqwsBFYh0H7/pEu0bv5R7w0GS+JzxkRVC+NDb931z7SJXsv+wiXJsMlAJRh0vvwpaftLwwc4fJkuL4aNP/YayPYLz5HuDwZLjLSIOjWjTxb3F8aOMLlyXCxIfo1BGZFvz4x/kiX7d38I16ejNcfWRrGAxuxNBu79hLxCryXfaQrk+m+7bU4RFHH/tIjka5Mpuvbiu+2zafoMCXfBfb9I12ZTNc3WzHxChHjMLS+//RKxCuT8b5NHEH0rdcmFjeNgHUmYPf3w8IAMFITtS77y4NGwEpz9uzLjwJZb6aGfT9fjXxV5uxNwfXmxy1jJtzf2DXiVZuyh8a9DViH7se53vZnj0a4OubsyY9UPvFxLcx6wNq1t4jW5qIFVWnYzISHL6Iz9hGtzUULozeVgb65+PFkYuJbJGtzySIOXxO88MqjC1lMnPXt4ePz8fj49i5gur4M+NWrG6ZDW24Yzb/OJ+vLnWtatLFqCpdab6FJ0WDVxqufpoarBtCLSCEiF5FDJC2ihGhYRF1FhNqRhUhwGJcfu2gRaGj1tmDzel6KI+B8aSpi0NGPMryKgUejFTHwuDKPwPNdoYiBx1J7DzzupToj8AS898uPXrQIPMHaQeAJFy9owedHv6oGoBhUNQilb9pSqqOqwehTsqqS0werGpSKG9VSlaoGmtJmZDktqTpATsw6jQCCTbk6AKZqVaVUKxsk26ZmkGyyaZts2qqabFqrA8mmG99k00qBybZJCJPNas0w2ayyYbL1OjJMtr7xTba+8U22vvFNtrHxTbZR2TDZRnWgZBu9qsFmrfoSplrHS5Rq7Y041U1vkmrNjYLNYNObpVpzo57qpreRaq0Zt1QrGycb1DFwstXH2E+TqdbeONmwzhJONqzEnGy46S3ZNs8QJxttxpBsVNkk2TbPmyQbVWJJNqpjkGTjWnU/r97l1vX5+HD/+bz+t8lPrzdr5i7+9fDp/Dk0XwJzLrl6fvp6fL57/HiMKxbb7aq/bqy/nZ6fPn37mF7dO/NXYD9kNfa5I92P6utWf/gHjU3d+Q==',
'right_to_left_line': 'eNp91HtsFEUcwPGr5YC20EKtUA/Q42UPayu2AgVaOjxkqCKegLqCR7m7bm+Plrv73e0BJfzgKHIYQ7BoAoV/JCY8/rGSGBA10CYYNTySxoJAAVMIjxpCKH8AJZDAzDDbhD2y+8ft5rufmdls9iaR7q8LrmooDqgxPR5VM6k8g23CRkhDeMml2G022+pgja5BOiWJdhs/lP7sR1ODAU2HfpSUPOFHj5LDqpypOhIN18T9Otgpsfeuzx92VVEGsNsxPRquU2PQ3wMDXrj0IgEyYSB7gAyETJdYKxIOhvQYZHlgkOuFw9wcZMJgNiobIcelpLFRa2AIrbyavfMHR3qhCA0wlLZ+e/JAtNTfFfdpXOcivOzSmOv6T0AtReQhvCLEpYcP7TXf+FLFMITh1iIf4VUhLub+sjTLsTBVOBBGCHH23uQr0376jYuvC/+8sSCjTYqRCKOsxWsIr1sLJ8JoIf551EWzu//lonFbW3n3JCLFGISxQpzIrR0fOpzHBSYa2lfssEkxDmG8tXgDocBauBAmCHF8b3cycdLGRf3W848qLhviTYRCa/EWQpEQPzrun7o9rylVFCO8bS0mIrxjLUoQSrlo3V++40nLiQgXS/budF/TDPEuwiRrMRlhirUoQ5gqxJG7w11VzXtSxTSE6daiHKFCiL891x933OxmojJS1FLQ609IMQOh0loQhJnWYhbCbCFO7+rcXvV/OxcbAr+v/SPLeI45CO8J0VF6aMx3Hc1cbGr05ubdMb7CuQjUWsxDqBLijPhX2rnY4r5W+1enscr7CB8IcW7dwCuXHQ+42Hb71pCiFuNLno/woRAXNh7bd6lA4WL7siklkflOKRYgfCREZ9nquUd/rnhOqMogvl/5o6oaktuP2wMfuyhJNotNkJJYEz+nJWEhixFzXETJ5l5zXMziAXP8hJIvj5jjpyyWmeNnlDR2GbFVbLtJUFjMN+Kzu0n4nG3WXxmxRw5fQsmGajGKEj1HDl9KyTqnlH3xC0rWHDSiU87poWRVjzkuY/O3G9EtF6qmJGq8ED0s51xOSaRvoYQc7qUklDBik4w+SuraxFQsHpRz+ikJfm88fJuUNZRoxgvpiyqLNnOspSSwxxwDLC43R41FpzkG1bjPq2Swaz1cr0a9Ib8KK2jlr7v4sVvpx26EvCtVqFO4h/q4r/gp2meiRQ=='
}
#This database can compare gestures the user makes to its stored gestures
#and tell us if the user input matches any of them.
gestures = GestureDatabase()
for name, gesture_string in gesture_strings.items():
gesture = gestures.str_to_gesture(gesture_string)
gesture.name = name
gestures.add_gesture(gesture)
class GestureBox(BoxLayout):
    """BoxLayout that records touch paths and dispatches an on_<name> event
    whenever the recorded stroke matches one of the known gestures."""

    def __init__(self, **kwargs):
        # Register one event per known gesture before the superclass initialises.
        for gesture_name in gesture_strings:
            self.register_event_type('on_{}'.format(gesture_name))
        super(GestureBox, self).__init__(**kwargs)

    def on_left_to_right_line(self):
        # Default handler; users bind their own callbacks.
        pass

    def on_right_to_left_line(self):
        pass

    # Recognition protocol: start recording in touch_down, accumulate points in
    # touch_move, and run the gesture match once the touch is released.
    def on_touch_down(self, touch):
        # Stash the path on the touch's user-data dict, starting at the contact point.
        touch.ud['gesture_path'] = [(touch.x, touch.y)]
        super(GestureBox, self).on_touch_down(touch)

    def on_touch_move(self, touch):
        touch.ud['gesture_path'].append((touch.x, touch.y))
        super(GestureBox, self).on_touch_move(touch)

    def on_touch_up(self, touch):
        if 'gesture_path' in touch.ud:
            # Build a gesture object from the recorded stroke.
            recorded = Gesture()
            recorded.add_stroke(touch.ud['gesture_path'])
            # Normalize so matching tolerates size variations.
            recorded.normalize()
            # minscore is the threshold a candidate must reach to count as a match.
            match = gestures.find(recorded, minscore=0.3)
            if match:
                print("{} happened".format(match[1].name))
                self.dispatch('on_{}'.format(match[1].name))
        super(GestureBox, self).on_touch_up(touch)
| 81.220339
| 1,509
| 0.834098
| 378
| 4,792
| 10.431217
| 0.518519
| 0.010652
| 0.019275
| 0.013695
| 0.025869
| 0.019275
| 0.019275
| 0.019275
| 0.019275
| 0
| 0
| 0.092562
| 0.104967
| 4,792
| 58
| 1,510
| 82.62069
| 0.826766
| 0.122287
| 0
| 0.054054
| 0
| 0.054054
| 0.662136
| 0.637101
| 0
| 1
| 0
| 0
| 0
| 1
| 0.162162
| false
| 0.054054
| 0.081081
| 0
| 0.27027
| 0.027027
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
ab3e59140ee6cd8ea6f8a6f465b1af1ad30bc3f3
| 273
|
py
|
Python
|
tests/conftest.py
|
aerogear/androidctl
|
f057117a893b2e24f47218b98ebf2e4552502eae
|
[
"Apache-2.0"
] | null | null | null |
tests/conftest.py
|
aerogear/androidctl
|
f057117a893b2e24f47218b98ebf2e4552502eae
|
[
"Apache-2.0"
] | 3
|
2017-08-11T08:27:34.000Z
|
2021-06-01T22:46:14.000Z
|
tests/conftest.py
|
aerogear/androidctl
|
f057117a893b2e24f47218b98ebf2e4552502eae
|
[
"Apache-2.0"
] | 1
|
2017-08-10T13:42:05.000Z
|
2017-08-10T13:42:05.000Z
|
import os
import pytest
@pytest.fixture
def cwd():
    """Absolute directory containing this test file."""
    here = os.path.realpath(__file__)
    return os.path.dirname(here)
@pytest.fixture
def android_home(cwd):
    """Path to the bundled android-sdk-linux fixture directory."""
    template = '%s/fixtures/android-sdk-linux'
    return template % cwd
@pytest.fixture
def props_path(cwd):
    """Path to the props.cfg fixture file."""
    template = '%s/fixtures/props.cfg'
    return template % cwd
| 14.368421
| 52
| 0.728938
| 41
| 273
| 4.707317
| 0.463415
| 0.202073
| 0.248705
| 0.186529
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131868
| 273
| 18
| 53
| 15.166667
| 0.814346
| 0
| 0
| 0.272727
| 0
| 0
| 0.18315
| 0.18315
| 0
| 0
| 0
| 0
| 0
| 1
| 0.272727
| false
| 0
| 0.181818
| 0.272727
| 0.727273
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
db81b8b0b0c15d1780e9f7c904d9dc18cfa4b8f8
| 133
|
py
|
Python
|
checkstyle_filter/git/__init__.py
|
GengroHirano/GithubCheckReporter
|
ef8ac9121f24931393e6405fa8a9533cc311aeb7
|
[
"MIT"
] | null | null | null |
checkstyle_filter/git/__init__.py
|
GengroHirano/GithubCheckReporter
|
ef8ac9121f24931393e6405fa8a9533cc311aeb7
|
[
"MIT"
] | null | null | null |
checkstyle_filter/git/__init__.py
|
GengroHirano/GithubCheckReporter
|
ef8ac9121f24931393e6405fa8a9533cc311aeb7
|
[
"MIT"
] | null | null | null |
# ヘヴィノコー
# __
# /◎_ヽ
# |<゚Д゚)~
# |ヽ /⌒/⌒/ (ノ_|⊃
# | ソ ノ ノ /_ノ
# ヽ_人_人_/∪∪
# link: http://heno2.com/2ch/v.php?21036
| 14.777778
| 40
| 0.37594
| 27
| 133
| 1.962963
| 0.703704
| 0.075472
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075269
| 0.300752
| 133
| 8
| 41
| 16.625
| 0.44086
| 0.87218
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
dbb33ea7a680eb375ff998661db53618c8c72ce3
| 47
|
py
|
Python
|
esl/economics/accounting/__init__.py
|
vishalbelsare/ESL
|
cea6feda1e588d5f441742dbb1e4c5479b47d357
|
[
"Apache-2.0"
] | 37
|
2019-10-13T12:23:32.000Z
|
2022-03-19T10:40:29.000Z
|
esl/economics/accounting/__init__.py
|
vishalbelsare/ESL
|
cea6feda1e588d5f441742dbb1e4c5479b47d357
|
[
"Apache-2.0"
] | 3
|
2020-03-20T04:44:06.000Z
|
2021-01-12T06:18:33.000Z
|
esl/economics/accounting/__init__.py
|
vishalbelsare/ESL
|
cea6feda1e588d5f441742dbb1e4c5479b47d357
|
[
"Apache-2.0"
] | 10
|
2019-11-06T15:59:06.000Z
|
2021-08-09T17:28:24.000Z
|
from esl._esl._economics._accounting import *
| 15.666667
| 45
| 0.808511
| 6
| 47
| 5.833333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 47
| 2
| 46
| 23.5
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
dbd9ae6e48e16e9c98f1173b8b568444c437512d
| 80
|
py
|
Python
|
backend/crypto/__init__.py
|
falso-de-verdade/api
|
48bdb4d850f12218f2750767d3cbe9860f2a26a2
|
[
"MIT"
] | null | null | null |
backend/crypto/__init__.py
|
falso-de-verdade/api
|
48bdb4d850f12218f2750767d3cbe9860f2a26a2
|
[
"MIT"
] | 7
|
2020-11-26T15:36:24.000Z
|
2020-12-05T03:01:00.000Z
|
backend/crypto/__init__.py
|
falso-de-verdade/api
|
48bdb4d850f12218f2750767d3cbe9860f2a26a2
|
[
"MIT"
] | 1
|
2020-11-22T22:31:21.000Z
|
2020-11-22T22:31:21.000Z
|
from .token import generate_token
from .hashing import default_backend as hasher
| 40
| 46
| 0.8625
| 12
| 80
| 5.583333
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1125
| 80
| 2
| 46
| 40
| 0.943662
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
91b4131ffff5d248cdfde07d448a6b4030886e89
| 112
|
py
|
Python
|
jmanager/utils.py
|
tiliaio/jmanager-python
|
084f6a6c2e91d21e4b03645af2423fc0927eaf92
|
[
"MIT"
] | 1
|
2020-06-17T20:01:16.000Z
|
2020-06-17T20:01:16.000Z
|
jmanager/utils.py
|
tiliaio/jmanager-python
|
084f6a6c2e91d21e4b03645af2423fc0927eaf92
|
[
"MIT"
] | null | null | null |
jmanager/utils.py
|
tiliaio/jmanager-python
|
084f6a6c2e91d21e4b03645af2423fc0927eaf92
|
[
"MIT"
] | 1
|
2020-06-25T21:14:17.000Z
|
2020-06-25T21:14:17.000Z
|
def get_module_name(module_name):
    """Drop the last three characters (e.g. a '.py' suffix) and replace
    spaces with underscores.

    Returns None for names of three characters or fewer — this mirrors the
    original's implicit fall-through, made explicit here.
    """
    if len(module_name) <= 3:
        return None
    return module_name[:-3].replace(' ', '_')
| 37.333333
| 49
| 0.642857
| 16
| 112
| 4.125
| 0.5625
| 0.606061
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021978
| 0.1875
| 112
| 3
| 49
| 37.333333
| 0.703297
| 0
| 0
| 0
| 0
| 0
| 0.017699
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
91b9f278aed6a8be57d95112c4126496621e17c5
| 195
|
py
|
Python
|
LintCode/001. A+B Problem.py
|
QinganZhao/LXXtCode
|
9debb10f9d33dcdb3def9d141a638b8172d25ff3
|
[
"MIT"
] | 3
|
2019-01-08T02:53:44.000Z
|
2021-07-26T07:03:27.000Z
|
LintCode/001. A+B Problem.py
|
QinganZhao/LXXtCode
|
9debb10f9d33dcdb3def9d141a638b8172d25ff3
|
[
"MIT"
] | null | null | null |
LintCode/001. A+B Problem.py
|
QinganZhao/LXXtCode
|
9debb10f9d33dcdb3def9d141a638b8172d25ff3
|
[
"MIT"
] | null | null | null |
class Solution:
    """
    @param a: An integer
    @param b: An integer
    @return: The sum of a and b
    """
    def aplusb(self, a, b):
        # Plain addition — Python ints are arbitrary precision, so no
        # overflow handling is needed.
        total = a + b
        return total
| 19.5
| 32
| 0.523077
| 29
| 195
| 3.517241
| 0.655172
| 0.176471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.374359
| 195
| 9
| 33
| 21.666667
| 0.836066
| 0.471795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
37d0f7c8279fd6e67906c06ff64fb4ba75f2ef92
| 116
|
py
|
Python
|
pydashlite/__init__.py
|
glowlex/pydashlite
|
cbc96478fa610aeae95b5584b406aa0c35b89db1
|
[
"MIT"
] | null | null | null |
pydashlite/__init__.py
|
glowlex/pydashlite
|
cbc96478fa610aeae95b5584b406aa0c35b89db1
|
[
"MIT"
] | null | null | null |
pydashlite/__init__.py
|
glowlex/pydashlite
|
cbc96478fa610aeae95b5584b406aa0c35b89db1
|
[
"MIT"
] | null | null | null |
from .arrays import *
from .objects import *
from .collections import *
from .tools import *
from .strings import *
| 19.333333
| 26
| 0.741379
| 15
| 116
| 5.733333
| 0.466667
| 0.465116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 116
| 5
| 27
| 23.2
| 0.895833
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
37ff8aa02aaa5b23ebe2d79e09f6b75fe7869dce
| 247
|
py
|
Python
|
config.py
|
purin52002/RepeatColab
|
664317bf1f49c8d2e023a5d665e7d0de8178d683
|
[
"MIT"
] | null | null | null |
config.py
|
purin52002/RepeatColab
|
664317bf1f49c8d2e023a5d665e7d0de8178d683
|
[
"MIT"
] | null | null | null |
config.py
|
purin52002/RepeatColab
|
664317bf1f49c8d2e023a5d665e7d0de8178d683
|
[
"MIT"
] | 1
|
2020-01-18T03:30:27.000Z
|
2020-01-18T03:30:27.000Z
|
class UserPlofile:
    """Configuration constants for user profile storage.

    NOTE(review): the spelling "Plofile" is kept as-is — external callers
    may reference this name.
    """
    # Directory where per-user profile data lives.
    dir_path = 'user_plofile'
class ColabPath:
    """Google Colab notebook URLs used by this project (treat as config constants)."""
    train = 'https://colab.research.google.com/drive/1padoMj3dW0lr9nkko2384XXXD37dVhDE'
    evacuate = 'https://colab.research.google.com/drive/1idRwx4c_5PcxpLmj0ZIyIG9BaLXnQAS8'
| 30.875
| 90
| 0.781377
| 25
| 247
| 7.6
| 0.72
| 0.105263
| 0.189474
| 0.252632
| 0.336842
| 0.336842
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 0.109312
| 247
| 7
| 91
| 35.285714
| 0.790909
| 0
| 0
| 0
| 0
| 0
| 0.639676
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 6
|
531d61953ef4417e6d4a360305487b7d18f341d4
| 178
|
py
|
Python
|
src/lesson04/super_calc.py
|
robert-abela/python
|
2880f6a08cd58072c4e7d4dd8f4b449f9fa5a3d9
|
[
"MIT"
] | 2
|
2018-11-07T17:19:42.000Z
|
2019-09-21T06:15:36.000Z
|
src/lesson04/super_calc.py
|
robert-abela/python
|
2880f6a08cd58072c4e7d4dd8f4b449f9fa5a3d9
|
[
"MIT"
] | null | null | null |
src/lesson04/super_calc.py
|
robert-abela/python
|
2880f6a08cd58072c4e7d4dd8f4b449f9fa5a3d9
|
[
"MIT"
] | 1
|
2018-10-23T06:16:17.000Z
|
2018-10-23T06:16:17.000Z
|
def superCalc(n1, n2):
    """Print the sum, difference, product and quotient of n1 and n2, one per line.

    Raises ZeroDivisionError when n2 == 0 (unchanged from the original).
    """
    for value in (n1 + n2, n1 - n2, n1 * n2, n1 / n2):
        print(value)
# Interactive driver: prompts for two integers on stdin.
# int() raises ValueError on non-numeric input — intentional fail-fast.
n1 = int(input("enter n1: "))
n2 = int(input("enter n2: "))
# Prints the four basic operations on the entered values.
superCalc(n1, n2)
| 17.8
| 29
| 0.55618
| 29
| 178
| 3.413793
| 0.275862
| 0.282828
| 0.363636
| 0.444444
| 0.40404
| 0.40404
| 0.40404
| 0.40404
| 0.40404
| 0.40404
| 0
| 0.119403
| 0.247191
| 178
| 9
| 30
| 19.777778
| 0.619403
| 0
| 0
| 0
| 0
| 0
| 0.11236
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.125
| 0.5
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
5352ea4f30ce56cb4863f2049b64da3bf6bae524
| 3,295
|
py
|
Python
|
public_apis/Categories/Entertainment/aakhilv.py
|
osam7a/public-apis
|
7b21c77999a9ed47cc8430ad6a6d6c997150dfc7
|
[
"MIT"
] | null | null | null |
public_apis/Categories/Entertainment/aakhilv.py
|
osam7a/public-apis
|
7b21c77999a9ed47cc8430ad6a6d6c997150dfc7
|
[
"MIT"
] | null | null | null |
public_apis/Categories/Entertainment/aakhilv.py
|
osam7a/public-apis
|
7b21c77999a9ed47cc8430ad6a6d6c997150dfc7
|
[
"MIT"
] | null | null | null |
import aiohttp
import requests
class Response:
    """Lightweight attribute bag: every keyword argument becomes an instance attribute."""
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)
class Fun:
    """Client for the 'fun' endpoints of api.aakhilv.me.

    Synchronous calls use `requests`; async variants use `aiohttp`.
    Each method returns a list when more than one item is fetched, or the
    single item itself when limit == 1.
    """
    def __init__(self):
        self.base_url = 'https://api.aakhilv.me'
        # Short aliases for the longer method names.
        self.wyr = self.would_you_rather
        self.async_wyr = self.async_would_you_rather

    def would_you_rather(self, limit = 1):
        """Fetch `limit` would-you-rather prompts (API maximum: 884)."""
        if limit > 884:
            raise ValueError('Limit must be 884 or less')
        resp = requests.get(self.base_url + f'/fun/wyr?num={limit}')
        _json = resp.json()
        return _json if len(_json) > 1 else _json[0]

    def fact(self, limit = 1):
        """Fetch `limit` random facts (API maximum: 350)."""
        if limit > 350:
            # Bug fix: the original message said '883504 or less' while the
            # guard above checks against 350.
            raise ValueError('Limit must be 350 or less')
        resp = requests.get(self.base_url + f'/fun/facts?num={limit}')
        _json = resp.json()
        return _json if len(_json) > 1 else _json[0]

    async def async_fact(self, limit = 1):
        """Async variant of `fact`."""
        if limit > 350:
            raise ValueError('Limit must be 350 or less')
        async with aiohttp.ClientSession() as cs:
            async with cs.get(self.base_url + f'/fun/facts?num={limit}') as resp:
                _json = await resp.json()
                return _json if len(_json) > 1 else _json[0]

    async def async_would_you_rather(self, limit = 1):
        """Async variant of `would_you_rather`."""
        if limit > 884:
            raise ValueError('Limit must be 884 or less')
        async with aiohttp.ClientSession() as cs:
            async with cs.get(self.base_url + f'/fun/wyr?num={limit}') as resp:
                _json = await resp.json()
                return _json if len(_json) > 1 else _json[0]
class Bio:
    """Client for the 'bio' endpoints of api.aakhilv.me."""
    def __init__(self, limit = 1):
        # NOTE(review): `limit` is accepted but never used here — confirm intent.
        self.base_url = 'https://api.aakhilv.me'

    def terms(self, limit = 1):
        """Fetch `limit` biology terms as Response objects (API maximum: 10)."""
        if limit > 10:
            raise ValueError('Limit must be 10 or less')
        resp = requests.get(self.base_url + f'/bio/terms?num={limit}')
        _json = resp.json()
        if len(_json) > 1:
            # Multiple entries: wrap each one individually.
            return [Response(term = entry['term'], definition = entry['definition'])
                    for entry in _json]
        return Response(term = _json[0]['term'], definition = _json[0]['definition'])

    async def async_terms(self, limit = 1):
        """Async variant of `terms`."""
        if limit > 10:
            raise ValueError('Limit must be 10 or less')
        async with aiohttp.ClientSession() as cs:
            async with cs.get(self.base_url + f'/bio/terms?num={limit}') as resp:
                _json = await resp.json()
                if len(_json) > 1:
                    return [Response(term = entry['term'], definition = entry['definition'])
                            for entry in _json]
                return Response(term = _json[0]['term'], definition = _json[0]['definition'])
if __name__ == '__main__':
    # Smoke demo: hits the live API, so running this requires network access.
    print(Fun().fact(5))
    terms = Bio().terms()
    print("term: " + terms.term + " " + "definition: " + terms.definition)
| 35.815217
| 81
| 0.511077
| 396
| 3,295
| 4.083333
| 0.171717
| 0.044527
| 0.054422
| 0.044527
| 0.808287
| 0.805813
| 0.805813
| 0.771181
| 0.761905
| 0.715523
| 0
| 0.027683
| 0.375114
| 3,295
| 92
| 82
| 35.815217
| 0.757649
| 0.002428
| 0
| 0.604938
| 0
| 0
| 0.123554
| 0.02678
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.024691
| 0
| 0.234568
| 0.024691
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
535e9d3cb399f836eab47dec87731c5e79929d2a
| 6,321
|
py
|
Python
|
h2o-py/tests/testdir_algos/automl/pyunit_automl_input.py
|
vishalbelsare/h2o-3
|
9322fb0f4c0e2358449e339a434f607d524c69fa
|
[
"Apache-2.0"
] | 1
|
2022-03-15T06:08:14.000Z
|
2022-03-15T06:08:14.000Z
|
h2o-py/tests/testdir_algos/automl/pyunit_automl_input.py
|
vishalbelsare/h2o-3
|
9322fb0f4c0e2358449e339a434f607d524c69fa
|
[
"Apache-2.0"
] | 58
|
2021-10-01T12:43:37.000Z
|
2021-12-08T22:58:43.000Z
|
h2o-py/tests/testdir_algos/automl/pyunit_automl_input.py
|
vishalbelsare/h2o-3
|
9322fb0f4c0e2358449e339a434f607d524c69fa
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function
import sys, os
sys.path.insert(1, os.path.join("..","..",".."))
import h2o
from h2o.automl import H2OAutoML
from tests import pyunit_utils as pu
from _automl_utils import import_dataset
max_models = 2
def test_invalid_project_name():
    """Constructor must raise when the project name is not a valid identifier."""
    print("Check constructor raises error if project name is invalid")
    try:
        H2OAutoML(project_name="1nvalid")
    except Exception as e:
        # The error should mention both the class and the offending value.
        assert "H2OAutoML" in str(e)
        assert "1nvalid" in str(e)
    else:
        # Bug fix: the original test silently PASSED when no exception was
        # raised at all; now it fails explicitly.
        raise AssertionError("H2OAutoML accepted invalid project name '1nvalid'")
def test_no_x_train_set_only():
    """AutoML should run with only a training frame and x omitted."""
    print("AutoML run with x not provided and train set only")
    data = import_dataset()
    automl = H2OAutoML(project_name="py_aml1", stopping_rounds=3, stopping_tolerance=0.001,
                       stopping_metric="AUC", max_models=max_models, seed=1234)
    automl.train(y=data.target, training_frame=data.train)
    # Constructor arguments must be reflected on the fitted object.
    assert automl.project_name == "py_aml1", "Project name is not set"
    assert automl.stopping_rounds == 3, "stopping_rounds is not set to 3"
    assert automl.stopping_tolerance == 0.001, "stopping_tolerance is not set to 0.001"
    assert automl.stopping_metric == "AUC", "stopping_metrics is not set to `AUC`"
    assert automl.max_models == 2, "max_models is not set to 2"
    assert automl.seed == 1234, "seed is not set to `1234`"
    print("Check leaderboard")
    print(automl.leaderboard)
def test_no_x_train_and_validation_sets():
    """Supplying a validation frame while CV is on should log a warning event."""
    print("AutoML run with x not provided with train and valid")
    data = import_dataset()
    automl = H2OAutoML(project_name="py_aml2", stopping_rounds=3, stopping_tolerance=0.001,
                       stopping_metric="AUC", max_models=max_models, seed=1234)
    automl.train(y=data.target, training_frame=data.train, validation_frame=data.valid)
    assert automl.project_name == "py_aml2", "Project name is not set"
    assert automl.stopping_rounds == 3, "stopping_rounds is not set to 3"
    assert automl.stopping_tolerance == 0.001, "stopping_tolerance is not set to 0.001"
    assert automl.stopping_metric == "AUC", "stopping_metrics is not set to `AUC`"
    assert automl.max_models == 2, "max_models is not set to 2"
    assert automl.seed == 1234, "seed is not set to `1234`"
    # Cross-validation is still enabled, so the event log must contain the warning.
    events = automl.event_log.as_data_frame()
    warning_col = events[events['level'] == 'WARN']['message']
    assert warning_col.str.startswith("User specified a validation frame with cross-validation still enabled").any(), \
        "a warning should have been raised for using a validation frame with CV enabled"
    print("Check leaderboard")
    print(automl.leaderboard)
def test_no_x_train_and_test_sets():
    """AutoML should accept a leaderboard (test) frame alongside train."""
    print("AutoML run with x not provided with train and test")
    data = import_dataset()
    automl = H2OAutoML(project_name="py_aml3", stopping_rounds=3, stopping_tolerance=0.001,
                       stopping_metric="AUC", max_models=max_models, seed=1234)
    automl.train(y=data.target, training_frame=data.train, leaderboard_frame=data.test)
    assert automl.project_name == "py_aml3", "Project name is not set"
    assert automl.stopping_rounds == 3, "stopping_rounds is not set to 3"
    assert automl.stopping_tolerance == 0.001, "stopping_tolerance is not set to 0.001"
    assert automl.stopping_metric == "AUC", "stopping_metrics is not set to `AUC`"
    assert automl.max_models == 2, "max_models is not set to 2"
    assert automl.seed == 1234, "seed is not set to `1234`"
    print("Check leaderboard")
    print(automl.leaderboard)
def test_no_x_train_and_validation_and_test_sets():
    """With CV disabled (nfolds=0) the validation-frame warning must NOT appear."""
    print("AutoML run with x not provided with train, valid, and test")
    data = import_dataset()
    automl = H2OAutoML(project_name="py_aml4", stopping_rounds=3, stopping_tolerance=0.001,
                       stopping_metric="AUC", max_models=max_models, seed=1234, nfolds=0)
    automl.train(y=data.target, training_frame=data.train, validation_frame=data.valid,
                 leaderboard_frame=data.test)
    assert automl.project_name == "py_aml4", "Project name is not set"
    assert automl.stopping_rounds == 3, "stopping_rounds is not set to 3"
    assert automl.stopping_tolerance == 0.001, "stopping_tolerance is not set to 0.001"
    assert automl.stopping_metric == "AUC", "stopping_metrics is not set to `AUC`"
    assert automl.max_models == 2, "max_models is not set to 2"
    assert automl.seed == 1234, "seed is not set to `1234`"
    events = automl.event_log.as_data_frame()
    warning_col = events[events['level'] == 'WARN']['message']
    assert not warning_col.str.startswith("User specified a validation frame with cross-validation still enabled").any(), \
        "no warning should have been raised as CV was disabled"
    print("Check leaderboard")
    print(automl.leaderboard)
def test_no_x_y_as_idx_train_and_validation_and_test_sets():
    """AutoML train() should accept y given as a column index instead of a name."""
    print("AutoML run with x not provided and y as col idx with train, valid, and test")
    ds = import_dataset()
    aml = H2OAutoML(project_name="py_aml5", stopping_rounds=3, stopping_tolerance=0.001, stopping_metric="AUC", max_models=max_models, seed=1234)
    aml.train(y=ds.target_idx, training_frame=ds.train, validation_frame=ds.valid, leaderboard_frame=ds.test)
    assert aml.project_name == "py_aml5", "Project name is not set"
    assert aml.stopping_rounds == 3, "stopping_rounds is not set to 3"
    assert aml.stopping_tolerance == 0.001, "stopping_tolerance is not set to 0.001"
    assert aml.stopping_metric == "AUC", "stopping_metrics is not set to `AUC`"
    # Compare against the configured `max_models` rather than a hard-coded 2.
    assert aml.max_models == max_models, "max_models is not set to %s" % max_models
    assert aml.seed == 1234, "seed is not set to `1234`"
    print("Check leaderboard")
    print(aml.leaderboard)
def test_frames_can_be_passed_as_key():
    """Every frame argument of AutoML.train() should also accept a frame id (key)."""
    print("Check that all AutoML frames can be passed as keys.")
    ds = import_dataset()
    # Each entry exercises one frame parameter passed by id instead of by frame.
    frame_combos = [
        dict(training_frame=ds.train.frame_id),
        dict(training_frame=ds.train, validation_frame=ds.valid.frame_id),
        dict(training_frame=ds.train, blending_frame=ds.valid.frame_id),
        dict(training_frame=ds.train, leaderboard_frame=ds.test.frame_id),
    ]
    for frames in frame_combos:
        aml = H2OAutoML(project_name="py_aml_frames_as_keys", seed=1, max_models=1, nfolds=0)
        aml.train(y=ds.target, **frames)
        h2o.remove(aml)
# Register and run all AutoML argument-handling tests through the pyunit helper.
pu.run_tests([
    test_invalid_project_name,
    test_no_x_train_set_only,
    test_no_x_train_and_validation_sets,
    test_no_x_train_and_test_sets,
    test_no_x_train_and_validation_and_test_sets,
    test_no_x_y_as_idx_train_and_validation_and_test_sets,
    test_frames_can_be_passed_as_key,
])
| 47.886364
| 155
| 0.726151
| 1,002
| 6,321
| 4.341317
| 0.123753
| 0.062069
| 0.055172
| 0.057471
| 0.855402
| 0.816552
| 0.807816
| 0.775172
| 0.736552
| 0.718161
| 0
| 0.033251
| 0.167379
| 6,321
| 131
| 156
| 48.251908
| 0.793274
| 0
| 0
| 0.409091
| 0
| 0
| 0.288246
| 0.003322
| 0
| 0
| 0
| 0
| 0.309091
| 1
| 0.063636
| false
| 0.027273
| 0.109091
| 0
| 0.172727
| 0.163636
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
5364ec98ee652d5dfd5affdff9b4e816eefd4ca1
| 96
|
py
|
Python
|
backend/app/app/models/__init__.py
|
levakin/netmon
|
7d071824a16084be0dc87d85405ab7a27af3d2c1
|
[
"Apache-2.0"
] | 1
|
2020-07-14T06:21:59.000Z
|
2020-07-14T06:21:59.000Z
|
backend/app/app/models/__init__.py
|
levakin/netmon
|
7d071824a16084be0dc87d85405ab7a27af3d2c1
|
[
"Apache-2.0"
] | null | null | null |
backend/app/app/models/__init__.py
|
levakin/netmon
|
7d071824a16084be0dc87d85405ab7a27af3d2c1
|
[
"Apache-2.0"
] | 1
|
2021-11-28T10:32:33.000Z
|
2021-11-28T10:32:33.000Z
|
from .machine import Machine
from .machine_process import MachineProcess
from .user import User
| 24
| 43
| 0.84375
| 13
| 96
| 6.153846
| 0.461538
| 0.275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 96
| 3
| 44
| 32
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
5364f42d514af2198a727acebba0ae482e318051
| 2,071
|
py
|
Python
|
tests/terraform/checks/resource/aws/test_ALBListenerHTTPS.py
|
rajivshah3/checkov
|
c6a6eca21bedae50574814c92973b65d2d963581
|
[
"Apache-2.0"
] | 1
|
2021-02-16T15:07:29.000Z
|
2021-02-16T15:07:29.000Z
|
tests/terraform/checks/resource/aws/test_ALBListenerHTTPS.py
|
rajivshah3/checkov
|
c6a6eca21bedae50574814c92973b65d2d963581
|
[
"Apache-2.0"
] | null | null | null |
tests/terraform/checks/resource/aws/test_ALBListenerHTTPS.py
|
rajivshah3/checkov
|
c6a6eca21bedae50574814c92973b65d2d963581
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from checkov.terraform.checks.resource.aws.ALBListenerHTTPS import check
from checkov.common.models.enums import CheckResult
class TestALBListenerHTTPS(unittest.TestCase):
    """Unit tests for the ALBListenerHTTPS checkov resource check.

    Each test builds a minimal aws_lb_listener configuration and asserts the
    CheckResult the check produces for it.
    """

    def _scan(self, port, protocol, default_action=None):
        """Build a listener conf for the given port/protocol and run the check."""
        conf = {'load_balancer_arn': ['${aws_lb.front_end.arn}'],
                'port': [port], 'protocol': [protocol]}
        if default_action is not None:
            conf['default_action'] = default_action
        return check.scan_resource_conf(conf=conf)

    def test_success_redirect(self):
        # An HTTP listener passes when it redirects to HTTPS with a 301.
        default_action = [{'type': ['redirect'], 'redirect': [
            {'port': ['443'], 'protocol': ['HTTPS'], 'status_code': ['HTTP_301']}]}]
        self.assertEqual(CheckResult.PASSED, self._scan('80', 'HTTP', default_action))

    def test_success(self):
        self.assertEqual(CheckResult.PASSED, self._scan('443', 'HTTPS'))

    def test_nlb_tcp_success(self):
        self.assertEqual(CheckResult.PASSED, self._scan('22', 'TCP'))

    def test_nlb_udp_success(self):
        self.assertEqual(CheckResult.PASSED, self._scan('53', 'UDP'))

    def test_nlb_tcp_udp_success(self):
        self.assertEqual(CheckResult.PASSED, self._scan('53', 'TCP_UDP'))

    def test_failure(self):
        # A plain HTTP listener with no redirect must fail the check.
        self.assertEqual(CheckResult.FAILED, self._scan('80', 'HTTP'))
# Allow executing this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| 42.265306
| 115
| 0.666828
| 247
| 2,071
| 5.242915
| 0.222672
| 0.166795
| 0.074131
| 0.092664
| 0.742857
| 0.742857
| 0.742857
| 0.742857
| 0.742857
| 0.742857
| 0
| 0.011105
| 0.173829
| 2,071
| 48
| 116
| 43.145833
| 0.745763
| 0
| 0
| 0.34375
| 0
| 0
| 0.208595
| 0.066634
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.1875
| false
| 0.15625
| 0.09375
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 6
|
727be39c90a71ec9908777f6a8751ba14f6a4f10
| 57
|
py
|
Python
|
test/__main__.py
|
fantastic001/papertrack
|
f5a13ccabf1836a60020eb65e2e998b6ba33d8bd
|
[
"MIT"
] | null | null | null |
test/__main__.py
|
fantastic001/papertrack
|
f5a13ccabf1836a60020eb65e2e998b6ba33d8bd
|
[
"MIT"
] | null | null | null |
test/__main__.py
|
fantastic001/papertrack
|
f5a13ccabf1836a60020eb65e2e998b6ba33d8bd
|
[
"MIT"
] | null | null | null |
# Test-suite entry point: wildcard-import everything the `test` package
# exports so unittest can discover the TestCase classes in this namespace.
import unittest
from test import *
# NOTE(review): unittest.main() runs unconditionally (no __main__ guard);
# presumably this file is only ever executed directly as test/__main__.py.
unittest.main()
| 7.125
| 19
| 0.701754
| 7
| 57
| 5.714286
| 0.714286
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22807
| 57
| 7
| 20
| 8.142857
| 0.909091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
72954853ffbb71be5149f5423f44905f30330f83
| 3,665
|
py
|
Python
|
DrivingPerformance/general/stopPerf.py
|
ahmedkrmn/AirSim
|
527c8d794df37d5714cfe661a2618945123c4d9c
|
[
"MIT"
] | null | null | null |
DrivingPerformance/general/stopPerf.py
|
ahmedkrmn/AirSim
|
527c8d794df37d5714cfe661a2618945123c4d9c
|
[
"MIT"
] | 4
|
2020-03-05T15:42:17.000Z
|
2020-03-19T05:37:58.000Z
|
DrivingPerformance/rank1/road4R/stopPerf.py
|
ahmedkrmn/AirSim
|
527c8d794df37d5714cfe661a2618945123c4d9c
|
[
"MIT"
] | null | null | null |
# Helper function to get the stop sign performance with moving left.
def StopSignLeft(stopPlace, flag, x, y, Speed, prev_perf):
    """Update the stop-sign penalty for a car whose y decreases past the sign.

    stopPlace is [[x_min, x_max], [y_min, y_max]]; once (x, y) enters that
    zone `flag` latches True.  Past the zone, four bands at increasing
    distance map to penalties 2..5, applied only while Speed > 5 and only
    if they worsen prev_perf.  Returns (penalty, flag).
    """
    x_min, x_max = stopPlace[0][0], stopPlace[0][1]
    y_min, y_max = stopPlace[1][0], stopPlace[1][1]
    anchor = y_min
    # Latch the flag once the vehicle has entered the stop-sign zone.
    if x_min <= x <= x_max and y_min <= y <= y_max:
        flag = True
    # (far offset, near offset, penalty) for each band below the anchor,
    # nearest band first — order matters for the prev_perf comparisons.
    bands = ((13, 3, 2), (36, 26, 3), (51, 41, 4), (66, 56, 5))
    for far, near, penalty in bands:
        inside = x_min <= x <= x_max and anchor - far <= y <= anchor - near
        if flag and inside and Speed > 5 and prev_perf < penalty:
            return penalty, flag
    return prev_perf, flag
# Helper function to get the stop sign performance with moving right.
# (Original comment said "left" — copy-paste error; this is the rightward
# counterpart of StopSignLeft.)
def StopSignRight(stopPlace, flag, x, y, Speed, prev_perf):
    """Update the stop-sign penalty for a car whose y increases past the sign.

    stopPlace is [[x_min, x_max], [y_min, y_max]]; once (x, y) enters that
    zone `flag` latches True.  Past the zone, four bands at increasing
    distance map to penalties 2..5, applied only while Speed > 5 and only
    if they worsen prev_perf.  Returns (penalty, flag).
    """
    x_min, x_max = stopPlace[0][0], stopPlace[0][1]
    y_min, y_max = stopPlace[1][0], stopPlace[1][1]
    anchor = y_max
    # Latch the flag once the vehicle has entered the stop-sign zone.
    if x_min <= x <= x_max and y_min <= y <= y_max:
        flag = True
    # (near offset, far offset, penalty) for each band above the anchor,
    # nearest band first — order matters where the bands share an edge.
    bands = ((3, 13, 2), (13, 36, 3), (36, 51, 4), (51, 100, 5))
    for near, far, penalty in bands:
        inside = x_min <= x <= x_max and anchor + near <= y <= anchor + far
        if flag and inside and Speed > 5 and prev_perf < penalty:
            return penalty, flag
    return prev_perf, flag
# Helper function to get the stop sign performance with moving Down.
def StopSignDown(stopPlace, flag, x, y, Speed, prev_perf):
    """Score how well the car handled a stop sign while moving down.

    stopPlace : [[x_min, x_max], [y_min, y_max]] rectangle of the stop zone.
    flag      : True once the car has ever entered the stop zone.
    x, y      : current car position.
    Speed     : current speed; only Speed > 5 counts as "still moving".
    prev_perf : best performance score assigned so far.

    Returns (perf, flag): perf is 2..5 depending on how far past the stop
    zone (in -x) the car is while still fast, otherwise prev_perf unchanged.
    """
    place = stopPlace[0][0]
    # (far, near) x-offsets subtracted from the stop zone edge; each band
    # spans [place - far, place - near], nearest band first, score 2..5.
    bands = [(13, 3), (36, 13), (51, 36), (100, 51)]
    # Entering the stop rectangle arms the flag.
    if stopPlace[0][0] <= x <= stopPlace[0][1] and stopPlace[1][0] <= y <= stopPlace[1][1]:
        flag = True
    # Every band shares the same y-range and Speed > 5 requirement, so
    # hoist those checks out of the per-band test instead of repeating
    # them four times as the original did.
    if flag and Speed > 5 and stopPlace[1][0] <= y <= stopPlace[1][1]:
        for perf, (far, near) in enumerate(bands, start=2):
            # Only ever upgrade the score (prev_perf < perf), never downgrade.
            if place - far <= x <= place - near and prev_perf < perf:
                return perf, flag
    return prev_perf, flag
| 64.298246
| 133
| 0.603274
| 606
| 3,665
| 3.618812
| 0.075908
| 0.049248
| 0.02052
| 0.05472
| 0.922937
| 0.922937
| 0.882809
| 0.882809
| 0.825353
| 0.713178
| 0
| 0.107713
| 0.196999
| 3,665
| 57
| 134
| 64.298246
| 0.637445
| 0.055116
| 0
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
72c6bec97a21582369fc540a78d172be5fcee071
| 48,276
|
py
|
Python
|
parsetab.py
|
jtyuan/racetrack
|
b2ab4481bff71cf68a2e206a52847273aa62a599
|
[
"BSD-3-Clause"
] | null | null | null |
parsetab.py
|
jtyuan/racetrack
|
b2ab4481bff71cf68a2e206a52847273aa62a599
|
[
"BSD-3-Clause"
] | null | null | null |
parsetab.py
|
jtyuan/racetrack
|
b2ab4481bff71cf68a2e206a52847273aa62a599
|
[
"BSD-3-Clause"
] | null | null | null |
# parsetab.py
# This file is automatically generated. Do not edit.
# PLY (Python Lex-Yacc) LALR parse-table cache. The version marker, method
# name and grammar signature below let PLY decide whether these cached
# tables still match the grammar or must be regenerated.
_tabversion = '3.2'
_lr_method = 'LALR'
_lr_signature = '8HR\xee\xd8K\xc1gX\xbc\x8cr\x1fy\xf4#'
_lr_action_items = {'PEEK':([115,157,159,173,202,225,257,315,330,334,356,358,359,360,362,366,367,],[142,142,-90,-75,-74,-81,-89,-82,-87,-91,-83,-85,-92,-93,-86,-84,-88,]),'STAR':([4,19,21,57,72,144,145,148,155,158,161,163,166,167,168,169,201,203,206,207,210,214,217,239,259,261,264,269,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,298,300,305,309,314,316,328,329,343,345,348,355,357,363,],[-65,-46,-45,-44,97,-131,-130,-107,-99,-96,-129,-98,-102,-134,224,-132,224,-101,224,224,224,-97,-125,-122,224,-103,-126,224,-133,224,224,224,-108,224,224,224,224,224,224,-109,224,224,224,224,97,224,-105,-128,-100,-127,-129,224,-104,-94,-106,224,224,-95,]),'EXTERN_TYPE':([0,1,4,6,14,43,52,58,69,91,114,128,138,173,174,175,180,181,187,194,197,202,242,244,246,248,254,291,301,302,318,320,],[18,18,-65,-23,-26,-60,-6,-7,-22,-59,-10,-17,-14,-75,-28,-25,-27,-24,18,-18,-16,-74,-20,18,-19,-21,-13,-8,-15,-12,-9,-11,]),'FLOATNUMBER':([115,143,149,150,153,157,159,164,172,173,202,204,218,219,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,253,257,304,306,307,311,315,330,334,344,347,356,358,359,360,362,366,367,],[144,144,144,144,144,144,-90,144,144,-75,-74,144,144,144,144,144,144,144,-81,144,144,144,144,144,144,144,144,144,144,144,144,-89,144,144,144,144,-82,-87,-91,144,144,-83,-85,-92,-93,-86,-84,-88,]),'VOID':([0,1,4,6,14,43,52,58,69,91,108,114,128,129,133,138,173,174,175,180,181,187,194,197,202,242,244,246,248,254,291,295,301,302,318,320,324,352,353,],[2,2,-65,-23,-26,-60,-6,-7,-22,-59,2,-10,-17,2,2,-14,-75,-28,-25,-27,-24,2,-18,-16,-74,-20,2,-19,-21,-13,-8,-33,-15,-12,-9,-11,-34,-32,-31,]),'GLOBAL':([0,1,4,6,14,43,52,58,69,91,114,128,138,173,174,175,180,181,187,194,197,202,242,244,246,248,254,291,301,302,318,320,],[3,3,-65,-23,-26,-60,-6,-7,-22,-59,-10,-17,-14,-75,-28,-25,-27,-24,3,-18,-16,-74,-20,3,-19,-21,-13,-8,-15,-12,-9,-11,]),'NUMBER':([98,115,118,143,149,150,153,157,159,164,172,173,202,204,218,219,221,222,223,224,225,226,227,228,229,230,231,232,233,234,23
5,236,253,257,304,306,307,311,315,330,334,344,347,356,358,359,360,362,366,367,],[122,145,177,145,145,145,145,145,-90,145,145,-75,-74,145,145,145,145,145,145,145,-81,145,145,145,145,145,145,145,145,145,145,145,145,-89,145,145,145,145,-82,-87,-91,145,145,-83,-85,-92,-93,-86,-84,-88,]),',':([4,19,21,31,42,43,44,46,47,54,55,56,57,59,60,61,64,70,75,77,87,91,92,95,96,100,107,119,120,121,122,140,141,144,145,148,155,158,161,163,166,167,169,176,177,178,182,189,193,196,203,206,207,214,217,239,240,256,259,261,262,264,266,268,271,272,273,274,275,276,277,278,279,280,281,282,283,284,286,288,293,298,305,309,314,316,321,323,325,328,331,335,339,343,345,348,363,],[-65,-46,-45,50,50,-60,63,67,50,50,50,50,-44,50,50,85,90,94,-73,99,110,-59,113,50,-53,50,131,-54,-71,-70,-72,-134,50,-131,-130,-107,-99,-96,-129,-98,-102,-134,-132,-58,-56,-57,50,247,50,50,-101,-124,-123,-97,-125,-122,-55,303,304,-103,307,-126,310,312,-133,-118,-121,-113,-108,-115,-117,-111,-112,-114,-110,-109,-120,-116,-119,317,50,322,-105,-128,-100,-127,50,50,50,344,347,351,322,-104,-94,-106,-95,]),'IDENT':([0,1,2,4,6,10,14,15,19,21,27,28,29,30,34,35,36,37,39,40,41,43,45,49,50,52,53,57,58,63,64,67,69,72,85,88,90,91,94,97,98,99,103,108,109,110,113,114,115,125,126,128,129,130,131,133,134,135,137,138,143,147,149,150,153,157,159,160,164,167,172,173,174,175,180,181,184,187,191,194,197,198,200,202,204,205,208,209,211,212,215,216,218,219,220,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,237,238,242,244,246,247,248,252,253,254,257,289,291,295,298,301,302,303,304,306,307,310,311,312,315,318,320,322,324,330,334,336,344,347,352,353,356,358,359,360,362,366,367,],[4,4,-47,-65,-23,4,-26,4,-46,-45,4,4,4,4,4,4,4,4,4,4,4,-60,4,4,4,-6,4,-44,-7,4,4,4,-22,4,4,4,4,-59,4,4,4,4,4,4,4,4,4,-10,4,4,4,-17,4,4,4,4,4,-48,-49,4,4,4,4,4,4,4,-90,4,4,-45,4,-75,-28,-25,-27,-24,4,4,4,-18,-16,4,4,-74,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,-81,4,4,4,4,4,4,4,4,4,4,4,4,4,-20,4,-19,4,-21,4,4,4,-89,-37,-8,-33,4,-15,-12,4,4,4,4,4,4,4,-82,-9,-11,4,-34,-87,
-91,-40,4,4,-32,-31,-83,-85,-92,-93,-86,-84,-88,]),'NEW':([115,143,149,150,153,157,159,164,172,173,202,204,218,219,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,253,257,304,306,307,311,315,330,334,344,347,356,358,359,360,362,366,367,],[147,147,147,147,147,147,-90,147,147,-75,-74,147,147,147,147,147,147,147,-81,147,147,147,147,147,147,147,147,147,147,147,147,-89,147,147,147,147,-82,-87,-91,147,147,-83,-85,-92,-93,-86,-84,-88,]),'RIGHTSHIFT':([4,19,21,57,144,145,148,155,158,161,163,166,167,168,169,201,203,206,207,210,214,217,239,259,261,264,269,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,300,305,309,314,316,328,329,343,345,348,355,357,363,],[-65,-46,-45,-44,-131,-130,-107,-99,-96,-129,-98,-102,-134,233,-132,233,-101,233,233,233,-97,-125,-122,233,-103,-126,233,-133,233,-121,233,-108,233,233,-111,233,233,-110,-109,-120,233,233,233,233,-105,-128,-100,-127,-129,233,-104,-94,-106,233,233,-95,]),'DOT':([4,19,21,57,144,145,148,155,158,161,163,166,167,169,203,214,261,264,271,305,309,314,316,328,343,345,348,363,],[-65,-46,-45,-44,-131,-130,205,-99,-96,-129,-98,-102,-134,-132,-101,-97,-103,-126,-133,-105,-128,-100,-127,-129,-104,-94,-106,-95,]),'LEFTSHIFT':([4,19,21,57,144,145,148,155,158,161,163,166,167,168,169,201,203,206,207,210,214,217,239,259,261,264,269,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,300,305,309,314,316,328,329,343,345,348,355,357,363,],[-65,-46,-45,-44,-131,-130,-107,-99,-96,-129,-98,-102,-134,222,-132,222,-101,222,222,222,-97,-125,-122,222,-103,-126,222,-133,222,-121,222,-108,222,222,-111,222,222,-110,-109,-120,222,222,222,222,-105,-128,-100,-127,-129,222,-104,-94,-106,222,222,-95,]),'INCR':([115,143,149,150,153,157,159,164,172,173,202,204,218,219,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,253,257,304,306,307,311,315,330,334,344,347,356,358,359,360,362,366,367,],[150,150,150,150,150,150,-90,150,150,-75,-74,150,150,150,150,150,150,150,-81,150,150,150,150,150,150,150,150,150,150,15
0,150,-89,150,150,150,150,-82,-87,-91,150,150,-83,-85,-92,-93,-86,-84,-88,]),'TRANS':([0,1,4,6,14,43,52,58,69,91,114,128,138,173,174,175,180,181,187,194,197,202,242,244,246,248,254,291,301,302,318,320,],[7,7,-65,-23,-26,-60,-6,-7,-22,-59,-10,-17,-14,-75,-28,-25,-27,-24,7,-18,-16,-74,-20,7,-19,-21,-13,-8,-15,-12,-9,-11,]),'SEMI':([4,19,21,31,32,38,48,51,57,64,75,76,77,95,100,104,117,120,121,122,123,124,144,145,148,155,158,161,163,166,167,168,169,182,196,201,203,206,207,214,217,239,241,251,255,261,264,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,293,300,305,308,309,314,316,319,323,341,343,345,348,349,363,365,],[-65,-46,-45,-2,52,58,69,-67,-44,88,-73,-66,-69,-2,-2,128,175,-71,-70,-72,-68,181,-131,-130,-107,-99,-96,-129,-98,-102,-134,225,-132,-2,-2,257,-101,-124,-123,-97,-125,-122,289,295,302,-103,-126,-133,-118,-121,-113,-108,-115,-117,-111,-112,-114,-110,-109,-120,-116,315,-119,-2,324,-105,330,-128,-100,-127,336,-2,353,-104,-94,-106,358,-95,367,]),'STATIC_CAST':([115,143,149,150,153,157,159,164,172,173,202,204,218,219,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,253,257,304,306,307,311,315,330,334,344,347,356,358,359,360,362,366,367,],[151,151,151,151,151,151,-90,151,151,-75,-74,151,151,151,151,151,151,151,-81,151,151,151,151,151,151,151,151,151,151,151,151,-89,151,151,151,151,-82,-87,-91,151,151,-83,-85,-92,-93,-86,-84,-88,]),')':([4,19,21,42,43,47,49,51,53,54,55,56,57,59,60,62,68,70,71,73,75,76,77,78,79,80,82,83,84,87,91,94,96,116,119,120,121,122,123,139,140,141,144,145,148,155,158,161,163,166,167,169,176,177,178,193,199,203,206,207,210,214,217,219,239,240,250,252,258,259,261,263,264,265,269,270,271,272,273,274,275,276,277,278,279,280,281,282,283,284,286,287,296,297,298,299,304,305,306,309,311,314,316,322,325,326,327,328,329,331,332,333,338,339,340,342,343,345,348,355,357,361,363,],[-65,-46,-45,-2,-60,-2,-2,-67,-2,-2,81,-2,-44,-2,-2,86,93,-51,95,-52,-73,-66,-69,100,101,102,104,105,106,109,-59,-2,-53,-50,-54,-71,-70,-72,-68,198,-
134,-2,-131,-130,-107,-99,-96,-129,-98,-102,-134,-132,-58,-56,-57,-2,255,-101,-124,-123,264,-97,-125,-2,-122,-55,294,-2,-80,-79,-103,308,-126,309,313,314,-133,-118,-121,-113,-108,-115,-117,-111,-112,-114,-110,-109,-120,-116,-119,316,321,-43,-42,323,-2,-105,-2,-128,-2,-100,-127,-2,-2,-78,343,-129,345,346,348,349,-41,-42,-43,354,-104,-94,-106,363,364,365,-95,]),'(':([3,4,7,8,9,16,17,18,22,23,25,31,33,115,142,143,149,150,151,152,153,154,156,157,159,162,164,165,167,170,171,172,173,196,202,204,218,219,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,253,257,261,267,304,306,307,311,315,330,334,344,347,356,358,359,360,362,366,367,],[27,-65,28,29,30,34,35,36,39,40,41,49,53,153,200,153,153,153,208,209,153,211,212,153,-90,216,153,218,219,237,238,153,-75,252,-74,153,153,153,153,153,153,153,-81,153,153,153,153,153,153,153,153,153,153,153,153,-89,306,311,153,153,153,153,-82,-87,-91,153,153,-83,-85,-92,-93,-86,-84,-88,]),'IS_INVALID':([115,143,149,150,153,157,159,164,172,173,202,204,218,219,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,253,257,304,306,307,311,315,330,334,344,347,356,358,359,360,362,366,367,],[154,154,154,154,154,154,-90,154,154,-75,-74,154,154,154,154,154,154,154,-81,154,154,154,154,154,154,154,154,154,154,154,154,-89,154,154,154,154,-82,-87,-91,154,154,-83,-85,-92,-93,-86,-84,-88,]),'NE':([4,19,21,57,144,145,148,155,158,161,163,166,167,168,169,201,203,206,207,210,214,217,239,259,261,264,269,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,300,305,309,314,316,328,329,343,345,348,355,357,363,],[-65,-46,-45,-44,-131,-130,-107,-99,-96,-129,-98,-102,-134,227,-132,227,-101,227,227,227,-97,-125,-122,227,-103,-126,227,-133,227,-121,-113,-108,-115,-117,-111,-112,-114,-110,-109,-120,-116,227,227,227,-105,-128,-100,-127,-129,227,-104,-94,-106,227,227,-95,]),'OUT_PORT':([0,1,4,6,14,43,52,58,69,91,114,128,138,173,174,175,180,181,187,194,197,202,242,244,246,248,254,291,301,302,318,320,],[8,8,-65,-23,-26,-60,-6,-7,-22,-59,-10,-17
,-14,-75,-28,-25,-27,-24,8,-18,-16,-74,-20,8,-19,-21,-13,-8,-15,-12,-9,-11,]),'ENQUEUE':([115,157,159,173,202,225,257,315,330,334,356,358,359,360,362,366,367,],[156,156,-90,-75,-74,-81,-89,-82,-87,-91,-83,-85,-92,-93,-86,-84,-88,]),'LT':([4,19,21,57,144,145,148,155,158,161,163,166,167,168,169,201,203,206,207,210,214,217,239,259,261,264,269,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,300,305,309,314,316,328,329,343,345,348,355,357,363,],[-65,-46,-45,-44,-131,-130,-107,-99,-96,-129,-98,-102,-134,229,-132,229,-101,229,229,229,-97,-125,-122,229,-103,-126,229,-133,229,-121,-113,-108,-115,229,-111,-112,-114,-110,-109,-120,229,229,229,229,-105,-128,-100,-127,-129,229,-104,-94,-106,229,229,-95,]),'DOUBLE_COLON':([4,19,21,57,160,167,],[-65,37,-45,-44,215,-45,]),'PLUS':([4,19,21,57,144,145,148,155,158,161,163,166,167,168,169,201,203,206,207,210,214,217,239,259,261,264,269,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,300,305,309,314,316,328,329,343,345,348,355,357,363,],[-65,-46,-45,-44,-131,-130,-107,-99,-96,-129,-98,-102,-134,231,-132,231,-101,231,231,231,-97,-125,-122,231,-103,-126,231,-133,231,231,231,-108,231,231,-111,231,231,-110,-109,231,231,231,231,231,-105,-128,-100,-127,-129,231,-104,-94,-106,231,231,-95,]),'DECR':([115,143,149,150,153,157,159,164,172,173,202,204,218,219,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,253,257,304,306,307,311,315,330,334,344,347,356,358,359,360,362,366,367,],[149,149,149,149,149,149,-90,149,149,-75,-74,149,149,149,149,149,149,149,-81,149,149,149,149,149,149,149,149,149,149,149,149,-89,149,149,149,149,-82,-87,-91,149,149,-83,-85,-92,-93,-86,-84,-88,]),'ACTION':([0,1,4,6,14,43,52,58,69,91,114,128,138,173,174,175,180,181,187,194,197,202,242,244,246,248,254,291,301,302,318,320,],[9,9,-65,-23,-26,-60,-6,-7,-22,-59,-10,-17,-14,-75,-28,-25,-27,-24,9,-18,-16,-74,-20,9,-19,-21,-13,-8,-15,-12,-9,-11,]),':':([4,81,102,167,292,],[-65,103,126,220,220,]),'=':([4,74,96,119,],[-65,98,118,179,]),'
ASSIGN':([4,19,21,57,144,145,148,155,158,161,163,166,167,168,169,196,203,206,207,214,217,239,261,264,271,272,273,274,275,276,277,278,279,280,281,282,283,284,286,305,309,314,316,343,345,348,363,],[-65,-46,-45,-44,-131,-130,-107,-99,-96,-129,-98,-102,-134,235,-132,253,-101,-124,-123,-97,-125,-122,-103,-126,-133,-118,-121,-113,-108,-115,-117,-111,-112,-114,-110,-109,-120,-116,-119,-105,-128,-100,-127,-104,-94,-106,-95,]),'$end':([0,1,4,5,6,11,12,14,24,26,43,52,58,69,91,114,128,138,173,174,175,180,181,194,197,202,242,246,248,254,291,301,302,318,320,],[-2,-2,-65,0,-23,-5,-3,-26,-1,-4,-60,-6,-7,-22,-59,-10,-17,-14,-75,-28,-25,-27,-24,-18,-16,-74,-20,-19,-21,-13,-8,-15,-12,-9,-11,]),'GT':([4,19,21,57,144,145,148,155,158,161,163,166,167,168,169,201,203,206,207,210,214,217,239,259,261,264,269,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,300,305,309,314,316,328,329,343,345,348,355,357,363,],[-65,-46,-45,-44,-131,-130,-107,-99,-96,-129,-98,-102,-134,223,-132,223,-101,223,223,223,-97,-125,-122,223,-103,-126,223,-133,223,-121,-113,-108,-115,223,-111,-112,-114,-110,-109,-120,223,223,223,223,-105,-128,-100,-127,-129,223,-104,-94,-106,223,223,-95,]),'PROTOCOL':([0,1,4,6,14,43,52,58,69,91,114,128,138,173,174,175,180,181,187,194,197,202,242,244,246,248,254,291,301,302,318,320,],[13,13,-65,-23,-26,-60,-6,-7,-22,-59,-10,-17,-14,-75,-28,-25,-27,-24,13,-18,-16,-74,-20,13,-19,-21,-13,-8,-15,-12,-9,-11,]),'STRING':([13,20,50,98,99,115,118,143,149,150,153,157,159,164,172,173,179,202,204,218,219,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,253,257,304,306,307,311,315,317,330,334,344,347,351,356,358,359,360,362,366,367,],[32,38,75,121,75,161,176,161,161,161,161,161,-90,161,161,-75,240,-74,161,161,161,161,161,161,161,-81,161,161,161,161,161,161,161,161,161,161,161,161,-89,161,161,328,161,-82,335,-87,-91,161,161,361,-83,-85,-92,-93,-86,-84,-88,]),'STALL_AND_WAIT':([115,157,159,173,202,225,257,315,330,334,356,358,359,360,362,366,367,],[162,162,-90,-75,-74,-8
1,-89,-82,-87,-91,-83,-85,-92,-93,-86,-84,-88,]),'OOD':([115,143,149,150,153,157,159,164,172,173,202,204,218,219,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,253,257,304,306,307,311,315,330,334,344,347,356,358,359,360,362,366,367,],[166,166,166,166,166,166,-90,166,166,-75,-74,166,166,166,166,166,166,166,-81,166,166,166,166,166,166,166,166,166,166,166,166,-89,166,166,166,166,-82,-87,-91,166,166,-83,-85,-92,-93,-86,-84,-88,]),'ENUM':([0,1,4,6,14,43,52,58,69,91,114,128,138,173,174,175,180,181,187,194,197,202,242,244,246,248,254,291,301,302,318,320,],[16,16,-65,-23,-26,-60,-6,-7,-22,-59,-10,-17,-14,-75,-28,-25,-27,-24,16,-18,-16,-74,-20,16,-19,-21,-13,-8,-15,-12,-9,-11,]),'ELSE':([173,202,334,],[-75,-74,350,]),'MACHINE':([0,1,4,6,14,43,52,58,69,91,114,128,138,173,174,175,180,181,187,194,197,202,242,244,246,248,254,291,301,302,318,320,],[17,17,-65,-23,-26,-60,-6,-7,-22,-59,-10,-17,-14,-75,-28,-25,-27,-24,17,-18,-16,-74,-20,17,-19,-21,-13,-8,-15,-12,-9,-11,]),'GE':([4,19,21,57,144,145,148,155,158,161,163,166,167,168,169,201,203,206,207,210,214,217,239,259,261,264,269,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,300,305,309,314,316,328,329,343,345,348,355,357,363,],[-65,-46,-45,-44,-131,-130,-107,-99,-96,-129,-98,-102,-134,226,-132,226,-101,226,226,226,-97,-125,-122,226,-103,-126,226,-133,226,-121,-113,-108,-115,226,-111,-112,-114,-110,-109,-120,226,226,226,226,-105,-128,-100,-127,-129,226,-104,-94,-106,226,226,-95,]),'LE':([4,19,21,57,144,145,148,155,158,161,163,166,167,168,169,201,203,206,207,210,214,217,239,259,261,264,269,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,300,305,309,314,316,328,329,343,345,348,355,357,363,],[-65,-46,-45,-44,-131,-130,-107,-99,-96,-129,-98,-102,-134,230,-132,230,-101,230,230,230,-97,-125,-122,230,-103,-126,230,-133,230,-121,-113,-108,-115,230,-111,-112,-114,-110,-109,-120,230,230,230,230,-105,-128,-100,-127,-129,230,-104,-94,-106,230,230,-95,]),'SLASH':([4,19,21,57,144,145,148,155,158,
161,163,166,167,168,169,201,203,206,207,210,214,217,239,259,261,264,269,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,300,305,309,314,316,328,329,343,345,348,355,357,363,],[-65,-46,-45,-44,-131,-130,-107,-99,-96,-129,-98,-102,-134,232,-132,232,-101,232,232,232,-97,-125,-122,232,-103,-126,232,-133,232,232,232,-108,232,232,232,232,232,232,-109,232,232,232,232,232,-105,-128,-100,-127,-129,232,-104,-94,-106,232,232,-95,]),'[':([4,19,21,57,144,145,148,155,158,161,163,166,167,169,203,214,261,264,271,305,309,314,316,328,343,345,348,363,],[-65,-46,-45,-44,-131,-130,204,-99,-96,-129,-98,-102,-134,-132,-101,-97,-103,-126,-133,-105,-128,-100,-127,-129,-104,-94,-106,-95,]),'INCLUDE':([0,1,4,6,14,43,52,58,69,91,114,128,138,173,174,175,180,181,187,194,197,202,242,244,246,248,254,291,301,302,318,320,],[20,20,-65,-23,-26,-60,-6,-7,-22,-59,-10,-17,-14,-75,-28,-25,-27,-24,20,-18,-16,-74,-20,20,-19,-21,-13,-8,-15,-12,-9,-11,]),']':([4,19,21,57,144,145,148,155,158,161,163,166,167,169,203,204,206,207,214,217,239,258,259,260,261,264,271,272,273,274,275,276,277,278,279,280,281,282,283,284,286,304,305,309,314,316,326,343,345,348,363,],[-65,-46,-45,-44,-131,-130,-107,-99,-96,-129,-98,-102,-134,-132,-101,-2,-124,-123,-97,-125,-122,-80,-79,305,-103,-126,-133,-118,-121,-113,-108,-115,-117,-111,-112,-114,-110,-109,-120,-116,-119,-2,-105,-128,-100,-127,-78,-104,-94,-106,-95,]),'IF':([115,157,159,173,202,225,257,315,330,334,350,356,358,359,360,362,366,367,],[165,165,-90,-75,-74,-81,-89,-82,-87,-91,165,-83,-85,-92,-93,-86,-84,-88,]),'AND':([4,19,21,57,144,145,148,155,158,161,163,166,167,168,169,201,203,206,207,210,214,217,239,259,261,264,269,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,300,305,309,314,316,328,329,343,345,348,355,357,363,],[-65,-46,-45,-44,-131,-130,-107,-99,-96,-129,-98,-102,-134,221,-132,221,-101,221,221,221,-97,-125,-122,221,-103,-126,221,-133,-118,-121,-113,-108,-115,-117,-111,-112,-114,-110,-109,-120,-116,221,-119,221,-105,-128,-100,-127,-1
29,221,-104,-94,-106,221,221,-95,]),'DASH':([4,19,21,57,115,143,144,145,148,149,150,153,155,157,158,159,161,163,164,166,167,168,169,172,173,201,202,203,204,206,207,210,214,217,218,219,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,239,253,257,259,261,264,269,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,300,304,305,306,307,309,311,314,315,316,328,329,330,334,343,344,345,347,348,355,356,357,358,359,360,362,363,366,367,],[-65,-46,-45,-44,164,164,-131,-130,-107,164,164,164,-99,164,-96,-90,-129,-98,164,-102,-134,228,-132,164,-75,228,-74,-101,164,228,228,228,-97,-125,164,164,164,164,164,164,-81,164,164,164,164,164,164,164,164,164,164,164,-122,164,-89,228,-103,-126,228,-133,228,228,228,-108,228,228,-111,228,228,-110,-109,228,228,228,228,228,164,-105,164,164,-128,164,-100,-82,-127,-129,228,-87,-91,-104,164,-94,164,-106,228,-83,228,-85,-92,-93,-86,-95,-84,-88,]),'RETURN':([115,157,159,173,202,225,257,315,330,334,356,358,359,360,362,366,367,],[143,143,-90,-75,-74,-81,-89,-82,-87,-91,-83,-85,-92,-93,-86,-84,-88,]),'EQ':([4,19,21,57,144,145,148,155,158,161,163,166,167,168,169,201,203,206,207,210,214,217,239,259,261,264,269,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,300,305,309,314,316,328,329,343,345,348,355,357,363,],[-65,-46,-45,-44,-131,-130,-107,-99,-96,-129,-98,-102,-134,234,-132,234,-101,234,234,234,-97,-125,-122,234,-103,-126,234,-133,234,-121,-113,-108,-115,-117,-111,-112,-114,-110,-109,-120,-116,234,234,234,-105,-128,-100,-127,-129,234,-104,-94,-106,234,234,-95,]),'STRUCT':([0,1,4,6,14,43,52,58,69,91,114,128,138,173,174,175,180,181,187,194,197,202,242,244,246,248,254,291,301,302,318,320,],[22,22,-65,-23,-26,-60,-6,-7,-22,-59,-10,-17,-14,-75,-28,-25,-27,-24,22,-18,-16,-74,-20,22,-19,-21,-13,-8,-15,-12,-9,-11,]),'CHECK_STOP_SLOTS':([115,157,159,173,202,225,257,315,330,334,356,358,359,360,362,366,367,],[171,171,-90,-75,-74,-81,-89,-82,-87,-91,-83,-85,-92,-93,-86,-84,-88,]),'STATE_DECL':([0,1,4,6,14,43,52,58,69,91,11
4,128,138,173,174,175,180,181,187,194,197,202,242,244,246,248,254,291,301,302,318,320,],[23,23,-65,-23,-26,-60,-6,-7,-22,-59,-10,-17,-14,-75,-28,-25,-27,-24,23,-18,-16,-74,-20,23,-19,-21,-13,-8,-15,-12,-9,-11,]),'CHECK_ALLOCATE':([115,157,159,173,202,225,257,315,330,334,356,358,359,360,362,366,367,],[152,152,-90,-75,-74,-81,-89,-82,-87,-91,-83,-85,-92,-93,-86,-84,-88,]),'LIT_BOOL':([115,118,143,149,150,153,157,159,164,172,173,202,204,218,219,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,253,257,304,306,307,311,315,330,334,344,347,356,358,359,360,362,366,367,],[169,178,169,169,169,169,169,-90,169,169,-75,-74,169,169,169,169,169,169,169,-81,169,169,169,169,169,169,169,169,169,169,169,169,-89,169,169,169,169,-82,-87,-91,169,169,-83,-85,-92,-93,-86,-84,-88,]),'IS_VALID':([115,143,149,150,153,157,159,164,172,173,202,204,218,219,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,253,257,304,306,307,311,315,330,334,344,347,356,358,359,360,362,366,367,],[170,170,170,170,170,170,-90,170,170,-75,-74,170,170,170,170,170,170,170,-81,170,170,170,170,170,170,170,170,170,170,170,170,-89,170,170,170,170,-82,-87,-91,170,170,-83,-85,-92,-93,-86,-84,-88,]),'NOT':([115,143,149,150,153,157,159,164,172,173,202,204,218,219,221,222,223,224,225,226,227,228,229,230,231,232,233,234,235,236,253,257,304,306,307,311,315,330,334,344,347,356,358,359,360,362,366,367,],[172,172,172,172,172,172,-90,172,172,-75,-74,172,172,172,172,172,172,172,-81,172,172,172,172,172,172,172,172,172,172,172,172,-89,172,172,172,172,-82,-87,-91,172,172,-83,-85,-92,-93,-86,-84,-88,]),'{':([4,28,35,43,51,63,70,73,75,76,77,86,91,93,94,95,96,100,101,103,105,106,109,116,117,119,120,121,122,123,124,126,127,138,176,177,178,186,198,240,254,294,313,321,337,346,350,354,364,],[-65,45,45,-60,-67,45,-51,-52,-73,-66,-69,108,-59,115,-2,-2,-53,-2,125,-2,129,130,45,-50,115,-54,-71,-70,-72,-68,115,-2,187,45,-58,-56,-57,244,45,-55,45,115,115,-2,115,115,115,115,115,]),'}':([1,4,6,11,12,14,26,43,45,52,58,64,65,
66,69,88,89,90,91,108,111,112,114,115,125,128,129,130,132,133,136,138,146,157,159,173,174,175,180,181,183,184,185,187,188,190,191,192,194,195,197,202,213,225,242,243,244,245,246,248,249,254,257,289,290,291,295,301,302,315,318,320,324,330,334,336,352,353,356,358,359,360,362,366,367,],[-2,-65,-23,-5,-3,-26,-4,-60,-2,-6,-7,-2,91,-64,-22,-2,-63,-2,-59,-2,-61,-62,-10,173,-2,-17,-2,-2,194,-2,-30,-14,202,-77,-90,-75,-28,-25,-27,-24,242,-2,-36,-2,246,248,-2,-39,-18,-29,-16,-74,-76,-81,-20,-35,-2,291,-19,-21,-38,-13,-89,-37,318,-8,-33,-15,-12,-82,-9,-11,-34,-87,-91,-40,-32,-31,-83,-85,-92,-93,-86,-84,-88,]),'OR':([4,19,21,57,144,145,148,155,158,161,163,166,167,168,169,201,203,206,207,210,214,217,239,259,261,264,269,271,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,300,305,309,314,316,328,329,343,345,348,355,357,363,],[-65,-46,-45,-44,-131,-130,-107,-99,-96,-129,-98,-102,-134,236,-132,236,-101,236,236,236,-97,-125,-122,236,-103,-126,236,-133,-118,-121,-113,-108,-115,-117,-111,-112,-114,-110,-109,-120,-116,236,-119,236,-105,-128,-100,-127,-129,236,-104,-94,-106,236,236,-95,]),'IN_PORT':([0,1,4,6,14,43,52,58,69,91,114,128,138,173,174,175,180,181,187,194,197,202,242,244,246,248,254,291,301,302,318,320,],[25,25,-65,-23,-26,-60,-6,-7,-22,-59,-10,-17,-14,-75,-28,-25,-27,-24,25,-18,-16,-74,-20,25,-19,-21,-13,-8,-15,-12,-9,-11,]),}
# Expand the compact {token: (state_list, action_list)} form emitted by PLY
# into the nested _lr_action[state][token] = action table the parser indexes,
# then drop the compact form to free memory.
_lr_action = { }
for _k, _v in _lr_action_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_action: _lr_action[_x] = { }
      _lr_action[_x][_k] = _y
del _lr_action_items
_lr_goto_items = {'decl':([0,1,187,244,],[1,1,1,1,]),'statements':([93,117,124,294,313,337,346,350,354,364,],[114,174,180,320,334,352,356,359,362,366,]),'type_enums':([125,184,],[183,243,]),'pairsx':([50,99,],[76,123,]),'type_members':([108,129,133,],[132,188,195,]),'statements_inner':([115,157,],[146,213,]),'enumeration':([115,143,149,150,153,157,164,172,204,218,219,221,222,223,224,226,227,228,229,230,231,232,233,234,235,236,247,253,304,306,307,311,344,347,],[155,155,155,155,155,155,155,155,155,155,155,155,155,155,155,155,155,155,155,155,155,155,155,155,155,155,293,155,155,155,155,155,155,155,]),'file':([0,],[5,]),'type_state':([130,191,],[191,191,]),'type_member':([108,129,133,],[133,133,133,]),'type_or_void':([108,129,133,],[134,134,134,]),'param':([49,53,94,103,126,252,],[70,70,70,70,70,70,]),'aexpr':([115,143,149,150,153,157,164,172,204,218,219,221,222,223,224,226,227,228,229,230,231,232,233,234,235,236,253,304,306,307,311,344,347,],[148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,148,]),'literal':([115,143,149,150,153,157,164,172,204,218,219,221,222,223,224,226,227,228,229,230,231,232,233,234,235,236,253,304,306,307,311,344,347,],[163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,163,]),'params':([49,53,94,103,126,252,],[71,78,116,127,186,296,]),'statement':([115,157,],[157,157,]),'var':([113,115,131,143,149,150,153,157,164,172,200,204,209,211,212,216,218,219,221,222,223,224,226,227,228,229,230,231,232,233,234,235,236,237,238,253,304,306,307,311,312,344,347,],[141,158,193,158,158,158,158,158,158,158,256,158,263,265,266,268,158,158,158,158,158,158,158,158,158,158,158,158,158,158,158,158,158,287,288,158,158,158,158,158,333,158,158,]),'if_statement':([115,157,350,],[159,159,360,]),'type':([0,1,27,34,36,39,40,49,53,67,85,94,103,108,115,126,129,133,143,147,149,150,153,157,164,172,187,204,208,218,219,221,222,22
3,224,226,227,228,229,230,231,232,233,234,235,236,244,252,253,303,304,306,307,310,311,322,344,347,],[10,10,42,54,56,59,60,72,72,92,107,72,72,135,160,72,135,135,160,203,160,160,160,160,160,160,10,160,262,160,160,160,160,160,160,160,160,160,160,160,160,160,160,160,160,160,10,298,160,325,160,160,160,331,160,339,160,160,]),'empty':([0,1,31,42,45,47,49,53,54,55,56,59,60,64,88,90,94,95,100,103,108,125,126,129,130,133,141,182,184,187,191,193,196,204,219,244,252,293,304,306,311,321,322,323,325,],[11,11,51,51,66,51,73,73,51,51,51,51,51,66,66,66,73,51,51,73,136,185,73,136,192,136,51,51,185,11,192,51,51,258,258,11,297,51,258,258,258,51,340,51,51,]),'declsx':([0,1,187,244,],[12,26,12,12,]),'func_decl':([0,1,187,244,],[6,6,6,6,]),'func_def':([0,1,187,244,],[14,14,14,14,]),'idents':([28,35,63,109,138,198,254,],[44,55,87,138,197,254,301,]),'void':([0,1,108,129,133,187,244,],[15,15,137,137,137,15,15,]),'identx':([45,64,88,90,],[65,89,111,112,]),'type_states':([130,191,],[190,249,]),'pair':([50,99,],[77,77,]),'type_enum':([125,184,],[184,184,]),'typestr':([0,1,27,34,36,39,40,49,53,67,85,94,103,108,115,126,129,133,143,147,149,150,153,157,164,172,187,204,208,218,219,221,222,223,224,226,227,228,229,230,231,232,233,234,235,236,244,252,253,303,304,306,307,310,311,322,344,347,],[19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,19,]),'types':([252,322,],[299,338,]),'pairs':([31,42,47,54,55,56,59,60,95,100,141,182,193,196,293,321,323,325,],[48,62,68,79,80,82,83,84,117,124,199,241,250,251,319,337,341,342,]),'ident':([0,1,10,15,27,28,29,30,34,35,36,37,39,40,41,45,49,50,53,63,64,67,72,85,88,90,94,97,98,99,103,108,109,110,113,115,125,126,129,130,131,133,134,138,143,147,149,150,153,157,160,164,172,184,187,191,198,200,204,205,208,209,211,212,215,216,218,219,220,221,222,223,224,226,227,228,229,230,231,232,233,234,235,236,237,238,244,247,252,253,254,298,303,304,306,307,310,311,3
12,322,344,347,],[21,21,31,33,21,43,46,47,21,43,21,57,21,21,61,64,21,74,21,43,64,21,96,21,64,64,21,119,120,74,21,21,43,139,140,167,182,21,21,189,140,21,196,43,167,21,167,167,167,167,214,167,167,182,21,189,43,140,167,261,21,140,140,140,267,140,167,167,271,167,167,167,167,167,167,167,167,167,167,167,167,167,167,167,140,140,21,292,21,167,43,96,21,167,167,167,21,167,140,21,167,167,]),'expr':([115,143,149,150,153,157,164,172,204,218,219,221,222,223,224,226,227,228,229,230,231,232,233,234,235,236,253,304,306,307,311,344,347,],[168,201,206,207,210,168,217,239,259,269,259,272,273,274,275,276,277,278,279,280,281,282,283,284,285,286,300,259,259,329,259,355,357,]),'exprs':([204,219,304,306,311,],[260,270,326,327,332,]),'decls':([0,187,244,],[24,245,290,]),}
# Same expansion as for the action table: turn the compact
# {nonterminal: (state_list, goto_list)} pairs into the nested
# _lr_goto[state][nonterminal] = state table, then delete the compact form.
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
   for _x,_y in zip(_v[0],_v[1]):
      if not _x in _lr_goto: _lr_goto[_x] = { }
      _lr_goto[_x][_k] = _y
del _lr_goto_items
# Grammar production table generated by PLY (yacc.py) — do not edit by hand.
# Each entry is a 6-tuple:
#   (production string, LHS nonterminal, RHS symbol count,
#    handler function name, grammar source file, grammar source line)
# The first entry is the augmented start production and has no handler.
_lr_productions = [
  ("S' -> file","S'",1,None,None,None),
  ('file -> decls','file',1,'p_file','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',218),
  ('empty -> <empty>','empty',0,'p_empty','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',222),
  ('decls -> declsx','decls',1,'p_decls','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',225),
  ('declsx -> decl declsx','declsx',2,'p_declsx__list','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',229),
  ('declsx -> empty','declsx',1,'p_declsx__none','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',239),
  ('decl -> PROTOCOL STRING SEMI','decl',3,'p_decl__protocol','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',243),
  ('decl -> INCLUDE STRING SEMI','decl',3,'p_decl__include','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',252),
  ('decl -> MACHINE ( idents ) : params { decls }','decl',9,'p_decl__machine0','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',261),
  ('decl -> MACHINE ( idents pairs ) : params { decls }','decl',10,'p_decl__machine1','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',265),
  ('decl -> ACTION ( ident pairs ) statements','decl',6,'p_decl__action','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',269),
  ('decl -> IN_PORT ( ident , type , var pairs ) statements','decl',10,'p_decl__in_port','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',273),
  ('decl -> OUT_PORT ( ident , type , var pairs ) SEMI','decl',10,'p_decl__out_port','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',277),
  ('decl -> TRANS ( idents , idents , ident ) idents','decl',9,'p_decl__trans0','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',281),
  ('decl -> TRANS ( idents , idents ) idents','decl',7,'p_decl__trans1','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',285),
  ('decl -> TRANS ( idents , idents , ident ) idents idents','decl',10,'p_decl__trans2','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',289),
  ('decl -> TRANS ( idents , idents ) idents idents','decl',8,'p_decl__trans3','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',293),
  ('decl -> EXTERN_TYPE ( type pairs ) SEMI','decl',6,'p_decl__extern0','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',297),
  ('decl -> GLOBAL ( type pairs ) { type_members }','decl',8,'p_decl__global','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',302),
  ('decl -> STRUCT ( type pairs ) { type_members }','decl',8,'p_decl__struct','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',307),
  ('decl -> ENUM ( type pairs ) { type_enums }','decl',8,'p_decl__enum','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',311),
  ('decl -> STATE_DECL ( type pairs ) { type_states }','decl',8,'p_decl__state_decl','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',316),
  ('decl -> type ident pairs SEMI','decl',4,'p_decl__object','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',322),
  ('decl -> func_decl','decl',1,'p_decl__func_decl','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',327),
  ('func_decl -> void ident ( params ) pairs SEMI','func_decl',7,'p_func_decl__0','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',331),
  ('func_decl -> type ident ( params ) pairs SEMI','func_decl',7,'p_func_decl__0','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',332),
  ('decl -> func_def','decl',1,'p_decl__func_def','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',336),
  ('func_def -> void ident ( params ) pairs statements','func_def',7,'p_func_def__0','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',340),
  ('func_def -> type ident ( params ) pairs statements','func_def',7,'p_func_def__0','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',341),
  ('type_members -> type_member type_members','type_members',2,'p_type_members__list','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',346),
  ('type_members -> empty','type_members',1,'p_type_members__empty','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',350),
  ('type_member -> type_or_void ident ( types ) pairs SEMI','type_member',7,'p_type_method__0','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',354),
  ('type_member -> type_or_void ident ( params ) pairs statements','type_member',7,'p_type_method__1','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',358),
  ('type_member -> type_or_void ident pairs SEMI','type_member',4,'p_type_member__1','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',362),
  ('type_member -> type_or_void ident ASSIGN expr SEMI','type_member',5,'p_type_member__2','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',366),
  ('type_enums -> type_enum type_enums','type_enums',2,'p_type_enums__list','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',372),
  ('type_enums -> empty','type_enums',1,'p_type_enums__empty','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',376),
  ('type_enum -> ident pairs SEMI','type_enum',3,'p_type_enum','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',380),
  ('type_states -> type_state type_states','type_states',2,'p_type_states__list','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',385),
  ('type_states -> empty','type_states',1,'p_type_states__empty','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',389),
  ('type_state -> ident , enumeration pairs SEMI','type_state',5,'p_type_state','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',393),
  ('types -> type , types','types',3,'p_types__multiple','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',398),
  ('types -> type','types',1,'p_types__one','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',402),
  ('types -> empty','types',1,'p_types__empty','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',406),
  ('typestr -> typestr DOUBLE_COLON ident','typestr',3,'p_typestr__multi','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',410),
  ('typestr -> ident','typestr',1,'p_typestr__single','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',414),
  ('type -> typestr','type',1,'p_type__one','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',418),
  ('void -> VOID','void',1,'p_void','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',422),
  ('type_or_void -> type','type_or_void',1,'p_type_or_void','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',426),
  ('type_or_void -> void','type_or_void',1,'p_type_or_void','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',427),
  ('params -> param , params','params',3,'p_params__many','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',432),
  ('params -> param','params',1,'p_params__one','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',436),
  ('params -> empty','params',1,'p_params__none','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',440),
  ('param -> type ident','param',2,'p_param','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',444),
  ('param -> type STAR ident','param',3,'p_param__pointer','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',448),
  ('param -> type STAR ident = STRING','param',5,'p_param__pointer_default','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',452),
  ('param -> type ident = NUMBER','param',4,'p_param__default_number','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',456),
  ('param -> type ident = LIT_BOOL','param',4,'p_param__default_bool','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',460),
  ('param -> type ident = STRING','param',4,'p_param__default_string','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',464),
  ('idents -> { identx }','idents',3,'p_idents__braced','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',469),
  ('idents -> ident','idents',1,'p_idents__bare','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',473),
  ('identx -> ident SEMI identx','identx',3,'p_identx__multiple_1','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',477),
  ('identx -> ident , identx','identx',3,'p_identx__multiple_1','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',478),
  ('identx -> ident identx','identx',2,'p_identx__multiple_2','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',482),
  ('identx -> empty','identx',1,'p_identx__single','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',486),
  ('ident -> IDENT','ident',1,'p_ident','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',490),
  ('pairs -> , pairsx','pairs',2,'p_pairs__list','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',495),
  ('pairs -> empty','pairs',1,'p_pairs__empty','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',499),
  ('pairsx -> pair , pairsx','pairsx',3,'p_pairsx__many','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',503),
  ('pairsx -> pair','pairsx',1,'p_pairsx__one','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',508),
  ('pair -> ident = STRING','pair',3,'p_pair__assign','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',513),
  ('pair -> ident = ident','pair',3,'p_pair__assign','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',514),
  ('pair -> ident = NUMBER','pair',3,'p_pair__assign','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',515),
  ('pair -> STRING','pair',1,'p_pair__literal','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',519),
  ('statements -> { statements_inner }','statements',3,'p_statements__inner','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',524),
  ('statements -> { }','statements',2,'p_statements__none','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',528),
  ('statements_inner -> statement statements_inner','statements_inner',2,'p_statements_inner__many','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',532),
  ('statements_inner -> statement','statements_inner',1,'p_statements_inner__one','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',536),
  ('exprs -> expr , exprs','exprs',3,'p_exprs__multiple','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',540),
  ('exprs -> expr','exprs',1,'p_exprs__one','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',544),
  ('exprs -> empty','exprs',1,'p_exprs__empty','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',548),
  ('statement -> expr SEMI','statement',2,'p_statement__expression','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',552),
  ('statement -> expr ASSIGN expr SEMI','statement',4,'p_statement__assign','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',556),
  ('statement -> ENQUEUE ( var , type ) statements','statement',7,'p_statement__enqueue','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',560),
  ('statement -> ENQUEUE ( var , type , expr ) statements','statement',9,'p_statement__enqueue_latency','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',564),
  ('statement -> STALL_AND_WAIT ( var , var ) SEMI','statement',7,'p_statement__stall_and_wait','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',568),
  ('statement -> PEEK ( var , type pairs ) statements','statement',8,'p_statement__peek','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',572),
  ('statement -> CHECK_ALLOCATE ( var ) SEMI','statement',5,'p_statement__check_allocate','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',576),
  ('statement -> CHECK_STOP_SLOTS ( var , STRING , STRING ) SEMI','statement',9,'p_statement__check_stop','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',580),
  ('statement -> RETURN expr SEMI','statement',3,'p_statement__return','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',584),
  ('statement -> if_statement','statement',1,'p_statement__if','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',588),
  ('if_statement -> IF ( expr ) statements','if_statement',5,'p_if_statement__if','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',592),
  ('if_statement -> IF ( expr ) statements ELSE statements','if_statement',7,'p_if_statement__if_else','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',596),
  ('if_statement -> IF ( expr ) statements ELSE if_statement','if_statement',7,'p_statement__if_else_if','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',600),
  ('aexpr -> STATIC_CAST ( type , expr )','aexpr',6,'p_expr__static_cast','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',605),
  ('aexpr -> STATIC_CAST ( type , STRING , expr )','aexpr',8,'p_expr__static_cast_ptr','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',609),
  ('aexpr -> var','aexpr',1,'p_expr__var','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',613),
  ('aexpr -> type ident','aexpr',2,'p_expr__localvar','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',617),
  ('aexpr -> literal','aexpr',1,'p_expr__literal','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',621),
  ('aexpr -> enumeration','aexpr',1,'p_expr__enumeration','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',625),
  ('aexpr -> ident ( exprs )','aexpr',4,'p_expr__func_call','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',629),
  ('aexpr -> NEW type','aexpr',2,'p_expr__new','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',633),
  ('aexpr -> OOD','aexpr',1,'p_expr__null','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',637),
  ('aexpr -> aexpr DOT ident','aexpr',3,'p_expr__member','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',641),
  ('aexpr -> aexpr DOT ident ( exprs )','aexpr',6,'p_expr__member_method_call','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',645),
  ('aexpr -> aexpr [ exprs ]','aexpr',4,'p_expr__member_method_call_lookup','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',649),
  ('aexpr -> type DOUBLE_COLON ident ( exprs )','aexpr',6,'p_expr__class_method_call','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',653),
  ('expr -> aexpr','expr',1,'p_expr__aexpr','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',657),
  ('expr -> expr STAR expr','expr',3,'p_expr__binary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',661),
  ('expr -> expr SLASH expr','expr',3,'p_expr__binary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',662),
  ('expr -> expr PLUS expr','expr',3,'p_expr__binary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',663),
  ('expr -> expr DASH expr','expr',3,'p_expr__binary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',664),
  ('expr -> expr LT expr','expr',3,'p_expr__binary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',665),
  ('expr -> expr GT expr','expr',3,'p_expr__binary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',666),
  ('expr -> expr LE expr','expr',3,'p_expr__binary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',667),
  ('expr -> expr GE expr','expr',3,'p_expr__binary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',668),
  ('expr -> expr EQ expr','expr',3,'p_expr__binary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',669),
  ('expr -> expr NE expr','expr',3,'p_expr__binary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',670),
  ('expr -> expr AND expr','expr',3,'p_expr__binary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',671),
  ('expr -> expr OR expr','expr',3,'p_expr__binary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',672),
  ('expr -> expr RIGHTSHIFT expr','expr',3,'p_expr__binary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',673),
  ('expr -> expr LEFTSHIFT expr','expr',3,'p_expr__binary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',674),
  ('expr -> NOT expr','expr',2,'p_expr__unary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',679),
  ('expr -> INCR expr','expr',2,'p_expr__unary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',680),
  ('expr -> DECR expr','expr',2,'p_expr__unary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',681),
  ('expr -> DASH expr','expr',2,'p_expr__unary_op','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',682),
  ('aexpr -> ( expr )','aexpr',3,'p_expr__parens','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',686),
  ('aexpr -> IS_VALID ( var )','aexpr',4,'p_expr__is_valid_ptr','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',690),
  ('aexpr -> IS_INVALID ( var )','aexpr',4,'p_expr__is_invalid_ptr','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',694),
  ('literal -> STRING','literal',1,'p_literal__string','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',698),
  ('literal -> NUMBER','literal',1,'p_literal__number','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',702),
  ('literal -> FLOATNUMBER','literal',1,'p_literal__float','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',706),
  ('literal -> LIT_BOOL','literal',1,'p_literal__bool','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',710),
  ('enumeration -> ident : ident','enumeration',3,'p_enumeration','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',714),
  ('var -> ident','var',1,'p_var','/Users/lx/Courses/Computer Architecture/gem5-jty/src/mem/slicc/parser.py',718),
]
| 294.365854
| 23,349
| 0.688562
| 10,047
| 48,276
| 3.245347
| 0.063103
| 0.028768
| 0.057535
| 0.090413
| 0.677084
| 0.645157
| 0.620591
| 0.59897
| 0.590505
| 0.581979
| 0
| 0.394855
| 0.026494
| 48,276
| 163
| 23,350
| 296.171779
| 0.298934
| 0.001284
| 0
| 0.012987
| 1
| 0.87013
| 0.354989
| 0.207696
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
72de367388f2994444139ddcab1a4cb1ccb610de
| 901
|
py
|
Python
|
tvsched/application/models/auth.py
|
astsu-dev/tv-schedule
|
1cea36147e66b7163df6f7098cfda4d43f3bdde2
|
[
"MIT"
] | null | null | null |
tvsched/application/models/auth.py
|
astsu-dev/tv-schedule
|
1cea36147e66b7163df6f7098cfda4d43f3bdde2
|
[
"MIT"
] | null | null | null |
tvsched/application/models/auth.py
|
astsu-dev/tv-schedule
|
1cea36147e66b7163df6f7098cfda4d43f3bdde2
|
[
"MIT"
] | null | null | null |
from dataclasses import dataclass
import uuid
from tvsched.entities.auth import Role
@dataclass(frozen=True)
class UserAdd:
    """Data for registering a user with the default USER role."""

    # Login name of the new user.
    username: str
    # Plaintext password — presumably hashed before storage (the repo-level
    # UserInRepoAdd carries password_hash instead); confirm against callers.
    password: str
@dataclass(frozen=True)
class UserWithRoleAdd:
    """Data for registering a user with an explicitly chosen role."""

    # Login name of the new user.
    username: str
    # Plaintext password — presumably hashed before storage; confirm.
    password: str
    # Role to assign to the new user.
    role: Role
@dataclass(frozen=True)
class UserLogIn:
    """Credentials supplied when logging in a user."""

    # Login name of the user.
    username: str
    # Plaintext password supplied at login.
    password: str
@dataclass(frozen=True)
class UserInRepoAdd:
    """Data for adding a user record to the repository."""

    # Login name of the user.
    username: str
    # Hash of the user's password (never the plaintext).
    password_hash: str
    # Role assigned to the user.
    role: Role
@dataclass(frozen=True)
class UserInRepo:
    """User record as stored in the repository."""

    # Repository-assigned unique identifier.
    id: uuid.UUID
    # Login name of the user.
    username: str
    # Stored hash of the user's password.
    password_hash: str
    # Role assigned to the user.
    role: Role
@dataclass(frozen=True)
class UserInToken:
    """Data about a user embedded in an auth token."""

    # Unique identifier of the user (matches UserInRepo.id).
    id: uuid.UUID
    # Role of the user.
    role: Role
| 15.807018
| 52
| 0.672586
| 115
| 901
| 5.252174
| 0.321739
| 0.149007
| 0.188742
| 0.238411
| 0.569536
| 0.402318
| 0.402318
| 0.344371
| 0.192053
| 0.192053
| 0
| 0
| 0.226415
| 901
| 56
| 53
| 16.089286
| 0.866571
| 0.214206
| 0
| 0.709677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.16129
| 0.096774
| 0
| 0.806452
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 6
|
72f4036a3eb4241cf66b34ced4c87979babd994c
| 2,538
|
py
|
Python
|
EdgyBoneTools/BoneScripts/FlipBones.py
|
herr-edgy/EdgyBoneTools
|
5ddaa57e85ac2795f9f6e7c1b936b1d6251d2c91
|
[
"MIT"
] | null | null | null |
EdgyBoneTools/BoneScripts/FlipBones.py
|
herr-edgy/EdgyBoneTools
|
5ddaa57e85ac2795f9f6e7c1b936b1d6251d2c91
|
[
"MIT"
] | null | null | null |
EdgyBoneTools/BoneScripts/FlipBones.py
|
herr-edgy/EdgyBoneTools
|
5ddaa57e85ac2795f9f6e7c1b936b1d6251d2c91
|
[
"MIT"
] | null | null | null |
import bpy
import math
from math import radians
from mathutils import Matrix
def _flip_bones_180(axis_attr):
    """Rotate every selected editable bone 180 degrees about one of its own
    local axes, translating it back so each head stays in place.

    axis_attr: name of the EditBone axis attribute to rotate about
        ('x_axis', 'y_axis' or 'z_axis').
    Raises RuntimeError when no editable bones are selected.
    """
    bones = bpy.context.selected_editable_bones
    # `selected_editable_bones` is None outside edit mode; `not bones`
    # covers both that case and an empty selection (len(None) would raise
    # TypeError instead of the intended error message).
    if not bones:
        raise RuntimeError("No bones selected in edit mode!")
    bpy.context.scene.transform_orientation_slots[0].type = 'NORMAL'
    bpy.context.scene.tool_settings.transform_pivot_point = 'INDIVIDUAL_ORIGINS'
    for bone in bones:
        old_head = bone.head.copy()
        axis = getattr(bone, axis_attr).normalized()
        rotation = Matrix.Rotation(radians(180), 4, axis)
        bone.transform(rotation, roll=True)
        # transform() rotates about the origin, which moves the bone; shift
        # it back so the head returns to its pre-rotation position.
        offset_vec = -(bone.head - old_head)
        bone.head += offset_vec
        bone.tail += offset_vec


def FlipBoneXY():
    """Flip selected bones 180 degrees about their local Z axis."""
    _flip_bones_180('z_axis')


def FlipBoneYZ():
    """Flip selected bones 180 degrees about their local X axis."""
    _flip_bones_180('x_axis')


def FlipBoneZX():
    """Flip selected bones 180 degrees about their local Y axis."""
    _flip_bones_180('y_axis')
def RollBone90():
    """Add 90 degrees (pi/2 radians) to the roll of every selected editable
    bone, subtracting one full turn if the result exceeds 2*pi.

    Raises RuntimeError when no editable bones are selected.
    """
    bones = bpy.context.selected_editable_bones
    # None outside edit mode; also guards an empty selection (len(None)
    # would raise TypeError instead of the intended error message).
    if not bones:
        raise RuntimeError("No bones selected in edit mode!")
    for bone in bones:
        new_roll = bone.roll + 0.5 * math.pi
        # Wrap once past a full turn — matches the original single
        # subtraction (rolls beyond 4*pi are not normalized further).
        if new_roll > 2.0 * math.pi:
            new_roll -= 2.0 * math.pi
        bone.roll = new_roll
| 32.126582
| 80
| 0.6316
| 339
| 2,538
| 4.59882
| 0.179941
| 0.064144
| 0.040411
| 0.062861
| 0.887749
| 0.860808
| 0.860808
| 0.844131
| 0.844131
| 0.844131
| 0
| 0.017516
| 0.257683
| 2,538
| 79
| 81
| 32.126582
| 0.809979
| 0.090623
| 0
| 0.696429
| 0
| 0
| 0.085254
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.142857
| 0.053571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
beab8aaa6b1b9547658d8de8589ece2fa0df7b7b
| 43
|
py
|
Python
|
quantumnetworks/utils/__init__.py
|
Phionx/quantumnetworks
|
9e89cac61cdbc1885689336cd31bb5f7dbdde22a
|
[
"MIT"
] | 7
|
2021-11-22T18:45:00.000Z
|
2022-02-21T16:01:10.000Z
|
quantumnetworks/utils/__init__.py
|
Phionx/quantumnetworks
|
9e89cac61cdbc1885689336cd31bb5f7dbdde22a
|
[
"MIT"
] | 35
|
2021-09-29T08:05:35.000Z
|
2022-02-20T02:11:15.000Z
|
quantumnetworks/utils/__init__.py
|
Phionx/quantumnetworks
|
9e89cac61cdbc1885689336cd31bb5f7dbdde22a
|
[
"MIT"
] | 1
|
2022-02-01T18:05:34.000Z
|
2022-02-01T18:05:34.000Z
|
"""
Utils
"""
from .visualization import *
| 8.6
| 28
| 0.651163
| 4
| 43
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.162791
| 43
| 4
| 29
| 10.75
| 0.777778
| 0.116279
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
fe66c6a06cee0320dc2867e805317f7362e3f7b2
| 90,427
|
py
|
Python
|
notebooks-text-format/gan_tricks.py
|
arpitvaghela/probml-notebooks
|
32ecb309dd474b989fd1c6ce4ad6dab7a25bbead
|
[
"MIT"
] | 166
|
2021-07-16T17:33:09.000Z
|
2022-03-30T03:35:34.000Z
|
notebooks-text-format/gan_tricks.py
|
arpitvaghela/probml-notebooks
|
32ecb309dd474b989fd1c6ce4ad6dab7a25bbead
|
[
"MIT"
] | 29
|
2021-07-21T16:31:51.000Z
|
2022-03-31T19:50:13.000Z
|
notebooks-text-format/gan_tricks.py
|
arpitvaghela/probml-notebooks
|
32ecb309dd474b989fd1c6ce4ad6dab7a25bbead
|
[
"MIT"
] | 48
|
2021-07-17T08:26:18.000Z
|
2022-03-31T03:36:18.000Z
|
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.11.3
# kernelspec:
# display_name: Python 3
# name: python3
# ---
# + [markdown] id="vbK-ql3_xq5D"
# # 5+n tricks for GANs
# + [markdown] id="RzqOA0kAxTtH"
# The idea behind GANs is to train an implicit model that can produce samples $x' \sim p_g$ that look like samples taken from the actual data distribution $x \sim p_{data}$ by always trying to fool a discriminator that attempts to differentiate between real and generated samples. The generator and the discriminator are then trained jointly by playing the following min-max game.
#
# $$
# \min_{G} \max_{D} \mathbb{E}_{x \sim q_{data}}[\log D(x)] +\mathbb{E}_{x \sim p_g}[\log(1- D(x))]
# $$
#
# However, it might not be practical to compute the above min-max game. To see this, consider a fixed generator: the min-max game then becomes the following optimisation problem
#
# $$\max_{D} \mathbb{E}_{x \sim q_{data}}[\log D(x)] +\mathbb{E}_{x \sim p_g}[\log(1- D(x))] $$
#
# it can be shown that the optimal discriminator is
#
# $$D_g^*(x)= \frac{p_{g}(x)}{p_{g}(x)+p_{data}(x)}$$
#
# and the derivative of our discriminator $D(x)$ is
#
# $$\nabla D^*_g(x) = \frac{1}{p_{data}(x)}\nabla_xp_{data}(x) - \frac{1}{p_g(x)}\nabla_xp_g(x)$$
#
# If $p_g$ and $p_{data}$ do not share the same support, the derivative of our discriminator $D(x)$ becomes unbounded and thus ill-defined. This means we cannot use gradient descent in such a case to find the equilibrium point of our min-max game. However, for datasets that lies on some lower-dimensional manifold, such as natural images, this is often the case because the manifold will only overlap with a distribution $p_g(x)$, where $p_g(x)$ has non-zero measure everywhere, on the manifold. This tutorial thus outlines some basic techniques that can address this and other issues surrounding GAN training.
# + [markdown] id="N3beEBwbxwDW"
# ## 1. Instance noise to address the problem of disjoint support
#
# From: [Sønderby, C. K., Caballero, J., Theis, L., Shi, W., & Huszár, F. (2016). Amortised map inference for image super-resolution. arXiv preprint arXiv:1610.04490](https://arxiv.org/pdf/1610.04490.pdf)
#
# One simple approach to addressing the disjoint support problem is to apply some instance noise to both the real and fake samples. The noise term can further be reduced/annealed over time to improve performance.
#
# ```
# real = real + alpha*torch.randn_like(real)
# fake = fake + alpha*torch.randn_like(fake)
# ```
# + [markdown] id="vnN-NbY4xynA"
# ## 2. Use Spectral Normalisation
#
# From: [Miyato, T., Kataoka, T., Koyama, M., & Yoshida, Y. (2018). Spectral normalization for generative adversarial networks. arXiv preprint arXiv:1802.05957.](https://arxiv.org/pdf/1802.05957.pdf)
#
# Another approach of addressing the issue of non-computable or intractable statistics a GAN might provide is to add regularity conditions such as Lipschitz continuity on the discriminator, one way of doing this is to use spectral normalisations, which use power iteration to ensure that the discriminator is Lipschitz continuous by normalising the weight matrix of the neural network by its largest singular value.
#
# $$
# W_{SN} = \frac{W}{\sigma(W)}, \text{where } \sigma(W) = \frac{||Wh||_2}{||h||_2}$$
#
# ```
# from torch import nn
# from torch.nn.utils.parametrizations import spectral_norm
# m = spectral_norm(nn.Linear(20, 40))
# ```
# + [markdown] id="beoIcKE8x5Ni"
# ## 3. Use refinement techniques to incorporate discriminator information post-training
#
# From : [Azadi, S., Olsson, C., Darrell, T., Goodfellow, I., & Odena, A. (2018). Discriminator rejection sampling. arXiv preprint arXiv:1810.06758.](https://arxiv.org/pdf/1810.06758)
#
# One simple approach to improving the quality of samples from a GAN is to use the truncation trick which samples from a truncated normal distribution instead of a normal distribution to avoid sampling at the tails reducing the chance of sampling from regions that lead to poorer sample quality however also reduces the diversity of the samples attained.
#
# ```
# from scipy.stats import truncnorm
# threshold = 1
# values = truncnorm.rvs(-threshold, threshold, size=(64, 100))
# ```
# + [markdown] id="Q2ChVYh_yGKA"
# A more principled approach is to use the discriminator to perform rejection sampling, but that is too expensive. A better approach is to use discriminator driven latent sampling [DDLS](https://arxiv.org/pdf/2003.06060) or discriminator gradient flow [DGFlow](https://arxiv.org/abs/2012.00780) that uses Langevin sampling to sample from the discriminator.
#
# ```
# def _velocity(z_img, D, G):
# z_img_t = z_img.clone()
# z_img_t.requires_grad_(True)
# if z_img_t.grad is not None:
# z_img_t.grad.zero_()
# d_score = D(G(z_img_t))
# d_score.backward(torch.ones_like(d_score).to(z_img.device))
# return z_img_t.grad.data
#
# def refine_samples(z_img, G, D, eta=.01, noise_factor=0.,
# num_steps=200):
# for t in tqdm(range(1, num_steps)):
# v = _velocity(z_img, D, G)
# z_img = z_img.data + eta * v + np.sqrt(2*eta) * noise_factor * torch.randn_like(z_img)
# return G(z_img), z_img
# ```
# + [markdown] id="s42aCzJdyItZ"
# 
# + [markdown] id="dF7wej9wyagS"
# ## 4. Consider using latent optimisation to incorporate discriminator information during training
#
# From : [Wu, Y., Donahue, J., Balduzzi, D., Simonyan, K., & Lillicrap, T. (2019). Logan: Latent optimisation for generative adversarial networks. arXiv preprint arXiv:1912.00953.](https://arxiv.org/pdf/1912.00953.pdf)
#
# Another way of incorporating the discriminator is to sample in the gradient direction rather than from random samples $z \sim p(z)$ from the prior. This can be done by first sampling a random $z$ and then computing $\nabla_z D(G(z))$ and taking one gradient step in that direction generated as such
#
# ```
# noise = noise.view(*noise.shape, 1, 1)
# d_score = D(G(noise))
# d_score.backward(torch.ones_like(d_score).to(noise.device))
# noise = noise + alpha*d_score.grad.data
# ```
# + [markdown] id="6aYS47mmyebG"
# This results in better samples and more stable GAN training as it adds a symplectic gradient adjustment into the optimisation procedure, reducing the negative effects of cycling.
# + [markdown] id="NbFAvg2Pyg4p"
# ## 5. Top-k samples for discriminator training to incorporate discriminator information during training
#
# From: [Sinha, S., Zhao, Z., Goyal, A., Raffel, C., & Odena, A. (2020). Top-k training of gans: Improving gan performance by throwing away bad samples. arXiv preprint arXiv:2002.06224.](https://arxiv.org/pdf/2002.06224.pdf)
#
# The discriminator can also be used during training to take the top_k samples identified by the discriminator during training to compute the loss for the generator. This is because the bottom k-samples produce misleading updates, that point away from the data manifold.
#
# ```
# topk_predictions = torch.topk( predictions, k )
# ```
# + [markdown] id="5EXossN1ykWi"
# The value of k is annealed through the training process: as the GAN gets better and the discriminator becomes more informative, we can increase the value of k.
| 674.828358
| 83,058
| 0.943103
| 4,072
| 90,427
| 20.922151
| 0.798379
| 0.000751
| 0.001068
| 0.001127
| 0.00628
| 0.005517
| 0.005517
| 0.001855
| 0.001855
| 0.001033
| 0
| 0.160806
| 0.013215
| 90,427
| 133
| 83,059
| 679.902256
| 0.793953
| 0.997047
| 0
| null | 0
| null | 0
| 0
| null | 1
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
feafba27e2318b4bcb303712dcb12664aa9444c6
| 35
|
py
|
Python
|
awpy/__init__.py
|
kamnsv/awpy
|
132a0d75d76553e8ad52c5e1ea7e942e12f52ad6
|
[
"MIT"
] | 18
|
2022-02-04T00:06:43.000Z
|
2022-03-23T00:55:18.000Z
|
awpy/__init__.py
|
kamnsv/awpy
|
132a0d75d76553e8ad52c5e1ea7e942e12f52ad6
|
[
"MIT"
] | 7
|
2022-02-04T00:06:48.000Z
|
2022-02-23T16:50:19.000Z
|
awpy/__init__.py
|
kamnsv/awpy
|
132a0d75d76553e8ad52c5e1ea7e942e12f52ad6
|
[
"MIT"
] | 4
|
2022-02-05T04:22:53.000Z
|
2022-03-20T12:56:41.000Z
|
from awpy.parser import DemoParser
| 17.5
| 34
| 0.857143
| 5
| 35
| 6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.114286
| 35
| 1
| 35
| 35
| 0.967742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
feb372918686f3d53517a56b1e2edf88824404cc
| 47
|
py
|
Python
|
network/v5/__init__.py
|
houzeyu2683/DIWM
|
d31e5bd7bc52215c9440d13046304dfef0b25463
|
[
"MIT"
] | null | null | null |
network/v5/__init__.py
|
houzeyu2683/DIWM
|
d31e5bd7bc52215c9440d13046304dfef0b25463
|
[
"MIT"
] | null | null | null |
network/v5/__init__.py
|
houzeyu2683/DIWM
|
d31e5bd7bc52215c9440d13046304dfef0b25463
|
[
"MIT"
] | null | null | null |
from .model import *
from .machine import *
| 7.833333
| 22
| 0.680851
| 6
| 47
| 5.333333
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.234043
| 47
| 5
| 23
| 9.4
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
22ca35bf3d3a8b7a63fe2a8f098304d082ab6e89
| 1,067
|
py
|
Python
|
env/lib/python2.7/sre_compile.py
|
essien1990/Flask-Mysqldb
|
e0917b90c45a0aaf922bfa672ddb479cb450a02d
|
[
"MIT"
] | null | null | null |
env/lib/python2.7/sre_compile.py
|
essien1990/Flask-Mysqldb
|
e0917b90c45a0aaf922bfa672ddb479cb450a02d
|
[
"MIT"
] | 6
|
2020-06-05T22:57:03.000Z
|
2021-06-10T18:48:39.000Z
|
env/lib/python2.7/sre_compile.py
|
essien1990/Flask-Mysqldb
|
e0917b90c45a0aaf922bfa672ddb479cb450a02d
|
[
"MIT"
] | 1
|
2021-12-16T17:09:52.000Z
|
2021-12-16T17:09:52.000Z
|
XSym
0078
270dc93767ee01f3df4e18ba5a05669f
/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/sre_compile.py
| 213.4
| 945
| 0.099344
| 16
| 1,067
| 6.5625
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.237288
| 0.88941
| 1,067
| 5
| 945
| 213.4
| 0.652542
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
22e073034c450e34ebbcbff8a89c350bce34306b
| 220
|
py
|
Python
|
src/onegov/swissvotes/collections/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/swissvotes/collections/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
src/onegov/swissvotes/collections/__init__.py
|
politbuero-kampagnen/onegov-cloud
|
20148bf321b71f617b64376fe7249b2b9b9c4aa9
|
[
"MIT"
] | null | null | null |
from onegov.swissvotes.collections.pages import TranslatablePageCollection
from onegov.swissvotes.collections.votes import SwissVoteCollection
__all__ = (
'SwissVoteCollection',
'TranslatablePageCollection',
)
| 24.444444
| 74
| 0.822727
| 17
| 220
| 10.411765
| 0.588235
| 0.112994
| 0.225989
| 0.350282
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109091
| 220
| 8
| 75
| 27.5
| 0.903061
| 0
| 0
| 0
| 0
| 0
| 0.204545
| 0.118182
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 1
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
22f260f81c9617087e895e003a8481412a5736e5
| 24,032
|
py
|
Python
|
tests/magnus/test_nodes.py
|
vishalbelsare/magnus-core
|
77a4eb8ecca70b276b48e5f7a11166f7c3196f6b
|
[
"Apache-2.0"
] | 3
|
2022-03-15T15:19:14.000Z
|
2022-03-29T02:38:27.000Z
|
tests/magnus/test_nodes.py
|
vishalbelsare/magnus-core
|
77a4eb8ecca70b276b48e5f7a11166f7c3196f6b
|
[
"Apache-2.0"
] | 18
|
2022-02-02T08:28:25.000Z
|
2022-03-30T12:38:40.000Z
|
tests/magnus/test_nodes.py
|
vishalbelsare/magnus-core
|
77a4eb8ecca70b276b48e5f7a11166f7c3196f6b
|
[
"Apache-2.0"
] | 2
|
2022-01-24T11:10:09.000Z
|
2022-03-15T15:19:46.000Z
|
import os
import pytest
from magnus import defaults # pylint: disable=import-error
from magnus import nodes # pylint: disable=import-error
def test_base_node_command_friendly_name_replaces_whitespace_with_character():
node = nodes.BaseNode(name='test', internal_name='test', config='test_config', execution_type=None)
assert node.command_friendly_name() == 'test'
node.internal_name = 'test '
assert node.command_friendly_name() == 'test' + defaults.COMMAND_FRIENDLY_CHARACTER
def test_base_node_get_internal_name_from_command_name_replaces_character_with_whitespace():
assert nodes.BaseNode.get_internal_name_from_command_name('test') == 'test'
assert nodes.BaseNode.get_internal_name_from_command_name('test%') == 'test '
def test_base_node_get_step_log_name_returns_internal_name_if_no_map_variable():
node = nodes.BaseNode(name='test', internal_name='test', config='test_config', execution_type=None)
assert node.get_step_log_name() == 'test'
def test_base_node_get_step_log_name_returns_map_modified_internal_name_if_map_variable():
node = nodes.BaseNode(name='test', internal_name='test.' + defaults.MAP_PLACEHOLDER,
config='test_config', execution_type=None)
assert node.get_step_log_name(map_variable={'map_key': 'a'}) == 'test.a'
def test_base_node_get_step_log_name_returns_map_modified_internal_name_if_map_variable_multiple():
node = nodes.BaseNode(
name='test', internal_name='test.' + defaults.MAP_PLACEHOLDER + '.step.' + defaults.MAP_PLACEHOLDER,
config='test_config', execution_type=None)
assert node.get_step_log_name(map_variable={'map_key': 'a', 'map_key1': 'b'}) == 'test.a.step.b'
def test_base_node_get_branch_log_name_returns_null_if_not_set():
node = nodes.BaseNode(name='test', internal_name='test', config='test_config', execution_type=None)
assert node.get_branch_log_name() is None
def test_base_node_get_branch_log_name_returns_internal_name_if_set():
node = nodes.BaseNode(name='test', internal_name='test', config='test_config',
execution_type=None, internal_branch_name='test_internal')
assert node.get_branch_log_name() is 'test_internal'
def test_base_node_get_branch_log_name_returns_map_modified_internal_name_if_map_variable():
node = nodes.BaseNode(name='test', internal_name='test_', config='test_config',
execution_type=None, internal_branch_name='test.' + defaults.MAP_PLACEHOLDER)
assert node.get_branch_log_name(map_variable={'map_key': 'a'}) == 'test.a'
def test_base_node_get_branch_log_name_returns_map_modified_internal_name_if_map_variable_multiple():
node = nodes.BaseNode(name='test', internal_name='test_', config='test_config',
execution_type=None,
internal_branch_name='test.' + defaults.MAP_PLACEHOLDER + '.step.' + defaults.MAP_PLACEHOLDER)
assert node.get_branch_log_name(map_variable={'map_key': 'a', 'map_key1': 'b'}) == 'test.a.step.b'
def test_base_node_get_on_failure_node_returns_none_if_not_defined():
node = nodes.BaseNode(name='test', internal_name='test', config={}, execution_type=None)
assert node.get_on_failure_node() is None
def test_base_node_get_on_failure_node_returns_node_name_if_defined():
node = nodes.BaseNode(name='test', internal_name='test', config={'on_failure': 'fail'}, execution_type=None)
assert node.get_on_failure_node() == 'fail'
def test_base_node_get_catalog_settings_returns_none_if_not_defined():
node = nodes.BaseNode(name='test', internal_name='test', config={}, execution_type=None)
assert node.get_catalog_settings() is None
def test_base_node_get_catalog_settings_returns_node_name_if_defined():
node = nodes.BaseNode(name='test', internal_name='test', config={'catalog': 'some settings'}, execution_type=None)
assert node.get_catalog_settings() == 'some settings'
def test_base_node_get_branch_by_name_raises_exception():
node = nodes.BaseNode(name='test', internal_name='test', config={'catalog': 'some settings'}, execution_type=None)
with pytest.raises(Exception):
node.get_branch_by_name('fail')
def test_base_node_get_next_node_returns_config_next():
node = nodes.BaseNode(name='test', internal_name='test', config={'next': 'IamNext'}, execution_type=None)
assert node.get_next_node() == 'IamNext'
def test_base_node_get_mode_config_returns_mode_config_if_present():
node = nodes.BaseNode(name='test', internal_name='test',
config={'mode_config': {'local': 'some settings'}},
execution_type=None)
assert node.get_mode_config('local') == 'some settings'
def test_base_node_get_mode_config_returns_empty_dict_if_not_present():
node = nodes.BaseNode(name='test', internal_name='test', config={}, execution_type=None)
assert node.get_mode_config('local') == {}
def test_base_node_get_max_attempts_returns_max_attempts_as_in_config():
node = nodes.BaseNode(name='test', internal_name='test', config={'retry': 2}, execution_type=None)
assert node.get_max_attempts() == 2
def test_base_node_get_max_attempts_returns_max_attempts_as_1_if_not_in_config():
node = nodes.BaseNode(name='test', internal_name='test', config={}, execution_type=None)
assert node.get_max_attempts() == 1
def test_base_node_execute_raises_not_implemented_error():
node = nodes.BaseNode(name='test', internal_name='test', config={}, execution_type=None)
with pytest.raises(NotImplementedError):
node.execute(executor='test')
def test_base_node_execute_as_graph_raises_not_implemented_error():
node = nodes.BaseNode(name='test', internal_name='test', config={}, execution_type=None)
with pytest.raises(NotImplementedError):
node.execute_as_graph(executor='test')
def test_validate_node_gets_specs_from_default_specs(mocker, monkeypatch):
mock_load_yaml = mocker.MagicMock()
monkeypatch.setattr(nodes.utils, 'load_yaml', mock_load_yaml)
node = nodes.BaseNode(name='test', internal_name='test', config={}, execution_type=None)
with pytest.raises(Exception):
nodes.validate_node(node)
args, _ = mock_load_yaml.call_args
assert args[0].endswith(defaults.NODE_SPEC_FILE)
def test_validate_node_raises_exception_for_unspecified_node(mocker, monkeypatch):
dummy_specs = {'test': {}}
monkeypatch.setattr(nodes.utils, 'load_yaml', mocker.MagicMock(return_value=dummy_specs))
node = nodes.BaseNode(name='test', internal_name='test', config={}, execution_type=None)
node.node_type = 'test1'
with pytest.raises(Exception):
nodes.validate_node(node)
def test_validate_node_does_not_raise_exception_for_specified_node(mocker, monkeypatch):
dummy_specs = {'dummy': {}}
monkeypatch.setattr(nodes.utils, 'load_yaml', mocker.MagicMock(return_value=dummy_specs))
node = nodes.BaseNode(name='test', internal_name='test', config={}, execution_type=None)
node.node_type = 'dummy'
nodes.validate_node(node)
def test_validate_node_sends_message_back_if_dot_present_in_name(mocker, monkeypatch):
dummy_specs = {'dummy': {}}
monkeypatch.setattr(nodes.utils, 'load_yaml', mocker.MagicMock(return_value=dummy_specs))
node = nodes.BaseNode(name='test.', internal_name='test', config={}, execution_type=None)
node.node_type = 'dummy'
messages = nodes.validate_node(node)
assert len(messages) == 1
assert messages[0] == 'Node names cannot have . in them'
def test_validate_node_sends_message_back_if_character_present_in_name(mocker, monkeypatch):
dummy_specs = {'dummy': {}}
monkeypatch.setattr(nodes.utils, 'load_yaml', mocker.MagicMock(return_value=dummy_specs))
node = nodes.BaseNode(name='test%', internal_name='test', config={}, execution_type=None)
node.node_type = 'dummy'
messages = nodes.validate_node(node)
assert len(messages) == 1
assert messages[0] == "Node names cannot have '%' in them"
def test_validate_node_messages_empty_if_name_is_valid(mocker, monkeypatch):
dummy_specs = {'dummy': {}}
monkeypatch.setattr(nodes.utils, 'load_yaml', mocker.MagicMock(return_value=dummy_specs))
node = nodes.BaseNode(name='test', internal_name='test', config={}, execution_type=None)
node.node_type = 'dummy'
messages = nodes.validate_node(node)
assert len(messages) == 0
def test_validate_node_sends_messages_if_required_are_not_present(mocker, monkeypatch):
dummy_specs = {'dummy': {'required': ['dummy_required']}}
monkeypatch.setattr(nodes.utils, 'load_yaml', mocker.MagicMock(return_value=dummy_specs))
node = nodes.BaseNode(name='test', internal_name='test', config={'dummy_required1': True}, execution_type=None)
node.node_type = 'dummy'
messages = nodes.validate_node(node)
assert len(messages) == 1
assert messages[0] == 'test should have dummy_required field'
def test_validate_node_sends_empty_if_required_present(mocker, monkeypatch):
dummy_specs = {'dummy': {'required': ['dummy_required']}}
monkeypatch.setattr(nodes.utils, 'load_yaml', mocker.MagicMock(return_value=dummy_specs))
node = nodes.BaseNode(name='test', internal_name='test', config={'dummy_required': True}, execution_type=None)
node.node_type = 'dummy'
messages = nodes.validate_node(node)
assert len(messages) == 0
def test_validate_node_sends_messages_if_error_on_are_present(mocker, monkeypatch):
dummy_specs = {'dummy': {'error_on': ['dummy_required']}}
monkeypatch.setattr(nodes.utils, 'load_yaml', mocker.MagicMock(return_value=dummy_specs))
node = nodes.BaseNode(name='test', internal_name='test', config={'dummy_required': True}, execution_type=None)
node.node_type = 'dummy'
messages = nodes.validate_node(node)
assert len(messages) == 1
assert messages[0] == 'test should not have dummy_required field'
def test_validate_node_sends_empty_if_error_on_not_present(mocker, monkeypatch):
dummy_specs = {'dummy': {'error_on': ['dummy_required']}}
monkeypatch.setattr(nodes.utils, 'load_yaml', mocker.MagicMock(return_value=dummy_specs))
node = nodes.BaseNode(name='test', internal_name='test', config={'dummy_required1': True}, execution_type=None)
node.node_type = 'dummy'
messages = nodes.validate_node(node)
assert len(messages) == 0
def test_task_node_mocks_if_mock_is_true(mocker, monkeypatch):
mock_attempt_log = mocker.MagicMock()
mock_executor = mocker.MagicMock()
mock_executor.run_log_store.create_attempt_log = mocker.MagicMock(return_value=mock_attempt_log)
task_node = nodes.TaskNode(name='test', internal_name='test', config={'command': 'nocommand'}, execution_type=None)
task_node.execute(executor=mock_executor, mock=True)
assert mock_attempt_log.status == defaults.SUCCESS
def test_task_node_sets_attempt_log_fail_in_exception_of_execution(mocker, monkeypatch):
mock_attempt_log = mocker.MagicMock()
mock_executor = mocker.MagicMock()
mock_executor.run_log_store.create_attempt_log = mocker.MagicMock(return_value=mock_attempt_log)
task_node = nodes.TaskNode(name='test', internal_name='test', config={'command': 'nocommand'}, execution_type=None)
task_node.execution_type = mocker.MagicMock()
task_node.execution_type.execute_command = mocker.MagicMock(side_effect=Exception())
task_node.execute(executor=mock_executor)
assert mock_attempt_log.status == defaults.FAIL
def test_task_node_sets_attempt_log_success_in_no_exception_of_execution(mocker, monkeypatch):
mock_attempt_log = mocker.MagicMock()
mock_executor = mocker.MagicMock()
mock_executor.run_log_store.create_attempt_log = mocker.MagicMock(return_value=mock_attempt_log)
task_node = nodes.TaskNode(name='test', internal_name='test', config={'command': 'nocommand'}, execution_type=None)
task_node.execution_type = mocker.MagicMock()
task_node.execution_type.execute_command = mocker.MagicMock()
task_node.execute(executor=mock_executor)
assert mock_attempt_log.status == defaults.SUCCESS
def test_task_node_execute_as_graph_raises_exception():
task_node = nodes.TaskNode(name='test', internal_name='test', config={'command': 'nocommand'}, execution_type=None)
with pytest.raises(Exception):
task_node.execute_as_graph(None)
def test_fail_node_sets_branch_log_fail(mocker, monkeypatch):
mock_attempt_log = mocker.MagicMock()
mock_branch_log = mocker.MagicMock()
mock_executor = mocker.MagicMock()
mock_executor.run_log_store.create_attempt_log = mocker.MagicMock(return_value=mock_attempt_log)
mock_executor.run_log_store.get_branch_log = mocker.MagicMock(return_value=mock_branch_log)
node = nodes.FailNode(name='test', internal_name='test', config={'command': 'nocommand'}, execution_type=None)
node.execute(executor=mock_executor)
assert mock_attempt_log.status == defaults.SUCCESS
assert mock_branch_log.status == defaults.FAIL
def test_fail_node_sets_attempt_log_success_even_in_exception(mocker, monkeypatch):
mock_attempt_log = mocker.MagicMock()
mock_executor = mocker.MagicMock()
mock_executor.run_log_store.create_attempt_log = mocker.MagicMock(return_value=mock_attempt_log)
mock_executor.run_log_store.get_branch_log = mocker.MagicMock(side_effect=Exception())
node = nodes.FailNode(name='test', internal_name='test', config={'command': 'nocommand'}, execution_type=None)
node.execute(executor=mock_executor)
assert mock_attempt_log.status == defaults.SUCCESS
def test_fail_node_execute_as_graph_raises_exception():
fail_node = nodes.FailNode(name='test', internal_name='test', config={'command': 'nocommand'}, execution_type=None)
with pytest.raises(Exception):
fail_node.execute_as_graph(None)
def test_success_node_sets_branch_log_success(mocker, monkeypatch):
mock_attempt_log = mocker.MagicMock()
mock_branch_log = mocker.MagicMock()
mock_executor = mocker.MagicMock()
mock_executor.run_log_store.create_attempt_log = mocker.MagicMock(return_value=mock_attempt_log)
mock_executor.run_log_store.get_branch_log = mocker.MagicMock(return_value=mock_branch_log)
node = nodes.SuccessNode(name='test', internal_name='test', config={'command': 'nocommand'}, execution_type=None)
node.execute(executor=mock_executor)
assert mock_attempt_log.status == defaults.SUCCESS
assert mock_branch_log.status == defaults.SUCCESS
def test_success_node_sets_attempt_log_success_even_in_exception(mocker, monkeypatch):
mock_attempt_log = mocker.MagicMock()
mock_executor = mocker.MagicMock()
mock_executor.run_log_store.create_attempt_log = mocker.MagicMock(return_value=mock_attempt_log)
mock_executor.run_log_store.get_branch_log = mocker.MagicMock(side_effect=Exception())
node = nodes.SuccessNode(name='test', internal_name='test', config={'command': 'nocommand'}, execution_type=None)
node.execute(executor=mock_executor)
assert mock_attempt_log.status == defaults.SUCCESS
def test_success_node_execute_as_graph_raises_exception():
success_node = nodes.SuccessNode(name='test', internal_name='test',
config={'command': 'nocommand'}, execution_type=None)
with pytest.raises(Exception):
success_node.execute_as_graph(None)
def test_parallel_node_raises_exception_for_empty_branches():
with pytest.raises(Exception):
nodes.ParallelNode(name='test', internal_name='test', config={'branches': {}}, execution_type='python')
def test_parallel_node_get_sub_graphs_creates_graphs(mocker, monkeypatch):
mock_create_graph = mocker.MagicMock(return_value='agraphobject')
monkeypatch.setattr(nodes, 'create_graph', mock_create_graph)
parallel_config = {
'branches': {
'a': {},
'b': {}
}
}
node = nodes.ParallelNode(name='test', internal_name='test', config=parallel_config, execution_type='python')
assert mock_create_graph.call_count == 2
assert len(node.branches.items()) == 2
def test_parallel_node_get_branch_by_name_raises_exception_if_branch_not_found(mocker, monkeypatch):
monkeypatch.setattr(nodes.ParallelNode, 'get_sub_graphs', mocker.MagicMock())
node = nodes.ParallelNode(name='test', internal_name='test', config={}, execution_type='python')
with pytest.raises(Exception):
node.get_branch_by_name('a')
def test_parallel_node_get_branch_by_name_returns_branch_if_found(mocker, monkeypatch):
monkeypatch.setattr(nodes.ParallelNode, 'get_sub_graphs', mocker.MagicMock())
node = nodes.ParallelNode(name='test', internal_name='test', config={}, execution_type='python')
node.branches = {'a': 'somegraph'}
assert node.get_branch_by_name('a') == 'somegraph'
def test_parallel_node_execute_raises_exception(mocker, monkeypatch):
monkeypatch.setattr(nodes.ParallelNode, 'get_sub_graphs', mocker.MagicMock())
node = nodes.ParallelNode(name='test', internal_name='test', config={}, execution_type='python')
with pytest.raises(Exception):
node.execute(executor='test')
def test_nodes_map_node_raises_exception_if_config_not_have_iterate_on():
with pytest.raises(Exception):
nodes.MapNode(name='test', internal_name='test', config={}, execution_type='test')
def test_nodes_map_node_raises_exception_if_config_not_have_iterate_as():
with pytest.raises(Exception):
nodes.MapNode(name='test', internal_name='test', config={'iterate_on': 'y'}, execution_type='test')
def test_nodes_map_node_names_the_branch_as_defaults_place_holder(monkeypatch, mocker):
monkeypatch.setattr(nodes.MapNode, 'get_sub_graph', mocker.MagicMock())
node = nodes.MapNode(name='test', internal_name='test', config={
'iterate_on': 'a', 'iterate_as': 'y_i'}, execution_type='test')
assert node.branch_placeholder_name == defaults.MAP_PLACEHOLDER
def test_nodes_map_get_sub_graph_calls_create_graph_with_correct_naming(mocker, monkeypatch):
mock_create_graph = mocker.MagicMock()
monkeypatch.setattr(nodes, 'create_graph', mock_create_graph)
_ = nodes.MapNode(name='test', internal_name='test', config={
'iterate_on': 'a', 'iterate_as': 'y_i', 'branch': {}}, execution_type='test')
mock_create_graph.assert_called_once_with({}, internal_branch_name='test.' + defaults.MAP_PLACEHOLDER)
def test_nodes_map_get_branch_by_name_returns_a_sub_graph(mocker, monkeypatch):
mock_create_graph = mocker.MagicMock(return_value='a')
monkeypatch.setattr(nodes, 'create_graph', mock_create_graph)
node = nodes.MapNode(name='test', internal_name='test', config={
'iterate_on': 'a', 'iterate_as': 'y_i', 'branch': {}}, execution_type='test')
assert node.get_branch_by_name('anyname') == 'a'
def test_nodes_map_node_execute_raises_exception(mocker, monkeypatch):
monkeypatch.setattr(nodes.MapNode, 'get_sub_graph', mocker.MagicMock())
node = nodes.MapNode(name='test', internal_name='test', config={
'iterate_on': 'a', 'iterate_as': 'y_i'}, execution_type='test')
with pytest.raises(Exception):
node.execute('dummy')
def test_nodes_dag_node_raises_exception_if_dag_definition_is_not_present():
with pytest.raises(Exception):
nodes.DagNode(name='test', internal_name='test', config={}, execution_type='test')
def test_node_dag_node_get_sub_graph_raises_exception_if_dag_block_not_present(mocker, monkeypatch):
mock_load_yaml = mocker.MagicMock(return_value={})
monkeypatch.setattr(nodes.utils, 'load_yaml', mock_load_yaml)
with pytest.raises(Exception):
nodes.DagNode(name='test', internal_name='test', config={'dag_definition': 'a'}, execution_type='test')
def test_nodes_dag_node_get_sub_graph_calls_create_graph_with_correct_parameters(mocker, monkeypatch):
mock_load_yaml = mocker.MagicMock(return_value={'dag': 'a'})
mock_create_graph = mocker.MagicMock(return_value='branch')
monkeypatch.setattr(nodes.utils, 'load_yaml', mock_load_yaml)
monkeypatch.setattr(nodes, 'create_graph', mock_create_graph)
_ = nodes.DagNode(name='test', internal_name='test', config={'dag_definition': 'a'}, execution_type='test')
mock_create_graph.assert_called_once_with('a', internal_branch_name='test.' + defaults.DAG_BRANCH_NAME)
def test_nodes_dag_node_get_branch_by_name_raises_exception_if_branch_name_is_invalid(mocker, monkeypatch):
monkeypatch.setattr(nodes.DagNode, 'get_sub_graph', mocker.MagicMock(return_value='branch'))
node = nodes.DagNode(name='test', internal_name='test', config={'dag_definition': 'a'}, execution_type='test')
with pytest.raises(Exception):
node.get_branch_by_name('test')
def test_nodes_dag_node_get_branch_by_name_returns_if_branch_name_is_valid(mocker, monkeypatch):
monkeypatch.setattr(nodes.DagNode, 'get_sub_graph', mocker.MagicMock(return_value='branch'))
node = nodes.DagNode(name='test', internal_name='test', config={'dag_definition': 'a'}, execution_type='test')
assert node.get_branch_by_name('test.' + defaults.DAG_BRANCH_NAME) == 'branch'
def test_nodes_dag_node_execute_raises_exception(mocker, monkeypatch):
monkeypatch.setattr(nodes.DagNode, 'get_sub_graph', mocker.MagicMock(return_value='branch'))
node = nodes.DagNode(name='test', internal_name='test', config={'dag_definition': 'a'}, execution_type='test')
with pytest.raises(Exception):
node.execute('dummy')
def test_nodes_as_is_node_accepts_what_is_given():
node = nodes.AsISNode(name='test', internal_name='test', config={
'command_config': {'render_string': 'test'}}, execution_type='test')
assert node.config['command_config']['render_string'] == 'test'
def test_as_is_node_execute_as_graph_raises_exception():
as_is_node = nodes.AsISNode(name='test', internal_name='test',
config={'command': 'nocommand'}, execution_type=None)
with pytest.raises(Exception):
as_is_node.execute_as_graph(None)
def test_as_is_node_sets_attempt_log_success(mocker, monkeypatch):
mock_attempt_log = mocker.MagicMock()
mock_executor = mocker.MagicMock()
mock_executor.run_log_store.create_attempt_log = mocker.MagicMock(return_value=mock_attempt_log)
node = nodes.AsISNode(name='test', internal_name='test', config={}, execution_type=None)
node.execute(executor=mock_executor)
assert mock_attempt_log.status == defaults.SUCCESS
def test_is_terminal_node_when_has_next():
node = nodes.BaseNode(name='test', internal_name='test_', config={'next': 'yes'},
execution_type=None, internal_branch_name='test_')
assert not node.is_terminal_node()
def test_is_terminal_node_when_no_next():
node = nodes.BaseNode(name='test', internal_name='test_', config={'none': 'no'},
execution_type=None, internal_branch_name='test_')
assert node.is_terminal_node()
def test_get_neighbors_no_neighbors():
node = nodes.BaseNode(name='test', internal_name='test_', config={},
execution_type=None, internal_branch_name='test_')
assert node.get_neighbors() == []
def test_get_neighbors_only_next():
node = nodes.BaseNode(name='test', internal_name='test_', config={'next': 'a'},
execution_type=None, internal_branch_name='test_')
neighbors = node.get_neighbors()
assert len(neighbors) == 1
assert neighbors[0] == 'a'
def test_get_neighbors_both_next_and_on_failure():
node = nodes.BaseNode(name='test', internal_name='test_', config={'next': 'a', 'on_failure': 'b'},
execution_type=None, internal_branch_name='test_')
neighbors = node.get_neighbors()
assert len(neighbors) == 2
assert neighbors[0] == 'a'
assert neighbors[1] == 'b'
| 39.526316
| 120
| 0.744674
| 3,172
| 24,032
| 5.246217
| 0.057692
| 0.07115
| 0.063458
| 0.07812
| 0.913046
| 0.888468
| 0.851331
| 0.823088
| 0.770567
| 0.698756
| 0
| 0.001344
| 0.133031
| 24,032
| 607
| 121
| 39.591433
| 0.79736
| 0.002372
| 0
| 0.505587
| 0
| 0
| 0.095695
| 0
| 0
| 0
| 0
| 0
| 0.164804
| 1
| 0.184358
| false
| 0
| 0.011173
| 0
| 0.195531
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
a3cae03d9461b2c9691b6a092a1814f4d5de2f6a
| 108
|
py
|
Python
|
daskperiment/config.py
|
takumiohym/daskperiment
|
292752f29a9f77b948b2a5050122109b8f4fe148
|
[
"BSD-3-Clause"
] | 27
|
2019-01-25T01:44:20.000Z
|
2019-11-13T14:26:56.000Z
|
daskperiment/config.py
|
nikkkkhil/dask_task
|
1802efacd43e68e5e7dd8eb82e27b3cbf09d74ae
|
[
"BSD-3-Clause"
] | 64
|
2019-01-29T03:52:31.000Z
|
2019-04-25T07:55:09.000Z
|
daskperiment/config.py
|
nikkkkhil/dask_task
|
1802efacd43e68e5e7dd8eb82e27b3cbf09d74ae
|
[
"BSD-3-Clause"
] | 5
|
2019-02-04T20:58:39.000Z
|
2019-04-14T11:45:48.000Z
|
import pathlib
_CACHE_DIR = pathlib.Path('daskperiment_cache')
_LOG_DIR = pathlib.Path('daskperiment_log')
| 21.6
| 47
| 0.805556
| 14
| 108
| 5.785714
| 0.5
| 0.246914
| 0.345679
| 0.641975
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.083333
| 108
| 4
| 48
| 27
| 0.818182
| 0
| 0
| 0
| 0
| 0
| 0.314815
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
a3e6f15e7a2076c2584e349545927052133e260d
| 16,792
|
py
|
Python
|
install/app_store/tk-multi-workfiles2/v0.9.7/python/tk_multi_workfiles/crash_dbg_form.py
|
JoanAzpeitia/lp_sg
|
e0ee79555e419dd2ae3a5f31e5515b3f40b22a62
|
[
"MIT"
] | 4
|
2019-01-11T03:41:28.000Z
|
2019-09-12T06:57:17.000Z
|
bundle_cache/app_store/tk-multi-workfiles2/v0.11.6/python/tk_multi_workfiles/crash_dbg_form.py
|
ColinKennedy/tk-config-default2-respawn
|
855fb8033daa549b92615792442f19a7f9c4f55c
|
[
"Linux-OpenIB"
] | null | null | null |
bundle_cache/app_store/tk-multi-workfiles2/v0.11.6/python/tk_multi_workfiles/crash_dbg_form.py
|
ColinKennedy/tk-config-default2-respawn
|
855fb8033daa549b92615792442f19a7f9c4f55c
|
[
"Linux-OpenIB"
] | 2
|
2019-01-10T05:00:18.000Z
|
2020-02-15T16:32:56.000Z
|
import sgtk
from sgtk.platform.qt import QtGui
from .ui.crash_dbg_form import Ui_CrashDbgForm
import threading
import random
import time
class SgRunner(threading.Thread):
def __init__(self):
threading.Thread.__init__(self)
self._lock = threading.Lock()
self._run = True
self._sg_searches = [
{"entity_type":"Task",
"filters":[['project', 'is', {'type': 'Project', 'name': 'Another Demo Project', 'id': 67}], ['entity', 'type_is', 'Asset']],
"fields":['project', 'code', 'description', 'image', 'entity.Asset.sg_asset_type', 'entity', 'content', 'step', 'sg_status_list', 'task_assignees', 'name'],
"order":[]},
{"entity_type":"Task",
"filters":[['project', 'is', {'type': 'Project', 'name': 'Another Demo Project', 'id': 67}], ['entity', 'type_is', 'Shot']],
"fields":['project', 'entity.Shot.sg_sequence', 'code', 'description', 'image', 'entity', 'content', 'step', 'sg_status_list', 'task_assignees', 'name'],
"order":[]}
]
self._thread_local = threading.local()
@property
def _shotgun(self):
self._lock.acquire()
try:
if not hasattr(self._thread_local, "sg"):
self._thread_local.sg = sgtk.util.shotgun.create_sg_connection()
return self._thread_local.sg
finally:
self._lock.release()
def stop(self):
self._lock.acquire()
try:
self._run = False
finally:
self._lock.release()
def run(self):
res = {}
while True:
self._lock.acquire()
try:
if not self._run:
break
finally:
self._lock.release()
"""
s = []
for tick in range(512):
time.sleep(0.001)
multiplier = random.randint(1, 8)
for i in range(8*multiplier):
s.append(tick*i)
time.sleep(2)
res = dict((i, c) for i, c in enumerate(s))
"""
sg_search = self._sg_searches[random.randint(0, len(self._sg_searches)-1)]
res = self._shotgun.find(sg_search["entity_type"],
sg_search["filters"],
sg_search["fields"],
sg_search["order"])
print len(res)
class CrashDbgForm(QtGui.QWidget):
def __init__(self, parent=None):
QtGui.QWidget.__init__(self, parent)
self._ui = Ui_CrashDbgForm()
self._ui.setupUi(self)
refresh_action = QtGui.QAction("Refresh", self)
refresh_action.setShortcut(QtGui.QKeySequence(QtGui.QKeySequence.Refresh))
refresh_action.triggered.connect(self._on_refresh_triggered)
self.addAction(refresh_action)
# create model:
self._model = QtGui.QStandardItemModel()
self._ui.tree_view.setModel(self._model)
self._ui.list_view.setModel(self._model)
# create sg query threads:
self._sg_runner_threads = []
self._sg_runner_threads.append(SgRunner())
self._sg_runner_threads.append(SgRunner())
self._sg_runner_threads.append(SgRunner())
for thread in self._sg_runner_threads:
thread.start()
def closeEvent(self, event):
"""
"""
for thread in self._sg_runner_threads:
print "Stopping sg runner thread..."
thread.stop()
thread.join()
print " > Stopped!"
if self._model:
self._model.deleteLater()
self._model = None
return QtGui.QWidget.closeEvent(self, event)
def _on_refresh_triggered(self):
"""
"""
#time.sleep(0.1)
self._model.clear()
self._repopulate_model()
self._repopulate_model()
self._repopulate_model()
def _update_groups(self, group_names):
"""
"""
new_items = []
for name in group_names:
group_item = QtGui.QStandardItem(name)
new_items.append(group_item)
if new_items:
self._model.invisibleRootItem().appendRows(new_items)
def _add_group(self, group_name):
"""
"""
group_item = QtGui.QStandardItem(group_name)
self._model.invisibleRootItem().appendRow(group_item)
return group_item
def _add_files(self, group_item, file_names):
"""
"""
new_items = []
for name in file_names:
item = QtGui.QStandardItem(name)
new_items.append(item)
if new_items:
group_item.appendRows(new_items)
def _repopulate_model(self):
"""
"""
search_id = random.randint(0, 19)
if search_id == 0:
self._update_groups(["Sequence 01"])
elif search_id == 1:
self._update_groups(["123", "Anm - Animation"])
grp = self._add_group("Anm - Animation")
self._add_files(grp, ["reviewtest", "reviewtest", "reviewtest", "reviewtest", "launchtest", "reviewtest", "reviewtest", "reviewtest", "reviewtest", "reviewtest", "colourspacetest", "crashtest", "shouldbreak", "reviewtest", "scene", "reviewtest", "reviewtest"])
elif search_id == 2:
self._update_groups(["Anm", "Anm - Animation"])
grp = self._add_group("Anm - Animation")
self._add_files(grp, ["reviewtest", "reviewtest", "reviewtest", "reviewtest", "launchtest", "reviewtest", "reviewtest", "reviewtest", "reviewtest", "reviewtest", "colourspacetest", "crashtest", "shouldbreak", "reviewtest", "scene", "reviewtest", "reviewtest"])
elif search_id == 3:
self._update_groups(["Animation"])
grp = self._add_group("Animation")
self._add_files(grp, ["reviewtest", "reviewtest", "reviewtest", "reviewtest", "launchtest", "reviewtest", "reviewtest", "reviewtest", "reviewtest", "reviewtest", "colourspacetest", "crashtest", "shouldbreak", "reviewtest", "scene", "reviewtest", "reviewtest"])
elif search_id == 4:
self._update_groups(["shot_010", "Anm - Animation", "Comp - MoreComp", "FX - Effects", "FX - More FX", "Light - EvenMoreLighting", "Light - Lighting", "Light - MoreLighting", "Light - StillMoreLighting", "Light - YetMoreLighting", "More Anim - MoreAnim", "Roto - Roto"])
grp = self._add_group("Comp - MoreComp")
self._add_files(grp, ["nopublishes"])
grp = self._add_group("Light - EvenMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - Lighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - MoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - StillMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - YetMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
elif search_id == 5:
self._update_groups(["Anm", "Anm - Animation"])
elif search_id == 6:
self._update_groups(["Animation"])
elif search_id == 7:
self._update_groups(["Comp", "Comp - MoreComp"])
grp = self._add_group("Comp - MoreComp")
self._add_files(grp, ["nopublishes"])
elif search_id == 8:
self._update_groups(["FX", "FX - Effects", "FX - More FX"])
elif search_id == 9:
self._update_groups(["Light", "Light - EvenMoreLighting", "Light - Lighting", "Light - MoreLighting", "Light - StillMoreLighting", "Light - YetMoreLighting"])
grp = self._add_group("Light - EvenMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - Lighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - MoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - StillMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
grp = self._add_group("Light - YetMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
elif search_id == 10:
self._update_groups(["EvenMoreLighting"])
grp = self._add_group("EvenMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
elif search_id == 11:
self._update_groups(["Lighting"])
grp = self._add_group("Lighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
elif search_id == 12:
self._update_groups(["MoreLighting"])
grp = self._add_group("MoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
elif search_id == 13:
self._update_groups(["StillMoreLighting"])
grp = self._add_group("StillMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
elif search_id == 14:
self._update_groups(["YetMoreLighting"])
grp = self._add_group("YetMoreLighting")
self._add_files(grp, ["testscene", "writenodetestBAD", "writenodeconversiontest", "scene", "writenodetest", "osxreviewtest", "writenodeconversiontestb", "testscene", "sendtoreviewtest", "osxreviewtest", "testscene", "writenodeconversiontest", "testscene", "writenodetestBAD", "sendtoreviewtest", "writenodeconversiontest", "scene1", "writenodeconversiontest"])
elif search_id == 15:
self._update_groups(["More Anim", "More Anim - MoreAnim"])
elif search_id == 16:
self._update_groups(["Roto", "Roto - Roto"])
elif search_id == 17:
self._update_groups(["shot_020", "Light - Lighting"])
elif search_id == 18:
self._update_groups(["Light", "Light - Lighting"])
elif search_id == 19:
self._update_groups(["The End", "Anm - Animation", "Anm - Animation B", "Comp - Finalize"])
grp = self._add_group("Anm - Animation")
self._add_files(grp, ["reviewtest", "writenodetestBAD", "writenodeconversiontest", "reviewtest", "testscene", "writenodeconversiontest", "reviewtest", "reviewtest", "reviewtest", "colourspacetest", "scene", "scene", "reviewtest", "sendtoreviewtest", "testscene", "shouldbreak", "reviewtest", "writenodeconversiontest", "reviewtest", "launchtest", "writenodetest", "writenodeconversiontestb", "osxreviewtest", "crashtest", "reviewtest", "writenodetestBAD", "writenodeconversiontest", "reviewtest", "sendtoreviewtest", "testscene", "nopublishes", "reviewtest", "reviewtest", "osxreviewtest", "testscene", "scene1"])
grp = self._add_group("Anm - Animation B")
self._add_files(grp, ["reviewtest", "writenodetestBAD", "writenodeconversiontest", "reviewtest", "testscene", "writenodeconversiontest", "reviewtest", "reviewtest", "reviewtest", "colourspacetest", "scene", "scene", "reviewtest", "sendtoreviewtest", "testscene", "shouldbreak", "reviewtest", "writenodeconversiontest", "reviewtest", "launchtest", "writenodetest", "writenodeconversiontestb", "osxreviewtest", "crashtest", "reviewtest", "writenodetestBAD", "writenodeconversiontest", "reviewtest", "sendtoreviewtest", "testscene", "nopublishes", "reviewtest", "reviewtest", "osxreviewtest", "testscene", "scene1"])
grp = self._add_group("Comp - Finalize")
self._add_files(grp, ["reviewtest", "writenodetestBAD", "writenodeconversiontest", "reviewtest", "testscene", "writenodeconversiontest", "reviewtest", "reviewtest", "reviewtest", "colourspacetest", "scene", "scene", "reviewtest", "sendtoreviewtest", "testscene", "shouldbreak", "reviewtest", "writenodeconversiontest", "reviewtest", "launchtest", "writenodetest", "writenodeconversiontestb", "osxreviewtest", "crashtest", "reviewtest", "writenodetestBAD", "writenodeconversiontest", "reviewtest", "sendtoreviewtest", "testscene", "nopublishes", "reviewtest", "reviewtest", "osxreviewtest", "testscene", "scene1"])
| 66.371542
| 625
| 0.648583
| 1,400
| 16,792
| 7.565
| 0.137143
| 0.030403
| 0.021717
| 0.032575
| 0.765745
| 0.73874
| 0.719007
| 0.705788
| 0.705788
| 0.705788
| 0
| 0.006051
| 0.202835
| 16,792
| 253
| 626
| 66.371542
| 0.785149
| 0.003156
| 0
| 0.356021
| 0
| 0
| 0.452607
| 0.130936
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.031414
| null | null | 0.015707
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
4306cd9b12bae76fa6623c99fa9393b64094544d
| 24
|
py
|
Python
|
huuey/hue/scenes/__init__.py
|
Bioto/Huuey-python
|
b64b8476cf9126df8f2bc4b5c4c5ad47545ebb0b
|
[
"MIT"
] | null | null | null |
huuey/hue/scenes/__init__.py
|
Bioto/Huuey-python
|
b64b8476cf9126df8f2bc4b5c4c5ad47545ebb0b
|
[
"MIT"
] | null | null | null |
huuey/hue/scenes/__init__.py
|
Bioto/Huuey-python
|
b64b8476cf9126df8f2bc4b5c4c5ad47545ebb0b
|
[
"MIT"
] | null | null | null |
from .scene import Scene
| 24
| 24
| 0.833333
| 4
| 24
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
431e50d50dc1d56e18dce811b630dc3846f46318
| 178
|
py
|
Python
|
app/views/qr_code.py
|
aluisq/flask_mysql_jinja
|
6fc9bd23ac31647744be0a1016bca42e2bc32961
|
[
"MIT"
] | null | null | null |
app/views/qr_code.py
|
aluisq/flask_mysql_jinja
|
6fc9bd23ac31647744be0a1016bca42e2bc32961
|
[
"MIT"
] | null | null | null |
app/views/qr_code.py
|
aluisq/flask_mysql_jinja
|
6fc9bd23ac31647744be0a1016bca42e2bc32961
|
[
"MIT"
] | null | null | null |
from app import cursor, app
from flask import render_template, request, redirect, url_for
@app.route("/qr-code")
def qr_code():
    """Serve the public QR-code page."""
    template_name = 'public/qr_code.html'
    return render_template(template_name)
| 29.666667
| 61
| 0.764045
| 28
| 178
| 4.678571
| 0.642857
| 0.137405
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.117978
| 178
| 6
| 62
| 29.666667
| 0.834395
| 0
| 0
| 0
| 0
| 0
| 0.150838
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
4a76b2dc2131d8dee68695c2796ffacd17c022e2
| 138
|
py
|
Python
|
src/astrild/rays/skys/__init__.py
|
Christovis/wys-ars
|
bb15f2d392842f9b32de12b5db5c86079bc97105
|
[
"MIT"
] | 3
|
2021-07-27T14:45:58.000Z
|
2022-01-31T21:09:46.000Z
|
src/astrild/rays/skys/__init__.py
|
Christovis/wys-ars
|
bb15f2d392842f9b32de12b5db5c86079bc97105
|
[
"MIT"
] | 1
|
2021-11-03T10:47:45.000Z
|
2021-11-03T10:47:45.000Z
|
src/astrild/rays/skys/__init__.py
|
Christovis/wys-ars
|
bb15f2d392842f9b32de12b5db5c86079bc97105
|
[
"MIT"
] | 1
|
2021-11-03T10:17:34.000Z
|
2021-11-03T10:17:34.000Z
|
from .sky_array import SkyArray
from .sky_namaster import SkyNamaster
from .sky_healpix import SkyHealpix
from .sky_utils import SkyUtils
| 27.6
| 37
| 0.855072
| 20
| 138
| 5.7
| 0.55
| 0.245614
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115942
| 138
| 4
| 38
| 34.5
| 0.934426
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
4a8ba4c9f75f93e259fc105b3d4015edadc40cbf
| 44
|
py
|
Python
|
horizon_bsn/api/__init__.py
|
sarath-kumar/horizon-bsn
|
5e50db9586316a7893f83f72e9a9e7a039437182
|
[
"Apache-2.0"
] | null | null | null |
horizon_bsn/api/__init__.py
|
sarath-kumar/horizon-bsn
|
5e50db9586316a7893f83f72e9a9e7a039437182
|
[
"Apache-2.0"
] | 76
|
2016-08-15T21:46:39.000Z
|
2018-12-18T23:49:20.000Z
|
horizon_bsn/api/__init__.py
|
sarath-kumar/horizon-bsn
|
5e50db9586316a7893f83f72e9a9e7a039437182
|
[
"Apache-2.0"
] | 5
|
2016-08-15T21:41:45.000Z
|
2020-03-01T17:03:46.000Z
|
from horizon_bsn.api import neutron # noqa
| 22
| 43
| 0.795455
| 7
| 44
| 4.857143
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.159091
| 44
| 1
| 44
| 44
| 0.918919
| 0.090909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
437347f333c7a79ec4950b5aa979310e595622f4
| 438
|
py
|
Python
|
basilisk.py
|
JJFReibel/RattleBasilisk
|
f95cd523b708e349cec2cf25a6cb5b60603f67d0
|
[
"MIT"
] | null | null | null |
basilisk.py
|
JJFReibel/RattleBasilisk
|
f95cd523b708e349cec2cf25a6cb5b60603f67d0
|
[
"MIT"
] | null | null | null |
basilisk.py
|
JJFReibel/RattleBasilisk
|
f95cd523b708e349cec2cf25a6cb5b60603f67d0
|
[
"MIT"
] | null | null | null |
# RattleBasilisk
# By Jean-Jacques F. Reibel
# I will not be held responsible for:
# any shenanigans
import os

# Shell printf commands (byte-identical to the originals) that draw the ANSI
# coloured basilisk banner; issued in order through os.system.
_BANNER_COMMANDS = (
    "printf '\e[0;31;1;1m\n'",
    "printf '\e[1;32;1;1m /\/\/\/\/\/\/\/\/\/\/\/\\\n'",
    "printf '\e[0;31;1;5m ~'",
    "printf '\e[0;31;1;1m>'",
    "printf '\e[0;33;1;1m°'",
    "printf '\e[1;32;1;1m)~~~~~~~~~~~~~~###\n'",
    "printf '\e[0;31;1;1m\n'",
)
for _command in _BANNER_COMMANDS:
    os.system(_command)
| 29.2
| 77
| 0.53653
| 78
| 438
| 3.025641
| 0.371795
| 0.237288
| 0.415254
| 0.444915
| 0.610169
| 0.542373
| 0.542373
| 0.542373
| 0.45339
| 0.347458
| 0
| 0.093085
| 0.141553
| 438
| 14
| 78
| 31.285714
| 0.531915
| 0.210046
| 0
| 0.25
| 0
| 0
| 0.667647
| 0.182353
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.125
| 0
| 0.125
| 0.875
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 6
|
4379a23b7a0eafaa35de5ad32541b848982df154
| 96
|
py
|
Python
|
v1/cloud/__init__.py
|
trisongz/fileio
|
2812e13003b79e8d19b5aa1f4e16feb2cd48b115
|
[
"MIT"
] | null | null | null |
v1/cloud/__init__.py
|
trisongz/fileio
|
2812e13003b79e8d19b5aa1f4e16feb2cd48b115
|
[
"MIT"
] | null | null | null |
v1/cloud/__init__.py
|
trisongz/fileio
|
2812e13003b79e8d19b5aa1f4e16feb2cd48b115
|
[
"MIT"
] | null | null | null |
from . import auth
from .gcp_gcs import open_gcs, _auth_gcp_gcs
from .aws_s3 import _auth_aws_s3
| 32
| 44
| 0.833333
| 19
| 96
| 3.736842
| 0.421053
| 0.28169
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02381
| 0.125
| 96
| 3
| 45
| 32
| 0.821429
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
438ca2045706e386fa775b56dea829c1c691644d
| 129
|
py
|
Python
|
keydom/__init__.py
|
pirogoeth/keydom
|
7890e63f8ff33371e2fe4c3277935533858aefe3
|
[
"MIT"
] | null | null | null |
keydom/__init__.py
|
pirogoeth/keydom
|
7890e63f8ff33371e2fe4c3277935533858aefe3
|
[
"MIT"
] | null | null | null |
keydom/__init__.py
|
pirogoeth/keydom
|
7890e63f8ff33371e2fe4c3277935533858aefe3
|
[
"MIT"
] | null | null | null |
from keydom import migrations
from keydom import models
from keydom import routes
from keydom import util
__version__ = "0.0.1"
| 18.428571
| 29
| 0.806202
| 20
| 129
| 5
| 0.5
| 0.4
| 0.64
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.027523
| 0.155039
| 129
| 6
| 30
| 21.5
| 0.889908
| 0
| 0
| 0
| 0
| 0
| 0.03876
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.8
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
439c8e9901000a6e60217c066cd75f4a4f7ad4f1
| 224
|
py
|
Python
|
Courses/100 Days of Code The Complete Python Pro Bootcamp/Day 6/Hurdle 2.py
|
edaaydinea/365-days-of-coding-challenge
|
baf06a9bef75ff45194e57357e20085b9cde2498
|
[
"MIT"
] | 4
|
2022-01-05T12:14:13.000Z
|
2022-01-08T16:03:32.000Z
|
Courses/100 Days of Code The Complete Python Pro Bootcamp/Day 6/Hurdle 2.py
|
edaaydinea/365-days-of-code
|
baf06a9bef75ff45194e57357e20085b9cde2498
|
[
"MIT"
] | null | null | null |
Courses/100 Days of Code The Complete Python Pro Bootcamp/Day 6/Hurdle 2.py
|
edaaydinea/365-days-of-code
|
baf06a9bef75ff45194e57357e20085b9cde2498
|
[
"MIT"
] | null | null | null |
def turn_right():
    """Turn 90 degrees clockwise by making three left turns."""
    for _ in range(3):
        turn_left()
def jump():
    """Clear one hurdle: approach, climb up, cross the top, come down and
    face forward again — the same eight primitive moves, table-driven."""
    steps = (move, turn_left, move, turn_right,
             move, turn_right, move, turn_left)
    for step in steps:
        step()
# Keep jumping hurdles until the robot reaches the goal square.
while not at_goal():
jump()
| 11.2
| 20
| 0.535714
| 28
| 224
| 3.964286
| 0.357143
| 0.36036
| 0.216216
| 0.288288
| 0.306306
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.316964
| 224
| 19
| 21
| 11.789474
| 0.72549
| 0
| 0
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| true
| 0
| 0
| 0
| 0.133333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
43a2268f5c484a7d76c114ba769bcbe3cbad67fe
| 52
|
py
|
Python
|
src/public/repository/keys.py
|
akornatskyy/sample-blog-api
|
87183780d7f2322ae27231e8eacea1f942f87d1d
|
[
"MIT"
] | null | null | null |
src/public/repository/keys.py
|
akornatskyy/sample-blog-api
|
87183780d7f2322ae27231e8eacea1f942f87d1d
|
[
"MIT"
] | 78
|
2019-06-03T03:24:36.000Z
|
2021-06-25T15:19:17.000Z
|
src/public/repository/keys.py
|
akornatskyy/sample-blog-api
|
87183780d7f2322ae27231e8eacea1f942f87d1d
|
[
"MIT"
] | null | null | null |
"""
"""
def get_daily_quote():
    """Return the repository cache key under which the daily quote lives."""
    cache_key = 'q:gdq'
    return cache_key
| 7.428571
| 22
| 0.538462
| 7
| 52
| 3.714286
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 52
| 6
| 23
| 8.666667
| 0.65
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| true
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 6
|
43c24fb60cdc8373e1f76bf3df835f894bca48ec
| 63
|
py
|
Python
|
Arithmetic/__init__.py
|
ndiab/CRYPTO
|
6691972dbfc33eae4c3c133a4df00de4e336b0cb
|
[
"Apache-2.0"
] | 26
|
2017-03-27T10:32:59.000Z
|
2022-02-22T10:36:17.000Z
|
Arithmetic/__init__.py
|
ndiab/CRYPTO
|
6691972dbfc33eae4c3c133a4df00de4e336b0cb
|
[
"Apache-2.0"
] | 1
|
2018-02-11T19:13:57.000Z
|
2018-02-11T19:25:22.000Z
|
Arithmetic/__init__.py
|
ndiab/CRYPTO
|
6691972dbfc33eae4c3c133a4df00de4e336b0cb
|
[
"Apache-2.0"
] | 4
|
2018-04-26T12:28:27.000Z
|
2022-01-23T15:09:23.000Z
|
from Arithmetic.arith import *
from Arithmetic.prime import *
| 15.75
| 30
| 0.793651
| 8
| 63
| 6.25
| 0.625
| 0.56
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 63
| 3
| 31
| 21
| 0.925926
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
6009d7de0253a5ca4651e1982bc9b2c34c00351a
| 11,536
|
py
|
Python
|
src/python/model_generators/keras/code_generator.py
|
computer-geek64/guinn
|
11e10a9fbf1f99fd0ff8e15d7a812679ae7015f4
|
[
"MIT"
] | 2
|
2020-06-25T00:06:38.000Z
|
2020-09-11T18:59:45.000Z
|
src/python/model_generators/keras/code_generator.py
|
computer-geek64/guinn
|
11e10a9fbf1f99fd0ff8e15d7a812679ae7015f4
|
[
"MIT"
] | 20
|
2020-06-25T00:16:35.000Z
|
2020-06-25T19:24:14.000Z
|
src/python/model_generators/keras/code_generator.py
|
computer-geek64/guinn
|
11e10a9fbf1f99fd0ff8e15d7a812679ae7015f4
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
# code_generator.py
def add_layers(layers):
    """Generate Python source that builds a tf.keras Sequential model.

    :param layers: list of dicts, each with a 'type' key plus the
                   parameters for that layer type (e.g. 'nodes',
                   'activation_function', 'use_bias' for dense layers).
    :returns: the generated source as a single string, one model.add(...)
              statement per layer.

    BUG FIX: every template except Dense previously emitted unbalanced
    parentheses (e.g. ``model.add(tf.keras.layers.Dropout(rate=0.5`` with no
    closing brackets), producing syntactically invalid generated code; all
    templates now close both the layer call and the model.add call.
    """
    def _quote(value):
        # Render a template value for the generated source: strings are
        # quoted (Keras identifier strings), None stays a bare literal.
        # This replaces the duplicated quoted/unquoted template branches.
        return value if value is None else "\'{0}\'".format(value)

    return_string = '''model = tf.keras.Sequential()\n'''
    for layer in layers:
        if layer['type'] == 'dense':
            return_string += '''model.add(tf.keras.layers.Dense(units={units}, activation={activation},
use_bias={use_bias}))\n''' \
                .format(units=layer['nodes'], activation=_quote(layer['activation_function']), use_bias=layer['use_bias'])
        elif layer['type'] == 'input_layer':
            return_string += '''model.add(tf.keras.layers.InputLayer(input_shape={input_shape}))\n''' \
                .format(input_shape=layer['input_shape'])
        elif layer['type'] == 'flatten':
            return_string += '''model.add(tf.keras.layers.Flatten())\n'''
        elif layer['type'] == 'embedding':
            return_string += '''model.add(tf.keras.layers.Embedding(input_dim={input_dim}, output_dim={output_dim}))\n''' \
                .format(input_dim=layer['input_dim'], output_dim=layer['output_dim'])
        elif layer['type'] == 'dropout':
            return_string += '''model.add(tf.keras.layers.Dropout(rate={rate}))\n''' \
                .format(rate=layer['rate'])
        elif layer['type'] == 'batch_normalization':
            return_string += '''model.add(tf.keras.layers.BatchNormalization(axis={axis}, momentum={momentum},
epsilon={epsilon}))\n''' \
                .format(axis=layer['axis'], momentum=layer['momentum'], epsilon=layer['epsilon'])
        elif layer['type'] == 'conv_2d':
            return_string += '''model.add(tf.keras.layers.Conv2D(filters={filters}, kernel_size={kernel_size},
strides={strides}, activation={activation}, use_bias={use_bias}))\n''' \
                .format(filters=layer['filters'], kernel_size=layer['kernel_size'], strides=layer['strides'],
                        activation=_quote(layer['activation_function']), use_bias=layer['use_bias'])
        elif layer['type'] == 'max_pooling_2d':
            return_string += '''model.add(tf.keras.layers.MaxPool2D(pool_size={pool_size}, strides={strides}))\n''' \
                .format(pool_size=layer['kernel_size'], strides=layer['strides'])
        elif layer['type'] == 'average_pooling_2d':
            return_string += '''model.add(tf.keras.layers.AveragePooling2D(pool_size={pool_size}, strides={strides}))\n''' \
                .format(pool_size=layer['kernel_size'], strides=layer['strides'])
        elif layer['type'] == 'rnn':
            return_string += '''model.add(tf.keras.layers.SimpleRNN(units={units}, activation={activation},
use_bias={use_bias}, dropout={dropout}, recurrent_dropout={recurrent_dropout}))\n''' \
                .format(units=layer['nodes'], activation=_quote(layer['activation_function']), use_bias=layer['use_bias'],
                        dropout=layer['dropout'], recurrent_dropout=layer['recurrent_dropout'])
        elif layer['type'] == 'lstm':
            # NOTE(review): the original branched on layer['recurrent_activation']
            # but formatted layer['recurrent_activation_function']; both keys are
            # presumed to carry the same value — confirm against the caller.
            return_string += '''model.add(tf.keras.layers.LSTM(units={units}, activation={activation},
recurrent_activation={recurrent_activation}, use_bias={use_bias}, dropout={dropout},
recurrent_dropout={recurrent_dropout}))\n''' \
                .format(units=layer['nodes'], activation=_quote(layer['activation_function']),
                        recurrent_activation=_quote(layer['recurrent_activation_function']), use_bias=layer['use_bias'],
                        dropout=layer['dropout'], recurrent_dropout=layer['recurrent_dropout'])
        elif layer['type'] == 'gru':
            # NOTE(review): same recurrent_activation key caveat as 'lstm' above.
            return_string += '''model.add(tf.keras.layers.GRU(units={units}, activation={activation},
recurrent_activation={recurrent_activation}, use_bias={use_bias}, dropout={dropout},
recurrent_dropout={recurrent_dropout}))\n''' \
                .format(units=layer['nodes'], activation=_quote(layer['activation_function']),
                        recurrent_activation=_quote(layer['recurrent_activation_function']), use_bias=layer['use_bias'],
                        dropout=layer['dropout'], recurrent_dropout=layer['recurrent_dropout'])
    return return_string
def add_compile_fit(optimizer, x_train, y_train, loss, batch_size, epochs):
    """Generate the model.compile(...) and model.fit(...) source lines.

    String-valued ``optimizer``/``loss`` are emitted quoted (Keras name
    strings such as 'sgd'/'mse'); ``None`` is emitted bare. When training
    data is missing, the fit call is emitted with the data arguments
    commented out for the user to fill in.
    """
    if optimizer is None and loss is not None:
        return_string = '''model.compile(optimizer={optimizer}, loss=\'{loss}\')\n''' \
            .format(optimizer=optimizer, loss=loss)
    elif optimizer is not None and loss is None:
        return_string = '''model.compile(optimizer=\'{optimizer}\', loss={loss})\n''' \
            .format(optimizer=optimizer, loss=loss)
    elif optimizer is None and loss is None:
        return_string = '''model.compile(optimizer={optimizer}, loss={loss})\n''' \
            .format(optimizer=optimizer, loss=loss)
    else:
        # BUG FIX: both values are set in this branch, so ``loss`` must be
        # quoted too — it previously generated invalid code like ``loss=mse``
        # (and this is the branch hit by generate_template's defaults).
        return_string = '''model.compile(optimizer=\'{optimizer}\', loss=\'{loss}\')\n''' \
            .format(optimizer=optimizer, loss=loss)
    if x_train is None or y_train is None:
        return_string += '''model.fit(
# x_train,
# y_train,
batch_size={batch_size}, epochs={epochs}
)\n''' \
            .format(batch_size=batch_size, epochs=epochs)
    else:
        return_string += '''model.fit(x_train, y_train, batch_size={batch_size}, epochs={epochs})\n''' \
            .format(batch_size=batch_size, epochs=epochs)
    return return_string
def generate_template(layers, loss='mse', optimizer='sgd', x_train=None, y_train=None, batch_size=32, epochs=10):
    """Assemble a complete Keras training script: the tensorflow import,
    the layer-construction code, then the compile/fit calls."""
    sections = [
        '''import tensorflow as tf\n''',
        add_layers(layers),
        add_compile_fit(optimizer, x_train, y_train, loss, batch_size, epochs),
    ]
    return ''.join(sections)
# TEST
'''layers = [{'type': 'dense',
'nodes': 10,
'activation_function': None,
'use_bias': True},
{'type': 'dense',
'nodes': 5,
'activation_function': 'relu',
'use_bias': True}]
print(generate_template(layers=layers))'''
| 70.773006
| 123
| 0.612517
| 1,218
| 11,536
| 5.582923
| 0.074713
| 0.059706
| 0.101471
| 0.061765
| 0.857941
| 0.857059
| 0.853088
| 0.828824
| 0.828824
| 0.816471
| 0
| 0.0017
| 0.235177
| 11,536
| 163
| 124
| 70.773006
| 0.769013
| 0.003467
| 0
| 0.643357
| 0
| 0.020979
| 0.468803
| 0.249219
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020979
| false
| 0
| 0.006993
| 0
| 0.048951
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
60625155c9f8f5e0e8125bcce015c57d8bd87c7f
| 14,886
|
py
|
Python
|
app/tests/v2/system-tests/test_incidents.py
|
GraceKiarie/iReporter
|
1011f878f9fb643798192aeed1b68c3e6de4dedc
|
[
"MIT"
] | 1
|
2018-12-14T09:52:39.000Z
|
2018-12-14T09:52:39.000Z
|
app/tests/v2/system-tests/test_incidents.py
|
GraceKiarie/iReporter
|
1011f878f9fb643798192aeed1b68c3e6de4dedc
|
[
"MIT"
] | 6
|
2018-12-08T11:15:46.000Z
|
2018-12-15T11:04:36.000Z
|
app/tests/v2/system-tests/test_incidents.py
|
GraceKiarie/iReporter
|
1011f878f9fb643798192aeed1b68c3e6de4dedc
|
[
"MIT"
] | 5
|
2018-12-04T11:00:54.000Z
|
2019-06-13T12:53:50.000Z
|
"""Test for methods applied to Red Flags"""
import json
from app.tests.v2.base import BaseTestCase
class TestIncidentsTestCase(BaseTestCase):
    """System tests for the incidents (red-flag) API endpoints.

    Helpers at the top sign a user up / in and issue authenticated
    requests through the Flask test client provided by BaseTestCase.
    NOTE(review): several expected API strings below contain typos
    ("Incindent", lowercase messages); they match the server's actual
    responses and must not be "fixed" here.
    """

    def signup(self):
        """Create a test user via the signup endpoint; return the response."""
        response = self.app.post('/api/v2/auth/signup/',
                                 data=json.dumps(self.person_existing_user),
                                 headers={'content-type': 'application/json'}
                                 )
        return response

    def login(self):
        """Sign the test user in; return the login response."""
        # NOTE(review): path is relative ('api/v2/...', no leading slash);
        # the test client resolves it — confirm this is intentional.
        response = self.app.post('api/v2/auth/login/',
                                 data=json.dumps(self.correct_login1),
                                 headers={'content-type': 'application/json'})
        return response

    def get_jwt_token(self):
        """Sign up then log in; cache and return the JWT token."""
        self.signup()
        t = self.login()
        data = json.loads(t.get_data())
        # Login payload: data["data"] is a list whose first item holds the token.
        self.data = data.get("data")[0]
        self.token = self.data.get('token')
        return self.token

    def post_incident(self, data):
        """POST a new incident with a fresh JWT; return the response."""
        self.get_jwt_token()
        token = self.token
        redflag = self.app.post('/api/v2/incidents/',
                                data=json.dumps(data),
                                headers={'content-type': 'application/json',
                                         'Authorization': token})
        return redflag

    def put_incident(self, data):
        """PUT (full update) incident 1; return the response."""
        self.get_jwt_token()
        token = self.token
        redflag = self.app.put('/api/v2/incidents/1/',
                               data=json.dumps(data),
                               headers={'content-type': 'application/json',
                                        'Authorization': token})
        return redflag

    def patch_comment(self, id, data):
        """PATCH the comment field of incident `id`; return the response."""
        self.get_jwt_token()
        token = self.token
        redflag = self.app.patch('/api/v2/incidents/'+id+'/comment',
                                 data=json.dumps(data),
                                 headers={'content-type': 'application/json',
                                          'Authorization': token})
        return redflag

    def patch_location(self, id, data):
        """PATCH the location field of incident `id`; return the response."""
        self.get_jwt_token()
        token = self.token
        redflag = self.app.patch('/api/v2/incidents/'+id+'/location',
                                 data=json.dumps(data),
                                 headers={'content-type': 'application/json',
                                          'Authorization': token})
        return redflag

    def patch_status(self, id, data):
        """PATCH the status field of incident `id` (admin-only); return the response."""
        self.get_jwt_token()
        token = self.token
        redflag = self.app.patch('/api/v2/incidents/'+id+'/status',
                                 data=json.dumps(data),
                                 headers={'content-type': 'application/json',
                                          'Authorization': token})
        return redflag

    def test_app_works(self):
        """The API root redirects (301) — sanity check that the app is up."""
        response = self.app.get('api/v2')
        self.assertEqual(response.status_code, 301)

    def test_post_incident(self):
        """Posting a valid incident returns 201 with a success message."""
        response = self.post_incident(self.red_flag2)
        self.assertEqual(response.status_code, 201)
        data = json.loads(response.get_data())
        self.assertEqual(data['message'], 'Created incident successfully!')

    def test_post_incident_exists(self):
        """Posting the same incident twice returns 400 (duplicate)."""
        self.post_incident(self.red_flag2)
        response = self.post_incident(self.red_flag2)
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], 'Incident already exists')

    def test_missing_token(self):
        """Posting without an Authorization header returns 401."""
        response = self.app.post('/api/v2/incidents/',
                                 data=json.dumps(self.red_flag3),
                                 headers={'content-type': 'application/json'})
        self.assertEqual(response.status_code, 401)
        data = json.loads(response.get_data())
        self.assertEqual(data['message'], 'Missing Authorization Header')

    def test_post_incident_not_json(self):
        """Posting a non-JSON payload fails input validation with 400."""
        response = self.post_incident(self.red_flag3)
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.get_data())
        self.assertEqual(data['message'], 'Input payload validation failed')

    def test_new_incident_no_title(self):
        """Posting a redflag without a title returns 400."""
        response = self.post_incident(self.redflag_no_title)
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'],"Title is invalid or empty")

    def test_new_incident_no_comment(self):
        """Posting a redflag without a description returns 400."""
        response = self.post_incident(self.redflag_no_comment)
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], 'description is invalid or empty')

    def test_new_incident_invalid_image(self):
        """Posting a redflag with an invalid image link returns 400."""
        response = self.post_incident(self.redflag_invalid_image)
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], 'Images link is invalid')

    def test_new_incident_invalid_video(self):
        """Posting a redflag with an invalid video link returns 400."""
        response = self.post_incident(self.redflag_invalid_video)
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], 'Video link is invalid')

    def test_new_incident_invalid_location(self):
        """Posting a redflag with an invalid location returns 400."""
        response = self.post_incident(self.redflag_invalid_location)
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], 'location input is invalid')

    def test_new_incident_invalid_type(self):
        """Posting a redflag with an invalid type returns 400."""
        response = self.post_incident(self.redflag_invalid_type)
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], 'Type is invalid or empty')

    def test_view_all_incidents(self):
        """Listing incidents (no auth required) returns 200."""
        self.post_incident(self.red_flag)
        response = self.app.get('/api/v2/incidents/')
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.get_data())
        self.assertEqual(data['message'], "All incidents found successfully")

    def test_view_an_incident(self):
        """Fetching an existing incident by id returns 200."""
        self.post_incident(self.red_flag)
        response = self.app.get('/api/v2/incidents/1/')
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.get_data())
        self.assertEqual(data['message'], "Incident successfully retrieved!")

    def test_view_incident_not_found(self):
        """Fetching a non-existent incident returns 404."""
        response = self.app.get('/api/v2/incidents/1344/')
        self.assertEqual(response.status_code, 404)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], "Incident not found")

    def test_modify_location(self):
        """Patching an incident's location with valid data returns 200."""
        self.post_incident(self.red_flag)
        response = self.patch_location('1',self.redflag_location)
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.get_data())
        self.assertEqual(data['message'], "location successfully updated")

    def test_modify_bad_location(self):
        """Patching with a malformed lat/long pair returns 400."""
        self.post_incident(self.red_flag)
        response = self.patch_location('1',self.redflag_location1)
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], "location input format should be a valid lat n long pair")

    def test_modify_location_not_found(self):
        """Patching the location of a missing incident returns 404."""
        self.post_incident(self.red_flag)
        response = self.patch_location('120', self.update_redflag)
        self.assertEqual(response.status_code, 404)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], "Incindent not found")

    def test_modify_comment(self):
        """Patching an incident's comment returns 200."""
        self.post_incident(self.red_flag)
        response = self.patch_comment('1',self.redflag_comment)
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.get_data())
        self.assertEqual(data['message'], "comment successfully updated")

    def test_modify_comment_not_found(self):
        """Patching the comment of a missing incident returns 404."""
        self.post_incident(self.red_flag)
        response = self.patch_comment('120', self.update_redflag)
        self.assertEqual(response.status_code, 404)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], "Incindent not found")

    def test_delete_an_incident(self):
        """Deleting an existing incident returns 200."""
        # post_incident() sets self.token as a side effect via get_jwt_token().
        self.post_incident(self.red_flag)
        token = self.token
        response = self.app.delete('/api/v2/incidents/1/',
                                   headers={'content-type': 'application/json',
                                            'Authorization': token})
        self.assertEqual(response.status_code, 200)

    def test_delete_incident_not_found(self):
        """Deleting a missing incident returns 404."""
        self.post_incident(self.red_flag)
        token = self.token
        response = self.app.delete('/api/v2/incidents/2222/',
                                   headers={'content-type': 'application/json',
                                            'Authorization': token})
        self.assertEqual(response.status_code, 404)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], "Incident not found")

    def test_not_allowed(self):
        """PATCH on the bare incident url is not an allowed method (405)."""
        self.post_incident(self.red_flag)
        token = self.token
        response = self.app.patch('/api/v2/incidents/2222/',
                                  headers={'content-type': 'application/json',
                                           'Authorization': token})
        self.assertEqual(response.status_code, 405)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], "Method is not allowed on this url")

    def test_update_incident(self):
        """PUT with valid data updates an incident (200)."""
        self.post_incident(self.red_flag2)
        response = self.put_incident(self.update_redflag)
        self.assertEqual(response.status_code, 200)
        data = json.loads(response.get_data())
        self.assertEqual(data['message'], 'Incident updated successfully!')

    def test_update_incident_no_title(self):
        """PUT without a title returns 400."""
        self.post_incident(self.red_flag2)
        response = self.put_incident( self.redflag_no_title)
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], 'Title is invalid or empty')

    def test_update_incident_no_image(self):
        """PUT with an invalid image link returns 400."""
        self.post_incident(self.red_flag2)
        response = self.put_incident( self.redflag_invalid_image)
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], 'Images link is invalid')

    def test_update_incident_no_location(self):
        """PUT with an invalid location returns 400."""
        self.post_incident(self.red_flag2)
        response = self.put_incident( self.redflag_invalid_location )
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], 'location input is invalid')

    def test_update_incident_no_comment(self):
        """PUT without a comment returns 400."""
        self.post_incident(self.red_flag2)
        response = self.put_incident( self.redflag_no_comment )
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], 'comment is invalid or empty')

    def test_update_incident_not_found(self):
        """PUT to a missing incident id returns 404."""
        self.post_incident(self.red_flag2)
        token = self.token
        response = self.app.put('/api/v2/incidents/123/',
                                data=json.dumps(self.red_flag ),
                                headers={'content-type': 'application/json',
                                         'Authorization': token})
        self.assertEqual(response.status_code, 404)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], 'Incindent not found')

    def test_update_incident_not_json(self):
        """PUT with a non-JSON payload fails validation (400)."""
        self.post_incident(self.red_flag2)
        response = self.put_incident(self.red_flag3)
        self.assertEqual(response.status_code, 400)
        data = json.loads(response.get_data())
        self.assertEqual(data['message'], 'Input payload validation failed')

    def test_modify_status(self):
        """Patching status as a non-admin user is forbidden (403)."""
        self.post_incident(self.red_flag)
        response = self.patch_status('1',self.status_Resolved)
        self.assertEqual(response.status_code, 403)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], "you do not have permission to do that!")

    def test_modify_location_not_status(self):
        """Patching status of any incident as non-admin is forbidden (403)."""
        self.post_incident(self.red_flag)
        response = self.patch_status('120', self.status_Rejected)
        self.assertEqual(response.status_code, 403)
        data = json.loads(response.get_data())
        self.assertEqual(data['error'], "you do not have permission to do that!")
| 41.814607
| 98
| 0.606745
| 1,698
| 14,886
| 5.154299
| 0.087161
| 0.102834
| 0.056673
| 0.102719
| 0.86186
| 0.825297
| 0.782564
| 0.764168
| 0.722692
| 0.687271
| 0
| 0.014636
| 0.27939
| 14,886
| 356
| 99
| 41.814607
| 0.801249
| 0.085382
| 0
| 0.550607
| 0
| 0
| 0.133197
| 0.006867
| 0
| 0
| 0
| 0
| 0.242915
| 1
| 0.157895
| false
| 0
| 0.008097
| 0
| 0.202429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
71569f00abad4bf8bde4418446f9c96b182f8f10
| 238
|
py
|
Python
|
Mechinmind/Article/admin.py
|
ayanshaikh18/MechInMind
|
69d272073d24407bfa97b99949e7c2b1c2dc28e8
|
[
"MIT"
] | null | null | null |
Mechinmind/Article/admin.py
|
ayanshaikh18/MechInMind
|
69d272073d24407bfa97b99949e7c2b1c2dc28e8
|
[
"MIT"
] | null | null | null |
Mechinmind/Article/admin.py
|
ayanshaikh18/MechInMind
|
69d272073d24407bfa97b99949e7c2b1c2dc28e8
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from markdownx.admin import MarkdownxModelAdmin
from . import models

# Expose the content models in the Django admin site.
admin.site.register(models.Category)
admin.site.register(models.SubCategory)
# Article is registered with MarkdownxModelAdmin so its markdown fields
# get the markdownx editing widget in the admin.
admin.site.register(models.Article, MarkdownxModelAdmin)
| 26.444444
| 56
| 0.844538
| 29
| 238
| 6.931034
| 0.448276
| 0.134328
| 0.253731
| 0.343284
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.07563
| 238
| 8
| 57
| 29.75
| 0.913636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
7169d46edd8be0e34dfdf8f57ab7e71166442ce6
| 42,860
|
py
|
Python
|
tests/admin/clush-tests/CLINodesetTest.py
|
utdsimmons/ohpc
|
70dc728926a835ba049ddd3f4627ef08db7c95a0
|
[
"Apache-2.0"
] | 692
|
2015-11-12T13:56:43.000Z
|
2022-03-30T03:45:59.000Z
|
tests/admin/clush-tests/CLINodesetTest.py
|
utdsimmons/ohpc
|
70dc728926a835ba049ddd3f4627ef08db7c95a0
|
[
"Apache-2.0"
] | 1,096
|
2015-11-12T09:08:22.000Z
|
2022-03-31T21:48:41.000Z
|
tests/admin/clush-tests/CLINodesetTest.py
|
utdsimmons/ohpc
|
70dc728926a835ba049ddd3f4627ef08db7c95a0
|
[
"Apache-2.0"
] | 224
|
2015-11-12T21:17:03.000Z
|
2022-03-30T00:57:48.000Z
|
#!/usr/bin/env python
# scripts/nodeset.py tool test suite
# Written by S. Thiell
"""Unit test for CLI/Nodeset.py"""
import random
import sys
import unittest
from TLib import *
from ClusterShell.CLI.Nodeset import main
from ClusterShell.NodeUtils import GroupResolverConfig
from ClusterShell.NodeSet import std_group_resolver, set_std_group_resolver
class CLINodesetTestBase(unittest.TestCase):
    """Base unit test class for testing CLI/Nodeset.py"""

    def _nodeset_t(self, args, input, expected_stdout, expected_rc=0,
        expected_stderr=None):
        # Run `nodeset <args>` through CLI_main (imported from TLib),
        # feeding `input` on stdin and asserting stdout, return code and
        # (optionally) stderr match the expectations.
        CLI_main(self, main, [ 'nodeset' ] + args, input, expected_stdout,
                 expected_rc, expected_stderr)
class CLINodesetTest(CLINodesetTestBase):
"""Unit test class for testing CLI/Nodeset.py"""
def _battery_count(self, args):
self._nodeset_t(args + ["--count", "foo"], None, "1\n")
self._nodeset_t(args + ["--count", "foo", "bar"], None, "2\n")
self._nodeset_t(args + ["--count", "foo", "foo"], None, "1\n")
self._nodeset_t(args + ["--count", "foo", "foo", "bar"], None, "2\n")
self._nodeset_t(args + ["--count", "foo[0]"], None, "1\n")
self._nodeset_t(args + ["--count", "foo[2]"], None, "1\n")
self._nodeset_t(args + ["--count", "foo[1,2]"], None, "2\n")
self._nodeset_t(args + ["--count", "foo[1-2]"], None, "2\n")
self._nodeset_t(args + ["--count", "foo[1,2]", "foo[1-2]"], None, "2\n")
self._nodeset_t(args + ["--count", "foo[1-200,245-394]"], None, "350\n")
self._nodeset_t(args + ["--count", "foo[395-442]", "foo[1-200,245-394]"], None, "398\n")
self._nodeset_t(args + ["--count", "foo[395-442]", "foo", "foo[1-200,245-394]"], None, "399\n")
self._nodeset_t(args + ["--count", "foo[395-442]", "foo", "foo[0-200,245-394]"], None, "400\n")
self._nodeset_t(args + ["--count", "foo[395-442]", "bar3,bar24", "foo[1-200,245-394]"], None, "400\n")
# from stdin
self._nodeset_t(args + ["--count"], "foo\n", "1\n")
self._nodeset_t(args + ["--count"], "foo\nbar\n", "2\n")
self._nodeset_t(args + ["--count"], "foo\nfoo\n", "1\n")
self._nodeset_t(args + ["--count"], "foo\nfoo\nbar\n", "2\n")
self._nodeset_t(args + ["--count"], "foo[0]\n", "1\n")
self._nodeset_t(args + ["--count"], "foo[2]\n", "1\n")
self._nodeset_t(args + ["--count"], "foo[1,2]\n", "2\n")
self._nodeset_t(args + ["--count"], "foo[1-2]\n", "2\n")
self._nodeset_t(args + ["--count"], "foo[1,2]\nfoo[1-2]\n", "2\n")
self._nodeset_t(args + ["--count"], "foo[1-200,245-394]\n", "350\n")
self._nodeset_t(args + ["--count"], "foo[395-442]\nfoo[1-200,245-394]\n", "398\n")
self._nodeset_t(args + ["--count"], "foo[395-442]\nfoo\nfoo[1-200,245-394]\n", "399\n")
self._nodeset_t(args + ["--count"], "foo[395-442]\nfoo\nfoo[0-200,245-394]\n", "400\n")
self._nodeset_t(args + ["--count"], "foo[395-442]\nbar3,bar24\nfoo[1-200,245-394]\n", "400\n")
def test_001_count(self):
"""test nodeset --count"""
self._battery_count([])
self._battery_count(["--autostep=1"])
self._battery_count(["--autostep=2"])
self._battery_count(["--autostep=5"])
self._battery_count(["--autostep=auto"])
self._battery_count(["--autostep=0%"])
self._battery_count(["--autostep=50%"])
self._battery_count(["--autostep=100%"])
def test_002_count_intersection(self):
"""test nodeset --count --intersection"""
self._nodeset_t(["--count", "foo", "--intersection", "bar"], None, "0\n")
self._nodeset_t(["--count", "foo", "--intersection", "foo"], None, "1\n")
self._nodeset_t(["--count", "foo", "--intersection", "foo", "-i", "bar"], None, "0\n")
self._nodeset_t(["--count", "foo[0]", "--intersection", "foo0"], None, "1\n")
self._nodeset_t(["--count", "foo[2]", "--intersection", "foo"], None, "0\n")
self._nodeset_t(["--count", "foo[1,2]", "--intersection", "foo[1-2]"], None, "2\n")
self._nodeset_t(["--count", "foo[395-442]", "--intersection", "foo[1-200,245-394]"], None, "0\n")
self._nodeset_t(["--count", "foo[395-442]", "--intersection", "foo", "-i", "foo[1-200,245-394]"], None, "0\n")
self._nodeset_t(["--count", "foo[395-442]", "-i", "foo", "-i", "foo[0-200,245-394]"], None, "0\n")
self._nodeset_t(["--count", "foo[395-442]", "--intersection", "bar3,bar24", "-i", "foo[1-200,245-394]"], None, "0\n")
def test_003_count_intersection_stdin(self):
"""test nodeset --count --intersection (stdin)"""
self._nodeset_t(["--count", "--intersection", "bar"], "foo\n", "0\n")
self._nodeset_t(["--count", "--intersection", "foo"], "foo\n", "1\n")
self._nodeset_t(["--count", "--intersection", "foo", "-i", "bar"], "foo\n", "0\n")
self._nodeset_t(["--count", "--intersection", "foo0"], "foo[0]\n", "1\n")
self._nodeset_t(["--count", "--intersection", "foo"], "foo[2]\n", "0\n")
self._nodeset_t(["--count", "--intersection", "foo[1-2]"], "foo[1,2]\n", "2\n")
self._nodeset_t(["--count", "--intersection", "foo[1-200,245-394]"], "foo[395-442]\n", "0\n")
self._nodeset_t(["--count", "--intersection", "foo", "-i", "foo[1-200,245-394]"], "foo[395-442]\n", "0\n")
self._nodeset_t(["--count", "-i", "foo", "-i", "foo[0-200,245-394]"], "foo[395-442]\n", "0\n")
self._nodeset_t(["--count", "--intersection", "bar3,bar24", "-i", "foo[1-200,245-394]"], "foo[395-442]\n", "0\n")
def _battery_fold(self, args):
self._nodeset_t(args + ["--fold", "foo"], None, "foo\n")
self._nodeset_t(args + ["--fold", "foo", "bar"], None, "bar,foo\n")
self._nodeset_t(args + ["--fold", "foo", "foo"], None, "foo\n")
self._nodeset_t(args + ["--fold", "foo", "foo", "bar"], None, "bar,foo\n")
self._nodeset_t(args + ["--fold", "foo[0]"], None, "foo0\n")
self._nodeset_t(args + ["--fold", "foo[2]"], None, "foo2\n")
self._nodeset_t(args + ["--fold", "foo[1,2]"], None, "foo[1-2]\n")
self._nodeset_t(args + ["--fold", "foo[1-2]"], None, "foo[1-2]\n")
self._nodeset_t(args + ["--fold", "foo[1,2]", "foo[1-2]"], None, "foo[1-2]\n")
self._nodeset_t(args + ["--fold", "foo[1-200,245-394]"], None, "foo[1-200,245-394]\n")
self._nodeset_t(args + ["--fold", "foo[395-442]", "foo[1-200,245-394]"], None, "foo[1-200,245-442]\n")
self._nodeset_t(args + ["--fold", "foo[395-442]", "foo", "foo[1-200,245-394]"], None, "foo,foo[1-200,245-442]\n")
self._nodeset_t(args + ["--fold", "foo[395-442]", "foo", "foo[0-200,245-394]"], None, "foo,foo[0-200,245-442]\n")
self._nodeset_t(args + ["--fold", "foo[395-442]", "bar3,bar24", "foo[1-200,245-394]"], None, "bar[3,24],foo[1-200,245-442]\n")
# stdin
self._nodeset_t(args + ["--fold"], "foo\n", "foo\n")
self._nodeset_t(args + ["--fold"], "foo\nbar\n", "bar,foo\n")
self._nodeset_t(args + ["--fold"], "foo\nfoo\n", "foo\n")
self._nodeset_t(args + ["--fold"], "foo\nfoo\nbar\n", "bar,foo\n")
self._nodeset_t(args + ["--fold"], "foo[0]\n", "foo0\n")
self._nodeset_t(args + ["--fold"], "foo[2]\n", "foo2\n")
self._nodeset_t(args + ["--fold"], "foo[1,2]\n", "foo[1-2]\n")
self._nodeset_t(args + ["--fold"], "foo[1-2]\n", "foo[1-2]\n")
self._nodeset_t(args + ["--fold"], "foo[1,2]\nfoo[1-2]\n", "foo[1-2]\n")
self._nodeset_t(args + ["--fold"], "foo[1-200,245-394]\n", "foo[1-200,245-394]\n")
self._nodeset_t(args + ["--fold"], "foo[395-442]\nfoo[1-200,245-394]\n", "foo[1-200,245-442]\n")
self._nodeset_t(args + ["--fold"], "foo[395-442]\nfoo\nfoo[1-200,245-394]\n", "foo,foo[1-200,245-442]\n")
self._nodeset_t(args + ["--fold"], "foo[395-442]\nfoo\nfoo[0-200,245-394]\n", "foo,foo[0-200,245-442]\n")
self._nodeset_t(args + ["--fold"], "foo[395-442]\nbar3,bar24\nfoo[1-200,245-394]\n", "bar[3,24],foo[1-200,245-442]\n")
def test_004_fold(self):
"""test nodeset --fold"""
self._battery_fold([])
self._battery_fold(["--autostep=3"])
# --autostep=auto (1.7)
self._battery_fold(["--autostep=auto"])
self._battery_count(["--autostep=0%"])
self._battery_count(["--autostep=50%"])
self._battery_count(["--autostep=100%"])
def test_005_fold_autostep(self):
"""test nodeset --fold --autostep=X"""
self._nodeset_t(["--autostep=2", "-f", "foo0", "foo2", "foo4", "foo6"], None, "foo[0-6/2]\n")
self._nodeset_t(["--autostep=2", "-f", "foo4", "foo2", "foo0", "foo6"], None, "foo[0-6/2]\n")
self._nodeset_t(["--autostep=3", "-f", "foo0", "foo2", "foo4", "foo6"], None, "foo[0-6/2]\n")
self._nodeset_t(["--autostep=4", "-f", "foo0", "foo2", "foo4", "foo6"], None, "foo[0-6/2]\n")
self._nodeset_t(["--autostep=5", "-f", "foo0", "foo2", "foo4", "foo6"], None, "foo[0,2,4,6]\n")
self._nodeset_t(["--autostep=auto", "-f", "foo0", "foo2", "foo4", "foo6"], None, "foo[0-6/2]\n")
self._nodeset_t(["--autostep=auto", "-f", "foo4", "foo2", "foo0", "foo6"], None, "foo[0-6/2]\n")
self._nodeset_t(["--autostep=auto", "-f", "foo4", "foo2", "foo0", "foo2", "foo6"], None, "foo[0-6/2]\n")
self._nodeset_t(["--autostep=auto", "-f", "foo4", "foo2", "foo0", "foo5", "foo6"], None, "foo[0,2,4-6]\n")
self._nodeset_t(["--autostep=auto", "-f", "foo4", "foo2", "foo0", "foo9", "foo6"], None, "foo[0,2,4,6,9]\n")
self._nodeset_t(["--autostep=75%", "-f", "foo0", "foo2", "foo4", "foo6"], None, "foo[0-6/2]\n")
self._nodeset_t(["--autostep=75%", "-f", "foo4", "foo2", "foo0", "foo6"], None, "foo[0-6/2]\n")
self._nodeset_t(["--autostep=80%", "-f", "foo4", "foo2", "foo0", "foo2", "foo6"], None, "foo[0-6/2]\n")
self._nodeset_t(["--autostep=80%", "-f", "foo4", "foo2", "foo0", "foo5", "foo6"], None, "foo[0,2,4-6]\n")
self._nodeset_t(["--autostep=80%", "-f", "foo4", "foo2", "foo0", "foo9", "foo6"], None, "foo[0-6/2,9]\n")
self._nodeset_t(["--autostep=81%", "-f", "foo4", "foo2", "foo0", "foo9", "foo6"], None, "foo[0,2,4,6,9]\n")
self._nodeset_t(["--autostep=100%", "-f", "foo4", "foo2", "foo0", "foo9", "foo6"], None, "foo[0,2,4,6,9]\n")
def test_006_expand(self):
"""test nodeset --expand"""
self._nodeset_t(["--expand", "foo"], None, "foo\n")
self._nodeset_t(["--expand", "foo", "bar"], None, "bar foo\n")
self._nodeset_t(["--expand", "foo", "foo"], None, "foo\n")
self._nodeset_t(["--expand", "foo[0]"], None, "foo0\n")
self._nodeset_t(["--expand", "foo[2]"], None, "foo2\n")
self._nodeset_t(["--expand", "foo[1,2]"], None, "foo1 foo2\n")
self._nodeset_t(["--expand", "foo[1-2]"], None, "foo1 foo2\n")
self._nodeset_t(["--expand", "foo[1-2],bar"], None, "bar foo1 foo2\n")
def test_007_expand_stdin(self):
"""test nodeset --expand (stdin)"""
self._nodeset_t(["--expand"], "foo\n", "foo\n")
self._nodeset_t(["--expand"], "foo\nbar\n", "bar foo\n")
self._nodeset_t(["--expand"], "foo\nfoo\n", "foo\n")
self._nodeset_t(["--expand"], "foo[0]\n", "foo0\n")
self._nodeset_t(["--expand"], "foo[2]\n", "foo2\n")
self._nodeset_t(["--expand"], "foo[1,2]\n", "foo1 foo2\n")
self._nodeset_t(["--expand"], "foo[1-2]\n", "foo1 foo2\n")
self._nodeset_t(["--expand"], "foo[1-2],bar\n", "bar foo1 foo2\n")
def test_008_expand_separator(self):
"""test nodeset --expand -S"""
self._nodeset_t(["--expand", "-S", ":", "foo"], None, "foo\n")
self._nodeset_t(["--expand", "-S", ":", "foo", "bar"], None, "bar:foo\n")
self._nodeset_t(["--expand", "--separator", ":", "foo", "bar"], None, "bar:foo\n")
self._nodeset_t(["--expand", "--separator=:", "foo", "bar"], None, "bar:foo\n")
self._nodeset_t(["--expand", "-S", ":", "foo", "foo"], None, "foo\n")
self._nodeset_t(["--expand", "-S", ":", "foo[0]"], None, "foo0\n")
self._nodeset_t(["--expand", "-S", ":", "foo[2]"], None, "foo2\n")
self._nodeset_t(["--expand", "-S", ":", "foo[1,2]"], None, "foo1:foo2\n")
self._nodeset_t(["--expand", "-S", ":", "foo[1-2]"], None, "foo1:foo2\n")
self._nodeset_t(["--expand", "-S", " ", "foo[1-2]"], None, "foo1 foo2\n")
self._nodeset_t(["--expand", "-S", ",", "foo[1-2],bar"], None, "bar,foo1,foo2\n")
self._nodeset_t(["--expand", "-S", "uuu", "foo[1-2],bar"], None, "baruuufoo1uuufoo2\n")
self._nodeset_t(["--expand", "-S", "\\n", "foo[1-2]"], None, "foo1\nfoo2\n")
def test_009_fold_xor(self):
"""test nodeset --fold --xor"""
self._nodeset_t(["--fold", "foo", "-X", "bar"], None, "bar,foo\n")
self._nodeset_t(["--fold", "foo", "-X", "foo"], None, "\n")
self._nodeset_t(["--fold", "foo[1,2]", "-X", "foo[1-2]"], None, "\n")
self._nodeset_t(["--fold", "foo[1-10]", "-X", "foo[5-15]"], None, "foo[1-4,11-15]\n")
self._nodeset_t(["--fold", "foo[395-442]", "-X", "foo[1-200,245-394]"], None, "foo[1-200,245-442]\n")
self._nodeset_t(["--fold", "foo[395-442]", "-X", "foo", "-X", "foo[1-200,245-394]"], None, "foo,foo[1-200,245-442]\n")
self._nodeset_t(["--fold", "foo[395-442]", "-X", "foo", "-X", "foo[0-200,245-394]"], None, "foo,foo[0-200,245-442]\n")
self._nodeset_t(["--fold", "foo[395-442]", "-X", "bar3,bar24", "-X", "foo[1-200,245-394]"], None, "bar[3,24],foo[1-200,245-442]\n")
def test_010_fold_xor_stdin(self):
"""test nodeset --fold --xor (stdin)"""
self._nodeset_t(["--fold", "-X", "bar"], "foo\n", "bar,foo\n")
self._nodeset_t(["--fold", "-X", "foo"], "foo\n", "\n")
self._nodeset_t(["--fold", "-X", "foo[1-2]"], "foo[1,2]\n", "\n")
self._nodeset_t(["--fold", "-X", "foo[5-15]"], "foo[1-10]\n", "foo[1-4,11-15]\n")
self._nodeset_t(["--fold", "-X", "foo[1-200,245-394]"], "foo[395-442]\n", "foo[1-200,245-442]\n")
self._nodeset_t(["--fold", "-X", "foo", "-X", "foo[1-200,245-394]"], "foo[395-442]\n", "foo,foo[1-200,245-442]\n")
self._nodeset_t(["--fold", "-X", "foo", "-X", "foo[0-200,245-394]"], "foo[395-442]\n", "foo,foo[0-200,245-442]\n")
self._nodeset_t(["--fold", "-X", "bar3,bar24", "-X", "foo[1-200,245-394]"], "foo[395-442]\n", "bar[3,24],foo[1-200,245-442]\n")
# using stdin for -X
self._nodeset_t(["-f","foo[2-4]","-X","-"], "foo4 foo5 foo6\n", "foo[2-3,5-6]\n")
self._nodeset_t(["-f","-X","-","foo[1-6]"], "foo4 foo5 foo6\n", "foo[1-6]\n")
def test_011_fold_exclude(self):
"""test nodeset --fold --exclude"""
# Empty result
self._nodeset_t(["--fold", "foo", "-x", "foo"], None, "\n")
# With no range
self._nodeset_t(["--fold", "foo,bar", "-x", "foo"], None, "bar\n")
# Normal with range
self._nodeset_t(["--fold", "foo[0-5]", "-x", "foo[0-10]"], None, "\n")
self._nodeset_t(["--fold", "foo[0-10]", "-x", "foo[0-5]"], None, "foo[6-10]\n")
# Do no change
self._nodeset_t(["--fold", "foo[6-10]", "-x", "bar[0-5]"], None, "foo[6-10]\n")
self._nodeset_t(["--fold", "foo[0-10]", "foo[13-18]", "--exclude", "foo[5-10,15]"], None, "foo[0-4,13-14,16-18]\n")
def test_012_fold_exclude_stdin(self):
"""test nodeset --fold --exclude (stdin)"""
# Empty result
self._nodeset_t(["--fold", "-x", "foo"], "", "\n")
self._nodeset_t(["--fold", "-x", "foo"], "\n", "\n")
self._nodeset_t(["--fold", "-x", "foo"], "foo\n", "\n")
# With no range
self._nodeset_t(["--fold", "-x", "foo"], "foo,bar\n", "bar\n")
# Normal with range
self._nodeset_t(["--fold", "-x", "foo[0-10]"], "foo[0-5]\n", "\n")
self._nodeset_t(["--fold", "-x", "foo[0-5]"], "foo[0-10]\n", "foo[6-10]\n")
# Do no change
self._nodeset_t(["--fold", "-x", "bar[0-5]"], "foo[6-10]\n", "foo[6-10]\n")
self._nodeset_t(["--fold", "--exclude", "foo[5-10,15]"], "foo[0-10]\nfoo[13-18]\n", "foo[0-4,13-14,16-18]\n")
# using stdin for -x
self._nodeset_t(["-f","foo[1-6]","-x","-"], "foo4 foo5 foo6\n", "foo[1-3]\n")
self._nodeset_t(["-f","-x","-","foo[1-6]"], "foo4 foo5 foo6\n", "foo[1-6]\n")
def test_013_fold_intersection(self):
"""test nodeset --fold --intersection"""
# Empty result
self._nodeset_t(["--fold", "foo", "-i", "foo"], None, "foo\n")
# With no range
self._nodeset_t(["--fold", "foo,bar", "--intersection", "foo"], None, "foo\n")
# Normal with range
self._nodeset_t(["--fold", "foo[0-5]", "-i", "foo[0-10]"], None, "foo[0-5]\n")
self._nodeset_t(["--fold", "foo[0-10]", "-i", "foo[0-5]"], None, "foo[0-5]\n")
self._nodeset_t(["--fold", "foo[6-10]", "-i", "bar[0-5]"], None, "\n")
self._nodeset_t(["--fold", "foo[0-10]", "foo[13-18]", "-i", "foo[5-10,15]"], None, "foo[5-10,15]\n")
# numerical bracket folding (#228)
self._nodeset_t(["--fold", "node123[1-2]", "-i", "node1232"], None, "node1232\n")
self._nodeset_t(["--fold", "node023[1-2]0", "-i", "node02320"], None, "node02320\n")
self._nodeset_t(["--fold", "node023[1-2]0-ipmi2", "-i", "node02320-ipmi2"], None, "node02320-ipmi2\n")
def test_014_fold_intersection_stdin(self):
"""test nodeset --fold --intersection (stdin)"""
# Empty result
self._nodeset_t(["--fold", "--intersection", "foo"], "", "\n")
self._nodeset_t(["--fold", "--intersection", "foo"], "\n", "\n")
self._nodeset_t(["--fold", "-i", "foo"], "foo\n", "foo\n")
# With no range
self._nodeset_t(["--fold", "-i", "foo"], "foo,bar\n", "foo\n")
# Normal with range
self._nodeset_t(["--fold", "-i", "foo[0-10]"], "foo[0-5]\n", "foo[0-5]\n")
self._nodeset_t(["--fold", "-i", "foo[0-5]"], "foo[0-10]\n", "foo[0-5]\n")
# Do no change
self._nodeset_t(["--fold", "-i", "bar[0-5]"], "foo[6-10]\n", "\n")
self._nodeset_t(["--fold", "-i", "foo[5-10,15]"], "foo[0-10]\nfoo[13-18]\n", "foo[5-10,15]\n")
# using stdin for -i
self._nodeset_t(["-f","foo[1-6]","-i","-"], "foo4 foo5 foo6\n", "foo[4-6]\n")
self._nodeset_t(["-f","-i","-","foo[1-6]"], "foo4 foo5 foo6\n", "foo[1-6]\n")
# numerical bracket folding (#228)
self._nodeset_t(["--fold", "-i", "node123[1-2]"], "node1232\n", "node1232\n")
self._nodeset_t(["--fold", "-i", "node023[1-2]0"], "node02320\n", "node02320\n")
self._nodeset_t(["--fold", "-i", "node023[1-2]0-ipmi2"], "node02320-ipmi2\n", "node02320-ipmi2\n")
def test_015_rangeset(self):
    """test nodeset --rangeset"""
    # Same three checks as before, expressed as an (argv, expected) table.
    cases = (
        (["--fold", "--rangeset", "1,2"], "1-2\n"),
        (["--expand", "-R", "1-2"], "1 2\n"),
        (["--fold", "-R", "1-2", "-X", "2-3"], "1,3\n"),
    )
    for argv, expected in cases:
        self._nodeset_t(argv, None, expected)
def test_016_rangeset_stdin(self):
    """test nodeset --rangeset (stdin)"""
    # Same checks, with the rangeset supplied on standard input.
    cases = (
        (["--fold", "--rangeset"], "1,2\n", "1-2\n"),
        (["--expand", "-R"], "1-2\n", "1 2\n"),
        (["--fold", "-R", "-X", "2-3"], "1-2\n", "1,3\n"),
    )
    for argv, stdin, expected in cases:
        self._nodeset_t(argv, stdin, expected)
def test_017_stdin(self):
    """test nodeset - (stdin)"""
    # "-" makes nodeset read node names from stdin; also exercises the
    # --autostep forms (integer, 'auto', and percentage thresholds).
    self._nodeset_t(["-f","-"], "foo\n", "foo\n")
    self._nodeset_t(["-f","-"], "foo1 foo2 foo3\n", "foo[1-3]\n")
    self._nodeset_t(["--autostep=2", "-f"], "foo0 foo2 foo4 foo6\n", "foo[0-6/2]\n")
    self._nodeset_t(["--autostep=auto", "-f"], "foo0 foo2 foo4 foo6\n", "foo[0-6/2]\n")
    self._nodeset_t(["--autostep=100%", "-f"], "foo0 foo2 foo4 foo6\n", "foo[0-6/2]\n")
    self._nodeset_t(["--autostep=0%", "-f"], "foo0 foo2 foo4 foo6\n", "foo[0-6/2]\n")
def test_018_split(self):
    """test nodeset --split"""
    # --split=N partitions the set into at most N parts, one per output
    # line; works with -f (fold), -e (expand) and -c (count) modes.
    self._nodeset_t(["--split=2","-f", "bar"], None, "bar\n")
    self._nodeset_t(["--split", "2","-f", "foo,bar"], None, "bar\nfoo\n")
    self._nodeset_t(["--split", "2","-e", "foo", "bar", "bur", "oof", "gcc"], None, "bar bur foo\ngcc oof\n")
    self._nodeset_t(["--split=2","-f", "foo[2-9]"], None, "foo[2-5]\nfoo[6-9]\n")
    self._nodeset_t(["--split=2","-f", "foo[2-3,7]", "bar9"], None, "bar9,foo2\nfoo[3,7]\n")
    self._nodeset_t(["--split=3","-f", "foo[2-9]"], None, "foo[2-4]\nfoo[5-7]\nfoo[8-9]\n")
    self._nodeset_t(["--split=1","-f", "foo2", "foo3"], None, "foo[2-3]\n")
    # more parts requested than nodes available: one node per line
    self._nodeset_t(["--split=4","-f", "foo[2-3]"], None, "foo2\nfoo3\n")
    self._nodeset_t(["--split=4","-f", "foo3", "foo2"], None, "foo2\nfoo3\n")
    self._nodeset_t(["--split=2","-e", "foo[2-9]"], None, "foo2 foo3 foo4 foo5\nfoo6 foo7 foo8 foo9\n")
    self._nodeset_t(["--split=3","-e", "foo[2-9]"], None, "foo2 foo3 foo4\nfoo5 foo6 foo7\nfoo8 foo9\n")
    self._nodeset_t(["--split=1","-e", "foo3", "foo2"], None, "foo2 foo3\n")
    self._nodeset_t(["--split=4","-e", "foo[2-3]"], None, "foo2\nfoo3\n")
    self._nodeset_t(["--split=4","-e", "foo2", "foo3"], None, "foo2\nfoo3\n")
    self._nodeset_t(["--split=2","-c", "foo2", "foo3"], None, "1\n1\n")
def test_019_contiguous(self):
    """test nodeset --contiguous"""
    # --contiguous splits the result into maximal contiguous ranges,
    # one per output line; also checked in -R (rangeset) mode.
    self._nodeset_t(["--contiguous", "-f", "bar"], None, "bar\n")
    self._nodeset_t(["--contiguous", "-f", "foo,bar"], None, "bar\nfoo\n")
    self._nodeset_t(["--contiguous", "-f", "foo", "bar", "bur", "oof", "gcc"], None, "bar\nbur\nfoo\ngcc\noof\n")
    self._nodeset_t(["--contiguous", "-e", "foo", "bar", "bur", "oof", "gcc"], None, "bar\nbur\nfoo\ngcc\noof\n")
    self._nodeset_t(["--contiguous", "-f", "foo2"], None, "foo2\n")
    self._nodeset_t(["--contiguous", "-R", "-f", "2"], None, "2\n")
    self._nodeset_t(["--contiguous", "-f", "foo[2-9]"], None, "foo[2-9]\n")
    self._nodeset_t(["--contiguous", "-f", "foo[2-3,7]", "bar9"], None, "bar9\nfoo[2-3]\nfoo7\n")
    self._nodeset_t(["--contiguous", "-R", "-f", "2-3,7", "9"], None, "2-3\n7\n9\n")
    self._nodeset_t(["--contiguous", "-f", "foo2", "foo3"], None, "foo[2-3]\n")
    self._nodeset_t(["--contiguous", "-f", "foo3", "foo2"], None, "foo[2-3]\n")
    self._nodeset_t(["--contiguous", "-f", "foo3", "foo1"], None, "foo1\nfoo3\n")
    self._nodeset_t(["--contiguous", "-f", "foo[1-5/2]", "foo7"], None, "foo1\nfoo3\nfoo5\nfoo7\n")
def test_020_slice(self):
    """test nodeset -I/--slice"""
    # -I/--slice selects nodes by sorted index (rangeset syntax, incl.
    # steps like 8-100/2); out-of-range indices are silently ignored.
    self._nodeset_t(["--slice=0","-f", "bar"], None, "bar\n")
    self._nodeset_t(["--slice=0","-e", "bar"], None, "bar\n")
    self._nodeset_t(["--slice=1","-f", "bar"], None, "\n")
    self._nodeset_t(["--slice=0-1","-f", "bar"], None, "bar\n")
    self._nodeset_t(["-I0","-f", "bar[34-68,89-90]"], None, "bar34\n")
    self._nodeset_t(["-R", "-I0","-f", "34-68,89-90"], None, "34\n")
    self._nodeset_t(["-I 0","-f", "bar[34-68,89-90]"], None, "bar34\n")
    self._nodeset_t(["-I 0","-e", "bar[34-68,89-90]"], None, "bar34\n")
    self._nodeset_t(["-I 0-3","-f", "bar[34-68,89-90]"], None, "bar[34-37]\n")
    self._nodeset_t(["-I 0-3","-f", "bar[34-68,89-90]", "-x", "bar34"], None, "bar[35-38]\n")
    self._nodeset_t(["-I 0-3","-f", "bar[34-68,89-90]", "-x", "bar35"], None, "bar[34,36-38]\n")
    self._nodeset_t(["-I 0-3","-e", "bar[34-68,89-90]"], None, "bar34 bar35 bar36 bar37\n")
    # slice indices are treated as a set: order does not matter
    self._nodeset_t(["-I 3,1,0,2","-f", "bar[34-68,89-90]"], None, "bar[34-37]\n")
    self._nodeset_t(["-I 1,3,7,10,16,20,30,34-35,37","-f", "bar[34-68,89-90]"], None, "bar[35,37,41,44,50,54,64,68,89]\n")
    self._nodeset_t(["-I 8","-f", "bar[34-68,89-90]"], None, "bar42\n")
    self._nodeset_t(["-I 8-100","-f", "bar[34-68,89-90]"], None, "bar[42-68,89-90]\n")
    self._nodeset_t(["-I 0-100","-f", "bar[34-68,89-90]"], None, "bar[34-68,89-90]\n")
    self._nodeset_t(["-I 8-100/2","-f", "bar[34-68,89-90]"], None, "bar[42,44,46,48,50,52,54,56,58,60,62,64,66,68,90]\n")
    # interaction with --autostep thresholds on the sliced result
    self._nodeset_t(["--autostep=2", "-I 8-100/2","-f", "bar[34-68,89-90]"], None, "bar[42-68/2,90]\n")
    self._nodeset_t(["--autostep=93%", "-I 8-100/2","-f", "bar[34-68,89-90]"], None, "bar[42-68/2,90]\n")
    self._nodeset_t(["--autostep=94%", "-I 8-100/2","-f", "bar[34-68,89-90]"], None, "bar[42,44,46,48,50,52,54,56,58,60,62,64,66,68,90]\n")
    self._nodeset_t(["--autostep=auto", "-I 8-100/2","-f", "bar[34-68,89-90]"], None, "bar[42,44,46,48,50,52,54,56,58,60,62,64,66,68,90]\n")
    self._nodeset_t(["--autostep=auto", "-I 8-100/2","-f", "bar[34-68]"], None, "bar[42-68/2]\n")
    self._nodeset_t(["--autostep=100%", "-I 8-100/2","-f", "bar[34-68]"], None, "bar[42-68/2]\n")
def test_021_slice_stdin(self):
    """test nodeset -I/--slice (stdin)"""
    # Same slice checks as test_020, but the nodeset comes from stdin.
    self._nodeset_t(["--slice=0","-f"], "bar\n", "bar\n")
    self._nodeset_t(["--slice=0","-e"], "bar\n", "bar\n")
    self._nodeset_t(["--slice=1","-f"], "bar\n", "\n")
    self._nodeset_t(["--slice=0-1","-f"], "bar\n", "bar\n")
    self._nodeset_t(["-I0","-f"], "bar[34-68,89-90]\n", "bar34\n")
    self._nodeset_t(["-R", "-I0","-f"], "34-68,89-90\n", "34\n")
    self._nodeset_t(["-I 0","-f"], "bar[34-68,89-90]\n", "bar34\n")
    self._nodeset_t(["-I 0","-e"], "bar[34-68,89-90]\n", "bar34\n")
    self._nodeset_t(["-I 0-3","-f"], "bar[34-68,89-90]\n", "bar[34-37]\n")
    self._nodeset_t(["-I 0-3","-f", "-x", "bar34"], "bar[34-68,89-90]\n", "bar[35-38]\n")
    self._nodeset_t(["-I 0-3","-f", "-x", "bar35"], "bar[34-68,89-90]\n", "bar[34,36-38]\n")
    self._nodeset_t(["-I 0-3","-e"], "bar[34-68,89-90]\n", "bar34 bar35 bar36 bar37\n")
    self._nodeset_t(["-I 3,1,0,2","-f"], "bar[34-68,89-90]\n", "bar[34-37]\n")
    self._nodeset_t(["-I 1,3,7,10,16,20,30,34-35,37","-f"], "bar[34-68,89-90]\n", "bar[35,37,41,44,50,54,64,68,89]\n")
    self._nodeset_t(["-I 8","-f"], "bar[34-68,89-90]\n", "bar42\n")
    self._nodeset_t(["-I 8-100","-f"], "bar[34-68,89-90]\n", "bar[42-68,89-90]\n")
    self._nodeset_t(["-I 0-100","-f"], "bar[34-68,89-90]\n", "bar[34-68,89-90]\n")
    self._nodeset_t(["-I 8-100/2","-f"], "bar[34-68,89-90]\n", "bar[42,44,46,48,50,52,54,56,58,60,62,64,66,68,90]\n")
    # --autostep thresholds, including a fractional percentage
    self._nodeset_t(["--autostep=2", "-I 8-100/2","-f"], "bar[34-68,89-90]\n", "bar[42-68/2,90]\n")
    self._nodeset_t(["--autostep=93%", "-I 8-100/2","-f"], "bar[34-68,89-90]\n", "bar[42-68/2,90]\n")
    self._nodeset_t(["--autostep=93.33%", "-I 8-100/2","-f"], "bar[34-68,89-90]\n", "bar[42-68/2,90]\n")
    self._nodeset_t(["--autostep=94%", "-I 8-100/2","-f"], "bar[34-68,89-90]\n", "bar[42,44,46,48,50,52,54,56,58,60,62,64,66,68,90]\n")
    self._nodeset_t(["--autostep=auto", "-I 8-100/2","-f"], "bar[34-68,89-90]\n", "bar[42,44,46,48,50,52,54,56,58,60,62,64,66,68,90]\n")
    self._nodeset_t(["--autostep=2", "-I 8-100/2","-f"], "bar[34-68]\n", "bar[42-68/2]\n")
def test_022_output_format(self):
    """test nodeset -O"""
    # -O/--output-format applies a %s template to each output item;
    # combined here with -S separator, fold/expand/count/contiguous,
    # --split, slicing, and rangeset (-R) mode.
    self._nodeset_t(["--expand", "--output-format", "/path/%s/", "foo"], None, "/path/foo/\n")
    self._nodeset_t(["--expand", "-O", "/path/%s/", "-S", ":", "foo"], None, "/path/foo/\n")
    self._nodeset_t(["--expand", "-O", "/path/%s/", "foo[2]"], None, "/path/foo2/\n")
    self._nodeset_t(["--expand", "-O", "%s-ib0", "foo[1-4]"], None, "foo1-ib0 foo2-ib0 foo3-ib0 foo4-ib0\n")
    self._nodeset_t(["--expand", "-O", "%s-ib0", "-S", ":", "foo[1-4]"], None, "foo1-ib0:foo2-ib0:foo3-ib0:foo4-ib0\n")
    self._nodeset_t(["--fold", "-O", "%s-ib0", "foo1", "foo2"], None, "foo[1-2]-ib0\n")
    self._nodeset_t(["--count", "-O", "result-%s", "foo1", "foo2"], None, "result-2\n")
    self._nodeset_t(["--contiguous", "-O", "%s-ipmi", "-f", "foo[2-3,7]", "bar9"], None, "bar9-ipmi\nfoo[2-3]-ipmi\nfoo7-ipmi\n")
    self._nodeset_t(["--split=2", "-O", "%s-ib", "-e", "foo[2-9]"], None, "foo2-ib foo3-ib foo4-ib foo5-ib\nfoo6-ib foo7-ib foo8-ib foo9-ib\n")
    self._nodeset_t(["--split=3", "-O", "hwm-%s", "-f", "foo[2-9]"], None, "hwm-foo[2-4]\nhwm-foo[5-7]\nhwm-foo[8-9]\n")
    self._nodeset_t(["-I0", "-O", "{%s}", "-f", "bar[34-68,89-90]"], None, "{bar34}\n")
    # RangeSet mode (-R)
    self._nodeset_t(["--fold", "-O", "{%s}", "--rangeset", "1,2"], None, "{1-2}\n")
    self._nodeset_t(["--expand", "-O", "{%s}", "-R","1-2"], None, "{1} {2}\n")
    self._nodeset_t(["--fold", "-O", "{%s}", "-R","1-2","-X","2-3"], None, "{1,3}\n")
    self._nodeset_t(["--fold", "-O", "{%s}", "-S", ":", "--rangeset", "1,2"], None, "{1-2}\n")
    self._nodeset_t(["--expand", "-O", "{%s}", "-S", ":", "-R","1-2"], None, "{1}:{2}\n")
    self._nodeset_t(["--fold", "-O", "{%s}", "-S", ":", "-R","1-2","-X","2-3"], None, "{1,3}\n")
    self._nodeset_t(["-R", "-I0", "-O", "{%s}", "-f", "34-68,89-90"], None, "{34}\n")
def test_023_axis(self):
    """test nodeset folding with --axis"""
    # --axis=N restricts folding to the given dimension(s) of
    # multi-dimensional node names (1-based; negative counts from the
    # end; ranges like 1-2 select several axes). Only valid with -f.
    self._nodeset_t(["--axis=0","-f", "bar"], None, "bar\n")
    self._nodeset_t(["--axis=1","-f", "bar"], None, "bar\n")
    # --axis is rejected (exit code 2) outside of folding mode
    self._nodeset_t(["--axis=1","-R","-f", "1,2,3"], None, None, 2,
                    "--axis option is only supported when folding nodeset\n")
    self._nodeset_t(["--axis=1","-e", "bar"], None, None, 2,
                    "--axis option is only supported when folding nodeset\n")
    # 1D and 2D nodeset: fold along axis 0 only
    self._nodeset_t(["--axis=1","-f", "comp-[1-2]-[1-3],login-[1-2]"], None,
                    'comp-[1-2]-1,comp-[1-2]-2,comp-[1-2]-3,login-[1-2]\n')
    # 1D and 2D nodeset: fold along axis 1 only
    self._nodeset_t(["--axis=2","-f", "comp-[1-2]-[1-3],login-[1-2]"], None,
                    'comp-1-[1-3],comp-2-[1-3],login-1,login-2\n')
    # 1D and 2D nodeset: fold along last axis only
    self._nodeset_t(["--axis=-1","-f", "comp-[1-2]-[1-3],login-[1-2]"], None,
                    'comp-1-[1-3],comp-2-[1-3],login-[1-2]\n')
    # test for a common case
    # input order must not matter, hence the shuffle
    ndnodes = []
    for ib in range(2):
        for idx in range(500):
            ndnodes.append("node%d-ib%d" % (idx, ib))
    random.shuffle(ndnodes)
    self._nodeset_t(["--axis=1","-f"] + ndnodes, None,
                    "node[0-499]-ib0,node[0-499]-ib1\n")
    exp_result = []
    for idx in range(500):
        exp_result.append("node%d-ib[0-1]" % idx)
    self._nodeset_t(["--axis=2","-f"] + ndnodes, None,
                    ','.join(exp_result) + '\n')
    # 4D test
    ndnodes = ["c-1-2-3-4", "c-2-2-3-4", "c-3-2-3-4", "c-5-5-5-5",
               "c-5-7-5-5", "c-5-9-5-5", "c-5-11-5-5", "c-9-8-8-08",
               "c-9-8-8-09"]
    self._nodeset_t(["--axis=1","-f"] + ndnodes, None,
                    "c-5-5-5-5,c-5-7-5-5,c-5-9-5-5,c-5-11-5-5,c-[1-3]-2-3-4,c-9-8-8-08,c-9-8-8-09\n")
    self._nodeset_t(["--axis=2","-f"] + ndnodes, None,
                    "c-5-[5,7,9,11]-5-5,c-1-2-3-4,c-2-2-3-4,c-3-2-3-4,c-9-8-8-08,c-9-8-8-09\n")
    self._nodeset_t(["--axis=3","-f"] + ndnodes, None,
                    "c-5-5-5-5,c-5-7-5-5,c-5-9-5-5,c-5-11-5-5,c-1-2-3-4,c-2-2-3-4,c-3-2-3-4,c-9-8-8-08,c-9-8-8-09\n")
    self._nodeset_t(["--axis=4","-f"] + ndnodes, None,
                    "c-5-5-5-5,c-5-7-5-5,c-5-9-5-5,c-5-11-5-5,c-1-2-3-4,c-2-2-3-4,c-3-2-3-4,c-9-8-8-[08-09]\n")
    self._nodeset_t(["--axis=1-2","-f"] + ndnodes, None,
                    "c-5-[5,7,9,11]-5-5,c-[1-3]-2-3-4,c-9-8-8-08,c-9-8-8-09\n")
    self._nodeset_t(["--axis=2-3","-f"] + ndnodes, None,
                    "c-5-[5,7,9,11]-5-5,c-1-2-3-4,c-2-2-3-4,c-3-2-3-4,c-9-8-8-08,c-9-8-8-09\n")
    self._nodeset_t(["--axis=3-4","-f"] + ndnodes, None,
                    "c-5-5-5-5,c-5-7-5-5,c-5-9-5-5,c-5-11-5-5,c-1-2-3-4,c-2-2-3-4,c-3-2-3-4,c-9-8-8-[08-09]\n")
    self._nodeset_t(["--axis=1-3","-f"] + ndnodes, None,
                    "c-5-[5,7,9,11]-5-5,c-[1-3]-2-3-4,c-9-8-8-08,c-9-8-8-09\n")
    self._nodeset_t(["--axis=2-4","-f"] + ndnodes, None,
                    "c-5-[5,7,9,11]-5-5,c-1-2-3-4,c-2-2-3-4,c-3-2-3-4,c-9-8-8-[08-09]\n")
    self._nodeset_t(["--axis=1-4","-f"] + ndnodes, None,
                    "c-5-[5,7,9,11]-5-5,c-[1-3]-2-3-4,c-9-8-8-[08-09]\n")
    # no --axis: full folding, same result as folding on all axes
    self._nodeset_t(["-f"] + ndnodes, None,
                    "c-5-[5,7,9,11]-5-5,c-[1-3]-2-3-4,c-9-8-8-[08-09]\n")
    # a case where axis and autostep are working
    self._nodeset_t(["--autostep=4", "--axis=1-2","-f"] + ndnodes, None,
                    "c-5-[5-11/2]-5-5,c-[1-3]-2-3-4,c-9-8-8-08,c-9-8-8-09\n")
def test_024_axis_stdin(self):
    """test nodeset folding with --axis (stdin)"""
    # Same --axis checks as test_023, with nodes fed on standard input.
    self._nodeset_t(["--axis=0","-f"], "bar\n", "bar\n")
    self._nodeset_t(["--axis=1","-f"], "bar\n", "bar\n")
    # --axis is rejected (exit code 2) outside of folding mode
    self._nodeset_t(["--axis=1","-R","-f"], "1,2,3", None, 2,
                    "--axis option is only supported when folding nodeset\n")
    self._nodeset_t(["--axis=1","-e"], "bar\n", None, 2,
                    "--axis option is only supported when folding nodeset\n")
    # 1D and 2D nodeset: fold along axis 0 only
    self._nodeset_t(["--axis=1","-f"], "comp-[1-2]-[1-3],login-[1-2]\n",
                    'comp-[1-2]-1,comp-[1-2]-2,comp-[1-2]-3,login-[1-2]\n')
    # 1D and 2D nodeset: fold along axis 1 only
    self._nodeset_t(["--axis=2","-f"], "comp-[1-2]-[1-3],login-[1-2]\n",
                    'comp-1-[1-3],comp-2-[1-3],login-1,login-2\n')
    # 1D and 2D nodeset: fold along last axis only
    self._nodeset_t(["--axis=-1","-f"], "comp-[1-2]-[1-3],login-[1-2]\n",
                    'comp-1-[1-3],comp-2-[1-3],login-[1-2]\n')
    # test for a common case
    # input order must not matter, hence the shuffle
    ndnodes = []
    for ib in range(2):
        for idx in range(500):
            ndnodes.append("node%d-ib%d" % (idx, ib))
    random.shuffle(ndnodes)
    self._nodeset_t(["--axis=1","-f"], '\n'.join(ndnodes) + '\n',
                    "node[0-499]-ib0,node[0-499]-ib1\n")
    exp_result = []
    for idx in range(500):
        exp_result.append("node%d-ib[0-1]" % idx)
    self._nodeset_t(["--axis=2","-f"], '\n'.join(ndnodes) + '\n',
                    ','.join(exp_result) + '\n')
class CLINodesetGroupResolverTest1(CLINodesetTestBase):
    """Unit test class for testing CLI/Nodeset.py with custom Group Resolver"""

    def setUp(self):
        # Special tests that require a default group source set
        # (config content must start in column 0 for the INI parser)
        f = make_temp_file("""
[Main]
default: local
[local]
map: echo example[1-100]
all: echo example[1-1000]
list: echo foo bar moo
""")
        set_std_group_resolver(GroupResolverConfig(f.name))

    def tearDown(self):
        # restore the default (built-in) group resolver
        set_std_group_resolver(None)

    def test_022_list(self):
        """test nodeset --list"""
        # -l lists group names; -ll adds members; -lll adds node counts
        self._nodeset_t(["--list"], None, "@bar\n@foo\n@moo\n")
        self._nodeset_t(["-ll"], None, "@bar example[1-100]\n@foo example[1-100]\n@moo example[1-100]\n")
        self._nodeset_t(["-lll"], None, "@bar example[1-100] 100\n@foo example[1-100] 100\n@moo example[1-100] 100\n")
        # with node args: only groups intersecting the given nodes
        self._nodeset_t(["-l", "example[4,95]", "example5"], None, "@moo\n@bar\n@foo\n")
        self._nodeset_t(["-ll", "example[4,95]", "example5"], None, "@moo example[4-5,95]\n@bar example[4-5,95]\n@foo example[4-5,95]\n")
        self._nodeset_t(["-lll", "example[4,95]", "example5"], None, "@moo example[4-5,95] 3/100\n@bar example[4-5,95] 3/100\n@foo example[4-5,95] 3/100\n")
        # test empty result
        self._nodeset_t(["-l", "foo[3-70]", "bar6"], None, "")
        # more arg-mixed tests
        self._nodeset_t(["-a", "-l"], None, "@moo\n@bar\n@foo\n")
        self._nodeset_t(["-a", "-l", "-x example[1-100]"], None, "")
        self._nodeset_t(["-a", "-l", "-x example[1-40]"], None, "@moo\n@bar\n@foo\n")
        self._nodeset_t(["-l", "-x example3"], None, "") # no -a, remove from nothing
        self._nodeset_t(["-l", "-i example3"], None, "") # no -a, intersect from nothing
        self._nodeset_t(["-l", "-X example3"], None, "@moo\n@bar\n@foo\n") # no -a, xor from nothing
        self._nodeset_t(["-l", "-", "-i example3"], "example[3,500]\n", "@moo\n@bar\n@foo\n")

    def test_023_list_all(self):
        """test nodeset --list-all"""
        # -L/--list-all lists all groups regardless of arguments
        self._nodeset_t(["--list-all"], None, "@bar\n@foo\n@moo\n")
        self._nodeset_t(["-L"], None, "@bar\n@foo\n@moo\n")
        self._nodeset_t(["-LL"], None, "@bar example[1-100]\n@foo example[1-100]\n@moo example[1-100]\n")
        self._nodeset_t(["-LLL"], None, "@bar example[1-100] 100\n@foo example[1-100] 100\n@moo example[1-100] 100\n")
class CLINodesetGroupResolverTest2(CLINodesetTestBase):
    """Unit test class for testing CLI/Nodeset.py with custom Group Resolver"""

    def setUp(self):
        # Special tests that require a default group source set
        # Two sources: 'test' (default) and 'other'.
        f = make_temp_file("""
[Main]
default: test
[test]
map: echo example[1-100]
all: echo @foo,@bar,@moo
list: echo foo bar moo
[other]
map: echo nova[030-489]
all: echo @baz,@qux,@norf
list: echo baz qux norf
""")
        set_std_group_resolver(GroupResolverConfig(f.name))

    def tearDown(self):
        # restore the default (built-in) group resolver
        set_std_group_resolver(None)

    def test_024_groups(self):
        """test nodeset group expansion with the default source"""
        self._nodeset_t(["--split=2","-r", "unknown2", "unknown3"], None, "unknown2\nunknown3\n")
        self._nodeset_t(["-f", "-a"], None, "example[1-100]\n")
        self._nodeset_t(["-f", "@moo"], None, "example[1-100]\n")
        self._nodeset_t(["-f", "@moo", "@bar"], None, "example[1-100]\n")
        self._nodeset_t(["-e", "-a"], None, ' '.join(["example%d" % i for i in range(1, 101)]) + '\n')
        self._nodeset_t(["-c", "-a"], None, "100\n")
        self._nodeset_t(["-r", "-a"], None, "@bar\n")
        self._nodeset_t(["--split=2","-r", "unknown2", "unknown3"], None, "unknown2\nunknown3\n")

    # We need to split following unit tests in order to reset group
    # source in setUp/tearDown...

    def test_025_groups(self):
        """test nodeset -s/-a/-d count with explicit source"""
        self._nodeset_t(["-s", "test", "-c", "-a", "-d"], None, "100\n")

    def test_026_groups(self):
        """test nodeset -r with explicit source (prefixed group name)"""
        self._nodeset_t(["-s", "test", "-r", "-a"], None, "@test:bar\n")

    def test_027_groups(self):
        """test nodeset -G -r (groupbase: strip source prefix)"""
        self._nodeset_t(["-s", "test", "-G", "-r", "-a"], None, "@bar\n")

    def test_028_groups(self):
        """test nodeset --groupsources"""
        self._nodeset_t(["-s", "test", "--groupsources"], None, "test (default)\nother\n")

    def test_029_groups(self):
        """test nodeset -q --groupsources (quiet: no '(default)' tag)"""
        self._nodeset_t(["-s", "test", "-q", "--groupsources"], None, "test\nother\n")

    def test_030_groups(self):
        """test nodeset -a combined with stdin nodes"""
        self._nodeset_t(["-f", "-a", "-"], "example101\n", "example[1-101]\n")
        self._nodeset_t(["-f", "-a", "-"], "example102 example101\n", "example[1-102]\n")

    # Check default group source switching...

    def test_031_groups(self):
        """test nodeset -c -a -d with both sources"""
        self._nodeset_t(["-s", "other", "-c", "-a", "-d"], None, "460\n")
        self._nodeset_t(["-s", "test", "-c", "-a", "-d"], None, "100\n")

    def test_032_groups(self):
        """test nodeset -r -a with both sources"""
        self._nodeset_t(["-s", "other", "-r", "-a"], None, "@other:baz\n")
        self._nodeset_t(["-s", "test", "-r", "-a"], None, "@test:bar\n")

    def test_033_groups(self):
        """test nodeset -G -r -a with both sources"""
        self._nodeset_t(["-s", "other", "-G", "-r", "-a"], None, "@baz\n")
        self._nodeset_t(["-s", "test", "-G", "-r", "-a"], None, "@bar\n")

    def test_034_groups(self):
        """test nodeset --groupsources (default first)"""
        self._nodeset_t(["--groupsources"], None, "test (default)\nother\n")

    def test_035_groups(self):
        """test nodeset --groupsources with -s overriding the default"""
        self._nodeset_t(["-s", "other", "--groupsources"], None, "other (default)\ntest\n")

    def test_036_groups(self):
        """test nodeset --groupsources again (default restored)"""
        self._nodeset_t(["--groupsources"], None, "test (default)\nother\n")

    def test_037_groups_output_format(self):
        """test nodeset -r -O with group result"""
        self._nodeset_t(["-r", "-O", "{%s}", "-a"], None, "{@bar}\n")

    def test_038_groups_output_format(self):
        """test nodeset -r -O with explicit source"""
        self._nodeset_t(["-O", "{%s}", "-s", "other", "-r", "-a"], None, "{@other:baz}\n")

    def test_039_list_all(self):
        """test nodeset --list-all (multi sources)"""
        self._nodeset_t(["--list-all"], None,
                        "@bar\n@foo\n@moo\n@other:baz\n@other:norf\n@other:qux\n")
        self._nodeset_t(["--list-all", "-G"], None,
                        "@bar\n@foo\n@moo\n@baz\n@norf\n@qux\n")
        self._nodeset_t(["-GL"], None,
                        "@bar\n@foo\n@moo\n@baz\n@norf\n@qux\n")
        self._nodeset_t(["--list-all", "-s", "other"], None,
                        "@other:baz\n@other:norf\n@other:qux\n@test:bar\n@test:foo\n@test:moo\n")
        self._nodeset_t(["--list-all", "-G", "-s", "other"], None,
                        "@baz\n@norf\n@qux\n@bar\n@foo\n@moo\n") # 'other' source first
class CLINodesetGroupResolverTest3(CLINodesetTestBase):
    """Unit test class for testing CLI/Nodeset.py with custom Group Resolver

    A case we support: one of the source misses the list upcall.
    """

    def setUp(self):
        # Special tests that require a default group source set
        # The 'pdu' source deliberately has no 'list:' upcall.
        f = make_temp_file("""
[Main]
default: test
[test]
map: echo example[1-100]
all: echo @foo,@bar,@moo
list: echo foo bar moo
[other]
map: echo nova[030-489]
all: echo @baz,@qux,@norf
list: echo baz qux norf
[pdu]
map: echo pdu-[0-3]-[1-2]
""")
        set_std_group_resolver(GroupResolverConfig(f.name))

    def tearDown(self):
        # restore the default (built-in) group resolver
        set_std_group_resolver(None)

    def test_040_list_all(self):
        """test nodeset --list-all (w/ missing list upcall)"""
        # exit code 0: missing list upcall only produces a warning on stderr
        self._nodeset_t(["--list-all"], None,
                        "@bar\n@foo\n@moo\n@other:baz\n@other:norf\n@other:qux\n", 0,
                        "Warning: No list upcall defined for group source pdu\n")
        self._nodeset_t(["-LL"], None,
                        "@bar example[1-100]\n@foo example[1-100]\n@moo example[1-100]\n"
                        "@other:baz nova[030-489]\n@other:norf nova[030-489]\n@other:qux nova[030-489]\n", 0,
                        "Warning: No list upcall defined for group source pdu\n")
        self._nodeset_t(["-LLL"], None,
                        "@bar example[1-100] 100\n@foo example[1-100] 100\n@moo example[1-100] 100\n"
                        "@other:baz nova[030-489] 460\n@other:norf nova[030-489] 460\n@other:qux nova[030-489] 460\n", 0,
                        "Warning: No list upcall defined for group source pdu\n")

    def test_041_list_failure(self):
        """test nodeset --list -s source w/ missing list upcall"""
        # exit code 1: explicitly listing the broken source is an error
        self._nodeset_t(["--list", "-s", "pdu"], None, "", 1,
                        'No list upcall defined for group source "pdu"\n')
| 59.527778
| 156
| 0.521349
| 7,029
| 42,860
| 3.047375
| 0.050363
| 0.137442
| 0.205602
| 0.173576
| 0.864192
| 0.826097
| 0.780439
| 0.72591
| 0.662838
| 0.585854
| 0
| 0.105672
| 0.192114
| 42,860
| 719
| 157
| 59.61057
| 0.512938
| 0.059473
| 0
| 0.20073
| 0
| 0.087591
| 0.401387
| 0.077877
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096715
| false
| 0
| 0.012774
| 0
| 0.118613
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
718cba4bcbd3c109ecf77094633022e98791250e
| 133
|
py
|
Python
|
src/ape_plugins/__init__.py
|
Ninjagod1251/ape
|
9b40ef15f25362ddb83cb6d571d60cab041fce4a
|
[
"Apache-2.0"
] | 210
|
2021-04-29T05:42:42.000Z
|
2022-03-31T15:50:17.000Z
|
src/ape_plugins/__init__.py
|
Ninjagod1251/ape
|
9b40ef15f25362ddb83cb6d571d60cab041fce4a
|
[
"Apache-2.0"
] | 370
|
2021-04-29T01:54:32.000Z
|
2022-03-31T19:19:29.000Z
|
src/ape_plugins/__init__.py
|
Ninjagod1251/ape
|
9b40ef15f25362ddb83cb6d571d60cab041fce4a
|
[
"Apache-2.0"
] | 25
|
2021-04-29T05:08:50.000Z
|
2022-03-11T20:43:56.000Z
|
from ape import plugins
from ape.api import ConfigDict


@plugins.register(plugins.Config)
def config_class():
    """Register the plugin's configuration class with ape.

    Returns the generic ``ConfigDict``, i.e. this plugin defines no
    custom configuration fields of its own.
    """
    return ConfigDict
| 16.625
| 33
| 0.789474
| 18
| 133
| 5.777778
| 0.611111
| 0.134615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.142857
| 133
| 7
| 34
| 19
| 0.912281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.4
| 0.2
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 6
|
71ad3c3a161bb4d5f288e68d165d5aa2f2c29b48
| 136
|
py
|
Python
|
release/scripts/presets/camera/Panasonic_AG-HVX200.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 365
|
2015-02-10T15:10:55.000Z
|
2022-03-03T15:50:51.000Z
|
release/scripts/presets/camera/Panasonic_AG-HVX200.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 45
|
2015-01-09T15:34:20.000Z
|
2021-10-05T14:44:23.000Z
|
release/scripts/presets/camera/Panasonic_AG-HVX200.py
|
rbabari/blender
|
6daa85f14b2974abfc3d0f654c5547f487bb3b74
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 172
|
2015-01-25T15:16:53.000Z
|
2022-01-31T08:25:36.000Z
|
# Blender camera preset: sets the active camera's sensor dimensions.
# Values here describe a 1/3" class sensor — presumably matching the
# Panasonic AG-HVX200 named in the file path (verify against specs).
import bpy
bpy.context.camera.sensor_width = 4.68      # mm
bpy.context.camera.sensor_height = 2.633    # mm
bpy.context.camera.sensor_fit = 'HORIZONTAL'
| 27.2
| 44
| 0.801471
| 22
| 136
| 4.818182
| 0.590909
| 0.283019
| 0.45283
| 0.622642
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056
| 0.080882
| 136
| 4
| 45
| 34
| 0.792
| 0
| 0
| 0
| 0
| 0
| 0.073529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
71cba20870ea4e87c8c6d54889a0f92c2eb79d65
| 134
|
py
|
Python
|
scrapli_community/eltex/esr/__init__.py
|
ikievite/scrapli_community
|
b160ae6c21177c949a0b8210810ba2584b31861f
|
[
"MIT"
] | 37
|
2020-11-13T20:50:30.000Z
|
2022-03-25T16:15:28.000Z
|
scrapli_community/eltex/esr/__init__.py
|
ikievite/scrapli_community
|
b160ae6c21177c949a0b8210810ba2584b31861f
|
[
"MIT"
] | 84
|
2020-08-02T16:20:15.000Z
|
2022-03-02T14:38:26.000Z
|
scrapli_community/eltex/esr/__init__.py
|
ikievite/scrapli_community
|
b160ae6c21177c949a0b8210810ba2584b31861f
|
[
"MIT"
] | 25
|
2020-08-01T23:51:37.000Z
|
2022-02-21T10:06:33.000Z
|
"""scrapli_community.eltex.esr"""
from scrapli_community.eltex.esr.eltex_esr import SCRAPLI_PLATFORM
__all__ = ("SCRAPLI_PLATFORM",)
| 26.8
| 66
| 0.80597
| 17
| 134
| 5.823529
| 0.470588
| 0.242424
| 0.424242
| 0.484848
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067164
| 134
| 4
| 67
| 33.5
| 0.792
| 0.201493
| 0
| 0
| 0
| 0
| 0.158416
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 6
|
71ce385e5438e2444734eb392b366b81a558e0bd
| 9,026
|
py
|
Python
|
tests/components/senseme/test_config_flow.py
|
charithmadhuranga/core
|
06329a2f43ba82df6e0c6250fa6965f9259cfe8a
|
[
"Apache-2.0"
] | 4
|
2021-07-11T09:11:00.000Z
|
2022-02-27T14:43:50.000Z
|
tests/components/senseme/test_config_flow.py
|
charithmadhuranga/core
|
06329a2f43ba82df6e0c6250fa6965f9259cfe8a
|
[
"Apache-2.0"
] | 277
|
2021-10-04T06:39:33.000Z
|
2021-12-28T22:04:17.000Z
|
tests/components/senseme/test_config_flow.py
|
charithmadhuranga/core
|
06329a2f43ba82df6e0c6250fa6965f9259cfe8a
|
[
"Apache-2.0"
] | 1
|
2022-02-09T00:30:51.000Z
|
2022-02-09T00:30:51.000Z
|
"""Test the SenseME config flow."""
from unittest.mock import patch
from homeassistant import config_entries
from homeassistant.components.senseme.const import DOMAIN
from homeassistant.const import CONF_HOST, CONF_ID
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from . import (
MOCK_ADDRESS,
MOCK_DEVICE,
MOCK_DEVICE2,
MOCK_DEVICE_ALTERNATE_IP,
MOCK_UUID,
_patch_discovery,
)
from tests.common import MockConfigEntry
async def test_form_user(hass: HomeAssistant) -> None:
    """Test we get the form as a user."""
    # Patch discovery and entry setup, start a user-initiated flow,
    # then pick the discovered device by its UUID.
    with _patch_discovery(), patch(
        "homeassistant.components.senseme.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        assert result["type"] == RESULT_TYPE_FORM
        assert not result["errors"]
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "device": MOCK_UUID,
            },
        )
        await hass.async_block_till_done()

    # A config entry is created and setup was invoked exactly once.
    assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result2["title"] == "Haiku Fan"
    assert result2["data"] == {
        "info": MOCK_DEVICE.get_device_info,
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_manual_entry(hass: HomeAssistant) -> None:
    """Test we get the form as a user with a discovery but user chooses manual."""
    # Selecting device=None in the picker routes to the "manual" step.
    with _patch_discovery():
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        assert result["type"] == RESULT_TYPE_FORM
        assert not result["errors"]
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "device": None,
            },
        )
        await hass.async_block_till_done()

    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["step_id"] == "manual"

    # Entering a host that resolves to a device completes the flow.
    with patch(
        "homeassistant.components.senseme.config_flow.async_get_device_by_ip_address",
        return_value=MOCK_DEVICE,
    ), patch(
        "homeassistant.components.senseme.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result3 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_HOST: MOCK_ADDRESS,
            },
        )
        await hass.async_block_till_done()

    assert result3["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result3["title"] == "Haiku Fan"
    assert result3["data"] == {
        "info": MOCK_DEVICE.get_device_info,
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_no_discovery(hass: HomeAssistant) -> None:
    """Test we get the form as a user with no discovery."""
    # With no discovered devices the flow goes straight to manual entry.
    with _patch_discovery(no_device=True), patch(
        "homeassistant.components.senseme.config_flow.async_get_device_by_ip_address",
        return_value=MOCK_DEVICE,
    ), patch(
        "homeassistant.components.senseme.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        assert result["type"] == RESULT_TYPE_FORM
        assert not result["errors"]
        # An unparsable host is rejected with an "invalid_host" error...
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_HOST: "not a valid address",
            },
        )
        await hass.async_block_till_done()
        assert result2["type"] == RESULT_TYPE_FORM
        assert result2["step_id"] == "manual"
        assert result2["errors"] == {CONF_HOST: "invalid_host"}
        # ...then a valid address completes the flow.
        result3 = await hass.config_entries.flow.async_configure(
            result2["flow_id"],
            {
                CONF_HOST: MOCK_ADDRESS,
            },
        )
        await hass.async_block_till_done()

    assert result3["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result3["title"] == "Haiku Fan"
    assert result3["data"] == {
        "info": MOCK_DEVICE.get_device_info,
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_manual_entry_cannot_connect(hass: HomeAssistant) -> None:
    """Test we get the form as a user."""
    # Manual entry path where the host lookup returns no device:
    # the manual form is re-shown with a "cannot_connect" error.
    with _patch_discovery():
        result = await hass.config_entries.flow.async_init(
            DOMAIN, context={"source": config_entries.SOURCE_USER}
        )
        assert result["type"] == RESULT_TYPE_FORM
        assert not result["errors"]
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "device": None,
            },
        )
        await hass.async_block_till_done()

    assert result2["type"] == RESULT_TYPE_FORM
    assert result2["step_id"] == "manual"

    # return_value=None simulates an unreachable host
    with patch(
        "homeassistant.components.senseme.config_flow.async_get_device_by_ip_address",
        return_value=None,
    ):
        result3 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                CONF_HOST: MOCK_ADDRESS,
            },
        )
        await hass.async_block_till_done()

    assert result3["type"] == RESULT_TYPE_FORM
    assert result3["step_id"] == "manual"
    assert result3["errors"] == {CONF_HOST: "cannot_connect"}
async def test_discovery(hass: HomeAssistant) -> None:
    """Test we can setup a discovered device."""
    # Pre-existing entry for a DIFFERENT device (MOCK_DEVICE2) must not
    # block setting up the newly discovered one.
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={
            "info": MOCK_DEVICE2.get_device_info,
        },
        unique_id=MOCK_DEVICE2.uuid,
    )
    entry.add_to_hass(hass)
    with _patch_discovery(), patch(
        "homeassistant.components.senseme.async_get_device_by_device_info",
        return_value=(True, MOCK_DEVICE2),
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    # Discovery-sourced flow: user confirms the device to finish setup.
    with _patch_discovery(), patch(
        "homeassistant.components.senseme.async_setup_entry",
        return_value=True,
    ) as mock_setup_entry:
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DISCOVERY},
            data={CONF_ID: MOCK_UUID},
        )
        assert result["type"] == RESULT_TYPE_FORM
        assert not result["errors"]
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {
                "device": MOCK_UUID,
            },
        )
        await hass.async_block_till_done()

    assert result2["type"] == RESULT_TYPE_CREATE_ENTRY
    assert result2["title"] == "Haiku Fan"
    assert result2["data"] == {
        "info": MOCK_DEVICE.get_device_info,
    }
    assert len(mock_setup_entry.mock_calls) == 1
async def test_discovery_existing_device_no_ip_change(hass: HomeAssistant) -> None:
    """Test we can setup a discovered device."""
    # An entry for the SAME device already exists: re-discovery must
    # abort with "already_configured".
    entry = MockConfigEntry(
        domain=DOMAIN,
        data={
            "info": MOCK_DEVICE.get_device_info,
        },
        unique_id=MOCK_DEVICE.uuid,
    )
    entry.add_to_hass(hass)
    with _patch_discovery(), patch(
        "homeassistant.components.senseme.async_get_device_by_device_info",
        return_value=(True, MOCK_DEVICE),
    ):
        await hass.config_entries.async_setup(entry.entry_id)
        await hass.async_block_till_done()

    with _patch_discovery():
        result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DISCOVERY},
            data={CONF_ID: MOCK_UUID},
        )

    assert result["type"] == RESULT_TYPE_ABORT
    assert result["reason"] == "already_configured"
async def test_discovery_existing_device_ip_change(hass: HomeAssistant) -> None:
    """Test a config entry ips get updated from discovery."""
    # Entry was stored with the original address; discovery now reports the
    # same device (same unique_id) at an alternate IP.
    existing = MockConfigEntry(
        domain=DOMAIN,
        data={"info": MOCK_DEVICE.get_device_info},
        unique_id=MOCK_DEVICE.uuid,
    )
    existing.add_to_hass(hass)
    with _patch_discovery(device=MOCK_DEVICE_ALTERNATE_IP), patch(
        "homeassistant.components.senseme.async_get_device_by_device_info",
        return_value=(True, MOCK_DEVICE),
    ):
        await hass.config_entries.async_setup(existing.entry_id)
        await hass.async_block_till_done()
        flow_result = await hass.config_entries.flow.async_init(
            DOMAIN,
            context={"source": config_entries.SOURCE_DISCOVERY},
            data={CONF_ID: MOCK_UUID},
        )
        await hass.async_block_till_done()

    # The flow aborts as a duplicate, but the stored address is refreshed.
    assert flow_result["type"] == RESULT_TYPE_ABORT
    assert flow_result["reason"] == "already_configured"
    assert existing.data["info"]["address"] == "127.0.0.8"
| 31.893993
| 86
| 0.637381
| 1,045
| 9,026
| 5.183732
| 0.090909
| 0.049843
| 0.049843
| 0.073103
| 0.859332
| 0.853794
| 0.82758
| 0.82758
| 0.817611
| 0.812627
| 0
| 0.006726
| 0.258808
| 9,026
| 282
| 87
| 32.007092
| 0.80299
| 0.003213
| 0
| 0.652174
| 0
| 0
| 0.128024
| 0.07142
| 0
| 0
| 0
| 0
| 0.178261
| 1
| 0
| false
| 0
| 0.034783
| 0
| 0.034783
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 6
|
e0a210caaa0158f2bf99f0dfaac0adbbb6ca3e19
| 47
|
py
|
Python
|
tx_tecreports/fetcher/__init__.py
|
texas/tx_tecreports
|
1e2311c8355117b45d7b23a4db41512c0cb6ce80
|
[
"Apache-2.0"
] | 1
|
2021-02-19T20:16:19.000Z
|
2021-02-19T20:16:19.000Z
|
tx_tecreports/fetcher/__init__.py
|
texas/tx_tecreports
|
1e2311c8355117b45d7b23a4db41512c0cb6ce80
|
[
"Apache-2.0"
] | null | null | null |
tx_tecreports/fetcher/__init__.py
|
texas/tx_tecreports
|
1e2311c8355117b45d7b23a4db41512c0cb6ce80
|
[
"Apache-2.0"
] | null | null | null |
from .base import get_report, get_filings_list
| 23.5
| 46
| 0.851064
| 8
| 47
| 4.625
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.106383
| 47
| 1
| 47
| 47
| 0.880952
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
e0adda8542fb99baf9979a7ae02f0a259d300bf1
| 18
|
py
|
Python
|
utils/__init__.py
|
TorchCraft/tc-bumper
|
69b4c6019d63c30af64f01b2e52ba84cd1ad859c
|
[
"MIT"
] | 1
|
2021-10-31T14:01:23.000Z
|
2021-10-31T14:01:23.000Z
|
utils/__init__.py
|
TorchCraft/tc-bumper
|
69b4c6019d63c30af64f01b2e52ba84cd1ad859c
|
[
"MIT"
] | 1
|
2017-03-18T16:56:24.000Z
|
2017-03-18T16:56:24.000Z
|
utils/__init__.py
|
edran/tc-bumper
|
69b4c6019d63c30af64f01b2e52ba84cd1ad859c
|
[
"MIT"
] | 1
|
2021-10-31T14:01:14.000Z
|
2021-10-31T14:01:14.000Z
|
from cd import cd
| 9
| 17
| 0.777778
| 4
| 18
| 3.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 18
| 1
| 18
| 18
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 6
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.